gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
"""
Command-line interface to the app.
"""
# Copyright (c) 2013 contributors; see AUTHORS.
# Licensed under the Apache License, Version 2.0
# https://www.apache.org/licenses/LICENSE-2.0
import glob
import os
import sys
import plac # pylint: disable=F0401
from gossamer.main import dispatch
from gossamer.constant import modes, exits, states, \
DEFAULT_WEBDRIVER, DEFAULT_TESTFILE, \
DEFAULT_DIFFCOLOR, DEFAULT_SCREENSIZE, \
DEFAULT_BROWSER
from gossamer import util, exc
from gossamer import __version__
# plac derives the CLI from this decorator plus the function signature:
# each Annotation is (help, kind, abbrev, type, choices, metavar).
@plac.annotations(
    names = plac.Annotation(
        'Test case name(s) to use, comma-separated',
    ),
    postdata = plac.Annotation(
        'File for POST data or - for stdin'
    ),
    testfile = plac.Annotation(
        'Test file(s) to use',
        'option', 'f', str,
        metavar='GLOB'
    ),
    record = plac.Annotation(
        'Record a new test',
        'flag', 'r'
    ),
    rerecord = plac.Annotation(
        'Re-run the test but take new screenshots',
        'flag', 'rr'
    ),
    selenium = plac.Annotation(
        'Selenium WebDriver URL to use',
        'option', 's',
        metavar=DEFAULT_WEBDRIVER
    ),
    browser = plac.Annotation(
        'Browser to use, either firefox, chrome, phantomjs, ie, or opera',
        'option', 'b', str,
        metavar=DEFAULT_BROWSER,
    ),
    screensize = plac.Annotation(
        'Width and height for screen (i.e. 1024x768)',
        'option', 'z',
        metavar=DEFAULT_SCREENSIZE
    ),
    diffcolor = plac.Annotation(
        'Diff color for errors in RGB (i.e. 0,255,0)',
        'option', 'p', str,
        metavar=DEFAULT_DIFFCOLOR
    ),
    save_diff = plac.Annotation(
        'Save information about failures as last.png and diff.png',
        'flag', 'e'
    ),
    overwrite = plac.Annotation(
        'Overwrite existing tests without asking',
        'flag', 'o'
    ),
    data_dir = plac.Annotation(
        'Directory in which tests should be stored',
        'option', 'd', str,
    ),
    version = plac.Annotation(
        'Get the current version',
        'flag', 'version'
    ),
    verbose = plac.Annotation(
        'Verbosity, with -v as logging.DEBUG',
        'flag', 'v', 'verbose'
    ),
    stop_on_error = plac.Annotation(
        'During playback, stop on error',
        'flag', 't', 'stop'
    ) # pylint: disable=R0915,R0912,R0911,R0914
)
def initialize(
    names=None,
    testfile=None,
    record=False,
    rerecord=False,
    selenium=None,
    postdata=None,
    browser=None,
    screensize=None,
    diffcolor=None,
    save_diff=False,
    overwrite=False,
    data_dir=None,
    version=False,
    verbose=False,
    stop_on_error=False
    ): # pylint: disable=R0913,W0613
    """
    Gossamer CLI entry point.

    Resolves test files, data directory and run mode (record / rerecord /
    playback) from the command-line arguments, builds the tests via
    ``util.make_tests`` and runs each one through ``dispatch`` with its own
    WebDriver. Returns an exit code from ``gossamer.constant.exits``.
    """
    # --version short-circuits everything else.
    if version:
        sys.stdout.write('Gossamer %s\n' % __version__)
        sys.stdout.flush()
        return exits.OK
    sys.stdout.write('Initializing gossamer and opening WebDriver...\n')
    sys.stdout.flush()
    if verbose:
        util.log = util.logger(__name__, 'DEBUG')
    names = names.split(',') if names else None
    browser = browser or DEFAULT_BROWSER
    cwd = os.getcwd()
    # Expand each comma-separated glob pattern relative to the cwd.
    test_files = []
    for pattern in (testfile or DEFAULT_TESTFILE).split(','):
        for name in glob.glob(pattern):
            test_files.append(os.path.join(cwd, name))
    if len(test_files) == 0:
        sys.stdout.write('No Gossamerfile found.\n')
        sys.stdout.flush()
        return exits.ERROR
    # data_dir: default to ./gossamer; relative paths are anchored at the cwd.
    if not data_dir:
        data_dir = os.path.join(cwd, 'gossamer')
        sys.stdout.write('Default data directory of %s\n' % data_dir)
        sys.stdout.flush()
    else:
        if not os.path.isabs(data_dir):
            data_dir = os.path.join(os.getcwd(), data_dir)
    # mode: -r and -rr are mutually exclusive; neither means playback.
    if record and rerecord:
        sys.stdout.write('Cannot specify both -r and -rr\n')
        sys.stdout.flush()
        return exits.ARGUMENT_ERROR
    if record:
        mode = modes.RECORD
    elif rerecord:
        mode = modes.RERECORD
    else:
        mode = modes.PLAYBACK
    attrs = (
        'names', 'selenium', 'postdata',
        'browser', 'screensize', 'diffcolor', 'save_diff', 'overwrite'
    )
    # NOTE(review): locals() inside a comprehension only sees the function's
    # locals on Python 2; on Python 3 the comprehension has its own scope and
    # this would raise KeyError -- confirm the targeted runtime.
    options = {
        key: val for key, val in \
        [(each, locals()[each]) for each in attrs]
    }
    # make tests using the test_files and mode we've resolved to
    try:
        tests = util.make_tests(test_files, mode, data_dir, **options)
    except (exc.DoNotOverwrite, exc.WebDriverConnectionFailed) as exception:
        sys.stdout.write(str(exception))
        sys.stdout.write('\n')
        sys.stdout.flush()
        return exits.ERROR
    except (exc.RecordedRunDoesNotExist, exc.RecordedRunEmpty,
        exc.CouldNotParseRecordedRun) as exception:
        sys.stdout.write(str(exception))
        sys.stdout.write('\n')
        sys.stdout.flush()
        return exits.RECORDED_RUN_ERROR
    if mode == modes.RECORD:
        sys.stdout.write('Recording...\n\n')
    elif mode == modes.RERECORD:
        sys.stdout.write('Rerecording...\n\n')
    elif mode == modes.PLAYBACK:
        sys.stdout.write('Playing back tests...\n\n')
    sys.stdout.flush()
    results = {}
    errs = {}
    driver = None
    try:
        # One fresh driver per test; the previous one is closed first.
        for key, test in tests.items():
            if driver is not None:
                util.close_driver(driver)
            try:
                driver = util.get_driver(test.settings.browser, selenium)
            except exc.WebDriverConnectionFailed:
                sys.stderr.write(
                    'We cannot connect to the WebDriver %s -- is it running?\n' % selenium
                )
                return exits.ERROR
            # run the tests
            try:
                result, err = dispatch(driver, mode, test)
                results[key] = result
                errs[key] = err
                # -t/--stop aborts the remaining tests on the first failure.
                if (not result or result in (states.FAIL, states.ERROR)) and stop_on_error:
                    break
            except exc.NoScreenshotsRecorded as exception:
                sys.stdout.write(str(exception))
                sys.stdout.flush()
                return exits.ERROR
            sys.stdout.write('\n')
            sys.stdout.flush()
    finally:
        # Always release the WebDriver, even on early return.
        util.close_driver(driver)
    # Playback prints a unittest-style FAILED/OK summary.
    if mode == modes.PLAYBACK:
        fails = sum(x is states.FAIL for _, x in results.items())
        errors = sum(x is states.ERROR for _, x in results.items())
        if fails > 0 or errors > 0:
            msg = []
            if fails > 0:
                msg.append('failed=%s' % fails)
            if errors > 0:
                msg.append('errors=%s' % errors)
            sys.stdout.write(
                '\nFAILED (%s)\n' % ', '.join(msg)
            )
            sys.stdout.flush()
            return exits.FAILED
        else:
            sys.stdout.write('\nOK\n')
            sys.stdout.flush()
            return exits.OK
    return exits.OK
def main():
    """
    Defined as the `gossamer` command in setup.py.

    Runs the argument parser, forwards the parsed settings to
    :func:`initialize`, and exits with its return code (or 1 on Ctrl-C).
    """
    try:
        status = plac.call(initialize)
    except KeyboardInterrupt:
        # Leave the shell prompt on a fresh line after Ctrl-C.
        sys.stdout.write('\n')
        sys.stdout.flush()
        sys.exit(1)
    sys.exit(status)
| |
#Problem Set
#Name: Andrew Trattner
#Collaborators: none
#Time Spent: 0:
#for leslie kaelbling bling bling
#Michael and Andy
#Weight of time and solution cost
import time
import heapq
from math import log
import sys
import random
class Puzzle:
    """
    A 15-puzzle instance.

    Takes ``starting_state`` as a well-formed tuple of 16 entries, where 0
    marks the empty slot. ``next_states`` returns the successor states and
    ``goal_state`` is the solved board.
    """
    def __init__(self, starting_state):
        # 0 represents the empty slot in the puzzle.
        self.goal_state = (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0)
        self.initial_state = starting_state

    def next_states(self, current_state):
        """Return every state reachable by sliding one tile into the blank.

        Successors are produced in the fixed order up, down, left, right
        (when each move is legal on the 4x4 board).
        """
        blank = current_state.index(0)
        up, down, left, right = blank - 4, blank + 4, blank - 1, blank + 1
        moves = []
        if up >= 0:
            # Not on the top row.
            moves.append(up)
        if down <= 15:
            # Not on the bottom row.
            moves.append(down)
        if left % 4 < blank % 4:
            # Moving left stays on the same row: the column must decrease.
            # (For blank in column 0, left lands in column 3 -- rejected.)
            moves.append(left)
        if right % 4 > blank % 4:
            # Moving right stays on the same row: the column must increase.
            # (For blank in column 3, right lands in column 0 -- rejected.)
            moves.append(right)
        successors = []
        for pos in moves:
            board = list(current_state)
            # Swap the blank with the neighbouring tile.
            board[blank], board[pos] = board[pos], board[blank]
            successors.append(tuple(board))
        return successors
#make a*
def a_star(puzzle, steps):
index_state = 1
index_parent_path = 2
index_cost = 0
index_birth_time = 3
percent = 0
closed = []
initial_distance = sys.maxint
frontier = [(sys.maxint, puzzle.initial_state,[])]
#States are composed of (cost, state, parent path)
#goal state dictionary allows for quick lookup for Manhattan Dist Calc
goal_state_dictionary = convert_to_tuples(puzzle.goal_state)
stopnow = 0
goal_dictionary = convert_to_tuples(puzzle.goal_state)
while len(frontier) > 0 and stopnow < steps:
#pop off element and check if goal. mark state as visited
current = heapq.heappop(frontier)
if puzzle.goal_state == current[index_state]:
current[index_parent_path].append(current[index_state])
return current[index_parent_path]
closed.append(current)
#expand state using Manhattan Distance heuristic
for state in puzzle.next_states(current[index_state]):
changed_frontier = False
parent_path = current[index_parent_path][:]
parent_path.append(current[index_state])
cost = len(parent_path) + 3* man_dist(state, goal_dictionary)
child = (cost, state, parent_path)
for state in frontier:
if child[index_state] == state[index_state]:
frontier = update_best_state_frontier(child, frontier, index_state, index_cost)
changed_frontier = True
break
if child[index_state] in closed:
pass
elif not(changed_frontier):
heapq.heappush(frontier, child)
if stopnow / float(steps) * 100 > percent:
print str(percent) + ' percent complete'
percent += 1
stopnow+=1
return_msg = 'search terminated due to timeout, length of frontier is: ' + str(len(frontier))
return return_msg
def bugsy(puzzle, steps):
"""
BUGSY(initial, U())
Utility = U_default - min(over children) { wf*cost + wt*time }
-U_default is utility of returning empty solution
-cost is length of parent path + manhattan distance
-time is distance to end from current (manhattan) * delay * t_exp
where delay is number of extra expansions estimated in between useful progress
and t_exp is typical time to expand each node
-->these parameters can be updated in realtime or they may be calculated beforehand (training)
U* = -(wf*cost + wt*nodes_on_s*t_exp)
u* = U* or U*-wt*t_exp
-->t_exp is time to perform expansion of node
estimating Max Util:
1. estimate cost of solution find beneath each node as f
2. estimates number expansions required to find a solution beneath each node n, exp(n) -- can be dist heuristic d
3. exp(n) = delay * d(n) since delay expansions expected on each of d's steps
Bugsy can stop and return empty or expand a node. Each node in frontier is possible outcome, so max util based on open nodes:
U_hat = max{ max(n in frontier){ -wf*f(n)+wt*d(n)*delay*t_exp }, U(empty,0)}
once uhat is found, substitute for U* to estimate u*
-->note that only expanding one node, so no need to estimate u* for all frontier nodes
-->note that computing maximization each time is unnecessary since simply ordering on u(n) is sufficient
UTILITY DETAILS:
"""
index_state = 1
index_parent_path = 2
index_cost = 0
index_birth_time = 3
DELAY = 1
T_EXP = 1
w_f = 1
w_t = 1
percent = 0
closed = []
initial_util = sys.maxint
frontier = [(initial_util, puzzle.initial_state,[])]
#States are composed of (utility, state, parent path)
#goal state dictionary allows for quick lookup for Manhattan Dist Calc
goal_state_dictionary = convert_to_tuples(puzzle.goal_state)
stopnow = 0
while len(frontier) > 0 and stopnow < steps:
#pop off MIN element and check if goal. mark state as visited
current = heapq.heappop(frontier)
if puzzle.goal_state == current[index_state]:
current[index_parent_path].append(current[index_state])
return current[index_parent_path]
closed.append(current[index_state])
#expand state using Manhattan Distance heuristic
for state in puzzle.next_states(current[index_state]):
changed_frontier = False
parent_path = current[index_parent_path][:]
parent_path.append(current[index_state])
util = calculate_utility(len(parent_path), state, goal_state_dictionary, w_f, w_t, DELAY, T_EXP)
child = (util, state, parent_path)
for state in frontier:
if child[index_state] == state[index_state]:
frontier = update_best_state_frontier(child, frontier, index_state, index_cost)
changed_frontier = True
break
if child[index_state] in closed:
pass
elif not(changed_frontier):
heapq.heappush(frontier, child)
if stopnow / (steps/100.) > percent:
print str(percent) + ' percent complete'
percent += 1
stopnow+=1
return_msg = 'search terminated due to timeout, length of frontier is: ' + len(frontier)
return return_msg
def convert_to_tuples(state):
    """Map each tile value to its (column, row) position on the 4x4 board."""
    positions = {}
    for index, tile in enumerate(state):
        positions[tile] = (index % 4, index // 4)
    return positions
def calculate_utility(parent_path_length, state, goal_state_dictionary, w_f, w_t, delay, t_exp):
    """Utility of a node: weighted f-cost plus weighted estimated search time.

    The Manhattan distance serves double duty as the cost-to-go estimate and
    as the expected number of expansions remaining.
    """
    distance = man_dist(state, goal_state_dictionary)
    return w_f * (parent_path_length + distance) + w_t * distance * delay * t_exp
def man_dist(puzzle_state, goal_state_dict):
    """Total Manhattan distance of tiles 1..N-1 from their goal positions.

    The blank (tile 0) is deliberately excluded from the sum.
    """
    current = convert_to_tuples(puzzle_state)
    total = 0
    for tile in xrange(1, len(goal_state_dict)):
        cx, cy = current[tile]
        gx, gy = goal_state_dict[tile]
        total += abs(cx - gx) + abs(cy - gy)
    return total
def find_uhat(frontier, count, u, delay, t_exp, w_t, w_f, g, Uhat):
    """Periodically rescore the frontier and update the running max utility.

    Every time `count` is a power of two the whole frontier is re-evaluated
    with the utility function `u`, re-heapified, and the best utility seen is
    folded into `Uhat`. Otherwise the frontier is returned untouched.

    Returns (frontier, Uhat) in both cases.

    BUG FIXES vs. the original:
      - `type(log(count, 2)) is int` was always False (math.log returns a
        float), so the rescoring branch was dead; use a power-of-two test.
      - the rescoring branch returned `heapq.heapify(new_frontier)`, which is
        None (heapify works in place); now the rescored list itself is
        returned.
    """
    # Power-of-two check: count has exactly one bit set.
    if count > 0 and (count & (count - 1)) == 0:
        rescored = []
        best = Uhat
        for node in frontier:
            util = u(delay, t_exp, w_t, w_f, g, node)
            # Nodes are (util, state, parent_path, birth_time) tuples.
            rescored.append((util, node[1], node[2], node[3]))
            best = max(best, util)
        heapq.heapify(rescored)
        return (rescored, best)
    return (frontier, Uhat)
def shuffle(n):
    """Random-walk n blank-moves away from the solved board; return the state.

    Note: the result may be reachable in fewer than n moves, since the walk
    can retrace itself.
    """
    puzzle = Puzzle((1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0))
    state = puzzle.goal_state
    for _ in xrange(n):
        successors = puzzle.next_states(state)
        # Pick a uniformly random successor.
        state = successors[int(random.random() * len(successors))]
    return state
def update_best_state_frontier(state, frontier, index_state, index_cost):
    """Replace any frontier entry holding the same puzzle state, in place,
    when the new entry is cheaper. Returns the (mutated) frontier."""
    for pos, entry in enumerate(frontier):
        if entry[index_state] == state[index_state]:
            if entry[index_cost] > state[index_cost]:
                frontier[pos] = state
    return frontier
#test cases
# Module-level benchmark script: runs A* and BUGSY on one scrambled board
# and prints the solution paths and wall-clock times.
times = []
#it works!!! .555 for A*, 0 for bugsy (3, 7, 0, 4, 1, 6, 2, 8, 5, 10, 13, 12, 9, 14, 11, 15)
#(2, 6, 9, 4, 5, 10, 3, 0, 1, 14, 7, 8, 13, 15, 12, 11) A* times out, bugsy finds in .05 ms
#test case 1
start_state = (2, 6, 9, 4, 5, 10, 3, 0, 1, 14, 7, 8, 13, 15, 12, 11) #shuffle(60)
# with time diff 0.546999931335
new_puz = Puzzle(start_state)
goal_state_dict = convert_to_tuples(new_puz.goal_state)
#do A*
start_time = time.time()
print a_star(new_puz, 10000)
end_time = time.time()
# NOTE(review): time.time() differences are seconds, not ms as labeled.
print 'A* takes: ' + str(end_time - start_time) + ' ms.'
#bugsy
start_time = time.time()
print bugsy(new_puz, 10000)
end_time = time.time()
print 'bugsy takes: ' + str(end_time - start_time) + ' ms.'
#should test if child in closed and has better util now?
| |
# -*- coding: utf-8 -*-
"""
werkzeug.urls
~~~~~~~~~~~~~
``werkzeug.urls`` used to provide several wrapper functions for Python 2
urlparse, whose main purpose were to work around the behavior of the Py2
stdlib and its lack of unicode support. While this was already a somewhat
inconvenient situation, it got even more complicated because Python 3's
``urllib.parse`` actually does handle unicode properly. In other words,
this module would wrap two libraries with completely different behavior. So
now this module contains a 2-and-3-compatible backport of Python 3's
``urllib.parse``, which is mostly API-compatible.
:copyright: 2007 Pallets
:license: BSD-3-Clause
"""
import codecs
import os
import re
from collections import namedtuple
from ._compat import fix_tuple_repr
from ._compat import implements_to_string
from ._compat import make_literal_wrapper
from ._compat import normalize_string_tuple
from ._compat import PY2
from ._compat import text_type
from ._compat import to_native
from ._compat import to_unicode
from ._compat import try_coerce_native
from ._internal import _decode_idna
from ._internal import _encode_idna
from .datastructures import iter_multi_items
from .datastructures import MultiDict
# A regular expression for what a valid schema looks like
# NOTE(review): the class "+-." is the character range '+'..'.', which also
# admits ','; RFC 3986 schemes allow only ALPHA / DIGIT / "+" / "-" / "." --
# confirm before tightening, as callers may rely on the looser match.
_scheme_re = re.compile(r"^[a-zA-Z0-9+-.]+$")
# Characters that are safe in any part of an URL.
_always_safe = frozenset(
    bytearray(
        b"abcdefghijklmnopqrstuvwxyz"
        b"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        b"0123456789"
        b"-._~"
    )
)
_hexdigits = "0123456789ABCDEFabcdef"
# Maps every two-hex-digit byte string (e.g. b"2F") to the byte it encodes.
_hextobyte = dict(
    ((a + b).encode(), int(a + b, 16)) for a in _hexdigits for b in _hexdigits
)
# Precomputed percent-escape (e.g. b"%2F") for every possible byte value.
_bytetohex = [("%%%02X" % char).encode("ascii") for char in range(256)]
# Parse-result tuple shared by URL (text) and BytesURL (bytes).
_URLTuple = fix_tuple_repr(
    namedtuple("_URLTuple", ["scheme", "netloc", "path", "query", "fragment"])
)
class BaseURL(_URLTuple):
    """Superclass of :py:class:`URL` and :py:class:`BytesURL`.

    Subclasses supply the delimiter constants (``_at``, ``_colon``,
    ``_lbracket``, ``_rbracket``) in their own string type, so the splitting
    helpers below work on both text and bytes.
    """
    __slots__ = ()
    def replace(self, **kwargs):
        """Return an URL with the same values, except for those parameters
        given new values by whichever keyword arguments are specified."""
        return self._replace(**kwargs)
    @property
    def host(self):
        """The host part of the URL if available, otherwise `None`. The
        host is either the hostname or the IP address mentioned in the
        URL. It will not contain the port.
        """
        return self._split_host()[0]
    @property
    def ascii_host(self):
        """Works exactly like :attr:`host` but will return a result that
        is restricted to ASCII. If it finds a netloc that is not ASCII
        it will attempt to idna decode it. This is useful for socket
        operations when the URL might include internationalized characters.
        """
        rv = self.host
        if rv is not None and isinstance(rv, text_type):
            try:
                rv = _encode_idna(rv)
            except UnicodeError:
                # Hosts that are not valid IDNA fall back to lossy ASCII.
                rv = rv.encode("ascii", "ignore")
        return to_native(rv, "ascii", "ignore")
    @property
    def port(self):
        """The port in the URL as an integer if it was present, `None`
        otherwise. This does not fill in default ports.
        """
        try:
            rv = int(to_native(self._split_host()[1]))
            if 0 <= rv <= 65535:
                return rv
        except (ValueError, TypeError):
            # Missing or non-numeric port: fall through and return None.
            pass
    @property
    def auth(self):
        """The authentication part in the URL if available, `None`
        otherwise.
        """
        return self._split_netloc()[0]
    @property
    def username(self):
        """The username if it was part of the URL, `None` otherwise.
        This undergoes URL decoding and will always be a unicode string.
        """
        rv = self._split_auth()[0]
        if rv is not None:
            return _url_unquote_legacy(rv)
    @property
    def raw_username(self):
        """The username if it was part of the URL, `None` otherwise.
        Unlike :attr:`username` this one is not being decoded.
        """
        return self._split_auth()[0]
    @property
    def password(self):
        """The password if it was part of the URL, `None` otherwise.
        This undergoes URL decoding and will always be a unicode string.
        """
        rv = self._split_auth()[1]
        if rv is not None:
            return _url_unquote_legacy(rv)
    @property
    def raw_password(self):
        """The password if it was part of the URL, `None` otherwise.
        Unlike :attr:`password` this one is not being decoded.
        """
        return self._split_auth()[1]
    def decode_query(self, *args, **kwargs):
        """Decodes the query part of the URL. Ths is a shortcut for
        calling :func:`url_decode` on the query argument. The arguments and
        keyword arguments are forwarded to :func:`url_decode` unchanged.
        """
        return url_decode(self.query, *args, **kwargs)
    def join(self, *args, **kwargs):
        """Joins this URL with another one. This is just a convenience
        function for calling into :meth:`url_join` and then parsing the
        return value again.
        """
        return url_parse(url_join(self, *args, **kwargs))
    def to_url(self):
        """Returns a URL string or bytes depending on the type of the
        information stored. This is just a convenience function
        for calling :meth:`url_unparse` for this URL.
        """
        return url_unparse(self)
    def decode_netloc(self):
        """Decodes the netloc part into a string.

        Reassembles ``user:password@host:port`` with the host IDNA-decoded
        and the credentials legacy-unquoted.
        """
        rv = _decode_idna(self.host or "")
        if ":" in rv:
            # IPv6 literals must be re-bracketed.
            rv = "[%s]" % rv
        port = self.port
        if port is not None:
            rv = "%s:%d" % (rv, port)
        auth = ":".join(
            filter(
                None,
                [
                    _url_unquote_legacy(self.raw_username or "", "/:%@"),
                    _url_unquote_legacy(self.raw_password or "", "/:%@"),
                ],
            )
        )
        if auth:
            rv = "%s@%s" % (auth, rv)
        return rv
    def to_uri_tuple(self):
        """Returns a :class:`BytesURL` tuple that holds a URI. This will
        encode all the information in the URL properly to ASCII using the
        rules a web browser would follow.
        It's usually more interesting to directly call :meth:`iri_to_uri` which
        will return a string.
        """
        return url_parse(iri_to_uri(self).encode("ascii"))
    def to_iri_tuple(self):
        """Returns a :class:`URL` tuple that holds a IRI. This will try
        to decode as much information as possible in the URL without
        losing information similar to how a web browser does it for the
        URL bar.
        It's usually more interesting to directly call :meth:`uri_to_iri` which
        will return a string.
        """
        return url_parse(uri_to_iri(self))
    def get_file_location(self, pathformat=None):
        """Returns a tuple with the location of the file in the form
        ``(server, location)``. If the netloc is empty in the URL or
        points to localhost, it's represented as ``None``.
        The `pathformat` by default is autodetection but needs to be set
        when working with URLs of a specific system. The supported values
        are ``'windows'`` when working with Windows or DOS paths and
        ``'posix'`` when working with posix paths.
        If the URL does not point to a local file, the server and location
        are both represented as ``None``.
        :param pathformat: The expected format of the path component.
                           Currently ``'windows'`` and ``'posix'`` are
                           supported. Defaults to ``None`` which is
                           autodetect.
        """
        if self.scheme != "file":
            return None, None
        path = url_unquote(self.path)
        host = self.netloc or None
        if pathformat is None:
            # Autodetect from the running OS.
            if os.name == "nt":
                pathformat = "windows"
            else:
                pathformat = "posix"
        if pathformat == "windows":
            # Turn "/C:/..." (or the legacy "/C|/...") into "C:/...".
            if path[:1] == "/" and path[1:2].isalpha() and path[2:3] in "|:":
                path = path[1:2] + ":" + path[3:]
            windows_share = path[:3] in ("\\" * 3, "/" * 3)
            import ntpath
            path = ntpath.normpath(path)
            # Windows shared drives are represented as ``\\host\\directory``.
            # That results in a URL like ``file://///host/directory``, and a
            # path like ``///host/directory``. We need to special-case this
            # because the path contains the hostname.
            if windows_share and host is None:
                parts = path.lstrip("\\").split("\\", 1)
                if len(parts) == 2:
                    host, path = parts
                else:
                    host = parts[0]
                    path = ""
        elif pathformat == "posix":
            import posixpath
            path = posixpath.normpath(path)
        else:
            raise TypeError("Invalid path format %s" % repr(pathformat))
        if host in ("127.0.0.1", "::1", "localhost"):
            host = None
        return host, path
    def _split_netloc(self):
        # -> (auth-or-None, host[:port]) split on the first "@".
        if self._at in self.netloc:
            return self.netloc.split(self._at, 1)
        return None, self.netloc
    def _split_auth(self):
        # -> (username-or-None, password-or-None) split on the first ":".
        auth = self._split_netloc()[0]
        if not auth:
            return None, None
        if self._colon not in auth:
            return auth, None
        return auth.split(self._colon, 1)
    def _split_host(self):
        # -> (host-or-None, port-or-None); handles bracketed IPv6 literals.
        rv = self._split_netloc()[1]
        if not rv:
            return None, None
        if not rv.startswith(self._lbracket):
            if self._colon in rv:
                return rv.split(self._colon, 1)
            return rv, None
        idx = rv.find(self._rbracket)
        if idx < 0:
            return rv, None
        host = rv[1:idx]
        rest = rv[idx + 1 :]
        if rest.startswith(self._colon):
            return host, rest[1:]
        return host, None
@implements_to_string
class URL(BaseURL):
    """Represents a parsed URL. This behaves like a regular tuple but
    also has some extra attributes that give further insight into the
    URL.

    All components are text; delimiters below are the text-mode
    counterparts of the bytes in :class:`BytesURL`.
    """
    __slots__ = ()
    _at = "@"
    _colon = ":"
    _lbracket = "["
    _rbracket = "]"
    def __str__(self):
        return self.to_url()
    def encode_netloc(self):
        """Encodes the netloc part to an ASCII safe URL as bytes."""
        rv = self.ascii_host or ""
        if ":" in rv:
            # IPv6 literals must be re-bracketed.
            rv = "[%s]" % rv
        port = self.port
        if port is not None:
            rv = "%s:%d" % (rv, port)
        # Credentials are percent-quoted; "/", ":" and "%" stay escaped.
        auth = ":".join(
            filter(
                None,
                [
                    url_quote(self.raw_username or "", "utf-8", "strict", "/:%"),
                    url_quote(self.raw_password or "", "utf-8", "strict", "/:%"),
                ],
            )
        )
        if auth:
            rv = "%s@%s" % (auth, rv)
        return to_native(rv)
    def encode(self, charset="utf-8", errors="replace"):
        """Encodes the URL to a tuple made out of bytes. The charset is
        only being used for the path, query and fragment.
        """
        return BytesURL(
            self.scheme.encode("ascii"),
            self.encode_netloc(),
            self.path.encode(charset, errors),
            self.query.encode(charset, errors),
            self.fragment.encode(charset, errors),
        )
class BytesURL(BaseURL):
    """Represents a parsed URL in bytes.

    All components are bytes; delimiters below are the bytes-mode
    counterparts of the text in :class:`URL`.
    """
    __slots__ = ()
    _at = b"@"
    _colon = b":"
    _lbracket = b"["
    _rbracket = b"]"
    def __str__(self):
        return self.to_url().decode("utf-8", "replace")
    def encode_netloc(self):
        """Returns the netloc unchanged as bytes."""
        return self.netloc
    def decode(self, charset="utf-8", errors="replace"):
        """Decodes the URL to a tuple made out of strings. The charset is
        only being used for the path, query and fragment.
        """
        return URL(
            self.scheme.decode("ascii"),
            self.decode_netloc(),
            self.path.decode(charset, errors),
            self.query.decode(charset, errors),
            self.fragment.decode(charset, errors),
        )
# Cache of hex-lookup tables keyed by the frozenset of "unsafe" bytes.
_unquote_maps = {frozenset(): _hextobyte}
def _unquote_to_bytes(string, unsafe=""):
    # Percent-decode `string` to bytes. Escapes that decode to a byte listed
    # in `unsafe` are left untouched (the "%XX" text survives verbatim), as
    # are malformed escapes.
    if isinstance(string, text_type):
        string = string.encode("utf-8")
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode("utf-8")
    unsafe = frozenset(bytearray(unsafe))
    # Everything before the first "%" needs no decoding.
    groups = iter(string.split(b"%"))
    result = bytearray(next(groups, b""))
    try:
        hex_to_byte = _unquote_maps[unsafe]
    except KeyError:
        # Build (and cache) a lookup table that excludes the unsafe bytes.
        hex_to_byte = _unquote_maps[unsafe] = {
            h: b for h, b in _hextobyte.items() if b not in unsafe
        }
    for group in groups:
        code = group[:2]
        if code in hex_to_byte:
            result.append(hex_to_byte[code])
            result.extend(group[2:])
        else:
            # Unsafe or invalid escape: re-emit the literal "%".
            result.append(37)  # %
            result.extend(group)
    return bytes(result)
def _url_encode_impl(obj, charset, encode_keys, sort, key):
    # Generator backing url_encode: yields quoted "key=value" pairs.
    # NOTE(review): `encode_keys` is accepted but never read here --
    # presumably a legacy Python 2 flag kept for signature compatibility;
    # confirm against the caller before removing.
    iterable = iter_multi_items(obj)
    if sort:
        iterable = sorted(iterable, key=key)
    for key, value in iterable:
        if value is None:
            # None means "omit this parameter entirely".
            continue
        if not isinstance(key, bytes):
            key = text_type(key).encode(charset)
        if not isinstance(value, bytes):
            value = text_type(value).encode(charset)
        yield _fast_url_quote_plus(key) + "=" + _fast_url_quote_plus(value)
def _url_unquote_legacy(value, unsafe=""):
    # Decode strictly as UTF-8 first; fall back to latin1 (which cannot
    # fail) for URLs produced by legacy systems.
    try:
        return url_unquote(value, charset="utf-8", errors="strict", unsafe=unsafe)
    except UnicodeError:
        return url_unquote(value, charset="latin1", unsafe=unsafe)
def url_parse(url, scheme=None, allow_fragments=True):
    """Parses a URL from a string into a :class:`URL` tuple. If the URL
    is lacking a scheme it can be provided as second argument. Otherwise,
    it is ignored. Optionally fragments can be stripped from the URL
    by setting `allow_fragments` to `False`.
    The inverse of this function is :func:`url_unparse`.
    :param url: the URL to parse.
    :param scheme: the default schema to use if the URL is schemaless.
    :param allow_fragments: if set to `False` a fragment will be removed
                            from the URL.
    """
    # `s` wraps literals so the same code handles both text and bytes input.
    s = make_literal_wrapper(url)
    is_text_based = isinstance(url, text_type)
    if scheme is None:
        scheme = s("")
    netloc = query = fragment = s("")
    i = url.find(s(":"))
    if i > 0 and _scheme_re.match(to_native(url[:i], errors="replace")):
        # make sure "iri" is not actually a port number (in which case
        # "scheme" is really part of the path)
        rest = url[i + 1 :]
        if not rest or any(c not in s("0123456789") for c in rest):
            # not a port number
            scheme, url = url[:i].lower(), rest
    if url[:2] == s("//"):
        # Authority present: netloc runs to the first "/", "?" or "#".
        delim = len(url)
        for c in s("/?#"):
            wdelim = url.find(c, 2)
            if wdelim >= 0:
                delim = min(delim, wdelim)
        netloc, url = url[2:delim], url[delim:]
        # Reject unbalanced IPv6 brackets early.
        if (s("[") in netloc and s("]") not in netloc) or (
            s("]") in netloc and s("[") not in netloc
        ):
            raise ValueError("Invalid IPv6 URL")
    if allow_fragments and s("#") in url:
        url, fragment = url.split(s("#"), 1)
    if s("?") in url:
        url, query = url.split(s("?"), 1)
    # Return the tuple type matching the input's string type.
    result_type = URL if is_text_based else BytesURL
    return result_type(scheme, netloc, url, query, fragment)
def _make_fast_url_quote(charset="utf-8", errors="strict", safe="/:", unsafe=""):
    """Precompile the translation table for a URL encoding function.
    Unlike :func:`url_quote`, the generated function only takes the
    string to quote.
    :param charset: The charset to encode the result with.
    :param errors: How to handle encoding errors.
    :param safe: An optional sequence of safe characters to never encode.
    :param unsafe: An optional sequence of unsafe characters to always encode.
    """
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode(charset, errors)
    safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
    # One precomputed entry per byte value: either the literal character or
    # its "%XX" escape.
    table = [chr(c) if c in safe else "%%%02X" % c for c in range(256)]
    if not PY2:
        # Iterating bytes yields ints on Python 3.
        def quote(string):
            return "".join([table[c] for c in string])
    else:
        # On Python 2 iterate via bytearray to get ints rather than chars.
        def quote(string):
            return "".join([table[c] for c in bytearray(string)])
    return quote
# Module-level quoters built once at import time for the hot paths.
_fast_url_quote = _make_fast_url_quote()
_fast_quote_plus = _make_fast_url_quote(safe=" ", unsafe="+")
def _fast_url_quote_plus(string):
    # Quote with the space kept safe, then convert spaces to "+".
    return _fast_quote_plus(string).replace(" ", "+")
def url_quote(string, charset="utf-8", errors="strict", safe="/:", unsafe=""):
    """URL encode a single string with a given encoding.
    :param s: the string to quote.
    :param charset: the charset to be used.
    :param safe: an optional sequence of safe characters.
    :param unsafe: an optional sequence of unsafe characters.
    .. versionadded:: 0.9.2
       The `unsafe` parameter was added.
    """
    # Coerce non-string input to text, then everything down to bytes.
    if not isinstance(string, (text_type, bytes, bytearray)):
        string = text_type(string)
    if isinstance(string, text_type):
        string = string.encode(charset, errors)
    if isinstance(safe, text_type):
        safe = safe.encode(charset, errors)
    if isinstance(unsafe, text_type):
        unsafe = unsafe.encode(charset, errors)
    # `unsafe` overrides both `safe` and the always-safe set.
    safe = (frozenset(bytearray(safe)) | _always_safe) - frozenset(bytearray(unsafe))
    rv = bytearray()
    for char in bytearray(string):
        if char in safe:
            rv.append(char)
        else:
            rv.extend(_bytetohex[char])
    # Return the platform-native string type.
    return to_native(bytes(rv))
def url_quote_plus(string, charset="utf-8", errors="strict", safe=""):
    """URL encode a single string with the given encoding and convert
    whitespace to "+".
    :param s: The string to quote.
    :param charset: The charset to be used.
    :param safe: An optional sequence of safe characters.
    """
    # Quote with the space marked safe and "+" marked unsafe, then turn the
    # surviving spaces into plus signs.
    quoted = url_quote(string, charset, errors, safe + " ", "+")
    return quoted.replace(" ", "+")
def url_unparse(components):
    """The reverse operation to :meth:`url_parse`. This accepts arbitrary
    as well as :class:`URL` tuples and returns a URL as a string.
    :param components: the parsed URL as tuple which should be converted
                       into a URL string.
    """
    # Normalize so all five parts share one string type (text or bytes).
    scheme, netloc, path, query, fragment = normalize_string_tuple(components)
    s = make_literal_wrapper(scheme)
    url = s("")
    # We generally treat file:///x and file:/x the same which is also
    # what browsers seem to do. This also allows us to ignore a schema
    # register for netloc utilization or having to differenciate between
    # empty and missing netloc.
    if netloc or (scheme and path.startswith(s("/"))):
        if path and path[:1] != s("/"):
            path = s("/") + path
        url = s("//") + (netloc or s("")) + path
    elif path:
        url += path
    if scheme:
        url = scheme + s(":") + url
    if query:
        url = url + s("?") + query
    if fragment:
        url = url + s("#") + fragment
    return url
def url_unquote(string, charset="utf-8", errors="replace", unsafe=""):
    """URL decode a single string with a given encoding. If the charset
    is set to `None` no unicode decoding is performed and raw bytes
    are returned.
    :param s: the string to unquote.
    :param charset: the charset of the query string. If set to `None`
                    no unicode decoding will take place.
    :param errors: the error handling for the charset decoding.
    """
    # Escapes of bytes listed in `unsafe` are left quoted.
    rv = _unquote_to_bytes(string, unsafe)
    if charset is not None:
        rv = rv.decode(charset, errors)
    return rv
def url_unquote_plus(s, charset="utf-8", errors="replace"):
    """URL decode a single string with the given `charset` and decode "+" to
    whitespace.
    Per default encoding errors are ignored. If you want a different behavior
    you can set `errors` to ``'replace'`` or ``'strict'``. In strict mode a
    :exc:`HTTPUnicodeError` is raised.
    :param s: The string to unquote.
    :param charset: the charset of the query string. If set to `None`
                    no unicode decoding will take place.
    :param errors: The error handling for the `charset` decoding.
    """
    # Pick plus/space literals matching the input's string type, then defer
    # to url_unquote for the percent-decoding.
    is_text = isinstance(s, text_type)
    plus = u"+" if is_text else b"+"
    space = u" " if is_text else b" "
    return url_unquote(s.replace(plus, space), charset, errors)
def url_fix(s, charset="utf-8"):
    r"""Sometimes you get an URL by a user that just isn't a real URL
    because it contains unsafe characters like ' ' and so on. This
    function can fix some of the problems in a similar way browsers
    handle data entered by the user:

    >>> url_fix(u'http://de.wikipedia.org/wiki/Elf (Begriffskl\xe4rung)')
    'http://de.wikipedia.org/wiki/Elf%20(Begriffskl%C3%A4rung)'

    :param s: the string with the URL to fix.
    :param charset: The target charset for the URL if the url was given as
        unicode string.
    """
    # Switch to unicode and normalize backslashes (invalid in URLs
    # anyway) to slashes, which is consistent with what Chrome does.
    fixed = to_unicode(s, charset, "replace").replace("\\", "/")

    # Manually repair malformed Windows file URLs such as file://c:/...
    looks_like_windows_file = (
        fixed.startswith("file://")
        and fixed[7:8].isalpha()
        and fixed[8:10] in (":/", "|/")
    )
    if looks_like_windows_file:
        fixed = "file:///" + fixed[7:]

    parts = url_parse(fixed)
    quoted_path = url_quote(parts.path, charset, safe="/%+$!*'(),")
    quoted_query = url_quote_plus(parts.query, charset, safe=":&%=+$!*'(),")
    quoted_anchor = url_quote_plus(parts.fragment, charset, safe=":&%=+$!*'(),")
    return to_native(
        url_unparse(
            (parts.scheme, parts.encode_netloc(), quoted_path, quoted_query, quoted_anchor)
        )
    )
# not-unreserved characters remain quoted when unquoting to IRI
# (every ASCII code point outside the "always safe" set stays percent-encoded)
_to_iri_unsafe = "".join([chr(c) for c in range(128) if c not in _always_safe])
def _codec_error_url_quote(e):
    """Codec error handler used in :func:`uri_to_iri` after unquoting:
    bytes that fail to decode are re-quoted instead of replaced.
    """
    quoted = _fast_url_quote(e.object[e.start : e.end])
    # On Python 2 the quoter yields bytes; the handler must return text.
    if PY2:
        quoted = quoted.decode("utf-8")
    return quoted, e.end
codecs.register_error("werkzeug.url_quote", _codec_error_url_quote)
def uri_to_iri(uri, charset="utf-8", errors="werkzeug.url_quote"):
    """Convert a URI to an IRI. All valid UTF-8 characters are unquoted,
    leaving all reserved and invalid characters quoted. If the URL has
    a domain, it is decoded from Punycode.

    >>> uri_to_iri("http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF")
    'http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF'

    :param uri: The URI to convert.
    :param charset: The encoding to encode unquoted bytes with.
    :param errors: Error handler to use during ``bytes.encode``. By
        default, invalid bytes are left quoted.

    .. versionchanged:: 0.15
        All reserved and invalid characters remain quoted. Previously,
        only some reserved characters were preserved, and invalid bytes
        were replaced instead of left quoted.

    .. versionadded:: 0.6
    """
    # Accept an already-parsed tuple as well.
    if isinstance(uri, tuple):
        uri = url_unparse(uri)
    parts = url_parse(to_unicode(uri, charset))

    def unquote(component):
        # Unquote per component; non-unreserved characters stay quoted.
        return url_unquote(component, charset, errors, _to_iri_unsafe)

    return url_unparse(
        (parts.scheme, parts.decode_netloc(), unquote(parts.path),
         unquote(parts.query), unquote(parts.fragment))
    )
# reserved characters remain unquoted when quoting to URI
# ("%" is included so existing percent-escapes are not double-quoted)
_to_uri_safe = ":/?#[]@!$&'()*+,;=%"
def iri_to_uri(iri, charset="utf-8", errors="strict", safe_conversion=False):
    """Convert an IRI to a URI. All non-ASCII and unsafe characters are
    quoted. If the URL has a domain, it is encoded to Punycode.

    >>> iri_to_uri('http://\\u2603.net/p\\xe5th?q=\\xe8ry%DF')
    'http://xn--n3h.net/p%C3%A5th?q=%C3%A8ry%DF'

    :param iri: The IRI to convert.
    :param charset: The encoding of the IRI.
    :param errors: Error handler to use during ``bytes.encode``.
    :param safe_conversion: Return the URL unchanged if it only contains
        ASCII characters and no whitespace. See the explanation below.

    There is a general problem with IRI conversion with some protocols
    that are in violation of the URI specification. Consider the
    following two IRIs::

        magnet:?xt=uri:whatever
        itms-services://?action=download-manifest

    After parsing, we don't know if the scheme requires the ``//``,
    which is dropped if empty, but conveys different meanings in the
    final URL if it's present or not. In this case, you can use
    ``safe_conversion``, which will return the URL unchanged if it only
    contains ASCII characters and no whitespace. This can result in a
    URI with unquoted characters if it was not already quoted correctly,
    but preserves the URL's semantics. Werkzeug uses this for the
    ``Location`` header for redirects.

    .. versionchanged:: 0.15
        All reserved characters remain unquoted. Previously, only some
        reserved characters were left unquoted.

    .. versionchanged:: 0.9.6
       The ``safe_conversion`` parameter was added.

    .. versionadded:: 0.6
    """
    # Accept an already-parsed tuple as well.
    if isinstance(iri, tuple):
        iri = url_unparse(iri)

    if safe_conversion:
        # A plain-ASCII URL with no whitespace is handed back untouched
        # rather than risking a change of its semantics.
        try:
            native_iri = to_native(iri)
            ascii_iri = native_iri.encode("ascii")

            # Only return if it doesn't have whitespace. (Why?)
            if len(ascii_iri.split()) == 1:
                return native_iri
        except UnicodeError:
            pass

    parts = url_parse(to_unicode(iri, charset, errors))

    def quote(component):
        # Quote per component; reserved characters stay unquoted.
        return url_quote(component, charset, errors, _to_uri_safe)

    return to_native(
        url_unparse(
            (parts.scheme, parts.encode_netloc(), quote(parts.path),
             quote(parts.query), quote(parts.fragment))
        )
    )
def url_decode(
    s,
    charset="utf-8",
    decode_keys=False,
    include_empty=True,
    errors="replace",
    separator="&",
    cls=None,
):
    """Parse a querystring and return it as :class:`MultiDict`.

    On Python 3 keys are always fully decoded; on Python 2 keys remain
    bytestrings if they fit into ASCII unless `decode_keys` is set to
    `True`. If `charset` is `None` no unicode decoding happens at all
    and raw bytes are returned.

    Per default a missing value for a key will default to an empty key;
    set `include_empty` to `False` to drop such pairs. Encoding errors
    are ignored by default; pass ``errors='replace'`` or ``'strict'``
    for different behavior (strict mode raises `HTTPUnicodeError`).

    .. versionchanged:: 0.5
       Only ``&`` separates pairs by default (use `separator` to allow
       ``;``). The `cls` parameter was added.

    :param s: a string with the query string to decode.
    :param charset: the charset of the query string, or `None` to skip
        unicode decoding.
    :param decode_keys: Python 2 only -- force keys to be unicode.
    :param include_empty: set to `False` to omit valueless keys.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``.
    :param cls: an optional dict class to use; defaults to
        :class:`MultiDict`.
    """
    if cls is None:
        cls = MultiDict

    # Coerce the separator to the same string type as the input.
    if isinstance(s, text_type):
        if not isinstance(separator, text_type):
            separator = separator.decode(charset or "ascii")
    elif isinstance(s, bytes) and not isinstance(separator, bytes):
        separator = separator.encode(charset or "ascii")

    pairs = s.split(separator)
    return cls(_url_decode_impl(pairs, charset, decode_keys, include_empty, errors))
def url_decode_stream(
    stream,
    charset="utf-8",
    decode_keys=False,
    include_empty=True,
    errors="replace",
    separator="&",
    cls=None,
    limit=None,
    return_iterator=False,
):
    """Works like :func:`url_decode` but decodes a stream. The behavior
    of stream and limit follows functions like
    :func:`~werkzeug.wsgi.make_line_iter`. The generator of pairs is
    directly fed to the `cls` so you can consume the data while it's
    parsed.

    .. versionadded:: 0.8

    :param stream: a stream with the encoded querystring.
    :param charset: the charset of the query string, or `None` to skip
        unicode decoding.
    :param decode_keys: Python 2 only -- force keys to be unicode.
    :param include_empty: set to `False` to omit valueless keys.
    :param errors: the decoding error behavior.
    :param separator: the pair separator to be used, defaults to ``&``.
    :param cls: an optional dict class to use; defaults to
        :class:`MultiDict`.
    :param limit: the content length of the URL data. Not necessary if
        a limited stream is provided.
    :param return_iterator: if `True`, ignore `cls` and return an
        iterator over all decoded pairs instead.
    """
    from .wsgi import make_chunk_iter

    pair_iter = make_chunk_iter(stream, separator, limit)
    decoder = _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors)

    # Hand back the lazy generator directly when requested.
    if return_iterator:
        return decoder

    if cls is None:
        cls = MultiDict
    return cls(decoder)
def _url_decode_impl(pair_iter, charset, decode_keys, include_empty, errors):
    """Yield decoded ``(key, value)`` tuples from raw querystring pairs."""
    for pair in pair_iter:
        if not pair:
            continue
        s = make_literal_wrapper(pair)
        equal = s("=")
        try:
            key, value = pair.split(equal, 1)
        except ValueError:
            # No "=" in the pair: a bare key with an empty value.
            if not include_empty:
                continue
            key, value = pair, s("")
        key = url_unquote_plus(key, charset, errors)
        if charset is not None and PY2 and not decode_keys:
            key = try_coerce_native(key)
        yield key, url_unquote_plus(value, charset, errors)
def url_encode(
    obj, charset="utf-8", encode_keys=False, sort=False, key=None, separator=b"&"
):
    """URL encode a dict/`MultiDict`. If a value is `None` it will not
    appear in the result string. Per default only values are encoded
    into the target charset; set `encode_keys` to ``True`` to support
    unicode keys as well.

    If `sort` is set to `True` the items are sorted by `key` or the
    default sorting algorithm.

    .. versionadded:: 0.5
        `sort`, `key`, and `separator` were added.

    :param obj: the object to encode into a query string.
    :param charset: the charset of the query string.
    :param encode_keys: set to `True` if you have unicode keys. (Ignored
        on Python 3.x)
    :param sort: set to `True` to sort parameters by `key`.
    :param separator: the separator to be used for the pairs.
    :param key: an optional sort function; see :func:`sorted`.
    """
    joiner = to_native(separator, "ascii")
    pieces = _url_encode_impl(obj, charset, encode_keys, sort, key)
    return joiner.join(pieces)
def url_encode_stream(
    obj,
    stream=None,
    charset="utf-8",
    encode_keys=False,
    sort=False,
    key=None,
    separator=b"&",
):
    """Like :meth:`url_encode` but writes the results to a stream
    object. If the stream is `None` a generator over all encoded
    pairs is returned.

    .. versionadded:: 0.8

    :param obj: the object to encode into a query string.
    :param stream: a stream to write the encoded object into, or `None`
        to get an iterator over the encoded pairs instead (in which
        case the separator argument is ignored).
    :param charset: the charset of the query string.
    :param encode_keys: set to `True` if you have unicode keys. (Ignored
        on Python 3.x)
    :param sort: set to `True` to sort parameters by `key`.
    :param separator: the separator to be used for the pairs.
    :param key: an optional sort function; see :func:`sorted`.
    """
    separator = to_native(separator, "ascii")
    gen = _url_encode_impl(obj, charset, encode_keys, sort, key)
    if stream is None:
        return gen

    # Write the separator before every chunk except the first one.
    first = True
    for chunk in gen:
        if not first:
            stream.write(separator)
        stream.write(chunk)
        first = False
def url_join(base, url, allow_fragments=True):
    """Join a base URL and a possibly relative URL to form an absolute
    interpretation of the latter.

    :param base: the base URL for the join operation.
    :param url: the URL to join.
    :param allow_fragments: indicates whether fragments should be allowed.
    """
    # Both arguments may be given as already-parsed tuples.
    if isinstance(base, tuple):
        base = url_unparse(base)
    if isinstance(url, tuple):
        url = url_unparse(url)
    # Coerce both operands to the same string type (text vs. bytes).
    base, url = normalize_string_tuple((base, url))
    s = make_literal_wrapper(base)
    if not base:
        return url
    if not url:
        return base
    bscheme, bnetloc, bpath, bquery, bfragment = url_parse(
        base, allow_fragments=allow_fragments
    )
    scheme, netloc, path, query, fragment = url_parse(url, bscheme, allow_fragments)
    # A different scheme means `url` is already absolute.
    if scheme != bscheme:
        return url
    # An explicit netloc also makes `url` authoritative on its own.
    if netloc:
        return url_unparse((scheme, netloc, path, query, fragment))
    netloc = bnetloc
    if path[:1] == s("/"):
        # Absolute path: replaces the base path entirely.
        segments = path.split(s("/"))
    elif not path:
        # Empty path: keep the base path (and base query if none given).
        segments = bpath.split(s("/"))
        if not query:
            query = bquery
    else:
        # Relative path: resolve against the base path's directory.
        segments = bpath.split(s("/"))[:-1] + path.split(s("/"))
    # If the rightmost part is "./" we want to keep the slash but
    # remove the dot.
    if segments[-1] == s("."):
        segments[-1] = s("")
    # Resolve ".." and "." segments. Every remaining ".." consumes the
    # segment before it; after each removal the scan restarts.
    segments = [segment for segment in segments if segment != s(".")]
    while 1:
        i = 1
        n = len(segments) - 1
        while i < n:
            if segments[i] == s("..") and segments[i - 1] not in (s(""), s("..")):
                del segments[i - 1 : i + 1]
                break
            i += 1
        else:
            break
    # Remove leading ".." segments of an absolute result -- they cannot
    # climb above the root.
    unwanted_marker = [s(""), s("..")]
    while segments[:2] == unwanted_marker:
        del segments[1]
    path = s("/").join(segments)
    return url_unparse((scheme, netloc, path, query, fragment))
class Href(object):
    """Implements a callable that constructs URLs with the given base. The
    function can be called with any number of positional and keyword
    arguments which then are used to assemble the URL. Works with URLs
    and posix paths.

    Positional arguments are appended as individual segments to
    the path of the URL:

    >>> href = Href('/foo')
    >>> href('bar', 23)
    '/foo/bar/23'
    >>> href('foo', bar=23)
    '/foo/foo?bar=23'

    If any of the arguments (positional or keyword) evaluates to `None` it
    will be skipped. If no keyword arguments are given the last argument
    can be a :class:`dict` or :class:`MultiDict` (or any other dict
    subclass), otherwise the keyword arguments are used for the query
    parameters, cutting off the first trailing underscore of the
    parameter name:

    >>> href(is_=42)
    '/foo?is=42'
    >>> href({'foo': 'bar'})
    '/foo?foo=bar'

    Combining of both methods is not allowed:

    >>> href({'foo': 'bar'}, bar=42)
    Traceback (most recent call last):
      ...
    TypeError: keyword arguments and query-dicts can't be combined

    Accessing attributes on the href object creates a new href object
    with the attribute name as prefix:

    >>> bar_href = href.bar
    >>> bar_href("blub")
    '/foo/bar/blub'

    If `sort` is set to `True` the items are sorted by `key` or the
    default sorting algorithm:

    >>> href = Href("/", sort=True)
    >>> href(a=1, b=2, c=3)
    '/?a=1&b=2&c=3'

    .. versionadded:: 0.5
        `sort` and `key` were added.
    """

    def __init__(self, base="./", charset="utf-8", sort=False, key=None):
        # An empty base degrades to the current directory.
        self.base = base or "./"
        self.charset = charset
        self.sort = sort
        self.key = key

    def __getattr__(self, name):
        # Refuse dunder lookups (copy/pickle protocol probes etc.).
        if name[:2] == "__":
            raise AttributeError(name)
        base = self.base
        if base[-1:] != "/":
            base += "/"
        return Href(url_join(base, name), self.charset, self.sort, self.key)

    def __call__(self, *path, **query):
        if path and isinstance(path[-1], dict):
            if query:
                raise TypeError("keyword arguments and query-dicts can't be combined")
            query, path = path[-1], path[:-1]
        elif query:
            # Cut off one trailing underscore so reserved words such as
            # ``is_`` can be used as parameter names.
            query = {
                ((k[:-1] or k) if k.endswith("_") else k): v
                for k, v in query.items()
            }
        segments = [
            to_unicode(url_quote(seg, self.charset), "ascii")
            for seg in path
            if seg is not None
        ]
        joined = "/".join(segments).lstrip("/")
        rv = self.base
        if joined:
            if not rv.endswith("/"):
                rv += "/"
            rv = url_join(rv, "./" + joined)
        if query:
            encoded = url_encode(query, self.charset, sort=self.sort, key=self.key)
            rv += "?" + to_unicode(encoded, "ascii")
        return to_native(rv)
| |
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of SQLAlchemy backend."""
import uuid
from sqlalchemy.sql.expression import asc
from sqlalchemy.sql.expression import literal_column
import nova.context
from nova.db.sqlalchemy import api as sqlalchemy_api
from nova import exception
from nova.openstack.common.db import exception as db_exc
from nova.openstack.common.gettextutils import _
from nova.openstack.common import timeutils
from nova.openstack.common import uuidutils
from nova.virt.baremetal.db.sqlalchemy import models
from nova.virt.baremetal.db.sqlalchemy import session as db_session
def model_query(context, *args, **kwargs):
    """Query helper that accounts for context's `read_deleted` field.

    :param context: context to query under
    :param session: if present, the session to use
    :param read_deleted: if present, overrides context's read_deleted field.
    :param project_only: if present and context is user-type, then restrict
            query to match the context's project_id.
    """
    session = kwargs.get('session') or db_session.get_session()
    read_deleted = kwargs.get('read_deleted') or context.read_deleted
    query = session.query(*args)

    # Map the read_deleted policy onto a filter over the deleted flag.
    if read_deleted == 'yes':
        pass  # no filter: both deleted and active rows
    elif read_deleted == 'no':
        query = query.filter_by(deleted=False)
    elif read_deleted == 'only':
        query = query.filter_by(deleted=True)
    else:
        raise Exception(
            _("Unrecognized read_deleted value '%s'") % read_deleted)

    if kwargs.get('project_only') and nova.context.is_user_context(context):
        query = query.filter_by(project_id=context.project_id)

    return query
def _save(ref, session=None):
    """Persist *ref* through an explicit baremetal DB session.

    We must not call ref.save() with session=None, otherwise NovaBase
    uses nova-db's session, which cannot access bm-db.
    """
    ref.save(session=session or db_session.get_session())
def _build_node_order_by(query):
    """Order node queries by ascending memory_mb, cpus, then local_gb."""
    for column in (models.BareMetalNode.memory_mb,
                   models.BareMetalNode.cpus,
                   models.BareMetalNode.local_gb):
        query = query.order_by(asc(column))
    return query
@sqlalchemy_api.require_admin_context
def bm_node_get_all(context, service_host=None):
    """Return all non-deleted nodes, optionally filtered by host."""
    nodes = model_query(context, models.BareMetalNode, read_deleted="no")
    if service_host:
        nodes = nodes.filter_by(service_host=service_host)
    return nodes.all()
@sqlalchemy_api.require_admin_context
def bm_node_get_associated(context, service_host=None):
    """Return nodes that currently have an instance assigned."""
    # NOTE: "!= None" is deliberate -- SQLAlchemy renders it as
    # "IS NOT NULL"; "is not None" would not produce SQL.
    nodes = (model_query(context, models.BareMetalNode, read_deleted="no")
             .filter(models.BareMetalNode.instance_uuid != None))
    if service_host:
        nodes = nodes.filter_by(service_host=service_host)
    return nodes.all()
@sqlalchemy_api.require_admin_context
def bm_node_get_unassociated(context, service_host=None):
    """Return nodes that have no instance assigned."""
    # NOTE: "== None" is deliberate -- SQLAlchemy renders it as "IS NULL".
    nodes = (model_query(context, models.BareMetalNode, read_deleted="no")
             .filter(models.BareMetalNode.instance_uuid == None))
    if service_host:
        nodes = nodes.filter_by(service_host=service_host)
    return nodes.all()
@sqlalchemy_api.require_admin_context
def bm_node_find_free(context, service_host=None,
                      cpus=None, memory_mb=None, local_gb=None):
    """Return the smallest free node that satisfies the requirements."""
    query = model_query(context, models.BareMetalNode, read_deleted="no")
    # "== None" renders as "IS NULL" in SQLAlchemy: only free nodes.
    query = query.filter(models.BareMetalNode.instance_uuid == None)
    if service_host:
        query = query.filter_by(service_host=service_host)

    # Apply each minimum-resource constraint that was requested.
    for column, minimum in ((models.BareMetalNode.cpus, cpus),
                            (models.BareMetalNode.memory_mb, memory_mb),
                            (models.BareMetalNode.local_gb, local_gb)):
        if minimum is not None:
            query = query.filter(column >= minimum)

    return _build_node_order_by(query).first()
@sqlalchemy_api.require_admin_context
def bm_node_get(context, bm_node_id):
    """Fetch a node by primary key or raise NodeNotFound."""
    # bm_node_id may be passed as a string. Convert to INT to improve
    # DB performance.
    bm_node_id = int(bm_node_id)
    result = (model_query(context, models.BareMetalNode, read_deleted="no")
              .filter_by(id=bm_node_id)
              .first())
    if not result:
        raise exception.NodeNotFound(node_id=bm_node_id)
    return result
@sqlalchemy_api.require_admin_context
def bm_node_get_by_instance_uuid(context, instance_uuid):
    """Fetch the node bound to *instance_uuid* or raise InstanceNotFound."""
    # Reject malformed UUIDs up front to avoid a pointless query.
    if not uuidutils.is_uuid_like(instance_uuid):
        raise exception.InstanceNotFound(instance_id=instance_uuid)
    result = (model_query(context, models.BareMetalNode, read_deleted="no")
              .filter_by(instance_uuid=instance_uuid)
              .first())
    if not result:
        raise exception.InstanceNotFound(instance_id=instance_uuid)
    return result
@sqlalchemy_api.require_admin_context
def bm_node_get_by_node_uuid(context, bm_node_uuid):
    """Fetch a node by its UUID or raise NodeNotFoundByUUID."""
    result = (model_query(context, models.BareMetalNode, read_deleted="no")
              .filter_by(uuid=bm_node_uuid)
              .first())
    if not result:
        raise exception.NodeNotFoundByUUID(node_uuid=bm_node_uuid)
    return result
@sqlalchemy_api.require_admin_context
def bm_node_create(context, values):
    """Create a baremetal node row, generating a UUID when absent."""
    # An empty or missing 'uuid' gets replaced by a fresh random one.
    if not values.get('uuid'):
        values['uuid'] = str(uuid.uuid4())
    node = models.BareMetalNode()
    node.update(values)
    _save(node)
    return node
@sqlalchemy_api.require_admin_context
def bm_node_update(context, bm_node_id, values):
    """Apply *values* to a node; raise NodeNotFound if no row matched."""
    updated = (model_query(context, models.BareMetalNode, read_deleted="no")
               .filter_by(id=bm_node_id)
               .update(values))
    if not updated:
        raise exception.NodeNotFound(node_id=bm_node_id)
@sqlalchemy_api.require_admin_context
def bm_node_associate_and_update(context, node_uuid, values):
    """Associate an instance to a node safely.

    Associate an instance to a node only if that node is not yet
    associated. Allow the caller to set any other fields they require
    in the same operation. For example, this is used to set the node's
    task_state to BUILDING at the beginning of driver.spawn().
    """
    if 'instance_uuid' not in values:
        raise exception.NovaException(_(
            "instance_uuid must be supplied to bm_node_associate_and_update"))

    session = db_session.get_session()
    with session.begin():
        query = model_query(context, models.BareMetalNode,
                            session=session, read_deleted="no")
        query = query.filter_by(uuid=node_uuid)

        # Atomic claim: only a row whose instance_uuid is still NULL is
        # updated, so count is 1 iff the node was free.
        count = (query.filter_by(instance_uuid=None)
                 .update(values, synchronize_session=False))
        if count != 1:
            raise exception.NovaException(_(
                "Failed to associate instance %(i_uuid)s to baremetal node "
                "%(n_uuid)s.") % {'i_uuid': values['instance_uuid'],
                                  'n_uuid': node_uuid})
        ref = query.first()
    return ref
@sqlalchemy_api.require_admin_context
def bm_node_destroy(context, bm_node_id):
    """Soft-delete a node after physically deleting its interfaces."""
    session = db_session.get_session()
    with session.begin():
        # Interfaces carry unique columns, so they are removed
        # physically rather than soft-deleted.
        (model_query(context, models.BareMetalInterface, read_deleted="no")
         .filter_by(bm_node_id=bm_node_id)
         .delete())

        # Soft-delete the node itself, preserving updated_at as-is.
        rows = (model_query(context, models.BareMetalNode, read_deleted="no")
                .filter_by(id=bm_node_id)
                .update({'deleted': True,
                         'deleted_at': timeutils.utcnow(),
                         'updated_at': literal_column('updated_at')}))
        if not rows:
            raise exception.NodeNotFound(node_id=bm_node_id)
@sqlalchemy_api.require_admin_context
def bm_interface_get(context, if_id):
    """Fetch a baremetal interface by id or raise NovaException."""
    result = (model_query(context, models.BareMetalInterface,
                          read_deleted="no")
              .filter_by(id=if_id)
              .first())
    if not result:
        raise exception.NovaException(_("Baremetal interface %s "
                                        "not found") % if_id)
    return result
@sqlalchemy_api.require_admin_context
def bm_interface_get_all(context):
    """Return every non-deleted baremetal interface."""
    return model_query(context, models.BareMetalInterface,
                       read_deleted="no").all()
@sqlalchemy_api.require_admin_context
def bm_interface_destroy(context, if_id):
    """Physically delete an interface (it has unique columns)."""
    (model_query(context, models.BareMetalInterface, read_deleted="no")
     .filter_by(id=if_id)
     .delete())
@sqlalchemy_api.require_admin_context
def bm_interface_create(context, bm_node_id, address, datapath_id, port_no):
    """Create an interface row for a node and return its id."""
    iface = models.BareMetalInterface()
    iface.bm_node_id = bm_node_id
    iface.address = address
    iface.datapath_id = datapath_id
    iface.port_no = port_no
    _save(iface)
    return iface.id
@sqlalchemy_api.require_admin_context
def bm_interface_set_vif_uuid(context, if_id, vif_uuid):
    """Bind a virtual interface UUID to a baremetal interface.

    :param context: request context (must be admin).
    :param if_id: id of the baremetal interface row.
    :param vif_uuid: virtual interface UUID to record on it.
    :raises: NovaException if the interface does not exist or the
        vif_uuid is already in use.
    """
    session = db_session.get_session()
    with session.begin():
        # Lock the row for the duration of the transaction so two
        # callers cannot bind the same interface concurrently.
        bm_interface = model_query(context, models.BareMetalInterface,
                                   read_deleted="no", session=session).\
                       filter_by(id=if_id).\
                       with_lockmode('update').\
                       first()
        if not bm_interface:
            raise exception.NovaException(_("Baremetal interface %s "
                                            "not found") % if_id)
        bm_interface.vif_uuid = vif_uuid
        try:
            session.add(bm_interface)
            session.flush()
        except db_exc.DBError as e:
            # TODO(deva): clean up when db layer raises DuplicateKeyError
            # NOTE(review): presumably a uniqueness constraint on
            # vif_uuid surfaces as a DBError wrapping an IntegrityError,
            # so the message text is sniffed here -- confirm.
            if str(e).find('IntegrityError') != -1:
                raise exception.NovaException(_("Baremetal interface %s "
                                                "already in use") % vif_uuid)
            raise
@sqlalchemy_api.require_admin_context
def bm_interface_get_by_vif_uuid(context, vif_uuid):
    """Fetch the interface bound to *vif_uuid* or raise NovaException."""
    result = (model_query(context, models.BareMetalInterface,
                          read_deleted="no")
              .filter_by(vif_uuid=vif_uuid)
              .first())
    if not result:
        raise exception.NovaException(_("Baremetal virtual interface %s "
                                        "not found") % vif_uuid)
    return result
@sqlalchemy_api.require_admin_context
def bm_interface_get_all_by_bm_node_id(context, bm_node_id):
    """Return a node's interfaces; raise NodeNotFound if there are none."""
    result = (model_query(context, models.BareMetalInterface,
                          read_deleted="no")
              .filter_by(bm_node_id=bm_node_id)
              .all())
    if not result:
        raise exception.NodeNotFound(node_id=bm_node_id)
    return result
| |
# Copyright (c) 2013 - 2019 Adam Caudill and Contributors.
# This file is part of YAWAST which is released under the MIT license.
# See the LICENSE file or go to https://yawast.org/license/ for full license details.
import argparse
import sys
from typing import List
from yawast._version import get_version
from yawast.commands import scan, dns, ssl
from yawast.reporting import reporter
from yawast.scanner.session import Session
from yawast.shared import utils
def build_parser():
    """Construct the top-level yawast argument parser.

    Returns an :class:`argparse.ArgumentParser` with ``scan``, ``dns``,
    ``ssl`` and ``version`` subcommands; each subcommand's ``func``
    default points at its ``command_*`` handler.
    """
    # Options shared by every subcommand.
    parent_parser = argparse.ArgumentParser(add_help=False)
    for flag, text in (
        ("--debug", "Displays debug output (very noisy)"),
        ("--nocolors", "Disables the use of colors in output"),
        ("--nowrap", "Disables the use of line wrapping in output"),
    ):
        parent_parser.add_argument(flag, action="store_true", help=text)

    parser = argparse.ArgumentParser(prog="yawast")
    parser.description = "YAWAST ...where a pentest starts. A tool for web-based application security testing."
    parser.epilog = "For more information, see https://yawast.org"
    parser.add_argument(
        "-v", "--version", action="version", version=f"{parser.prog} v{get_version()}"
    )

    subparsers = parser.add_subparsers()
    subparsers.required = True
    subparsers.dest = "command"

    def subcommand(name, description):
        # Every subcommand inherits the shared options above.
        return subparsers.add_parser(name, help=description, parents=[parent_parser])

    def add_flags(target, *flags):
        for flag, text in flags:
            target.add_argument(flag, action="store_true", help=text)

    def add_output(target):
        target.add_argument("--output", type=str, help="Output JSON file")

    # create the parser for the "scan" command
    parser_scan = subcommand("scan", "Scans the provided URL(s)")
    add_flags(
        parser_scan,
        ("--nossl", "Disables SSL checks"),
        ("--internalssl", "Disable SSL Labs integration"),
        (
            "--tdessessioncount",
            "Counts the number of messages that can be sent in a single session (SWEET32)",
        ),
        ("--dir", "Enables directory search"),
        ("--dirrecursive", "Recursive directory search (only with --dir)"),
        ("--dirlistredir", "Show 301 redirects (only with --dir)"),
        ("--files", "Performs a search for a large list of common files"),
        ("--srv", "Scan for known SRV DNS Records"),
        ("--subdomains", "Search for Common Subdomains"),
        ("--nodns", "Disable DNS checks"),
        ("--ports", "Scan common TCP ports"),
    )
    parser_scan.add_argument(
        "--proxy", type=str, help="HTTP Proxy Server (such as Burp Suite)"
    )
    parser_scan.add_argument("--cookie", type=str, help="Session cookie")
    parser_scan.add_argument(
        "--header",
        type=str,
        help="HTTP header (such as Authorization) sent with each request ('name=value')",
    )
    add_output(parser_scan)
    parser_scan.add_argument(
        "--user",
        type=str,
        help="Valid username for the application (will prompt if not provided)",
    )
    parser_scan.add_argument(
        "--pass_reset_page",
        type=str,
        help="Password reset page URL (will prompt if not provided)",
    )
    parser_scan.add_argument(
        "--php_page",
        type=str,
        help="Relative path to PHP script (for additional tests)",
    )
    parser_scan.set_defaults(func=command_scan)

    # create the parser for the "dns" command
    parser_dns = subcommand("dns", "Scans DNS for the provided URL(s)")
    add_flags(
        parser_dns,
        ("--srv", "Scan for known SRV DNS Records"),
        ("--subdomains", "Search for Common Subdomains"),
    )
    add_output(parser_dns)
    parser_dns.set_defaults(func=command_dns)

    # create the parser for the "ssl" command
    parser_ssl = subcommand("ssl", "Scans TLS/SSL for the provided URL(s)")
    add_flags(
        parser_ssl,
        ("--internalssl", "Disable SSL Labs integration"),
        (
            "--tdessessioncount",
            "Counts the number of messages that can be sent in a single session (SWEET32)",
        ),
        ("--nodns", "Disable DNS checks"),
    )
    add_output(parser_ssl)
    parser_ssl.set_defaults(func=command_ssl)

    # create the parser for the "version" command
    parser_version = subcommand(
        "version", "Displays information about YAWAST and the current environment"
    )
    add_output(parser_version)
    parser_version.set_defaults(func=command_version)

    return parser
def process_urls(urls) -> List[str]:
    """Validate candidate URLs and return only the valid ones.

    Exits the process (via ``utils.exit_message``) when no arguments
    were supplied or when a non-option argument fails URL validation.
    Arguments that look like stray option flags (starting with "-") are
    skipped with a warning on stderr.

    :param urls: raw positional arguments that should be URLs.
    :return: the arguments that passed URL validation.
    """
    ret = []

    # We need at least one argument that could be a URL.
    if len(urls) == 0:
        utils.exit_message("YAWAST Error: You must specify at least one URL.")

    # Next, make sure each argument actually looks like a URL.
    for item in urls:
        if str(item).startswith("-"):
            # Looks like an option flag, not a URL -- warn and skip.
            print(
                "YAWAST Error: Invalid parameter: '%s' - Ignored." % item,
                file=sys.stderr,
            )
        elif not utils.is_url(item):
            # BUG FIX: the message previously lacked the closing quote.
            utils.exit_message("YAWAST Error: Invalid URL Specified: '%s'" % item)
        else:
            ret.append(item)

    return ret
def command_scan(args, urls):
    """Run a full scan against each of the given URLs."""
    for raw in urls:
        target = utils.extract_url(raw)
        reporter.setup(utils.get_domain(target))
        scan.start(Session(args, target))
def command_dns(args, urls):
    """Run the DNS checks against each of the given URLs."""
    for raw in urls:
        target = utils.extract_url(raw)
        reporter.setup(utils.get_domain(target))
        dns.start(Session(args, target))
def command_ssl(args, urls):
    """Run the TLS/SSL checks against each of the given URLs."""
    for raw in urls:
        target = utils.extract_url(raw)
        reporter.setup(utils.get_domain(target))
        ssl.start(Session(args, target))
def command_version(args, urls):
    """Handler for the ``version`` subcommand; performs no work itself."""
    # NOTE(review): presumably the environment/version details are
    # produced by shared startup or reporting code -- confirm before
    # extending this handler.
    pass
| |
from __future__ import unicode_literals
import os
import pickle
import time
from datetime import datetime
from django.test import RequestFactory, TestCase
from django.conf import settings
from django.template import Template, Context
from django.template.response import (TemplateResponse, SimpleTemplateResponse,
ContentNotRenderedError)
from django.test.utils import override_settings
from django.utils._os import upath
def test_processor(request):
    """Trivial context processor used by the response tests below."""
    return dict(processors='yes')
test_processor_name = 'template_tests.test_response.test_processor'
# A test middleware that installs a temporary URLConf
class CustomURLConfMiddleware(object):
    """Middleware that attaches an alternate URLConf path to requests."""

    def process_request(self, request):
        # Record the alternate URLConf module path on the request.
        request.urlconf = 'template_tests.alternate_urls'
class SimpleTemplateResponseTest(TestCase):
    """Behavioral tests for SimpleTemplateResponse: template resolution,
    lazy rendering, iteration/content-access guards, post-render callbacks
    and (un)pickling."""

    def _response(self, template='foo', *args, **kwargs):
        # Helper: build a response around an already-compiled Template.
        return SimpleTemplateResponse(Template(template), *args, **kwargs)

    def test_template_resolving(self):
        response = SimpleTemplateResponse('first/test.html')
        response.render()
        self.assertEqual(response.content, b'First template\n')

        # With a list of names the first template that exists wins.
        templates = ['foo.html', 'second/test.html', 'first/test.html']
        response = SimpleTemplateResponse(templates)
        response.render()
        self.assertEqual(response.content, b'Second template\n')

        response = self._response()
        response.render()
        self.assertEqual(response.content, b'foo')

    def test_explicit_baking(self):
        # explicit baking
        response = self._response()
        self.assertFalse(response.is_rendered)
        response.render()
        self.assertTrue(response.is_rendered)

    def test_render(self):
        # response is not re-rendered without the render call
        response = self._response().render()
        self.assertEqual(response.content, b'foo')

        # rebaking doesn't change the rendered content
        response.template_name = Template('bar{{ baz }}')
        response.render()
        self.assertEqual(response.content, b'foo')

        # but rendered content can be overridden by manually
        # setting content
        response.content = 'bar'
        self.assertEqual(response.content, b'bar')

    def test_iteration_unrendered(self):
        # unrendered response raises an exception on iteration
        response = self._response()
        self.assertFalse(response.is_rendered)

        def iteration():
            for x in response:
                pass
        self.assertRaises(ContentNotRenderedError, iteration)
        self.assertFalse(response.is_rendered)

    def test_iteration_rendered(self):
        # iteration works for rendered responses
        response = self._response().render()
        res = [x for x in response]
        self.assertEqual(res, [b'foo'])

    def test_content_access_unrendered(self):
        # unrendered response raises an exception when content is accessed
        response = self._response()
        self.assertFalse(response.is_rendered)
        self.assertRaises(ContentNotRenderedError, lambda: response.content)
        self.assertFalse(response.is_rendered)

    def test_content_access_rendered(self):
        # rendered response content can be accessed
        response = self._response().render()
        self.assertEqual(response.content, b'foo')

    def test_set_content(self):
        # content can be overridden; setting it marks the response rendered
        response = self._response()
        self.assertFalse(response.is_rendered)
        response.content = 'spam'
        self.assertTrue(response.is_rendered)
        self.assertEqual(response.content, b'spam')
        response.content = 'baz'
        self.assertEqual(response.content, b'baz')

    def test_dict_context(self):
        response = self._response('{{ foo }}{{ processors }}',
                                  {'foo': 'bar'})
        self.assertEqual(response.context_data, {'foo': 'bar'})
        response.render()
        # No context processors run for SimpleTemplateResponse.
        self.assertEqual(response.content, b'bar')

    def test_context_instance(self):
        response = self._response('{{ foo }}{{ processors }}',
                                  Context({'foo': 'bar'}))
        self.assertEqual(response.context_data.__class__, Context)
        response.render()
        self.assertEqual(response.content, b'bar')

    def test_kwargs(self):
        response = self._response(content_type='application/json', status=504)
        self.assertEqual(response['content-type'], 'application/json')
        self.assertEqual(response.status_code, 504)

    def test_args(self):
        # Same as test_kwargs but passing everything positionally.
        response = SimpleTemplateResponse('', {}, 'application/json', 504)
        self.assertEqual(response['content-type'], 'application/json')
        self.assertEqual(response.status_code, 504)

    def test_post_callbacks(self):
        "Rendering a template response triggers the post-render callbacks"
        post = []

        def post1(obj):
            post.append('post1')

        def post2(obj):
            post.append('post2')

        response = SimpleTemplateResponse('first/test.html', {})
        response.add_post_render_callback(post1)
        response.add_post_render_callback(post2)

        # When the content is rendered, all the callbacks are invoked, too.
        response.render()
        self.assertEqual(response.content, b'First template\n')
        self.assertEqual(post, ['post1', 'post2'])

    def test_pickling(self):
        # Create a template response. The context is
        # known to be unpickleable (e.g., a function).
        response = SimpleTemplateResponse('first/test.html', {
            'value': 123,
            'fn': datetime.now,
        })
        self.assertRaises(ContentNotRenderedError,
                          pickle.dumps, response)

        # But if we render the response, we can pickle it.
        response.render()
        pickled_response = pickle.dumps(response)
        unpickled_response = pickle.loads(pickled_response)

        self.assertEqual(unpickled_response.content, response.content)
        self.assertEqual(unpickled_response['content-type'], response['content-type'])
        self.assertEqual(unpickled_response.status_code, response.status_code)

        # ...and the unpickled response doesn't have the
        # template-related attributes, so it can't be re-rendered
        template_attrs = ('template_name', 'context_data', '_post_render_callbacks')
        for attr in template_attrs:
            self.assertFalse(hasattr(unpickled_response, attr))

        # ...and requesting any of those attributes raises an exception
        for attr in template_attrs:
            with self.assertRaises(AttributeError):
                getattr(unpickled_response, attr)

    def test_repickling(self):
        # A response survives a pickle/unpickle/pickle round trip.
        response = SimpleTemplateResponse('first/test.html', {
            'value': 123,
            'fn': datetime.now,
        })
        self.assertRaises(ContentNotRenderedError,
                          pickle.dumps, response)

        response.render()
        pickled_response = pickle.dumps(response)
        unpickled_response = pickle.loads(pickled_response)
        pickle.dumps(unpickled_response)

    def test_pickling_cookie(self):
        response = SimpleTemplateResponse('first/test.html', {
            'value': 123,
            'fn': datetime.now,
        })

        response.cookies['key'] = 'value'

        response.render()
        pickled_response = pickle.dumps(response, pickle.HIGHEST_PROTOCOL)
        unpickled_response = pickle.loads(pickled_response)

        self.assertEqual(unpickled_response.cookies['key'].value, 'value')
@override_settings(
    TEMPLATE_CONTEXT_PROCESSORS=[test_processor_name],
    # BUG FIX: the original read ``TEMPLATE_DIRS=(os.path.join(...))`` —
    # parentheses without a trailing comma are just grouping, so the setting
    # was a plain string instead of the tuple of directories the template
    # loaders expect.
    TEMPLATE_DIRS=(os.path.join(os.path.dirname(upath(__file__)), 'templates'),),
)
class TemplateResponseTest(TestCase):
    """Tests for TemplateResponse (the request-aware variant):
    context-processor application, current_app handling and pickling."""

    def setUp(self):
        self.factory = RequestFactory()

    def _response(self, template='foo', *args, **kwargs):
        # Every response is built against a fresh GET / request so the
        # context processors configured above run during render().
        return TemplateResponse(self.factory.get('/'), Template(template),
                                *args, **kwargs)

    def test_render(self):
        response = self._response('{{ foo }}{{ processors }}').render()
        self.assertEqual(response.content, b'yes')

    def test_render_with_requestcontext(self):
        # A plain dict is wrapped in a RequestContext, so processors apply.
        response = self._response('{{ foo }}{{ processors }}',
                                  {'foo': 'bar'}).render()
        self.assertEqual(response.content, b'baryes')

    def test_render_with_context(self):
        # An explicit Context bypasses the context processors.
        response = self._response('{{ foo }}{{ processors }}',
                                  Context({'foo': 'bar'})).render()
        self.assertEqual(response.content, b'bar')

    def test_kwargs(self):
        response = self._response(content_type='application/json',
                                  status=504)
        self.assertEqual(response['content-type'], 'application/json')
        self.assertEqual(response.status_code, 504)

    def test_args(self):
        # Same as test_kwargs but passing everything positionally.
        response = TemplateResponse(self.factory.get('/'), '', {},
                                    'application/json', 504)
        self.assertEqual(response['content-type'], 'application/json')
        self.assertEqual(response.status_code, 504)

    def test_custom_app(self):
        response = self._response('{{ foo }}', current_app="foobar")
        rc = response.resolve_context(response.context_data)
        self.assertEqual(rc.current_app, 'foobar')

    def test_pickling(self):
        # Create a template response. The context is
        # known to be unpickleable (e.g., a function).
        response = TemplateResponse(self.factory.get('/'),
                                    'first/test.html', {
                                        'value': 123,
                                        'fn': datetime.now,
                                    })
        self.assertRaises(ContentNotRenderedError,
                          pickle.dumps, response)

        # But if we render the response, we can pickle it.
        response.render()
        pickled_response = pickle.dumps(response)
        unpickled_response = pickle.loads(pickled_response)

        self.assertEqual(unpickled_response.content, response.content)
        self.assertEqual(unpickled_response['content-type'], response['content-type'])
        self.assertEqual(unpickled_response.status_code, response.status_code)

        # ...and the unpickled response doesn't have the
        # template-related attributes, so it can't be re-rendered
        template_attrs = ('template_name', 'context_data',
                          '_post_render_callbacks', '_request', '_current_app')
        for attr in template_attrs:
            self.assertFalse(hasattr(unpickled_response, attr))

        # ...and requesting any of those attributes raises an exception
        for attr in template_attrs:
            with self.assertRaises(AttributeError):
                getattr(unpickled_response, attr)

    def test_repickling(self):
        response = SimpleTemplateResponse('first/test.html', {
            'value': 123,
            'fn': datetime.now,
        })
        self.assertRaises(ContentNotRenderedError,
                          pickle.dumps, response)

        response.render()
        pickled_response = pickle.dumps(response)
        unpickled_response = pickle.loads(pickled_response)
        pickle.dumps(unpickled_response)
@override_settings(
    MIDDLEWARE_CLASSES=list(settings.MIDDLEWARE_CLASSES) + [
        'template_tests.test_response.CustomURLConfMiddleware'
    ]
)
class CustomURLConfTest(TestCase):
    """Verifies that a per-request urlconf (set by CustomURLConfMiddleware)
    is honored when resolving views."""
    urls = 'template_tests.urls'

    def test_custom_urlconf(self):
        # The view only exists in the alternate URLconf installed by the
        # middleware, so a 200 proves request.urlconf was used.
        response = self.client.get('/template_response_view/')
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, 'This is where you can find the snark: /snark/')
@override_settings(
    # NOTE(review): a float TTL (2.0 seconds) — confirm the cache middleware
    # accepts non-integer values here.
    CACHE_MIDDLEWARE_SECONDS=2.0,
    MIDDLEWARE_CLASSES=list(settings.MIDDLEWARE_CLASSES) + [
        'django.middleware.cache.FetchFromCacheMiddleware',
        'django.middleware.cache.UpdateCacheMiddleware',
    ]
)
class CacheMiddlewareTest(TestCase):
    """Verifies TemplateResponse interacts correctly with the cache
    middleware: cached within the TTL, re-rendered after expiry."""
    urls = 'template_tests.alternate_urls'

    def test_middleware_caching(self):
        response = self.client.get('/template_response_view/')
        self.assertEqual(response.status_code, 200)

        # Within the 2-second TTL the cached body must be served unchanged.
        time.sleep(1.0)

        response2 = self.client.get('/template_response_view/')
        self.assertEqual(response2.status_code, 200)
        self.assertEqual(response.content, response2.content)

        # NOTE(review): sleep-based timing can be flaky on loaded machines.
        time.sleep(2.0)

        # Let the cache expire and test again
        response2 = self.client.get('/template_response_view/')
        self.assertEqual(response2.status_code, 200)
        self.assertNotEqual(response.content, response2.content)
| |
#!/usr/bin/env python
# coding=utf-8
"""Author: Konrad Zemek
Copyright (C) 2015 ACK CYFRONET AGH
This software is released under the MIT license cited in 'LICENSE.txt'
Runs a command in a dockerized development environment. The files are copied
from 'source directory' to 'output directory' and then the command is ran.
The copy operation is optimized, so that only new and changed files are copied.
The script uses user's SSH keys in case dependency fetching is needed.
Unknown arguments will be passed to the command.
Run the script with -h flag to learn about script's running options.
"""
from os.path import expanduser
import argparse
import os
import platform
import sys
from environment import docker
def default_keys_location():
    """Return the SSH key directory, preferring ~/.ssh/docker when it exists."""
    base_dir = expanduser('~/.ssh')
    docker_subdir = os.path.join(base_dir, 'docker')
    return docker_subdir if os.path.isdir(docker_subdir) else base_dir
# Command-line interface. Unrecognized options are collected later via
# parse_known_args and forwarded verbatim to the command run in the container.
parser = argparse.ArgumentParser(
    formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    description='Run a command inside a dockerized development environment.')

parser.add_argument(
    '-i', '--image',
    action='store',
    default='onedata/builder:v60',
    help='docker image to use for building',
    dest='image')

parser.add_argument(
    '-s', '--src',
    action='store',
    default=os.getcwd(),
    help='source directory to run make from',
    dest='src')

# store_false: passing --no-cache sets mount_cache to False.
parser.add_argument(
    '--no-cache',
    action='store_false',
    default=True,
    help='disable mounting /var/cache/ccache and /var/cache/rebar3',
    dest='mount_cache')

parser.add_argument(
    '-k', '--keys',
    action='store',
    default=default_keys_location(),
    help='directory of ssh keys used for dependency fetching',
    dest='keys')

# May be given multiple times; each path is reflected into the container.
parser.add_argument(
    '-r', '--reflect-volume',
    action='append',
    default=[],
    help="host's paths to reflect in container's filesystem",
    dest='reflect')

parser.add_argument(
    '-c', '--command',
    action='store',
    default='make',
    help='command to run in the container',
    dest='command')

parser.add_argument(
    '-w', '--workdir',
    action='store',
    default=None,
    help='path to the working directory; defaults to src dir if unset',
    dest='workdir')

# May be given multiple times as NAME=VALUE pairs.
parser.add_argument(
    '-e', '--env',
    action='append',
    default=[],
    help='env variables to set in the environment',
    dest='envs')

parser.add_argument(
    '--group',
    action='append',
    default=[],
    help='system groups user should be a part of',
    dest='groups')

parser.add_argument(
    '--privileged',
    action='store_true',
    default=False,
    help='run the container with --privileged=true',
    dest='privileged')

parser.add_argument(
    '--cpuset-cpus',
    action='store',
    default=None,
    help='CPUs in which to allow execution (0-3, 0,1)',
    dest='cpuset_cpus')
# Known options land in args; everything else is forwarded to the command.
[args, pass_args] = parser.parse_known_args()

# Bootstrap program executed (via `python -c`) inside the container. The
# {placeholders} are filled by the .format() call below. On Linux it drops
# root privileges to the invoking user's uid/gid, installs the SSH keys and
# docker config, then runs the requested command under `sh`.
command = '''
import os, shutil, subprocess, sys

os.environ['HOME'] = '/root'
ssh_home = '/root/.ssh'
docker_home = '/root/.docker/'

if {shed_privileges}:
    useradd = ['useradd', '--create-home', '--uid', '{uid}', 'maketmp']
    if {groups}:
        useradd.extend(['-G', ','.join({groups})])
    subprocess.call(useradd)
    os.environ['PATH'] = os.environ['PATH'].replace('sbin', 'bin')
    os.environ['HOME'] = '/home/maketmp'
    ssh_home = '/home/maketmp/.ssh'
    docker_home = '/home/maketmp/.docker'
    docker_gid = os.stat('/var/run/docker.sock').st_gid
    os.setgroups([docker_gid])
    os.setregid({gid}, {gid})
    os.setreuid({uid}, {uid})

shutil.copytree('/tmp/keys', ssh_home)
for root, dirs, files in os.walk(ssh_home):
    for dir in dirs:
        os.chmod(os.path.join(root, dir), 0o700)
    for file in files:
        os.chmod(os.path.join(root, file), 0o600)

# Try to copy config.json, continue if it fails (might not exist on host).
try:
    os.makedirs(docker_home)
except:
    pass
try:
    shutil.copyfile(
        '/tmp/docker_config/config.json',
        os.path.join(docker_home, 'config.json'
    ))
except:
    pass

sh_command = 'eval $(ssh-agent) > /dev/null; ssh-add 2>&1; {command} {params}'
ret = subprocess.call(['sh', '-c', sh_command])
sys.exit(ret)
'''

command = command.format(
    command=args.command,
    params=' '.join(pass_args),
    uid=os.geteuid(),
    gid=os.getegid(),
    # NOTE(review): there is no {src} placeholder in the template above;
    # extra keywords to str.format are harmless — confirm whether 'src'
    # was meant to be used.
    src=args.src,
    shed_privileges=(platform.system() == 'Linux' and os.geteuid() != 0),
    groups=args.groups)

# Mount docker socket so dockers can start dockers
reflect = [(args.src, 'rw'), ('/var/run/docker.sock', 'rw')]
reflect.extend(zip(args.reflect, ['rw'] * len(args.reflect)))
if args.mount_cache:
    reflect.extend([
        ('/var/cache/ccache', 'rw'), ('/var/cache/rebar3', 'rw')
    ])

# Mount keys required for git and docker config that holds auth to
# docker.onedata.org, so the docker can pull images from there.
# Mount it in /tmp/docker_config and then cp the json.
# If .docker is not existent on host, just skip the volume and config copying.
volumes = [
    (args.keys, '/tmp/keys', 'ro')
]

if os.path.isdir(expanduser('~/.docker')):
    volumes += [(expanduser('~/.docker'), '/tmp/docker_config', 'ro')]

# @TODO (translated): we must push the GUI docker images to the official
# repository so that people can build; otherwise fall back to
# docker.onedata.org. This must not pass review without being addressed!
# Split each NAME=VALUE pair only on the FIRST '=' so that values may
# themselves contain '=' (e.g. FOO=a=b). The original split on every '='
# and silently truncated such values.
split_envs = [e.split('=', 1) for e in args.envs]
envs = {kv[0]: kv[1] for kv in split_envs}
# Run the container interactively, removing it on exit; the bootstrap
# program built above is executed with `python -c`.
ret = docker.run(tty=True,
                 interactive=True,
                 rm=True,
                 reflect=reflect,
                 volumes=volumes,
                 envs=envs,
                 workdir=args.workdir if args.workdir else args.src,
                 image=args.image,
                 privileged=args.privileged,
                 cpuset_cpus=args.cpuset_cpus,
                 command=['python', '-c', command])
# Propagate the container's exit status to our caller.
sys.exit(ret)
| |
# encoding: utf-8
"""
attribute.py
Created by Thomas Mangin on 2009-11-05.
Copyright (c) 2009-2013 Exa Networks. All rights reserved.
"""
from struct import pack
from exabgp.bgp.message.notification import Notify
from exabgp.util.cache import Cache
# ==================================================================== Attribute
#
class Attribute (object):
    """Base class for BGP path attributes.

    Concrete attributes subclass this, define ID and FLAG, and register
    themselves so klass()/unpack() can route wire data to the right decoder.
    An optional per-attribute-ID cache avoids re-decoding identical payloads.
    """
    # we need to define ID and FLAG inside of the subclasses
    # otherwise we can not dynamically create different GenericAttribute
    # ID = 0x00
    # FLAG = 0x00

    # Should this Attribute be cached
    CACHING = False

    # Registered subclasses we know how to decode
    registered_attributes = dict()

    # what this implementation knows as attributes
    attributes_known = []
    attributes_well_know = []
    attributes_optional = []

    # Are we caching Attributes (configuration)
    caching = False

    # The attribute cache per attribute ID
    cache = {}

    # ---------------------------------------------------------------------------
    # XXX : FIXME : The API of ID is a bit different (it can be instanciated)
    # XXX : FIXME : This is legacy. should we change to not be ?

    class ID (int):
        __slots__ = []

        # This should move within the classes and not be here
        # RFC 4271
        ORIGIN = 0x01
        AS_PATH = 0x02
        NEXT_HOP = 0x03
        MED = 0x04
        LOCAL_PREF = 0x05
        ATOMIC_AGGREGATE = 0x06
        AGGREGATOR = 0x07
        # RFC 1997
        COMMUNITY = 0x08
        # RFC 4456
        ORIGINATOR_ID = 0x09
        CLUSTER_LIST = 0x0A  # 10
        # RFC 4760
        MP_REACH_NLRI = 0x0E  # 14
        MP_UNREACH_NLRI = 0x0F  # 15
        # RFC 4360
        EXTENDED_COMMUNITY = 0x10  # 16
        # RFC 4893
        AS4_PATH = 0x11  # 17
        AS4_AGGREGATOR = 0x12  # 18
        # RFC6514
        PMSI_TUNNEL = 0x16  # 22
        # RFC5512
        TUNNEL_ENCAP = 0x17  # 23
        AIGP = 0x1A  # 26

        INTERNAL_WITHDRAW = 0xFFFD
        INTERNAL_WATCHDOG = 0xFFFE
        INTERNAL_SPLIT = 0xFFFF

        names = {
            0x01: 'origin',
            0x02: 'as-path',
            0x03: 'next-hop',
            0x04: 'med',
            # 0x04: 'multi-exit-disc',
            0x05: 'local-preference',
            0x06: 'atomic-aggregate',
            0x07: 'aggregator',
            0x08: 'community',
            0x09: 'originator-id',
            0x0a: 'cluster-list',
            0x0e: 'mp-reach-nlri',
            0x0f: 'mp-unreach-nlri',
            # 0x0e: 'multi-protocol reacheable nlri'
            # 0x0f: 'multi-protocol unreacheable nlri'
            0x10: 'extended-community',
            0x11: 'as4-path',
            0x12: 'as4-aggregator',
            0x16: 'pmsi-tunnel',
            0x17: 'tunnel-encaps',
            0x1a: 'aigp',
            0xfffd: 'internal-withdraw',
            0xfffe: 'internal-watchdog',
            0xffff: 'internal-split',
        }

        def __str__ (self):
            return self.names.get(self,'unknown-attribute-%s' % hex(self))

        @classmethod
        def name (cls,self):
            return cls.names.get(self,'unknown-attribute-%s' % hex(self))

    # ---------------------------------------------------------------------------

    class Flag (int):
        EXTENDED_LENGTH = 0x10  # .  16 - 0001 0000
        PARTIAL = 0x20          # .  32 - 0010 0000
        TRANSITIVE = 0x40       # .  64 - 0100 0000
        OPTIONAL = 0x80         # . 128 - 1000 0000

        __slots__ = []

        def __str__ (self):
            r = []
            v = int(self)
            if v & 0x10:
                r.append("EXTENDED_LENGTH")
                v -= 0x10
            if v & 0x20:
                r.append("PARTIAL")
                v -= 0x20
            if v & 0x40:
                r.append("TRANSITIVE")
                v -= 0x40
            if v & 0x80:
                r.append("OPTIONAL")
                v -= 0x80
            if v:
                r.append("UNKNOWN %s" % hex(v))
            return " ".join(r)

        def matches (self,value):
            # Compare ignoring the EXTENDED_LENGTH bit on either side.
            return self | 0x10 == value | 0x10

    # ---------------------------------------------------------------------------

    def _attribute (self,value):
        """Wire-encode this attribute: flag, type, length, value."""
        flag = self.FLAG
        if flag & Attribute.Flag.OPTIONAL and not value:
            return ''
        length = len(value)
        if length > 0xFF:
            flag |= Attribute.Flag.EXTENDED_LENGTH
        if flag & Attribute.Flag.EXTENDED_LENGTH:
            len_value = pack('!H',length)
        else:
            len_value = chr(length)
        return "%s%s%s%s" % (chr(flag),chr(self.ID),len_value,value)

    def __eq__ (self,other):
        # Equality is by attribute ID only.
        return self.ID == other.ID

    def __ne__ (self,other):
        return self.ID != other.ID

    @classmethod
    def register_attribute (cls,attribute_id=None,flag=None):
        """Register cls as the decoder for (id, flag|EXTENDED_LENGTH)."""
        aid = cls.ID if attribute_id is None else attribute_id
        flg = cls.FLAG | Attribute.Flag.EXTENDED_LENGTH if flag is None else flag | Attribute.Flag.EXTENDED_LENGTH
        if (aid,flg) in cls.registered_attributes:
            raise RuntimeError('only one class can be registered per capability')
        cls.registered_attributes[(aid,flg)] = cls
        cls.attributes_known.append(aid)
        if cls.FLAG & Attribute.Flag.OPTIONAL:
            Attribute.attributes_optional.append(aid)
        else:
            Attribute.attributes_well_know.append(aid)

    @classmethod
    def registered (cls,attribute_id,flag):
        return (attribute_id,flag | Attribute.Flag.EXTENDED_LENGTH) in cls.registered_attributes

    @classmethod
    def klass (cls,attribute_id,flag):
        """Return the registered class that decodes (attribute_id, flag)."""
        key = (attribute_id,flag | Attribute.Flag.EXTENDED_LENGTH)
        if key in cls.registered_attributes:
            kls = cls.registered_attributes[key]
            kls.ID = attribute_id
            return kls
        # XXX: we do see some AS4_PATH with the partial instead of transitive bit set !!
        if attribute_id == Attribute.ID.AS4_PATH:
            # NOTE(review): attributes_known is a list of attribute ids, so
            # indexing it by attribute_id yields an int, not a class — this
            # fallback looks broken; confirm intended lookup upstream.
            kls = cls.attributes_known[attribute_id]
            kls.ID = attribute_id
            return kls
        raise Notify (2,4,'can not handle attribute id %s' % attribute_id)

    @classmethod
    def unpack (cls,attribute_id,flag,data,negotiated):
        """Decode wire data, consulting and filling the cache when enabled."""
        cache = cls.caching and cls.CACHING
        if cache and data in cls.cache.get(cls.ID,{}):
            return cls.cache[cls.ID].retrieve(data)
        key = (attribute_id,flag | Attribute.Flag.EXTENDED_LENGTH)
        if key in Attribute.registered_attributes.keys():
            instance = cls.klass(attribute_id,flag).unpack(data,negotiated)
            if cache:
                # BUG FIX: was ``cls.cache.cache[cls.ID]...`` — cls.cache is
                # a plain dict with no ``.cache`` attribute, so every cache
                # store raised AttributeError.
                cls.cache[cls.ID].cache(data,instance)
            return instance
        raise Notify (2,4,'can not handle attribute id %s' % attribute_id)

    @classmethod
    def setCache (cls):
        """Ensure every known attribute ID has a Cache instance."""
        if not cls.cache:
            for attribute in Attribute.ID.names:
                if attribute not in cls.cache:
                    cls.cache[attribute] = Cache()
# Pre-populate the per-attribute-ID cache with empty Cache objects at import.
Attribute.setCache()
| |
# Copyright (c) 2012 NetApp, Inc.
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import inspect
import json
import os
import re
import tempfile
import time
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from cinder import compute
from cinder import db
from cinder import exception
from cinder import utils
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder.volume import driver
# Module-level logger (oslo.log).
LOG = logging.getLogger(__name__)

# Configuration options shared by NAS-backed (NFS-like) drivers.
nas_opts = [
    # TODO(eharney): deprecate nas_ip and change this to nas_host
    cfg.StrOpt('nas_ip',
               default='',
               help='IP address or Hostname of NAS system.'),
    cfg.StrOpt('nas_login',
               default='admin',
               help='User name to connect to NAS system.'),
    cfg.StrOpt('nas_password',
               default='',
               help='Password to connect to NAS system.',
               secret=True),
    cfg.IntOpt('nas_ssh_port',
               default=22,
               min=1, max=65535,
               help='SSH port to use to connect to NAS system.'),
    cfg.StrOpt('nas_private_key',
               default='',
               help='Filename of private key to use for SSH authentication.'),
    cfg.StrOpt('nas_secure_file_operations',
               default='auto',
               help=('Allow network-attached storage systems to operate in a '
                     'secure environment where root level access is not '
                     'permitted. If set to False, access is as the root user '
                     'and insecure. If set to True, access is not as root. '
                     'If set to auto, a check is done to determine if this is '
                     'a new installation: True is used if so, otherwise '
                     'False. Default is auto.')),
    cfg.StrOpt('nas_secure_file_permissions',
               default='auto',
               help=('Set more secure file permissions on network-attached '
                     'storage volume files to restrict broad other/world '
                     'access. If set to False, volumes are created with open '
                     'permissions. If set to True, volumes are created with '
                     'permissions for the cinder user and group (660). If '
                     'set to auto, a check is done to determine if '
                     'this is a new installation: True is used if so, '
                     'otherwise False. Default is auto.')),
    cfg.StrOpt('nas_share_path',
               default='',
               help=('Path to the share to use for storing Cinder volumes. '
                     'For example: "/srv/export1" for an NFS server export '
                     'available at 10.0.5.10:/srv/export1 .')),
    cfg.StrOpt('nas_mount_options',
               default=None,
               help=('Options used to mount the storage backend file system '
                     'where Cinder volumes are stored.')),
]

# Old GlusterFS-specific option names kept as deprecated aliases.
old_vol_type_opts = [cfg.DeprecatedOpt('glusterfs_sparsed_volumes'),
                     cfg.DeprecatedOpt('glusterfs_qcow2_volumes')]

volume_opts = [
    cfg.StrOpt('nas_volume_prov_type',
               default='thin',
               choices=['thin', 'thick'],
               deprecated_opts=old_vol_type_opts,
               help=('Provisioning type that will be used when '
                     'creating volumes.')),
]

# Register both option groups globally at import time.
CONF = cfg.CONF
CONF.register_opts(nas_opts)
CONF.register_opts(volume_opts)
def locked_volume_id_operation(f, external=False):
    """Lock decorator for volume operations.

    Takes a named lock prior to executing the operation. The lock is named
    with the id of the volume. This lock can then be used
    by other operations to avoid operation conflicts on shared volumes.

    May be applied to methods of signature:
       method(<self>, volume, *, **)
    """
    def outer_wrapper(inst, *args, **kwargs):
        lock_tag = inst.driver_prefix
        # Resolve the volume id from either a 'volume' or 'snapshot' argument.
        bound_args = inspect.getcallargs(f, inst, *args, **kwargs)
        if bound_args.get('volume'):
            volume_id = bound_args['volume']['id']
        elif bound_args.get('snapshot'):
            volume_id = bound_args['snapshot']['volume']['id']
        else:
            err_msg = _('The decorated method must accept either a volume or '
                        'a snapshot object')
            raise exception.VolumeBackendAPIException(data=err_msg)

        @utils.synchronized('%s-%s' % (lock_tag, volume_id),
                            external=external)
        def locked_call():
            return f(inst, *args, **kwargs)

        return locked_call()
    return outer_wrapper
class RemoteFSDriver(driver.LocalVD, driver.TransferVD, driver.BaseVD):
    """Common base for drivers that work like NFS."""

    # Connection type reported to consumers (set by subclasses, e.g. 'nfs').
    driver_volume_type = None
    # Prefix used for config option lookups ('<prefix>_shares_config', ...)
    # and for operation lock names.
    driver_prefix = 'remotefs'
    volume_backend_name = None
    # Shares must look like '<host>:/<path>'.
    SHARE_FORMAT_REGEX = r'.+:/.+'
def __init__(self, *args, **kwargs):
    super(RemoteFSDriver, self).__init__(*args, **kwargs)
    # Mapping of share address -> mount options, filled by
    # _load_shares_config; _mounted_shares is the successfully
    # mounted subset.
    self.shares = {}
    self._mounted_shares = []
    # Subclasses (e.g. Windows) may override this to run commands
    # without root.
    self._execute_as_root = True
    self._is_voldb_empty_at_startup = kwargs.pop('is_vol_db_empty', None)

    if self.configuration:
        self.configuration.append_config_values(nas_opts)
        self.configuration.append_config_values(volume_opts)
def check_for_setup_error(self):
    """Just to override parent behavior."""
    # Deliberately a no-op: setup validation happens in do_setup instead.
    pass
def initialize_connection(self, volume, connector):
    """Allow connection to connector and return connection info.

    :param volume: volume reference
    :param connector: connector reference
    """
    export = volume['provider_location']
    connection_data = {'export': export,
                       'name': volume['name']}
    # Pass along any per-share mount options we know about.
    if export in self.shares:
        connection_data['options'] = self.shares[export]
    return {
        'driver_volume_type': self.driver_volume_type,
        'data': connection_data,
        'mount_point_base': self._get_mount_point_base(),
    }
def do_setup(self, context):
    """Any initialization the volume driver does while starting.

    Normalizes and validates the nas_secure_file_* options; raises
    InvalidConfigurationValue for anything other than auto/true/false.
    """
    super(RemoteFSDriver, self).do_setup(context)

    # Validate the settings for our secure file options.
    self.configuration.nas_secure_file_permissions = \
        self.configuration.nas_secure_file_permissions.lower()
    self.configuration.nas_secure_file_operations = \
        self.configuration.nas_secure_file_operations.lower()
    valid_secure_opts = ['auto', 'true', 'false']
    secure_options = {'nas_secure_file_permissions':
                      self.configuration.nas_secure_file_permissions,
                      'nas_secure_file_operations':
                      self.configuration.nas_secure_file_operations}

    for opt_name, opt_value in secure_options.items():
        if opt_value not in valid_secure_opts:
            err_parms = {'name': opt_name, 'value': opt_value}
            msg = _("NAS config '%(name)s=%(value)s' invalid. Must be "
                    "'auto', 'true', or 'false'") % err_parms
            LOG.error(msg)
            raise exception.InvalidConfigurationValue(msg)
def _get_provisioned_capacity(self):
    """Returns the provisioned capacity.

    Get the sum of sizes of volumes, snapshots and any other
    files on the mountpoint.

    :returns: total size of all files on all shares, in GiB (rounded to
              two decimals)
    """
    provisioned_size = 0.0
    for share in self.shares.keys():
        mount_path = self._get_mount_point_for_share(share)
        # BUG FIX: the discarded stderr was bound to '_', shadowing the
        # gettext alias imported from cinder.i18n in this scope.
        out, _err = self._execute('du', '--bytes', mount_path,
                                  run_as_root=True)
        # First whitespace-separated token of `du --bytes` output is the
        # total byte count for the tree.
        provisioned_size += int(out.split()[0])
    return round(provisioned_size / units.Gi, 2)
def _get_mount_point_base(self):
    """Returns the mount point base for the remote fs.

    This method facilitates returning mount point base
    for the specific remote fs. Override this method
    in the respective driver to return the entry to be
    used while attach/detach using brick in cinder.
    If not overridden then it returns None without
    raising exception to continue working for cases
    when not used with brick.

    :returns: None in this base implementation
    """
    LOG.debug("Driver specific implementation needs to return"
              " mount_point_base.")
    return None
def create_volume(self, volume):
    """Creates a volume.

    :param volume: volume reference
    :returns: provider_location update dict for database
    """
    self._ensure_shares_mounted()

    # NOTE: mutates the passed-in volume dict so local_path() can resolve
    # the share; the returned dict lets the manager persist the location.
    volume['provider_location'] = self._find_share(volume['size'])

    LOG.info(_LI('casted to %s'), volume['provider_location'])

    self._do_create_volume(volume)

    return {'provider_location': volume['provider_location']}
def _do_create_volume(self, volume):
    """Create a volume on given remote share.

    :param volume: volume reference
    """
    path = self.local_path(volume)
    size_gb = volume['size']

    # The '<prefix>_sparsed_volumes' config flag picks the file type.
    use_sparsed = getattr(self.configuration,
                          self.driver_prefix + '_sparsed_volumes')
    if use_sparsed:
        self._create_sparsed_file(path, size_gb)
    else:
        self._create_regular_file(path, size_gb)

    self._set_rw_permissions(path)
def _ensure_shares_mounted(self):
    """Look for remote shares in the flags and mount them locally."""
    successfully_mounted = []

    config_file = getattr(self.configuration,
                          self.driver_prefix + '_shares_config')
    self._load_shares_config(config_file)

    for share in self.shares.keys():
        try:
            self._ensure_share_mounted(share)
            successfully_mounted.append(share)
        except Exception as exc:
            # A share failing to mount is logged, not fatal.
            LOG.error(_LE('Exception during mounting %s'), exc)

    self._mounted_shares = successfully_mounted

    LOG.debug('Available shares %s', self._mounted_shares)
def delete_volume(self, volume):
    """Deletes a logical volume.

    :param volume: volume reference
    """
    if not volume['provider_location']:
        # Nothing was ever created for this volume; treat as best-effort.
        LOG.warning(_LW('Volume %s does not have '
                        'provider_location specified, '
                        'skipping'), volume['name'])
        return

    self._ensure_share_mounted(volume['provider_location'])

    mounted_path = self.local_path(volume)

    self._delete(mounted_path)
def ensure_export(self, ctx, volume):
    """Synchronously recreates an export for a logical volume."""
    # For remote fs drivers "export" just means the share is mounted.
    self._ensure_share_mounted(volume['provider_location'])
def create_export(self, ctx, volume, connector):
    """Exports the volume.

    Can optionally return a dictionary of changes
    to the volume object to be persisted.
    """
    # No-op: remote fs volumes need no explicit export step.
    pass
def remove_export(self, ctx, volume):
    """Removes an export for a logical volume."""
    # No-op counterpart to create_export.
    pass
def delete_snapshot(self, snapshot):
    """Delete snapshot.

    Do nothing for this driver, but allow manager to handle deletion
    of snapshot in error state.
    """
    pass
def _delete(self, path):
    # Note(lpetrut): this method is needed in order to provide
    # interoperability with Windows as it will be overridden.
    # 'rm -f' so a missing file is not an error.
    self._execute('rm', '-f', path, run_as_root=self._execute_as_root)
def _create_sparsed_file(self, path, size):
    """Creates a sparse file of a given size in GiB."""
    truncate_args = ('truncate', '-s', '%sG' % size, path)
    self._execute(*truncate_args, run_as_root=self._execute_as_root)
def _create_regular_file(self, path, size):
    """Creates a regular file of given size in GiB."""
    megabytes_per_block = 1
    blocks = size * units.Gi / (megabytes_per_block * units.Mi)
    # Fully allocate the file by writing zeros with dd.
    self._execute('dd', 'if=/dev/zero', 'of=%s' % path,
                  'bs=%dM' % megabytes_per_block,
                  'count=%d' % blocks,
                  run_as_root=self._execute_as_root)
def _fallocate(self, path, size):
    """Creates a raw file of given size in GiB using fallocate."""
    # NOTE(review): hardcodes run_as_root=True while sibling helpers use
    # self._execute_as_root — confirm whether this is intentional.
    self._execute('fallocate', '--length=%sG' % size,
                  path, run_as_root=True)
def _create_qcow2_file(self, path, size_gb):
    """Creates a QCOW2 file of a given size in GiB."""
    byte_size = str(size_gb * units.Gi)
    # preallocation=metadata keeps the image thin while reserving metadata.
    self._execute('qemu-img', 'create', '-f', 'qcow2',
                  '-o', 'preallocation=metadata',
                  path, byte_size,
                  run_as_root=self._execute_as_root)
def _set_rw_permissions(self, path):
    """Sets access permissions for given NFS path.

    Volume file permissions are set based upon the value of
    secure_file_permissions: 'true' sets secure access permissions and
    'false' sets more open (insecure) access permissions.

    :param path: the volume file path.
    """
    if self.configuration.nas_secure_file_permissions == 'true':
        # Restrict to owner and group only.
        permissions = '660'
        LOG.debug('File path %(path)s is being set with permissions: '
                  '%(permissions)s',
                  {'path': path, 'permissions': permissions})
    else:
        # World-readable/writable; warn since this is insecure.
        permissions = 'ugo+rw'
        LOG.warning(_LW('%(path)s is being set with open permissions: '
                        '%(perm)s'), {'path': path, 'perm': permissions})
    self._execute('chmod', permissions, path,
                  run_as_root=self._execute_as_root)
def _set_rw_permissions_for_all(self, path):
    """Grants read/write to user, group and others (chmod ugo+rw)."""
    self._execute('chmod', 'ugo+rw', path,
                  run_as_root=self._execute_as_root)
def _set_rw_permissions_for_owner(self, path):
    """Grants read-write permission to the owner (chmod u+rw)."""
    self._execute('chmod', 'u+rw', path,
                  run_as_root=self._execute_as_root)
def local_path(self, volume):
"""Get volume path (mounted locally fs path) for given volume.
:param volume: volume reference
"""
remotefs_share = volume['provider_location']
return os.path.join(self._get_mount_point_for_share(remotefs_share),
volume['name'])
    def copy_image_to_volume(self, context, volume, image_service, image_id):
        """Fetch the image from image_service and write it to the volume.

        The image is converted to raw at the volume path, resized back to
        the requested volume size, and the resulting virtual size is then
        verified.

        :param context: request context
        :param volume: volume reference
        :param image_service: image service used to fetch the image
        :param image_id: id of the image to copy
        :raises: ImageUnacceptable if the final virtual size differs from
            the requested volume size
        """
        run_as_root = self._execute_as_root
        image_utils.fetch_to_raw(context,
                                 image_service,
                                 image_id,
                                 self.local_path(volume),
                                 self.configuration.volume_dd_blocksize,
                                 size=volume['size'],
                                 run_as_root=run_as_root)
        # NOTE (leseb): Set the virtual size of the image
        # the raw conversion overwrote the destination file
        # (which had the correct size)
        # with the fetched glance image size,
        # thus the initial 'size' parameter is not honored
        # this sets the size to the one asked in the first place by the user
        # and then verify the final virtual size
        image_utils.resize_image(self.local_path(volume), volume['size'],
                                 run_as_root=run_as_root)
        data = image_utils.qemu_img_info(self.local_path(volume),
                                         run_as_root=run_as_root)
        virt_size = data.virtual_size / units.Gi
        if virt_size != volume['size']:
            raise exception.ImageUnacceptable(
                image_id=image_id,
                reason=(_("Expected volume size was %d") % volume['size'])
                + (_(" but size is now %d") % virt_size))
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""Copy the volume to the specified image."""
image_utils.upload_volume(context,
image_service,
image_meta,
self.local_path(volume))
def _read_config_file(self, config_file):
# Returns list of lines in file
with open(config_file) as f:
return f.readlines()
def _load_shares_config(self, share_file=None):
self.shares = {}
if all((self.configuration.nas_ip,
self.configuration.nas_share_path)):
LOG.debug('Using nas_ip and nas_share_path configuration.')
nas_ip = self.configuration.nas_ip
nas_share_path = self.configuration.nas_share_path
share_address = '%s:%s' % (nas_ip, nas_share_path)
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
msg = (_("Share %s ignored due to invalid format. Must "
"be of form address:/export. Please check the "
"nas_ip and nas_share_path settings."),
share_address)
raise exception.InvalidConfigurationValue(msg)
self.shares[share_address] = self.configuration.nas_mount_options
elif share_file is not None:
LOG.debug('Loading shares from %s.', share_file)
for share in self._read_config_file(share_file):
# A configuration line may be either:
# host:/vol_name
# or
# host:/vol_name -o options=123,rw --other
if not share.strip():
# Skip blank or whitespace-only lines
continue
if share.startswith('#'):
continue
share_info = share.split(' ', 1)
# results in share_info =
# [ 'address:/vol', '-o options=123,rw --other' ]
share_address = share_info[0].strip()
# Replace \040 with a space, to support paths with spaces
share_address = share_address.replace("\\040", " ")
share_opts = None
if len(share_info) > 1:
share_opts = share_info[1].strip()
if not re.match(self.SHARE_FORMAT_REGEX, share_address):
LOG.error(_LE("Share %s ignored due to invalid format. "
"Must be of form address:/export."),
share_address)
continue
self.shares[share_address] = share_opts
LOG.debug("shares loaded: %s", self.shares)
    def _get_mount_point_for_share(self, path):
        # Abstract hook: subclasses map a share string to its local mount
        # point directory.
        raise NotImplementedError()
    def terminate_connection(self, volume, connector, **kwargs):
        """Disallow connection from connector.

        Intentionally a no-op: there is no per-connection state to tear
        down here.
        """
        pass
def get_volume_stats(self, refresh=False):
"""Get volume stats.
If 'refresh' is True, update the stats first.
"""
if refresh or not self._stats:
self._update_volume_stats()
return self._stats
    def _update_volume_stats(self):
        """Retrieve stats info from volume group.

        Aggregates capacity across all mounted shares and caches the
        result in ``self._stats``.
        """
        data = {}
        backend_name = self.configuration.safe_get('volume_backend_name')
        data['volume_backend_name'] = backend_name or self.volume_backend_name
        data['vendor_name'] = 'Open Source'
        data['driver_version'] = self.get_version()
        data['storage_protocol'] = self.driver_volume_type
        self._ensure_shares_mounted()
        global_capacity = 0
        global_free = 0
        for share in self._mounted_shares:
            # 'used' is intentionally unused; only totals are reported.
            capacity, free, used = self._get_capacity_info(share)
            global_capacity += capacity
            global_free += free
        data['total_capacity_gb'] = global_capacity / float(units.Gi)
        data['free_capacity_gb'] = global_free / float(units.Gi)
        data['reserved_percentage'] = self.configuration.reserved_percentage
        data['QoS_support'] = False
        self._stats = data
    def _get_capacity_info(self, share):
        # Abstract hook: subclasses return capacity information for a share.
        raise NotImplementedError()
    def _find_share(self, volume_size_in_gib):
        # Abstract hook: subclasses select a share able to hold a volume of
        # the given size in GiB.
        raise NotImplementedError()
    def _ensure_share_mounted(self, share):
        # Abstract hook: subclasses mount the given share if necessary.
        raise NotImplementedError()
def secure_file_operations_enabled(self):
"""Determine if driver is operating in Secure File Operations mode.
The Cinder Volume driver needs to query if this driver is operating
in a secure file mode; check our nas_secure_file_operations flag.
"""
if self.configuration.nas_secure_file_operations == 'true':
return True
return False
    def set_nas_security_options(self, is_new_cinder_install):
        """Determine the setting to use for Secure NAS options.

        This method must be overridden by child wishing to use secure
        NAS file operations. This base method will set the NAS security
        options to false.

        :param is_new_cinder_install: unused here; kept for subclasses that
            base their decision on whether this is a fresh installation.
        """
        doc_html = "http://docs.openstack.org/admin-guide-cloud/content" \
                   "/nfs_backend.html"
        # Default both options to insecure and warn the operator, pointing
        # at the documentation for a secure configuration.
        self.configuration.nas_secure_file_operations = 'false'
        LOG.warning(_LW("The NAS file operations will be run as root: "
                        "allowing root level access at the storage backend. "
                        "This is considered an insecure NAS environment. "
                        "Please see %s for information on a secure NAS "
                        "configuration."),
                    doc_html)
        self.configuration.nas_secure_file_permissions = 'false'
        LOG.warning(_LW("The NAS file permissions mode will be 666 (allowing "
                        "other/world read & write access). This is considered "
                        "an insecure NAS environment. Please see %s for "
                        "information on a secure NFS configuration."),
                    doc_html)
def _determine_nas_security_option_setting(self, nas_option, mount_point,
is_new_cinder_install):
"""Determine NAS security option setting when 'auto' is assigned.
This method determines the final 'true'/'false' setting of an NAS
security option when the default value of 'auto' has been detected.
If the nas option isn't 'auto' then its current value is used.
:param nas_option: The NAS security option value loaded from config.
:param mount_point: Mount where indicator file is written.
:param is_new_cinder_install: boolean for new Cinder installation.
:return string: 'true' or 'false' for new option setting.
"""
if nas_option == 'auto':
# For auto detection, we first check to see if we have been
# through this process before by checking for the existence of
# the Cinder secure environment indicator file.
file_name = '.cinderSecureEnvIndicator'
file_path = os.path.join(mount_point, file_name)
if os.path.isfile(file_path):
nas_option = 'true'
LOG.info(_LI('Cinder secure environment '
'indicator file exists.'))
else:
# The indicator file does not exist. If it is a new
# installation, set to 'true' and create the indicator file.
if is_new_cinder_install:
nas_option = 'true'
try:
with open(file_path, 'w') as fh:
fh.write('Detector file for Cinder secure '
'environment usage.\n')
fh.write('Do not delete this file.\n')
# Set the permissions on our special marker file to
# protect from accidental removal (owner write only).
self._execute('chmod', '640', file_path,
run_as_root=False)
LOG.info(_LI('New Cinder secure environment indicator'
' file created at path %s.'), file_path)
except IOError as err:
LOG.error(_LE('Failed to created Cinder secure '
'environment indicator file: %s'),
err)
else:
# For existing installs, we default to 'false'. The
# admin can always set the option at the driver config.
nas_option = 'false'
return nas_option
class RemoteFSSnapDriver(RemoteFSDriver, driver.SnapshotVD):
    """Base class for remotefs drivers implementing qcow2 snapshots.

    Driver must implement:
        _local_volume_dir(self, volume)
    """
    def __init__(self, *args, **kwargs):
        # Client used to mount/query the remote filesystem; assigned by
        # subclasses.
        self._remotefsclient = None
        # Base mount directory for shares; assigned by subclasses.
        self.base = None
        # Nova compute API client, created in do_setup().
        self._nova = None
        super(RemoteFSSnapDriver, self).__init__(*args, **kwargs)
    def do_setup(self, context):
        """Perform driver initialization; creates the Nova API client."""
        super(RemoteFSSnapDriver, self).do_setup(context)
        self._nova = compute.API()
def _local_volume_dir(self, volume):
share = volume['provider_location']
local_dir = self._get_mount_point_for_share(share)
return local_dir
def _local_path_volume(self, volume):
path_to_disk = os.path.join(
self._local_volume_dir(volume),
volume['name'])
return path_to_disk
def _get_new_snap_path(self, snapshot):
vol_path = self.local_path(snapshot['volume'])
snap_path = '%s.%s' % (vol_path, snapshot['id'])
return snap_path
def _local_path_volume_info(self, volume):
return '%s%s' % (self.local_path(volume), '.info')
def _read_file(self, filename):
"""This method is to make it easier to stub out code for testing.
Returns a string representing the contents of the file.
"""
with open(filename, 'r') as f:
return f.read()
def _write_info_file(self, info_path, snap_info):
if 'active' not in snap_info.keys():
msg = _("'active' must be present when writing snap_info.")
raise exception.RemoteFSException(msg)
with open(info_path, 'w') as f:
json.dump(snap_info, f, indent=1, sort_keys=True)
    def _qemu_img_info_base(self, path, volume_name, basedir):
        """Sanitize image_utils' qemu_img_info.

        This code expects to deal only with relative filenames.

        :param path: image file to inspect
        :param volume_name: expected volume name; backing files must match it
        :param basedir: mount base directory used in the validation pattern
        :raises: RemoteFSException if the backing file does not match the
            expected naming pattern
        """
        info = image_utils.qemu_img_info(path)
        if info.image:
            info.image = os.path.basename(info.image)
        if info.backing_file:
            # Only accept backing files of the form
            # [<basedir>/<hash>/]<volname>[.(tmp-snap-)?<uuid>]
            # (the '%' split across literals yields '%(volname)s').
            backing_file_template = \
                "(%(basedir)s/[0-9a-f]+/)?%" \
                "(volname)s(.(tmp-snap-)?[0-9a-f-]+)?$" % {
                    'basedir': basedir,
                    'volname': volume_name
                }
            if not re.match(backing_file_template, info.backing_file):
                msg = _("File %(path)s has invalid backing file "
                        "%(bfile)s, aborting.") % {'path': path,
                                                   'bfile': info.backing_file}
                raise exception.RemoteFSException(msg)
            info.backing_file = os.path.basename(info.backing_file)
        return info
    def _qemu_img_info(self, path, volume_name):
        # Abstract hook: subclasses return sanitized qemu-img information
        # for the image at ``path`` (see _qemu_img_info_base).
        raise NotImplementedError()
def _img_commit(self, path):
self._execute('qemu-img', 'commit', path,
run_as_root=self._execute_as_root)
self._delete(path)
def _rebase_img(self, image, backing_file, volume_format):
self._execute('qemu-img', 'rebase', '-u', '-b', backing_file, image,
'-F', volume_format, run_as_root=self._execute_as_root)
    def _read_info_file(self, info_path, empty_if_missing=False):
        """Return dict of snapshot information.

        :param info_path: path to file
        :param empty_if_missing: True=return empty dict if no file
        """
        if not os.path.exists(info_path):
            if empty_if_missing is True:
                return {}
        # NOTE(review): when the file is missing and empty_if_missing is
        # False, the read below raises IOError — presumably intentional so
        # callers notice a lost info file; confirm.
        return json.loads(self._read_file(info_path))
    def _get_backing_chain_for_path(self, volume, path):
        """Returns list of dicts containing backing-chain information.

        Includes 'filename', and 'backing-filename' for each
        applicable entry.

        Consider converting this to use --backing-chain and --output=json
        when environment supports qemu-img 1.5.0.

        :param volume: volume reference
        :param path: path to image file at top of chain
        """
        output = []
        info = self._qemu_img_info(path, volume['name'])
        new_info = {}
        new_info['filename'] = os.path.basename(path)
        new_info['backing-filename'] = info.backing_file
        output.append(new_info)
        # Walk down the chain until an image with no backing file (the
        # base image) is reached.
        while new_info['backing-filename']:
            filename = new_info['backing-filename']
            path = os.path.join(self._local_volume_dir(volume), filename)
            info = self._qemu_img_info(path, volume['name'])
            backing_filename = info.backing_file
            new_info = {}
            new_info['filename'] = filename
            new_info['backing-filename'] = backing_filename
            output.append(new_info)
        return output
def _get_hash_str(self, base_str):
"""Return a string that represents hash of base_str.
Returns string in a hex format.
"""
if isinstance(base_str, six.text_type):
base_str = base_str.encode('utf-8')
return hashlib.md5(base_str).hexdigest()
def _get_mount_point_for_share(self, share):
"""Return mount point for share.
:param share: example 172.18.194.100:/var/fs
"""
return self._remotefsclient.get_mount_point(share)
def _get_available_capacity(self, share):
"""Calculate available space on the share.
:param share: example 172.18.194.100:/var/fs
"""
mount_point = self._get_mount_point_for_share(share)
out, _ = self._execute('df', '--portability', '--block-size', '1',
mount_point,
run_as_root=self._execute_as_root)
out = out.splitlines()[1]
size = int(out.split()[1])
available = int(out.split()[3])
return available, size
def _get_capacity_info(self, remotefs_share):
available, size = self._get_available_capacity(remotefs_share)
return size, available, size - available
    def _get_mount_point_base(self):
        # Base directory under which shares are mounted; set by subclasses.
        return self.base
    def _ensure_share_writable(self, path):
        """Ensure that the Cinder user can write to the share.

        If not, raise an exception.

        :param path: path to test
        :raises: RemoteFSException
        :returns: None
        """
        prefix = '.cinder-write-test-' + str(os.getpid()) + '-'
        try:
            # Only the ability to create the file matters; the temporary
            # file object is discarded immediately (and deleted on close).
            tempfile.NamedTemporaryFile(prefix=prefix, dir=path)
        except OSError:
            msg = _('Share at %(dir)s is not writable by the '
                    'Cinder volume service. Snapshot operations will not be '
                    'supported.') % {'dir': path}
            raise exception.RemoteFSException(msg)
    def _copy_volume_to_image(self, context, volume, image_service,
                              image_meta):
        """Copy the volume to the specified image.

        When the volume has snapshots (a backing file) or is not stored
        in raw format, it is first flattened into a temporary raw image,
        because upload_volume assumes raw input.
        """
        # If snapshots exist, flatten to a temporary image, and upload it
        active_file = self.get_active_image_from_info(volume)
        active_file_path = os.path.join(self._local_volume_dir(volume),
                                        active_file)
        info = self._qemu_img_info(active_file_path, volume['name'])
        backing_file = info.backing_file
        root_file_fmt = info.file_format
        tmp_params = {
            'prefix': '%s.temp_image.%s' % (volume['id'], image_meta['id']),
            'suffix': '.img'
        }
        with image_utils.temporary_file(**tmp_params) as temp_path:
            if backing_file or (root_file_fmt != 'raw'):
                # Convert due to snapshots
                # or volume data not being stored in raw format
                # (upload_volume assumes raw format input)
                image_utils.convert_image(active_file_path, temp_path, 'raw')
                upload_path = temp_path
            else:
                upload_path = active_file_path
            image_utils.upload_volume(context,
                                      image_service,
                                      image_meta,
                                      upload_path)
def get_active_image_from_info(self, volume):
"""Returns filename of the active image from the info file."""
info_file = self._local_path_volume_info(volume)
snap_info = self._read_info_file(info_file, empty_if_missing=True)
if not snap_info:
# No info file = no snapshots exist
vol_path = os.path.basename(self.local_path(volume))
return vol_path
return snap_info['active']
    def _create_cloned_volume(self, volume, src_vref):
        """Clone src_vref into volume via a temporary snapshot.

        A throwaway snapshot of the source volume is created, its data is
        copied into the new volume, and the snapshot is deleted again.

        :param volume: destination volume reference
        :param src_vref: source volume reference; must be 'available'
        :raises: InvalidVolume if the source volume is not 'available'
        :returns: model update dict carrying the provider_location
        """
        LOG.info(_LI('Cloning volume %(src)s to volume %(dst)s'),
                 {'src': src_vref['id'],
                  'dst': volume['id']})
        if src_vref['status'] != 'available':
            msg = _("Volume status must be 'available'.")
            raise exception.InvalidVolume(msg)
        volume_name = CONF.volume_name_template % volume['id']
        volume_info = {'provider_location': src_vref['provider_location'],
                       'size': src_vref['size'],
                       'id': volume['id'],
                       'name': volume_name,
                       'status': src_vref['status']}
        temp_snapshot = {'volume_name': volume_name,
                         'size': src_vref['size'],
                         'volume_size': src_vref['size'],
                         'name': 'clone-snap-%s' % src_vref['id'],
                         'volume_id': src_vref['id'],
                         'id': 'tmp-snap-%s' % src_vref['id'],
                         'volume': src_vref}
        self._create_snapshot(temp_snapshot)
        try:
            self._copy_volume_from_snapshot(temp_snapshot,
                                            volume_info,
                                            volume['size'])
        finally:
            # Always remove the temporary snapshot, even if the copy fails.
            self._delete_snapshot(temp_snapshot)
        return {'provider_location': src_vref['provider_location']}
    def _delete_stale_snapshot(self, snapshot):
        """Delete a stale snapshot file and drop it from the info file.

        Does nothing when the stale file is still the active image, since
        removing it would discard current volume data.
        """
        info_path = self._local_path_volume_info(snapshot['volume'])
        snap_info = self._read_info_file(info_path)
        snapshot_file = snap_info[snapshot['id']]
        active_file = self.get_active_image_from_info(snapshot['volume'])
        snapshot_path = os.path.join(
            self._local_volume_dir(snapshot['volume']), snapshot_file)
        if (snapshot_file == active_file):
            return
        LOG.info(_LI('Deleting stale snapshot: %s'), snapshot['id'])
        self._delete(snapshot_path)
        del(snap_info[snapshot['id']])
        self._write_info_file(info_path, snap_info)
    def _delete_snapshot(self, snapshot):
        """Delete a snapshot.

        If volume status is 'available', delete snapshot here in Cinder
        using qemu-img.

        If volume status is 'in-use', calculate what qcow2 files need to
        merge, and call to Nova to perform this operation.

        :raises: InvalidVolume if status not acceptable
        :raises: RemoteFSException(msg) if operation fails
        :returns: None
        """
        LOG.debug('Deleting snapshot %s:', snapshot['id'])
        volume_status = snapshot['volume']['status']
        if volume_status not in ['available', 'in-use']:
            msg = _('Volume status must be "available" or "in-use".')
            raise exception.InvalidVolume(msg)
        vol_path = self._local_volume_dir(snapshot['volume'])
        self._ensure_share_writable(vol_path)
        # Determine the true snapshot file for this snapshot
        # based on the .info file
        info_path = self._local_path_volume_info(snapshot['volume'])
        snap_info = self._read_info_file(info_path, empty_if_missing=True)
        if snapshot['id'] not in snap_info:
            # If snapshot info file is present, but snapshot record does not
            # exist, do not attempt to delete.
            # (This happens, for example, if snapshot_create failed due to lack
            # of permission to write to the share.)
            LOG.info(_LI('Snapshot record for %s is not present, allowing '
                         'snapshot_delete to proceed.'), snapshot['id'])
            return
        snapshot_file = snap_info[snapshot['id']]
        LOG.debug('snapshot_file for this snap is: %s', snapshot_file)
        snapshot_path = os.path.join(
            self._local_volume_dir(snapshot['volume']),
            snapshot_file)
        snapshot_path_img_info = self._qemu_img_info(
            snapshot_path,
            snapshot['volume']['name'])
        base_file = snapshot_path_img_info.backing_file
        if base_file is None:
            # There should always be at least the original volume
            # file as base.
            LOG.warning(_LW('No backing file found for %s, allowing '
                            'snapshot to be deleted.'), snapshot_path)
            # Snapshot may be stale, so just delete it and update the
            # info file instead of blocking
            return self._delete_stale_snapshot(snapshot)
        base_path = os.path.join(vol_path, base_file)
        base_file_img_info = self._qemu_img_info(base_path,
                                                 snapshot['volume']['name'])
        # Find what file has this as its backing file
        active_file = self.get_active_image_from_info(snapshot['volume'])
        active_file_path = os.path.join(vol_path, active_file)
        if volume_status == 'in-use':
            # Online delete: Nova owns the attached qcow2 chain, so Cinder
            # only computes the merge parameters and hands the work off.
            context = snapshot['context']
            new_base_file = base_file_img_info.backing_file
            base_id = None
            for key, value in snap_info.items():
                if value == base_file and key != 'active':
                    base_id = key
                    break
            if base_id is None:
                # This means we are deleting the oldest snapshot
                LOG.debug('No %(base_id)s found for %(file)s',
                          {'base_id': 'base_id', 'file': snapshot_file})
            online_delete_info = {
                'active_file': active_file,
                'snapshot_file': snapshot_file,
                'base_file': base_file,
                'base_id': base_id,
                'new_base_file': new_base_file
            }
            return self._delete_snapshot_online(context,
                                                snapshot,
                                                online_delete_info)
        if snapshot_file == active_file:
            # There is no top file: the snapshot being deleted is the
            # active image, so commit it down into its base.
            #      T0       |       T1        |
            #     base      |  snapshot_file  | None
            # (guaranteed   |  (being deleted,|
            #  to exist)    |  commited down) |
            self._img_commit(snapshot_path)
            # Active file has changed
            snap_info['active'] = base_file
        else:
            # A higher file overlays the snapshot being deleted; after the
            # commit its backing pointer must be updated to the base.
            #   T0   |      T1       |     T2      |      T3
            #  base  | snapshot_file | higher_file | highest_file
            #  (not  | (being deleted| (needs ptr  |  (may exist)
            #  used) | commited down)|   update)   |
            backing_chain = self._get_backing_chain_for_path(
                snapshot['volume'], active_file_path)
            # This file is guaranteed to exist since we aren't operating on
            # the active file.
            higher_file = next((os.path.basename(f['filename'])
                                for f in backing_chain
                                if f.get('backing-filename', '') ==
                                snapshot_file),
                               None)
            if higher_file is None:
                msg = _('No file found with %s as backing file.') %\
                    snapshot_file
                raise exception.RemoteFSException(msg)
            higher_id = next((i for i in snap_info
                              if snap_info[i] == higher_file
                              and i != 'active'),
                             None)
            if higher_id is None:
                msg = _('No snap found with %s as backing file.') %\
                    higher_file
                raise exception.RemoteFSException(msg)
            self._img_commit(snapshot_path)
            higher_file_path = os.path.join(vol_path, higher_file)
            base_file_fmt = base_file_img_info.file_format
            self._rebase_img(higher_file_path, base_file, base_file_fmt)
        # Remove snapshot_file from info
        del(snap_info[snapshot['id']])
        self._write_info_file(info_path, snap_info)
def _create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot.
Snapshot must not be the active snapshot. (offline)
"""
if snapshot['status'] != 'available':
msg = _('Snapshot status must be "available" to clone.')
raise exception.InvalidSnapshot(msg)
self._ensure_shares_mounted()
volume['provider_location'] = self._find_share(volume['size'])
self._do_create_volume(volume)
self._copy_volume_from_snapshot(snapshot,
volume,
volume['size'])
return {'provider_location': volume['provider_location']}
    def _copy_volume_from_snapshot(self, snapshot, volume, volume_size):
        # Abstract hook: subclasses copy snapshot data into the new volume.
        raise NotImplementedError()
    def _do_create_snapshot(self, snapshot, backing_filename,
                            new_snap_path):
        """Create a QCOW2 file backed by another file.

        :param snapshot: snapshot reference
        :param backing_filename: filename of file that will back the
            new qcow2 file
        :param new_snap_path: filename of new qcow2 file
        """
        backing_path_full_path = os.path.join(
            self._local_volume_dir(snapshot['volume']),
            backing_filename)
        command = ['qemu-img', 'create', '-f', 'qcow2', '-o',
                   'backing_file=%s' % backing_path_full_path, new_snap_path]
        self._execute(*command, run_as_root=self._execute_as_root)
        info = self._qemu_img_info(backing_path_full_path,
                                   snapshot['volume']['name'])
        backing_fmt = info.file_format
        # Rewrite the backing reference to a relative filename and record
        # the backing format explicitly; '-u' avoids copying any data.
        command = ['qemu-img', 'rebase', '-u',
                   '-b', backing_filename,
                   '-F', backing_fmt,
                   new_snap_path]
        self._execute(*command, run_as_root=self._execute_as_root)
        self._set_rw_permissions(new_snap_path)
    def _create_snapshot(self, snapshot):
        """Create a snapshot.

        If volume is attached, call to Nova to create snapshot, providing a
        qcow2 file. Cinder creates and deletes qcow2 files, but Nova is
        responsible for transitioning the VM between them and handling live
        transfers of data between files as required.

        If volume is detached, create locally with qemu-img. Cinder handles
        manipulation of qcow2 files.

        A file named volume-<uuid>.info is stored with the volume
        data and is a JSON table which contains a mapping between
        Cinder snapshot UUIDs and filenames, as these associations
        will change as snapshots are deleted.

        Basic snapshot operation:

        1. Initial volume file:
            volume-1234

        2. Snapshot created:
            volume-1234  <-  volume-1234.aaaa

            volume-1234.aaaa becomes the new "active" disk image.
            If the volume is not attached, this filename will be used to
            attach the volume to a VM at volume-attach time.
            If the volume is attached, the VM will switch to this file as
            part of the snapshot process.

            Note that volume-1234.aaaa represents changes after snapshot
            'aaaa' was created.  So the data for snapshot 'aaaa' is actually
            in the backing file(s) of volume-1234.aaaa.

            This file has a qcow2 header recording the fact that volume-1234
            is its backing file.  Delta changes since the snapshot was
            created are stored in this file, and the backing file
            (volume-1234) does not change.

            info file: { 'active': 'volume-1234.aaaa',
                         'aaaa':   'volume-1234.aaaa' }

        3. Second snapshot created:
            volume-1234 <- volume-1234.aaaa <- volume-1234.bbbb

            volume-1234.bbbb now becomes the "active" disk image, recording
            changes made to the volume.

            info file: { 'active': 'volume-1234.bbbb',  (* changed!)
                         'aaaa':   'volume-1234.aaaa',
                         'bbbb':   'volume-1234.bbbb' } (* added!)

        4. Snapshot deletion when volume is attached ('in-use' state):

            * When first snapshot is deleted, Cinder calls Nova for online
              snapshot deletion. Nova deletes snapshot with id "aaaa" and
              makes snapshot with id "bbbb" point to the base image.
              Snapshot with id "bbbb" is the active image.

              volume-1234 <- volume-1234.bbbb

              info file: { 'active': 'volume-1234.bbbb',
                           'bbbb':   'volume-1234.bbbb'
                         }

            * When second snapshot is deleted, Cinder calls Nova for online
              snapshot deletion. Nova deletes snapshot with id "bbbb" by
              pulling volume-1234's data into volume-1234.bbbb. This
              (logically) removes snapshot with id "bbbb" and the active
              file remains the same.

              volume-1234.bbbb

              info file: { 'active': 'volume-1234.bbbb' }

           TODO (deepakcs): Change this once Nova supports blockCommit for
                            in-use volumes.

        5. Snapshot deletion when volume is detached ('available' state):

            * When first snapshot is deleted, Cinder does the snapshot
              deletion. volume-1234.aaaa is removed from the snapshot chain.
              The data from it is merged into its parent.

              volume-1234.bbbb is rebased, having volume-1234 as its new
              parent.

              volume-1234 <- volume-1234.bbbb

              info file: { 'active': 'volume-1234.bbbb',
                           'bbbb':   'volume-1234.bbbb'
                         }

            * When second snapshot is deleted, Cinder does the snapshot
              deletion. volume-1234.aaaa is removed from the snapshot chain.
              The base image, volume-1234 becomes the active image for this
              volume again.

              volume-1234

              info file: { 'active': 'volume-1234' }  (* changed!)
        """
        status = snapshot['volume']['status']
        if status not in ['available', 'in-use']:
            msg = _('Volume status must be "available" or "in-use"'
                    ' for snapshot. (is %s)') % status
            raise exception.InvalidVolume(msg)
        info_path = self._local_path_volume_info(snapshot['volume'])
        snap_info = self._read_info_file(info_path, empty_if_missing=True)
        backing_filename = self.get_active_image_from_info(
            snapshot['volume'])
        new_snap_path = self._get_new_snap_path(snapshot)
        if status == 'in-use':
            self._create_snapshot_online(snapshot,
                                         backing_filename,
                                         new_snap_path)
        else:
            self._do_create_snapshot(snapshot,
                                     backing_filename,
                                     new_snap_path)
        # Record the new file both as this snapshot's file and as the
        # active image.
        snap_info['active'] = os.path.basename(new_snap_path)
        snap_info[snapshot['id']] = os.path.basename(new_snap_path)
        self._write_info_file(info_path, snap_info)
    def _create_snapshot_online(self, snapshot, backing_filename,
                                new_snap_path):
        """Create a snapshot of an attached volume by coordinating with Nova.

        The new qcow2 file is created locally, then Nova is asked to switch
        the running VM onto it. The snapshot's DB status is polled until
        Nova reports completion (progress == '90%'), errors, or a timeout
        expires.
        """
        # Perform online snapshot via Nova
        context = snapshot['context']
        self._do_create_snapshot(snapshot,
                                 backing_filename,
                                 new_snap_path)
        connection_info = {
            'type': 'qcow2',
            'new_file': os.path.basename(new_snap_path),
            'snapshot_id': snapshot['id']
        }
        try:
            result = self._nova.create_volume_snapshot(
                context,
                snapshot['volume_id'],
                connection_info)
            LOG.debug('nova call result: %s', result)
        except Exception:
            LOG.exception(_LE('Call to Nova to create snapshot failed'))
            raise
        # Loop and wait for result
        # Nova will call Cinderclient to update the status in the database
        # An update of progress = '90%' means that Nova is done
        seconds_elapsed = 0
        increment = 1
        timeout = 600
        while True:
            s = db.snapshot_get(context, snapshot['id'])
            if s['status'] == 'creating':
                if s['progress'] == '90%':
                    # Nova tasks completed successfully
                    break
                time.sleep(increment)
                seconds_elapsed += increment
            elif s['status'] == 'error':
                msg = _('Nova returned "error" status '
                        'while creating snapshot.')
                raise exception.RemoteFSException(msg)
            LOG.debug('Status of snapshot %(id)s is now %(status)s',
                      {'id': snapshot['id'],
                       'status': s['status']})
            # Back off the polling interval as time passes: 1s -> 2s -> 5s
            # -> 10s.
            if 10 < seconds_elapsed <= 20:
                increment = 2
            elif 20 < seconds_elapsed <= 60:
                increment = 5
            elif 60 < seconds_elapsed:
                increment = 10
            if seconds_elapsed > timeout:
                msg = _('Timed out while waiting for Nova update '
                        'for creation of snapshot %s.') % snapshot['id']
                raise exception.RemoteFSException(msg)
    def _delete_snapshot_online(self, context, snapshot, info):
        """Delete a snapshot of an attached volume by coordinating with Nova.

        Nova performs the qcow2 merge since it owns the attached disk
        chain; this method computes the merge parameters, waits for Nova
        to finish, then updates the info file and removes the stale file.

        :param context: request context for the Nova call
        :param snapshot: snapshot reference
        :param info: dict built by _delete_snapshot describing the chain
        """
        # Update info over the course of this method
        # active file never changes
        info_path = self._local_path_volume(snapshot['volume']) + '.info'
        snap_info = self._read_info_file(info_path)
        if info['active_file'] == info['snapshot_file']:
            # blockRebase/Pull base into active
            # info['base'] => snapshot_file
            file_to_delete = info['base_file']
            if info['base_id'] is None:
                # Passing base=none to blockRebase ensures that
                # libvirt blanks out the qcow2 backing file pointer
                new_base = None
            else:
                new_base = info['new_base_file']
                snap_info[info['base_id']] = info['snapshot_file']
            delete_info = {'file_to_merge': new_base,
                           'merge_target_file': None,  # current
                           'type': 'qcow2',
                           'volume_id': snapshot['volume']['id']}
            del(snap_info[snapshot['id']])
        else:
            # blockCommit snapshot into base
            # info['base'] <= snapshot_file
            # delete record of snapshot
            file_to_delete = info['snapshot_file']
            delete_info = {'file_to_merge': info['snapshot_file'],
                           'merge_target_file': info['base_file'],
                           'type': 'qcow2',
                           'volume_id': snapshot['volume']['id']}
            del(snap_info[snapshot['id']])
        try:
            self._nova.delete_volume_snapshot(
                context,
                snapshot['id'],
                delete_info)
        except Exception:
            LOG.exception(_LE('Call to Nova delete snapshot failed'))
            raise
        # Loop and wait for result
        # Nova will call Cinderclient to update the status in the database
        # An update of progress = '90%' means that Nova is done
        seconds_elapsed = 0
        increment = 1
        timeout = 7200
        while True:
            s = db.snapshot_get(context, snapshot['id'])
            if s['status'] == 'deleting':
                if s['progress'] == '90%':
                    # Nova tasks completed successfully
                    break
                else:
                    LOG.debug('status of snapshot %s is still "deleting"... '
                              'waiting', snapshot['id'])
                    time.sleep(increment)
                    seconds_elapsed += increment
            else:
                msg = _('Unable to delete snapshot %(id)s, '
                        'status: %(status)s.') % {'id': snapshot['id'],
                                                  'status': s['status']}
                raise exception.RemoteFSException(msg)
            # Back off the polling interval as time passes: 1s -> 2s -> 5s
            # -> 10s.
            if 10 < seconds_elapsed <= 20:
                increment = 2
            elif 20 < seconds_elapsed <= 60:
                increment = 5
            elif 60 < seconds_elapsed:
                increment = 10
            if seconds_elapsed > timeout:
                msg = _('Timed out while waiting for Nova update '
                        'for deletion of snapshot %(id)s.') %\
                    {'id': snapshot['id']}
                raise exception.RemoteFSException(msg)
        # Write info file updated above
        self._write_info_file(info_path, snap_info)
        # Delete stale file
        path_to_delete = os.path.join(
            self._local_volume_dir(snapshot['volume']), file_to_delete)
        self._execute('rm', '-f', path_to_delete, run_as_root=True)
    @locked_volume_id_operation
    def create_snapshot(self, snapshot):
        """Apply locking to the create snapshot operation."""
        return self._create_snapshot(snapshot)
    @locked_volume_id_operation
    def delete_snapshot(self, snapshot):
        """Apply locking to the delete snapshot operation."""
        return self._delete_snapshot(snapshot)
    @locked_volume_id_operation
    def create_volume_from_snapshot(self, volume, snapshot):
        """Apply locking to creating a volume from a snapshot."""
        return self._create_volume_from_snapshot(volume, snapshot)
    @locked_volume_id_operation
    def create_cloned_volume(self, volume, src_vref):
        """Creates a clone of the specified volume."""
        return self._create_cloned_volume(volume, src_vref)
    @locked_volume_id_operation
    def copy_volume_to_image(self, context, volume, image_service, image_meta):
        """Copy the volume to the specified image."""
        return self._copy_volume_to_image(context,
                                          volume,
                                          image_service,
                                          image_meta)
| |
import os
import pprint
import sys
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core import files
from devilry.apps.core.models import AssignmentGroup
from devilry.apps.core.models import Candidate
from devilry.devilry_comment.models import Comment, CommentFile
from devilry.devilry_group.models import GroupComment, FeedbackSet
from devilry.devilry_import_v2database import modelimporter
from devilry.devilry_import_v2database.modelimporters import modelimporter_utils
from devilry.devilry_import_v2database.modelimporters.modelimporter_utils import BulkCreator
from devilry.utils import datetimeutils
class CommentFileFileDoesNotExist(Exception):
    """Raised when the file path backing a CommentFile is missing on disk."""

    def __init__(self, filepath, comment_file):
        # Path that was expected to contain the file data.
        self.filepath = filepath
        # The CommentFile instance the path belongs to.
        self.comment_file = comment_file

    def __str__(self):
        return (
            'File {filepath!r}, CommentFile.id={commentfile_id}, v2_id={v2_id!r}. '
            'Not writing file, this means that the CommentFile.file will be blank.'
        ).format(
            filepath=self.filepath,
            v2_id=self.comment_file.v2_id,
            commentfile_id=self.comment_file.id)
class CommentFileIOError(Exception):
    """Raised for IO errors encountered while handling comment files."""
    pass
class ImporterMixin(object):
    """Mixin with shared lookup helpers used by the v2 model importers."""

    def _get_feedback_set_from_id(self, feedback_set_id):
        # Translate a missing FeedbackSet into a ModelImporterException so
        # importers fail with a uniform error type.
        try:
            return FeedbackSet.objects.get(id=feedback_set_id)
        except FeedbackSet.DoesNotExist:
            raise modelimporter.ModelImporterException(
                'FeedbackSet with id {} does not exist'.format(feedback_set_id))

    def _get_user_from_id(self, user_id):
        # Look up a user by primary key, raising a ModelImporterException
        # when the user does not exist.
        user_model = get_user_model()
        try:
            return user_model.objects.get(id=user_id)
        except user_model.DoesNotExist:
            raise modelimporter.ModelImporterException(
                'User with id {} does not exist'.format(user_id))

    def _get_user_from_id_with_fallback(self, user_id, fallback=None):
        # Like _get_user_from_id, but returns ``fallback`` instead of
        # raising when the user does not exist.
        user_model = get_user_model()
        try:
            return user_model.objects.get(id=user_id)
        except user_model.DoesNotExist:
            return fallback
class DeliveryImporter(ImporterMixin, modelimporter.ModelImporter):
    """Imports v2 Delivery objects as v3 GroupComment rows (one comment per delivery)."""
    def get_model_class(self):
        # Each v2 delivery becomes a student GroupComment in v3.
        return GroupComment
    def get_model_super_class(self):
        """
        Notes::
            We use the Comment(which GroupComment inherits from) to be able to set the sequencing number
            for Comment objects.
        """
        return Comment
    def _user_is_candidate_in_group(self, assignment_group, user):
        # True if ``user`` is registered as a candidate on ``assignment_group``.
        group_queryset = AssignmentGroup.objects.filter(id=assignment_group.id).filter_user_is_candidate(user=user)
        if group_queryset.count() == 0:
            return False
        return True
    def _get_user_from_candidate_id(self, candidate_id):
        # Resolve a v2 candidate id to its student user; None when the
        # candidate no longer exists.
        try:
            candidate = Candidate.objects.get(id=candidate_id)
        except Candidate.DoesNotExist:
            return None
        else:
            return candidate.relatedstudent.user
    def _create_group_comment_from_object_dict(self, object_dict):
        # Build, validate (optionally) and save one GroupComment from a
        # v2 delivery object dict. Returns the saved comment.
        group_comment = self.get_model_class()()
        self.patch_model_from_object_dict(
            model_object=group_comment,
            object_dict=object_dict,
            attributes=[
                'pk',
                ('pk', 'id'),
                ('time_of_delivery', 'created_datetime'),
                ('time_of_delivery', 'published_datetime')
            ]
        )
        # v2 deliveries hang off a deadline; v3 maps deadlines to feedback sets,
        # so the v2 deadline id is used directly as the feedback_set id.
        feedback_set_id = object_dict['fields']['deadline']
        group_comment.user = self._get_user_from_candidate_id(object_dict['fields']['delivered_by'])
        group_comment.feedback_set_id = feedback_set_id
        group_comment.text = 'Delivery'
        group_comment.comment_type = GroupComment.COMMENT_TYPE_GROUPCOMMENT
        group_comment.user_role = GroupComment.USER_ROLE_STUDENT
        group_comment.v2_id = modelimporter_utils.make_flat_v2_id(object_dict)
        if self.should_clean():
            group_comment.full_clean()
        group_comment.save()
        return group_comment
    def import_models(self, fake=False):
        # Iterate the v2 delivery dump; with fake=True only print what would
        # be imported. The auto-sequence max id is bumped first so explicit
        # pks do not collide with future auto-generated Comment ids.
        directory_parser = self.v2delivery_directoryparser
        directory_parser.set_max_id_for_models_with_auto_generated_sequence_numbers(
            model_class=self.get_model_super_class())
        for object_dict in directory_parser.iterate_object_dicts():
            if fake:
                print(('Would import: {}'.format(pprint.pformat(object_dict))))
            else:
                self._create_group_comment_from_object_dict(object_dict=object_dict)
class StaticFeedbackImporter(ImporterMixin, modelimporter.ModelImporter):
    """Imports v2 StaticFeedback objects as published grading GroupComments,
    publishing the corresponding FeedbackSet as a side effect."""
    def get_model_class(self):
        return GroupComment
    def _user_is_examiner_on_group(self, assignment_group, user):
        # True if ``user`` is registered as an examiner on ``assignment_group``.
        group_queryset = AssignmentGroup.objects\
            .filter(id=assignment_group.id)\
            .filter_user_is_examiner(user=user)
        if group_queryset.count() == 0:
            return False
        return True
    def _save_and_publish_feedback_set(self, feedback_set, published_by, grading_points, publish_datetime):
        """
        Publish Feedback.
        """
        feedback_set.grading_published_by = published_by
        feedback_set.grading_points = grading_points
        feedback_set.grading_published_datetime = publish_datetime
        if self.should_clean():
            feedback_set.full_clean()
        feedback_set.save()
    def _create_feedback_comment_files(self, group_comment, staticfeedback_id, file_infos_dict):
        """
        Create and save CommentFiles for each file uploaded by examiners in v2.
        """
        # NOTE: despite the docstring, the CommentFile objects are only built
        # here (filesize=0, no save); import_models() bulk-creates them.
        if not isinstance(file_infos_dict, dict):
            # Handle the slightly older format where the files where
            # a list, not a dict - just to avoid crashes until we
            # create a new dump. This can be removed later.
            sys.stderr.write('x')
            return []
        commentfiles = []
        for file_info_dict in list(file_infos_dict.values()):
            mimetype = modelimporter_utils.get_mimetype_from_filename(file_info_dict['filename'])
            comment_file = CommentFile(
                comment=group_comment,
                mimetype=mimetype,
                filename=file_info_dict['filename'],
                filesize=0,
                v2_id=modelimporter_utils.make_staticfeedback_fileattachment_v2_id(
                    staticfeedback_id=staticfeedback_id,
                    attachment_id=file_info_dict['id'])
            )
            commentfiles.append(comment_file)
        return commentfiles
    def _create_group_comment_from_object_dict(self, object_dict):
        # Build and save one examiner grading comment from a v2 staticfeedback
        # dict, publishing its FeedbackSet with the v2 points/timestamp.
        # Returns (group_comment, unsaved_commentfiles).
        group_comment = self.get_model_class()()
        self.patch_model_from_object_dict(
            model_object=group_comment,
            object_dict=object_dict,
            attributes=[
                ('save_timestamp', 'created_datetime'),
                ('save_timestamp', 'published_datetime')
            ]
        )
        feedback_set = self._get_feedback_set_from_id(feedback_set_id=object_dict['fields']['deadline_id'])
        # Fallback is None: the examiner account may have been deleted in v2.
        examiner_user = self._get_user_from_id_with_fallback(object_dict['fields']['saved_by'])
        group_comment.user = examiner_user
        self._save_and_publish_feedback_set(
            feedback_set=feedback_set,
            published_by=examiner_user,
            grading_points=object_dict['fields']['points'],
            publish_datetime=datetimeutils.from_isoformat(object_dict['fields']['save_timestamp'])
        )
        group_comment.feedback_set = feedback_set
        group_comment.part_of_grading = True
        group_comment.text = object_dict['fields']['rendered_view']
        group_comment.comment_type = GroupComment.COMMENT_TYPE_GROUPCOMMENT
        group_comment.user_role = GroupComment.USER_ROLE_EXAMINER
        group_comment.v2_id = modelimporter_utils.make_flat_v2_id(object_dict)
        if self.should_clean():
            group_comment.full_clean()
        group_comment.save()
        commentfiles = self._create_feedback_comment_files(
            group_comment,
            staticfeedback_id=object_dict['pk'],
            file_infos_dict=object_dict['fields']['files'])
        self.log_create(model_object=group_comment, data=object_dict)
        return group_comment, commentfiles
    def import_models(self, fake=False):
        # Iterate the v2 staticfeedback dump; attachment CommentFiles are
        # batched through BulkCreator for efficiency.
        with BulkCreator(model_class=CommentFile) as commentfile_bulk_creator:
            for object_dict in self.v2staticfeedback_directoryparser.iterate_object_dicts():
                if fake:
                    print(('Would import: {}'.format(pprint.pformat(object_dict))))
                else:
                    group_comment, commentfiles = self._create_group_comment_from_object_dict(object_dict=object_dict)
                    if commentfiles:
                        commentfile_bulk_creator.add(*commentfiles)
class FileMetaImporter(ImporterMixin, modelimporter.ModelImporter):
    """Imports v2 FileMeta objects as v3 CommentFile rows (metadata only —
    file content is copied later by CommentFileContentImporter)."""

    def get_model_class(self):
        return CommentFile

    def _create_comment_file_from_object_id(self, object_dict):
        """Build (without saving) a CommentFile from a v2 FileMeta object dict."""
        fields = object_dict['fields']
        new_commentfile = self.get_model_class()()
        self.patch_model_from_object_dict(
            model_object=new_commentfile,
            object_dict=object_dict,
            attributes=['filename']
        )
        # v2 FileMeta belongs to a delivery; the delivery id doubles as the
        # v3 comment id (see DeliveryImporter).
        new_commentfile.comment_id = fields['delivery']
        new_commentfile.filesize = 0
        new_commentfile.mimetype = modelimporter_utils.get_mimetype_from_filename(
            filename=fields.get('filename', None))
        new_commentfile.v2_id = modelimporter_utils.make_flat_v2_id(object_dict)
        return new_commentfile

    def import_models(self, fake=False):
        """Iterate the v2 filemeta dump and bulk-create CommentFile rows."""
        with BulkCreator(model_class=CommentFile) as bulk_creator:
            for object_dict in self.v2filemeta_directoryparser.iterate_object_dicts():
                if fake:
                    print('Would import: {}'.format(pprint.pformat(object_dict)))
                else:
                    bulk_creator.add(
                        self._create_comment_file_from_object_id(object_dict=object_dict))
class CommentFileContentImporter(ImporterMixin, modelimporter.ModelImporter):
    """Copies the actual file content from the v2 file tree into the
    CommentFile rows previously created by FileMetaImporter/StaticFeedbackImporter.

    The v2 source is located via ``CommentFile.v2_id`` (``<model>__<id>``).
    """
    def get_model_class(self):
        return CommentFile

    def _write_file_to_commentfile(self, comment_file, filepath):
        """Attach the file at ``filepath`` to ``comment_file`` and save it.

        Raises:
            CommentFileFileDoesNotExist: when ``filepath`` is missing on disk.
            CommentFileIOError: when saving the file content fails.
        """
        if not os.path.exists(filepath):
            raise CommentFileFileDoesNotExist(filepath, comment_file)
        comment_file.filesize = os.stat(filepath).st_size
        # Use a context manager so the source file handle is closed even if
        # full_clean() or save() raises (the previous code leaked it then).
        with open(filepath, 'rb') as fp:
            comment_file.file = files.File(fp, comment_file.filename)
            if self.should_clean():
                comment_file.full_clean()
            try:
                comment_file.save()
            except IOError as error:
                raise CommentFileIOError(
                    'Failed to write CommentFile#{commentfile_id}, filepath={filepath}: {error}'.format(
                        commentfile_id=comment_file.id,
                        filepath=filepath,
                        error=error)) from error

    def _copy_commentfile_file_from_filemeta(self, comment_file, v2idstring):
        """Copy content for a delivery file. No-op unless DEVILRY_V2_DELIVERY_FILE_ROOT is set."""
        v2_delivery_file_root = getattr(settings, 'DEVILRY_V2_DELIVERY_FILE_ROOT', None)
        if not v2_delivery_file_root:
            return
        object_dict = self.v2filemeta_directoryparser.get_object_dict_by_id(id=v2idstring)
        filepath = os.path.join(v2_delivery_file_root,
                                object_dict['fields']['relative_file_path'])
        self._write_file_to_commentfile(comment_file=comment_file,
                                        filepath=filepath)

    def _copy_commentfile_file_from_staticfeedbackfileattachment(self, comment_file, v2idstring):
        """Copy content for a feedback attachment. No-op unless DEVILRY_V2_MEDIA_ROOT is set."""
        v2_media_root = getattr(settings, 'DEVILRY_V2_MEDIA_ROOT', None)
        if not v2_media_root:
            return
        # v2idstring format: '<staticfeedback_id>__<attachment_id>'
        # (see make_staticfeedback_fileattachment_v2_id).
        staticfeedback_id, attachment_id = v2idstring.split('__')
        object_dict = self.v2staticfeedback_directoryparser.get_object_dict_by_id(id=staticfeedback_id)
        feedbackattachments = object_dict['fields'].get('files', None) or {}
        attachment = feedbackattachments[attachment_id]
        filepath = os.path.join(v2_media_root, attachment['relative_file_path'])
        self._write_file_to_commentfile(comment_file=comment_file,
                                        filepath=filepath)

    def _copy_commentfile_file(self, comment_file):
        """Dispatch on the v2 model encoded in ``comment_file.v2_id``."""
        v2model, v2idstring = comment_file.v2_id.split('__', 1)
        if v2model == 'filemeta':
            # Deliveries
            self._copy_commentfile_file_from_filemeta(
                comment_file=comment_file,
                v2idstring=v2idstring)
        elif v2model == 'staticfeedbackfileattachment':
            # Attachments to feedbacks
            self._copy_commentfile_file_from_staticfeedbackfileattachment(
                comment_file=comment_file,
                v2idstring=v2idstring)
        else:
            raise ValueError('Invalid v2model: {}'.format(v2model))

    def import_models(self, fake=False):
        """Copy content for every CommentFile with a v2_id.

        Missing source files are collected and reported at the end instead of
        aborting the import.
        """
        does_not_exist = []
        with modelimporter_utils.ProgressDots() as progressdots:
            for comment_file in CommentFile.objects.exclude(v2_id='').iterator():
                try:
                    self._copy_commentfile_file(comment_file)
                except CommentFileFileDoesNotExist as error:
                    does_not_exist.append(error)
                progressdots.increment_progress()
        if does_not_exist:
            print('Some of the source files did not exist.', file=sys.stderr)
            for error in does_not_exist:
                print('- {}'.format(error), file=sys.stderr)
| |
"""
Google Drive (Sheets and Docs) preprocessors allow you to store content in
Google Drive and bring it into Grow. Grow will authenticate to the Google
Drive API using OAuth2 and then download content as specified in
`podspec.yaml`.
Grow supports various ways to transform the content, e.g. Sheets can be
downloaded and converted to yaml, and Docs can be downloaded and converted
to markdown.
"""
from . import base
from googleapiclient import discovery
from googleapiclient import errors
from grow.common import oauth
from grow.common import utils
from grow.pods import formats
from protorpc import messages
import cStringIO
import csv
import httplib2
import json
import logging
import os
import yaml
# OAuth2 scope requested when authorizing; full Drive access is required to
# export both Sheets and Docs.
OAUTH_SCOPE = 'https://www.googleapis.com/auth/drive'
# Silence extra logging from googleapiclient.
discovery.logger.setLevel(logging.WARNING)
class BaseGooglePreprocessor(base.BasePreprocessor):
    """Common plumbing shared by the Google Docs/Sheets preprocessors."""
    scheduleable = True

    @staticmethod
    def create_service():
        """Return an authorized Drive v2 API client."""
        credentials = oauth.get_or_create_credentials(
            scope=OAUTH_SCOPE, storage_key='Grow SDK')
        authorized_http = credentials.authorize(
            httplib2.Http(ca_certs=utils.get_cacerts_path()))
        return discovery.build('drive', 'v2', http=authorized_http)

    def run(self, build=True):
        """Execute the preprocessor, logging (not raising) Drive API errors."""
        try:
            self.execute(self.config)
        except errors.HttpError as e:
            self.logger.error(str(e))

    def execute(self, config):
        """Subclasses implement the actual download/transform step."""
        raise NotImplementedError
class GoogleDocsPreprocessor(BaseGooglePreprocessor):
    """Downloads a Google Doc as HTML and writes it into the pod,
    optionally converting to markdown and preserving front matter."""
    KIND = 'google_docs'
    _edit_url_format = 'https://docs.google.com/document/d/{id}/edit'

    class Config(messages.Message):
        path = messages.StringField(1)
        id = messages.StringField(2)
        convert = messages.BooleanField(3)

    @classmethod
    def download(cls, path, doc_id, logger=None, raise_errors=False):
        """Download the doc's HTML export.

        Returns the HTML content, or None when no export link exists (the
        failure is logged, or raised when ``raise_errors`` is True).
        """
        logger = logger or logging
        service = BaseGooglePreprocessor.create_service()
        resp = service.files().get(fileId=doc_id).execute()
        if 'exportLinks' not in resp:
            text = 'Unable to export Google Doc: {}'
            logger.error(text.format(path))
            logger.error('Received: {}'.format(resp))
            return
        for mimetype, url in resp['exportLinks'].iteritems():
            if not mimetype.endswith('html'):
                continue
            resp, content = service._http.request(url)
            if resp.status != 200:
                text = 'Error {} downloading Google Doc: {}'
                text = text.format(resp.status, path)
                if raise_errors:
                    raise base.PreprocessorError(text)
                logger.error(text)
            return content
        if raise_errors:
            text = 'No file to export from Google Docs: {}'.format(path)
            raise base.PreprocessorError(text)

    @classmethod
    def format_content(cls, path, content, convert=True, existing_data=None):
        """Clean the downloaded HTML (optionally to markdown for .md paths)
        and re-attach any front matter present in ``existing_data``."""
        ext = os.path.splitext(path)[1]
        convert_to_markdown = ext == '.md' and convert is not False
        content = utils.clean_html(content, convert_to_markdown=convert_to_markdown)
        # Preserve any existing frontmatter.
        if existing_data:
            if formats.Format.has_front_matter(existing_data):
                content = formats.Format.update(
                    existing_data, body=content)
        return content

    def execute(self, config):
        doc_id = config.id
        path = config.path
        convert = config.convert is not False
        content = GoogleDocsPreprocessor.download(
            path, doc_id=doc_id, logger=self.pod.logger)
        if content is None:
            # Download failed (already logged by download()). Previously this
            # fell through and crashed in clean_html(None); bail out instead
            # so the existing pod file is left untouched.
            return
        existing_data = None
        if self.pod.file_exists(path):
            existing_data = self.pod.read_file(path)
        content = GoogleDocsPreprocessor.format_content(
            path, content, convert=convert, existing_data=existing_data)
        self.pod.write_file(path, content)
        self.logger.info('Downloaded Google Doc -> {}'.format(path))

    def get_edit_url(self, doc=None):
        """Returns the URL to edit in Google Docs."""
        return GoogleDocsPreprocessor._edit_url_format.format(id=self.config.id)
class GoogleSheetsPreprocessor(BaseGooglePreprocessor):
    """Downloads a Google Sheet as CSV and writes it into the pod as CSV,
    JSON, or YAML, either as a list of row dicts or a (possibly nested)
    key/value mapping."""
    KIND = 'google_sheets'
    _edit_url_format = 'https://docs.google.com/spreadsheets/d/{id}/edit#gid={gid}'
    class Config(messages.Message):
        path = messages.StringField(1)
        id = messages.StringField(2)
        gid = messages.IntegerField(3)
        output_style = messages.StringField(4, default='compressed')
        format = messages.StringField(5, default='list')
        preserve = messages.StringField(6, default='builtins')
    @staticmethod
    def _convert_rows_to_mapping(reader):
        # Turn two-column rows into a dict; dotted keys ('a.b.c') become
        # nested dicts, and keys starting with '#' are treated as comments.
        # NOTE(review): rows are assumed to have at least two columns —
        # a shorter row would raise IndexError on row[1]; confirm sheet format.
        results = {}
        def _update_node(root, part):
            # Create an intermediate dict node if missing.
            if isinstance(root, dict) and part not in root:
                root[part] = {}
        for row in reader:
            key = row[0]
            value = row[1]
            if key.startswith('#'):
                continue
            if '.' in key:
                parts = key.split('.')
                parent = results
                for i, part in enumerate(parts):
                    _update_node(parent, part)
                    if i + 1 < len(parts):
                        parent = parent[part]
                # After the loop, ``part`` is the last path segment.
                if isinstance(parent, dict):
                    parent[part] = value
            else:
                results[key] = value
        return results
    @staticmethod
    def format_as_map(fp):
        # Parse the CSV file object into a (possibly nested) mapping.
        reader = csv.reader(fp)
        results = GoogleSheetsPreprocessor._convert_rows_to_mapping(reader)
        return results
    @classmethod
    def download(cls, path, sheet_id, gid=None, logger=None,
                 raise_errors=False):
        # Download the sheet's CSV export (optionally a specific tab via gid).
        # Returns the CSV text, or None when no export link exists.
        logger = logger or logging
        # NOTE(review): ``ext`` is computed but never used here.
        ext = os.path.splitext(path)[1]
        service = BaseGooglePreprocessor.create_service()
        resp = service.files().get(fileId=sheet_id).execute()
        if 'exportLinks' not in resp:
            text = 'Unable to export Google Sheet: {} / Received: {}'
            logger.error(text.format(path, resp))
            if raise_errors:
                raise base.PreprocessorError(text)
            return
        for mimetype, url in resp['exportLinks'].iteritems():
            if not mimetype.endswith('csv'):
                continue
            if gid is not None:
                url += '&gid={}'.format(gid)
            resp, content = service._http.request(url)
            if resp.status != 200:
                text = 'Error {} downloading Google Sheet: {}'
                text = text.format(resp.status, path)
                logger.error(text)
                if raise_errors:
                    raise base.PreprocessorError(text)
            return content
        if raise_errors:
            text = 'No file to export from Google Sheets: {}'.format(path)
            raise base.PreprocessorError(text)
    def _parse_path(self, path):
        # 'pod/path.yaml:some.key' -> ('pod/path.yaml', 'some.key');
        # without ':' the key_to_update is None.
        if ':' in path:
            return path.rsplit(':', 1)
        return path, None
    def execute(self, config):
        # Download, format, serialize and write the sheet into the pod.
        path, key_to_update = self._parse_path(config.path)
        sheet_id = config.id
        gid = config.gid
        content = GoogleSheetsPreprocessor.download(
            path=path, sheet_id=sheet_id, gid=gid, logger=self.pod.logger)
        existing_data = None
        # Only YAML files support preserving existing data.
        if (path.endswith(('.yaml', '.yml'))
                and self.config.preserve
                and self.pod.file_exists(path)):
            existing_data = self.pod.read_yaml(path)
        content = GoogleSheetsPreprocessor.format_content(
            content=content, path=path, format_as=self.config.format,
            preserve=self.config.preserve,
            existing_data=existing_data, key_to_update=key_to_update)
        content = GoogleSheetsPreprocessor.serialize_content(
            formatted_data=content, path=path,
            output_style=self.config.output_style)
        self.pod.write_file(path, content)
        self.logger.info('Downloaded Google Sheet -> {}'.format(path))
    @classmethod
    def get_convert_to(cls, path):
        # Return '.json', '.yaml'/'.yml' for convertible paths, else None
        # (meaning: keep the raw CSV text).
        ext = os.path.splitext(path)[1]
        convert_to = None
        if ext == '.json':
            return ext
        elif ext in ['.yaml', '.yml']:
            return ext
        return convert_to
    @classmethod
    def format_content(cls, content, path, format_as=None, preserve=None,
                       existing_data=None, key_to_update=None):
        """Formats content into either a CSV (text), list, or dictionary."""
        convert_to = cls.get_convert_to(path)
        if convert_to in ['.json', '.yaml', '.yml']:
            fp = cStringIO.StringIO()
            fp.write(content)
            fp.seek(0)
            if format_as == 'map':
                formatted_data = GoogleSheetsPreprocessor.format_as_map(fp)
            else:
                # Default ('list'): one dict per row keyed by the header row.
                reader = csv.DictReader(fp)
                formatted_data = list(reader)
            # Merge with existing data per the 'preserve' policy.
            formatted_data = utils.format_existing_data(
                old_data=existing_data, new_data=formatted_data,
                preserve=preserve, key_to_update=key_to_update)
            return formatted_data
        return content
    @classmethod
    def serialize_content(cls, formatted_data, path, output_style=None):
        """Serializes an object into a string as JSON, YAML, or a CSV
        (default)."""
        kwargs = {}
        convert_to = cls.get_convert_to(path)
        if convert_to == '.json':
            if output_style == 'pretty':
                kwargs['indent'] = 2
                kwargs['separators'] = (',', ': ')
                kwargs['sort_keys'] = True
            return json.dumps(formatted_data, **kwargs)
        elif convert_to in ('.yaml', '.yml'):
            return yaml.safe_dump(formatted_data, default_flow_style=False)
        return formatted_data
    def can_inject(self, doc=None, collection=None):
        # Injection only applies to the doc backed by this preprocessor's path.
        if not self.injected:
            return False
        path, key_to_update = self._parse_path(self.config.path)
        if doc and doc.pod_path == path:
            return True
        return False
    def inject(self, doc):
        # Live-inject freshly downloaded sheet data into ``doc`` without
        # writing the pod file.
        path, key_to_update = self._parse_path(self.config.path)
        try:
            content = GoogleSheetsPreprocessor.download(
                path=path, sheet_id=self.config.id, gid=self.config.gid,
                raise_errors=True)
        except (errors.HttpError, base.PreprocessorError):
            doc.pod.logger.error('Error downloading sheet -> %s', path)
            raise
        if not content:
            return
        existing_data = doc.pod.read_yaml(doc.pod_path)
        fields = GoogleSheetsPreprocessor.format_content(
            content, path=path, format_as=self.config.format,
            preserve=self.config.preserve, existing_data=existing_data,
            key_to_update=key_to_update)
        fields = utils.untag_fields(fields)
        doc.inject(fields=fields)
    def get_edit_url(self, doc=None):
        """Returns the URL to edit in Google Sheets."""
        gid = self.config.gid or '0'
        return GoogleSheetsPreprocessor._edit_url_format.format(
            id=self.config.id, gid=gid)
| |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, division, print_function, absolute_import
from . import TestCase, skipIf, SkipTest, Server
import os
import testdata
import endpoints
from endpoints.environ import *
from endpoints.utils import ByteString
from endpoints.http import Request, Response
from endpoints.call import Controller, Router
from endpoints.exception import CallError
class ControllerTest(TestCase):
    """Tests for Controller method dispatch (ANY handlers, CORS, error codes)."""
    def test_any_1(self):
        # ANY alone handles every HTTP method, but only for the matched path.
        c = Server(contents=[
            "from endpoints import Controller, param",
            "class Default(Controller):",
            "    def ANY(self):",
            "        return 'any'",
        ])
        res = c.handle("/")
        self.assertEqual(200, res.code)
        self.assertEqual("any", res.body)
        res = c.handle("/", method="POST")
        self.assertEqual(200, res.code)
        self.assertEqual("any", res.body)
        res = c.handle("/foo/bar")
        self.assertEqual(404, res.code)
    def test_any_2(self):
        # A concrete method (POST) takes precedence over ANY.
        c = Server(contents=[
            "from endpoints import Controller, param",
            "class Default(Controller):",
            "    def ANY(self, **kwargs):",
            "        return 'any'",
            "    def POST(self, **kwargs):",
            "        return 'post'",
        ])
        res = c.handle("/")
        self.assertEqual(200, res.code)
        self.assertEqual("any", res.body)
        res = c.handle("/", method="POST")
        self.assertEqual(200, res.code)
        self.assertEqual("post", res.body)
    def test_unsupported_method_404(self):
        # Missing handler: 404 for unmatched paths, 501 for unmatched method
        # on the matched controller.
        c = Server(contents=[
            "from endpoints import Controller, param",
            "class Default(Controller):",
            "    def POST(self, foo):",
            "        pass",
        ])
        res = c.handle("/foo/bar")
        self.assertEqual(404, res.code)
        res = c.handle("/foo")
        self.assertEqual(404, res.code)
        res = c.handle("/")
        self.assertEqual(501, res.code)
    def test_cors(self):
        # handle_cors() mirrors Origin/Access-Control-Request-* headers back
        # on both preflight (OPTIONS) and actual requests.
        class Cors(Controller):
            def __init__(self, *args, **kwargs):
                super(Cors, self).__init__(*args, **kwargs)
                self.handle_cors()
            def POST(self): pass
        res = Response()
        req = Request()
        c = Cors(req, res)
        self.assertTrue(c.OPTIONS)
        self.assertFalse('Access-Control-Allow-Origin' in c.response.headers)
        req.set_header('Origin', 'http://example.com')
        c = Cors(req, res)
        self.assertEqual(req.get_header('Origin'), c.response.get_header('Access-Control-Allow-Origin'))
        req.set_header('Access-Control-Request-Method', 'POST')
        req.set_header('Access-Control-Request-Headers', 'xone, xtwo')
        c = Cors(req, res)
        c.OPTIONS()
        self.assertEqual(req.get_header('Origin'), c.response.get_header('Access-Control-Allow-Origin'))
        self.assertEqual(req.get_header('Access-Control-Request-Method'), c.response.get_header('Access-Control-Allow-Methods'))
        self.assertEqual(req.get_header('Access-Control-Request-Headers'), c.response.get_header('Access-Control-Allow-Headers'))
        c = Cors(req, res)
        c.POST()
        self.assertEqual(req.get_header('Origin'), c.response.get_header('Access-Control-Allow-Origin'))
    def test_bad_typeerror(self):
        """There was a bug that made the controller method throw a 404 when it
        should throw a 500"""
        c = Server(contents=[
            "from endpoints import Controller",
            "class Default(Controller):",
            "    def GET(self):",
            "        raise TypeError('This should not cause a 404')"
        ])
        res = c.handle('/')
        self.assertEqual(500, res.code)
        # A TypeError raised by code *called from* the handler (wrong arity on
        # an unrelated object) must also surface as 500, not 404.
        c = Server(contents=[
            "from endpoints import Controller",
            "class Bogus(object):",
            "    def handle_controller(self, foo):",
            "        pass",
            "",
            "class Default(Controller):",
            "    def GET(self):",
            "        b = Bogus()",
            "        b.handle_controller()",
        ])
        res = c.handle('/')
        self.assertEqual(500, res.code)
class RouterTest(TestCase):
    """Tests for Router path -> (module, class, args) resolution."""
    def get_http_instances(self, path="", method="GET"):
        # Build a minimal Request/Response pair for Router.find().
        req = Request()
        req.method = method
        req.path = path
        res = Response()
        return req, res
    def test_multiple_controller_prefixes_1(self):
        # With several prefixes, the first module that matches the path wins.
        r = testdata.create_modules({
            "foo": os.linesep.join([
                "from endpoints import Controller",
                "class Default(Controller): pass",
            ]),
            "bar": os.linesep.join([
                "from endpoints import Controller",
                "class User(Controller): pass",
            ]),
        })
        r = Router(["foo", "bar"])
        t = r.find(*self.get_http_instances("/user"))
        self.assertTrue("bar", t["module_name"])
        t = r.find(*self.get_http_instances("/che"))
        self.assertTrue("foo", t["module_name"])
    def test_routing(self):
        """there was a bug that caused errors raised after the yield to return another
        iteration of a body instead of raising them"""
        controller_prefix = "routing1"
        contents = [
            "from endpoints import Controller",
            "class Default(Controller):",
            "    def GET(self): pass",
            "",
            "class Foo(Controller):",
            "    def GET(self): pass",
            "",
            "class Bar(Controller):",
            "    def GET(self): pass",
        ]
        testdata.create_module(controller_prefix, contents=contents)
        r = Router([controller_prefix])
        info = r.find(*self.get_http_instances())
        self.assertEqual(info['module_name'], controller_prefix)
        self.assertEqual(info['class_name'], "Default")
        r = Router([controller_prefix])
        # /foo/che/baz -> class Foo, remaining path bits become method args.
        info = r.find(*self.get_http_instances("/foo/che/baz"))
        self.assertEqual(2, len(info['method_args']))
        self.assertEqual(info['class_name'], "Foo")
    def test_default_match_with_path(self):
        """when the default controller is used, make sure it falls back to default class
        name if the path bit fails to be a controller class name"""
        controller_prefix = "nomodcontroller2"
        c = Server(controller_prefix, {"nmcon": [
            "from endpoints import Controller",
            "class Default(Controller):",
            "    def GET(self, *args, **kwargs):",
            "        return args[0]"
        ]})
        res = c.handle('/nmcon/8')
        self.assertEqual("8", res.body)
    def test_no_match(self):
        """make sure a controller module that imports a class with the same as
        one of the query args doesen't get picked up as the controller class"""
        controller_prefix = "nomodcontroller"
        contents = {
            "{}.nomod".format(controller_prefix): [
                "class Nomodbar(object): pass",
                ""
            ],
            controller_prefix: [
                "from endpoints import Controller",
                "from .nomod import Nomodbar",
                "class Default(Controller):",
                "    def GET(): pass",
                ""
            ]
        }
        m = testdata.create_modules(contents)
        path = '/nomodbar' # same name as one of the non controller classes
        r = Router([controller_prefix])
        info = r.find(*self.get_http_instances(path))
        # Nomodbar is imported but not a Controller subclass, so Default wins
        # and 'nomodbar' is treated as a method arg.
        self.assertEqual('Default', info['class_name'])
        self.assertEqual('nomodcontroller', info['module_name'])
        self.assertEqual('nomodbar', info['method_args'][0])
    def test_import_error(self):
        # A controller module that fails to import resolves to a 404.
        controller_prefix = "importerrorcontroller"
        c = Server(controller_prefix, [
            "from endpoints import Controller",
            "from does_not_exist import FairyDust",
            "class Default(Controller):",
            "    def GET(): pass",
            ""
        ])
        res = c.handle('/')
        self.assertEqual(404, res.code)
    def test_get_controller_info_default(self):
        """I introduced a bug on 1-12-14 that caused default controllers to fail
        to be found, this makes sure that bug is squashed"""
        controller_prefix = "controller_info_default"
        r = testdata.create_modules({
            controller_prefix: os.linesep.join([
                "from endpoints import Controller",
                "class Default(Controller):",
                "    def GET(): pass",
                ""
            ])
        })
        r = Router([controller_prefix])
        info = r.find(*self.get_http_instances("/"))
        self.assertEqual('Default', info['class_name'])
        self.assertTrue(issubclass(info['class'], Controller))
    def test_callback_info(self):
        controller_prefix = "callback_info"
        req, res = self.get_http_instances("/foo/bar")
        req.query_kwargs = {'foo': 'bar', 'che': 'baz'}
        r = Router([controller_prefix])
        # Module doesn't exist yet, so resolution raises ImportError.
        with self.assertRaises(ImportError):
            d = r.find(req, res)
        contents = [
            "from endpoints import Controller",
            "class Bar(Controller):",
            "    def GET(*args, **kwargs): pass"
        ]
        testdata.create_module("{}.foo".format(controller_prefix), contents=contents)
        # if it succeeds, then it passed the test :)
        d = r.find(req, res)
    def test_get_controller_info(self):
        # Table-driven: each 'in' request must resolve to the 'out' routing info
        # across a nested package of controllers.
        controller_prefix = "controller_info_advanced"
        r = testdata.create_modules({
            controller_prefix: [
                "from endpoints import Controller",
                "class Default(Controller):",
                "    def GET(*args, **kwargs): pass",
                ""
            ],
            "{}.default".format(controller_prefix): [
                "from endpoints import Controller",
                "class Default(Controller):",
                "    def GET(*args, **kwargs): pass",
                ""
            ],
            "{}.foo".format(controller_prefix): [
                "from endpoints import Controller",
                "class Default(Controller):",
                "    def GET(*args, **kwargs): pass",
                "",
                "class Bar(Controller):",
                "    def GET(*args, **kwargs): pass",
                "    def POST(*args, **kwargs): pass",
                ""
            ],
            "{}.foo.baz".format(controller_prefix): [
                "from endpoints import Controller",
                "class Default(Controller):",
                "    def GET(*args, **kwargs): pass",
                "",
                "class Che(Controller):",
                "    def GET(*args, **kwargs): pass",
                ""
            ],
            "{}.foo.boom".format(controller_prefix): [
                "from endpoints import Controller",
                "",
                "class Bang(Controller):",
                "    def GET(*args, **kwargs): pass",
                ""
            ],
        })
        ts = [
            {
                'in': dict(method="GET", path="/foo/bar/happy/sad"),
                'out': {
                    'module_name': "controller_info_advanced.foo",
                    'class_name': 'Bar',
                    'method_args': ['happy', 'sad'],
                    #'method_name': "GET",
                }
            },
            {
                'in': dict(method="GET", path="/"),
                'out': {
                    'module_name': "controller_info_advanced",
                    'class_name': 'Default',
                    'method_args': [],
                    #'method_name': "GET",
                }
            },
            {
                'in': dict(method="GET", path="/happy"),
                'out': {
                    'module_name': "controller_info_advanced",
                    'class_name': 'Default',
                    'method_args': ["happy"],
                    #'method_name': "GET",
                }
            },
            {
                'in': dict(method="GET", path="/foo/baz"),
                'out': {
                    'module_name': "controller_info_advanced.foo.baz",
                    'class_name': 'Default',
                    'method_args': [],
                    #'method_name': "GET",
                }
            },
            {
                'in': dict(method="GET", path="/foo/baz/che"),
                'out': {
                    'module_name': "controller_info_advanced.foo.baz",
                    'class_name': 'Che',
                    'method_args': [],
                    #'method_name': "GET",
                }
            },
            {
                'in': dict(method="GET", path="/foo/baz/happy"),
                'out': {
                    'module_name': "controller_info_advanced.foo.baz",
                    'class_name': 'Default',
                    'method_args': ["happy"],
                    #'method_name': "GET",
                }
            },
            {
                'in': dict(method="GET", path="/foo/happy"),
                'out': {
                    'module_name': "controller_info_advanced.foo",
                    'class_name': 'Default',
                    'method_args': ["happy"],
                    #'method_name': "GET",
                }
            },
        ]
        for t in ts:
            req, res = self.get_http_instances(**t['in'])
            r = Router([controller_prefix])
            d = r.find(req, res)
            for key, val in t['out'].items():
                self.assertEqual(val, d[key])
class CallTest(TestCase):
    def test_routing_error_unexpected_args(self):
        # Extra path bits or query kwargs that the handler signature can't
        # accept must map to routing errors (404/405/409), never 500.
        c = Server(contents=[
            "from endpoints import Controller, param",
            "class Default(Controller):",
            "    def GET(self, foo):",
            "        pass",
        ])
        res = c.handle("/foo/bar", query_kwargs=dict(foo=2))
        self.assertEqual(404, res.code)
        c = Server(contents=[
            "from endpoints import Controller, param",
            "class Default(Controller):",
            "    def GET(self, foo):",
            "        pass",
        ])
        res = c.handle("/foo/bar", query_kwargs=dict(che=2))
        self.assertEqual(404, res.code)
        c = Server(contents=[
            "from endpoints import Controller, param",
            "class Default(Controller):",
            "    def GET(self, foo):",
            "        pass",
        ])
        res = c.handle("/foo/bar", query_kwargs=dict(foo=1, bar=2))
        self.assertEqual(404, res.code)
        c = Server(contents=[
            "from endpoints import Controller, param",
            "class Default(Controller):",
            "    def GET(self, foo):",
            "        pass",
        ])
        # foo given both as path bit and query kwarg -> conflict (409).
        res = c.handle("/foo", query_kwargs=dict(foo=1, bar=2))
        self.assertEqual(409, res.code)
        c = Server(contents=[
            "from endpoints import Controller, param",
            "class Default(Controller):",
            "    def GET(self, foo):",
            "        pass",
        ])
        res = c.handle("/foo", query_kwargs=dict(bar=2))
        self.assertEqual(405, res.code)
        c = Server(contents=[
            "from endpoints import Controller, param",
            "class Default(Controller):",
            "    def GET(self, foo):",
            "        pass",
        ])
        res = c.handle("/foo/bar")
        self.assertEqual(404, res.code)
        c = Server(contents=[
            "from endpoints import Controller, param",
            "class Default(Controller):",
            "    def GET(self, foo, **kwargs):",
            "        pass",
        ])
        res = c.handle("/foo/bar", query_kwargs=dict(foo=1, bar=2))
        self.assertEqual(404, res.code)
    def test_routing_error_no_args(self):
        # Handlers taking no positional args: surplus path bits -> 404,
        # while **kwargs-only handlers still accept query kwargs at "/".
        c = Server(contents=[
            "from endpoints import Controller, param",
            "class Default(Controller):",
            "    def GET(self):",
            "        pass",
        ])
        res = c.handle("/foo/bar/che/baz/boom/bam/blah")
        self.assertEqual(404, res.code)
        c = Server(contents=[
            "from endpoints import Controller, param",
            "class Default(Controller):",
            "    def GET(self, **kwargs):",
            "        pass",
        ])
        res = c.handle("/foo/bar/che/baz/boom/bam/blah", query_kwargs=dict(foo=1, bar=2))
        self.assertEqual(404, res.code)
        c = Server(contents=[
            "from endpoints import Controller, param",
            "class Default(Controller):",
            "    def GET(self, **kwargs):",
            "        return kwargs",
        ])
        res = c.handle("/", query_kwargs=dict(foo=1, bar=2))
        self.assertEqual(200, res.code)
        self.assertTrue("foo" in res._body)
        c = Server(contents=[
            "from endpoints import Controller, param",
            "class Default(Controller):",
            "    def GET(self):",
            "        return kwargs",
        ])
        # Unexpected query kwargs on a no-arg handler -> 405 (handler never runs,
        # so the bogus 'kwargs' name in the fixture body is never evaluated).
        res = c.handle("/", query_kwargs=dict(foo=1, bar=2))
        self.assertEqual(405, res.code)
    def test_lowercase_method(self):
        # HTTP handlers must be UPPERCASE; a lowercase 'get' is not found -> 501.
        c = Server("controller2", {"foo2": [
            "from endpoints import Controller",
            "class Bar(Controller):",
            "    def get(*args, **kwargs): pass"
        ]})
        res = c.handle("/foo2/bar", query_kwargs={'foo2': 'bar', 'che': 'baz'})
        self.assertEqual(501, res.code)
    def test_handle_redirect(self):
        # Raising Redirect produces a 302 with the Location header set.
        c = Server("controllerhr", {"handle": [
            "from endpoints import Controller, Redirect",
            "class Testredirect(Controller):",
            "    def GET(*args, **kwargs):",
            "        raise Redirect('http://example.com')"
        ]})
        res = c.handle("/handle/testredirect")
        self.assertEqual(302, res.code)
        self.assertEqual('http://example.com', res.headers['Location'])
    def test_handle_404_typeerror_1(self):
        """make sure not having a controller is correctly identified as a 404"""
        c = Server(contents=[
            "from endpoints import Controller, Redirect",
            "class NoFoo(Controller):",
            "    def GET(*args, **kwargs):",
            "        pass",
        ])
        res = c.handle("/foo/boom")
        self.assertEqual(404, res.code)
def test_handle_405_typeerror_2(self):
    """make sure 405 works when a path bit is missing"""
    controller_prefix = "h404te2"
    c = Server(controller_prefix, [
        "from endpoints import Controller, decorators",
        "class Default(Controller):",
        " def GET(self, needed_bit, **kwargs):",
        " return ''",
        "",
        " def POST(self, needed_bit, **kwargs):",
        " return ''",
        "",
        "class Htype(Controller):",
        " def POST(self, needed_bit, **kwargs):",
        " return ''",
        "",
        "class Hdec(Controller):",
        " @decorators.param('foo', default='bar')",
        " def POST(self, needed_bit, **kwargs):",
        " return ''",
        "",
    ])
    # Each method requires needed_bit; calling without it should be a 405,
    # both with and without a param decorator, and for both verbs.
    res = c.handle("/hdec", "POST")
    self.assertEqual(405, res.code)
    res = c.handle("/htype", "POST")
    self.assertEqual(405, res.code)
    res = c.handle("/")
    self.assertEqual(405, res.code)
    res = c.handle("/", "POST")
    self.assertEqual(405, res.code)
def test_handle_404_typeerror_3(self):
    """there was an error when there was only one expected argument, turns out
    the call was checking for "arguments" when the message just had "argument" """
    server = Server(contents=[
        "from endpoints import Controller",
        "class Foo(Controller):",
        " def GET(self): pass",
        "",
    ])
    response = server.handle("/foo/bar/baz", query='che=1&boo=2')
    self.assertEqual(404, response.code)
def test_handle_accessdenied(self):
    """raising an AccessDenied error should set code to 401 and the correct header"""
    prefix = "haccessdenied"
    server = Server(prefix, [
        "from endpoints import Controller, AccessDenied",
        "class Default(Controller):",
        " def GET(*args, **kwargs):",
        " raise AccessDenied(scheme='basic')",
        "class Bar(Controller):",
        " def GET(*args, **kwargs):",
        " raise AccessDenied()",
    ])
    # Explicit basic scheme shows up in the WWW-Authenticate header.
    response = server.handle("/")
    self.assertEqual(401, response.code)
    self.assertTrue('Basic' in response.headers['WWW-Authenticate'])
    # Default scheme still produces a 401 with an Auth challenge.
    response = server.handle("/bar")
    self.assertEqual(401, response.code)
    self.assertTrue('Auth' in response.headers['WWW-Authenticate'])
def test_handle_callstop(self):
    """CallStop should short-circuit the request with the given code/body."""
    controller_prefix = "handlecallstop"
    c = Server(controller_prefix, [
        "from endpoints import Controller, CallStop",
        "class Testcallstop(Controller):",
        " def GET(*args, **kwargs):",
        " raise CallStop(205, None)",
        "",
        "class Testcallstop2(Controller):",
        " def GET(*args, **kwargs):",
        " raise CallStop(200, 'this is the body')",
        "",
        "class Testcallstop3(Controller):",
        " def GET(*args, **kwargs):",
        " raise CallStop(204, 'this is ignored')",
    ])
    # CallStop(205, None): no body at all.
    res = c.handle("/testcallstop")
    self.assertEqual(None, res.body)
    self.assertEqual(None, res._body)
    self.assertEqual(205, res.code)
    # CallStop(200, body): body passed through.
    res = c.handle("/testcallstop2")
    self.assertEqual("this is the body", res.body)
    self.assertEqual(200, res.code)
    # 204 No Content discards any body supplied to CallStop.
    res = c.handle("/testcallstop3")
    self.assertEqual(None, res._body)
    self.assertEqual(204, res.code)
def test_nice_error_405(self):
    """Exercise the friendly 405 warning path; output is checked manually."""
    controller_prefix = "nicerr405"
    c = Server(controller_prefix, [
        "from endpoints import Controller",
        "class Foo(Controller):",
        " def GET(self, bar): pass",
        "",
    ])
    # TODO -- capture stdout to make sure the error printed out, until then
    # you will just have to manually check to make sure the warning was raised
    # correctly
    res = c.handle("/foo/bar/che")
def test_handle_method_chain(self):
    """handle_GET -> handle -> GET should each run, in that order."""
    controller_prefix = "handle_method_chain"
    c = Server(controller_prefix, [
        "from endpoints import Controller",
        "class Foo(Controller):",
        " def handle_GET(self, *args, **kwargs):",
        " self.response.handle_GET_called = True",
        " self.handle(*args, **kwargs)",
        "",
        " def handle(self, *args, **kwargs):",
        " self.response.handle_called = True",
        " super(Foo, self).handle(*args, **kwargs)",
        "",
        " def GET(self, *args, **kwargs):",
        " self.response.GET_called = True",
        "",
    ])
    # Each hop in the chain marks the response so we can verify all ran.
    res = c.handle("/foo")
    self.assertTrue(res.handle_GET_called)
    self.assertTrue(res.handle_called)
    self.assertTrue(res.GET_called)
class CallVersioningTest(TestCase):
    """Tests for extracting an API version from the Accept header."""

    def test_get_version(self):
        r = Request()
        r.set_header('accept', 'application/json;version=v1')
        v = r.version()
        self.assertEqual("v1", v)
        # The version is only reported for a matching media type.
        v = r.version("application/json")
        self.assertEqual("v1", v)
        v = r.version("plain/text")
        self.assertEqual("", v)

    def test_get_version_default(self):
        """turns out, calls were failing if there was no accept header even if there were defaults set"""
        # No accept header at all -> empty version, not an error.
        r = Request()
        r.headers = {}
        self.assertEqual("", r.version('application/json'))
        r = Request()
        r.set_header('accept', 'application/json;version=v1')
        self.assertEqual('v1', r.version())
        # Wildcard with no version -> empty.
        r = Request()
        r.set_header('accept', '*/*')
        self.assertEqual("", r.version('application/json'))
        # Wildcard can still carry an explicit version parameter.
        r = Request()
        r.set_header('accept', '*/*;version=v8')
        self.assertEqual('v8', r.version('application/json'))
| |
"""Code to handle a Hue bridge."""
import asyncio
from functools import partial
import logging
from aiohttp import client_exceptions
import aiohue
import async_timeout
import slugify as unicode_slug
import voluptuous as vol
from homeassistant import core
from homeassistant.const import HTTP_INTERNAL_SERVER_ERROR
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from .const import (
CONF_ALLOW_HUE_GROUPS,
CONF_ALLOW_UNREACHABLE,
DEFAULT_ALLOW_HUE_GROUPS,
DEFAULT_ALLOW_UNREACHABLE,
DOMAIN,
LOGGER,
)
from .errors import AuthenticationRequired, CannotConnect
from .helpers import create_config_flow
from .sensor_base import SensorManager
# Bridge-level service for activating a scene, plus its call attributes.
SERVICE_HUE_SCENE = "hue_activate_scene"
ATTR_GROUP_NAME = "group_name"
ATTR_SCENE_NAME = "scene_name"
# Both the group and the scene name are required to activate a scene.
SCENE_SCHEMA = vol.Schema(
    {vol.Required(ATTR_GROUP_NAME): cv.string, vol.Required(ATTR_SCENE_NAME): cv.string}
)
# How long should we sleep if the hub is busy
HUB_BUSY_SLEEP = 0.5
_LOGGER = logging.getLogger(__name__)
class HueBridge:
    """Manages a single Hue bridge."""

    def __init__(self, hass, config_entry):
        """Initialize the system."""
        self.config_entry = config_entry
        self.hass = hass
        self.available = True
        self.authorized = False
        # aiohue.Bridge instance; stays None until async_setup succeeds.
        self.api = None
        # Created in async_setup; limits concurrent hub requests.
        self.parallel_updates_semaphore = None
        # Jobs to be executed when API is reset.
        self.reset_jobs = []
        self.sensor_manager = None
        self.unsub_config_entry_listener = None

    @property
    def host(self):
        """Return the host of this bridge."""
        return self.config_entry.data["host"]

    @property
    def allow_unreachable(self):
        """Allow unreachable light bulbs."""
        return self.config_entry.options.get(
            CONF_ALLOW_UNREACHABLE, DEFAULT_ALLOW_UNREACHABLE
        )

    @property
    def allow_groups(self):
        """Allow groups defined in the Hue bridge."""
        return self.config_entry.options.get(
            CONF_ALLOW_HUE_GROUPS, DEFAULT_ALLOW_HUE_GROUPS
        )

    async def async_setup(self, tries=0):
        """Set up a phue bridge based on host parameter.

        Returns True on success, False on unrecoverable errors, and raises
        ConfigEntryNotReady on transient connection failures so HA retries.
        """
        host = self.host
        hass = self.hass
        bridge = aiohue.Bridge(
            host,
            username=self.config_entry.data["username"],
            websession=aiohttp_client.async_get_clientsession(hass),
        )
        try:
            await authenticate_bridge(hass, bridge)
        except AuthenticationRequired:
            # Usernames can become invalid if hub is reset or user removed.
            # We are going to fail the config entry setup and initiate a new
            # linking procedure. When linking succeeds, it will remove the
            # old config entry.
            create_config_flow(hass, host)
            return False
        except CannotConnect as err:
            LOGGER.error("Error connecting to the Hue bridge at %s", host)
            raise ConfigEntryNotReady from err
        except Exception:  # pylint: disable=broad-except
            LOGGER.exception("Unknown error connecting with Hue bridge at %s", host)
            return False
        self.api = bridge
        self.sensor_manager = SensorManager(self)
        # Forward the entry to each platform this bridge provides.
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(self.config_entry, "light")
        )
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(
                self.config_entry, "binary_sensor"
            )
        )
        hass.async_create_task(
            hass.config_entries.async_forward_entry_setup(self.config_entry, "sensor")
        )
        hass.services.async_register(
            DOMAIN, SERVICE_HUE_SCENE, self.hue_activate_scene, schema=SCENE_SCHEMA
        )
        # The BSB001 (v1) hub tolerates fewer parallel requests than the v2 hub.
        self.parallel_updates_semaphore = asyncio.Semaphore(
            3 if self.api.config.modelid == "BSB001" else 10
        )
        self.unsub_config_entry_listener = self.config_entry.add_update_listener(
            _update_listener
        )
        self.authorized = True
        return True

    async def async_request_call(self, task):
        """Limit parallel requests to Hue hub.

        The Hue hub can only handle a certain amount of parallel requests, total.
        Although we limit our parallel requests, we still will run into issues because
        other products are hitting up Hue.

        ClientOSError means hub closed the socket on us.
        ContentResponseError means hub raised an error.
        Since we don't make bad requests, this is on them.
        """
        async with self.parallel_updates_semaphore:
            # Up to 4 attempts with a linearly-growing back-off sleep.
            for tries in range(4):
                try:
                    return await task()
                except (
                    client_exceptions.ClientOSError,
                    client_exceptions.ClientResponseError,
                    client_exceptions.ServerDisconnectedError,
                ) as err:
                    if tries == 3:
                        _LOGGER.error("Request failed %s times, giving up", tries)
                        raise
                    # We only retry if it's a server error. So raise on all 4XX errors.
                    if (
                        isinstance(err, client_exceptions.ClientResponseError)
                        and err.status < HTTP_INTERNAL_SERVER_ERROR
                    ):
                        raise
                    await asyncio.sleep(HUB_BUSY_SLEEP * tries)

    async def async_reset(self):
        """Reset this bridge to default state.

        Will cancel any scheduled setup retry and will unload
        the config entry.
        """
        # The bridge can be in 3 states:
        #  - Setup was successful, self.api is not None
        #  - Authentication was wrong, self.api is None, not retrying setup.
        # If the authentication was wrong.
        if self.api is None:
            return True
        self.hass.services.async_remove(DOMAIN, SERVICE_HUE_SCENE)
        # Run (and drain) any cleanup jobs registered during setup.
        while self.reset_jobs:
            self.reset_jobs.pop()()
        if self.unsub_config_entry_listener is not None:
            self.unsub_config_entry_listener()
        # If setup was successful, we set api variable, forwarded entry and
        # register service
        results = await asyncio.gather(
            self.hass.config_entries.async_forward_entry_unload(
                self.config_entry, "light"
            ),
            self.hass.config_entries.async_forward_entry_unload(
                self.config_entry, "binary_sensor"
            ),
            self.hass.config_entries.async_forward_entry_unload(
                self.config_entry, "sensor"
            ),
        )
        # None and True are OK
        return False not in results

    async def hue_activate_scene(self, call, updated=False):
        """Service to call directly into bridge to set scenes."""
        group_name = call.data[ATTR_GROUP_NAME]
        scene_name = call.data[ATTR_SCENE_NAME]
        group = next(
            (group for group in self.api.groups.values() if group.name == group_name),
            None,
        )
        # Additional scene logic to handle duplicate scene names across groups:
        # only accept a scene whose light set matches the requested group.
        scene = next(
            (
                scene
                for scene in self.api.scenes.values()
                if scene.name == scene_name
                and group is not None
                and sorted(scene.lights) == sorted(group.lights)
            ),
            None,
        )
        # If we can't find it, fetch latest info and retry exactly once
        # (updated=True prevents infinite recursion).
        if not updated and (group is None or scene is None):
            await self.async_request_call(self.api.groups.update)
            await self.async_request_call(self.api.scenes.update)
            await self.hue_activate_scene(call, updated=True)
            return
        if group is None:
            LOGGER.warning("Unable to find group %s", group_name)
            return
        if scene is None:
            LOGGER.warning("Unable to find scene %s", scene_name)
            return
        await self.async_request_call(partial(group.set_action, scene=scene.id))

    async def handle_unauthorized_error(self):
        """Create a new config flow when the authorization is no longer valid."""
        if not self.authorized:
            # we already created a new config flow, no need to do it again
            return
        LOGGER.error(
            "Unable to authorize to bridge %s, setup the linking again.", self.host
        )
        self.authorized = False
        create_config_flow(self.hass, self.host)
async def authenticate_bridge(hass: core.HomeAssistant, bridge: aiohue.Bridge):
    """Create a bridge object and verify authentication.

    Raises AuthenticationRequired when (re-)linking is needed and
    CannotConnect on network-level failures.
    """
    try:
        # NOTE(review): sync ``with`` on async_timeout is deprecated in newer
        # async_timeout releases (use ``async with``) — presumably fine for
        # the pinned version; confirm before upgrading the dependency.
        with async_timeout.timeout(10):
            # Create username if we don't have one
            if not bridge.username:
                device_name = unicode_slug.slugify(
                    hass.config.location_name, max_length=19
                )
                await bridge.create_user(f"home-assistant#{device_name}")
            # Initialize bridge (and validate our username)
            await bridge.initialize()
    except (aiohue.LinkButtonNotPressed, aiohue.Unauthorized) as err:
        raise AuthenticationRequired from err
    except (
        asyncio.TimeoutError,
        client_exceptions.ClientOSError,
        client_exceptions.ServerDisconnectedError,
        client_exceptions.ContentTypeError,
    ) as err:
        raise CannotConnect from err
    except aiohue.AiohueException as err:
        LOGGER.exception("Unknown Hue linking error occurred")
        raise AuthenticationRequired from err
async def _update_listener(hass, entry):
    """Handle options update."""
    # Reload the config entry so the changed options take effect immediately.
    entry_id = entry.entry_id
    await hass.config_entries.async_reload(entry_id)
| |
# pyOCD debugger
# Copyright (c) 2020 Cypress Semiconductor Corporation
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from ...family.flash_psoc6 import Flash_PSoC64, PSoC6FlashParams
from ...family.target_psoc6 import PSoC64, CortexM_PSoC64_BLE2
from ....core.memory_map import (FlashRegion, MemoryMap)
LOG = logging.getLogger(__name__)
class cy8c64xx(PSoC64):
    """CY8C64xx (PSoC 64) target with S25FL128S external SMIF flash."""
    from .flash_algos.flash_algo_CY8C64xx import flash_algo as flash_algo_main
    from .flash_algos.flash_algo_CY8C6xxx_WFLASH import flash_algo as flash_algo_work
    from .flash_algos.flash_algo_CY8C6xxx_SMIF_S25FL128S import flash_algo as flash_algo_smif

    MEMORY_MAP = MemoryMap(
        PSoC6FlashParams.defaultRomRegion,
        PSoC6FlashParams.defaultRamRegion,
        # Main application flash: 0xE0000 bytes at 0x10000000, boot memory.
        FlashRegion(start=0x10000000, length=0xE0000, blocksize=0x200,
                    is_boot_memory=True,
                    erased_byte_value=0,
                    algo=flash_algo_main,
                    erase_all_weight=PSoC6FlashParams.MFLASH_ERASE_ALL_WEIGHT,
                    erase_sector_weight=PSoC6FlashParams.MFLASH_ERASE_SECTOR_WEIGHT,
                    program_page_weight=PSoC6FlashParams.MFLASH_PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_PSoC64),
        # Work flash: 32 KB at 0x14000000.
        FlashRegion(start=0x14000000, length=0x8000, blocksize=0x200,
                    is_boot_memory=False,
                    erased_byte_value=0,
                    algo=flash_algo_work,
                    erase_all_weight=PSoC6FlashParams.MFLASH_ERASE_ALL_WEIGHT,
                    erase_sector_weight=PSoC6FlashParams.MFLASH_ERASE_SECTOR_WEIGHT,
                    program_page_weight=PSoC6FlashParams.MFLASH_PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_PSoC64),
        # External SMIF memory: 16 MB at 0x18000000; not powered at boot.
        FlashRegion(start=0x18000000, length=0x1000000, blocksize=0x40000, page_size=0x1000,
                    is_boot_memory=False,
                    is_testable=False,
                    erased_byte_value=0xFF,
                    is_powered_on_boot=False,
                    algo=flash_algo_smif,
                    erase_all_weight=PSoC6FlashParams.SMIF_ERASE_ALL_WEIGHT,
                    erase_sector_weight=PSoC6FlashParams.SMIF_ERASE_SECTOR_WEIGHT,
                    program_page_weight=PSoC6FlashParams.SMIF_PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_PSoC64),
    )

    def __init__(self, session, ap_num):
        # ap_num selects which core's access port is used (see *_cm0/*_cm4).
        super(cy8c64xx, self).__init__(session, CortexM_PSoC64_BLE2, self.MEMORY_MAP, ap_num)
class cy8c64xx_s25hx512t(PSoC64):
    """CY8C64xx variant fitted with S25Hx512T external SMIF flash."""
    from .flash_algos.flash_algo_CY8C64xx import flash_algo as flash_algo_main
    from .flash_algos.flash_algo_CY8C6xxx_WFLASH import flash_algo as flash_algo_work
    from .flash_algos.flash_algo_CY8C6xxx_SMIF_S25Hx512T import flash_algo as flash_algo_smif

    MEMORY_MAP = MemoryMap(
        PSoC6FlashParams.defaultRomRegion,
        PSoC6FlashParams.defaultRamRegion,
        # Main application flash: 0xE0000 bytes, boot memory.
        FlashRegion(start=0x10000000, length=0xE0000, blocksize=0x200,
                    is_boot_memory=True,
                    erased_byte_value=0,
                    algo=flash_algo_main,
                    erase_all_weight=PSoC6FlashParams.MFLASH_ERASE_ALL_WEIGHT,
                    erase_sector_weight=PSoC6FlashParams.MFLASH_ERASE_SECTOR_WEIGHT,
                    program_page_weight=PSoC6FlashParams.MFLASH_PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_PSoC64),
        # Work flash: 32 KB.
        FlashRegion(start=0x14000000, length=0x8000, blocksize=0x200,
                    is_boot_memory=False,
                    erased_byte_value=0,
                    algo=flash_algo_work,
                    erase_all_weight=PSoC6FlashParams.MFLASH_ERASE_ALL_WEIGHT,
                    erase_sector_weight=PSoC6FlashParams.MFLASH_ERASE_SECTOR_WEIGHT,
                    program_page_weight=PSoC6FlashParams.MFLASH_PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_PSoC64),
        # External SMIF memory (S25Hx512T algo): 16 MB, not powered at boot.
        FlashRegion(start=0x18000000, length=0x1000000, blocksize=0x40000, page_size=0x1000,
                    is_boot_memory=False,
                    is_testable=False,
                    erased_byte_value=0xFF,
                    is_powered_on_boot=False,
                    algo=flash_algo_smif,
                    erase_all_weight=PSoC6FlashParams.SMIF_ERASE_ALL_WEIGHT,
                    erase_sector_weight=PSoC6FlashParams.SMIF_ERASE_SECTOR_WEIGHT,
                    program_page_weight=PSoC6FlashParams.SMIF_PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_PSoC64),
    )

    def __init__(self, session, ap_num):
        # ap_num selects which core's access port is used (see *_cm0/*_cm4).
        super(cy8c64xx_s25hx512t, self).__init__(session, CortexM_PSoC64_BLE2, self.MEMORY_MAP, ap_num)
class cy8c64xx_nosmif(PSoC64):
    """CY8C64xx variant without any external SMIF flash region."""
    from .flash_algos.flash_algo_CY8C64xx import flash_algo as flash_algo_main
    from .flash_algos.flash_algo_CY8C6xxx_WFLASH import flash_algo as flash_algo_work

    MEMORY_MAP = MemoryMap(
        PSoC6FlashParams.defaultRomRegion,
        PSoC6FlashParams.defaultRamRegion,
        # Main application flash: 0xE0000 bytes, boot memory.
        FlashRegion(start=0x10000000, length=0xE0000, blocksize=0x200,
                    is_boot_memory=True,
                    erased_byte_value=0,
                    algo=flash_algo_main,
                    erase_all_weight=PSoC6FlashParams.MFLASH_ERASE_ALL_WEIGHT,
                    erase_sector_weight=PSoC6FlashParams.MFLASH_ERASE_SECTOR_WEIGHT,
                    program_page_weight=PSoC6FlashParams.MFLASH_PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_PSoC64),
        # Work flash: 32 KB.
        FlashRegion(start=0x14000000, length=0x8000, blocksize=0x200,
                    is_boot_memory=False,
                    erased_byte_value=0,
                    algo=flash_algo_work,
                    erase_all_weight=PSoC6FlashParams.MFLASH_ERASE_ALL_WEIGHT,
                    erase_sector_weight=PSoC6FlashParams.MFLASH_ERASE_SECTOR_WEIGHT,
                    program_page_weight=PSoC6FlashParams.MFLASH_PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_PSoC64),
    )

    def __init__(self, session, ap_num):
        # ap_num selects which core's access port is used (see *_cm0/*_cm4).
        super(cy8c64xx_nosmif, self).__init__(session, CortexM_PSoC64_BLE2, self.MEMORY_MAP, ap_num)
class cy8c64xx_cm0(cy8c64xx):
    """cy8c64xx accessed through AP #1."""

    def __init__(self, session):
        super().__init__(session, 1)
class cy8c64xx_cm4(cy8c64xx):
    """cy8c64xx accessed through AP #2."""

    def __init__(self, session):
        super().__init__(session, 2)
class cy8c64xx_cm0_s25hx512t(cy8c64xx_s25hx512t):
    """cy8c64xx_s25hx512t accessed through AP #1."""

    def __init__(self, session):
        super().__init__(session, 1)
class cy8c64xx_cm4_s25hx512t(cy8c64xx_s25hx512t):
    """cy8c64xx_s25hx512t accessed through AP #2."""

    def __init__(self, session):
        super().__init__(session, 2)
class cy8c64xx_cm0_nosmif(cy8c64xx_nosmif):
    """cy8c64xx_nosmif accessed through AP #1."""

    def __init__(self, session):
        super().__init__(session, 1)
class cy8c64xx_cm4_nosmif(cy8c64xx_nosmif):
    """cy8c64xx_nosmif accessed through AP #2."""

    def __init__(self, session):
        super().__init__(session, 2)
class cy8c64xx_cm4_full_flash(cy8c64xx_cm4):
    """cy8c64xx_cm4 with the full 0x100000-byte main flash region exposed."""
    from .flash_algos.flash_algo_CY8C64xx import flash_algo as flash_algo_main
    from .flash_algos.flash_algo_CY8C6xxx_WFLASH import flash_algo as flash_algo_work
    from .flash_algos.flash_algo_CY8C6xxx_SMIF_S25Hx512T import flash_algo as flash_algo_smif

    MEMORY_MAP = MemoryMap(
        PSoC6FlashParams.defaultRomRegion,
        PSoC6FlashParams.defaultRamRegion,
        # Main flash: full 1 MB (vs. 0xE0000 in the base map), boot memory.
        FlashRegion(start=0x10000000, length=0x100000, blocksize=0x200,
                    is_boot_memory=True,
                    erased_byte_value=0,
                    algo=flash_algo_main,
                    erase_all_weight=PSoC6FlashParams.MFLASH_ERASE_ALL_WEIGHT,
                    erase_sector_weight=PSoC6FlashParams.MFLASH_ERASE_SECTOR_WEIGHT,
                    program_page_weight=PSoC6FlashParams.MFLASH_PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_PSoC64),
        # Work flash: 32 KB.
        FlashRegion(start=0x14000000, length=0x8000, blocksize=0x200,
                    is_boot_memory=False,
                    erased_byte_value=0,
                    algo=flash_algo_work,
                    erase_all_weight=PSoC6FlashParams.MFLASH_ERASE_ALL_WEIGHT,
                    erase_sector_weight=PSoC6FlashParams.MFLASH_ERASE_SECTOR_WEIGHT,
                    program_page_weight=PSoC6FlashParams.MFLASH_PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_PSoC64),
        # External SMIF memory (S25Hx512T algo): 16 MB, not powered at boot.
        FlashRegion(start=0x18000000, length=0x1000000, blocksize=0x40000, page_size=0x1000,
                    is_boot_memory=False,
                    is_testable=False,
                    erased_byte_value=0xFF,
                    is_powered_on_boot=False,
                    algo=flash_algo_smif,
                    erase_all_weight=PSoC6FlashParams.SMIF_ERASE_ALL_WEIGHT,
                    erase_sector_weight=PSoC6FlashParams.SMIF_ERASE_SECTOR_WEIGHT,
                    program_page_weight=PSoC6FlashParams.SMIF_PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_PSoC64),
    )
class cy8c64xx_cm0_full_flash(cy8c64xx_cm0):
    """cy8c64xx_cm0 with the full 0x100000-byte main flash region exposed."""
    from .flash_algos.flash_algo_CY8C64xx import flash_algo as flash_algo_main
    from .flash_algos.flash_algo_CY8C6xxx_WFLASH import flash_algo as flash_algo_work
    from .flash_algos.flash_algo_CY8C6xxx_SMIF_S25Hx512T import flash_algo as flash_algo_smif

    MEMORY_MAP = MemoryMap(
        PSoC6FlashParams.defaultRomRegion,
        PSoC6FlashParams.defaultRamRegion,
        # Main flash: full 1 MB (vs. 0xE0000 in the base map), boot memory.
        FlashRegion(start=0x10000000, length=0x100000, blocksize=0x200,
                    is_boot_memory=True,
                    erased_byte_value=0,
                    algo=flash_algo_main,
                    erase_all_weight=PSoC6FlashParams.MFLASH_ERASE_ALL_WEIGHT,
                    erase_sector_weight=PSoC6FlashParams.MFLASH_ERASE_SECTOR_WEIGHT,
                    program_page_weight=PSoC6FlashParams.MFLASH_PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_PSoC64),
        # Work flash: 32 KB.
        FlashRegion(start=0x14000000, length=0x8000, blocksize=0x200,
                    is_boot_memory=False,
                    erased_byte_value=0,
                    algo=flash_algo_work,
                    erase_all_weight=PSoC6FlashParams.MFLASH_ERASE_ALL_WEIGHT,
                    erase_sector_weight=PSoC6FlashParams.MFLASH_ERASE_SECTOR_WEIGHT,
                    program_page_weight=PSoC6FlashParams.MFLASH_PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_PSoC64),
        # External SMIF memory (S25Hx512T algo): 16 MB, not powered at boot.
        FlashRegion(start=0x18000000, length=0x1000000, blocksize=0x40000, page_size=0x1000,
                    is_boot_memory=False,
                    is_testable=False,
                    erased_byte_value=0xFF,
                    is_powered_on_boot=False,
                    algo=flash_algo_smif,
                    erase_all_weight=PSoC6FlashParams.SMIF_ERASE_ALL_WEIGHT,
                    erase_sector_weight=PSoC6FlashParams.SMIF_ERASE_SECTOR_WEIGHT,
                    program_page_weight=PSoC6FlashParams.SMIF_PROGRAM_PAGE_WEIGHT,
                    flash_class=Flash_PSoC64),
    )
| |
"""Support for PlayStation 4 consoles."""
import asyncio
from contextlib import suppress
import logging
from pyps4_2ndscreen.errors import NotReady, PSDataIncomplete
from pyps4_2ndscreen.media_art import TYPE_APP as PS_TYPE_APP
import pyps4_2ndscreen.ps4 as pyps4
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_TITLE,
MEDIA_TYPE_APP,
MEDIA_TYPE_GAME,
SUPPORT_PAUSE,
SUPPORT_SELECT_SOURCE,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.const import (
ATTR_LOCKED,
CONF_HOST,
CONF_NAME,
CONF_REGION,
CONF_TOKEN,
STATE_IDLE,
STATE_PLAYING,
STATE_STANDBY,
)
from homeassistant.core import callback
from homeassistant.helpers import device_registry, entity_registry
from homeassistant.helpers.entity import DeviceInfo
from . import format_unique_id, load_games, save_games
from .const import (
ATTR_MEDIA_IMAGE_URL,
DEFAULT_ALIAS,
DOMAIN as PS4_DOMAIN,
PS4_DATA,
REGIONS as deprecated_regions,
)
_LOGGER = logging.getLogger(__name__)

# Media-player features the PS4 entity supports.
SUPPORT_PS4 = (
    SUPPORT_TURN_OFF
    | SUPPORT_TURN_ON
    | SUPPORT_PAUSE
    | SUPPORT_STOP
    | SUPPORT_SELECT_SOURCE
)
ICON = "mdi:sony-playstation"
MEDIA_IMAGE_DEFAULT = None
# Number of failed status polls tolerated before the state becomes unknown.
DEFAULT_RETRIES = 2
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up PS4 from a config entry."""
    creds = config_entry.data[CONF_TOKEN]
    # Build one media-player entity per configured console.
    entities = [
        PS4Device(
            config_entry,
            device[CONF_NAME],
            device[CONF_HOST],
            device[CONF_REGION],
            pyps4.Ps4Async(device[CONF_HOST], creds, device_name=DEFAULT_ALIAS),
            creds,
        )
        for device in config_entry.data["devices"]
    ]
    async_add_entities(entities, update_before_add=True)
class PS4Device(MediaPlayerEntity):
    """Representation of a PS4."""

    def __init__(self, config, name, host, region, ps4, creds):
        """Initialize the ps4 device."""
        self._entry_id = config.entry_id
        self._ps4 = ps4
        self._host = host
        self._name = name
        self._region = region
        self._creds = creds
        self._state = None
        self._media_content_id = None
        self._media_title = None
        self._media_image = None
        self._media_type = None
        self._source = None
        # title_id -> {title, image url, content type, locked} mapping,
        # persisted via load_games/save_games.
        self._games = {}
        self._source_list = []
        self._retry = 0
        self._disconnected = False
        self._unique_id = None

    @callback
    def status_callback(self):
        """Handle status callback. Parse status."""
        self._parse_status()
        self.async_write_ha_state()

    @callback
    def subscribe_to_protocol(self):
        """Notify protocol to callback with update changes."""
        self.hass.data[PS4_DATA].protocol.add_callback(self._ps4, self.status_callback)

    @callback
    def unsubscribe_to_protocol(self):
        """Notify protocol to remove callback."""
        self.hass.data[PS4_DATA].protocol.remove_callback(
            self._ps4, self.status_callback
        )

    def check_region(self):
        """Display logger msg if region is deprecated."""
        # Non-Breaking although data returned may be inaccurate.
        if self._region in deprecated_regions:
            _LOGGER.info(
                """Region: %s has been deprecated.
                Please remove PS4 integration
                and Re-configure again to utilize
                current regions""",
                self._region,
            )

    async def async_added_to_hass(self):
        """Subscribe PS4 events."""
        self.hass.data[PS4_DATA].devices.append(self)
        self.check_region()

    async def async_update(self):
        """Retrieve the latest data."""
        if self._ps4.ddp_protocol is not None:
            # Request Status with asyncio transport.
            self._ps4.get_status()
            # Don't attempt to connect if entity is connected or if,
            # PS4 is in standby or disconnected from LAN or powered off.
            if (
                not self._ps4.connected
                and not self._ps4.is_standby
                and self._ps4.is_available
            ):
                with suppress(NotReady):
                    await self._ps4.async_connect()
        # Try to ensure correct status is set on startup for device info.
        if self._ps4.ddp_protocol is None:
            # Use socket.socket.
            await self.hass.async_add_executor_job(self._ps4.get_status)
            if self._attr_device_info is None:
                # Add entity to registry.
                await self.async_get_device_info(self._ps4.status)
            self._ps4.ddp_protocol = self.hass.data[PS4_DATA].protocol
            self.subscribe_to_protocol()
        self._parse_status()

    def _parse_status(self):
        """Parse status."""
        if (status := self._ps4.status) is not None:
            self._games = load_games(self.hass, self._unique_id)
            if self._games:
                self.get_source_list()
            self._retry = 0
            self._disconnected = False
            if status.get("status") == "Ok":
                title_id = status.get("running-app-titleid")
                name = status.get("running-app-name")
                if title_id and name is not None:
                    self._state = STATE_PLAYING
                    # Only refresh media attributes when the title changes.
                    if self._media_content_id != title_id:
                        self._media_content_id = title_id
                        if self._use_saved():
                            _LOGGER.debug("Using saved data for media: %s", title_id)
                            return
                        self._media_title = name
                        self._source = self._media_title
                        self._media_type = None
                        # Get data from PS Store.
                        asyncio.ensure_future(self.async_get_title_data(title_id, name))
                else:
                    if self._state != STATE_IDLE:
                        self.idle()
            else:
                if self._state != STATE_STANDBY:
                    self.state_standby()
        elif self._retry > DEFAULT_RETRIES:
            self.state_unknown()
        else:
            self._retry += 1

    def _use_saved(self) -> bool:
        """Return True, Set media attrs if data is locked."""
        if self._media_content_id in self._games:
            store = self._games[self._media_content_id]
            # If locked get attributes from file.
            if store.get(ATTR_LOCKED):
                self._media_title = store.get(ATTR_MEDIA_TITLE)
                self._source = self._media_title
                self._media_image = store.get(ATTR_MEDIA_IMAGE_URL)
                self._media_type = store.get(ATTR_MEDIA_CONTENT_TYPE)
                return True
        return False

    def idle(self):
        """Set states for state idle."""
        self.reset_title()
        self._state = STATE_IDLE

    def state_standby(self):
        """Set states for state standby."""
        self.reset_title()
        self._state = STATE_STANDBY

    def state_unknown(self):
        """Set states for state unknown."""
        self.reset_title()
        self._state = None
        # Warn only on the transition into the unreachable state.
        if self._disconnected is False:
            _LOGGER.warning("PS4 could not be reached")
            self._disconnected = True
            self._retry = 0

    def reset_title(self):
        """Update if there is no title."""
        self._media_title = None
        self._media_content_id = None
        self._media_type = None
        self._source = None

    async def async_get_title_data(self, title_id, name):
        """Get PS Store Data."""
        app_name = None
        art = None
        media_type = None
        try:
            title = await self._ps4.async_get_ps_store_data(
                name, title_id, self._region
            )
        except PSDataIncomplete:
            title = None
        except asyncio.TimeoutError:
            title = None
            _LOGGER.error("PS Store Search Timed out")
        else:
            if title is not None:
                app_name = title.name
                art = title.cover_art
                # Assume media type is game if not app.
                if title.game_type != PS_TYPE_APP:
                    media_type = MEDIA_TYPE_GAME
                else:
                    media_type = MEDIA_TYPE_APP
            else:
                _LOGGER.error(
                    "Could not find data in region: %s for PS ID: %s",
                    self._region,
                    title_id,
                )
        finally:
            # Fall back to the name reported by the console on any failure.
            self._media_title = app_name or name
            self._source = self._media_title
            self._media_image = art or None
            self._media_type = media_type
            await self.hass.async_add_executor_job(self.update_list)
            self.async_write_ha_state()

    def update_list(self):
        """Update Game List, Correct data if different."""
        if self._media_content_id in self._games:
            store = self._games[self._media_content_id]
            # Drop the stale entry so it is re-added with fresh data below.
            if (
                store.get(ATTR_MEDIA_TITLE) != self._media_title
                or store.get(ATTR_MEDIA_IMAGE_URL) != self._media_image
            ):
                self._games.pop(self._media_content_id)
        if self._media_content_id not in self._games:
            self.add_games(
                self._media_content_id,
                self._media_title,
                self._media_image,
                self._media_type,
            )
            self._games = load_games(self.hass, self._unique_id)
        self.get_source_list()

    def get_source_list(self):
        """Parse data entry and update source list."""
        games = []
        for data in self._games.values():
            games.append(data[ATTR_MEDIA_TITLE])
        self._source_list = sorted(games)

    def add_games(self, title_id, app_name, image, g_type, is_locked=False):
        """Add games to list."""
        games = self._games
        if title_id is not None and title_id not in games:
            game = {
                title_id: {
                    ATTR_MEDIA_TITLE: app_name,
                    ATTR_MEDIA_IMAGE_URL: image,
                    ATTR_MEDIA_CONTENT_TYPE: g_type,
                    ATTR_LOCKED: is_locked,
                }
            }
            games.update(game)
            save_games(self.hass, games, self._unique_id)

    async def async_get_device_info(self, status):
        """Set device info for registry."""
        # If cannot get status on startup, assume info from registry.
        if status is None:
            _LOGGER.info("Assuming status from registry")
            e_registry = await entity_registry.async_get_registry(self.hass)
            d_registry = await device_registry.async_get_registry(self.hass)
            for entity_id, entry in e_registry.entities.items():
                if entry.config_entry_id == self._entry_id:
                    self._unique_id = entry.unique_id
                    self.entity_id = entity_id
                    break
            for device in d_registry.devices.values():
                if self._entry_id in device.config_entries:
                    self._attr_device_info = DeviceInfo(
                        identifiers=device.identifiers,
                        manufacturer=device.manufacturer,
                        model=device.model,
                        name=device.name,
                        sw_version=device.sw_version,
                    )
                    break
        else:
            # Format e.g. "07020001" -> "7.02" style version string.
            _sw_version = status["system-version"]
            _sw_version = _sw_version[1:4]
            sw_version = f"{_sw_version[0]}.{_sw_version[1:]}"
            self._attr_device_info = DeviceInfo(
                identifiers={(PS4_DOMAIN, status["host-id"])},
                manufacturer="Sony Interactive Entertainment Inc.",
                model="PlayStation 4",
                name=status["host-name"],
                sw_version=sw_version,
            )
            self._unique_id = format_unique_id(self._creds, status["host-id"])

    async def async_will_remove_from_hass(self):
        """Remove Entity from Home Assistant."""
        # Close TCP Transport.
        if self._ps4.connected:
            await self._ps4.close()
        self.unsubscribe_to_protocol()
        self.hass.data[PS4_DATA].devices.remove(self)

    @property
    def unique_id(self):
        """Return Unique ID for entity."""
        return self._unique_id

    @property
    def entity_picture(self):
        """Return picture."""
        if (
            self._state == STATE_PLAYING
            and self._media_content_id is not None
            and (image_hash := self.media_image_hash) is not None
        ):
            return (
                f"/api/media_player_proxy/{self.entity_id}?"
                f"token={self.access_token}&cache={image_hash}"
            )
        return MEDIA_IMAGE_DEFAULT

    @property
    def name(self):
        """Return the name of the device."""
        return self._name

    @property
    def state(self):
        """Return the state of the device."""
        return self._state

    @property
    def icon(self):
        """Icon."""
        return ICON

    @property
    def media_content_id(self):
        """Content ID of current playing media."""
        return self._media_content_id

    @property
    def media_content_type(self):
        """Content type of current playing media."""
        return self._media_type

    @property
    def media_image_url(self):
        """Image url of current playing media."""
        if self._media_content_id is None:
            return MEDIA_IMAGE_DEFAULT
        return self._media_image

    @property
    def media_title(self):
        """Title of current playing media."""
        return self._media_title

    @property
    def supported_features(self):
        """Media player features that are supported."""
        return SUPPORT_PS4

    @property
    def source(self):
        """Return the current input source."""
        return self._source

    @property
    def source_list(self):
        """List of available input sources."""
        return self._source_list

    async def async_turn_off(self):
        """Turn off media player."""
        await self._ps4.standby()

    async def async_turn_on(self):
        """Turn on the media player."""
        self._ps4.wakeup()

    async def async_toggle(self):
        """Toggle media player."""
        await self._ps4.toggle()

    async def async_media_pause(self):
        """Send keypress ps to return to menu."""
        await self.async_send_remote_control("ps")

    async def async_media_stop(self):
        """Send keypress ps to return to menu."""
        await self.async_send_remote_control("ps")

    async def async_select_source(self, source):
        """Select input source."""
        for title_id, data in self._games.items():
            game = data[ATTR_MEDIA_TITLE]
            # Match by title (case-insensitive) or by title id.
            if (
                source.lower().encode(encoding="utf-8")
                == game.lower().encode(encoding="utf-8")
                or source == title_id
            ):
                _LOGGER.debug(
                    "Starting PS4 game %s (%s) using source %s", game, title_id, source
                )
                await self._ps4.start_title(title_id, self._media_content_id)
                return
        _LOGGER.warning("Could not start title. '%s' is not in source list", source)
        return

    async def async_send_command(self, command):
        """Send Button Command."""
        await self.async_send_remote_control(command)

    async def async_send_remote_control(self, command):
        """Send RC command."""
        await self._ps4.remote_control(command)
| |
import os
import unittest
import numpy
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
import logging
import dicom
from slicer.util import VTKObservationMixin
#
# AutoSegmentationSliceletWidget
#
class AutoSegmentationSliceletWidget:
  """Minimal module widget: only records the parent widget it was given.

  NOTE(review): the bare ``parent`` expression on the first try line cannot
  raise for an ordinary argument, so the except branch looks effectively
  dead -- confirm the original intent (probably a parent-is-valid check).
  """
  def __init__(self, parent=None):
    try:
      parent
      self.parent = parent
    except Exception, e:
      import traceback
      traceback.print_exc()
      logging.error("There is no parent to AutoSegmentationSliceletWidget!")
#
# SliceletMainFrame
# Handles the event when the slicelet is hidden (its window closed)
#
class SliceletMainFrame(qt.QDialog):
  """Top-level window for the slicelet; tears it down when hidden/closed."""
  def setSlicelet(self, slicelet):
    # Keep a reference so hideEvent can disconnect the slicelet later.
    self.slicelet = slicelet
  def hideEvent(self, event):
    # Window was hidden (closed): release the slicelet and its resources.
    self.slicelet.disconnect()
    import gc
    # Leak check: anything besides this frame still holding the slicelet?
    refs = gc.get_referrers(self.slicelet)
    if len(refs) > 1:
      # Debugging hook, intentionally disabled.
      # logging.debug('Stuck slicelet references (' + repr(len(refs)) + '):\n' + repr(refs))
      pass
    # Clear the module-level handle and schedule Qt-side deletion.
    slicer.autoSegmentationSliceletInstance = None
    self.slicelet = None
    self.deleteLater()
#
# GelDosimetryAnalysisSlicelet
#
class AutoSegmentationSlicelet(VTKObservationMixin):
  """Standalone slicelet GUI: parameter panel on the left, 3D view right.

  Collects the segmentation parameters (minimum enhancement threshold and
  the two delayed-curve slope cut-offs) plus a DICOM directory, then builds
  an AutoSegmentationLogic when the directory dialog is accepted.
  """
  def __init__(self, parent, developerMode=False, widgetClass=None):
    VTKObservationMixin.__init__(self)
    # Set up main frame
    self.parent = parent
    self.parent.setLayout(qt.QHBoxLayout())
    self.layout = self.parent.layout()
    self.layout.setMargin(0)
    self.layout.setSpacing(0)
    # Left-hand panel holding all parameter widgets.
    self.sliceletPanel = qt.QFrame(self.parent)
    self.sliceletPanelLayout = qt.QVBoxLayout(self.sliceletPanel)
    self.sliceletPanelLayout.setMargin(4)
    self.sliceletPanelLayout.setSpacing(0)
    self.layout.addWidget(self.sliceletPanel,1)
    #Set Advanced Parameters Collapsible Button
    self.parametersCollapsibleButton = ctk.ctkCollapsibleButton()
    self.parametersCollapsibleButton.text = "Set Advanced Segmentation Parameters"
    self.parametersCollapsibleButton.collapsed = True
    self.sliceletPanelLayout.addWidget(self.parametersCollapsibleButton)
    # Layout within the collapsible button
    self.parametersLayout = qt.QFormLayout(self.parametersCollapsibleButton)
    # Set Minimum Threshold of Percentage Increase to First Post-Contrast Image
    self.inputMinimumThreshold = qt.QLabel("Minimum Threshold of Increase", self.parametersCollapsibleButton)
    self.inputMinimumThreshold.setToolTip('Minimum Threshold of Percentage Increase (Pre- to First Post-contrast (Range: 10% to 150%)')
    self.inputSelectorMinimumThreshold = qt.QDoubleSpinBox(self.parametersCollapsibleButton)
    self.inputSelectorMinimumThreshold.setSuffix("%")
    self.inputSelectorMinimumThreshold.singleStep = (1)
    self.inputSelectorMinimumThreshold.minimum = (10)
    self.inputSelectorMinimumThreshold.maximum = (150)
    self.inputSelectorMinimumThreshold.value = (75)
    self.inputSelectorMinimumThreshold.setToolTip('Minimum Threshold of Percentage Increase (Pre- to First Post-contrast (Range: 10% to 150%)')
    self.parametersLayout.addRow(self.inputMinimumThreshold, self.inputSelectorMinimumThreshold)
    # Curve 1 Type Parameters (Slopes from First to Fourth Post-Contrast Images)
    self.inputCurve1 = qt.QLabel("Type 1 (Persistent) Curve Minimum Slope", self.parametersCollapsibleButton)
    self.inputCurve1.setToolTip('Minimum Slope of Delayed Curve to classify as Persistent (Range: 0.02 to 0.3)')
    self.inputSelectorCurve1 = qt.QDoubleSpinBox(self.parametersCollapsibleButton)
    self.inputSelectorCurve1.singleStep = (0.02)
    self.inputSelectorCurve1.minimum = (0.02)
    self.inputSelectorCurve1.maximum = (0.30)
    self.inputSelectorCurve1.value = (0.20)
    self.inputSelectorCurve1.setToolTip('Minimum Slope of Delayed Curve to classify as Persistent (Range: 0.02 to 0.3)')
    self.parametersLayout.addRow(self.inputCurve1, self.inputSelectorCurve1)
    # Curve 3 Type Parameters (Slopes from First to Fourth Post-Contrast Images)
    self.inputCurve3 = qt.QLabel("Type 3 (Washout) Curve Maximum Slope", self.parametersCollapsibleButton)
    self.inputCurve3.setToolTip('Maximum Slope of Delayed Curve to classify as Washout (Range: -0.02 to -0.3)')
    self.inputSelectorCurve3 = qt.QDoubleSpinBox(self.parametersCollapsibleButton)
    self.inputSelectorCurve3.singleStep = (0.02)
    # Spin box shows a positive number; the '-' prefix displays the sign,
    # and createLogic negates the value before it is used.
    self.inputSelectorCurve3.setPrefix("-")
    self.inputSelectorCurve3.minimum = (0.02)
    self.inputSelectorCurve3.maximum = (0.30)
    self.inputSelectorCurve3.value = (0.20)
    self.inputSelectorCurve3.setToolTip('Maximum Slope of Delayed Curve to classify as Washout (Range: -0.02 to -0.3)')
    self.parametersLayout.addRow(self.inputCurve3, self.inputSelectorCurve3)
    # Path input for dicom data to analyze
    self.inputPath = qt.QFileDialog()
    self.inputPath.setFileMode(qt.QFileDialog.Directory)
    self.sliceletPanelLayout.addWidget(self.inputPath)
    # Accepting the directory dialog kicks off the whole pipeline.
    self.inputPath.connect('accepted()', self.createLogic)
    ##############
    # Right-hand side: a single 3D view bound to the MRML scene.
    self.layoutWidget = slicer.qMRMLLayoutWidget()
    self.layoutWidget.setMRMLScene(slicer.mrmlScene)
    self.layoutWidget.setLayout(slicer.vtkMRMLLayoutNode.SlicerLayoutOneUp3DView)
    self.layout.addWidget(self.layoutWidget,2)
    if widgetClass:
      self.widget = widgetClass(self.parent)
    self.parent.show()
  def createLogic(self):
    """Read GUI values and run the segmentation pipeline eagerly."""
    pathToDICOM = self.inputPath.directory().absolutePath()
    # Percent -> fraction.
    minThreshold = (self.inputSelectorMinimumThreshold.value)/(100)
    # Spin box stores the magnitude; the real cut-off is negative.
    curve3Maximum = -1 * (self.inputSelectorCurve3.value)
    curve1Minimum = self.inputSelectorCurve1.value
    self.logic = AutoSegmentationLogic(pathToDICOM, minThreshold, curve1Minimum, curve3Maximum)
  def disconnect(self):
    """Detach signal connections on shutdown (currently nothing to do)."""
    logging.debug("disconnecting")
#
# AutoSegmentation
#
class AutoSegmentation(ScriptedLoadableModule):
  """Module descriptor that registers AutoSegmentation with Slicer."""
  def __init__(self, parent):
    ScriptedLoadableModule.__init__(self, parent)
    # TODO make this more human readable by adding spaces
    self.parent.title = "AutoSegmentation"
    self.parent.categories = ["Examples"]
    self.parent.dependencies = []
    self.parent.contributors = ["Thomas Tramberger (TU Wien)"]
    self.parent.helpText = """
    This is an example of scripted loadable module bundled in an extension. It performs a simple thresholding on the input volume and optionally captures a screenshot.
    """
    self.parent.acknowledgementText = """
    This file was originally developed by Jean-Christophe Fillion-Robin, Kitware Inc.
    and Steve Pieper, Isomics, Inc. and was partially funded by NIH grant 3P41RR013218-12S1.
    """ # replace with organization, grant and thanks.
#
# AutoSegmentationWidget
#
class AutoSegmentationWidget(ScriptedLoadableModuleWidget):
  """Uses ScriptedLoadableModuleWidget base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """
  def setup(self):
    """Build the in-module GUI: a single button that opens the slicelet."""
    # gui for testing and reloading, /todo remove at end
    ScriptedLoadableModuleWidget.setup(self)
    # Show slicelet button
    showSliceletButton = qt.QPushButton("Show slicelet")
    showSliceletButton.toolTip = "Launch the slicelet"
    self.layout.addWidget(qt.QLabel(' '))
    self.layout.addWidget(showSliceletButton)
    showSliceletButton.connect('clicked()', self.launchSlicelet)
    # Add vertical spacer
    self.layout.addStretch(1)
  def launchSlicelet(self):
    """Create the slicelet in its own top-level window and return it."""
    mainFrame = SliceletMainFrame()
    mainFrame.minimumWidth = 800
    mainFrame.minimumHeight = 600
    mainFrame.windowTitle = "AutoSegmentation"
    mainFrame.setWindowFlags(qt.Qt.WindowCloseButtonHint | qt.Qt.WindowMaximizeButtonHint | qt.Qt.WindowTitleHint)
    iconPath = os.path.join(os.path.dirname(slicer.modules.autosegmentation.path), 'Resources/Icons', self.moduleName+'.png')
    mainFrame.windowIcon = qt.QIcon(iconPath)
    mainFrame.connect('destroyed()', self.onSliceletClosed)
    slicelet = AutoSegmentationSlicelet(mainFrame, self.developerMode)
    # The frame needs the slicelet back-reference for hideEvent cleanup.
    mainFrame.setSlicelet(slicelet)
    # Make the slicelet reachable from the Slicer python interactor for testing
    slicer.autoSegmentationSliceletInstance = slicelet
    return slicelet
  def onSliceletClosed(self):
    """Callback fired when the slicelet window is destroyed."""
    logging.debug('Slicelet closed')
#
# AutoSegmentationLogic
#
class AutoSegmentationLogic(ScriptedLoadableModuleLogic):
  """Voxel-wise contrast-curve analysis of a DCE-MRI DICOM time series.

  The constructor eagerly runs the full pipeline: read the DICOM series,
  compute initial-rise and delayed-slope maps, classify voxels by curve
  type, mesh the washout mask and write it out as an STL surface.
  """
  def __init__(self, pathToDICOM, minThreshold, curve1Minimum, curve3Maximum):
    # pathToDICOM: directory walked recursively for .dcm files.
    # minThreshold: minimum fractional rise pre- to first post-contrast.
    # curve1Minimum / curve3Maximum: slope cut-offs for persistent/washout.
    self.pathToDICOM = pathToDICOM
    self.minThreshold = minThreshold
    self.curve1Minimum = curve1Minimum
    self.curve3Maximum = curve3Maximum
    # One numpy volume per contrast phase; index 0 is pre-contrast.
    self.dicomDataNumpyArrays = self.readData()
    self.initialRiseArray = self.calcInitialRise()
    self.slopeArray = self.calcSlope()
    self.roi = self.createROI()
    #boolean array with targeted voxels
    self.targetVoxels = self.getTargetedVoxels()
    self.washoutVoxels = self.getWashoutVoxels()
    # Coordinates of the brightest voxel of the first post-contrast volume;
    # used as the seed point for the connectivity filter.
    self.coordsOfHighestDens = numpy.unravel_index(numpy.argmax(self.dicomDataNumpyArrays[1]), self.dicomDataNumpyArrays[1].shape)
    print self.coordsOfHighestDens
    self.createAndSaveVolume(self.washoutVoxels, "washout.stl", self.coordsOfHighestDens)
    #self.createAndSaveVolume(self.persistenceVoxels, "persistence.stl")
    #self.createAndSaveVolume(self.plateauVoxels, "plateau.stl")
  def readData(self):
    """Load every .dcm under pathToDICOM, split slices into per-phase
    volumes, and return them as a list of numpy arrays."""
    filesDCM = []
    for dirName, subdirList, fileList in os.walk(self.pathToDICOM):
      for filename in fileList:
        if ".dcm" in filename.lower():
          filesDCM.append(os.path.join(dirName, filename))
    #load all dicom data
    dicomData = []
    for files in filesDCM:
      dicomData.append(dicom.read_file(files))
    #sort dicom data by instance number
    dicomData.sort(key=lambda dicomData: dicomData.InstanceNumber)
    #separate data to their contrast volumes
    # A new volume starts whenever SliceLocation stops increasing between
    # consecutive (instance-sorted) slices.
    dicomDataContrastVolumes = []
    firstContrastVolume = []
    dicomDataContrastVolumes.append(firstContrastVolume)
    contrastVolumeIndexHelper = 0
    for i in range(0,len(dicomData)-1):
      if dicomData[i].SliceLocation < dicomData[i+1].SliceLocation:
        dicomDataContrastVolumes[contrastVolumeIndexHelper].append(dicomData[i])
      else:
        dicomDataContrastVolumes[contrastVolumeIndexHelper].append(dicomData[i])
        contrastVolumeIndexHelper = contrastVolumeIndexHelper+1
        nextContrastVolume = []
        dicomDataContrastVolumes.append(nextContrastVolume)
    #assign last element
    dicomDataContrastVolumes[contrastVolumeIndexHelper].append(dicomData[-1])
    #convert sets into numpy Arrays
    dicomNumpyArrays = []
    for dicomSeries in dicomDataContrastVolumes:
      dicomNumpyArrays.append(self.createNumpyArray(dicomSeries))
    return dicomNumpyArrays
  # Computes percentage increase from baseline (pre-contrast) at each voxel for each volume as numpy arrays.
  def calcInitialRise(self):
    """Fractional increase pre- to first post-contrast; NaNs from the
    zero-background division are mapped to 0."""
    # Initial Rise at each voxel (percentage increase from pre-contrast to first post-contrast)
    temp = numpy.divide((self.dicomDataNumpyArrays[1] - self.dicomDataNumpyArrays[0]), self.dicomDataNumpyArrays[0])
    temp = numpy.nan_to_num(temp)
    return temp
    #return ((self.dicomDataNumpyArrays[1]).__truediv__(self.dicomDataNumpyArrays[0]+1.0))-1.0
  def calcSlope(self):
    """Fractional change first-to-last post-contrast (delayed-phase slope);
    NaNs mapped to 0."""
    # Compute slope at each voxel from first to fourth volume to determine curve type
    temp = numpy.divide((self.dicomDataNumpyArrays[-1] - self.dicomDataNumpyArrays[1]), self.dicomDataNumpyArrays[1])
    temp = numpy.nan_to_num(temp)
    return temp
    #return (self.dicomDataNumpyArrays[-1] - self.dicomDataNumpyArrays[1]).__truediv__(self.dicomDataNumpyArrays[1]+1.0)
  def getTargetedVoxels(self):
    """Mask of voxels with sufficient initial rise, above the intensity
    floor (20 -- empirical constant, TODO confirm), inside the ROI."""
    targetVoxels = (self.initialRiseArray > self.minThreshold) & (self.dicomDataNumpyArrays[0] > 20)
    targetVoxels = targetVoxels & self.roi
    return targetVoxels
  def getPersistanceVoxels(self):
    """Target voxels whose delayed slope keeps rising (type 1 curve)."""
    persistenceVoxels = (self.slopeArray > self.curve1Minimum) & (self.targetVoxels)
    return persistenceVoxels
  def getPlateauVoxels(self):
    """Target voxels with delayed slope between the cut-offs (type 2)."""
    plateauVoxels = (self.slopeArray > self.curve3Maximum) & (self.slopeArray < self.curve1Minimum) & (self.targetVoxels)
    return plateauVoxels
  def getWashoutVoxels(self):
    """Target voxels whose delayed slope falls (type 3, washout)."""
    washoutVoxels = (self.slopeArray < self.curve3Maximum) & (self.targetVoxels)
    return washoutVoxels
  def createAndSaveVolume(self, numpyBoolArray, name, seedCoordinates):
    """Mesh the boolean mask with discrete marching cubes, smooth it, add
    the largest connected region to the scene and write the smoothed
    surface to *name* as binary STL.  Mutates numpyBoolArray in place
    (border voxels are zeroed)."""
    #convert numpy to vtkImageData
    VTKTargetVoxelsImageImport = vtk.vtkImageImport()
    #numpyBoolArray = numpy.flipud(numpyBoolArray)
    #numpyBoolArray = numpy.fliplr(numpyBoolArray)
    # Zero the border voxels so marching cubes produces a closed surface.
    w, d, h = numpyBoolArray.shape
    numpyBoolArray[w-1,:,:] = 0
    numpyBoolArray[:,0,:] = 0
    numpyBoolArray[:,d-1,:] = 0
    numpyBoolArray[:,:,0] = 0
    numpyBoolArray[:,:,h-1] = 0
    array_string = numpyBoolArray.tostring()
    VTKTargetVoxelsImageImport.CopyImportVoidPointer(array_string, len(array_string))
    VTKTargetVoxelsImageImport.SetDataScalarTypeToUnsignedChar()
    VTKTargetVoxelsImageImport.SetNumberOfScalarComponents(1)
    # VTK extents are (x, y, z) while the numpy shape unpacks as (w, d, h).
    VTKTargetVoxelsImageImport.SetDataExtent(0,h-1,0,d-1,0,w-1)
    VTKTargetVoxelsImageImport.SetWholeExtent(0,h-1,0,d-1,0,w-1)
    VTKTargetVoxelsImageImport.SetDataSpacing(1,1,1)
    # Map mask==0 -> 0 and everything else -> 1 for marching cubes.
    threshold = vtk.vtkImageThreshold()
    threshold.SetInputConnection(VTKTargetVoxelsImageImport.GetOutputPort())
    threshold.ThresholdByLower(0)
    threshold.ReplaceInOn()
    threshold.SetInValue(0)
    threshold.SetOutValue(1)
    threshold.Update()
    dmc = vtk.vtkDiscreteMarchingCubes()
    dmc.SetInputConnection(threshold.GetOutputPort())
    dmc.GenerateValues(1,1,1)
    dmc.Update()
    smoothVolume = vtk.vtkSmoothPolyDataFilter()
    smoothVolume.SetInputConnection(dmc.GetOutputPort())
    smoothVolume.SetNumberOfIterations(1)
    smoothVolume.SetRelaxationFactor(0.5)
    smoothVolume.FeatureEdgeSmoothingOff()
    smoothVolume.BoundarySmoothingOn()
    smoothVolume.Update()
    #slicer.modules.models.logic().AddModel(smoothVolume.GetOutputPort())
    #slicer.modules.models.logic().AddModel(dmc.GetOutput())
    biggestArea = vtk.vtkPolyDataConnectivityFilter()
    biggestArea.SetInputConnection(smoothVolume.GetOutputPort())
    biggestArea.SetExtractionModeToLargestRegion()
    biggestArea.Update()
    # NOTE(review): seededArea is computed but never displayed or written;
    # confirm whether it was meant to replace biggestArea below.
    seededArea = vtk.vtkPolyDataConnectivityFilter()
    seededArea.SetInputConnection(smoothVolume.GetOutputPort())
    seededArea.SetExtractionModeToClosestPointRegion()
    seededArea.SetClosestPoint(seedCoordinates[0], seedCoordinates[1], seedCoordinates[2])
    seededArea.Update()
    slicer.modules.models.logic().AddModel(biggestArea.GetOutputPort())
    #modelNode = slicer.vtkMRMLScalarVolumeNode()
    #modelNode.SetPolyDataConnection(smoothVolume.GetOutputPort())
    #slicer.mrmlScene.AddNode(modelNode)
    #slicer.mrmlScene.SetAndObserveMRMLScene()
    writer = vtk.vtkSTLWriter()
    writer.SetInputConnection(smoothVolume.GetOutputPort())
    writer.SetFileTypeToBinary()
    writer.SetFileName(name)
    writer.Write()
    logging.info("finished writing to file")
  ### helper and converter functions ###
  def createNumpyArray(self, dicomSeries):
    """Stack the slices of one series into a (rows, cols, slices) array."""
    #convert sets into numpy arrays to modify voxels in a new vtkMRMLScalarVolumeNode
    constPixelDims = (int(dicomSeries[0].Rows), int(dicomSeries[0].Columns), len(dicomSeries))
    # NOTE(review): constPixelSpacing is computed but unused here.
    constPixelSpacing = (float(dicomSeries[0].PixelSpacing[0]), float(dicomSeries[0].PixelSpacing[1]),float(dicomSeries[0].SliceThickness))
    numpyArray = numpy.zeros(constPixelDims, dtype=dicomSeries[0].pixel_array.dtype)
    for dicomData in dicomSeries:
      numpyArray[:,:, dicomSeries.index(dicomData)] = dicomData.pixel_array
    return numpyArray
  def createROI(self):
    """Build a crude ROI: everything anterior to the first above-threshold
    voxel found along a fixed probe line.

    NOTE(review): the probe indices [255, i, 29] are hard-coded for one
    specific acquisition geometry -- confirm before reusing on other data.
    """
    roi = numpy.zeros(self.dicomDataNumpyArrays[0].shape)
    thresholdedArray = (self.dicomDataNumpyArrays[0] > 20)
    firstValue = 0
    y, x, z = roi.shape
    for i in xrange(0, x-1):
      if thresholdedArray[255, i, 29] == True:
        firstValue = i
        break
    roi[:,0:firstValue,:] = True
    return roi.astype('bool_')
#
# AutoSegmentationLogic
#
class AutoSegmentationTest(ScriptedLoadableModuleTest):
  """
  This is the test case for your scripted module.
  Uses ScriptedLoadableModuleTest base class, available at:
  https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
  """
  # No tests implemented yet; the standard scene-clearing setUp is kept
  # here as a template for when tests are added.
  # def setUp(self):
  #   slicer.mrmlScene.Clear(0)
#
# Main
#
if __name__ == "__main__":
  #TODO: access and parse command line arguments
  # Example: SlicerRt/src/BatchProcessing
  # Ideally handle --xml
  import sys
  logging.debug( sys.argv )
  # Build a bare frame and hand it to the slicelet; the slicelet's
  # constructor lays it out and shows it.
  mainFrame = qt.QFrame()
  slicelet = AutoSegmentationSlicelet(mainFrame)
| |
###############################################################################
##
## Copyright (C) 2011-2014, NYU-Poly.
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from xml.dom import minidom
import xml.parsers.expat
import __builtin__
def eval_xml_value(node):
    """eval_xml_value(node) -> value
    evaluates an xml node as the following examples:
    <str value='foo'/> -> 'foo'
    <int value='3'/> -> 3
    <float value='3.141592'> -> 3.141592
    <bool value='False'> -> False
    """
    # NOTE: the tag name is looked up directly among the builtins; only
    # feed this trusted XML.
    type_ = getattr(__builtin__, node.nodeName)
    str_value = str(node.attributes['value'].value)
    if type_ is not bool:
        return type_(str_value)
    # bool('False') would be True, so map the two literals explicitly.
    if str_value == 'True':
        return True
    if str_value == 'False':
        return False
    raise ValueError("eval_xml_value: Bogus bool value '%s'" % str_value)
def quote_xml_value(dom, value):
    """quote_xml_value(dom, value) -> value
    quotes a value as an xml node so that
    eval_xml_value(quote_xml_value(dom, value)) == value
    <str value='foo'/> <- 'foo'
    <int value='3'/> <- 3
    <float value='3.141592'> <- 3.141592
    <bool value='False'> <- False
    """
    # The element tag encodes the Python type, the attribute the repr.
    tag = type(value).__name__
    element = dom.createElement(tag)
    element.setAttribute('value', str(value))
    return element
def named_elements(element, elname):
    """named_elements(element, elname) -> Node
    Helper function that iterates over the element child Nodes searching
    for node with name elname.
    """
    return (child for child in element.childNodes
            if child.nodeName == elname)
def enter_named_element(element, elname):
    """enter_named_element(element, elname) -> Node
    Returns first child of element with name elname, or None when no
    child matches.
    """
    for child in element.childNodes:
        if child.nodeName == elname:
            return child
    return None
def elements_filter(element, element_predicate):
    """elements_filter(element, element_predicate) -> Node iterator
    Helper function that iterates over the element child Nodes searching
    for nodes that pass element_predicate, that is, node for which
    element_predicate(node) == True
    """
    return (child for child in element.childNodes
            if element_predicate(child))
class XMLWrapper(object):
    """Helper to parse a general XML file. It provides functions to open and
    close files.
    It must be subclassed to parse specifi files. """

    class XMLParseError(Exception):
        # Wraps an expat error with its position so __str__ can render a
        # human-readable location.
        def __init__(self, line, char, code):
            self._line = line
            self._char = char
            self._code = code
        def __str__(self):
            return ("XML Parse error at line %s, col %s: %s" %
                    (self._line,
                     self._char,
                     xml.parsers.expat.ErrorString(self._code)))

    def open_file(self, filename):
        """open_file(filename: str) -> None
        Parses an XML file.
        """
        self.filename = filename
        try:
            self.dom = minidom.parse(filename)
        except xml.parsers.expat.ExpatError, e:
            raise self.XMLParseError(e.lineno, e.offset, e.code)

    def create_document_from_string(self, text):
        """parse_string(text:str) -> dom
        Parses an xml string and returns the DOM object
        """
        try:
            dom = minidom.parseString(text)
        except xml.parsers.expat.ExpatError, e:
            raise self.XMLParseError(e.lineno, e.offset, e.code)
        return dom

    def close_file(self):
        """close_file() -> None
        Removes the association with the XML file loaded by open_file
        method.
        """
        # unlink() breaks DOM reference cycles so memory can be reclaimed.
        if self.dom:
            self.dom.unlink()
        self.filename = None
        self.dom = None

    def create_document(self, nodename):
        """create_document(nodename: str) -> xml element
        Creates a documentElement
        """
        impl = minidom.getDOMImplementation()
        dom = impl.createDocument(None, nodename, None)
        return dom

    def write_document(self, root, filename):
        """write_document(root:xml element, filename: str) -> None
        Save as an XML file
        """
        output_file = open(filename,'w')
        root.writexml(output_file, " ", " ", '\n')
        output_file.close()

    def __str__(self):
        """ __str__() -> str
        Returns the XML that self.dom represents as a string
        """
        return self.dom.toprettyxml()
################################################################################
# Testing
import unittest
class TestXmlUtils(unittest.TestCase):
    """Unit tests for the module-level XML helper functions."""

    def test_named_elements(self):
        """ Exercises searching for elements """
        xmlStr = """<root>
                       <child>
                          <grandchild></grandchild>
                          <grandchild></grandchild>
                       </child>
                       <child></child>
                    </root>"""
        dom = minidom.parseString(xmlStr)
        root = dom.documentElement
        childcount = 0
        for node in named_elements(root,'child'):
            childcount += 1
        self.assertEquals(childcount,2)
        # named_elements only looks at direct children, so the nested
        # grandchild nodes must not be found from the root.
        grandchildcount = 0
        for node in named_elements(root,'grandchild'):
            grandchildcount += 1
        self.assertEquals(grandchildcount,0)

    def test_eval_quote(self):
        # Round-trips values through quote_xml_value/eval_xml_value in
        # both directions.
        xmlStr = """<root>
                       <child>
                          <grandchild></grandchild>
                          <grandchild></grandchild>
                       </child>
                       <child></child>
                    </root>"""
        dom = minidom.parseString(xmlStr)
        def do_it_1(v):
            # value -> node -> value must be the identity.
            q = quote_xml_value(dom, v)
            v2 = eval_xml_value(q)
            self.assertEquals(v, v2)
        def do_it_2(q):
            # node -> value -> node must reproduce the same XML.
            q = minidom.parseString(q).documentElement
            v = eval_xml_value(q)
            self.assertEquals(q.toxml(), quote_xml_value(dom, v).toxml())
        do_it_1(2)
        do_it_1(3.0)
        do_it_1(False)
        do_it_1(True)
        do_it_1('Foobar')
        do_it_1('with<brackets>')
        do_it_2('<str value="Foo"/>')
        do_it_2('<bool value="False"/>')
        do_it_2('<bool value="True"/>')
        do_it_2('<int value="3"/>')
        do_it_2('<float value="4.0"/>')
# Run the module's unit tests when executed directly.
if __name__ == "__main__":
    unittest.main()
| |
#!/usr/bin/env python
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Common utility library."""
from __future__ import with_statement
__author__ = ['rafek@google.com (Rafe Kaplan)',
'guido@google.com (Guido van Rossum)',
]
import cgi
import datetime
import inspect
import os
import re
import sys
__all__ = ['AcceptItem',
'AcceptError',
'Error',
'choose_content_type',
'decode_datetime',
'get_package_for_module',
'pad_string',
'parse_accept_header',
'positional',
'PROTORPC_PROJECT_URL',
'TimeZoneOffset',
]
class Error(Exception):
  """Root of the protorpc exception hierarchy."""
class AcceptError(Error):
  """Raised when an HTTP Accept header cannot be parsed."""
PROTORPC_PROJECT_URL = 'http://code.google.com/p/google-protorpc'
_TIME_ZONE_RE_STRING = r"""
# Examples:
# +01:00
# -05:30
# Z12:00
((?P<z>Z) | (?P<sign>[-+])
(?P<hours>\d\d) :
(?P<minutes>\d\d))$
"""
_TIME_ZONE_RE = re.compile(_TIME_ZONE_RE_STRING, re.IGNORECASE | re.VERBOSE)
def pad_string(string):
  """Pad a string for safe HTTP error responses.

  Left-justifies the string in a 512 character field so that Internet
  Explorer renders the response body instead of its own error page.

  Args:
    string: A string.

  Returns:
    Formatted string left justified within a 512 byte field.
  """
  return '{0:<512}'.format(string)
def positional(max_positional_args):
  """A decorator to declare that only the first N arguments may be positional.

  This decorator makes it easy to support Python 3 style keyword-only
  parameters. For example, in Python 3 it is possible to write:
    def fn(pos1, *, kwonly1=None, kwonly1=None):
      ...

  All named parameters after * must be a keyword:
    fn(10, 'kw1', 'kw2')  # Raises exception.
    fn(10, kwonly1='kw1')  # Ok.

  Example:
    To define a function like above, do:
      @positional(1)
      def fn(pos1, kwonly1=None, kwonly2=None):
        ...

    If no default value is provided to a keyword argument, it becomes a required
    keyword argument:
      @positional(0)
      def fn(required_kw):
        ...

    This must be called with the keyword parameter:
      fn()  # Raises exception.
      fn(10)  # Raises exception.
      fn(required_kw=10)  # Ok.

    When defining instance or class methods always remember to account for
    'self' and 'cls':
      class MyClass(object):

        @positional(2)
        def my_method(self, pos1, kwonly1=None):
          ...

        @classmethod
        @positional(2)
        def my_method(cls, pos1, kwonly1=None):
          ...

    One can omit the argument to 'positional' altogether, and then no
    arguments with default values may be passed positionally. This
    would be equivalent to placing a '*' before the first argument
    with a default value in Python 3. If there are no arguments with
    default values, and no argument is given to 'positional', an error
    is raised.

      @positional
      def fn(arg1, arg2, required_kw1=None, required_kw2=0):
        ...

      fn(1, 3, 5)  # Raises exception.
      fn(1, 3)  # Ok.
      fn(1, 3, required_kw1=5)  # Ok.

  Args:
    max_positional_arguments: Maximum number of positional arguments. All
      parameters after the this index must be keyword only.

  Returns:
    A decorator that prevents using arguments after max_positional_args from
    being used as positional parameters.

  Raises:
    TypeError if a keyword-only argument is provided as a positional parameter.
    ValueError if no maximum number of arguments is provided and the function
      has no arguments with default values.
  """
  def positional_decorator(wrapped):
    # NOTE(review): the wrapper is not decorated with functools.wraps, so
    # the wrapped function's name/docstring are not propagated.
    def positional_wrapper(*args, **kwargs):
      if len(args) > max_positional_args:
        plural_s = ''
        if max_positional_args != 1:
          plural_s = 's'
        raise TypeError('%s() takes at most %d positional argument%s '
                        '(%d given)' % (wrapped.__name__,
                                        max_positional_args,
                                        plural_s, len(args)))
      return wrapped(*args, **kwargs)
    return positional_wrapper

  # Bare-decorator form: @positional (no argument) receives the function
  # itself; derive the limit from the count of defaulted parameters.
  # (Python 2 only: 'long' and inspect.getargspec.)
  if isinstance(max_positional_args, (int, long)):
    return positional_decorator
  else:
    args, _, _, defaults = inspect.getargspec(max_positional_args)
    if defaults is None:
      raise ValueError(
          'Functions with no keyword arguments must specify '
          'max_positional_args')
    return positional(len(args) - len(defaults))(max_positional_args)
# TODO(rafek): Support 'level' from the Accept header standard.
class AcceptItem(object):
  """Encapsulate a single entry of an Accept header.

  Parses and extracts relevent values from an Accept header and implements
  a sort order based on the priority of each requested type as defined
  here:

    http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html

  Accept headers are normally a list of comma separated items.  Each item
  has the format of a normal HTTP header.  For example:

    Accept: text/plain, text/html, text/*, */*

  This header means to prefer plain text over HTML, HTML over any other
  kind of text and text over any other kind of supported format.

  This class does not attempt to parse the list of items from the Accept header.
  The constructor expects the unparsed sub header and the index within the
  Accept header that the fragment was found.

  Properties:
    index: The index that this accept item was found in the Accept header.
    main_type: The main type of the content type.
    sub_type: The sub type of the content type.
    q: The q value extracted from the header as a float.  If there is no q
      value, defaults to 1.0.
    values: All header attributes parsed form the sub-header.
    sort_key: A tuple (no_main_type, no_sub_type, q, no_values, index):
        no_main_type: */* has the least priority.
        no_sub_type: Items with no sub-type have less priority.
        q: Items with lower q value have less priority.
        no_values: Items with no values have less priority.
        index: Index of item in accept header is the last priority.
  """

  __CONTENT_TYPE_REGEX = re.compile(r'^([^/]+)/([^/]+)$')

  def __init__(self, accept_header, index):
    """Parse component of an Accept header.

    Args:
      accept_header: Unparsed sub-expression of accept header.
      index: The index that this accept item was found in the Accept header.
    """
    accept_header = accept_header.lower()
    content_type, values = cgi.parse_header(accept_header)
    match = self.__CONTENT_TYPE_REGEX.match(content_type)
    if not match:
      raise AcceptError('Not valid Accept header: %s' % accept_header)
    self.__index = index
    self.__main_type = match.group(1)
    self.__sub_type = match.group(2)
    # Missing q parameter defaults to 1.0 per RFC 2616.
    self.__q = float(values.get('q', 1))
    self.__values = values

    # '*' wildcards are normalized to None so match() can treat them as
    # "accept anything".
    if self.__main_type == '*':
      self.__main_type = None

    if self.__sub_type == '*':
      self.__sub_type = None

    # Negated q so that a plain ascending tuple sort yields highest
    # priority first.
    self.__sort_key = (not self.__main_type,
                       not self.__sub_type,
                       -self.__q,
                       not self.__values,
                       self.__index)

  @property
  def index(self):
    return self.__index

  @property
  def main_type(self):
    return self.__main_type

  @property
  def sub_type(self):
    return self.__sub_type

  @property
  def q(self):
    return self.__q

  @property
  def values(self):
    """Copy the dictionary of values parsed from the header fragment."""
    return dict(self.__values)

  @property
  def sort_key(self):
    return self.__sort_key

  def match(self, content_type):
    """Determine if the given accept header matches content type.

    Args:
      content_type: Unparsed content type string.

    Returns:
      True if accept header matches content type, else False.
    """
    content_type, _ = cgi.parse_header(content_type)
    match = self.__CONTENT_TYPE_REGEX.match(content_type.lower())
    if not match:
      return False

    main_type, sub_type = match.group(1), match.group(2)
    if not(main_type and sub_type):
      return False

    # None (wildcard) accepts any value on that side.
    return ((self.__main_type is None or self.__main_type == main_type) and
            (self.__sub_type is None or self.__sub_type == sub_type))

  def __cmp__(self, other):
    """Comparison operator based on sort keys."""
    # Python 2 only: relies on the cmp builtin and __cmp__ protocol.
    if not isinstance(other, AcceptItem):
      return NotImplemented
    return cmp(self.sort_key, other.sort_key)

  def __str__(self):
    """Rebuilds Accept header."""
    content_type = '%s/%s' % (self.__main_type or '*', self.__sub_type or '*')
    values = self.values

    if values:
      # Python 2 only: dict.iteritems.
      value_strings = ['%s=%s' % (i, v) for i, v in values.iteritems()]
      return '%s; %s' % (content_type, '; '.join(value_strings))
    else:
      return content_type

  def __repr__(self):
    return 'AcceptItem(%r, %d)' % (str(self), self.__index)
def parse_accept_header(accept_header):
  """Parse accept header.

  Args:
    accept_header: Unparsed accept header.  Does not include name of header.

  Returns:
    List of AcceptItem instances sorted according to their priority.
  """
  items = accept_header.split(',')
  return sorted(AcceptItem(item, index) for index, item in enumerate(items))
def choose_content_type(accept_header, supported_types):
    """Choose most appropriate supported type based on what client accepts.

    Args:
      accept_header: Unparsed accept header.  Does not include name of header.
      supported_types: List of content-types supported by the server.  The
        index of the supported types determines which supported type is
        preferred by the server should the accept header match more than one
        at the same priority.

    Returns:
      The preferred supported type if the accept header matches any, else None.
    """
    # Accept items are already sorted by priority; within one item the
    # server's ordering of supported_types decides.
    for accept_item in parse_accept_header(accept_header):
        chosen = next((candidate for candidate in supported_types
                       if accept_item.match(candidate)), None)
        if chosen is not None:
            return chosen
    return None
@positional(1)
def get_package_for_module(module):
    """Get package name for a module.

    Helper calculates the package name of a module.

    Args:
      module: Module to get name for.  If module is a string, try to find
        module in sys.modules.

    Returns:
      If module contains 'package' attribute, uses that as package name.
      Else, if module is not the '__main__' module, the module __name__.
      Else, the base name of the module file name.  Else None.
    """
    if isinstance(module, basestring):
        try:
            module = sys.modules[module]
        except KeyError:
            return None

    try:
        return unicode(module.package)
    except AttributeError:
        if module.__name__ == '__main__':
            try:
                file_name = module.__file__
            except AttributeError:
                pass
            else:
                # os.path.splitext always returns a (root, ext) 2-tuple, so
                # the previous len(...) == 1 branch was unreachable dead code.
                # The root element is the base name with its final extension
                # stripped, which is exactly what the old join computed.
                base_name = os.path.basename(file_name)
                return unicode(os.path.splitext(base_name)[0])
        return unicode(module.__name__)
class TimeZoneOffset(datetime.tzinfo):
    """Time zone information as encoded/decoded for DateTimeFields."""

    def __init__(self, offset):
        """Initialize a time zone offset.

        Args:
          offset: Integer or timedelta time zone offset, in minutes from UTC.
            This can be negative.
        """
        super(TimeZoneOffset, self).__init__()
        if isinstance(offset, datetime.timedelta):
            # The offset is stored in minutes, so the timedelta's seconds
            # must be divided by 60.  (Previously the raw second count was
            # stored, making utcoffset() report a value 60x too large.)
            offset = offset.total_seconds() / 60
        self.__offset = offset

    def utcoffset(self, dt):
        """Get a timedelta with the time zone's offset from UTC.

        Returns:
          The time zone offset from UTC, as a timedelta.
        """
        return datetime.timedelta(minutes=self.__offset)

    def dst(self, dt):
        """Get the daylight savings time offset.

        The formats that ProtoRPC uses to encode/decode time zone information
        don't contain any information about daylight savings time.  So this
        always returns a timedelta of 0.

        Returns:
          A timedelta of 0.
        """
        return datetime.timedelta(0)
def decode_datetime(encoded_datetime):
    """Decode a DateTimeField parameter from a string to a python datetime.

    Args:
      encoded_datetime: A string in RFC 3339 format.

    Returns:
      A datetime object with the date and time specified in encoded_datetime.

    Raises:
      ValueError: If the string is not in a recognized format.
    """
    # Separate any trailing time zone specification from the base time and
    # uppercase the latter so all format comparisons are case-insensitive.
    zone_match = _TIME_ZONE_RE.search(encoded_datetime)
    if zone_match:
        base_time = encoded_datetime[:zone_match.start(1)].upper()
    else:
        base_time = encoded_datetime.upper()

    # Fractional seconds are optional in the input.
    if '.' in base_time:
        parse_format = '%Y-%m-%dT%H:%M:%S.%f'
    else:
        parse_format = '%Y-%m-%dT%H:%M:%S'
    parsed = datetime.datetime.strptime(base_time, parse_format)
    if not zone_match:
        return parsed

    # A time zone was supplied: compute its offset in minutes.  'Z' means UTC.
    if zone_match.group('z'):
        offset_minutes = 0
    else:
        hours = int(zone_match.group('hours'))
        minutes = int(zone_match.group('minutes'))
        offset_minutes = hours * 60 + minutes
        if zone_match.group('sign') == '-':
            offset_minutes = -offset_minutes

    # datetimes are immutable, so attach the tzinfo via replace(), which
    # builds a new object with identical fields plus the time zone.
    return parsed.replace(tzinfo=TimeZoneOffset(offset_minutes))
| |
#!/usr/bin/env python
"""
@package mi.dataset.parser.vel3d_l_wfp
@file marine-integrations/mi/dataset/parser/vel3d_l_wfp.py
@author Steve Myerson (Raytheon)
@brief Parser for the vel3d_l_wfp dataset driver
This file contains classes for both the vel3d_l_wfp parser (recovered data)
and the vel3d_l_wfp_sio_mule parser (telemetered data wrapped in SIO blocks).
Release notes:
Initial Release
"""
__author__ = 'Steve Myerson (Raytheon)'
__license__ = 'Apache 2.0'
import calendar
import copy
import struct
from mi.core.log import get_logger; log = get_logger()
from mi.core.common import BaseEnum
from mi.core.instrument.data_particle import DataParticle, DataParticleKey
from mi.core.exceptions import \
DatasetParserException, \
SampleException, \
UnexpectedDataException
from mi.dataset.dataset_parser import \
Parser, \
BufferLoadingParser
from mi.dataset.parser.sio_mule_common import \
SioMuleParser, \
SIO_HEADER_MATCHER, \
SIO_HEADER_GROUP_ID, \
SIO_HEADER_GROUP_TIMESTAMP
ID_VEL3D_L_WFP_SIO_MULE = 'WA'  # The type of instrument for telemetered data

#
# File format (this does not include the SIO header
# which is applicable to telemetered data only):
#   Data bytes (4 bytes) - Field is used for Recovered, ignored for Telemetered
#   FSI Header (279 bytes)
#   FSI Record (47 bytes * N instances)
#   Sensor start time (4 bytes)
#   Sensor stop time (4 bytes)
#   Decimation (2 bytes, optional for Telemetered, N/A for Recovered)
#
DATA_BYTES_SIZE = 4    # bytes in the Data bytes field
FSI_HEADER_SIZE = 279  # bytes in the FSI header
FSI_RECORD_SIZE = 47   # bytes in each FSI record
FSI_HEADER_SERIAL_NUMBER_OFFSET = 3  # byte offset into FSI Header
FSI_HEADER_SERIAL_NUMBER_SIZE = 4    # bytes in the serial number field

#
# FSI Record Format
# Offset  Bytes  Format   Field
#   0     1      uint8    hour
#   1     1      uint8    minute
#   2     1      uint8    second
#   3     1      uint8    month
#   4     1      uint8    day
#   5     2      uint16   year
#   7     4      float32  heading
#  11     4      float32  tx
#  15     4      float32  ty
#  19     4      float32  hx
#  23     4      float32  hy
#  27     4      float32  hz
#  31     4      float32  vp1
#  35     4      float32  vp2
#  39     4      float32  vp3
#  43     4      float32  vp4
#
FSI_RECORD_FORMAT = '<5BH10f'  # format for unpacking data from FSI records

#
# Keys to be used when generating particles.
# Instrument values are extracted from the FSI Record.
# Metadata values are extracted from the Time Record and the FSI Header.
# They are listed in order corresponding to the data record payload.
# Instrument Particles are the same for both recovered and telemetered data.
#
INDEX_PARTICLE_KEY = 0  # Index into the xxx_PARTICLE_KEYS tables
INDEX_VALUE_TYPE = 1    # Index into the xxx_PARTICLE_KEYS tables

INSTRUMENT_PARTICLE_KEYS = \
[
    ['vel3d_l_date_time_array', list],
    ['vel3d_l_heading', float],
    ['vel3d_l_tx', float],
    ['vel3d_l_ty', float],
    ['vel3d_l_hx', float],
    ['vel3d_l_hy', float],
    ['vel3d_l_hz', float],
    ['vel3d_l_vp1', float],
    ['vel3d_l_vp2', float],
    ['vel3d_l_vp3', float],
    ['vel3d_l_vp4', float]
]

SIO_METADATA_PARTICLE_KEYS = \
[
    [None, None],  # Metadata timestamp handled separately
    ['vel3d_l_time_on', int],
    ['vel3d_l_time_off', int],
    ['vel3d_l_serial_number', int],
    ['vel3d_l_number_of_records', int],
    ['vel3d_l_decimation_factor', int],
    ['vel3d_l_controller_timestamp', int]
]

WFP_METADATA_PARTICLE_KEYS = \
[
    [None, None],  # Metadata timestamp handled separately
    ['vel3d_l_time_on', int],
    ['vel3d_l_time_off', int],
    ['vel3d_l_serial_number', int],
    ['vel3d_l_number_of_records', int]
]

DATE_TIME_ARRAY = 'vel3d_l_date_time_array'  # This one needs to be special-cased
DATE_TIME_SIZE = 6  # 6 bytes for the output date time field

DECIMATION_RECORD_SIZE = 10  # bytes in time fields plus decimation field
DECIMATION_FORMAT = '>2IH'   # 2 uint32, 1 uint16
TIME_RECORD_SIZE = 8         # bytes in time fields
TIME_FORMAT = '>2I'          # 2 uint32

# Indices into the metadata field tuples built by parse_vel3d_data.
FIELD_METADATA_TIMESTAMP = 0
FIELD_TIME_ON = 1
FIELD_TIME_OFF = 2
FIELD_SERIAL_NUMBER = 3
FIELD_NUMBER_OF_RECORDS = 4
FIELD_DECIMATION = 5
FIELD_CONTROLLER_TIMESTAMP = 6

# Tags used to route parsed field groups to the right particle class.
PARTICLE_TYPE_SIO_INSTRUMENT = 1
PARTICLE_TYPE_SIO_METADATA = 2
PARTICLE_TYPE_WFP_INSTRUMENT = 3
PARTICLE_TYPE_WFP_METADATA = 4
class Vel3dLWfpStateKey(BaseEnum):
    """Keys used in the parser state dictionary."""
    POSITION = 'position'  # holds the file position
    PARTICLE_NUMBER = 'particle_number'  # particle number of N
class Vel3dLWfpDataParticleType(BaseEnum):
    """
    These are the names of the output particle streams as specified in the IDD.
    """
    SIO_INSTRUMENT_PARTICLE = 'vel3d_l_wfp_instrument'  # telemetered
    SIO_METADATA_PARTICLE = 'vel3d_l_wfp_sio_mule_metadata'  # telemetered
    WFP_INSTRUMENT_PARTICLE = 'vel3d_l_wfp_instrument_recovered'  # recovered
    WFP_METADATA_PARTICLE = 'vel3d_l_wfp_metadata_recovered'  # recovered
class Vel3dLWfpInstrumentDataParticle(DataParticle):
    """
    Generic class for generating vel3d_l_wfp instrument particles.
    This class is for both recovered and telemetered data.
    The output particle streams for vel3d_l instrument data have different
    names, but the contents of the 2 streams are identical.
    """

    def _build_parsed_values(self):
        """
        Take something in the data format and turn it into an array of
        dictionaries defining the data in the particle with the appropriate tag.
        @throws SampleException If there is a problem with sample creation
        Returns:
            list of instrument particle values
        """
        #
        # Generate an Instrument data particle.
        # Note that raw_data already contains the individual fields
        # extracted and unpacked from the data record.
        #
        particle = []
        field_index = 0
        fields = self.raw_data

        # Iterate the key table directly (instead of indexing by position)
        # so each key is paired with its encoding type.
        for key, data_type in INSTRUMENT_PARTICLE_KEYS:
            if key == DATE_TIME_ARRAY:
                #
                # The date time array must be special-cased since multiple
                # parsed values make up this single particle value.  The same
                # 6 values are also used to generate this particle's timestamp.
                #
                (hour, minute, second, month, day, year) = \
                    fields[field_index : field_index + DATE_TIME_SIZE]
                timestamp = (year, month, day, hour, minute, second, 0, 0, 0)
                elapsed_seconds = calendar.timegm(timestamp)
                self.set_internal_timestamp(unix_time=elapsed_seconds)

                # Generate the date time array to be stored in the particle.
                date_time_array = [year, month, day, hour, minute, second]
                particle_value = self._encode_value(key, date_time_array,
                                                    data_type)
                field_index += DATE_TIME_SIZE
            else:
                # Other particle values map one-to-one onto the previously
                # parsed fields.
                particle_value = self._encode_value(key, fields[field_index],
                                                    data_type)
                field_index += 1

            particle.append(particle_value)

        return particle
class Vel3dLWfpInstrumentParticle(Vel3dLWfpInstrumentDataParticle):
    """
    Class for generating vel3d_l_wfp instrument telemetered particles.
    All processing is handled by the parent class as long as the
    data particle type is set correctly.
    """
    # Output stream name for telemetered (SIO) instrument data.
    _data_particle_type = Vel3dLWfpDataParticleType.SIO_INSTRUMENT_PARTICLE
class Vel3dLWfpInstrumentRecoveredParticle(Vel3dLWfpInstrumentDataParticle):
    """
    Class for generating vel3d_l_wfp instrument recovered particles.
    All processing is handled by the parent class as long as the
    data particle type is set correctly.
    """
    # Output stream name for recovered (WFP) instrument data.
    _data_particle_type = Vel3dLWfpDataParticleType.WFP_INSTRUMENT_PARTICLE
class Vel3dLMetadataParticle(DataParticle):
    """
    Generic class for generating vel3d_l metadata particles,
    both recovered and telemetered.
    """

    def generate_metadata_particle(self, particle_key_table):
        """
        Take something in the data format and turn it into an array of
        dictionaries defining the data in the particle with the appropriate tag.
        @throws SampleException If there is a problem with sample creation
        Parameters:
            particle_key_table - list of particle keywords to be matched
                against the raw_data which has the parsed fields in the same
                order as the keys
        Returns:
            list of metadata particle values
        """
        #
        # Generate a Metadata data particle.
        # Note that raw_data already contains the individual fields
        # extracted and unpacked from the metadata record.
        # It is assumed that the individual fields are in the correct order
        # corresponding to the table of keys.
        #
        particle = []
        fields = self.raw_data

        #
        # The timestamp for the Metadata particle varies depending on whether
        # this is recovered or telemetered data.  That determination is made
        # when the input file is parsed; whatever value was stored there is
        # used as the timestamp here.
        #
        self.set_internal_timestamp(unix_time=fields[FIELD_METADATA_TIMESTAMP])

        #
        # Extract the metadata particle fields from the parsed values.
        # enumerate keeps each key row aligned with its parsed field; a key
        # of None marks a field (the timestamp) that is consumed above rather
        # than encoded into the particle.
        #
        for field_index, (key, value_type) in enumerate(particle_key_table):
            if key is None:
                continue

            #
            # There is a bug in encode_value in the parent class: it chokes
            # when handed a value of None, so None values are appended as a
            # literal key/value dictionary instead.
            #
            if fields[field_index] is None:
                particle.append({DataParticleKey.VALUE_ID: key,
                                 DataParticleKey.VALUE: None})
            else:
                particle.append(self._encode_value(key, fields[field_index],
                                                   value_type))

        return particle
class Vel3dLWfpMetadataRecoveredParticle(Vel3dLMetadataParticle):
    """
    Class for generating vel3d_l_wfp metadata recovered particles.
    """
    _data_particle_type = Vel3dLWfpDataParticleType.WFP_METADATA_PARTICLE

    def _build_parsed_values(self):
        """
        Call the generic generate_metadata_particle function to generate the
        WFP Metadata particle (using the recovered key table).
        """
        return self.generate_metadata_particle(WFP_METADATA_PARTICLE_KEYS)
class Vel3dLWfpSioMuleMetadataParticle(Vel3dLMetadataParticle):
    """
    Class for generating vel3d_l_wfp_sio_mule metadata particles.
    """
    _data_particle_type = Vel3dLWfpDataParticleType.SIO_METADATA_PARTICLE

    def _build_parsed_values(self):
        """
        Call the generic generate_metadata_particle function to generate the
        SIO Mule Metadata particle (using the telemetered key table).
        """
        return self.generate_metadata_particle(SIO_METADATA_PARTICLE_KEYS)
class Vel3dLParser(Parser):
    """
    This class contains functions that are common to both the Vel3dLWfp and
    Vel3dLWfpSioMule parsers.
    """

    def generate_samples(self, fields):
        """
        Given a list of groups of particle fields, generate particles for each group.
        Parameters:
          fields - list of (particle type, (parsed values to be put into
            output particles)) tuples
        Returns:
          sample_count - number of samples found
          samples - list of samples found
        """
        samples = []
        sample_count = 0
        if len(fields) > 0:
            for x in range(0, len(fields)):
                # Map the particle type tag to the particle class to generate.
                particle_type = fields[x][0]
                if particle_type == PARTICLE_TYPE_SIO_INSTRUMENT:
                    particle_class = Vel3dLWfpInstrumentParticle
                elif particle_type == PARTICLE_TYPE_WFP_INSTRUMENT:
                    particle_class = Vel3dLWfpInstrumentRecoveredParticle
                elif particle_type == PARTICLE_TYPE_SIO_METADATA:
                    particle_class = Vel3dLWfpSioMuleMetadataParticle
                else:
                    particle_class = Vel3dLWfpMetadataRecoveredParticle
                #
                # Since the record has already been parsed,
                # the individual fields are passed to be stored and used
                # when generating the particle key/value pairs.
                # Timestamp is None since the particle generation handles that.
                #
                sample = self._extract_sample(particle_class, None,
                                              fields[x][1], None)
                if sample:
                    #
                    # Add the particle to the list of particles
                    #
                    samples.append(sample)
                    sample_count += 1

        return sample_count, samples

    def parse_vel3d_data(self, instrument_particle_type, metadata_particle_type,
                         chunk, time_stamp=None):
        """
        This function parses the Vel3d data, including the FSI Header,
        FSI Records, and Metadata.
        Parameters:
          instrument_particle_type - Which instrument particle is being generated.
          metadata_particle_type - Which metadata particle is being generated.
          chunk - Vel3d data, starting with the data_bytes field.
          time_stamp (optional) - specified for SIO Mule data only.
        Returns:
          particle_fields - The fields resulting from parsing the FSI Header,
            FSI records, and Metadata.
        """
        particle_fields = []  # Initialize return parameter to empty
        #
        # Skip past the Data Bytes field to get to the start of the FSI Header.
        # We don't care about the Data Bytes field.
        #
        start_index = DATA_BYTES_SIZE
        #
        # Extract the little endian 32-bit serial number from the FSI Header.
        #
        serial_number_start = start_index + FSI_HEADER_SERIAL_NUMBER_OFFSET
        serial_number = struct.unpack('<I',
            chunk[serial_number_start :
                  serial_number_start + FSI_HEADER_SERIAL_NUMBER_SIZE])[0]
        #
        # Skip past the FSI Header to get to the first FSI record.
        #
        start_index += FSI_HEADER_SIZE
        #
        # Calculate the number of bytes remaining to be processed.
        #
        bytes_remaining = len(chunk) - start_index
        #
        # Calculate the number of FSI records expected.
        # NOTE(review): this relies on Python 2 floor division of ints.
        # Under Python 3 '/' would yield a float and the equality tests
        # against records_processed below would never succeed -- confirm
        # before porting.
        #
        records_expected = bytes_remaining / FSI_RECORD_SIZE
        #
        # As long as there is more data in the chunk.
        #
        records_processed = 0
        metadata_found = False
        while bytes_remaining > 0:
            fields = []
            #
            # If there are enough bytes to comprise an FSI record and
            # we haven't yet processed the expected number of FSI records,
            # extract the fields from the FSI record.
            #
            if bytes_remaining >= FSI_RECORD_SIZE and \
                records_processed != records_expected:

                particle_type = instrument_particle_type
                fields.append(particle_type)
                fields.append(struct.unpack(FSI_RECORD_FORMAT,
                    chunk[start_index : start_index + FSI_RECORD_SIZE]))
                bytes_remaining -= FSI_RECORD_SIZE
                start_index += FSI_RECORD_SIZE
                records_processed += 1
            #
            # Once all the FSI records have been processed,
            # check for a decimation or time record (Metadata).
            # If there are enough bytes to comprise a decimation record
            # or a time record, extract the fields from the record.
            #
            elif records_processed == records_expected and \
                not metadata_found and \
                (bytes_remaining == DECIMATION_RECORD_SIZE or
                 bytes_remaining == TIME_RECORD_SIZE):
                #
                # If it's a decimation record, extract time on, time off
                # and the decimation factor.
                #
                particle_type = metadata_particle_type
                fields.append(particle_type)
                if bytes_remaining == DECIMATION_RECORD_SIZE:
                    (time_on, time_off, decimation) = struct.unpack(DECIMATION_FORMAT,
                        chunk[start_index : start_index + bytes_remaining])
                    bytes_remaining -= DECIMATION_RECORD_SIZE
                #
                # If it's a time record, extract time on and time off,
                # and set decimation to None.
                #
                else:
                    (time_on, time_off) = struct.unpack(TIME_FORMAT,
                        chunk[start_index : start_index + bytes_remaining])
                    decimation = None
                    bytes_remaining -= TIME_RECORD_SIZE
                #
                # Create the metadata fields depending on which metadata
                # particle type is being created.
                # The fields must be in the same order as the Particle Keys
                # tables.  The first entry is the particle timestamp: the SIO
                # controller timestamp for telemetered data, and time_off for
                # recovered data.
                #
                if particle_type == PARTICLE_TYPE_SIO_METADATA:
                    metadata = (time_stamp, time_on, time_off, serial_number,
                                records_processed, decimation, time_stamp)
                else:
                    metadata = (time_off, time_on, time_off, serial_number,
                                records_processed)
                fields.append(metadata)
                metadata_found = True
            #
            # It's an error if we don't recognize any type of record
            # or if we've processed everything we expected to and there's
            # still more bytes remaining.
            #
            else:
                self.report_error(SampleException, 'Improperly formatted input file')
                bytes_remaining = 0
                particle_type = None

            if particle_type is not None:
                particle_fields.append(fields)

        return particle_fields

    def report_error(self, exception, error_message):
        """
        This function reports an error condition by issuing a warning
        and raising an exception.
        Parameters:
          exception - type of exception to raise
          error_message - accompanying text
        """
        log.warn(error_message)
        raise exception(error_message)
class Vel3dLWfpParser(BufferLoadingParser, Vel3dLParser):
    """Parser for recovered (non-SIO) vel3d_l_wfp files."""

    def __init__(self, config, state, file_handle,
                 state_callback, publish_callback, exception_callback):
        """
        @param config The configuration parameters to feed into the parser
        @param state The location in the file to start parsing from.
           This reflects what has already been published.
        @param file_handle An already open file-like file handle
        @param state_callback The callback method from the agent driver
           (ultimately the agent) to call back when a state needs to be
           updated
        @param publish_callback The callback from the agent driver (and
           ultimately from the agent) where we send our sample particle to
           be published into ION
        @param exception_callback The callback from the agent driver to
           send an exception to
        """
        super(Vel3dLWfpParser, self).__init__(config, file_handle, state,
            self.sieve_function, state_callback, publish_callback, exception_callback)

        self.input_file = file_handle
        # State tracks the byte position in the file plus which particle of
        # the current record group was last published.
        self._read_state = {
            Vel3dLWfpStateKey.POSITION: 0,
            Vel3dLWfpStateKey.PARTICLE_NUMBER: 0
        }

        if state is not None:
            log.debug('XXX VEL state %s', state)
            self.set_state(state)

    def handle_non_data(self, non_data, non_end, start):
        """
        Handle any non-data that is found in the file
        """
        # Handle non-data here by calling the exception callback.
        if non_data is not None and non_end <= start:
            # increment the state
            self._increment_position(len(non_data))
            # use the _exception_callback
            self._exception_callback(UnexpectedDataException(
                "Found %d bytes of un-expected non-data %s" % (len(non_data), non_data)))

    def _increment_position(self, bytes_read):
        """
        Increment the parser position
        @param bytes_read The number of bytes just read
        """
        self._read_state[Vel3dLWfpStateKey.POSITION] += bytes_read

    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker.  If
        it is a valid data piece, build a particle, update the position and
        timestamp.  Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
            parsing, plus the state.
        """
        result_particles = []
        (nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=False)
        (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index(clean=True)
        self.handle_non_data(non_data, non_end, start)

        while chunk is not None:
            fields = self.parse_vel3d_data(PARTICLE_TYPE_WFP_INSTRUMENT,
                                           PARTICLE_TYPE_WFP_METADATA,
                                           chunk)
            #
            # Generate the particles for this chunk.
            # Add them to the return list of particles.
            # Increment the state (position within the file) for the last particle.
            # The first N-1 particles are tagged with the previous file position
            # and a PARTICLE_NUMBER 1 to N.
            # The Nth particle is tagged with with current file position
            # and a PARTICLE_NUMBER of 0.
            # NOTE(review): this assumes at least one particle per chunk;
            # sample_count == 0 would make the particles[-1] access below
            # raise IndexError -- confirm upstream guarantees.
            #
            (sample_count, particles) = self.generate_samples(fields)
            for x in range(self._read_state[Vel3dLWfpStateKey.PARTICLE_NUMBER],
                           sample_count - 1):
                self._read_state[Vel3dLWfpStateKey.PARTICLE_NUMBER] += 1
                result_particles.append((particles[x], copy.copy(self._read_state)))

            self._increment_position(len(chunk))
            self._read_state[Vel3dLWfpStateKey.PARTICLE_NUMBER] = 0
            result_particles.append((particles[sample_count - 1],
                                     copy.copy(self._read_state)))

            (nd_timestamp, non_data, non_start, non_end) = self._chunker.get_next_non_data_with_index(clean=False)
            (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index(clean=True)
            self.handle_non_data(non_data, non_end, start)

        return result_particles

    def set_state(self, state_obj):
        """
        Set the value of the state object for this parser
        @param state_obj The object to set the state to.
        @throws DatasetParserException if there is a bad state structure
        """
        if not isinstance(state_obj, dict):
            raise DatasetParserException("Invalid state structure")

        if not (Vel3dLWfpStateKey.POSITION in state_obj):
            raise DatasetParserException("State key %s missing" %
                                         Vel3dLWfpStateKey.POSITION)

        if not (Vel3dLWfpStateKey.PARTICLE_NUMBER in state_obj):
            raise DatasetParserException("State key %s missing" %
                                         Vel3dLWfpStateKey.PARTICLE_NUMBER)

        # NOTE(review): _state and _read_state alias the same dictionary
        # after this assignment, so updates to one are visible in the other.
        self._record_buffer = []
        self._state = state_obj
        self._read_state = state_obj
        self.input_file.seek(state_obj[Vel3dLWfpStateKey.POSITION])

    def sieve_function(self, input_buffer):
        """
        Sort through the input buffer looking for Recovered data records.
        Arguments:
          input_buffer - the contents of the input stream
        Returns:
          A list of start,end tuples
        """
        indices_list = []  # initialize the return list to empty
        start_index = 0
        while start_index < len(input_buffer):
            #
            # Extract the number of data_bytes.
            # This is the number of bytes in the FSI Header and FSI records,
            # and excludes the data_bytes field and the time fields.
            # NOTE(review): assumes at least DATA_BYTES_SIZE bytes remain;
            # a shorter tail would make struct.unpack raise -- confirm the
            # chunker never passes a truncated buffer here.
            #
            data_bytes = struct.unpack('>I',
                input_buffer[start_index : start_index + DATA_BYTES_SIZE])[0]
            #
            # Calculate the end of packet.
            # This includes the data_bytes field, the FSI Header,
            # some number of FSI records, and the 2 time fields.
            #
            end_index = start_index + DATA_BYTES_SIZE + data_bytes + TIME_RECORD_SIZE
            #
            # If the input buffer has enough bytes for the entire packet,
            # add the start,end pair to the list of indices.
            # If not enough room, we're done for now.
            #
            if end_index <= len(input_buffer):
                indices_list.append((start_index, end_index))
                start_index = end_index
            else:
                break

        return indices_list
class Vel3dLWfpSioMuleParser(SioMuleParser, Vel3dLParser):
    """Parser for telemetered vel3d_l_wfp data wrapped in SIO blocks."""

    def __init__(self, config, state, stream_handle,
                 state_callback, publish_callback, exception_callback):
        """
        @param config The configuration parameters to feed into the parser
        @param state The location in the file to start parsing from.
           This reflects what has already been published.
        @param stream_handle An already open file-like file handle
        @param state_callback The callback method from the agent driver
           (ultimately the agent) to call back when a state needs to be
           updated
        @param publish_callback The callback from the agent driver (and
           ultimately from the agent) where we send our sample particle to
           be published into ION
        @param exception_callback The callback from the agent driver to
           send an exception to
        """
        super(Vel3dLWfpSioMuleParser, self).__init__(config, stream_handle, state,
            self.sieve_function, state_callback, publish_callback, exception_callback)

    def parse_chunks(self):
        """
        Parse out any pending data chunks in the chunker.  If
        it is a valid data piece, build a particle, update the position and
        timestamp.  Go until the chunker has no more valid data.
        @retval a list of tuples with sample particles encountered in this
            parsing, plus the state.  An empty list if nothing was parsed.
        """
        result_particles = []
        (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index(clean=True)

        while chunk is not None:
            #
            # Verify that the Instrument ID is the one that we want.
            # NOTE(review): header is used without a None check, so this
            # assumes every chunk delivered here matches SIO_HEADER_MATCHER
            # -- confirm the sieve guarantees that.
            #
            header = SIO_HEADER_MATCHER.match(chunk)
            if header.group(SIO_HEADER_GROUP_ID) == ID_VEL3D_L_WFP_SIO_MULE:
                #
                # Extract the POSIX timestamp from the SIO Header
                # (hex-encoded seconds).
                #
                sio_timestamp = int(header.group(SIO_HEADER_GROUP_TIMESTAMP), 16)
                #
                # Process the remaining Vel3d data, starting from the end of the
                # SIO Header, but not including the trailing 0x03.
                #
                fields = self.parse_vel3d_data(PARTICLE_TYPE_SIO_INSTRUMENT,
                                               PARTICLE_TYPE_SIO_METADATA,
                                               chunk[header.end(0) : -1],
                                               time_stamp=sio_timestamp)
                #
                # Generate the particles for this SIO block.
                # Add them to the return list of particles.
                #
                (samples, particles) = self.generate_samples(fields)
                for x in range(0, samples):
                    result_particles.append(particles[x])
            #
            # Not our instrument, but still must indicate that no samples were found.
            #
            else:
                samples = 0

            # keep track of how many samples were found in this chunk
            self._chunk_sample_count.append(samples)

            (timestamp, chunk, start, end) = self._chunker.get_next_data_with_index(clean=True)

        return result_particles
| |
import os
import platform
import textwrap
import unittest
import pytest
from conans.model.ref import ConanFileReference, PackageReference
from conans.test.assets.sources import gen_function_cpp, gen_function_h
from conans.test.utils.tools import TestClient, NO_SETTINGS_PACKAGE_ID, replace_in_file
from conans.util.files import load
@pytest.mark.tool_cmake
class TestCMakeFindPackageMultiGenerator:

    @pytest.mark.parametrize("use_components", [False, True])
    def test_build_modules_alias_target(self, use_components):
        # The "hello" package exports a cmake build-module that creates an
        # imported INTERFACE target aliasing the generated hello target (or
        # its component); the consumer checks the alias links correctly.
        client = TestClient()
        conanfile = textwrap.dedent("""
            import os
            from conans import ConanFile, CMake

            class Conan(ConanFile):
                name = "hello"
                version = "1.0"
                settings = "os", "arch", "compiler", "build_type"
                exports_sources = ["target-alias.cmake"]
                generators = "cmake"

                def package(self):
                    self.copy("target-alias.cmake", dst="share/cmake")

                def package_info(self):
                    module = os.path.join("share", "cmake", "target-alias.cmake")
            %s
            """)
        if use_components:
            # Declare the build module on the "comp" component.
            info = textwrap.dedent("""\
                self.cpp_info.name = "namespace"
                self.cpp_info.filenames["cmake_find_package_multi"] = "hello"
                self.cpp_info.components["comp"].libs = ["hello"]
                self.cpp_info.components["comp"].build_modules["cmake_find_package_multi"].append(module)
                """)
        else:
            # Declare the build module on the root cpp_info.
            info = textwrap.dedent("""\
                self.cpp_info.libs = ["hello"]
                self.cpp_info.build_modules["cmake_find_package_multi"].append(module)
                """)
        target_alias = textwrap.dedent("""
            add_library(otherhello INTERFACE IMPORTED)
            target_link_libraries(otherhello INTERFACE {target_name})
            """).format(target_name="namespace::comp" if use_components else "hello::hello")
        # Indent the info lines to match the package_info() body before
        # substituting them into the template.
        conanfile = conanfile % "\n".join(["        %s" % line for line in info.splitlines()])
        client.save({"conanfile.py": conanfile, "target-alias.cmake": target_alias})
        client.run("create .")

        consumer = textwrap.dedent("""
            from conans import ConanFile, CMake

            class Conan(ConanFile):
                name = "consumer"
                version = "1.0"
                settings = "os", "compiler", "build_type", "arch"
                exports_sources = ["CMakeLists.txt"]
                generators = "cmake_find_package_multi"
                requires = "hello/1.0"

                def build(self):
                    cmake = CMake(self)
                    cmake.configure()
            """)
        cmakelists = textwrap.dedent("""
            cmake_minimum_required(VERSION 3.0)
            project(test)
            find_package(hello)
            get_target_property(tmp otherhello INTERFACE_LINK_LIBRARIES)
            message("otherhello link libraries: ${tmp}")
            """)
        client.save({"conanfile.py": consumer, "CMakeLists.txt": cmakelists})
        client.run("create .")
        # The alias must resolve to the expected namespaced target.
        if use_components:
            assert "otherhello link libraries: namespace::comp" in client.out
        else:
            assert "otherhello link libraries: hello::hello" in client.out
@pytest.mark.slow
@pytest.mark.tool_cmake
class CMakeFindPathMultiGeneratorTest(unittest.TestCase):
def test_native_export_multi(self):
    """
    bye depends on hello. Both use find_package in their CMakeLists.txt
    The consumer depends on bye, using the cmake_find_package_multi generator
    """
    c = TestClient()
    project_folder_name = "project_targets"
    c.copy_assets("cmake_find_package_multi", ["bye", "hello", project_folder_name])

    # Create packages for hello and bye, in both configurations.
    for p in ("hello", "bye"):
        for bt in ("Debug", "Release"):
            c.run("create {} user/channel -s build_type={}".format(p, bt))

    with c.chdir(project_folder_name):
        # Save conanfile and example
        conanfile = textwrap.dedent("""
            [requires]
            bye/1.0@user/channel

            [generators]
            cmake_find_package_multi
            """)
        example_cpp = gen_function_cpp(name="main", includes=["bye"], calls=["bye"])
        c.save({"conanfile.txt": conanfile, "example.cpp": example_cpp})

        with c.chdir("build"):
            for bt in ("Debug", "Release"):
                c.run("install .. user/channel -s build_type={}".format(bt))

            # Test that we are using find_dependency with the NO_MODULE option
            # to skip finding first possible FindBye somewhere
            self.assertIn("find_dependency(hello REQUIRED NO_MODULE)",
                          load(os.path.join(c.current_folder, "bye-config.cmake")))

            if platform.system() == "Windows":
                # Multi-config generator: build and run both configurations
                # from the same generated project.
                c.run_command('cmake .. -G "Visual Studio 15 Win64"')
                c.run_command('cmake --build . --config Debug')
                c.run_command('cmake --build . --config Release')

                c.run_command('Debug\\example.exe')
                self.assertIn("Hello World Debug!", c.out)
                self.assertIn("bye World Debug!", c.out)

                c.run_command('Release\\example.exe')
                self.assertIn("Hello World Release!", c.out)
                self.assertIn("bye World Release!", c.out)
            else:
                # Single-config generators: configure, build and run once
                # per build type.
                for bt in ("Debug", "Release"):
                    c.run_command('cmake .. -DCMAKE_BUILD_TYPE={}'.format(bt))
                    c.run_command('cmake --build .')
                    c.run_command('./example')
                    self.assertIn("Hello World {}!".format(bt), c.out)
                    self.assertIn("bye World {}!".format(bt), c.out)
                    os.remove(os.path.join(c.current_folder, "example"))
def test_build_modules(self):
    # Verifies that a declared build module is loaded by the generated
    # config file, and that modules found via builddirs/CMAKE_MODULE_PATH
    # are usable from it.
    conanfile = textwrap.dedent("""
        import os
        from conans import ConanFile, CMake

        class Conan(ConanFile):
            name = "test"
            version = "1.0"
            exports_sources = ["my-module.cmake", "FindFindModule.cmake"]

            def package(self):
                self.copy("*.cmake", dst="share/cmake")

            def package_info(self):
                # Only first module is defined
                # (the other one should be found by CMAKE_MODULE_PATH in builddirs)
                builddir = os.path.join("share", "cmake")
                module = os.path.join(builddir, "my-module.cmake")
                self.cpp_info.build_modules.append(module)
                self.cpp_info.builddirs = [builddir]
        """)
    # This is a module that has other find_package() calls
    my_module = textwrap.dedent("""
        find_package(FindModule REQUIRED)
        """)
    # This is a module that defines some functionality
    find_module = textwrap.dedent("""
        function(conan_message MESSAGE_OUTPUT)
            message(${ARGV${0}})
        endfunction()
        """)
    client = TestClient()
    client.save({"conanfile.py": conanfile, "my-module.cmake": my_module,
                 "FindFindModule.cmake": find_module})
    client.run("create .")
    ref = ConanFileReference("test", "1.0", None, None)
    pref = PackageReference(ref, NO_SETTINGS_PACKAGE_ID, None)
    package_path = client.cache.package_layout(ref).package(pref)
    modules_path = os.path.join(package_path, "share", "cmake")
    # Both cmake files must have been packaged.
    self.assertEqual(set(os.listdir(modules_path)),
                     {"FindFindModule.cmake", "my-module.cmake"})

    consumer = textwrap.dedent("""
        from conans import ConanFile, CMake

        class Conan(ConanFile):
            name = "consumer"
            version = "1.0"
            settings = "os", "compiler", "build_type", "arch"
            exports_sources = ["CMakeLists.txt"]
            generators = "cmake_find_package_multi"
            requires = "test/1.0"

            def build(self):
                cmake = CMake(self)
                cmake.configure()
                cmake.build()
        """)
    cmakelists = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.0)
        project(test)
        find_package(test)
        conan_message("Printing using a external module!")
        """)
    client.save({"conanfile.py": consumer, "CMakeLists.txt": cmakelists})
    client.run("create .")
    # The function defined by the transitively-found module must be callable.
    self.assertIn("Printing using a external module!", client.out)
def test_cmake_find_package_system_libs(self):
    """system_libs declared in package_info end up in the per-config
    *_SYSTEM_LIBS_<CONFIG> variables and in the target's
    INTERFACE_LINK_LIBRARIES generator expressions."""
    conanfile = textwrap.dedent("""
        from conans import ConanFile, tools
        class Test(ConanFile):
            name = "Test"
            version = "0.1"
            settings = "build_type"
            def package_info(self):
                self.cpp_info.libs = ["lib1"]
                if self.settings.build_type == "Debug":
                    self.cpp_info.system_libs.append("sys1d")
                else:
                    self.cpp_info.system_libs.append("sys1")
        """)
    client = TestClient()
    client.save({"conanfile.py": conanfile})
    client.run("export .")
    conanfile = textwrap.dedent("""
        [requires]
        Test/0.1
        [generators]
        cmake_find_package_multi
        """)
    cmakelists_release = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.1)
        set(CMAKE_CXX_COMPILER_WORKS 1)
        set(CMAKE_CXX_ABI_COMPILED 1)
        project(consumer CXX)
        set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR})
        set(CMAKE_MODULE_PATH ${CMAKE_BINARY_DIR})
        find_package(Test)
        message("System libs: ${Test_SYSTEM_LIBS_RELEASE}")
        message("Libraries to Link: ${Test_LIBS_RELEASE}")
        get_target_property(tmp Test::Test INTERFACE_LINK_LIBRARIES)
        message("Target libs: ${tmp}")
        """)
    cmakelists_debug = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.1)
        set(CMAKE_CXX_COMPILER_WORKS 1)
        set(CMAKE_CXX_ABI_COMPILED 1)
        project(consumer CXX)
        set(CMAKE_PREFIX_PATH ${CMAKE_BINARY_DIR})
        set(CMAKE_MODULE_PATH ${CMAKE_BINARY_DIR})
        find_package(Test)
        message("System libs: ${Test_SYSTEM_LIBS_DEBUG}")
        message("Libraries to Link: ${Test_LIBS_DEBUG}")
        get_target_property(tmp Test::Test INTERFACE_LINK_LIBRARIES)
        message("Target libs: ${tmp}")
        """)
    for build_type in ["Release", "Debug"]:
        cmakelists = cmakelists_release if build_type == "Release" else cmakelists_debug
        client.save({"conanfile.txt": conanfile, "CMakeLists.txt": cmakelists}, clean_first=True)
        client.run("install conanfile.txt --build missing -s build_type=%s" % build_type)
        client.run_command('cmake . -DCMAKE_BUILD_TYPE={0}'.format(build_type))
        # Debug config links sys1d, any other config links sys1 (see recipe).
        library_name = "sys1d" if build_type == "Debug" else "sys1"
        self.assertIn("System libs: %s" % library_name, client.out)
        self.assertIn("Libraries to Link: lib1", client.out)
        # system_libs must not be reported as missing package libraries.
        self.assertNotIn("-- Library %s not found in package, might be system one" %
                         library_name, client.out)
        if build_type == "Release":
            target_libs = ("$<$<CONFIG:Debug>:;>;"
                           "$<$<CONFIG:Release>:lib1;sys1;"
                           "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,SHARED_LIBRARY>:>;"
                           "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,MODULE_LIBRARY>:>;"
                           "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,EXECUTABLE>:>>;"
                           "$<$<CONFIG:RelWithDebInfo>:;>;$<$<CONFIG:MinSizeRel>:;>")
        else:
            target_libs = ("$<$<CONFIG:Debug>:lib1;sys1d;"
                           "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,SHARED_LIBRARY>:>;"
                           "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,MODULE_LIBRARY>:>;"
                           "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,EXECUTABLE>:>>;"
                           "$<$<CONFIG:Release>:;>;"
                           "$<$<CONFIG:RelWithDebInfo>:;>;$<$<CONFIG:MinSizeRel>:;>")
        self.assertIn("Target libs: %s" % target_libs, client.out)
def test_cpp_info_name(self):
    """cpp_info.name overrides the find_package()/target name, also for
    transitive requirements (consumer -> hello2 -> hello)."""
    client = TestClient()
    client.run("new hello/1.0 -s")
    # Rename the generated package's cmake name to MYHELLO via cpp_info.name.
    # NOTE(review): the whitespace after '\n' in the replacement text must
    # match the template's method-body indentation -- confirm against the
    # `conan new` template.
    replace_in_file(os.path.join(client.current_folder, "conanfile.py"),
                    'self.cpp_info.libs = ["hello"]',
                    'self.cpp_info.libs = ["hello"]\n        self.cpp_info.name = "MYHELLO"',
                    output=client.out)
    client.run("create .")
    client.run("new hello2/1.0 -s")
    replace_in_file(os.path.join(client.current_folder, "conanfile.py"),
                    'self.cpp_info.libs = ["hello2"]',
                    'self.cpp_info.libs = ["hello2"]\n        self.cpp_info.name = "MYHELLO2"',
                    output=client.out)
    # Make hello2 require hello, so the renamed target is used transitively.
    replace_in_file(os.path.join(client.current_folder, "conanfile.py"),
                    'exports_sources = "src/*"',
                    'exports_sources = "src/*"\n    requires = "hello/1.0"',
                    output=client.out)
    client.run("create .")
    cmakelists = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.1)
        project(consumer)
        find_package(MYHELLO2)
        get_target_property(tmp MYHELLO2::MYHELLO2 INTERFACE_LINK_LIBRARIES)
        message("Target libs (hello2): ${tmp}")
        get_target_property(tmp MYHELLO::MYHELLO INTERFACE_LINK_LIBRARIES)
        message("Target libs (hello): ${tmp}")
        """)
    conanfile = textwrap.dedent("""
        from conans import ConanFile, CMake
        class Conan(ConanFile):
            settings = "build_type"
            requires = "hello2/1.0"
            generators = "cmake_find_package_multi"
            def build(self):
                cmake = CMake(self)
                cmake.configure()
        """)
    client.save({"conanfile.py": conanfile, "CMakeLists.txt": cmakelists})
    client.run("install .")
    client.run("build .")
    # The renamed MYHELLO2 target must link against the renamed transitive
    # MYHELLO::MYHELLO target.
    assert ("Target libs (hello2): "
            "$<$<CONFIG:Debug>:;>;"
            "$<$<CONFIG:Release>:CONAN_LIB::MYHELLO2_hello2_RELEASE;MYHELLO::MYHELLO;"
            "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,SHARED_LIBRARY>:>;"
            "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,MODULE_LIBRARY>:>;"
            "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,EXECUTABLE>:>>;"
            "$<$<CONFIG:RelWithDebInfo>:;>;$<$<CONFIG:MinSizeRel>:;>") in client.out
    assert ("Target libs (hello): "
            "$<$<CONFIG:Debug>:;>;"
            "$<$<CONFIG:Release>:CONAN_LIB::MYHELLO_hello_RELEASE;"
            "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,SHARED_LIBRARY>:>;"
            "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,MODULE_LIBRARY>:>;"
            "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,EXECUTABLE>:>>;"
            "$<$<CONFIG:RelWithDebInfo>:;>;$<$<CONFIG:MinSizeRel>:;>") in client.out
def test_cpp_info_config(self):
    """Config-scoped cpp_info values (cpp_info.debug.*, cpp_info.release.*)
    are merged into the per-configuration variables of the generated
    requirementTarget-<config>.cmake files."""
    recipe = textwrap.dedent("""
        from conans import ConanFile
        class Requirement(ConanFile):
            name = "requirement"
            version = "version"
            settings = "os", "arch", "compiler", "build_type"
            def package_info(self):
                self.cpp_info.libs = ["lib_both"]
                self.cpp_info.debug.libs = ["lib_debug"]
                self.cpp_info.release.libs = ["lib_release"]
                self.cpp_info.cxxflags = ["-req_both"]
                self.cpp_info.debug.cxxflags = ["-req_debug"]
                self.cpp_info.release.cxxflags = ["-req_release"]
        """)
    client = TestClient()
    client.save({"conanfile.py": recipe})
    # Build the binaries for both configurations first, then generate the
    # multi-config find scripts for each of them.
    for build_type in ("Release", "Debug"):
        client.run("create . -s build_type=%s" % build_type)
    for build_type in ("Release", "Debug"):
        client.run("install requirement/version@ -g cmake_find_package_multi "
                   "-s build_type=%s" % build_type)
    content_release = client.load("requirementTarget-release.cmake")
    content_debug = client.load("requirementTarget-debug.cmake")
    # Shared values appear in both files; config-scoped ones only in theirs.
    self.assertIn('set(requirement_COMPILE_OPTIONS_RELEASE_LIST "-req_both;-req_release" "")',
                  content_release)
    self.assertIn('set(requirement_COMPILE_OPTIONS_DEBUG_LIST "-req_both;-req_debug" "")',
                  content_debug)
    self.assertIn('set(requirement_LIBRARY_LIST_RELEASE lib_both lib_release)', content_release)
    self.assertIn('set(requirement_LIBRARY_LIST_DEBUG lib_both lib_debug)', content_debug)
def test_components_system_libs(self):
    """system_libs declared on a component end up in that component
    target's INTERFACE_LINK_LIBRARIES generator expressions."""
    conanfile = textwrap.dedent("""
        from conans import ConanFile
        class Requirement(ConanFile):
            name = "requirement"
            version = "system"
            settings = "os", "arch", "compiler", "build_type"
            def package_info(self):
                self.cpp_info.components["component"].system_libs = ["system_lib_component"]
        """)
    t = TestClient()
    t.save({"conanfile.py": conanfile})
    t.run("create .")
    conanfile = textwrap.dedent("""
        from conans import ConanFile, tools, CMake
        class Consumer(ConanFile):
            name = "consumer"
            version = "0.1"
            requires = "requirement/system"
            generators = "cmake_find_package_multi"
            exports_sources = "CMakeLists.txt"
            settings = "os", "arch", "compiler", "build_type"
            def build(self):
                cmake = CMake(self)
                cmake.configure()
        """)
    cmakelists = textwrap.dedent("""
        project(consumer)
        cmake_minimum_required(VERSION 3.1)
        find_package(requirement)
        get_target_property(tmp requirement::component INTERFACE_LINK_LIBRARIES)
        message("component libs: ${tmp}")
        """)
    t.save({"conanfile.py": conanfile, "CMakeLists.txt": cmakelists})
    t.run("create . --build missing -s build_type=Release")
    # The Release genex carries the system lib; other configs stay empty.
    assert ("component libs: $<$<CONFIG:Debug>:;>;"
            "$<$<CONFIG:Release>:system_lib_component;"
            "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,SHARED_LIBRARY>:>;"
            "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,MODULE_LIBRARY>:>;"
            "$<$<STREQUAL:$<TARGET_PROPERTY:TYPE>,EXECUTABLE>:>>;"
            "$<$<CONFIG:RelWithDebInfo>:;>;$<$<CONFIG:MinSizeRel>:;>") in t.out
@pytest.mark.tool_cmake
class TestNoNamespaceTarget:
    """ This test case uses build-modules feature to create a target without a namespace. This
    target uses targets created by Conan (build_modules are included after Conan targets)
    """

    # Recipe packaging a library together with a build-module that wraps the
    # Conan-generated library::library target in a namespace-less target.
    conanfile = textwrap.dedent("""
        import os
        from conans import ConanFile, CMake
        class Recipe(ConanFile):
            settings = "os", "compiler", "arch", "build_type"
            exports_sources = ["src/*", "build-module.cmake"]
            generators = "cmake"
            def build(self):
                cmake = CMake(self)
                cmake.configure(source_folder="src")
                cmake.build()
            def package(self):
                self.copy("*.h", dst="include", src="src")
                self.copy("*.lib", dst="lib", keep_path=False)
                self.copy("*.dll", dst="bin", keep_path=False)
                self.copy("*.dylib*", dst="lib", keep_path=False)
                self.copy("*.so", dst="lib", keep_path=False)
                self.copy("*.a", dst="lib", keep_path=False)
                self.copy("build-module.cmake", dst="share/cmake")
            def package_info(self):
                self.cpp_info.libs = ["library"]
                module = os.path.join("share", "cmake", "build-module.cmake")
                self.cpp_info.build_modules['cmake_find_package'] = [module, ]
                self.cpp_info.build_modules['cmake_find_package_multi'] = [module, ]
        """)

    # Build-module: defines `nonamespace` on top of the Conan target.
    build_module = textwrap.dedent("""
        message(">> Build-module is included")
        if(NOT TARGET nonamespace)
            add_library(nonamespace INTERFACE IMPORTED)
            target_link_libraries(nonamespace INTERFACE library::library)
        endif()
        """)

    # Consumer project linking only against the namespace-less target.
    consumer = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.0)
        set(CMAKE_CXX_COMPILER_WORKS 1)
        set(CMAKE_CXX_ABI_COMPILED 1)
        project(consumer)
        find_package(library)
        get_target_property(LIBS1 library::library INTERFACE_LINK_LIBRARIES)
        message(">> library::library libs: ${LIBS1}")
        get_target_property(LIBS2 nonamespace INTERFACE_LINK_LIBRARIES)
        message(">> nonamespace libs: ${LIBS2}")
        add_executable(consumer main.cpp)
        target_link_libraries(consumer nonamespace)
        """)

    main = textwrap.dedent("""
        #include "library.h"
        int main() {
            library();
        }
        """)

    @classmethod
    def setup_class(cls):
        # Shared TestClient for all tests: packages are created once and the
        # consumer project is configured in per-test subfolders.
        cls.t = t = TestClient()
        # Create a library providing a build-module
        t.run('new library/version -s')
        t.save({'conanfile.py': cls.conanfile,
                'build-module.cmake': cls.build_module})
        t.run('create conanfile.py library/version@ -s build_type=Debug')
        t.run('create conanfile.py library/version@ -s build_type=Release')
        # Prepare project to consume the targets
        t.save({'CMakeLists.txt': cls.consumer, 'main.cpp': cls.main}, clean_first=True)

    def test_non_multi_generator(self):
        # Single-config generator: the build-module is included exactly once.
        t = self.t
        with t.chdir('not_multi'):
            t.run('install library/version@ -g cmake_find_package -s build_type=Release')
            generator = '-G "Visual Studio 15 Win64"' if platform.system() == "Windows" else ''
            t.run_command(
                'cmake .. {} -DCMAKE_MODULE_PATH:PATH="{}"'.format(generator, t.current_folder))
            assert str(t.out).count('>> Build-module is included') == 1
            assert '>> nonamespace libs: library::library' in t.out
            t.run_command('cmake --build .')  # Compiles and links.

    @pytest.mark.skipif(platform.system() != "Windows", reason="Only windows")
    @pytest.mark.tool_visual_studio
    def test_multi_generator_windows(self):
        t = self.t
        with t.chdir('multi_windows'):
            t.run('install library/version@ -g cmake_find_package_multi -s build_type=Release')
            t.run('install library/version@ -g cmake_find_package_multi -s build_type=Debug')
            generator = '-G "Visual Studio 15 Win64"'
            t.run_command(
                'cmake .. {} -DCMAKE_PREFIX_PATH:PATH="{}"'.format(generator, t.current_folder))
            # Multi-config generator includes the module once per config.
            assert str(t.out).count('>> Build-module is included') == 2  # FIXME: Known bug
            assert '>> nonamespace libs: library::library' in t.out
            t.run_command('cmake --build . --config Release')  # Compiles and links.

    @pytest.mark.skipif(platform.system() != "Darwin", reason="Requires Macos")
    @pytest.mark.tool_xcodebuild
    @pytest.mark.tool_cmake(version="3.19")
    def test_multi_generator_macos(self):
        t = self.t
        with t.chdir('multi_macos'):
            t.run('install library/version@ -g cmake_find_package_multi -s build_type=Release')
            t.run('install library/version@ -g cmake_find_package_multi -s build_type=Debug')
            t.run_command('cmake .. -G Xcode -DCMAKE_PREFIX_PATH:PATH="{}"'.format(t.current_folder))
            # Multi-config generator includes the module once per config.
            assert str(t.out).count('>> Build-module is included') == 2  # FIXME: Known bug
            assert '>> nonamespace libs: library::library' in t.out
            t.run_command('cmake --build . --config Release')  # Compiles and links.
@pytest.mark.skipif(platform.system() != "Linux", reason="Only Linux")
@pytest.mark.tool_cmake
def test_no_soname_flag():
    """ This test case is testing this graph structure:
        * 'LibNoSoname' -> 'OtherLib' -> 'Executable'
    Where:
        * LibNoSoname: is a package built as shared and without the SONAME flag.
        * OtherLib: is a package which requires LibNoSoname.
        * Executable: is the final consumer building an application and depending on OtherLib.
    """
    client = TestClient()
    # Parameterized recipe reused for both library packages; `requires` is
    # filled in only for libB.
    conanfile = textwrap.dedent("""
        from conans import ConanFile, CMake, tools
        class {name}Conan(ConanFile):
            name = "{name}"
            version = "1.0"
            # Binary configuration
            settings = "os", "compiler", "build_type", "arch"
            options = {{"shared": [True, False], "fPIC": [True, False]}}
            default_options = {{"shared": True, "fPIC": True}}
            # Sources are located in the same place as this recipe, copy them to the recipe
            exports_sources = "CMakeLists.txt", "src/*"
            generators = "cmake_find_package_multi"
            {requires}
            def config_options(self):
                if self.settings.os == "Windows":
                    del self.options.fPIC
            def build(self):
                cmake = CMake(self)
                cmake.configure()
                cmake.build()
            def package(self):
                self.copy("*.h", dst="include", src="src")
                self.copy("*.lib", dst="lib", keep_path=False)
                self.copy("*.dll", dst="bin", keep_path=False)
                self.copy("*.so", dst="lib", keep_path=False)
                self.copy("*.dylib", dst="lib", keep_path=False)
                self.copy("*.a", dst="lib", keep_path=False)
            def package_info(self):
                self.cpp_info.libs = ["{name}"]
                self.cpp_info.names["cmake_find_package_multi"] = "{name}"
        """)
    cmakelists_nosoname = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.15)
        project(nosoname CXX)
        add_library(nosoname SHARED src/nosoname.cpp)
        # Adding NO_SONAME flag to main library
        set_target_properties(nosoname PROPERTIES PUBLIC_HEADER "src/nosoname.h" NO_SONAME 1)
        install(TARGETS nosoname DESTINATION "."
                PUBLIC_HEADER DESTINATION include
                RUNTIME DESTINATION bin
                ARCHIVE DESTINATION lib
                LIBRARY DESTINATION lib
                )
        """)
    cpp = gen_function_cpp(name="nosoname")
    h = gen_function_h(name="nosoname")
    client.save({"CMakeLists.txt": cmakelists_nosoname,
                 "src/nosoname.cpp": cpp,
                 "src/nosoname.h": h,
                 "conanfile.py": conanfile.format(name="nosoname", requires="")})
    # Now, let's create both libraries
    client.run("create .")
    cmakelists_libB = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.15)
        project(libB CXX)
        find_package(nosoname CONFIG REQUIRED)
        add_library(libB SHARED src/libB.cpp)
        target_link_libraries(libB nosoname::nosoname)
        set_target_properties(libB PROPERTIES PUBLIC_HEADER "src/libB.h")
        install(TARGETS libB DESTINATION "."
                PUBLIC_HEADER DESTINATION include
                RUNTIME DESTINATION bin
                ARCHIVE DESTINATION lib
                LIBRARY DESTINATION lib
                )
        """)
    cpp = gen_function_cpp(name="libB", includes=["nosoname"], calls=["nosoname"])
    h = gen_function_h(name="libB")
    client.save({"CMakeLists.txt": cmakelists_libB,
                 "src/libB.cpp": cpp,
                 "src/libB.h": h,
                 "conanfile.py": conanfile.format(name="libB", requires='requires = "nosoname/1.0"')},
                clean_first=True)
    # Now, let's create both libraries
    client.run("create .")
    # Now, let's create the application consuming libB
    cmakelists = textwrap.dedent("""
        cmake_minimum_required(VERSION 3.15)
        project(PackageTest CXX)
        include(${{CMAKE_BINARY_DIR}}/conanbuildinfo.cmake)
        conan_basic_setup()
        set(CMAKE_MODULE_PATH ${{CMAKE_BINARY_DIR}})
        set(CMAKE_PREFIX_PATH ${{CMAKE_BINARY_DIR}})
        find_package(libB CONFIG REQUIRED)
        add_executable(example src/example.cpp)
        target_link_libraries(example libB)
        """)
    conanfile = textwrap.dedent("""
        [requires]
        libB/1.0
        [generators]
        cmake
        cmake_find_package_multi
        """)
    cpp = gen_function_cpp(name="main", includes=["libB"], calls=["libB"])
    client.save({"CMakeLists.txt": cmakelists.format(current_folder=client.current_folder),
                 "src/example.cpp": cpp,
                 "conanfile.txt": conanfile},
                clean_first=True)
    client.run('install . ')
    # If the SONAME-less dependency were mishandled, linking or running the
    # final executable would fail; success of this command is the real check.
    client.run_command('cmake -G "Unix Makefiles" . && cmake --build . && ./bin/example')
| |
# -*- coding: utf-8 -*-
"""
myads_service.models
~~~~~~~~~~~~~~~~~~~~~
Models for the users (users) of AdsWS
"""
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.ext.mutable import Mutable
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy import Column, String, Text
from adsmutils import UTCDateTime
import json
import logging
Base = declarative_base()
class MutableDict(Mutable, dict):
    """
    A dict subclass whose mutations are reported to SQLAlchemy.

    By default, SQLAlchemy only tracks changes of the value itself, which works
    "as expected" for simple values, such as ints and strings, but not dicts.

    http://stackoverflow.com/questions/25300447/
    using-list-on-postgresql-json-type-with-sqlalchemy
    """
    @classmethod
    def coerce(cls, key, value):
        """
        Convert plain dictionaries to MutableDict.
        """
        if not isinstance(value, MutableDict):
            if isinstance(value, dict):
                return MutableDict(value)
            # this call will raise ValueError
            return Mutable.coerce(key, value)
        else:
            return value

    def __setitem__(self, key, value):
        """
        Detect dictionary set events and emit change events.
        """
        dict.__setitem__(self, key, value)
        self.changed()

    def __delitem__(self, key):
        """
        Detect dictionary del events and emit change events.
        """
        dict.__delitem__(self, key)
        self.changed()

    def setdefault(self, key, value):
        """
        Detect dictionary setdefault events and emit change events

        :return: the existing value for `key` if present, otherwise `value`
        """
        # BUG FIX: propagate dict.setdefault's return value; it was
        # previously discarded, breaking the dict.setdefault contract.
        result = dict.setdefault(self, key, value)
        self.changed()
        return result

    def update(self, subdict):
        """
        Detect dictionary update events and emit change events
        """
        dict.update(self, subdict)
        self.changed()

    def pop(self, key, default):
        """
        Detect dictionary pop events and emit change events
        :param key: key to pop
        :param default: default if key does not exist
        :return: the item under the given key
        """
        # BUG FIX: return the popped item as documented; previously the
        # result of dict.pop was discarded and the method returned None.
        result = dict.pop(self, key, default)
        self.changed()
        return result
class User(Base):
    """ORM row for the `users` table, keyed by ORCID iD."""
    __tablename__ = 'users'
    orcid_id = Column(String(255), primary_key=True)
    access_token = Column(String(255))
    created = Column(UTCDateTime)
    updated = Column(UTCDateTime)
    profile = Column(Text)   # JSON-encoded text, decoded in toJSON()
    info = Column(Text)      # JSON-encoded text, decoded in toJSON()

    def toJSON(self):
        """Returns value formatted as python dict."""
        # Empty/None columns serialize as None; the `or None` keeps the
        # original behaviour of collapsing falsy decoded values to None too.
        created = self.created.isoformat() if self.created else None
        updated = self.updated.isoformat() if self.updated else None
        profile = (json.loads(self.profile) or None) if self.profile else None
        info = (json.loads(self.info) or None) if self.info else None
        return {
            'orcid_id': self.orcid_id,
            'access_token': self.access_token,
            'created': created,
            'updated': updated,
            'profile': profile,
            'info': info,
        }
class Profile(Base):
    """ORM row holding an ORCID profile's claimed records.

    ``bibcode`` maps a record key (usually an ADS bibcode) to a nested dict
    expected to carry the keys listed in ``keys``.
    """
    __tablename__ = 'profile'
    orcid_id = Column(String(255), primary_key=True)
    created = Column(UTCDateTime)
    updated = Column(UTCDateTime)
    # NOTE(review): a shared mutable {} as a Column default is a classic
    # SQLAlchemy pitfall; presumably MutableDict.coerce copies plain dicts
    # so rows do not share state -- confirm.
    bibcode = Column(MutableDict.as_mutable(JSON), default={})

    # Statuses a record that exists in ADS may have
    bib_status = ['verified', 'pending', 'rejected']
    # Status of records that do not exist in ADS
    nonbib_status = ['not in ADS']
    # Nested keys every record entry is expected to carry
    keys = ['status', 'title', 'pubyear', 'pubmonth']

    def get_bibcodes(self):
        """
        Returns the bibcodes of the ORCID profile

        :return: (bibcodes, statuses) of records whose status is in bib_status
        """
        bibcodes, statuses = self.find_nested(self.bibcode, 'status', self.bib_status)
        return bibcodes, statuses

    def get_non_bibcodes(self):
        """
        Returns the non-ADS records of the ORCID profile
        """
        non_bibcodes, status = self.find_nested(self.bibcode, 'status', self.nonbib_status)
        return non_bibcodes

    def get_records(self):
        """
        Returns all records from an ORCID profile
        """
        return self.bibcode

    def add_records(self, records):
        """
        Adds a record to the bibcode field, first making sure it has the
        appropriate nested dict
        :param records: dict of dicts of bibcodes and non-bibcodes
        """
        if not self.bibcode:
            self.bibcode = {}
        # Make sure every incoming record carries the full set of nested keys
        for record in records.values():
            for key in self.keys:
                record.setdefault(key, None)
        self.bibcode.update(records)

    def remove_bibcodes(self, bibcodes):
        """
        Removes a bibcode(s) from the bibcode field.
        Given the way in which bibcodes are stored may change, it seems simpler
        to keep the method of adding/removing in a small wrapper so that only
        one location needs to be modified (or YAGNI?).
        :param bibcodes: list of bibcodes
        """
        # Plain loop instead of a side-effect-only list comprehension
        for key in bibcodes:
            self.bibcode.pop(key, None)

    def get_nested(self, dictionary, nested_key):
        """Get all values from the nested dictionary for a given nested key"""
        # setdefault (not .get) is kept on purpose: the original behaviour
        # also inserts the missing nested key into each entry.
        return [dictionary[key].setdefault(nested_key, None) for key in dictionary]

    def find_nested(self, dictionary, nested_key, nested_value):
        """Get all top-level keys from a nested dictionary for a given list of nested values
        belonging to a given nested key
        :param dictionary - nested dictionary to search; searches one level deep
        :param nested_key - key within nested dictionary to search for
        :param nested_value - list (or string or number) of acceptable values to search for within the
            given nested_key
        :return good_keys - list of top-level keys with a matching nested value to the given nested key
        :return good_values - list of the value (from nested_value) retrieved
        """
        if not isinstance(nested_value, list):
            nested_value = [nested_value]
        good_keys = []
        good_values = []
        for key in dictionary:
            if dictionary[key].get(nested_key, '') in nested_value:
                good_keys.append(key)
                good_values.append(dictionary[key].get(nested_key))
        return good_keys, good_values

    def update_status(self, keys, status):
        """
        Update the status for a given key or keys
        :param keys: str or list
        :param status: str
        :return: None
        """
        if not isinstance(keys, list):
            keys = [keys]
        if not isinstance(status, str):
            # BUG FIX: the message was built with str.format() on a %s-style
            # template, so the placeholders were never interpolated; let the
            # logging module do the (lazy) %-interpolation instead.
            logging.warning('Status to update for record %s, ORCID %s must be passed as a string',
                            keys, self.orcid_id)
        for key in keys:
            if key in self.bibcode:
                self.bibcode[key]['status'] = status
                # Explicitly flag the nested mutation for SQLAlchemy
                self.bibcode.changed()
            else:
                # BUG FIX: same %s/.format mismatch as above
                logging.warning('Record %s not in profile for %s', key, self.orcid_id)

    def get_status(self, keys):
        """
        For a given set of records, return the statuses
        :param keys: str or list
        :return: good_keys - list of keys that exist in the set
        :return: statuses - list of statuses of good_keys
        """
        if not isinstance(keys, list):
            keys = [keys]
        good_keys = []
        statuses = []
        for key in keys:
            if key in self.bibcode:
                good_keys.append(key)
                statuses.append(self.bibcode[key]['status'])
        return good_keys, statuses
| |
# The MIT License
#
# Copyright 2011 Sony Ericsson Mobile Communications. All rights reserved.
# Copyright 2012 Sony Mobile Communications. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
""" Gerrit event classes. """
import json
import logging
from .error import GerritError
from .models import Account, Approval, Change, Patchset, RefUpdate
class GerritEventFactory(object):
    """ Gerrit event factory. """

    # Maps event type name -> [module name, class name] of the handler class.
    _events = {}

    @classmethod
    def register(cls, name):
        """ Decorator to register the event identified by `name`.

        Return the decorated class.

        Raise GerritError if the event is already registered.
        """
        def decorate(klazz):
            """ Decorator. """
            if name in cls._events:
                raise GerritError("Duplicate event: %s" % name)
            cls._events[name] = [klazz.__module__, klazz.__name__]
            klazz.name = name
            return klazz
        return decorate

    @classmethod
    def create(cls, data):
        """ Create a new event instance.

        Return an instance of the `GerritEvent` subclass after converting
        `data` to json.

        Raise GerritError if json parsed from `data` does not contain a `type`
        key.
        """
        try:
            json_data = json.loads(data)
        except ValueError as err:
            # Wrap unparsable payloads in a synthetic error-event.
            logging.debug("Failed to load json data: %s: [%s]", str(err), data)
            json_data = json.loads(ErrorEvent.error_json(err))

        if "type" not in json_data:
            raise GerritError("`type` not in json_data")

        # Unknown event types fall back to the unhandled-event handler.
        name = json_data["type"]
        if name not in cls._events:
            name = 'unhandled-event'

        # Import the handler's module lazily and instantiate the class.
        module_name, class_name = cls._events[name]
        module = __import__(module_name, fromlist=[module_name])
        klazz = getattr(module, class_name)
        return klazz(json_data)
class GerritEvent(object):
    """ Gerrit event base class. """

    def __init__(self, json_data):
        # Raw decoded json payload, kept so consumers can access fields
        # that subclasses do not map to attributes.
        self.json = json_data
@GerritEventFactory.register("unhandled-event")
class UnhandledEvent(GerritEvent):
    """ Unknown event type received in json data from Gerrit's event stream.

    Fallback handler used by the factory for unregistered `type` values.
    """

    # No __init__ override needed: the previous one only delegated to
    # GerritEvent.__init__, which is inherited anyway.

    def __repr__(self):
        return u"<UnhandledEvent>"
@GerritEventFactory.register("error-event")
class ErrorEvent(GerritEvent):
    """ Error occurred when processing json data from Gerrit's event stream. """

    def __init__(self, json_data):
        super(ErrorEvent, self).__init__(json_data)
        # Human-readable description of the original failure.
        self.error = json_data["error"]

    @classmethod
    def error_json(cls, error):
        """ Return a json string for the `error`.

        BUG FIX: serialize with json.dumps instead of raw %s interpolation,
        so quotes, backslashes or control characters in the error text can
        no longer produce an invalid json document (which would make the
        factory's json.loads of this payload raise).
        """
        return json.dumps({"type": "error-event", "error": str(error)})

    def __repr__(self):
        return u"<ErrorEvent: %s>" % self.error
@GerritEventFactory.register("patchset-created")
class PatchsetCreatedEvent(GerritEvent):
    """ Gerrit "patchset-created" event. """

    def __init__(self, json_data):
        super(PatchsetCreatedEvent, self).__init__(json_data)
        try:
            # All three payload fields are mandatory; a missing one means
            # the event is malformed.
            self.change = Change(json_data["change"])
            self.patchset = Patchset(json_data["patchSet"])
            self.uploader = Account(json_data["uploader"])
        except KeyError as e:
            raise GerritError("PatchsetCreatedEvent: %s" % e)

    def __repr__(self):
        return u"<PatchsetCreatedEvent>: %s %s %s" % (self.change,
                                                      self.patchset,
                                                      self.uploader)
@GerritEventFactory.register("draft-published")
class DraftPublishedEvent(GerritEvent):
    """ Gerrit "draft-published" event. """

    def __init__(self, json_data):
        super(DraftPublishedEvent, self).__init__(json_data)
        try:
            # All three payload fields are mandatory for this event type.
            self.change = Change(json_data["change"])
            self.patchset = Patchset(json_data["patchSet"])
            self.uploader = Account(json_data["uploader"])
        except KeyError as e:
            raise GerritError("DraftPublishedEvent: %s" % e)

    def __repr__(self):
        return u"<DraftPublishedEvent>: %s %s %s" % (self.change,
                                                     self.patchset,
                                                     self.uploader)
@GerritEventFactory.register("comment-added")
class CommentAddedEvent(GerritEvent):
    """ Gerrit "comment-added" event. """

    def __init__(self, json_data):
        super(CommentAddedEvent, self).__init__(json_data)
        try:
            self.change = Change(json_data["change"])
            self.patchset = Patchset(json_data["patchSet"])
            self.author = Account(json_data["author"])
            # Approvals are optional in the payload; default to an empty list.
            self.approvals = [Approval(approval)
                              for approval in json_data.get("approvals", [])]
            self.comment = json_data["comment"]
        except (KeyError, ValueError) as e:
            raise GerritError("CommentAddedEvent: %s" % e)

    def __repr__(self):
        return u"<CommentAddedEvent>: %s %s %s" % (self.change,
                                                   self.patchset,
                                                   self.author)
@GerritEventFactory.register("change-merged")
class ChangeMergedEvent(GerritEvent):
    """ Gerrit "change-merged" event. """

    def __init__(self, json_data):
        super(ChangeMergedEvent, self).__init__(json_data)
        try:
            # All three payload fields are mandatory for this event type.
            self.change = Change(json_data["change"])
            self.patchset = Patchset(json_data["patchSet"])
            self.submitter = Account(json_data["submitter"])
        except KeyError as e:
            raise GerritError("ChangeMergedEvent: %s" % e)

    def __repr__(self):
        return u"<ChangeMergedEvent>: %s %s %s" % (self.change,
                                                   self.patchset,
                                                   self.submitter)
@GerritEventFactory.register("merge-failed")
class MergeFailedEvent(GerritEvent):
    """ Gerrit "merge-failed" event. """

    def __init__(self, json_data):
        super(MergeFailedEvent, self).__init__(json_data)
        try:
            self.change = Change(json_data["change"])
            self.patchset = Patchset(json_data["patchSet"])
            self.submitter = Account(json_data["submitter"])
            # `reason` is optional: the attribute only exists when the
            # payload carries it, so consumers must use hasattr/getattr.
            if 'reason' in json_data:
                self.reason = json_data["reason"]
        except KeyError as e:
            raise GerritError("MergeFailedEvent: %s" % e)

    def __repr__(self):
        return u"<MergeFailedEvent>: %s %s %s" % (self.change,
                                                  self.patchset,
                                                  self.submitter)
@GerritEventFactory.register("change-abandoned")
class ChangeAbandonedEvent(GerritEvent):
    """ Gerrit "change-abandoned" event. """

    def __init__(self, json_data):
        super(ChangeAbandonedEvent, self).__init__(json_data)
        try:
            self.change = Change(json_data["change"])
            self.abandoner = Account(json_data["abandoner"])
            # `reason` is optional: the attribute only exists when the
            # payload carries it, so consumers must use hasattr/getattr.
            if 'reason' in json_data:
                self.reason = json_data["reason"]
        except KeyError as e:
            raise GerritError("ChangeAbandonedEvent: %s" % e)

    def __repr__(self):
        return u"<ChangeAbandonedEvent>: %s %s" % (self.change,
                                                   self.abandoner)
@GerritEventFactory.register("change-restored")
class ChangeRestoredEvent(GerritEvent):
    """ Gerrit "change-restored" event. """

    def __init__(self, json_data):
        super(ChangeRestoredEvent, self).__init__(json_data)
        try:
            self.change = Change(json_data["change"])
            self.restorer = Account(json_data["restorer"])
            # `reason` is optional: the attribute only exists when the
            # payload carries it, so consumers must use hasattr/getattr.
            if 'reason' in json_data:
                self.reason = json_data["reason"]
        except KeyError as e:
            raise GerritError("ChangeRestoredEvent: %s" % e)

    def __repr__(self):
        return u"<ChangeRestoredEvent>: %s %s" % (self.change,
                                                  self.restorer)
@GerritEventFactory.register("ref-updated")
class RefUpdatedEvent(GerritEvent):
    """ Gerrit "ref-updated" event. """

    def __init__(self, json_data):
        super(RefUpdatedEvent, self).__init__(json_data)
        try:
            self.ref_update = RefUpdate(json_data["refUpdate"])
            # NOTE(review): unlike the direct Account(...) constructions in
            # sibling events, Account.from_json presumably tolerates a
            # missing "submitter" key -- confirm in models.Account.
            self.submitter = Account.from_json(json_data, "submitter")
        except KeyError as e:
            raise GerritError("RefUpdatedEvent: %s" % e)

    def __repr__(self):
        return u"<RefUpdatedEvent>: %s %s" % (self.ref_update, self.submitter)
@GerritEventFactory.register("reviewer-added")
class ReviewerAddedEvent(GerritEvent):
    """ Gerrit "reviewer-added" event. """

    def __init__(self, json_data):
        super(ReviewerAddedEvent, self).__init__(json_data)
        try:
            self.change = Change(json_data["change"])
            # NOTE(review): Patchset.from_json presumably tolerates a missing
            # "patchSet" key, unlike the direct Patchset(...) constructions
            # in sibling events -- confirm in models.Patchset.
            self.patchset = Patchset.from_json(json_data)
            self.reviewer = Account(json_data["reviewer"])
        except KeyError as e:
            raise GerritError("ReviewerAddedEvent: %s" % e)

    def __repr__(self):
        return u"<ReviewerAddedEvent>: %s %s %s" % (self.change,
                                                    self.patchset,
                                                    self.reviewer)
@GerritEventFactory.register("topic-changed")
class TopicChangedEvent(GerritEvent):
    """ Gerrit "topic-changed" event. """

    def __init__(self, json_data):
        super(TopicChangedEvent, self).__init__(json_data)
        try:
            self.change = Change(json_data["change"])
            self.changer = Account(json_data["changer"])
            # A missing "oldTopic" means the change previously had no
            # topic; default to the empty string in that case.
            self.oldtopic = json_data.get("oldTopic", "")
        except KeyError as err:
            raise GerritError("TopicChangedEvent: %s" % err)

    def __repr__(self):
        return (u"<TopicChangedEvent>: %s %s [%s]"
                % (self.change, self.changer, self.oldtopic))
| |
# Copyright 2011 Viewfinder Inc. All Rights Reserved.
"""Base object for building python classes to represent the data
in a database row.
See DBHashObject and DBRangeObject.
DBObject: base class of all data objects
"""
__author__ = 'spencer@emailscrubbed.com (Spencer Kimball)'
from functools import partial
import logging
from tornado import gen
from viewfinder.backend.base import util
from tornado.concurrent import return_future
from viewfinder.backend.db import db_client, query_parser, schema, vf_schema
from viewfinder.backend.db.versions import Version
class DBObject(object):
  """Base class for representing a row of data. Setting a column value
  to None will delete the column from the datastore on Update().
  """
  # Page size used by the Visit* helpers when walking an index.
  _VISIT_LIMIT = 50
  _schema = vf_schema.SCHEMA
  __slots__ = ['_columns', '_reindex']

  def __init__(self, columns=None):
    """The base datastore object class manages columns according to
    the database schema as defined by the subclass' schema table
    definition. However, derived classes can override the column set
    by specifying the "columns" argument. Columns of type IndexTermsColumn
    are ignored here. They will not create column values which can be
    accessed via the __{Get,Set}Property() methods.

    Creates a new python property for each column in the table for the
    data object according to the schema. This is only done once per class,
    as properties actually modify the class, not the instance.
    """
    self._columns = {}
    self._reindex = False
    columns = columns or self._table.GetColumns()
    for c in columns:
      if not isinstance(c, schema.IndexTermsColumn):
        self._columns[c.name] = c.NewInstance()

  @staticmethod
  def map_table_attributes(cls):
    """Class decorator which adds properties for all columns defined in a table.
    The class must define a class attribute _table.

    Example:
      @DBObject.map_table_attributes
      class Foo(DBRangeObject):
        _table = DBObject._schema.GetTable(vf_schema.FOO)
    """
    assert issubclass(cls, DBObject)
    for c in cls._table.GetColumns():
      if not isinstance(c, schema.IndexTermsColumn):
        # The immediately-invoked outer lambda binds c.name per iteration so
        # each property closes over its own column name, not the loop variable.
        fget = (lambda name: lambda self: self.__GetProperty(name))(c.name)
        fset = (lambda name: lambda self, value: self.__SetProperty(name, value))(c.name)
        setattr(cls, c.name, property(fget, fset))
    return cls

  def __dir__(self):
    return self._columns.keys()

  def __repr__(self):
    items = []
    for name, column in self._columns.iteritems():
      if column.Get() is None:
        continue
      value = repr(column.Get())
      if self.ShouldScrubColumn(name):
        # Avoid leaking sensitive column contents into logs.
        value = '...scrubbed %s bytes...' % len(value)
      items.append((name, value))
    return '{' + ', '.join('\'%s\': %s' % (n, v) for n, v in items) + '}'

  @classmethod
  def ShouldScrubColumn(cls, name):
    """Override to return True for columns that should not appear in logs."""
    return False

  def __GetProperty(self, name):
    return self._columns[name].Get()

  def __SetProperty(self, name, value):
    return self._columns[name].Set(value)

  def _asdict(self):
    """Returns the non-None columns as a plain dict keyed by column name."""
    return dict([(n, c.Get(asdict=True)) for n, c in self._columns.items()
                 if c.Get() is not None])

  def _Clone(self):
    # Construct new instance of this type and transfer raw in-memory column values.
    o = type(self)()
    for n, col in self._columns.items():
      o._columns[n]._value = col._value
    return o

  def _IsModified(self, name):
    """Returns whether or not a column value has been modified."""
    return self._columns[name].IsModified()

  def GetColNames(self):
    """Returns all column names."""
    return self._columns.keys()

  def GetModifiedColNames(self):
    """Returns all column names where the column value has been modified."""
    return [c.col_def.name for c in self._columns.values() if c.IsModified()]

  def SetReindexOnUpdate(self, reindex):
    """Sets the _reindex boolean. If set to True, index terms for all
    columns will be re-generated on update, regardless of whether or
    not the column has been modified. This is used during data
    migrations when the indexing algorithm for a particular column
    type (or types) has been modified. Only generates writes (and
    deletes for pre-existing, now obsolete terms) to the index table
    when terms for a column change.
    """
    self._reindex = reindex

  @classmethod
  def CreateFromKeywords(cls, **obj_dict):
    """Creates a new object of type 'cls' with attributes as specified
    in 'obj_dict'. The key columns must be present in the attribute
    dictionary. Returns new object instance.
    """
    # 'in' replaces the deprecated dict.has_key().
    assert cls._table.hash_key_col.name in obj_dict
    if cls._table.range_key_col:
      assert cls._table.range_key_col.name in obj_dict, (cls._table.range_key_col.name, obj_dict)
    o = cls()
    o.UpdateFromKeywords(**obj_dict)
    # Stamp newly created objects with the current schema version.
    o._columns[schema.Table.VERSION_COLUMN.name].Set(Version.GetCurrentVersion())
    return o

  def UpdateFromKeywords(self, **obj_dict):
    """Updates the contents of the object according to **obj_dict.

    Raises:
      KeyError: if a keyword does not name a column of this table.
    """
    for k, v in obj_dict.items():
      if k in self._columns:
        self._columns[k].Set(v)
      else:
        raise KeyError('column %s (value %r) not found in class %s' % (k, v, self.__class__))

  def HasMismatchedValues(self, mismatch_allowed_set=None, **obj_dict):
    """Check that each of the dictionary values matches what's in the object.
    The only keys that don't need to match are ones contained in the mismatch_allowed_set.

    Returns: True if mismatch found. Otherwise, False.
    """
    for k, v in obj_dict.items():
      if k in self._columns:
        if mismatch_allowed_set is None or k not in mismatch_allowed_set:
          if self._columns[k].Get(asdict=isinstance(v, dict)) != v:
            return True
    return False

  @return_future
  def Update(self, client, callback, expected=None, replace=True, return_col_names=False):
    """Updates or inserts the object. Only modified columns are
    updated. Updates the index terms first and finally the object, so
    the update operation, on retry, will be idempotent.

    'expected' are preconditions for attribute values for the update
    to succeed.

    If 'replace' is False, forces a conditional update which verifies
    that the primary key does not already exist in the datastore.

    If 'return_col_names' is True, 'callback' is invoked with a list
    of the modified column names.
    """
    mod_cols = [c for c in self._columns.values() if c.IsModified()]
    if return_col_names:
      callback = partial(callback, [c.col_def.name for c in mod_cols])
    if not mod_cols and not self._reindex:
      # Nothing to write; complete immediately.
      callback()
      return

    # Transform expected attributes dict to refer to column keys instead of names.
    if expected:
      expected = dict([(self._table.GetColumn(k).key, v) for k, v in expected.items()])
    else:
      expected = {}

    def _OnUpdate(result):
      # Mark all written columns clean only after the item write succeeds.
      for col in mod_cols:
        col.OnUpdate()
      callback()

    def _OnUpdateIndexTerms(term_attrs):
      attrs = dict()
      for c in mod_cols:
        update = c.Update()
        if update:
          attrs[c.col_def.key] = update
      if term_attrs:
        attrs.update(term_attrs)
      if not replace:
        # Require that the primary key does not already exist.
        expected[self._table.hash_key_col.key] = False
        if self._table.range_key_col:
          expected[self._table.range_key_col.key] = False
      client.UpdateItem(table=self._table.name, key=self.GetKey(),
                        attributes=attrs, expected=expected, callback=_OnUpdate)

    def _OnQueryIndexTerms(term_updates, result):
      old_dict = result.attributes or {}
      term_attrs = {}
      add_terms = {}  # dict of term dicts by term key
      del_terms = []  # list of term keys
      for name, update in term_updates.items():
        key = self._table.GetColumn(name).key + ':t'
        terms = set(update.value.keys()) if update.value else set()
        # Special check here; you cannot 'PUT' an empty set. Must 'DELETE'.
        if update.action == 'PUT' and not terms:
          term_attrs[key] = db_client.UpdateAttr(value=None, action='DELETE')
        else:
          term_attrs[key] = db_client.UpdateAttr(value=list(terms), action=update.action)
        # Compute which index terms to add and which to delete.
        if update.action == 'PUT':
          old_terms = set(old_dict.get(key, []))
          add_terms.update(dict([(t, update.value[t]) for t in terms.difference(old_terms)]))
          del_terms += old_terms.difference(terms)
        elif update.action == 'ADD':
          add_terms.update(update.value)
        elif update.action == 'DELETE':
          del_terms += terms
      # Add and delete all terms as necessary.
      with util.Barrier(partial(_OnUpdateIndexTerms, term_attrs)) as b:
        index_key = self._GetIndexKey()
        for term, data in add_terms.items():
          attrs = {'d': data} if data else {}
          client.PutItem(table=vf_schema.INDEX, callback=b.Callback(), attributes=attrs,
                        key=db_client.DBKey(hash_key=term, range_key=index_key))
        for term in del_terms:
          client.DeleteItem(table=vf_schema.INDEX, callback=b.Callback(),
                            key=db_client.DBKey(hash_key=term, range_key=index_key))

    if isinstance(self._table, schema.IndexedTable):
      # When reindexing, consider every populated column, not just modified ones.
      index_cols = mod_cols if not self._reindex else \
          [c for c in self._columns.values() if c.Get() is not None]
      index_cols = [c for c in index_cols if c.col_def.indexer]
      # Get a dictionary of term updates for the object.
      term_updates = dict([(c.col_def.name, c.IndexTerms()) for c in index_cols])
      col_names = [n for n, u in term_updates.items() if u.action == 'PUT']
      # For any term updates which are PUT, fetch the previous term sets.
      self._QueryIndexTerms(client, col_names=col_names,
                            callback=partial(_OnQueryIndexTerms, term_updates))
    else:
      _OnUpdateIndexTerms(None)

  def Delete(self, client, callback, expected=None):
    """Deletes all columns of the object and all associated index
    terms. Deletes the index terms first and finally the object, so
    the deletion operation, on retry, will be idempotent.

    'expected' are preconditions for attribute values for the delete
    to succeed.
    """
    # Transform expected attributes dict to refer to column keys instead of names.
    if expected:
      expected = dict([(self._table.GetColumn(k).key, v) for k, v in expected.items()])

    def _OnDelete(result):
      callback()

    def _OnDeleteIndexTerms():
      client.DeleteItem(table=self._table.name, key=self.GetKey(),
                        callback=_OnDelete, expected=expected)

    def _OnQueryIndexTerms(get_result):
      # Flatten the per-column term sets into one list of index hash keys.
      terms = [t for term_set in get_result.attributes.values() for t in term_set]
      with util.Barrier(_OnDeleteIndexTerms) as b:
        index_key = self._GetIndexKey()
        for term in terms:
          client.DeleteItem(table=vf_schema.INDEX,
                            key=db_client.DBKey(hash_key=term, range_key=index_key),
                            callback=b.Callback())

    if isinstance(self._table, schema.IndexedTable):
      assert expected is None, expected
      self._QueryIndexTerms(client, col_names=self._table.GetColumnNames(),
                            callback=_OnQueryIndexTerms)
    else:
      _OnDeleteIndexTerms()

  def _QueryIndexTerms(self, client, col_names, callback):
    """Queries the index terms for the specified columns. If no
    columns are specified, invokes callback immediately. When a column
    is indexed, the set of index terms produced is stored near the
    column value to be queried on modifications. Having access to the
    old set is especially crucial if the indexing algorithm changes.
    """
    idx_cols = [self._columns[name] for name in col_names if self._table.GetColumn(name).indexer]
    attrs = [c.col_def.key + ':t' for c in idx_cols]

    def _OnQuery(get_result):
      """Handle case of new object and a term attributes query failure."""
      if get_result is None:
        callback(db_client.GetResult(attributes=dict([(a, set()) for a in attrs]), read_units=0))
      else:
        callback(get_result)

    if attrs:
      client.GetItem(table=self._table.name, key=self.GetKey(), attributes=attrs,
                     must_exist=False, consistent_read=True, callback=_OnQuery)
    else:
      # This may happen if no indexed columns were updated. Simply
      # supply an empty attribute dict to the callback.
      callback(db_client.GetResult(attributes=dict(), read_units=0))

  def _GetIndexKey(self):
    """Returns the indexing key for this object by calling the
    _MakeIndexKey class method, which is overridden by derived classes.
    """
    return self._MakeIndexKey(self.GetKey())

  @classmethod
  def _CreateFromQuery(cls, **attr_dict):
    """Creates a new instance of cls and sets the values of its
    columns from 'attr_dict'. Returns the new object instance.
    """
    assert cls._table.hash_key_col.key in attr_dict, attr_dict
    if cls._table.range_key_col:
      assert cls._table.range_key_col.key in attr_dict, attr_dict
    o = cls()
    for k, v in attr_dict.items():
      name = cls._table.GetColumnName(k)
      o._columns[name].Load(v)
    return o

  @classmethod
  def Scan(cls, client, col_names, callback, limit=None, excl_start_key=None,
           scan_filter=None):
    """Scans the table up to a count of 'limit', starting at the hash
    key value provided in 'excl_start_key'. Invokes the callback with
    the list of elements and the last scanned key (list, last_key).
    The last_key will be None if the last item was scanned.

    'scan_filter' is a map from attribute name to a tuple of
      ([attr_value], ('EQ'|'LE'|'LT'|'GE'|'GT'|'BEGINS_WITH')),
      --or-- ([start_attr_value, end_attr_value], 'BETWEEN').
    """
    if limit == 0:
      callback(([], None))
      # BUG FIX: previously fell through and issued the scan anyway,
      # invoking 'callback' a second time with the scan results.
      return
    col_set = cls._CreateColumnSet(col_names)
    # Convert scan filter from attribute names to keys.
    if scan_filter:
      scan_filter = dict([(cls._table.GetColumn(k).key, v) for k, v in scan_filter.items()])

    def _OnScan(result):
      objs = []
      for item in result.items:
        objs.append(cls._CreateFromQuery(**item))
      callback((objs, result.last_key))

    client.Scan(table=cls._table.name, callback=_OnScan,
                attributes=[cls._table.GetColumn(name).key for name in col_set],
                limit=limit, excl_start_key=excl_start_key, scan_filter=scan_filter)

  @classmethod
  @gen.engine
  def BatchQuery(cls, client, keys, col_names, callback,
                 must_exist=True, consistent_read=False):
    """Queries for a batch of items identified by DBKey objects in the 'keys' array. Projects
    the specified columns (or all columns if col_names==None). If 'must_exist' is False, then
    return None for each item that does not exist in the database.
    """
    col_set = cls._CreateColumnSet(col_names)
    request = db_client.BatchGetRequest(keys=keys,
                                        attributes=[cls._table.GetColumn(name).key
                                                    for name in col_set],
                                        consistent_read=consistent_read)
    result = yield gen.Task(client.BatchGetItem,
                            batch_dict={cls._table.name: request},
                            must_exist=must_exist)
    result_objects = []
    for item in result[cls._table.name].items:
      if item is not None:
        result_objects.append(cls._CreateFromQuery(**item))
      else:
        # Preserve positional correspondence with 'keys' for missing items.
        result_objects.append(None)
    callback(result_objects)

  @classmethod
  def KeyQuery(cls, client, key, col_names, callback,
               must_exist=True, consistent_read=False):
    """Queries the specified columns (or all columns if
    col_names==None), using key as the object hash key.
    """
    col_set = cls._CreateColumnSet(col_names)

    def _OnQuery(result):
      o = None
      if result and result.attributes:
        o = cls._CreateFromQuery(**result.attributes)
      callback(o)

    client.GetItem(table=cls._table.name, key=key,
                   attributes=[cls._table.GetColumn(name).key for name in col_set],
                   must_exist=must_exist, consistent_read=consistent_read,
                   callback=_OnQuery)

  @classmethod
  def IndexQueryKeys(cls, client, bound_query_str, callback,
                     start_index_key=None, end_index_key=None,
                     limit=50, consistent_read=False):
    """Returns a sequence of object keys to 'callback' resulting from
    execution of 'bound_query_str'.
    """
    def _OnQueryKeys(index_keys):
      callback([cls._ParseIndexKey(index_key) for index_key in index_keys])

    try:
      start_key = cls._MakeIndexKey(start_index_key) if start_index_key is not None else None
      end_key = cls._MakeIndexKey(end_index_key) if end_index_key is not None else None
      query, param_dict = query_parser.CompileQuery(cls._schema, bound_query_str)
      query.Evaluate(client,
                     callback=_OnQueryKeys,
                     start_key=start_key,
                     end_key=end_key,
                     limit=limit,
                     consistent_read=consistent_read,
                     param_dict=param_dict)
    except Exception:
      # Narrowed from a bare 'except:' so process-exit exceptions propagate.
      logging.exception('query evaluates to empty: ' + str(bound_query_str))
      callback([])

  @classmethod
  @gen.engine
  def IndexQuery(cls, client, bound_query_str, col_names, callback,
                 start_index_key=None, end_index_key=None,
                 limit=50, consistent_read=False):
    """Returns a sequence of Objects resulting from the execution of
    'query' as the first parameter to 'callback'. Only the columns
    specified in 'col_names' are queried, or all columns if None.
    """
    try:
      start_key = cls._MakeIndexKey(start_index_key) if start_index_key is not None else None
      end_key = cls._MakeIndexKey(end_index_key) if end_index_key is not None else None
      query, param_dict = query_parser.CompileQuery(cls._schema, bound_query_str)
      index_keys = yield gen.Task(query.Evaluate,
                                  client,
                                  start_key=start_key,
                                  end_key=end_key,
                                  limit=limit,
                                  consistent_read=consistent_read,
                                  param_dict=param_dict)
    except Exception:
      # Narrowed from a bare 'except:' so process-exit exceptions propagate.
      logging.exception('query evaluates to empty: ' + str(bound_query_str))
      callback([])
      return

    query_keys = [cls._ParseIndexKey(index_key) for index_key in index_keys]
    objects = yield gen.Task(cls._GetIndexedObjectClass().BatchQuery,
                             client,
                             query_keys,
                             col_names,
                             must_exist=False,
                             consistent_read=consistent_read)
    # Compact results
    compacted_result = [obj for obj in objects if obj is not None]
    callback(compacted_result)

  @classmethod
  def VisitIndexKeys(cls, client, bound_query_str, visitor, callback,
                     start_index_key=None, end_index_key=None, consistent_read=False):
    """Query for all object keys in the specified key range. For each key,
    invoke the "visitor" function:

      visitor(object_key, visit_callback)

    When the visitor function has completed the visit, it should invoke
    "visit_callback" with no parameters. Once all object keys have been
    visited, then "callback" is invoked.
    """
    def _OnQueryKeys(index_keys):
      if len(index_keys) < DBObject._VISIT_LIMIT:
        # Final page; finish after visiting these keys.
        barrier_callback = callback
      else:
        # Chain another page starting after the last key of this one.
        barrier_callback = partial(DBObject.VisitIndexKeys, client, bound_query_str, visitor, callback,
                                   start_index_key=index_keys[-1], end_index_key=end_index_key,
                                   consistent_read=consistent_read)
      with util.Barrier(barrier_callback) as b:
        for index_key in index_keys:
          visitor(index_key, callback=b.Callback())

    cls.IndexQueryKeys(client, bound_query_str, _OnQueryKeys, limit=DBObject._VISIT_LIMIT,
                       start_index_key=start_index_key, end_index_key=end_index_key,
                       consistent_read=consistent_read)

  @classmethod
  def VisitIndex(cls, client, bound_query_str, visitor, col_names, callback,
                 start_index_key=None, end_index_key=None, consistent_read=False):
    """Query for all objects in the specified key range. For each object,
    invoke the "visitor" function:

      visitor(object, visit_callback)

    When the visitor function has completed the visit, it should invoke
    "visit_callback" with no parameters. Once all objects have been
    visited, then "callback" is invoked.
    """
    def _OnQuery(objects):
      if len(objects) < DBObject._VISIT_LIMIT:
        barrier_callback = callback
      else:
        barrier_callback = partial(DBObject.VisitIndex, client, bound_query_str, visitor, col_names, callback,
                                   start_index_key=objects[-1]._GetIndexKey(), end_index_key=end_index_key,
                                   consistent_read=consistent_read)
      with util.Barrier(barrier_callback) as b:
        # Renamed from 'object' to avoid shadowing the builtin.
        for obj in objects:
          visitor(obj, b.Callback())

    cls.IndexQuery(client, bound_query_str, col_names, _OnQuery, limit=DBObject._VISIT_LIMIT,
                   start_index_key=start_index_key, end_index_key=end_index_key,
                   consistent_read=consistent_read)

  @classmethod
  def _CreateColumnSet(cls, col_names):
    """Creates a set of column names from the 'col_names' list (all columns in the table if
    col_names == None). Ensures that the hash key, range key, and version column are always
    included in the set.
    """
    col_set = set(col_names or cls._table.GetColumnNames())
    col_set.add(cls._table.hash_key_col.name)
    if cls._table.range_key_col:
      col_set.add(cls._table.range_key_col.name)
    col_set.add(schema.Table.VERSION_COLUMN.name)
    return col_set
| |
import httplib as http
from django.utils.translation import ugettext_lazy as _
from rest_framework import status
from rest_framework.exceptions import APIException, AuthenticationFailed
def get_resource_object_member(error_key, context):
    """Return which JSON API member ('relationships' or 'attributes') an
    error for *error_key* belongs under, based on the view's serializer.
    """
    # Deferred import keeps this module importable without the api package loaded.
    from api.base.serializers import RelationshipField
    field = context['view'].serializer_class._declared_fields.get(error_key, None)
    if field and isinstance(field, RelationshipField):
        return 'relationships'
    # If the field cannot be found (read/write operations may use different
    # serializers), assume the error was in 'attributes' by default.
    return 'attributes'
def dict_error_formatting(errors, context, index=None):
    """
    Formats all dictionary error messages for both single and bulk requests.

    :param dict errors: mapping of error key -> description (string or list).
    :param context: DRF view context, used to resolve the JSON API member.
    :param index: position of the item within a bulk request, or None.
    :return: list of JSON API error objects.
    """
    formatted_error_list = []
    # Error objects may have the following members. Title and id removed to avoid
    # clash with "title" and "id" field errors.
    top_level_error_keys = ['links', 'status', 'code', 'detail', 'source', 'meta']
    # Resource objects must contain at least 'id' and 'type'
    resource_object_identifiers = ['type', 'id']
    if index is None:
        index = ''
    else:
        index = str(index) + '/'
    for error_key, error_description in errors.items():
        if isinstance(error_description, basestring):
            error_description = [error_description]
        if error_key in top_level_error_keys:
            formatted_error_list.extend({error_key: description} for description in error_description)
        elif error_key in resource_object_identifiers:
            formatted_error_list.extend([{'source': {'pointer': '/data/{}'.format(index) + error_key}, 'detail': reason} for reason in error_description])
        elif error_key == 'non_field_errors':
            # BUG FIX: this was a dict comprehension ({'detail': d for d in ...})
            # which collapsed all descriptions into a single dict, keeping only
            # the last one; emit one error object per description instead.
            formatted_error_list.extend([{'detail': description} for description in error_description])
        else:
            formatted_error_list.extend([{'source': {'pointer': '/data/{}{}/'.format(index, get_resource_object_member(error_key, context)) + error_key}, 'detail': reason} for reason in error_description])
    return formatted_error_list
def json_api_exception_handler(exc, context):
    """
    Custom exception handler that returns errors object as an array
    """
    # We're deliberately not stripping html from exception detail.
    # This creates potential vulnerabilities to script injection attacks
    # when returning raw user input into error messages.
    #
    # Fortunately, Django's templating language strips markup by default,
    # but if our frontend changes we may lose that protection.
    # TODO: write tests to ensure our html frontend strips html

    # Import inside method to avoid errors when the OSF is loaded without Django
    from rest_framework.views import exception_handler
    response = exception_handler(exc, context)

    errors = []
    if response:
        body = response.data
        if isinstance(exc, TwoFactorRequiredError):
            # Tell the client an OTP code must be supplied.
            response['X-OSF-OTP'] = 'required; app'
        if isinstance(exc, JSONAPIException):
            errors.append({'source': exc.source or {}, 'detail': exc.detail, 'meta': exc.meta or {}})
        elif isinstance(body, dict):
            errors.extend(dict_error_formatting(body, context, index=None))
        else:
            if isinstance(body, basestring):
                body = [body]
            for idx, error in enumerate(body):
                if isinstance(error, dict):
                    errors.extend(dict_error_formatting(error, context, index=idx))
                else:
                    errors.append({'detail': error})
        response.data = {'errors': errors}
    return response
class EndpointNotImplementedError(APIException):
    """Raised for endpoints that exist but are not yet implemented (HTTP 501)."""
    status_code = status.HTTP_501_NOT_IMPLEMENTED
    default_detail = _('This endpoint is not yet implemented.')
class ServiceUnavailableError(APIException):
    """Raised when the service cannot currently handle the request (HTTP 503)."""
    status_code = status.HTTP_503_SERVICE_UNAVAILABLE
    default_detail = _('Service is unavailable at this time.')
class JSONAPIException(APIException):
    """Inherits from the base DRF API exception and adds extra metadata to support JSONAPI error objects

    :param str detail: a human-readable explanation specific to this occurrence of the problem
    :param dict source: A dictionary containing references to the source of the error.
        See http://jsonapi.org/format/#error-objects.
        Example: ``source={'pointer': '/data/attributes/title'}``
    :param dict meta: A meta object containing non-standard meta info about the error.
    """
    status_code = status.HTTP_400_BAD_REQUEST

    def __init__(self, detail=None, source=None, meta=None):
        # Stash JSON API members before delegating detail handling to DRF.
        self.source = source
        self.meta = meta
        super(JSONAPIException, self).__init__(detail=detail)
# Custom Exceptions the Django Rest Framework does not support
class Gone(JSONAPIException):
    """Raised when a requested resource is no longer available (HTTP 410)."""
    status_code = status.HTTP_410_GONE
    # NOTE(review): unlike sibling exceptions, this detail is not wrapped in
    # ugettext_lazy (_); the parentheses are redundant. Confirm whether
    # translation was intended here.
    default_detail = ('The requested resource is no longer available.')
def UserGone(user):
    """Build a Gone exception for a removed user, carrying naming metadata."""
    meta = {
        'full_name': user.fullname,
        'family_name': user.family_name,
        'given_name': user.given_name,
        'middle_names': user.middle_names,
        'profile_image': user.profile_image_url(),
    }
    return Gone(detail='The requested user is no longer available.', meta=meta)
class Conflict(JSONAPIException):
    """Raised when the request conflicts with server state (HTTP 409)."""
    status_code = status.HTTP_409_CONFLICT
    # NOTE(review): not wrapped in ugettext_lazy (_) like sibling exceptions;
    # the parentheses are redundant. Confirm whether translation was intended.
    default_detail = ('Resource identifier does not match server endpoint.')
class JSONAPIParameterException(JSONAPIException):
    """JSON API error whose source is a query-string parameter."""

    def __init__(self, detail=None, parameter=None):
        super(JSONAPIParameterException, self).__init__(
            detail=detail,
            source={'parameter': parameter},
        )
class JSONAPIAttributeException(JSONAPIException):
    """JSON API error whose source is a resource attribute."""

    def __init__(self, detail=None, attribute=None):
        super(JSONAPIAttributeException, self).__init__(
            detail=detail,
            source={'pointer': '/data/attributes/{}'.format(attribute)},
        )
class InvalidQueryStringError(JSONAPIParameterException):
    """Raised when client passes an invalid value to a query string parameter."""
    default_detail = 'Query string contains an invalid value.'
    status_code = http.BAD_REQUEST
class InvalidFilterOperator(JSONAPIParameterException):
    """Raised when client passes an invalid operator to a query param filter."""
    status_code = http.BAD_REQUEST

    def __init__(self, detail=None, value=None, valid_operators=('eq', 'lt', 'lte', 'gt', 'gte', 'contains', 'icontains')):
        # Only synthesize a message when a bad value was given and no
        # explicit detail overrides it.
        if value and not detail:
            detail = "Value '{0}' is not a supported filter operator; use one of {1}.".format(
                value,
                ', '.join(valid_operators),
            )
        super(InvalidFilterOperator, self).__init__(detail=detail, parameter='filter')
class InvalidFilterValue(JSONAPIParameterException):
    """Raised when client passes an invalid value to a query param filter."""
    status_code = http.BAD_REQUEST

    def __init__(self, detail=None, value=None, field_type=None):
        if not detail:
            # Assemble the message in pieces; field_type is optional.
            pieces = ["Value '{0}' is not valid".format(value)]
            if field_type:
                pieces.append(' for a filter on type {0}'.format(field_type))
            pieces.append('.')
            detail = ''.join(pieces)
        super(InvalidFilterValue, self).__init__(detail=detail, parameter='filter')
class InvalidFilterError(JSONAPIParameterException):
    """Raised when a client passes a malformed filter in the query string."""
    default_detail = _('Query string contains a malformed filter.')
    status_code = http.BAD_REQUEST

    def __init__(self, detail=None):
        # The offending parameter is always the 'filter' query param.
        super(InvalidFilterError, self).__init__(detail=detail, parameter='filter')
class InvalidFilterComparisonType(JSONAPIParameterException):
    """Raised when client tries to filter on a field that is not a date or number type"""
    default_detail = _('Comparison operators are only supported for dates and numbers.')
    status_code = http.BAD_REQUEST
class InvalidFilterMatchType(JSONAPIParameterException):
    """Raised when client tries to do a match filter on a field that is not a string or a list"""
    default_detail = _('Match operators are only supported for strings and lists.')
    status_code = http.BAD_REQUEST
class InvalidFilterFieldError(JSONAPIParameterException):
    """Raised when client tries to filter on a field that is not supported."""
    default_detail = _('Query contained one or more filters for invalid fields.')
    status_code = http.BAD_REQUEST

    def __init__(self, detail=None, parameter=None, value=None):
        # Synthesize a message only when a value is supplied without
        # an explicit detail.
        if value and not detail:
            detail = "Value '{}' is not a filterable field.".format(value)
        super(InvalidFilterFieldError, self).__init__(detail=detail, parameter=parameter)
class UnconfirmedAccountError(APIException):
    """Raised when API credentials belong to an account that has not been confirmed."""
    status_code = 400
    default_detail = _('Please confirm your account before using the API.')
class UnclaimedAccountError(APIException):
    """Raised when API credentials belong to an account that has not been claimed."""
    status_code = 400
    default_detail = _('Please claim your account before using the API.')
class DeactivatedAccountError(APIException):
    """Raised when API credentials belong to a deactivated account."""
    status_code = 400
    default_detail = _('Making API requests with credentials associated with a deactivated account is not allowed.')
class MergedAccountError(APIException):
    """Raised when API credentials belong to an account that has been merged."""
    status_code = 400
    default_detail = _('Making API requests with credentials associated with a merged account is not allowed.')
class InvalidAccountError(APIException):
    """Raised when API credentials belong to an invalid account."""
    status_code = 400
    default_detail = _('Making API requests with credentials associated with an invalid account is not allowed.')
class TwoFactorRequiredError(AuthenticationFailed):
    """Authentication failure indicating a two-factor OTP code is required.

    The exception handler adds an 'X-OSF-OTP' response header when this is raised.
    """
    default_detail = _('Must specify two-factor authentication OTP code.')
    pass
class InvalidModelValueError(JSONAPIException):
    """Raised when a write request contains a value the model rejects (HTTP 400)."""
    status_code = 400
    default_detail = _('Invalid value in POST/PUT/PATCH request.')
class TargetNotSupportedError(Exception):
    """Raised if a TargetField is used for a resource that isn't supported."""
    pass
class RelationshipPostMakesNoChanges(Exception):
    """Raised when a post is on a relationship that already exists, so view can return a 204"""
    pass
class NonDescendantNodeError(APIException):
    """Raised when a client attempts to associate a non-descendant node with a view only link."""
    status_code = 400
    default_detail = _('The node {0} cannot be affiliated with this View Only Link because the node you\'re trying to affiliate is not descended from the node that the View Only Link is attached to.')

    def __init__(self, node_id, detail=None):
        # Fall back to the templated default message when no detail is given.
        message = detail or self.default_detail.format(node_id)
        super(NonDescendantNodeError, self).__init__(detail=message)
| |
# -*- mode: python; coding: utf-8 -*-
import os
import re
import logging
import datetime
import mimetypes
import time
import jinja2
import email.Utils
from Cookie import BaseCookie
from routes import url_for
from google.appengine.ext.webapp import Response
from google.appengine.api import memcache, users
from drydrop.lib.json import json_encode
from drydrop_handler import DRY_ROOT, APP_ROOT, APP_ID, VER_ID, LOCAL
from drydrop.app.models import *
from drydrop.app.core.appceptions import *
from drydrop.lib.utils import *
from drydrop.lib.jinja_loaders import InternalTemplateLoader
from drydrop.app.helpers.buster import cache_buster
class AbstractController(object):
def __init__(self, request, response, handler):
self.request = request
self.response = response
self.handler = handler
self.view = {'params': request.params }
self.params = request.params
self.emited = False
self.cookies = request.cookies
def render(self, template_name):
env = jinja2.Environment(loader = InternalTemplateLoader(os.path.join(DRY_ROOT, 'app', 'views')))
try:
template = env.get_template(template_name)
except jinja2.TemplateNotFound:
raise jinja2.TemplateNotFound(template_name)
content = template.render(self.view)
if LOCAL:
content = cache_buster(content)
self.response.out.write(content)
def before_action(self):
pass
def after_action(self):
pass
def render_view(self, file_name, params = None):
if params:
self.view.update(params)
self.response.headers['Content-Type'] = 'text/html'
self.render(file_name)
self.emited = True
def render_text(self, text):
self.response.headers['Content-Type'] = 'text/html'
if LOCAL:
text = cache_buster(text)
self.response.out.write(text)
self.emited = True
def render_html(self, html, params = None):
if params:
self.view.update(params)
if LOCAL:
html = cache_buster(html)
self.response.out.write(html)
self.emited = True
def render_xml(self, xml):
self.response.headers['Content-Type'] = 'text/xml'
self.render(file_name)
self.emited = True
def render_json(self, json):
self.response.headers['Content-Type'] = "application/json"
self.response.out.write(json)
self.emited = True
def redirect_to(self, url):
"""Redirects to a specified url"""
# self.handler.redirect(url)
# self.emited = True
# raise PageRedirect, (url)
# mrizka delala problemy pri claimovani openid
m = re.match(r'^(.*)#.*?$', url)
if m: url = m.group(1)
logging.info("Redirecting to: %s" % url)
# send the redirect! we use a meta because appengine bombs out sometimes with long redirect urls
self.response.out.write("<html><head><meta http-equiv=\"refresh\" content=\"0;url=%s\"></head><body></body></html>" % (url,))
self.emited = True
raise PageRedirect, (url)
def notfound(self, code, message = None):
self.response.set_status(code, str(message))
if message is None: message = Response.http_status_message(code)
self.view['message'] = message
self.view['code'] = code
self.render_view('system/notfound.html')
def error(self, code, message = None):
self.response.set_status(code, str(message))
if message is None: message = Response.http_status_message(code)
self.view['message'] = message
self.view['code'] = code
self.render_view('system/error.html')
class CookieController(AbstractController):
    # Cookie helpers layered on AbstractController; cookies are emitted as
    # raw Set-Cookie headers built with BaseCookie.
    def set_cookie(self, key, value='', max_age=None,
                   path='/', domain=None, secure=None, httponly=False,
                   version=None, comment=None):
        """
        Set (add) a cookie for the response
        """
        cookies = BaseCookie()
        cookies[key] = value
        # Copy each supplied attribute onto the morsel; None and False both
        # mean "omit this attribute" (so httponly=False emits nothing).
        for var_name, var_value in [
            ('max-age', max_age),
            ('path', path),
            ('domain', domain),
            ('secure', secure),
            ('HttpOnly', httponly),
            ('version', version),
            ('comment', comment),
            ]:
            if var_value is not None and var_value is not False:
                cookies[key][var_name] = str(var_value)
        if max_age is not None:
            # Mirror max-age into `expires` for clients that ignore Max-Age.
            # NOTE(review): `expires` conventionally takes an HTTP date, not
            # a seconds count -- confirm consumers accept this value.
            cookies[key]['expires'] = max_age
        header_value = cookies[key].output(header='').lstrip()
        # Append via the private header list so repeated calls produce
        # multiple Set-Cookie headers instead of replacing one another.
        self.response.headers._headers.append(('Set-Cookie', header_value))
    def delete_cookie(self, key, path='/', domain=None):
        """
        Delete a cookie from the client. Note that path and domain must match
        how the cookie was originally set.
        This sets the cookie to the empty string, and max_age=0 so
        that it should expire immediately.
        """
        self.set_cookie(key, '', path=path, domain=domain, max_age=0)
    def unset_cookie(self, key):
        """
        Unset a cookie with the given name (remove it from the
        response). If there are multiple cookies (e.g., two cookies
        with the same name and different paths or domains), all such
        cookies will be deleted.
        """
        existing = self.response.headers.get_all('Set-Cookie')
        if not existing:
            raise KeyError("No cookies at all have been set")
        # Drop every Set-Cookie header, then re-add all cookies except the
        # one(s) being unset.
        del self.response.headers['Set-Cookie']
        found = False
        for header in existing:
            cookies = BaseCookie()
            cookies.load(header)
            if key in cookies:
                found = True
                del cookies[key]
            header = cookies.output(header='').lstrip()
            if header:
                self.response.headers.add('Set-Cookie', header)
        if not found:
            raise KeyError("No cookie has been set with the name %r" % key)
class BaseController(CookieController):
    # Shared behaviour: static-file serving, caching headers, JSON(P) output.
    SESSION_MEMCACHE_TIMEOUT = 0  # 0 = memcache default (no explicit expiry)
    CACHE_TIMEOUT = 7200  # seconds of client-side caching for static files
    def serve_static_file(self, base_path, path, more = None, more_placeholder = None, filter=None):
        """Serve a file from disk, optionally injecting extra content.

        `more` is substituted for `more_placeholder` when one is given,
        otherwise appended to the output.  `filter` (NOTE: shadows the
        builtin) may transform the raw file data before serving.
        """
        file_path = os.path.join(base_path, path)
        try:
            logging.debug('Serving static file %s', file_path)
            data = universal_read(file_path)
            if filter: data = filter(data, base_path, path)
            mime_type, encoding = mimetypes.guess_type(path)
            self.response.headers['Content-Type'] = mime_type
            self.set_caching_headers(self.CACHE_TIMEOUT)
            if more and more_placeholder:
                data = data.replace(more_placeholder, more)
            self.response.out.write(data)
            if more and not more_placeholder:
                self.response.out.write(more)
        except IOError:
            # Missing/unreadable file maps to a 404 page.
            return self.error(404, '404 File %s Not Found' % path)
    def set_caching_headers(self, max_age, public = True):
        """Emit Expires/Cache-Control headers valid for `max_age` seconds."""
        # email.Utils is the Python 2 spelling (email.utils on Python 3).
        self.response.headers['Expires'] = email.Utils.formatdate(time.time() + max_age, usegmt=True)
        cache_control = []
        if public: cache_control.append('public')
        cache_control.append('max-age=%d' % max_age)
        self.response.headers['Cache-Control'] = ', '.join(cache_control)
    def render_json_response(self, data):
        """Serialize `data` and emit it as JSON, JSONP or debug HTML."""
        json = json_encode(data, nice=LOCAL)
        is_test = self.params.get('test')
        if is_test:
            # this branch is here for testing purposes
            return self.render_html("<html><body><pre>%s</pre></body></html>" % json)
        callback = self.params.get('callback')
        if callback:
            # JSONP style
            # NOTE(review): the literal "__callback__" is emitted regardless
            # of the supplied callback name -- presumably the client rewrites
            # it; verify against the JS consumer.
            self.render_text("__callback__(%s);" % json)
        else:
            # classic style
            self.render_json(json)
    def format_json_response(self, message, code=1):
        # Standard {status, message} envelope; code 0 means success.
        return {
            "status": code,
            "message": message,
        }
    def json_error(self, message, code=1):
        # Non-zero status marks failure.
        self.render_json_response(self.format_json_response(message, code))
    def json_ok(self, message = "OK"):
        # Status 0 marks success.
        self.render_json_response(self.format_json_response(message, 0))
class SessionController(BaseController):
    # Session handling backed by the datastore with a memcache layer in front.
    SESSION_KEY = 'session'  # request-parameter / cookie name
    SESSION_COOKIE_TIMEOUT_IN_SECONDS = 60*60*24*14  # two weeks
    session = None
    def _session_memcache_id(self, session_id):
        # Namespaced memcache key for a session id.
        return "session-"+session_id
    def create_session(self, user_id):
        """Create and persist a fresh session for `user_id`."""
        self.session = Session(user_id=user_id)
        self.session.save()
        logging.debug("Created session: %s", self.session.get_id())
    def load_session(self):
        """Load the current session, caching the result on `self.session`.

        Lookup order: request parameter, cookie, memcache, datastore.
        Returns the session or None.
        """
        if self.session: return self.session
        logging.debug("Loading session ...")
        # look for session id in request and cookies
        session_id = self.request.get(self.SESSION_KEY)
        if not session_id: session_id = self.cookies.get(self.SESSION_KEY)
        if not session_id:
            logging.debug("session_id not found in %s", self.cookies)
            return None
        # hit memcache first
        cache_id = self._session_memcache_id(session_id)
        self.session = memcache.get(cache_id)
        if self.session:
            logging.debug("Session found in memcache %s", self.session)
            return self.session
        # hit database if not in memcache
        self.session = Session.get(session_id)
        if self.session:
            logging.debug("Session loaded from store %s", self.session)
            # Warm the cache for subsequent requests.
            memcache.set(cache_id, self.session, self.SESSION_MEMCACHE_TIMEOUT)
            return self.session
        # session not found
        return None
    def store_session(self):
        """Persist the session and refresh the cookie + memcache copies."""
        assert self.session
        cache_id = self._session_memcache_id(self.session.get_id())
        logging.debug("Storing session (%s) into memcache as %s" % (self.session, cache_id))
        self.set_cookie(self.SESSION_KEY,
            str(self.session.key()),
            max_age=self.SESSION_COOKIE_TIMEOUT_IN_SECONDS
        )
        memcache.set(cache_id, self.session, self.SESSION_MEMCACHE_TIMEOUT)
        self.session.save()
    def clear_session_cookie(self):
        # Expire the session cookie on the client.
        logging.debug("Clearing session cookie (%s)" % self.SESSION_KEY)
        self.delete_cookie(self.SESSION_KEY)
    def clear_session(self):
        """Remove the session from memcache and from the datastore."""
        if not self.session:
            if not self.load_session(): return
        logging.debug("Clearing session %s", self.session)
        cache_id = self._session_memcache_id(self.session.get_id())
        memcache.delete(cache_id)
        self.session.delete()
class AuthenticatedController(SessionController):
    """Controller that requires a signed-in user before every action."""
    def __init__(self, *args, **kwargs):
        super(AuthenticatedController, self).__init__(*args, **kwargs)
        self.user = None
    def authenticate_user(self, url=None):
        """Resolve the current user, or redirect to the login page."""
        self.user = users.get_current_user()
        if self.user:
            logging.info('Authenticated as user %s', self.user)
            return None
        # Not signed in: bounce through the login flow and come back here.
        return self.redirect_to(users.create_login_url(url or self.request.url))
    def before_action(self, *args, **kwargs):
        parent_handled = super(AuthenticatedController, self).before_action(*args, **kwargs)
        if parent_handled:
            return True
        return self.authenticate_user()
| |
from __future__ import unicode_literals
import django
from django import VERSION
from django.contrib.contenttypes.models import ContentType
from django.db import IntegrityError, models, transaction
from django.db.models.query import QuerySet
from django.template.defaultfilters import slugify as default_slugify
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext
from taggit.utils import _get_field
try:
from unidecode import unidecode
except ImportError:
unidecode = lambda tag: tag
try:
from django.contrib.contenttypes.fields import GenericForeignKey
except ImportError: # django < 1.7
from django.contrib.contenttypes.generic import GenericForeignKey
# Fallback for Django versions without transaction.atomic (< 1.6): emulate
# it with a savepoint-based context manager.
try:
    atomic = transaction.atomic
except AttributeError:
    from contextlib import contextmanager
    @contextmanager
    def atomic(using=None):
        # Roll back to the savepoint on IntegrityError, otherwise commit it.
        sid = transaction.savepoint(using=using)
        try:
            yield
        except IntegrityError:
            transaction.savepoint_rollback(sid, using=using)
            raise
        else:
            transaction.savepoint_commit(sid, using=using)
@python_2_unicode_compatible
class TagBase(models.Model):
    """Abstract base for tag models: a unique name plus a unique slug."""
    name = models.CharField(verbose_name=_('Name'), unique=True, max_length=100)
    slug = models.SlugField(verbose_name=_('Slug'), unique=True, max_length=100)
    def __str__(self):
        return self.name
    class Meta:
        abstract = True
    def save(self, *args, **kwargs):
        # Only auto-generate a slug for brand-new, slug-less tags; everything
        # else saves straight through (see the trailing else).
        if not self.pk and not self.slug:
            self.slug = self.slugify(self.name)
            from django.db import router
            using = kwargs.get("using") or router.db_for_write(
                type(self), instance=self)
            # Make sure we write to the same db for all attempted writes,
            # with a multi-master setup, theoretically we could try to
            # write and rollback on different DBs
            kwargs["using"] = using
            # Be opportunistic and try to save the tag, this should work for
            # most cases ;)
            try:
                with atomic(using=using):
                    res = super(TagBase, self).save(*args, **kwargs)
                return res
            except IntegrityError:
                pass
            # Now try to find existing slugs with similar names
            slugs = set(
                self.__class__._default_manager
                .filter(slug__startswith=self.slug)
                .values_list('slug', flat=True)
            )
            # Probe slug_1, slug_2, ... until an unused slug is found.
            i = 1
            while True:
                slug = self.slugify(self.name, i)
                if slug not in slugs:
                    self.slug = slug
                    # We purposely ignore concurrency issues here for now.
                    # (That is, till we found a nice solution...)
                    return super(TagBase, self).save(*args, **kwargs)
                i += 1
        else:
            return super(TagBase, self).save(*args, **kwargs)
    def slugify(self, tag, i=None):
        # Transliterate via unidecode when available; append "_<i>" on retry.
        slug = default_slugify(unidecode(tag))
        if i is not None:
            slug += "_%d" % i
        return slug
class Tag(TagBase):
    # Default concrete tag model; all behaviour lives in TagBase.
    class Meta:
        verbose_name = _("Tag")
        verbose_name_plural = _("Tags")
@python_2_unicode_compatible
class ItemBase(models.Model):
    # Abstract base for "tagged item" through-models linking a tag to an
    # arbitrary object.
    def __str__(self):
        return ugettext("%(object)s tagged with %(tag)s") % {
            "object": self.content_object,
            "tag": self.tag
        }
    class Meta:
        abstract = True
    @classmethod
    def tag_model(cls):
        # Model on the other side of the `tag` FK.  Django >= 1.9 moved the
        # metadata from field.rel to field.remote_field.
        field = _get_field(cls, 'tag')
        return field.remote_field.model if VERSION >= (1, 9) else field.rel.to
    @classmethod
    def tag_relname(cls):
        # related_name of the `tag` FK, version-dependent as above.
        field = _get_field(cls, 'tag')
        return field.remote_field.related_name if VERSION >= (1, 9) else field.rel.related_name
    @classmethod
    def lookup_kwargs(cls, instance):
        # Filter kwargs selecting the through-rows of one instance.
        return {
            'content_object': instance
        }
    @classmethod
    def bulk_lookup_kwargs(cls, instances):
        # Filter kwargs selecting the through-rows of many instances.
        return {
            "content_object__in": instances,
        }
class TaggedItemBase(ItemBase):
    # Through-model base with a direct ForeignKey to Tag.
    tag = models.ForeignKey(Tag, related_name="%(app_label)s_%(class)s_items", on_delete=models.CASCADE)
    class Meta:
        abstract = True
    @classmethod
    def tags_for(cls, model, instance=None, **extra_filters):
        """Return the tags used by `model`, or by a single `instance`."""
        kwargs = extra_filters or {}
        if instance is not None:
            kwargs.update({
                '%s__content_object' % cls.tag_relname(): instance
            })
            return cls.tag_model().objects.filter(**kwargs)
        # No instance: every tag attached to at least one object, de-duplicated.
        kwargs.update({
            '%s__content_object__isnull' % cls.tag_relname(): False
        })
        return cls.tag_model().objects.filter(**kwargs).distinct()
class CommonGenericTaggedItemBase(ItemBase):
    # Through-model base using a generic foreign key (content_type +
    # object_id, with object_id declared by subclasses).
    content_type = models.ForeignKey(
        ContentType,
        on_delete=models.CASCADE,
        verbose_name=_('Content type'),
        related_name="%(app_label)s_%(class)s_tagged_items"
    )
    content_object = GenericForeignKey()
    class Meta:
        abstract = True
    @classmethod
    def lookup_kwargs(cls, instance):
        # Generic-FK lookups go through object_id/content_type explicitly.
        return {
            'object_id': instance.pk,
            'content_type': ContentType.objects.get_for_model(instance)
        }
    @classmethod
    def bulk_lookup_kwargs(cls, instances):
        if isinstance(instances, QuerySet):
            # Can do a real object_id IN (SELECT ..) query.
            return {
                "object_id__in": instances,
                "content_type": ContentType.objects.get_for_model(instances.model),
            }
        else:
            # TODO: instances[0], can we assume there are instances.
            return {
                "object_id__in": [instance.pk for instance in instances],
                "content_type": ContentType.objects.get_for_model(instances[0]),
            }
    @classmethod
    def tags_for(cls, model, instance=None, **extra_filters):
        """Return the tags used by `model`, optionally for one `instance`."""
        ct = ContentType.objects.get_for_model(model)
        kwargs = {
            "%s__content_type" % cls.tag_relname(): ct
        }
        if instance is not None:
            kwargs["%s__object_id" % cls.tag_relname()] = instance.pk
        if extra_filters:
            kwargs.update(extra_filters)
        return cls.tag_model().objects.filter(**kwargs).distinct()
class GenericTaggedItemBase(CommonGenericTaggedItemBase):
    # Generic through-model keyed by an integer object id.
    object_id = models.IntegerField(verbose_name=_('Object id'), db_index=True)
    class Meta:
        abstract = True
# models.UUIDField only exists on Django >= 1.8.
if VERSION >= (1, 8):
    class GenericUUIDTaggedItemBase(CommonGenericTaggedItemBase):
        # Generic through-model keyed by a UUID object id.
        object_id = models.UUIDField(verbose_name=_('Object id'), db_index=True)
        class Meta:
            abstract = True
class TaggedItem(GenericTaggedItemBase, TaggedItemBase):
    # Default concrete through-model: generic FK plus a plain Tag FK.
    class Meta:
        verbose_name = _("Tagged Item")
        verbose_name_plural = _("Tagged Items")
        if django.VERSION >= (1, 5):
            # index_together was introduced in Django 1.5.
            index_together = [
                ["content_type", "object_id"],
            ]
| |
# -*- coding: utf-8 -*-
"""
ambari_jinja2.debug
~~~~~~~~~~~~
Implements the debug interface for Jinja. This module does some pretty
ugly stuff with the Python traceback system in order to achieve tracebacks
with correct line numbers, locals and contents.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import sys
import traceback
from ambari_jinja2.utils import CodeType, missing, internal_code
from ambari_jinja2.exceptions import TemplateSyntaxError
# Figure out which raise syntax this interpreter supports: on Python 2 the
# old-style `raise E, arg` compiles (and raises TypeError when executed);
# on Python 3 it is a SyntaxError.  The selected snippet is exec'd later
# by fake_exc_info to re-raise with a fabricated frame.
try:
    exec "raise TypeError, 'foo'"
except SyntaxError:
    raise_helper = 'raise __jinja_exception__[1]'
except TypeError:
    raise_helper = 'raise __jinja_exception__[0], __jinja_exception__[1]'
class TracebackFrameProxy(object):
    """Proxies a traceback frame."""
    def __init__(self, tb):
        self.tb = tb
    def _set_tb_next(self, next):
        # Also patch the underlying traceback's tb_next (via ctypes or the
        # compiled debugsupport helper) so re-raising follows the rewritten
        # chain, not just this proxy.
        if tb_set_next is not None:
            tb_set_next(self.tb, next and next.tb or None)
        self._tb_next = next
    def _get_tb_next(self):
        return self._tb_next
    tb_next = property(_get_tb_next, _set_tb_next)
    # Remove the accessor functions from the class namespace; only the
    # property remains.
    del _get_tb_next, _set_tb_next
    @property
    def is_jinja_frame(self):
        # Template modules inject __jinja_template__ into their globals.
        return '__jinja_template__' in self.tb.tb_frame.f_globals
    def __getattr__(self, name):
        # Everything else is forwarded to the real traceback object.
        return getattr(self.tb, name)
class ProcessedTraceback(object):
    """Holds a Jinja preprocessed traceback for printing or reraising."""
    def __init__(self, exc_type, exc_value, frames):
        assert frames, 'no frames for this traceback?'
        self.exc_type = exc_type
        self.exc_value = exc_value
        self.frames = frames
    def chain_frames(self):
        """Chains the frames.  Requires ctypes or the debugsupport extension."""
        prev_tb = None
        for tb in self.frames:
            if prev_tb is not None:
                prev_tb.tb_next = tb
            prev_tb = tb
        # Terminate the chain at the last frame.
        prev_tb.tb_next = None
    def render_as_text(self, limit=None):
        """Return a string with the traceback."""
        lines = traceback.format_exception(self.exc_type, self.exc_value,
                                           self.frames[0], limit=limit)
        return ''.join(lines).rstrip()
    def render_as_html(self, full=False):
        """Return a unicode string with the traceback as rendered HTML."""
        from ambari_jinja2.debugrenderer import render_traceback
        # .decode(): render_as_text returns a byte string on Python 2.
        return u'%s\n\n<!--\n%s\n-->' % (
            render_traceback(self, full=full),
            self.render_as_text().decode('utf-8', 'replace')
        )
    @property
    def is_template_syntax_error(self):
        """`True` if this is a template syntax error."""
        return isinstance(self.exc_value, TemplateSyntaxError)
    @property
    def exc_info(self):
        """Exception info tuple with a proxy around the frame objects."""
        return self.exc_type, self.exc_value, self.frames[0]
    @property
    def standard_exc_info(self):
        """Standard python exc_info for re-raising"""
        return self.exc_type, self.exc_value, self.frames[0].tb
def make_traceback(exc_info, source_hint=None):
    """Creates a processed traceback object from the exc_info."""
    exc_value = exc_info[1]
    # Syntax errors are rewritten in place and keep all frames; for every
    # other exception the first (internal) frame is skipped.
    initial_skip = 1
    if isinstance(exc_value, TemplateSyntaxError):
        exc_info = translate_syntax_error(exc_value, source_hint)
        initial_skip = 0
    return translate_exception(exc_info, initial_skip)
def translate_syntax_error(error, source=None):
    """Rewrites a syntax error to please traceback systems."""
    # Attach the template source and mark the error as already translated.
    error.source = source
    error.translated = True
    filename = error.filename
    if filename is None:
        filename = '<unknown>'
    return fake_exc_info((error.__class__, error, None), filename, error.lineno)
def translate_exception(exc_info, initial_skip=0):
    """If passed an exc_info it will automatically rewrite the exceptions
    all the way down to the correct line numbers and frames.
    """
    tb = exc_info[2]
    frames = []
    # skip some internal frames if wanted
    for x in xrange(initial_skip):
        if tb is not None:
            tb = tb.tb_next
    initial_tb = tb
    while tb is not None:
        # skip frames decorated with @internalcode. These are internal
        # calls we can't avoid and that are useless in template debugging
        # output.
        if tb.tb_frame.f_code in internal_code:
            tb = tb.tb_next
            continue
        # save a reference to the next frame if we override the current
        # one with a faked one.
        next = tb.tb_next
        # fake template exceptions
        template = tb.tb_frame.f_globals.get('__jinja_template__')
        if template is not None:
            # Map the compiled-code line number back to the template line
            # and substitute a fabricated frame pointing at the template.
            lineno = template.get_corresponding_lineno(tb.tb_lineno)
            tb = fake_exc_info(exc_info[:2] + (tb,), template.filename,
                               lineno)[2]
        frames.append(TracebackFrameProxy(tb))
        tb = next
    # if we don't have any exceptions in the frames left, we have to
    # reraise it unchanged.
    # XXX: can we backup here? when could this happen?
    if not frames:
        # Python 2 three-argument raise: re-raise with the original traceback.
        raise exc_info[0], exc_info[1], exc_info[2]
    traceback = ProcessedTraceback(exc_info[0], exc_info[1], frames)
    if tb_set_next is not None:
        # Only chain the real traceback objects when patching is available.
        traceback.chain_frames()
    return traceback
def fake_exc_info(exc_info, filename, lineno):
    """Helper for `translate_exception`."""
    exc_type, exc_value, tb = exc_info
    # figure the real context out
    if tb is not None:
        real_locals = tb.tb_frame.f_locals.copy()
        ctx = real_locals.get('context')
        if ctx:
            locals = ctx.get_all()
        else:
            locals = {}
        # Expose the template-local `l_foo` variables under their plain names.
        for name, value in real_locals.iteritems():
            if name.startswith('l_') and value is not missing:
                locals[name[2:]] = value
        # if there is a local called __jinja_exception__, we get
        # rid of it to not break the debug functionality.
        locals.pop('__jinja_exception__', None)
    else:
        locals = {}
    # assemble fake globals we need
    globals = {
        '__name__': filename,
        '__file__': filename,
        '__jinja_exception__': exc_info[:2],
        # we don't want to keep the reference to the template around
        # to not cause circular dependencies, but we mark it as Jinja
        # frame for the ProcessedTraceback
        '__jinja_template__': None
    }
    # and fake the exception
    # Padding with newlines makes the raise happen at the desired lineno.
    code = compile('\n' * (lineno - 1) + raise_helper, filename, 'exec')
    # if it's possible, change the name of the code. This won't work
    # on some python environments such as google appengine
    try:
        if tb is None:
            location = 'template'
        else:
            function = tb.tb_frame.f_code.co_name
            if function == 'root':
                location = 'top-level template code'
            elif function.startswith('block_'):
                location = 'block "%s"' % function[6:]
            else:
                location = 'template'
        # Rebuild the code object with a friendlier co_name (Python 2
        # CodeType signature).
        code = CodeType(0, code.co_nlocals, code.co_stacksize,
                        code.co_flags, code.co_code, code.co_consts,
                        code.co_names, code.co_varnames, filename,
                        location, code.co_firstlineno,
                        code.co_lnotab, (), ())
    except:
        pass
    # execute the code and catch the new traceback
    try:
        exec code in globals, locals
    except:
        exc_info = sys.exc_info()
        # Skip the exec frame itself.
        new_tb = exc_info[2].tb_next
    # return without this frame
    return exc_info[:2] + (new_tb,)
def _init_ugly_crap():
    """This function implements a few ugly things so that we can patch the
    traceback objects. The function returned allows resetting `tb_next` on
    any python traceback object.
    """
    import ctypes
    from types import TracebackType
    # figure out size of _Py_ssize_t
    # (presence of Py_InitModule4_64 indicates a 64-bit Python 2 build)
    if hasattr(ctypes.pythonapi, 'Py_InitModule4_64'):
        _Py_ssize_t = ctypes.c_int64
    else:
        _Py_ssize_t = ctypes.c_int
    # regular python
    class _PyObject(ctypes.Structure):
        pass
    _PyObject._fields_ = [
        ('ob_refcnt', _Py_ssize_t),
        ('ob_type', ctypes.POINTER(_PyObject))
    ]
    # python with trace
    # (debug builds prepend the _ob_next/_ob_prev tracking pointers)
    if hasattr(sys, 'getobjects'):
        class _PyObject(ctypes.Structure):
            pass
        _PyObject._fields_ = [
            ('_ob_next', ctypes.POINTER(_PyObject)),
            ('_ob_prev', ctypes.POINTER(_PyObject)),
            ('ob_refcnt', _Py_ssize_t),
            ('ob_type', ctypes.POINTER(_PyObject))
        ]
    # Mirror of the CPython 2 PyTracebackObject layout.
    class _Traceback(_PyObject):
        pass
    _Traceback._fields_ = [
        ('tb_next', ctypes.POINTER(_Traceback)),
        ('tb_frame', ctypes.POINTER(_PyObject)),
        ('tb_lasti', ctypes.c_int),
        ('tb_lineno', ctypes.c_int)
    ]
    def tb_set_next(tb, next):
        """Set the tb_next attribute of a traceback object."""
        if not (isinstance(tb, TracebackType) and
                (next is None or isinstance(next, TracebackType))):
            raise TypeError('tb_set_next arguments must be traceback objects')
        obj = _Traceback.from_address(id(tb))
        if tb.tb_next is not None:
            # Drop the reference to the traceback currently linked in.
            old = _Traceback.from_address(id(tb.tb_next))
            old.ob_refcnt -= 1
        if next is None:
            obj.tb_next = ctypes.POINTER(_Traceback)()
        else:
            # Manually bump the refcount since we bypass the interpreter.
            next = _Traceback.from_address(id(next))
            next.ob_refcnt += 1
            obj.tb_next = ctypes.pointer(next)
    return tb_set_next
# try to get a tb_set_next implementation: prefer the compiled debugsupport
# extension, fall back to the ctypes hack, else disable traceback patching.
try:
    from ambari_jinja2._debugsupport import tb_set_next
except ImportError:
    try:
        tb_set_next = _init_ugly_crap()
    except:
        tb_set_next = None
del _init_ugly_crap
| |
from lixian_query import ExactQuery
from lixian_query import SearchQuery
from lixian_query import query
from lixian_query import bt_query
import lixian_hash_bt
import lixian_url
import lixian_encoding
import re
##################################################
# queries
##################################################
class SingleTaskQuery(ExactQuery):
    """Resolve a single task by its numeric id."""
    def __init__(self, base, t):
        super(SingleTaskQuery, self).__init__(base)
        self.id = t['id']
    def query_once(self):
        # The task is known to exist; fetch it directly.
        return [self.base.get_task_by_id(self.id)]
    def query_search(self):
        found = self.base.find_task_by_id(self.id)
        if found:
            return [found]
        return []
@query(priority=1)
@bt_query(priority=1)
def single_id_processor(base, x):
    """Match a bare numeric task id such as '123' or '123/'."""
    if not re.match(r'^\d+/?$', x):
        return
    task = base.find_task_by_id(x.rstrip('/'))
    if task:
        return SingleTaskQuery(base, task)
##################################################
class MultipleTasksQuery(ExactQuery):
    # Resolves a fixed list of tasks (e.g. produced by an id-range query).
    def __init__(self, base, tasks):
        super(MultipleTasksQuery, self).__init__(base)
        self.tasks = tasks
    def query_once(self):
        # NOTE: Python 2 map() -- returns a list of the refreshed tasks.
        return map(self.base.get_task_by_id, (t['id'] for t in self.tasks))
    def query_search(self):
        # Drop tasks that can no longer be found (filter(bool, ...)).
        return filter(bool, map(self.base.find_task_by_id, (t['id'] for t in self.tasks)))
@query(priority=1)
@bt_query(priority=1)
def range_id_processor(base, x):
    """Handle task ranges such as '10-20' ('20-10' yields reversed order)."""
    m = re.match(r'^(\d+)-(\d+)$', x)
    if not m:
        return
    begin = int(m.group(1))
    end = int(m.group(2))
    tasks = base.get_tasks()
    # Match against the display number ('#'), not the task id.
    if begin <= end:
        found = filter(lambda x: begin <= x['#'] <= end, tasks)
    else:
        # Descending range.  NOTE: relies on Python 2 filter() returning a
        # list (reversed() cannot consume an iterator, and the `if found`
        # emptiness test below only works for lists).
        found = reversed(filter(lambda x: end <= x['#'] <= begin, tasks))
    if found:
        return MultipleTasksQuery(base, found)
##################################################
class SubTaskQuery(ExactQuery):
    # Resolves a bt task restricted to a chosen subset of its files.
    def __init__(self, base, t, subs):
        super(SubTaskQuery, self).__init__(base)
        self.task = t
        self.subs = subs
    def query_once(self):
        # Copy the task dict so the cached original is not mutated.
        task = dict(self.base.get_task_by_id(self.task['id']))
        # NOTE(review): the return value is unused -- presumably get_files()
        # has a needed side effect; confirm before removing this call.
        files = self.base.get_files(task)
        task['files'] = self.subs
        return [task]
    def query_search(self):
        task = self.base.find_task_by_id(self.task['id'])
        if not task:
            return []
        task = dict(task)
        # NOTE(review): same unused-return/side-effect caveat as query_once.
        files = self.base.get_files(task)
        task['files'] = self.subs
        return [task]
@query(priority=2)
@bt_query(priority=2)
def sub_id_processor(base, x):
    """Match '<task-id>/<file-filter>' and select files inside a bt task."""
    text = lixian_encoding.from_native(x)
    matched = re.match(r'^(\d+)/(.+)$', text)
    if not matched:
        return
    task_id, sub_id = matched.groups()
    task = base.find_task_by_id(task_id)
    if not task:
        return
    # Sub-file selection only makes sense for BitTorrent tasks.
    assert task['type'] == 'bt', 'task %s is not a bt task' % lixian_encoding.to_native(task['name'])
    files = base.get_files(task)
    import lixian_filter_expr
    selected = lixian_filter_expr.filter_expr(files, sub_id)
    return SubTaskQuery(base, task, list(selected))
##################################################
class BtHashQuery(ExactQuery):
    """Resolve a bt task by its 40-hex-digit info hash (bt:// optional)."""
    def __init__(self, base, x):
        super(BtHashQuery, self).__init__(base)
        matched = re.match(r'^(?:bt://)?([0-9a-f]{40})$', x, flags=re.I)
        self.hash = matched.group(1).lower()
        self.task = self.base.find_task_by_hash(self.hash)
    def prepare(self):
        # Queue the task on demand when it is not known yet.
        if not self.task:
            self.base.add_bt_task_by_hash(self.hash)
    def query_once(self):
        task = self.base.find_task_by_hash(self.hash)
        assert task, 'Task not found: bt://' + self.hash
        return [task]
    def query_search(self):
        task = self.base.find_task_by_hash(self.hash)
        if task:
            return [task]
        return []
@query(priority=1)
@bt_query(priority=1)
def bt_hash_processor(base, x):
    """Match an info hash, with or without the bt:// prefix."""
    if not re.match(r'^(bt://)?[0-9a-f]{40}$', x, flags=re.I):
        return
    return BtHashQuery(base, x)
##################################################
class LocalBtQuery(ExactQuery):
    # Resolves a task from a local .torrent file.
    def __init__(self, base, x):
        super(LocalBtQuery, self).__init__(base)
        self.path = x
        # Info hash computed from the torrent file on disk.
        self.hash = lixian_hash_bt.info_hash(self.path)
        self.task = self.base.find_task_by_hash(self.hash)
        # Keep the raw torrent bytes for task creation in prepare().
        with open(self.path, 'rb') as stream:
            self.torrent = stream.read()
    def prepare(self):
        # Upload the torrent if no matching task exists yet.
        if not self.task:
            self.base.add_bt_task_by_content(self.torrent, self.path)
    def query_once(self):
        t = self.base.find_task_by_hash(self.hash)
        assert t, 'Task not found: bt://' + self.hash
        return [t]
    def query_search(self):
        t = self.base.find_task_by_hash(self.hash)
        return [t] if t else []
@query(priority=1)
@bt_query(priority=1)
def local_bt_processor(base, x):
    """Match a path to an existing local .torrent file."""
    import os.path
    if not x.lower().endswith('.torrent'):
        return
    if not os.path.exists(x):
        return
    return LocalBtQuery(base, x)
##################################################
class MagnetQuery(ExactQuery):
    # Resolves a task from a magnet link.
    def __init__(self, base, x):
        super(MagnetQuery, self).__init__(base)
        self.url = x
        # NOTE: .encode('hex') is Python 2 only (binascii.hexlify elsewhere).
        self.hash = lixian_hash_bt.magnet_to_infohash(x).encode('hex').lower()
        self.task = self.base.find_task_by_hash(self.hash)
    def prepare(self):
        # Queue the magnet when no matching task exists yet.
        if not self.task:
            self.base.add_magnet_task(self.url)
    def query_once(self):
        t = self.base.find_task_by_hash(self.hash)
        assert t, 'Task not found: bt://' + self.hash
        return [t]
    def query_search(self):
        t = self.base.find_task_by_hash(self.hash)
        return [t] if t else []
@query(priority=4)
@bt_query(priority=4)
def magnet_processor(base, url):
    """Match magnet: links."""
    # The original anchored regex r'magnet:' is equivalent to startswith.
    if url.startswith('magnet:'):
        return MagnetQuery(base, url)
##################################################
class BatchUrlsQuery(ExactQuery):
    # Resolves several URLs at once (used by link-extension plugins).
    def __init__(self, base, urls):
        super(BatchUrlsQuery, self).__init__(base)
        self.urls = urls
    def prepare(self):
        # Queue every URL that is not yet a known task.
        for url in self.urls:
            if not self.base.find_task_by_url(url):
                self.base.add_url_task(url)
    def query_once(self):
        # NOTE: Python 2 map() -- returns a list.
        return map(self.base.get_task_by_url, self.urls)
    def query_search(self):
        # Keep only URLs that resolve to an existing task.
        return filter(bool, map(self.base.find_task_by_url, self.urls))
@query(priority=6)
@bt_query(priority=6)
def url_extend_processor(base, url):
    """Expand one link into many via parser plugins (e.g. list pages)."""
    import lixian_plugins.parsers
    extended = lixian_plugins.parsers.try_to_extend_link(url)
    if extended:
        # NOTE: Python 2 map() returns a list; BatchUrlsQuery iterates the
        # urls more than once, so a lazy iterator would not work here.
        extended = map(lixian_plugins.parsers.to_url, extended)
        return BatchUrlsQuery(base, extended)
##################################################
class UrlQuery(ExactQuery):
    """Resolve a task by a (possibly masked) download URL."""
    def __init__(self, base, x):
        super(UrlQuery, self).__init__(base)
        self.url = lixian_url.url_unmask(x)
        self.task = self.base.find_task_by_url(self.url)
    def prepare(self):
        # Queue the URL when it is not a known task yet.
        if not self.task:
            self.base.add_url_task(self.url)
    def query_once(self):
        task = self.base.find_task_by_url(self.url)
        assert task, 'Task not found: ' + self.url
        return [task]
    def query_search(self):
        task = self.base.find_task_by_url(self.url)
        if task:
            return [task]
        return []
@query(priority=7)
def url_processor(base, url):
    """Match anything that looks like a scheme-qualified URL."""
    looks_like_url = re.match(r'\w+://', url) is not None
    if not looks_like_url:
        return None
    return UrlQuery(base, url)
##################################################
class BtUrlQuery(ExactQuery):
    """Resolve a bt task from a downloaded torrent (url + raw content)."""
    def __init__(self, base, url, torrent):
        super(BtUrlQuery, self).__init__(base)
        self.url = url
        self.torrent = torrent
        self.hash = lixian_hash_bt.info_hash_from_content(self.torrent)
        self.task = self.base.find_task_by_hash(self.hash)
    def prepare(self):
        # Create the task from the torrent content when unknown.
        if not self.task:
            self.base.add_bt_task_by_content(self.torrent, self.url)
    def query_once(self):
        task = self.base.find_task_by_hash(self.hash)
        assert task, 'Task not found: bt://' + self.hash
        return [task]
    def query_search(self):
        task = self.base.find_task_by_hash(self.hash)
        if task:
            return [task]
        return []
@bt_query(priority=7)
def bt_url_processor(base, url):
    """Download a remote .torrent over HTTP and build a query from it."""
    if not re.match(r'http://', url):
        return
    # Python 2 print statement; urllib2 is Python 2 only as well.
    print 'Downloading torrent file from', url
    import urllib2
    response = urllib2.urlopen(url, timeout=60)
    torrent = response.read()
    if response.info().get('Content-Encoding') == 'gzip':
        # Some servers gzip the torrent; decompress it in memory.
        def ungzip(s):
            from StringIO import StringIO
            import gzip
            buffer = StringIO(s)
            f = gzip.GzipFile(fileobj=buffer)
            return f.read()
        torrent = ungzip(torrent)
    return BtUrlQuery(base, url, torrent)
##################################################
class FilterQuery(SearchQuery):
    """Search tasks through a plugin-provided task-filter keyword."""
    def __init__(self, base, x):
        super(FilterQuery, self).__init__(base)
        self.keyword = x
    def query_search(self):
        import lixian_plugins.filters
        matched = lixian_plugins.filters.filter_tasks(self.base.get_tasks(), self.keyword)
        # A registered filter must always yield a (possibly empty) list.
        assert matched is not None
        return matched
@query(priority=8)
@bt_query(priority=8)
def filter_processor(base, x):
    """Match keywords registered as task filters by plugins."""
    import lixian_plugins.filters
    if not lixian_plugins.filters.has_task_filter(x):
        return None
    return FilterQuery(base, x)
##################################################
class DefaultQuery(SearchQuery):
    """Fallback query: case-insensitive substring match on task names."""
    def __init__(self, base, x):
        super(DefaultQuery, self).__init__(base)
        self.text = lixian_encoding.from_native(x)
    def query_search(self):
        """Return tasks whose name contains the search text (case-insensitive)."""
        # Hoist the loop-invariant lowercasing out of the filter, and use
        # the idiomatic `in` operator instead of find(...) != -1.
        keyword = self.text.lower()
        # filter() is kept so the Python 2 list-return semantics are preserved.
        return filter(lambda t: keyword in t['name'].lower(), self.base.get_tasks())
@query(priority=9)
@bt_query(priority=9)
def default_processor(base, x):
    """Lowest-priority fallback: treat the argument as a name search."""
    return DefaultQuery(base, x)
| |
import warnings
import numpy as np
import pandas as pd
from statsmodels.base import model
import statsmodels.base.wrapper as wrap
from statsmodels.tools.sm_exceptions import ConvergenceWarning
class _DimReductionRegression(model.Model):
    """
    A base class for dimension reduction regression methods.
    """
    def __init__(self, endog, exog, **kwargs):
        super(_DimReductionRegression, self).__init__(endog, exog, **kwargs)
    def _prep(self, n_slice):
        # Sort rows by endog, whiten exog, and split into `n_slice` slices.
        # Sets self.wexog (whitened exog), self._covxr (Cholesky factor of
        # the exog covariance) and self._split_wexog (list of row blocks).
        # Sort the data by endog
        ii = np.argsort(self.endog)
        x = self.exog[ii, :]
        # Whiten the data
        x -= x.mean(0)
        covx = np.dot(x.T, x) / x.shape[0]
        covxr = np.linalg.cholesky(covx)
        # x <- x @ inv(covxr).T so the whitened data have identity covariance.
        x = np.linalg.solve(covxr, x.T).T
        self.wexog = x
        self._covxr = covxr
        # Split the data into slices
        self._split_wexog = np.array_split(x, n_slice)
class SlicedInverseReg(_DimReductionRegression):
"""
Sliced Inverse Regression (SIR)
Parameters
----------
endog : array_like (1d)
The dependent variable
exog : array_like (2d)
The covariates
References
----------
KC Li (1991). Sliced inverse regression for dimension reduction.
JASA 86, 316-342.
"""
    def fit(self, slice_n=20, **kwargs):
        """
        Estimate the EDR space using Sliced Inverse Regression.
        Parameters
        ----------
        slice_n : int, optional
            Target number of observations per slice
        """
        # Sample size per slice
        if len(kwargs) > 0:
            msg = "SIR.fit does not take any extra keyword arguments"
            warnings.warn(msg)
        # Number of slices
        n_slice = self.exog.shape[0] // slice_n
        self._prep(n_slice)
        # Per-slice means of the whitened exog and the slice sizes.
        mn = [z.mean(0) for z in self._split_wexog]
        n = [z.shape[0] for z in self._split_wexog]
        mn = np.asarray(mn)
        n = np.asarray(n)
        # Estimate Cov E[X | Y=y]
        mnc = np.dot(mn.T, n[:, None] * mn) / n.sum()
        # Eigen-decompose and order eigenvalues/vectors largest-first.
        a, b = np.linalg.eigh(mnc)
        jj = np.argsort(-a)
        a = a[jj]
        b = b[:, jj]
        # Undo the whitening so the directions live on the original scale.
        params = np.linalg.solve(self._covxr.T, b)
        results = DimReductionResults(self, params, eigs=a)
        return DimReductionResultsWrapper(results)
def _regularized_objective(self, A):
# The objective function for regularized SIR
p = self.k_vars
covx = self._covx
mn = self._slice_means
ph = self._slice_props
v = 0
A = np.reshape(A, (p, self.ndim))
# The penalty
for k in range(self.ndim):
u = np.dot(self.pen_mat, A[:, k])
v += np.sum(u * u)
# The SIR objective function
covxa = np.dot(covx, A)
q, _ = np.linalg.qr(covxa)
qd = np.dot(q, np.dot(q.T, mn.T))
qu = mn.T - qd
v += np.dot(ph, (qu * qu).sum(0))
return v
    def _regularized_grad(self, A):
        # The gradient of the objective function for regularized SIR
        p = self.k_vars
        ndim = self.ndim
        covx = self._covx
        n_slice = self.n_slice
        mn = self._slice_means
        ph = self._slice_props
        A = A.reshape((p, ndim))
        # Penalty gradient
        gr = 2 * np.dot(self.pen_mat.T, np.dot(self.pen_mat, A))
        # (Redundant: A is already (p, ndim) after the reshape above.)
        A = A.reshape((p, ndim))
        covxa = np.dot(covx, A)
        covx2a = np.dot(covx, covxa)
        Q = np.dot(covxa.T, covxa)
        Qi = np.linalg.inv(Q)
        jm = np.zeros((p, ndim))
        qcv = np.linalg.solve(Q, covxa.T)
        # ft[q*ndim + r] holds the derivative of the projection matrix with
        # respect to element (q, r) of A; jm is the reused unit matrix.
        ft = [None] * (p * ndim)
        for q in range(p):
            for r in range(ndim):
                jm *= 0
                jm[q, r] = 1
                umat = np.dot(covx2a.T, jm)
                umat += umat.T
                umat = -np.dot(Qi, np.dot(umat, Qi))
                fmat = np.dot(np.dot(covx, jm), qcv)
                fmat += np.dot(covxa, np.dot(umat, covxa.T))
                fmat += np.dot(covxa, np.linalg.solve(Q, np.dot(jm.T, covx)))
                ft[q*ndim + r] = fmat
        # Residual of each slice mean after projection onto span(covxa).
        ch = np.linalg.solve(Q, np.dot(covxa.T, mn.T))
        cu = mn - np.dot(covxa, ch).T
        # Accumulate the data-fit part of the gradient slice by slice.
        for i in range(n_slice):
            u = cu[i, :]
            v = mn[i, :]
            for q in range(p):
                for r in range(ndim):
                    f = np.dot(u, np.dot(ft[q*ndim + r], v))
                    gr[q, r] -= 2 * ph[i] * f
        return gr.ravel()
def fit_regularized(self, ndim=1, pen_mat=None, slice_n=20, maxiter=100,
gtol=1e-3, **kwargs):
"""
Estimate the EDR space using regularized SIR.
Parameters
----------
ndim : int
The number of EDR directions to estimate
pen_mat : array_like
A 2d array such that the squared Frobenius norm of
`dot(pen_mat, dirs)`` is added to the objective function,
where `dirs` is an orthogonal array whose columns span
the estimated EDR space.
slice_n : int, optional
Target number of observations per slice
maxiter :int
The maximum number of iterations for estimating the EDR
space.
gtol : float
If the norm of the gradient of the objective function
falls below this value, the algorithm has converged.
Returns
-------
A results class instance.
Notes
-----
If each row of `exog` can be viewed as containing the values of a
function evaluated at equally-spaced locations, then setting the
rows of `pen_mat` to [[1, -2, 1, ...], [0, 1, -2, 1, ..], ...]
will give smooth EDR coefficients. This is a form of "functional
SIR" using the squared second derivative as a penalty.
References
----------
L. Ferre, A.F. Yao (2003). Functional sliced inverse regression
analysis. Statistics: a journal of theoretical and applied
statistics 37(6) 475-488.
"""
if len(kwargs) > 0:
msg = "SIR.fit_regularized does not take keyword arguments"
warnings.warn(msg)
if pen_mat is None:
raise ValueError("pen_mat is a required argument")
start_params = kwargs.get("start_params", None)
# Sample size per slice
slice_n = kwargs.get("slice_n", 20)
# Number of slices
n_slice = self.exog.shape[0] // slice_n
# Sort the data by endog
ii = np.argsort(self.endog)
x = self.exog[ii, :]
x -= x.mean(0)
covx = np.cov(x.T)
# Split the data into slices
split_exog = np.array_split(x, n_slice)
mn = [z.mean(0) for z in split_exog]
n = [z.shape[0] for z in split_exog]
mn = np.asarray(mn)
n = np.asarray(n)
self._slice_props = n / n.sum()
self.ndim = ndim
self.k_vars = covx.shape[0]
self.pen_mat = pen_mat
self._covx = covx
self.n_slice = n_slice
self._slice_means = mn
if start_params is None:
params = np.zeros((self.k_vars, ndim))
params[0:ndim, 0:ndim] = np.eye(ndim)
params = params
else:
if start_params.shape[1] != ndim:
msg = "Shape of start_params is not compatible with ndim"
raise ValueError(msg)
params = start_params
params, _, cnvrg = _grass_opt(params, self._regularized_objective,
self._regularized_grad, maxiter, gtol)
if not cnvrg:
g = self._regularized_grad(params.ravel())
gn = np.sqrt(np.dot(g, g))
msg = "SIR.fit_regularized did not converge, |g|=%f" % gn
warnings.warn(msg)
results = DimReductionResults(self, params, eigs=None)
return DimReductionResultsWrapper(results)
class PrincipalHessianDirections(_DimReductionRegression):
    """
    Principal Hessian Directions (PHD)

    Parameters
    ----------
    endog : array_like (1d)
        The dependent variable
    exog : array_like (2d)
        The covariates

    Returns
    -------
    A model instance.  Call `fit` to obtain a results instance,
    from which the estimated parameters can be obtained.

    References
    ----------
    KC Li (1992).  On Principal Hessian Directions for Data
    Visualization and Dimension Reduction: Another application
    of Stein's lemma.  JASA 87:420.
    """

    def fit(self, **kwargs):
        """
        Estimate the EDR space using PHD.

        Parameters
        ----------
        resid : bool, optional
            If True, use least squares regression to remove the
            linear relationship between each covariate and the
            response, before conducting PHD.

        Returns
        -------
        A results instance which can be used to access the estimated
        parameters.
        """
        use_resid = kwargs.get("resid", False)

        # Center the response and the covariates.
        yc = self.endog - self.endog.mean()
        xc = self.exog - self.exog.mean(0)

        if use_resid:
            from statsmodels.regression.linear_model import OLS
            yc = OLS(yc, xc).fit().resid

        # Response-weighted second-moment matrix (Stein's lemma estimate
        # of the average Hessian).
        hmat = np.einsum('i,ij,ik->jk', yc, xc, xc) / len(yc)

        # Eigen-decompose covx^{-1} * hmat; order by absolute eigenvalue.
        eigval, eigvec = np.linalg.eig(np.linalg.solve(np.cov(xc.T), hmat))
        order = np.argsort(-np.abs(eigval))

        results = DimReductionResults(self, eigvec[:, order],
                                      eigs=eigval[order])
        return DimReductionResultsWrapper(results)
class SlicedAverageVarianceEstimation(_DimReductionRegression):
    """
    Sliced Average Variance Estimation (SAVE)

    Parameters
    ----------
    endog : array_like (1d)
        The dependent variable
    exog : array_like (2d)
        The covariates
    bc : bool, optional
        If True, use the bias-corrected CSAVE method of Li and Zhu.

    References
    ----------
    RD Cook.  SAVE: A method for dimension reduction and graphics
    in regression.
    http://www.stat.umn.edu/RegGraph/RecentDev/save.pdf

    Y Li, L-X Zhu (2007).  Asymptotics for sliced average
    variance estimation.  The Annals of Statistics.
    https://arxiv.org/pdf/0708.0462.pdf
    """

    def __init__(self, endog, exog, **kwargs):
        # Refer to the class by its own name rather than the module-level
        # `SAVE` alias (defined much later), so construction does not
        # depend on the alias existing.
        super(SlicedAverageVarianceEstimation, self).__init__(
            endog, exog, **kwargs)

        # Use the bias-corrected CSAVE estimator only when bc=True exactly.
        self.bc = kwargs.get("bc", False) is True

    def fit(self, **kwargs):
        """
        Estimate the EDR space.

        Parameters
        ----------
        slice_n : int
            Number of observations per slice

        Returns
        -------
        A results instance which can be used to access the estimated
        parameters.
        """
        # Sample size per slice
        slice_n = kwargs.get("slice_n", 50)

        # Number of slices
        n_slice = self.exog.shape[0] // slice_n

        # Whiten the data and split it into slices (parent-class helper).
        self._prep(n_slice)

        cv = [np.cov(z.T) for z in self._split_wexog]
        ns = [z.shape[0] for z in self._split_wexog]

        p = self.wexog.shape[1]

        if not self.bc:
            # Cook's original approach
            vm = 0
            for w, cvx in zip(ns, cv):
                icv = np.eye(p) - cvx
                vm += w * np.dot(icv, icv)
            vm /= len(cv)
        else:
            # The bias-corrected approach of Li and Zhu

            # \Lambda_n in Li, Zhu
            av = 0
            for c in cv:
                av += np.dot(c, c)
            av /= len(cv)

            # V_n in Li, Zhu
            vn = 0
            for x in self._split_wexog:
                r = x - x.mean(0)
                for i in range(r.shape[0]):
                    u = r[i, :]
                    m = np.outer(u, u)
                    vn += np.dot(m, m)
            vn /= self.exog.shape[0]

            c = np.mean(ns)
            k1 = c * (c - 1) / ((c - 1)**2 + 1)
            k2 = (c - 1) / ((c - 1)**2 + 1)
            av2 = k1 * av - k2 * vn
            vm = np.eye(p) - 2 * sum(cv) / len(cv) + av2

        # Eigen-decompose, order by decreasing eigenvalue, and map the
        # directions back from the whitened scale.
        a, b = np.linalg.eigh(vm)
        jj = np.argsort(-a)
        a = a[jj]
        b = b[:, jj]
        params = np.linalg.solve(self._covxr.T, b)

        results = DimReductionResults(self, params, eigs=a)
        return DimReductionResultsWrapper(results)
class DimReductionResults(model.Results):
    """
    Results class for a dimension reduction regression.

    Notes
    -----
    The columns of the `params` attribute span the estimated effective
    dimension reduction (EDR) space.  For methods that produce them, the
    `eigs` attribute holds eigenvalues measuring how much information
    each basis direction carries; otherwise `eigs` is None.
    """

    def __init__(self, model, params, eigs):
        super(DimReductionResults, self).__init__(model, params)
        self.eigs = eigs
class DimReductionResultsWrapper(wrap.ResultsWrapper):
    # Attach exog column labels to `params` when wrapping results.
    _attrs = {
        'params': 'columns',
    }
    _wrap_attrs = _attrs
# Register the wrapper for the results class.
wrap.populate_wrapper(DimReductionResultsWrapper, # noqa:E305
                      DimReductionResults)
def _grass_opt(params, fun, grad, maxiter, gtol):
"""
Minimize a function on a Grassmann manifold.
Parameters
----------
params : array_like
Starting value for the optimization.
fun : function
The function to be minimized.
grad : function
The gradient of fun.
maxiter : int
The maximum number of iterations.
gtol : float
Convergence occurs when the gradient norm falls below this value.
Returns
-------
params : array_like
The minimizing value for the objective function.
fval : float
The smallest achieved value of the objective function.
cnvrg : bool
True if the algorithm converged to a limit point.
Notes
-----
`params` is 2-d, but `fun` and `grad` should take 1-d arrays
`params.ravel()` as arguments.
Reference
---------
A Edelman, TA Arias, ST Smith (1998). The geometry of algorithms with
orthogonality constraints. SIAM J Matrix Anal Appl.
http://math.mit.edu/~edelman/publications/geometry_of_algorithms.pdf
"""
p, d = params.shape
params = params.ravel()
f0 = fun(params)
cnvrg = False
for _ in range(maxiter):
# Project the gradient to the tangent space
g = grad(params)
g -= np.dot(g, params) * params / np.dot(params, params)
if np.sqrt(np.sum(g * g)) < gtol:
cnvrg = True
break
gm = g.reshape((p, d))
u, s, vt = np.linalg.svd(gm, 0)
paramsm = params.reshape((p, d))
pa0 = np.dot(paramsm, vt.T)
def geo(t):
# Parameterize the geodesic path in the direction
# of the gradient as a function of a real value t.
pa = pa0 * np.cos(s * t) + u * np.sin(s * t)
return np.dot(pa, vt).ravel()
# Try to find a downhill step along the geodesic path.
step = 2.
while step > 1e-10:
pa = geo(-step)
f1 = fun(pa)
if f1 < f0:
params = pa
f0 = f1
break
step /= 2
params = params.reshape((p, d))
return params, f0, cnvrg
class CovarianceReduction(_DimReductionRegression):
    """
    Dimension reduction for covariance matrices (CORE).

    Parameters
    ----------
    endog : array_like
        The dependent variable, treated as group labels
    exog : array_like
        The independent variables.
    dim : int
        The dimension of the subspace onto which the covariance
        matrices are projected.

    Returns
    -------
    A model instance.  Call `fit` on the model instance to obtain
    a results instance, which contains the fitted model parameters.

    Notes
    -----
    This is a likelihood-based dimension reduction procedure based
    on Wishart models for sample covariance matrices.  The goal
    is to find a projection matrix P so that C_i | P'C_iP and
    C_j | P'C_jP are equal in distribution for all i, j, where
    the C_i are the within-group covariance matrices.

    The model and methodology are as described in Cook and Forzani.
    The optimization method follows Edelman et. al.

    References
    ----------
    DR Cook, L Forzani (2008). Covariance reducing models: an alternative
    to spectral modeling of covariance matrices. Biometrika 95:4.

    A Edelman, TA Arias, ST Smith (1998). The geometry of algorithms with
    orthogonality constraints. SIAM J Matrix Anal Appl.
    http://math.mit.edu/~edelman/publications/geometry_of_algorithms.pdf
    """
    def __init__(self, endog, exog, dim):
        super(CovarianceReduction, self).__init__(endog, exog)
        # Within-group sample covariances and sizes, grouped by endog label.
        covs, ns = [], []
        df = pd.DataFrame(self.exog, index=self.endog)
        for _, v in df.groupby(df.index):
            covs.append(v.cov().values)
            ns.append(v.shape[0])
        self.nobs = len(endog)
        # The marginal covariance: size-weighted average of the group
        # covariances.
        covm = 0
        for i, _ in enumerate(covs):
            covm += covs[i] * ns[i]
        covm /= self.nobs
        self.covm = covm
        self.covs = covs
        self.ns = ns
        self.dim = dim
    def loglike(self, params):
        """
        Evaluate the log-likelihood

        Parameters
        ----------
        params : array_like
            The projection matrix used to reduce the covariances, flattened
            to 1d.

        Returns the log-likelihood.
        """
        p = self.covm.shape[0]
        proj = params.reshape((p, self.dim))
        # Log-determinant of the projected marginal covariance.
        c = np.dot(proj.T, np.dot(self.covm, proj))
        _, ldet = np.linalg.slogdet(c)
        f = self.nobs * ldet / 2
        # Subtract the size-weighted projected log-determinants per group.
        for j, c in enumerate(self.covs):
            c = np.dot(proj.T, np.dot(c, proj))
            _, ldet = np.linalg.slogdet(c)
            f -= self.ns[j] * ldet / 2
        return f
    def score(self, params):
        """
        Evaluate the score function.

        Parameters
        ----------
        params : array_like
            The projection matrix used to reduce the covariances,
            flattened to 1d.

        Returns the score function evaluated at 'params'.
        """
        p = self.covm.shape[0]
        proj = params.reshape((p, self.dim))
        # Gradient of the marginal-covariance term.
        c0 = np.dot(proj.T, np.dot(self.covm, proj))
        cP = np.dot(self.covm, proj)
        g = self.nobs * np.linalg.solve(c0, cP.T).T
        # Subtract the per-group gradient contributions.
        for j, c in enumerate(self.covs):
            c0 = np.dot(proj.T, np.dot(c, proj))
            cP = np.dot(c, proj)
            g -= self.ns[j] * np.linalg.solve(c0, cP.T).T
        return g.ravel()
    def fit(self, start_params=None, maxiter=200, gtol=1e-4):
        """
        Fit the covariance reduction model.

        Parameters
        ----------
        start_params : array_like
            Starting value for the projection matrix. May be
            rectangular, or flattened.
        maxiter : int
            The maximum number of gradient steps to take.
        gtol : float
            Convergence criterion for the gradient norm.

        Returns
        -------
        A results instance that can be used to access the
        fitted parameters.
        """
        p = self.covm.shape[0]
        d = self.dim
        # Starting value for params: the first d coordinate directions by
        # default.
        if start_params is None:
            params = np.zeros((p, d))
            params[0:d, 0:d] = np.eye(d)
            params = params
        else:
            params = start_params
        # _grass_opt is designed for minimization, we are doing maximization
        # here so everything needs to be flipped.
        params, llf, cnvrg = _grass_opt(params, lambda x: -self.loglike(x),
                                        lambda x: -self.score(x), maxiter,
                                        gtol)
        llf *= -1
        if not cnvrg:
            g = self.score(params.ravel())
            gn = np.sqrt(np.sum(g * g))
            msg = "CovReduce optimization did not converge, |g|=%f" % gn
            warnings.warn(msg, ConvergenceWarning)
        results = DimReductionResults(self, params, eigs=None)
        # Attach the maximized log-likelihood to the results.
        results.llf = llf
        return DimReductionResultsWrapper(results)
# Short aliases for expert users, matching the abbreviations used in the
# dimension-reduction literature.
SIR = SlicedInverseReg
PHD = PrincipalHessianDirections
SAVE = SlicedAverageVarianceEstimation
CORE = CovarianceReduction
| |
from __future__ import division
import numpy as np
from scipy.linalg import solve
import sys
import time
from properties import *
from parse_file import *
class RealTime(object):
"""A RealTime object contains important parsed data from a Gaussian RealTime
log file.
Attributes:
name: A string containing primary filename
logfile: A string representing the Gaussian realtime log file
electricDipole: Object containing x, y, z electric dipole moments (au)
magneticDipole: Object containing x, y, z magnetic dipole moments (au)
electricField: Object containing x, y, z electric field strengths (au)
magneticField: Object containing x, y, z magnetic field strengths (au)
iops: Dict containing IOps for 512
envelope: Dict containing field parameters printed in logfile
time: Array containing time (au)
energy: Array containing energy (au)
frequency: Array containing frequencies from *time* (au)
fourier: Array containing fourier transformed signal (au)
propertyarrays: List containing names of properties stored as arrays.
truncate: Method to truncate propertyarrays to a given length
mmut_restart: Integer containing how often MMUT restarts
au2fs: Scalar constant to convert au to femtoseconds
"""
    def __init__(self, name):
        """Return a RealTime object for the Gaussian log file *name*.log."""
        # Initialize data
        self.name = name
        self.logfile = name+'.log'
        # Field envelope parameters parsed from the logfile.
        self.envelope = {}
        # Overlay-512 IOp options; single-element defaults are replaced by
        # decode_iops (from parse_file) after parsing.
        self.iops = {'132':['0'],
                     '134':['0'],
                     '177':['0'],
                     '136':['0'],
                     '137':['0'],
                     '138':['0'],
                     '139':['0'],
                     '140':['0'],
                     '141':['0'],
                     '142':['0'],
                     '143':['0'],
                     '144':['0']}
        # Property containers come from the wildcard `properties` import.
        self.electricDipole = ElectricDipole()
        self.magneticDipole = MagneticDipole()
        self.electricField = ElectricField()
        self.magneticField = MagneticField()
        self.orthonorm = None
        self.step_size = None
        self.total_steps = None
        self.time = None
        self.energy = None
        self.frequency = None
        self.fourier = None
        # Conversion factor: atomic time units -> femtoseconds.
        self.au2fs = 0.0241888425
        # TODO May want to look at a better way of defining which attributes are
        # arrays instead of just hard-coding them in.
        self.propertyarrays = ['electricDipole',
                               'magneticDipole',
                               'electricField',
                               'magneticField',
                               'time',
                               #FIXME valid for H2+ Rabi ONLY
                               'HOMO',
                               'LUMO',
                               'energy']
        self.truncate = truncate
        self.min_length = None
        self.mmut_restart = 10000000000 # e.g. never restart
        #FIXME: ONLY FOR H2+ RABI
        self.HOMO = None
        self.LUMO = None
        # Call parser (helpers come from the wildcard parse_file import).
        parse_file(self)
        decode_iops(self)
        # Make all arrays consistent length
        clean_data(self)
    def pade_tx(self,dipole_direction='x',spectra='abs',damp_const=5500,
        num_pts=10000):
        """Compute a spectrum from the time-dependent dipole using a Pade
        (rational) approximant of the damped signal, and store the result
        in self.frequency / self.fourier.

        dipole_direction -- 'x', 'y' or 'z' component of the dipole
        spectra          -- 'abs' (absorption), 'ecd', or 'power'
        damp_const       -- exponential damping time constant (au)
        num_pts          -- cap on the number of points used in the fit
        """
        # num_pts: number of points to sample for pade transformation
        if (spectra.lower() == 'abs') or (spectra.lower() == 'power'):
            if dipole_direction.lower() == 'x':
                dipole = self.electricDipole.x
                kick_strength = self.electricField.x[0]
            elif dipole_direction.lower() == 'y':
                dipole = self.electricDipole.y
                kick_strength = self.electricField.y[0]
            elif dipole_direction.lower() == 'z':
                dipole = self.electricDipole.z
                kick_strength = self.electricField.z[0]
            else:
                print "Not a valid direction for the dipole! Try: x,y,z "
        elif spectra.lower() == 'ecd':
            # ECD uses the magnetic dipole response to an electric kick.
            if dipole_direction.lower() == 'x':
                dipole = self.magneticDipole.x
                kick_strength = self.electricField.x[0]
            elif dipole_direction.lower() == 'y':
                dipole = self.magneticDipole.y
                kick_strength = self.electricField.y[0]
            elif dipole_direction.lower() == 'z':
                dipole = self.magneticDipole.z
                kick_strength = self.electricField.z[0]
            else:
                print "Not a valid direction for the dipole! Try: x,y,z "
        else:
            print "Not a valid spectra choice"
        # NOTE(review): for an invalid direction/spectra choice, `dipole`
        # and `kick_strength` remain unbound and the code below raises
        # NameError instead of exiting cleanly.
        if np.isclose(kick_strength,0.0):
            # No delta kick at t=0: fall back to the field maximum.
            if dipole_direction.lower() == 'x':
                kick_strength = max(self.electricField.x)
            elif dipole_direction.lower() == 'y':
                kick_strength = max(self.electricField.y)
            elif dipole_direction.lower() == 'z':
                kick_strength = max(self.electricField.z)
            if np.isclose(kick_strength,0.0):
                print "Kick strength = 0. Make sure you FFT'd the correct direction"
                sys.exit(0)
            print "It looks like you are not perturbing the field at time = 0"
            print "so we are taking the maximum of the electric field instead"
            print "This may not be the functionality you want."
        # skip is integer to skip every n-th value
        # skip = 1 would not skip any values, but skip = 10 would only
        # consider every tenth value
        skip = 1
        dipole = dipole - dipole[0]
        dipole = dipole[::skip]
        # Exponential damping regularizes the fit and broadens the lines.
        damp = np.exp(-(self.time-self.time[0])/float(damp_const))
        damp = damp[::skip]
        dipole = dipole * damp
        timestep = skip*(self.time[2] - self.time[1])
        M = len(dipole)
        N = int(np.floor(M / 2))
        print "N = ", N
        if N > num_pts:
            N = num_pts
            print "Trimmed points to: ", N
        # G and d are (N-1) x (N-1)
        # d[k] = -dipole[N+k] for k in range(1,N)
        d = -dipole[N+1:2*N]
        # Old code, which works with regular Ax=b linear solver.
        # G[k,m] = dipole[N - m + k] for m,k in range(1,N)
        #G = dipole[N + np.arange(1,N)[:,None] - np.arange(1,N)]
        #b = solve(G,d,check_finite=False)
        # Toeplitz linear solver using Levinson recursion
        # Should be O(n^2), and seems to work well, but if you get strange
        # results you may want to switch to regular linear solver which is much
        # more stable.
        try:
            from scipy.linalg import toeplitz, solve_toeplitz
        except ImportError:
            print "You'll need SciPy version >= 0.17.0"
        # NOTE(review): on ImportError the code continues and then raises
        # NameError at the solve_toeplitz call below.
        # Instead, form G = (c,r) as toeplitz
        #c = dipole[N:2*N-1]
        #r = np.hstack((dipole[1],dipole[N-1:1:-1]))
        b = solve_toeplitz((dipole[N:2*N-1],\
            np.hstack((dipole[1],dipole[N-1:1:-1]))),d,check_finite=False)
        # Now make b Nx1 where b0 = 1
        b = np.hstack((1,b))
        # b[m]*dipole[k-m] for k in range(0,N), for m in range(k)
        a = np.dot(np.tril(toeplitz(dipole[0:N])),b)
        p = np.poly1d(a)
        q = np.poly1d(b)
        # If you want energies greater than 2*27.2114 eV, you'll need to change
        # the default frequency range to something greater.
        self.frequency = np.arange(0,2,0.000025)
        # Evaluate the rational approximant p/q on the unit circle.
        W = np.exp(-1j*self.frequency*timestep)
        fw_re = np.real(p(W)/q(W))
        fw_im = np.imag(p(W)/q(W))
        if np.any(np.isinf(self.frequency)) or np.any(np.isnan(self.frequency)):
            print "Check your dT: frequency contains NaNs and/or Infs!"
            sys.exit(0)
        if spectra.lower() == 'abs':
            self.fourier = \
                np.abs((4.0*self.frequency*np.pi*fw_im)/(3.0*137*kick_strength))
        elif spectra.lower() == 'ecd':
            self.fourier = \
                (17.32*fw_re)/(np.pi*kick_strength)
        elif spectra.lower() == 'power':
            self.fourier = \
                (self.frequency*(fw_re**2 + fw_im**2))/(np.pi*kick_strength)
    def fourier_tx(self,dipole_direction='x',spectra='abs',damp_const=150,
        zero_pad=None,auto=False):
        """Return a set of frequencies and fourier transforms of a time
        dependent signal, e.g. return fourier transform of the x component of
        the time varying electric dipole.

        dipole_direction -- 'x', 'y' or 'z' component of the dipole
        spectra          -- 'abs' (absorption) or 'ecd'
        damp_const       -- exponential damping time constant (au)
        zero_pad         -- optional number of zeros appended to the signal
        auto             -- derive damp_const and zero_pad from the data
        """
        from scipy.fftpack import fft, fftfreq
        # Choose which signal to FFT
        if spectra.lower() == 'abs':
            if dipole_direction.lower() == 'x':
                dipole = self.electricDipole.x
                kick_strength = self.electricField.x[0]
            elif dipole_direction.lower() == 'y':
                dipole = self.electricDipole.y
                kick_strength = self.electricField.y[0]
            elif dipole_direction.lower() == 'z':
                dipole = self.electricDipole.z
                kick_strength = self.electricField.z[0]
            else:
                print "Not a valid direction for the dipole! Try: x,y,z "
        elif spectra.lower() == 'ecd':
            # ECD uses the magnetic dipole response to an electric kick.
            if dipole_direction.lower() == 'x':
                dipole = self.magneticDipole.x
                kick_strength = self.electricField.x[0]
            elif dipole_direction.lower() == 'y':
                dipole = self.magneticDipole.y
                kick_strength = self.electricField.y[0]
            elif dipole_direction.lower() == 'z':
                dipole = self.magneticDipole.z
                kick_strength = self.electricField.z[0]
            else:
                print "Not a valid direction for the dipole! Try: x,y,z "
        else:
            print "Not a valid spectra choice"
        # No delta kick at t=0: fall back to the field maximum.
        if np.isclose(kick_strength,0.0):
            if dipole_direction.lower() == 'x':
                kick_strength = max(self.electricField.x)
            elif dipole_direction.lower() == 'y':
                kick_strength = max(self.electricField.y)
            elif dipole_direction.lower() == 'z':
                kick_strength = max(self.electricField.z)
            if np.isclose(kick_strength,0.0):
                print "Kick strength = 0. Make sure you FFT'd the correct direction"
                sys.exit(0)
            print "It looks like you are not perturbing the field at time = 0"
            print "so we are taking the maximum of the electric field instead"
        if auto:
            # Automatic mode: pick the damping from the run length and the
            # zero padding for ~0.025 eV resolution.
            dt = self.time[2] - self.time[1]
            damp_const = self.time[-1]/10.0
            line_width = (2.0/damp_const)*27.2114
            #print "Damp const = ", damp_const
            if line_width > 2.0:
                print "Large line width: ", "{0:.3f}".format(line_width)," eV"
                print "Spectra not meaningful. Exiting..."
                sys.exit(0)
            else:
                print "Line width (eV) = ", "{0:.3f}".format(line_width)
            dipole = dipole - dipole[0]
            damp = np.exp(-(self.time-self.time[0])/float(damp_const))
            dipole = dipole * damp
            resolution = 0.025 #eV
            zero_pad = int(np.floor((2.0*np.pi*27.2114)/(resolution*dt))\
                - len(self.time))
            if(zero_pad < 0.0):
                zero_pad = 0.0
            print "Number zeros = ", zero_pad
            zero = np.linspace(0,0,zero_pad)
            dipole = np.hstack((dipole,zero))
        else:
            dipole = dipole - dipole[0]
            damp = np.exp(-(self.time-self.time[0])/float(damp_const))
            dipole = dipole * damp
            if zero_pad:
                zero = np.linspace(0,0,zero_pad)
                dipole = np.hstack((dipole,zero))
        fw = fft(dipole)
        fw_re = np.real(fw)
        fw_im = np.imag(fw)
        n = len(fw_re)
        m = int(n / 2)
        timestep = self.time[2] - self.time[1]
        # Angular frequencies (au) for the FFT bins.
        self.frequency = fftfreq(n,d=timestep)*2.0*np.pi
        if np.any(np.isinf(self.frequency)) or np.any(np.isnan(self.frequency)):
            print "Check your dT: frequency contains NaNs and/or Infs!"
            sys.exit(0)
        if spectra.lower() == 'abs':
            self.fourier = \
                -(4.0*self.frequency*np.pi*fw_im)/(3.0*137*kick_strength)
        elif spectra.lower() == 'ecd':
            self.fourier = \
                (17.32*fw_re)/(np.pi*kick_strength)
        # Grab positive values only
        self.frequency = self.frequency[1:m]
        self.fourier = self.fourier[1:m]
    def test(self):
        """Run the internal consistency checks on the parsed log."""
        self.check_energy()
        self.check_iops()
        pass
    def check_energy(self):
        """Report how well the total energy is conserved over the run."""
        dE = abs(max(self.energy) - min(self.energy))
        t_maxE = self.time[np.argmax(self.energy)]
        t_minE = self.time[np.argmin(self.energy)]
        print "Energy conserved to: ", "{0:.2e}".format(dE), " au"
        print "Max energy at time: ", t_maxE, " au"
        print "Min energy at time: ", t_minE, " au"
    def check_field(self,tol=1e-6):
        """Compare the parsed applied electric fields against the analytic
        reconstruction from expected_field, within tolerance *tol*."""
        if self.envelope['Field']:
            print "External field: ", self.envelope['Envelope']
            print "Ex field matches: ", np.allclose(self.electricField.x,
                self.expected_field('Ex'),atol=tol)
            print "Ey field matches: ", np.allclose(self.electricField.y,
                self.expected_field('Ey'),atol=tol)
            print "Ez field matches: ", np.allclose(self.electricField.z,
                self.expected_field('Ez'),atol=tol)
            # Magnetic-field checks disabled (presumably pending parser
            # support) — left here for reference.
            # print "Bx field matches: ", np.allclose(self.magneticField.x,
            #    self.expected_field('Bx'),atol=tol)
            # print "By field matches: ", np.allclose(self.magneticField.y,
            #    self.expected_field('By'),atol=tol)
            # print "Bz field matches: ", np.allclose(self.magneticField.z,
            #    self.expected_field('Bz'),atol=tol)
        else:
            print "No external field applied"
def check_iops(self):
""" Check internal consistency of some set iops and values printed out
to the logfile, as well as some derived quantities"""
# Check the step size
if self.step_size == (self.time[2] - self.time[1]):
if ((self.step_size == 0.05) \
and (int(self.iops['134'][0]) == 0)) or\
(self.step_size == float(self.iops['134'][0])*0.00001):
print "Time step [OK]: ", self.step_size, " au"
else:
print "Inconsistent time step: "
print " IOps: ", self.iops['134'][1]
print " logfile header showing ", self.step_size
print " logfile showing ", self.time[2] - self.time[1]
# Check the total propagation steps
if ((self.total_steps == 15) \
and (int(self.iops['132'][0]) == 0)) or\
(self.total_steps == abs(int(self.iops['132'][0]))):
print "Number MMUT steps [OK]: ", self.total_steps, " steps"
else:
print "Inconsistent propagation time: "
print " IOps: ", self.iops['132'][1]
print " logfile header showing ", self.total_steps
# Check if external field is indeed On or OFF
if ((self.envelope['Field'] == False) and\
(int(self.iops['138'][0]) == 0)):
print "Field off: [OK]"
elif (self.envelope and int(self.iops['138'][0]) != 0):
print "Field on: [OK]"
self.check_field()
else:
print "Inconsistency in field:"
print "IOps: ", self.iops['138']
# Check Orthonormalization
if ((self.orthonorm == self.iops['136'][1])):
print "Orthonormality [OK]:", self.orthonorm
else:
print "Inconsistency in orthonormality"
print "IOps: ", self.iops['136'][1]
print "logfile showing: ", self.iops['136'][1]
    def expected_field(self,component):
        """Reconstruct the analytic applied-field time series for one field
        component (e.g. 'Ex', 'Ey', 'Ez') from the parsed envelope
        parameters, for comparison against the values printed in the log."""
        Time = self.time
        TOn = self.envelope['TOn']
        TOff = self.envelope['TOff']
        # Frequency and phase default to zero when absent (static field).
        try:
            Omega = self.envelope['Frequency']
        except KeyError:
            Omega = 0.0
        try:
            Phase = self.envelope['Phase']
        except KeyError:
            Phase = 0.0
        # Carrier argument, measured from the field turn-on time.
        OmegT = Omega*(Time - TOn) + Phase
        field = np.zeros_like(self.time)
        if self.envelope['Envelope'] == 'Constant':
            # Step function, depending on how TOn and TOff are defined
            idx = np.where((Time >= TOn) & (Time < TOff))
            # in GDV OmegT begins at TOn as well
            field[idx] = self.envelope[component]*np.cos(OmegT[idx])
        elif self.envelope['Envelope'] == 'Linear':
            # TMax is one full carrier cycle; each linear ramp lasts one
            # cycle.
            TMax = (2.0*np.pi)/Omega
            # Linearly ramp off to zero
            idx = np.where((Time >= TOn) & (Time <= TOff) & \
                (Time > TOff-TMax))
            field[idx] = self.envelope[component]*\
                ((TOff-Time[idx])/TMax)*np.cos(OmegT[idx])
            # Constant envelope
            idx = np.where((Time >= TOn) & (Time <= TOff) & \
                (Time > TOn+TMax) & (Time <= TOff-TMax))
            field[idx] = self.envelope[component]*np.cos(OmegT[idx])
            # Linearly ramp up to maximum in first cycle
            idx = np.where((Time >= TOn) & (Time <= TOff) & \
                (Time <= TOn+TMax))
            field[idx] = self.envelope[component]*\
                ((Time[idx]-TOn)/TMax)*np.cos(OmegT[idx])
        elif self.envelope['Envelope'] == 'Gaussian':
            idx = np.where((Time >= TOn) & (Time < TOff))
            #FIXME: Sigma is hard-coded for testing...need to print it in the
            # output and then search for it during parsing.
            Sigma = 0.01
            TCntr = np.sqrt(np.log(1000.0))/Sigma
            field[idx] = self.envelope[component]*\
                np.cos(OmegT[idx])*\
                np.exp(-(Sigma*(Time[idx]-TCntr))**2)
        else:
            print "Not a valid field!"
            sys.exit(0)
        return field
if __name__ == '__main__':
    # Quick manual check: parse 'test.log' and plot the z electric dipole.
    a = RealTime('test')
    import matplotlib.pyplot as plt
    plt.plot(a.time,a.electricDipole.z)
    plt.savefig('dipole.pdf')
    #plt.show()
| |
import random
import math
class Board:
    """
    A Board object represents the Dots puzzle: a rows x cols grid of Tile
    objects, each of which may hold a 'colored' Dot or be empty (as in a
    blockage tile).
    """
    def __init__(self, rows, cols, gen):
        """
        Create a Board of size rows X cols and fill every tile with a Dot
        drawn from the generator.
        :param rows: number of rows
        :param cols: number of columns
        :param gen: a RandomGen object supplying dot colors
        """
        assert isinstance(gen, RandomGen)
        self.data = []
        self.rows = rows
        self.cols = cols
        self.gen = gen
        # Build the empty grid; data is indexed [row][col].
        for j in range(rows):
            row = []
            self.data.append(row)
            for i in range(cols):
                row.append(Tile(j, i))
        # Populate every tile with a generated dot.
        for j in range(rows):
            for i in range(cols):
                x = next(gen)
                c = Dot(x)
                self.put(i, j, c)
    def get(self, i, j):
        """
        Get the Tile at location i, j.
        :param i: column index
        :param j: row index
        :return: Tile
        """
        return self.data[j][i]
    def put(self, i, j, dot):
        """
        Place a dot at location i, j.
        :param i: column index
        :param j: row index
        :param dot: dot object to be placed
        """
        assert isinstance(dot, Dot)
        t = self.get(i, j)
        assert isinstance(t, Tile)
        t.put(dot)
    def remove(self, conn):
        """
        Remove every dot along a connection from the board.
        :param conn: connection object
        """
        assert isinstance(conn, Connection)
        for p in conn:
            i, j = p
            t = self.get(i, j)
            assert isinstance(t, Tile)
            t.removeCookie()
    def move(self, src, dst):
        """
        Move a dot at location src to dst.
        :param src: tuple (i, j) indicating a source location on the Board
        :param dst: tuple (p, q) indicating a destination location on the Board
        """
        i, j = src
        p, q = dst
        srcTile = self.get(i, j)
        dstTile = self.get(p, q)
        assert isinstance(srcTile, Tile)
        assert isinstance(dstTile, Tile)
        assert srcTile.dot is not None, "src tile can't be empty in move(): {}".format(srcTile)
        assert dstTile.dot is None, "dst tile must be empty in move(): {}".format(dstTile)
        srcCookie = srcTile.dot
        assert isinstance(srcCookie, Dot)
        srcTile.removeCookie()
        dstTile.put(srcCookie)
    def fillHolesByGravity(self, debug = False):
        """
        Fill gaps on the puzzle (holes) by moving dots from the top down to
        fill gaps at the bottom.
        :param debug: debugging flag
        """
        for i in range(self.cols):
            for j in range(self.rows):
                t = self.get(i, j)
                # BUG FIX: Tile stores its dot in `.dot`; the old code read
                # a nonexistent `.cookie` attribute and raised AttributeError.
                if t.dot is None:
                    # i,j is empty
                    if debug:
                        print("{} is empty".format((i, j)))
                    for q in range(j, 0, -1):
                        # move i,q-1 to i,q
                        srcPos = (i, q-1)
                        dstPos = (i, q)
                        srcTile = self.get(srcPos[0], srcPos[1])
                        if srcTile.dot is not None:
                            if debug:
                                print("moving {} to {}".format(srcPos, dstPos))
                            self.move(srcPos, dstPos)
                        else:
                            if debug:
                                print("skipping {}".format(srcPos))
            if debug:
                print("finished col %d" % i)
    def fillHolesWithNewCookies(self, debug = False):
        """
        Fill all remaining holes with new dots sourced from the generator
        object used to create this Board.
        :param debug: debugging flag
        """
        for i in range(self.cols):
            for j in range(self.rows):
                t = self.get(i, j)
                # BUG FIX: test the real `.dot` attribute, not `.cookie`.
                if t.dot is None:
                    # i,j is empty
                    if debug:
                        print("{} is empty".format((i, j)))
                    c = Dot(next(self.gen))
                    t.put(c)
    def __str__(self):
        """
        Render the contents of the Board as text for easy visualization.
        """
        result = "Board {}x{}:\n".format(self.cols, self.rows)
        row1 = self.data[0]
        result += "\t  "
        for i in range(len(row1)):
            result += "%d " % i
        result += '\n'
        for j, row in enumerate(self.data):
            result += '\t%d ' % j
            for x in row:
                assert isinstance(x, Tile)
                if x.dot is not None:
                    result += x.dot.color
                else:
                    result += '_'
                result += ' '
            result += '\n'
        return result
class Tile:
    """
    A Tile object represents a valid placement location on the Board.  A
    Tile may hold one Dot, or be empty (e.g. a blockage location).  It may
    also carry other special properties for added effects in the game.
    """
    def __init__(self, row, col):
        """
        Create an empty Tile at location (row, col) on the Board.
        :param row: row
        :param col: col
        """
        self.row = row
        self.col = col
        self.dot = None
    def put(self, dot):
        """
        Attach *dot* to this tile; the tile must currently be empty.
        :param dot: dot object
        """
        assert isinstance(dot, Dot)
        assert self.dot is None, "Tile must be empty in put()!"
        # Maintain the two-way tile <-> dot link.
        self.dot = dot
        dot.tile = self
    def removeCookie(self):
        """
        Detach the current dot, if any (no-op on an empty tile).
        """
        if self.dot is not None:
            self.dot.removeFromTile()
    def __str__(self):
        if self.dot is None:
            return "<Tile .dot None, .row {self.row}, .col {self.col}>".format(self=self)
        return "<Tile .dot {self.dot}, .row {self.row}, .col {self.col}>".format(self=self)
class Dot:
    """
    A Dot object represents a 'colored' dot on a Board.  A dot may move
    through various Tile locations during a Dots game until it is removed,
    e.g. when a string of connected dots is discovered.  A Dot may also
    carry special attributes such as "Bomb" or "Anchor" for special effects.
    """
    def __init__(self, color):
        """
        Create a Dot with the given color; initially it sits on no tile.
        :param color: color attribute for the dot.
        :return:
        """
        self.color = color
        self.tile = None
    def removeFromTile(self):
        """
        Detach this Dot from its current Tile, clearing both sides of the
        tile <-> dot link (no-op if the dot is not on a tile).
        :return:
        """
        if self.tile is None:
            return
        holder = self.tile
        assert isinstance(holder, Tile)
        holder.dot = None
        self.tile = None
    def __str__(self):
        if self.tile is None:
            return "<Cookie .color '{self.color}', .tile None>".format(self=self)
        return "<Cookie .color '{self.color}', .tile {tile.row}, {tile.col}>".format(self=self, tile=self.tile)
class RandomGen:
    """
    Iterator that generates a random sequence of colors following a
    requested frequency distribution; used mainly to populate the Dots
    puzzle board.
    """
    def __init__(self, f, limit=None):
        """
        :param f: mapping of color -> normalized frequency, e.g.
            {'a': .25, 'b': .25, 'c': .25, 'd': .25} produces each of the
            four colors 25 percent of the time.
        :param limit: optional cap on how many values to generate.
        """
        assert isinstance(f, dict)
        # Build the cumulative distribution over the colors in sorted order.
        # See https://en.wikipedia.org/wiki/Cumulative_distribution_function
        running = 0
        self.cdf = {}
        for color in sorted(f.keys()):
            running += f[color]
            if running > 1.0:
                print("Warning: sum {} exceed >1.0 at letter '{}'".format(running, color))
            self.cdf[color] = running
        self.limit = limit
        self.count = 0
    def __iter__(self):
        return self
    def __next__(self):
        if self.limit is not None and self.count > self.limit:
            raise StopIteration()
        draw = random.uniform(0, 1.0)
        # Walk the CDF in increasing order and return the first bucket
        # containing the draw.
        for color in sorted(self.cdf.keys(), key=lambda c: self.cdf[c]):
            if draw < self.cdf[color]:
                self.count += 1
                return color
class Connection:
    """
    Connection object represent a sequence of locations attempted by the player on a Board. For a valid dots connection
    in Dots game, all dots on locations must share the same color.
    """
    def __init__(self):
        # Ordered board positions, in the order the player visited them.
        self.positions = []

    def append(self, pos):
        """Add the next board position to the end of this connection."""
        self.positions.append(pos)

    def __contains__(self, item):
        return item in self.positions

    def __len__(self):
        return len(self.positions)

    def __iter__(self):
        return iter(self.positions)

    def __getitem__(self, index):
        return self.positions[index]

    def __str__(self):
        # Render each position with a leading space, joined by arrows;
        # preserves the historical " a ->  b" spacing.
        return " -> ".join(" {}".format(p) for p in self.positions)
def findAnyConnection(b, pos, depth = 0, connection = None, debug=False):
    """
    Given a starting position, search the board (b) for a connected sequence of dots; all dots in a
    connection must share the same color.
    This is a randomized depth-first walk: from the current tile it follows at
    most ONE same-colored, not-yet-visited neighbor (chosen at random), so the
    result is a single path rather than a full flood fill. The `connection`
    object is mutated in place by the recursive calls.
    :param b: Board object
    :param pos: starting position on the Board for the search; the tile there must hold a dot
    :param depth: current search depth (debugging only)
    :param connection: current connection object in the search (None starts a fresh one)
    :param debug: debugging flag
    :return: a Connection object (possibly of length 1 if no same-colored neighbor exists)
    """
    assert isinstance(b, Board)
    assert isinstance(pos, tuple)
    i, j = pos
    t = b.get(i, j)
    assert isinstance(t, Tile)
    assert t.dot is not None
    if debug:
        print(' ' * depth, t)
    if connection is None:
        connection = Connection()
    connection.append(pos)
    # Neighbor candidates: right, left, up, down — shuffled so repeated
    # searches explore different paths from the same start.
    searchOrders = [(t.col + 1, t.row), (t.col - 1, t.row), (t.col, t.row - 1), (t.col, t.row + 1)]
    random.shuffle(searchOrders)
    for nextPos in searchOrders:
        i, j = nextPos
        # Skip positions that fall off the board edges.
        if 0 <= i < b.cols and 0 <= j < b.rows:
            a = b.get(i, j)
            assert isinstance(a, Tile)
            if nextPos not in connection and a.dot is not None:
                if t.dot.color == a.dot.color:
                    # Extend the path through this neighbor, then stop:
                    # only one branch is followed per tile.
                    findAnyConnection(b, nextPos, depth= depth + 1, connection= connection)
                    break
    return connection
def findAnyValidConnection(b):
    """
    Attempt to find a valid dots connection on a board (b) randomly. A valid dots connection must share the same color
    and must satisfy the minimum length of 3. We will be using this routine as our main AI player to play a game of
    Dots.
    :param b: Board object
    :return: Connection object if found otherwise None when maximum attempts exceeded.
    """
    assert isinstance(b, Board)
    # Try up to 100 random starting tiles; return the first connection of
    # length >= 3 that the greedy search produces.
    for _ in range(100):
        col = math.floor(random.uniform(0, b.cols))
        row = math.floor(random.uniform(0, b.rows))
        candidate = findAnyConnection(b, (col, row))
        if len(candidate) >= 3:
            return candidate
    return None
class Game:
    """
    Game host a Dots game. Utility methods we've created so far are consumed and chained together in this class to create
    a playable Dots game.
    """
    def __init__(self, b):
        """
        Create a Dots game with a Board, b.
        :param b: Board object
        """
        assert isinstance(b, Board)
        self.board = b

    def playOnce(self, debug=False):
        """
        Play a Dots game in a single iteration.
        :param debug: debugging flag
        :return: tuple (color, length of connection removed) for this iteration,
                 or None when no valid connection could be found.
        """
        result = None
        conn = findAnyValidConnection(self.board)
        if conn is None:
            print("can't find any connection after max attempt")
        else:
            if debug:
                print("connection found.", conn)
            # All dots in a valid connection share one color, so it suffices
            # to inspect the first tile.
            i, j = conn[0]
            t = self.board.get(i, j)
            assert isinstance(t, Tile)
            result = (t.dot.color, len(conn))
            self.board.remove(conn)
            if debug:
                print("post removal {}".format(self.board))
            # Collapse remaining dots downwards, then top up the empty tiles.
            self.board.fillHolesByGravity()
            if debug:
                print("post fillHolesByGravity {}".format(self.board))
            self.board.fillHolesWithNewCookies()
            if debug:
                print("post fillHolesWithNewCookies {}".format(self.board))
        return result

    def play(self, limit=30):
        """
        Play a Dots game until iteration limit has been reached.
        :param limit: Iteration limit
        :return: Dict with interesting statistical data in the game
                 e.g. Number of dots with a given color has been slashed off in this game.
        """
        stat = {}
        for i in range(limit):
            print("iteration %d" % (i + 1))
            outcome = self.playOnce()
            if outcome is None:
                # BUG FIX: playOnce() returns None when no connection exists;
                # previously this crashed with "cannot unpack None". End the
                # game early instead.
                break
            color, count = outcome
            stat[color] = stat.get(color, 0) + count
        print('-'*60)
        print("game stat:", stat)
        return stat
# ---------------------------------------------------------------------------
# Copyright (c) 2015 - present. Boling Consulting Solutions, BCSW.net
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import unicode_literals
import logging
import pprint
import socket
from keystoneclient.v3.services import Service
from novaclient.v2.hypervisors import Hypervisor
from urllib3.util import parse_url
from OVS.client import Client as OvsClient
from core.node import Node
from core.switch import Switch
from linux.client import Client as BrctlClient
class OpenStackNode(Node):
    """
    Class to wrap an OpenStack node (system with a unique IP address).
    This is used to wrap KeyStone, Neutron, and Nova-compute service nodes and can wrap a single service
    or multiple. All Services have the following properties in common.
    Property Description
    -------- -----------------------------------------------------------------------------------
    Bridges OVS, Linux, and other bridges. These can be part of the base operating system configuration
    or could have been created by Neutron networking. Also 'docker' and 'libvirt' may have also
    created some of these bridges. Note that not all may be part of the SDN/NFV environment
    Ports Physical ports as well as virtual ports (vEths). Some could be part of the OS or created
    by Neutron or another package. Note that not all may be part of the SDN/NFV environment
    Links
    Keystone Servers have the following unique properties:
    Property Description
    -------- -----------------------------------------------------------------------------------
    Tenants Project Tenants
    Neutron Servers have the following unique properties:
    Property Description
    -------- -----------------------------------------------------------------------------------
    TODO: Lots of work here to keep networks and subnets straight..
    Nova Compute Servers have the following unique properties:
    Property Description
    -------- -----------------------------------------------------------------------------------
    Instances VM Instances running
    """
    def __init__(self, **kwargs):
        logging.debug('openstack.node.__init__: entry:\n{}'.format(pprint.PrettyPrinter().pformat(kwargs)))
        Node.__init__(self, **kwargs)
        self._service_info = kwargs.get('service_info')  # A List of tuples -> (service-type, NodeInfo obj)
        self._ip = kwargs.get('ip_address')
        self._openstack_credentials = kwargs.get('credentials')
        self._ssh_credentials = kwargs.get('ssh_credentials')
        # True once an SSH username/password pair has been verified against this node.
        self._ssh_valid = False
        # Lazily-populated caches; None means "not fetched yet".
        self._ovs_topology = None
        self._brctl_topology = None
        self._switches = None
        self._ports = None
        self._links = None
        self._projects = None
        self._instances = None
    @staticmethod
    def create(parent, **kwargs):
        """Factory: build an OpenStackNode as a child of `parent`, deriving its display name."""
        logging.info('openstack.node.Create: entry:\n{}'.format(pprint.PrettyPrinter().pformat(kwargs)))
        kwargs['parent'] = parent
        kwargs['name'] = '{} - {}'.format(kwargs['ip_address'],
                                          NodeInfo.service_names(kwargs['service_info']))
        kwargs['config'] = parent.config
        return OpenStackNode(**kwargs)
    @property
    def ssh_address(self):
        # SSH endpoint is the node's IP address.
        return self._ip
    @property
    def ssh_username(self):
        return self._ssh_credentials.username
    @property
    def ssh_password(self):
        return self._ssh_credentials.password
    @property
    def edges(self):
        """
        Get all edges (links) associated with this node
        :return: (list) Links
        """
        return self.get_links()
    @property
    def credentials(self):
        """
        Get OpenStack Credentials object
        :return: (Credentials) OpenStack admin access credentials
        """
        return self._openstack_credentials
    def get_ovs_topology(self, refresh=False):
        """
        Get all the entire OVS database of interest for this node. Centralized here since
        it is the focal point for the local system
        :param refresh: (boolean) If true, force refresh of all items
        :return: (list) of bridge nodes
        """
        if not refresh and self._ovs_topology is not None:
            return self._ovs_topology
        # NOTE(review): self.client is assumed to be a dict of per-protocol
        # clients (see connect()) — confirm against the Node base class.
        if 'ssh' not in self.client or self.client['ssh'] is None:
            return None  # TODO: Probably best to throw an exception
        self._ovs_topology = OvsClient.get_topology(self.ssh_address, self.ssh_username, self.ssh_password)
        return self._ovs_topology
    def get_brctl_topology(self, refresh=False):
        """
        Get all the entire linux bridge configuration. Centralized here since
        it is the focal point for the local system
        :param refresh: (boolean) If true, force refresh of all items
        :return: (list) of bridge nodes
        """
        if not refresh and self._brctl_topology is not None:
            return self._brctl_topology
        if 'ssh' not in self.client or self.client['ssh'] is None:
            return None  # TODO: Probably best to throw an exception
        self._brctl_topology = BrctlClient.get_topology(self.ssh_address, self.ssh_username, self.ssh_password)
        return self._brctl_topology
    def get_switches(self, refresh=False):
        """
        Get all bridges (OVS & Linux) for this node.
        All nodes will have zero or more bridges associated to them
        :param refresh: (boolean) If true, force refresh of all items
        :return: (list) of bridge nodes
        """
        if not refresh and self._switches is not None:
            return self._switches
        if 'ssh' not in self.client or self.client['ssh'] is None:
            return None  # TODO: Probably best to throw an exception
        # NOTE(review): uses whatever topology snapshots are currently cached;
        # callers (perform_sync) refresh both topologies first — confirm other
        # callers do the same.
        self._switches = Switch.get_switches(parent=self,
                                             address=self.ssh_address,
                                             ssh_credentials=self._ssh_credentials,
                                             ovs_topology=self._ovs_topology,
                                             brctl_topology=self._brctl_topology)
        return self._switches
    def get_ports(self, refresh=False):
        """
        Get all networking ports (physical and veths) for this node.
        All nodes will have one or more ports associated to them
        :param refresh: (boolean) If true, force refresh of all items
        :return: (list) of port nodes
        """
        if not refresh and self._ports is not None:
            return self._ports
        self._ports = []  # TODO: Need to implement
        return self._ports
    def get_links(self, refresh=False):
        """
        Get all networking links (physical and veths) for this node.
        All nodes will have one or more links associated to them
        :param refresh: (boolean) If true, force refresh of all items
        :return: (list) of link nodes
        """
        if not refresh and self._links is not None:
            return self._links
        self._links = []  # TODO: Need to implement
        # TODO: Once SDN controllers supported, check OVS and other flows for duplicates
        return self._links
    def get_projects(self, refresh=False):
        """
        Get all projects/tenants for this node.
        Available on KeyStone nodes only
        :param refresh: (boolean) If true, force refresh of all items
        :return: (list) of project nodes, or None for non-keystone nodes
        """
        if (not refresh and self._projects is not None) or not self.supports_service('keystone'):
            return self._projects
        self._projects = []  # TODO: Need to implement
        return self._projects
    def get_instances(self, refresh=False):
        """
        Get all VM instances for this node.
        Nova API and Nova compute nodes support VM instances
        :param refresh: (boolean) If true, force refresh of all items
        :return: (list) of instance nodes, or None for non-compute nodes
        """
        if not refresh and self._instances is not None:
            return self._instances
        # TODO: Need to experiment with Nova Cells v2 and instances that fail to launch
        if not self.supports_service('compute'):
            return None
        self._instances = []  # TODO: Need to implement
        return self._instances
    def supports_service(self, service):
        """
        Search through service list and return true if given service is supported
        :param service: (unicode) Service to match (case-insensitive)
        :return: True if the services list has the requested service
        """
        for srv in self._service_info:
            if srv[1].name.lower() == service.lower():
                return True
            # Compute nodes are named by hostname, so match them by wrapper type instead.
            if isinstance(srv[1], ComputeInfo) and service.lower() == 'compute':
                return True
        return False
    def set_ssh_credentials(self):
        # For SSH, find a matching username/password pair for this node.
        if self._ssh_valid:
            return
        user_pwd_list = self._ssh_credentials.get_ssh_credentials(self.ssh_address)
        for username, password in user_pwd_list.items():
            # Probe each candidate pair; a non-None client means login succeeded.
            ssh_client = self._ssh_credentials.ssh_client(self.ssh_address, username, password)
            if ssh_client is not None:
                ssh_client.close()
                # Cache the last credentials we used successfully
                self._ssh_credentials.save_ssh_creds(self.ssh_address, username, password)
                self._ssh_valid = True
                return
    def connect(self):
        """
        Create credentials for accessing an OpenStack Controller. A variety of clients are used to work with
        OpenStack but at this level, we mainly need to enumerate all the services (and endpoints) that we
        will be accessing. This requires a Keystone Client
        :return: (dict) Keystone Client and/or Nova Client
        """
        keystone = self.credentials.keystone_client if self.supports_service('keystone') else None
        nova = self.credentials.nova_client if self.supports_service('compute') else None
        self.set_ssh_credentials()
        ssh = {'username': self.ssh_username, 'password': self.ssh_password}
        return {'keystone': keystone, 'nova': nova, 'ssh': ssh}
    def perform_sync(self):
        """
        A controller is made up of one or more machines running services that we care about. There are
        a large number of OpenStack services, but we currently only care about a few.
        :return: True if synchronization was successful, False otherwise
        """
        if self.client is None:
            return False
        # Snapshot the OVS subsystem. It may not exist but we will always create a basic one and
        # use it as the basis of the entire topology. Other commands and interfaces will augment the
        # existing data and create some new first-level items (such as 'VMs')
        brctl_topology = self.get_brctl_topology(refresh=True)
        if brctl_topology is None:
            return False
        ovs_topology = self.get_ovs_topology(refresh=True)
        if ovs_topology is None:
            return False
        # First all bridges and switches
        status = self.perform_sync_switches()
        # TODO: Need to implement lots more !!!
        return status
    def perform_sync_switches(self):
        # Load switches/bridges and reconcile them with this node's children.
        switches = self.get_switches(refresh=True)
        # TODO: Remove old children not in new list first
        for switch in switches:
            # TODO: Add if needed, also need to remove if no longer there
            if switch in self.children:
                # Existing child
                pass
            else:
                # New child
                self.children.append(switch)
        return True
# ########################################################################################################3
# Support classes to wrap OpenStack information related to Service and Compute nodes
# ########################################################################################################3
class NodeInfo(object):
    """
    Base wrapper around OpenStack node (service and/or compute) information.
    """
    def __init__(self, info):
        """
        Initialization
        :param info: OpenStack (Service/Hypervisor) Object to wrap
        """
        # Normalize the OpenStack identifier to a lower-case string.
        self.id = str(info.id).lower()

    def __repr__(self):
        return '%s.(%s, type: %s, ip: %s, descr: %s, %r)' % (self.__class__, self.name, self.type,
                                                             self.ip, self.description, self.__dict__)

    @staticmethod
    def create(service, **kwargs):
        """
        Create the appropriate Service or Compute Node wrappers for each item in the list
        :param service: (OpenStack objects) List of OpenStack Service/Compute nodes to wrap
        :return: (ComputeInfo or ServiceInfo)
        """
        if isinstance(service, list):
            return [ServiceInfo.create(srv, **kwargs) for srv in service]
        # Pick the wrapper class based on the concrete OpenStack client type.
        wrapper_for = {
            Service   : ServiceInfo,
            Hypervisor: ComputeInfo
        }
        return wrapper_for.get(type(service))(service, **kwargs)

    @staticmethod
    def service_names(services):
        """
        Convert service types to names
        :param services: (list or inst) Each instances is a tuple composed of (Service Type, NodeInfo object)
        :return: (unicode) Service types converted to names
        """
        if isinstance(services, list):
            return ' - '.join(NodeInfo.service_names(srv) for srv in services)
        return '{}/{}'.format(services[1].name, services[1].type)

    @property
    def name(self):
        return self._name

    @property
    def type(self):
        return self._type

    @property
    def description(self):
        return self._description

    @property
    def enabled(self):
        return self._enabled

    @property
    def ip(self):
        return self._ip

    def to_dict(self):
        return self.__dict__
class ServiceInfo(NodeInfo):
    """
    Base class to wrap some common OpenStack service/compute-node information
    """
    def __init__(self, srv_info, **kwargs):
        """
        Wrap a keystone Service record.
        :param srv_info: (keystoneclient Service) service object to wrap
        :param kwargs: may include 'endpoints', a list of keystone endpoint
            objects; only those whose service_id matches this service are kept.
        """
        NodeInfo.__init__(self, srv_info)
        self._name = srv_info.name.lower()
        self._type = srv_info.type.lower()
        # 'description' is optional on a Service record.
        self._description = srv_info.to_dict().get('description', '')
        self._enabled = srv_info.enabled
        # Placeholder until an endpoint URL can be resolved below.
        self._ip = '0.0.0.0'
        self.endpoints = {}
        if 'endpoints' in kwargs:
            # Index this service's endpoints by interface type (admin/public/internal).
            self.endpoints = {endpt.interface.lower(): {'url'    : endpt.url,
                                                        'id'     : endpt.id.lower(),
                                                        'enabled': endpt.enabled}
                              for endpt in kwargs['endpoints'] if endpt.service_id.lower() == self.id}
            # Attempt to get an ip address for one of the endpoint
            for endpt_type in ['admin', 'public', 'internal']:
                try:
                    if endpt_type in self.endpoints:
                        self._ip = socket.gethostbyname(parse_url(self.endpoints[endpt_type]['url']).host)
                        break
                except Exception:
                    # Best-effort DNS resolution: on failure fall through and
                    # try the next endpoint type, keeping the 0.0.0.0 default.
                    pass
class ComputeInfo(NodeInfo):
    """
    Wrapper for Nova compute-node (hypervisor) specific information.
    """
    def __init__(self, compute_info):
        """
        Initialization
        """
        NodeInfo.__init__(self, compute_info)
        # Properties exposed directly on the Hypervisor object.
        self._name = compute_info.hypervisor_hostname
        self._description = 'Compute Node at {}'.format(self.name)
        self._type = 'compute-{}'.format(self.id)
        self.status = compute_info.status
        self.state = compute_info.state
        # Remaining properties come from the dictionary form of the object.
        details = compute_info.to_dict()
        self._ip = details.get('host_ip')
        self.service = details.get('service')
        self.cpu_info = {
            'info': details.get('cpu_info'),
            'type': details.get('hypervisor_type'),
            'current_workload': details.get('current_workload'),
            'vcpus': details.get('vcpus'),
            'vcpus_used': details.get('vcpus_used'),
        }
        self.disk_info = {
            'disk_available_least': details.get('disk_available_least'),
            'free_disk_gb': details.get('free_disk_gb'),
            'local_gb': details.get('local_gb'),
            'local_gb_used': details.get('local_gb_used'),
        }
        self.memory_info = {
            'free_ram_mb': details.get('free_ram_mb'),
            'memory_mb': details.get('memory_mb'),
            'memory_mb_used': details.get('memory_mb_used'),
        }

    @property
    def enabled(self):
        """A compute node is enabled when its status string says so."""
        return self.status.lower() == 'enabled'
# ---------------------------------------------------------------------------
# Copyright 2012 IBM Corp.
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the conductor service."""
import contextlib
import uuid
import mock
from mox3 import mox
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_utils import timeutils
import six
from nova.api.ec2 import ec2utils
from nova.compute import arch
from nova.compute import flavors
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import conductor
from nova.conductor import api as conductor_api
from nova.conductor import manager as conductor_manager
from nova.conductor import rpcapi as conductor_rpcapi
from nova.conductor.tasks import live_migrate
from nova import context
from nova import db
from nova.db.sqlalchemy import models
from nova import exception as exc
from nova.image import api as image_api
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import fields
from nova.objects import instance as instance_obj
from nova.objects import quotas as quotas_obj
from nova import quota
from nova import rpc
from nova.scheduler import utils as scheduler_utils
from nova import test
from nova.tests.unit import cast_as_call
from nova.tests.unit.compute import test_compute
from nova.tests.unit import fake_block_device
from nova.tests.unit import fake_instance
from nova.tests.unit import fake_notifier
from nova.tests.unit import fake_server_actions
from nova.tests.unit import fake_utils
from nova import utils
CONF = cfg.CONF
CONF.import_opt('report_interval', 'nova.service')
FAKE_IMAGE_REF = 'fake-image-ref'
class FakeContext(context.RequestContext):
    """Request context whose elevated() result is memoized so tests can
    compare against the exact cached object to detect elevation."""

    def elevated(self):
        """Return a consistent elevated context so we can detect it."""
        try:
            return self._elevated
        except AttributeError:
            self._elevated = super(FakeContext, self).elevated()
            return self._elevated
class _BaseTestCase(object):
    """Shared conductor tests, mixed into manager/API/RPC-level test cases.

    Subclasses must provide self.conductor and self.conductor_manager.
    """
    def setUp(self):
        super(_BaseTestCase, self).setUp()
        self.db = None
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)
        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)
        # Ensure RPC-deserialized contexts are the exact one we created, so
        # identity comparisons in tests work across the RPC boundary.
        def fake_deserialize_context(serializer, ctxt_dict):
            self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
            self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
            return self.context
        self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
                       fake_deserialize_context)
        fake_utils.stub_out_utils_spawn_n(self.stubs)
    def _create_fake_instance(self, params=None, type_name='m1.tiny'):
        # Build and persist a minimal-but-valid instance record; `params`
        # entries override the defaults below.
        if not params:
            params = {}
        inst = {}
        inst['vm_state'] = vm_states.ACTIVE
        inst['image_ref'] = FAKE_IMAGE_REF
        inst['reservation_id'] = 'r-fakeres'
        inst['user_id'] = self.user_id
        inst['project_id'] = self.project_id
        inst['host'] = 'fake_host'
        type_id = flavors.get_flavor_by_name(type_name)['id']
        inst['instance_type_id'] = type_id
        inst['ami_launch_index'] = 0
        inst['memory_mb'] = 0
        inst['vcpus'] = 0
        inst['root_gb'] = 0
        inst['ephemeral_gb'] = 0
        inst['architecture'] = arch.X86_64
        inst['os_type'] = 'Linux'
        inst['availability_zone'] = 'fake-az'
        inst.update(params)
        return db.instance_create(self.context, inst)
    def _do_update(self, instance_uuid, **updates):
        return self.conductor.instance_update(self.context, instance_uuid,
                                              updates, None)
    def test_instance_update(self):
        instance = self._create_fake_instance()
        new_inst = self._do_update(instance['uuid'],
                                   vm_state=vm_states.STOPPED)
        instance = db.instance_get_by_uuid(self.context, instance['uuid'])
        self.assertEqual(instance['vm_state'], vm_states.STOPPED)
        self.assertEqual(new_inst['vm_state'], instance['vm_state'])
    def test_instance_update_invalid_key(self):
        # NOTE(danms): the real DB API call ignores invalid keys
        if self.db is None:
            self.conductor = utils.ExceptionHelper(self.conductor)
        self.assertRaises(KeyError,
                          self._do_update, 'any-uuid', foobar=1)
    def test_migration_get_in_progress_by_host_and_node(self):
        self.mox.StubOutWithMock(db,
                                 'migration_get_in_progress_by_host_and_node')
        db.migration_get_in_progress_by_host_and_node(
            self.context, 'fake-host', 'fake-node').AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.migration_get_in_progress_by_host_and_node(
            self.context, 'fake-host', 'fake-node')
        self.assertEqual(result, 'fake-result')
    def test_aggregate_metadata_get_by_host(self):
        self.mox.StubOutWithMock(db, 'aggregate_metadata_get_by_host')
        db.aggregate_metadata_get_by_host(self.context, 'host',
                                          'key').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.aggregate_metadata_get_by_host(self.context,
                                                               'host', 'key')
        self.assertEqual(result, 'result')
    def test_bw_usage_update(self):
        self.mox.StubOutWithMock(db, 'bw_usage_update')
        self.mox.StubOutWithMock(db, 'bw_usage_get')
        update_args = (self.context, 'uuid', 'mac', 0, 10, 20, 5, 10, 20)
        get_args = (self.context, 'uuid', 0, 'mac')
        db.bw_usage_update(*update_args, update_cells=True)
        db.bw_usage_get(*get_args).AndReturn('foo')
        self.mox.ReplayAll()
        result = self.conductor.bw_usage_update(*update_args,
                                                update_cells=True)
        self.assertEqual(result, 'foo')
    def test_provider_fw_rule_get_all(self):
        fake_rules = ['a', 'b', 'c']
        self.mox.StubOutWithMock(db, 'provider_fw_rule_get_all')
        db.provider_fw_rule_get_all(self.context).AndReturn(fake_rules)
        self.mox.ReplayAll()
        result = self.conductor.provider_fw_rule_get_all(self.context)
        self.assertEqual(result, fake_rules)
    def test_block_device_mapping_get_all_by_instance(self):
        fake_inst = {'uuid': 'fake-uuid'}
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        db.block_device_mapping_get_all_by_instance(
            self.context, fake_inst['uuid']).AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.block_device_mapping_get_all_by_instance(
            self.context, fake_inst, legacy=False)
        self.assertEqual(result, 'fake-result')
    def test_vol_usage_update(self):
        self.mox.StubOutWithMock(db, 'vol_usage_update')
        self.mox.StubOutWithMock(compute_utils, 'usage_volume_info')
        fake_inst = {'uuid': 'fake-uuid',
                     'project_id': 'fake-project',
                     'user_id': 'fake-user',
                     'availability_zone': 'fake-az',
                     }
        db.vol_usage_update(self.context, 'fake-vol', 22, 33, 44, 55,
                            fake_inst['uuid'],
                            fake_inst['project_id'],
                            fake_inst['user_id'],
                            fake_inst['availability_zone'],
                            False).AndReturn('fake-usage')
        compute_utils.usage_volume_info('fake-usage').AndReturn('fake-info')
        self.mox.ReplayAll()
        self.conductor.vol_usage_update(self.context, 'fake-vol',
                                        22, 33, 44, 55, fake_inst, None, False)
        # A single volume.usage notification should have been emitted.
        self.assertEqual(1, len(fake_notifier.NOTIFICATIONS))
        msg = fake_notifier.NOTIFICATIONS[0]
        self.assertEqual('conductor.%s' % self.conductor_manager.host,
                         msg.publisher_id)
        self.assertEqual('volume.usage', msg.event_type)
        self.assertEqual('INFO', msg.priority)
        self.assertEqual('fake-info', msg.payload)
    def test_compute_node_create(self):
        self.mox.StubOutWithMock(db, 'compute_node_create')
        db.compute_node_create(self.context, 'fake-values').AndReturn(
            'fake-result')
        self.mox.ReplayAll()
        result = self.conductor.compute_node_create(self.context,
                                                    'fake-values')
        self.assertEqual(result, 'fake-result')
    def test_compute_node_update(self):
        node = {'id': 'fake-id'}
        self.mox.StubOutWithMock(db, 'compute_node_update')
        db.compute_node_update(self.context, node['id'], {'fake': 'values'}).\
            AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.compute_node_update(self.context, node,
                                                    {'fake': 'values'})
        self.assertEqual(result, 'fake-result')
    def test_compute_node_delete(self):
        node = {'id': 'fake-id'}
        self.mox.StubOutWithMock(db, 'compute_node_delete')
        db.compute_node_delete(self.context, node['id']).AndReturn(None)
        self.mox.ReplayAll()
        result = self.conductor.compute_node_delete(self.context, node)
        self.assertIsNone(result)
    def test_task_log_get(self):
        self.mox.StubOutWithMock(db, 'task_log_get')
        db.task_log_get(self.context, 'task', 'begin', 'end', 'host',
                        'state').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.task_log_get(self.context, 'task', 'begin',
                                             'end', 'host', 'state')
        self.assertEqual(result, 'result')
    def test_task_log_get_with_no_state(self):
        self.mox.StubOutWithMock(db, 'task_log_get')
        db.task_log_get(self.context, 'task', 'begin', 'end',
                        'host', None).AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.task_log_get(self.context, 'task', 'begin',
                                             'end', 'host', None)
        self.assertEqual(result, 'result')
    def test_task_log_begin_task(self):
        self.mox.StubOutWithMock(db, 'task_log_begin_task')
        db.task_log_begin_task(self.context.elevated(), 'task', 'begin',
                               'end', 'host', 'items',
                               'message').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.task_log_begin_task(
            self.context, 'task', 'begin', 'end', 'host', 'items', 'message')
        self.assertEqual(result, 'result')
    def test_task_log_end_task(self):
        self.mox.StubOutWithMock(db, 'task_log_end_task')
        db.task_log_end_task(self.context.elevated(), 'task', 'begin', 'end',
                             'host', 'errors', 'message').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.task_log_end_task(
            self.context, 'task', 'begin', 'end', 'host', 'errors', 'message')
        self.assertEqual(result, 'result')
    @mock.patch.object(notifications, 'audit_period_bounds')
    @mock.patch.object(notifications, 'bandwidth_usage')
    @mock.patch.object(compute_utils, 'notify_about_instance_usage')
    def test_notify_usage_exists(self, mock_notify, mock_bw, mock_audit):
        info = {
            'audit_period_beginning': 'start',
            'audit_period_ending': 'end',
            'bandwidth': 'bw_usage',
            'image_meta': {},
            'extra': 'info',
            }
        instance = objects.Instance(id=1, system_metadata={})
        mock_audit.return_value = ('start', 'end')
        mock_bw.return_value = 'bw_usage'
        self.conductor.notify_usage_exists(self.context, instance, False, True,
                                           system_metadata={},
                                           extra_usage_info=dict(extra='info'))
        # Matcher comparing only the instance id, since the object is copied
        # across the RPC boundary.
        class MatchInstance(object):
            def __eq__(self, thing):
                return thing.id == instance.id
        notifier = self.conductor_manager.notifier
        mock_audit.assert_called_once_with(False)
        mock_bw.assert_called_once_with(MatchInstance(), 'start', True)
        mock_notify.assert_called_once_with(notifier, self.context,
                                            MatchInstance(),
                                            'exists', system_metadata={},
                                            extra_usage_info=info)
    def test_security_groups_trigger_members_refresh(self):
        self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                                 'trigger_members_refresh')
        self.conductor_manager.security_group_api.trigger_members_refresh(
            self.context, [1, 2, 3])
        self.mox.ReplayAll()
        self.conductor.security_groups_trigger_members_refresh(self.context,
                                                               [1, 2, 3])
    def test_get_ec2_ids(self):
        expected = {
            'instance-id': 'ec2-inst-id',
            'ami-id': 'ec2-ami-id',
            'kernel-id': 'ami-kernel-ec2-kernelid',
            'ramdisk-id': 'ami-ramdisk-ec2-ramdiskid',
            }
        inst = {
            'uuid': 'fake-uuid',
            'kernel_id': 'ec2-kernelid',
            'ramdisk_id': 'ec2-ramdiskid',
            'image_ref': 'fake-image',
            }
        self.mox.StubOutWithMock(ec2utils, 'id_to_ec2_inst_id')
        self.mox.StubOutWithMock(ec2utils, 'glance_id_to_ec2_id')
        self.mox.StubOutWithMock(ec2utils, 'image_type')
        ec2utils.id_to_ec2_inst_id(inst['uuid']).AndReturn(
            expected['instance-id'])
        ec2utils.glance_id_to_ec2_id(self.context,
                                     inst['image_ref']).AndReturn(
            expected['ami-id'])
        for image_type in ['kernel', 'ramdisk']:
            image_id = inst['%s_id' % image_type]
            ec2utils.image_type(image_type).AndReturn('ami-' + image_type)
            ec2utils.glance_id_to_ec2_id(self.context, image_id,
                                         'ami-' + image_type).AndReturn(
                'ami-%s-ec2-%sid' % (image_type, image_type))
        self.mox.ReplayAll()
        result = self.conductor.get_ec2_ids(self.context, inst)
        self.assertEqual(result, expected)
class ConductorTestCase(_BaseTestCase, test.TestCase):
    """Conductor Manager Tests.

    Exercises ConductorManager directly (no RPC layer), so expected
    exceptions surface wrapped in messaging.ExpectedException.
    """
    def setUp(self):
        super(ConductorTestCase, self).setUp()
        # No RPC in this test case: the "API" under test and the manager
        # are the very same object.
        self.conductor = conductor_manager.ConductorManager()
        self.conductor_manager = self.conductor
    def test_instance_get_by_uuid(self):
        """Fetching by uuid returns an instance matching the db record."""
        orig_instance = self._create_fake_instance()
        copy_instance = self.conductor.instance_get_by_uuid(
            self.context, orig_instance['uuid'], None)
        self.assertEqual(orig_instance['name'],
                         copy_instance['name'])
    def test_block_device_mapping_update_or_create(self):
        """BDM create/update hit the db and notify cells at the top."""
        fake_bdm = {'id': 1, 'device_name': 'foo',
                    'source_type': 'volume', 'volume_id': 'fake-vol-id',
                    'destination_type': 'volume'}
        fake_bdm = fake_block_device.FakeDbBlockDeviceDict(fake_bdm)
        fake_bdm2 = {'id': 1, 'device_name': 'foo2',
                     'source_type': 'volume', 'volume_id': 'fake-vol-id',
                     'destination_type': 'volume'}
        fake_bdm2 = fake_block_device.FakeDbBlockDeviceDict(fake_bdm2)
        cells_rpcapi = self.conductor.cells_rpcapi
        self.mox.StubOutWithMock(db, 'block_device_mapping_create')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
        self.mox.StubOutWithMock(cells_rpcapi,
                                 'bdm_update_or_create_at_top')
        # create=True -> db create; create=False -> db update.  Each path
        # must also push the resulting BDM object up to the cells api.
        db.block_device_mapping_create(self.context,
                                       fake_bdm).AndReturn(fake_bdm2)
        cells_rpcapi.bdm_update_or_create_at_top(
            self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
            create=True)
        db.block_device_mapping_update(self.context, fake_bdm['id'],
                                       fake_bdm).AndReturn(fake_bdm2)
        cells_rpcapi.bdm_update_or_create_at_top(
            self.context, mox.IsA(block_device_obj.BlockDeviceMapping),
            create=False)
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=True)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=False)
    def test_instance_get_all_by_filters(self):
        """Filter/sort args are passed straight through to the db api."""
        filters = {'foo': 'bar'}
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all_by_filters(self.context, filters,
                                       'fake-key', 'fake-sort',
                                       columns_to_join=None, use_slave=False)
        self.mox.ReplayAll()
        self.conductor.instance_get_all_by_filters(self.context, filters,
                                                   'fake-key', 'fake-sort',
                                                   None, False)
    def test_instance_get_all_by_filters_use_slave(self):
        """use_slave=True must reach the db layer unchanged."""
        filters = {'foo': 'bar'}
        self.mox.StubOutWithMock(db, 'instance_get_all_by_filters')
        db.instance_get_all_by_filters(self.context, filters,
                                       'fake-key', 'fake-sort',
                                       columns_to_join=None, use_slave=True)
        self.mox.ReplayAll()
        self.conductor.instance_get_all_by_filters(self.context, filters,
                                                   'fake-key', 'fake-sort',
                                                   columns_to_join=None,
                                                   use_slave=True)
    def test_instance_get_all_by_host(self):
        """Without a node the host-only db call is used; with a node the
        host_and_node variant is used.  Both get an elevated context."""
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
        db.instance_get_all_by_host(self.context.elevated(),
                                    'host', None).AndReturn('result')
        db.instance_get_all_by_host_and_node(self.context.elevated(), 'host',
                                             'node').AndReturn('result')
        self.mox.ReplayAll()
        result = self.conductor.instance_get_all_by_host(self.context, 'host',
                                                         None, None)
        self.assertEqual(result, 'result')
        result = self.conductor.instance_get_all_by_host(self.context, 'host',
                                                         'node', None)
        self.assertEqual(result, 'result')
    def _test_stubbed(self, name, dbargs, condargs,
                      db_result_listified=False, db_exception=None):
        """Stub db.<name> and drive it through service_get_all_by.

        :param name: db api method name to stub
        :param dbargs: positional args the db method must be called with
        :param condargs: kwargs for conductor.service_get_all_by
        :param db_result_listified: expect the result wrapped in a list
        :param db_exception: if set, the db call raises this and the
            conductor must surface it
        """
        self.mox.StubOutWithMock(db, name)
        if db_exception:
            # Recorded twice on purpose: the db method is hit once for the
            # ExpectedException assertion and once more through the
            # ExceptionHelper-wrapped conductor below.
            getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
            getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
        else:
            getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
        self.mox.ReplayAll()
        if db_exception:
            self.assertRaises(messaging.ExpectedException,
                              self.conductor.service_get_all_by,
                              self.context, **condargs)
            self.conductor = utils.ExceptionHelper(self.conductor)
            self.assertRaises(db_exception.__class__,
                              self.conductor.service_get_all_by,
                              self.context, **condargs)
        else:
            result = self.conductor.service_get_all_by(self.context,
                                                       **condargs)
            if db_result_listified:
                self.assertEqual(['fake-result'], result)
            else:
                self.assertEqual('fake-result', result)
    def test_service_get_all(self):
        self._test_stubbed('service_get_all', (),
                           dict(host=None, topic=None, binary=None))
    def test_service_get_by_host_and_topic(self):
        self._test_stubbed('service_get_by_host_and_topic',
                           ('host', 'topic'),
                           dict(topic='topic', host='host', binary=None))
    def test_service_get_all_by_topic(self):
        self._test_stubbed('service_get_all_by_topic',
                           ('topic',),
                           dict(topic='topic', host=None, binary=None))
    def test_service_get_all_by_host(self):
        self._test_stubbed('service_get_all_by_host',
                           ('host',),
                           dict(host='host', topic=None, binary=None))
    def test_service_get_by_compute_host(self):
        # The compute-host variant returns its result wrapped in a list.
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host', binary=None),
                           db_result_listified=True)
    def test_service_get_by_args(self):
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary', topic=None))
    def test_service_get_by_compute_host_not_found(self):
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host', binary=None),
                           db_exception=exc.ComputeHostNotFound(host='host'))
    def test_service_get_by_args_not_found(self):
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary', topic=None),
                           db_exception=exc.HostBinaryNotFound(binary='binary',
                                                               host='host'))
    def test_security_groups_trigger_handler(self):
        """The args list is unpacked before reaching the security group
        api's trigger_handler."""
        self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                                 'trigger_handler')
        self.conductor_manager.security_group_api.trigger_handler('event',
                                                                  self.context,
                                                                  'args')
        self.mox.ReplayAll()
        self.conductor.security_groups_trigger_handler(self.context,
                                                       'event', ['args'])
    def _test_object_action(self, is_classmethod, raise_exception):
        """Round-trip an object method call through the conductor.

        Verifies object_action/object_class_action invoke the named
        method and hand back its return value (or raise its exception).
        """
        class TestObject(obj_base.NovaObject):
            def foo(self, context, raise_exception=False):
                if raise_exception:
                    raise Exception('test')
                else:
                    return 'test'
            @classmethod
            def bar(cls, context, raise_exception=False):
                if raise_exception:
                    raise Exception('test')
                else:
                    return 'test'
        obj = TestObject()
        if is_classmethod:
            result = self.conductor.object_class_action(
                self.context, TestObject.obj_name(), 'bar', '1.0',
                tuple(), {'raise_exception': raise_exception})
        else:
            updates, result = self.conductor.object_action(
                self.context, obj, 'foo', tuple(),
                {'raise_exception': raise_exception})
        self.assertEqual('test', result)
    def test_object_action(self):
        self._test_object_action(False, False)
    def test_object_action_on_raise(self):
        self.assertRaises(messaging.ExpectedException,
                          self._test_object_action, False, True)
    def test_object_class_action(self):
        self._test_object_action(True, False)
    def test_object_class_action_on_raise(self):
        self.assertRaises(messaging.ExpectedException,
                          self._test_object_action, True, True)
    def test_object_action_copies_object(self):
        """object_action must operate on a copy so mutations show up as
        reported updates."""
        class TestObject(obj_base.NovaObject):
            fields = {'dict': fields.DictOfStringsField()}
            def touch_dict(self, context):
                self.dict['foo'] = 'bar'
                self.obj_reset_changes()
        obj = TestObject()
        obj.dict = {}
        obj.obj_reset_changes()
        updates, result = self.conductor.object_action(
            self.context, obj, 'touch_dict', tuple(), {})
        # NOTE(danms): If conductor did not properly copy the object, then
        # the new and reference copies of the nested dict object will be
        # the same, and thus 'dict' will not be reported as changed
        self.assertIn('dict', updates)
        self.assertEqual({'foo': 'bar'}, updates['dict'])
    def _test_expected_exceptions(self, db_method, conductor_method, errors,
                                  *args, **kwargs):
        """Assert each db-raised error surfaces as ExpectedException.

        :param db_method: db api name to patch
        :param conductor_method: bound conductor method to invoke
        :param errors: iterable of exception instances to raise in turn
        """
        # Tests that expected exceptions are handled properly.
        for error in errors:
            with mock.patch.object(db, db_method, side_effect=error):
                self.assertRaises(messaging.ExpectedException,
                                  conductor_method,
                                  self.context, *args, **kwargs)
    def test_action_event_start_expected_exceptions(self):
        error = exc.InstanceActionNotFound(request_id='1', instance_uuid='2')
        self._test_expected_exceptions(
            'action_event_start', self.conductor.action_event_start, [error],
            {'foo': 'bar'})
    def test_action_event_finish_expected_exceptions(self):
        errors = (exc.InstanceActionNotFound(request_id='1',
                                             instance_uuid='2'),
                  exc.InstanceActionEventNotFound(event='1', action_id='2'))
        self._test_expected_exceptions(
            'action_event_finish', self.conductor.action_event_finish,
            errors, {'foo': 'bar'})
    def test_instance_update_expected_exceptions(self):
        errors = (exc.InvalidUUID(uuid='foo'),
                  exc.InstanceNotFound(instance_id=1),
                  exc.UnexpectedTaskStateError(expected='foo',
                                               actual='bar'))
        self._test_expected_exceptions(
            'instance_update', self.conductor.instance_update,
            errors, None, {'foo': 'bar'}, None)
    def test_instance_get_by_uuid_expected_exceptions(self):
        error = exc.InstanceNotFound(instance_id=1)
        self._test_expected_exceptions(
            'instance_get_by_uuid', self.conductor.instance_get_by_uuid,
            [error], None, [])
    def test_aggregate_host_add_expected_exceptions(self):
        error = exc.AggregateHostExists(aggregate_id=1, host='foo')
        self._test_expected_exceptions(
            'aggregate_host_add', self.conductor.aggregate_host_add,
            [error], {'id': 1}, None)
    def test_aggregate_host_delete_expected_exceptions(self):
        error = exc.AggregateHostNotFound(aggregate_id=1, host='foo')
        self._test_expected_exceptions(
            'aggregate_host_delete', self.conductor.aggregate_host_delete,
            [error], {'id': 1}, None)
    def test_service_update_expected_exceptions(self):
        error = exc.ServiceNotFound(service_id=1)
        self._test_expected_exceptions(
            'service_update',
            self.conductor.service_update,
            [error], {'id': 1}, None)
    def test_service_destroy_expected_exceptions(self):
        error = exc.ServiceNotFound(service_id=1)
        self._test_expected_exceptions(
            'service_destroy',
            self.conductor.service_destroy,
            [error], 1)
    def _setup_aggregate_with_host(self):
        """Create a real aggregate, add host 'bar' via the conductor, and
        return the refreshed aggregate ref (uses the real db)."""
        aggregate_ref = db.aggregate_create(self.context.elevated(),
                {'name': 'foo'}, metadata={'availability_zone': 'foo'})
        self.conductor.aggregate_host_add(self.context, aggregate_ref, 'bar')
        aggregate_ref = db.aggregate_get(self.context.elevated(),
                                         aggregate_ref['id'])
        return aggregate_ref
    def test_aggregate_host_add(self):
        aggregate_ref = self._setup_aggregate_with_host()
        self.assertIn('bar', aggregate_ref['hosts'])
        db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
    def test_aggregate_host_delete(self):
        aggregate_ref = self._setup_aggregate_with_host()
        self.conductor.aggregate_host_delete(self.context, aggregate_ref,
                'bar')
        aggregate_ref = db.aggregate_get(self.context.elevated(),
                                         aggregate_ref['id'])
        self.assertNotIn('bar', aggregate_ref['hosts'])
        db.aggregate_delete(self.context.elevated(), aggregate_ref['id'])
    def test_network_migrate_instance_start(self):
        """Delegates straight to network_api.migrate_instance_start."""
        self.mox.StubOutWithMock(self.conductor_manager.network_api,
                                 'migrate_instance_start')
        self.conductor_manager.network_api.migrate_instance_start(self.context,
                                                                  'instance',
                                                                  'migration')
        self.mox.ReplayAll()
        self.conductor.network_migrate_instance_start(self.context,
                                                      'instance',
                                                      'migration')
    def test_network_migrate_instance_finish(self):
        """Delegates straight to network_api.migrate_instance_finish."""
        self.mox.StubOutWithMock(self.conductor_manager.network_api,
                                 'migrate_instance_finish')
        self.conductor_manager.network_api.migrate_instance_finish(
            self.context, 'instance', 'migration')
        self.mox.ReplayAll()
        self.conductor.network_migrate_instance_finish(self.context,
                                                       'instance',
                                                       'migration')
    def test_instance_destroy(self):
        """The instance dict's uuid is what reaches db.instance_destroy."""
        self.mox.StubOutWithMock(db, 'instance_destroy')
        db.instance_destroy(self.context, 'fake-uuid').AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.instance_destroy(self.context,
                                                 {'uuid': 'fake-uuid'})
        self.assertEqual(result, 'fake-result')
    def test_compute_unrescue(self):
        self.mox.StubOutWithMock(self.conductor_manager.compute_api,
                                 'unrescue')
        self.conductor_manager.compute_api.unrescue(self.context, 'instance')
        self.mox.ReplayAll()
        self.conductor.compute_unrescue(self.context, 'instance')
    def test_instance_get_active_by_window_joined(self):
        self.mox.StubOutWithMock(db, 'instance_get_active_by_window_joined')
        db.instance_get_active_by_window_joined(self.context, 'fake-begin',
                                                'fake-end', 'fake-proj',
                                                'fake-host')
        self.mox.ReplayAll()
        self.conductor.instance_get_active_by_window_joined(
            self.context, 'fake-begin', 'fake-end', 'fake-proj', 'fake-host')
    def test_instance_fault_create(self):
        self.mox.StubOutWithMock(db, 'instance_fault_create')
        db.instance_fault_create(self.context, 'fake-values').AndReturn(
            'fake-result')
        self.mox.ReplayAll()
        result = self.conductor.instance_fault_create(self.context,
                                                      'fake-values')
        self.assertEqual(result, 'fake-result')
    def test_action_event_start(self):
        self.mox.StubOutWithMock(db, 'action_event_start')
        db.action_event_start(self.context, mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.action_event_start(self.context, {})
    def test_action_event_finish(self):
        self.mox.StubOutWithMock(db, 'action_event_finish')
        db.action_event_finish(self.context, mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.action_event_finish(self.context, {})
    def test_agent_build_get_by_triple(self):
        self.mox.StubOutWithMock(db, 'agent_build_get_by_triple')
        db.agent_build_get_by_triple(self.context, 'fake-hv', 'fake-os',
                                     'fake-arch').AndReturn('it worked')
        self.mox.ReplayAll()
        result = self.conductor.agent_build_get_by_triple(self.context,
                                                          'fake-hv',
                                                          'fake-os',
                                                          'fake-arch')
        self.assertEqual(result, 'it worked')
class ConductorRPCAPITestCase(_BaseTestCase, test.TestCase):
    """Conductor RPC API Tests.

    Same behavior checks as the manager tests, but driven through the
    ConductorAPI RPC client against a running conductor service.
    """
    def setUp(self):
        super(ConductorRPCAPITestCase, self).setUp()
        # Start a real conductor service; the RPC client talks to it.
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor_manager = self.conductor_service.manager
        self.conductor = conductor_rpcapi.ConductorAPI()
    def test_block_device_mapping_update_or_create(self):
        """create=True -> db create, create=False -> db update, and no
        flag -> db update_or_create; each rehydrates a BDM object."""
        fake_bdm = {'id': 'fake-id'}
        self.mox.StubOutWithMock(db, 'block_device_mapping_create')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
        self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
                                 '_from_db_object')
        db.block_device_mapping_create(self.context, fake_bdm)
        block_device_obj.BlockDeviceMapping._from_db_object(
                self.context, mox.IgnoreArg(), mox.IgnoreArg())
        db.block_device_mapping_update(self.context, fake_bdm['id'], fake_bdm)
        block_device_obj.BlockDeviceMapping._from_db_object(
                self.context, mox.IgnoreArg(), mox.IgnoreArg())
        db.block_device_mapping_update_or_create(self.context, fake_bdm)
        block_device_obj.BlockDeviceMapping._from_db_object(
                self.context, mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=True)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm,
                                                             create=False)
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             fake_bdm)
    def _test_stubbed(self, name, dbargs, condargs,
                      db_result_listified=False, db_exception=None):
        """Stub db.<name> and call service_get_all_by over RPC.

        Unlike the manager-level test, the caller sees the original
        exception class directly (only one db call is recorded here).
        """
        self.mox.StubOutWithMock(db, name)
        if db_exception:
            getattr(db, name)(self.context, *dbargs).AndRaise(db_exception)
        else:
            getattr(db, name)(self.context, *dbargs).AndReturn('fake-result')
        self.mox.ReplayAll()
        if db_exception:
            self.assertRaises(db_exception.__class__,
                              self.conductor.service_get_all_by,
                              self.context, **condargs)
        else:
            result = self.conductor.service_get_all_by(self.context,
                                                       **condargs)
            if db_result_listified:
                self.assertEqual(['fake-result'], result)
            else:
                self.assertEqual('fake-result', result)
    def test_service_get_all(self):
        self._test_stubbed('service_get_all', (),
                           dict(topic=None, host=None, binary=None))
    def test_service_get_by_host_and_topic(self):
        self._test_stubbed('service_get_by_host_and_topic',
                           ('host', 'topic'),
                           dict(topic='topic', host='host', binary=None))
    def test_service_get_all_by_topic(self):
        self._test_stubbed('service_get_all_by_topic',
                           ('topic',),
                           dict(topic='topic', host=None, binary=None))
    def test_service_get_all_by_host(self):
        self._test_stubbed('service_get_all_by_host',
                           ('host',),
                           dict(host='host', topic=None, binary=None))
    def test_service_get_by_compute_host(self):
        # The compute-host variant returns its result wrapped in a list.
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host', binary=None),
                           db_result_listified=True)
    def test_service_get_by_args(self):
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary', topic=None))
    def test_service_get_by_compute_host_not_found(self):
        self._test_stubbed('service_get_by_compute_host',
                           ('host',),
                           dict(topic='compute', host='host', binary=None),
                           db_exception=exc.ComputeHostNotFound(host='host'))
    def test_service_get_by_args_not_found(self):
        self._test_stubbed('service_get_by_args',
                           ('host', 'binary'),
                           dict(host='host', binary='binary', topic=None),
                           db_exception=exc.HostBinaryNotFound(binary='binary',
                                                               host='host'))
    def test_security_groups_trigger_handler(self):
        """The args list is unpacked before reaching trigger_handler."""
        self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                                 'trigger_handler')
        self.conductor_manager.security_group_api.trigger_handler('event',
                                                                  self.context,
                                                                  'arg')
        self.mox.ReplayAll()
        self.conductor.security_groups_trigger_handler(self.context,
                                                       'event', ['arg'])
    @mock.patch.object(db, 'service_update')
    @mock.patch('oslo.messaging.RPCClient.prepare')
    def test_service_update_time_big(self, mock_prepare, mock_update):
        """With report_interval=10 the RPC call is prepared with
        timeout=9 (the timeout is derived from the report interval)."""
        CONF.set_override('report_interval', 10)
        services = {'id': 1}
        self.conductor.service_update(self.context, services, {})
        mock_prepare.assert_called_once_with(timeout=9)
    @mock.patch.object(db, 'service_update')
    @mock.patch('oslo.messaging.RPCClient.prepare')
    def test_service_update_time_small(self, mock_prepare, mock_update):
        """With report_interval=3 the RPC call is prepared with
        timeout=3."""
        CONF.set_override('report_interval', 3)
        services = {'id': 1}
        self.conductor.service_update(self.context, services, {})
        mock_prepare.assert_called_once_with(timeout=3)
    @mock.patch.object(db, 'service_update')
    @mock.patch('oslo.messaging.RPCClient.prepare')
    def test_service_update_no_time(self, mock_prepare, mock_update):
        """With no report_interval set, no timeout is passed at all."""
        CONF.set_override('report_interval', None)
        services = {'id': 1}
        self.conductor.service_update(self.context, services, {})
        mock_prepare.assert_called_once_with()
class ConductorAPITestCase(_BaseTestCase, test.TestCase):
    """Conductor API Tests.

    Exercises the public conductor_api.API surface against a running
    conductor service.
    """
    def setUp(self):
        super(ConductorAPITestCase, self).setUp()
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_api.API()
        self.conductor_manager = self.conductor_service.manager
        # NOTE(review): subclasses (e.g. LocalAPI) set this to the real db
        # module; None here — confirm how the shared base uses it.
        self.db = None
    def _do_update(self, instance_uuid, **updates):
        # NOTE(danms): the public API takes actual keyword arguments,
        # so override the base class here to make the call correctly
        return self.conductor.instance_update(self.context, instance_uuid,
                                              **updates)
    def test_bw_usage_get(self):
        """bw_usage_get passes through to the db.  bw_usage_update is
        stubbed with no expectation recorded, so any call to it would
        fail mox verification."""
        self.mox.StubOutWithMock(db, 'bw_usage_update')
        self.mox.StubOutWithMock(db, 'bw_usage_get')
        get_args = (self.context, 'uuid', 0, 'mac')
        db.bw_usage_get(*get_args).AndReturn('foo')
        self.mox.ReplayAll()
        result = self.conductor.bw_usage_get(*get_args)
        self.assertEqual(result, 'foo')
    def test_block_device_mapping_update_or_create(self):
        """The three distinct API entry points map onto the three
        distinct db calls, each rehydrating a BDM object."""
        self.mox.StubOutWithMock(db, 'block_device_mapping_create')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update')
        self.mox.StubOutWithMock(db, 'block_device_mapping_update_or_create')
        self.mox.StubOutWithMock(block_device_obj.BlockDeviceMapping,
                                 '_from_db_object')
        db.block_device_mapping_create(self.context, 'fake-bdm')
        block_device_obj.BlockDeviceMapping._from_db_object(
                self.context, mox.IgnoreArg(), mox.IgnoreArg())
        db.block_device_mapping_update(self.context,
                                       'fake-id', {'id': 'fake-id'})
        block_device_obj.BlockDeviceMapping._from_db_object(
                self.context, mox.IgnoreArg(), mox.IgnoreArg())
        db.block_device_mapping_update_or_create(self.context, 'fake-bdm')
        block_device_obj.BlockDeviceMapping._from_db_object(
                self.context, mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conductor.block_device_mapping_create(self.context, 'fake-bdm')
        self.conductor.block_device_mapping_update(self.context, 'fake-id', {})
        self.conductor.block_device_mapping_update_or_create(self.context,
                                                             'fake-bdm')
    def _test_stubbed(self, name, *args, **kwargs):
        """Stub db.<name> and call the same-named conductor API method.

        If the first positional arg is a context, it is used for the db
        expectation only (the conductor call always uses self.context).
        :keyword db_exception: make the db call raise; the API call must
            surface the same exception class
        :keyword returns: set False for methods expected to return None
        """
        if args and isinstance(args[0], FakeContext):
            ctxt = args[0]
            args = args[1:]
        else:
            ctxt = self.context
        db_exception = kwargs.get('db_exception')
        self.mox.StubOutWithMock(db, name)
        if db_exception:
            getattr(db, name)(ctxt, *args).AndRaise(db_exception)
        else:
            getattr(db, name)(ctxt, *args).AndReturn('fake-result')
        if name == 'service_destroy':
            # TODO(russellb) This is a hack ... SetUp() starts the conductor()
            # service. There is a cleanup step that runs after this test which
            # also deletes the associated service record. This involves a call
            # to db.service_destroy(), which we have stubbed out.
            db.service_destroy(mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        if db_exception:
            self.assertRaises(db_exception.__class__,
                              getattr(self.conductor, name),
                              self.context, *args)
        else:
            result = getattr(self.conductor, name)(self.context, *args)
            self.assertEqual(
                result, 'fake-result' if kwargs.get('returns', True) else None)
    def test_service_get_all(self):
        self._test_stubbed('service_get_all')
    def test_service_get_by_host_and_topic(self):
        self._test_stubbed('service_get_by_host_and_topic', 'host', 'topic')
    def test_service_get_all_by_topic(self):
        self._test_stubbed('service_get_all_by_topic', 'topic')
    def test_service_get_all_by_host(self):
        self._test_stubbed('service_get_all_by_host', 'host')
    def test_service_get_by_compute_host(self):
        self._test_stubbed('service_get_by_compute_host', 'host')
    def test_service_get_by_args(self):
        self._test_stubbed('service_get_by_args', 'host', 'binary')
    def test_service_get_by_compute_host_not_found(self):
        self._test_stubbed('service_get_by_compute_host', 'host',
                           db_exception=exc.ComputeHostNotFound(host='host'))
    def test_service_get_by_args_not_found(self):
        self._test_stubbed('service_get_by_args', 'host', 'binary',
                           db_exception=exc.HostBinaryNotFound(binary='binary',
                                                               host='host'))
    def test_service_create(self):
        self._test_stubbed('service_create', {})
    def test_service_destroy(self):
        # service_destroy returns nothing on success.
        self._test_stubbed('service_destroy', '', returns=False)
    def test_service_update(self):
        """The API accepts a service dict and passes its id to the db."""
        ctxt = self.context
        self.mox.StubOutWithMock(db, 'service_update')
        db.service_update(ctxt, '', {}).AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.service_update(self.context, {'id': ''}, {})
        self.assertEqual(result, 'fake-result')
    def test_instance_get_all_by_host_and_node(self):
        # The db expectation must see the elevated context.
        self._test_stubbed('instance_get_all_by_host_and_node',
                           self.context.elevated(), 'host', 'node')
    def test_instance_get_all_by_host(self):
        """Without a node, only the host-wide db call is made (the
        *_and_node stub has no recorded expectation)."""
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host')
        self.mox.StubOutWithMock(db, 'instance_get_all_by_host_and_node')
        db.instance_get_all_by_host(self.context.elevated(), 'host',
                                    None).AndReturn('fake-result')
        self.mox.ReplayAll()
        result = self.conductor.instance_get_all_by_host(self.context,
                                                         'host', None)
        self.assertEqual(result, 'fake-result')
    def test_wait_until_ready(self):
        """wait_until_ready keeps pinging until one succeeds: ten pings
        with timeout=10 are recorded, plus at least one with timeout=None
        before the 15th ping finally returns."""
        timeouts = []
        calls = dict(count=0)
        def fake_ping(context, message, timeout):
            timeouts.append(timeout)
            calls['count'] += 1
            if calls['count'] < 15:
                raise messaging.MessagingTimeout("fake")
        self.stubs.Set(self.conductor.base_rpcapi, 'ping', fake_ping)
        self.conductor.wait_until_ready(self.context)
        self.assertEqual(timeouts.count(10), 10)
        self.assertIn(None, timeouts)
    def test_security_groups_trigger_handler(self):
        self.mox.StubOutWithMock(self.conductor_manager.security_group_api,
                                 'trigger_handler')
        self.conductor_manager.security_group_api.trigger_handler('event',
                                                                  self.context,
                                                                  'arg')
        self.mox.ReplayAll()
        self.conductor.security_groups_trigger_handler(self.context,
                                                       'event', 'arg')
class ConductorLocalAPITestCase(ConductorAPITestCase):
    """Conductor LocalAPI Tests."""
    def setUp(self):
        super(ConductorLocalAPITestCase, self).setUp()
        # Re-point the fixtures at the in-process (no-RPC) implementation.
        self.conductor = conductor_api.LocalAPI()
        self.conductor_manager = self.conductor._manager._target
        self.db = db
    def test_client_exceptions(self):
        """LocalAPI raises errors directly, not wrapped in ClientException.

        An invalid update key makes the manager raise KeyError; seeing a
        bare KeyError here proves there is no RPC-style wrapping.
        """
        inst = self._create_fake_instance()
        self.assertRaises(KeyError, self._do_update,
                          inst['uuid'], foo='bar')
    def test_wait_until_ready(self):
        # The inherited test exercises RPC pings, which do not apply to
        # the local implementation; neutralize it.
        pass
class ConductorImportTest(test.TestCase):
    """Verify the conductor factory functions honor the use_local knob."""
    def test_import_conductor_local(self):
        """use_local=True in config yields the in-process implementations."""
        self.flags(use_local=True, group='conductor')
        api = conductor.API()
        task_api = conductor.ComputeTaskAPI()
        self.assertIsInstance(api, conductor_api.LocalAPI)
        self.assertIsInstance(task_api, conductor_api.LocalComputeTaskAPI)
    def test_import_conductor_rpc(self):
        """use_local=False in config yields the RPC-backed implementations."""
        self.flags(use_local=False, group='conductor')
        api = conductor.API()
        task_api = conductor.ComputeTaskAPI()
        self.assertIsInstance(api, conductor_api.API)
        self.assertIsInstance(task_api, conductor_api.ComputeTaskAPI)
    def test_import_conductor_override_to_local(self):
        """An explicit use_local=True argument beats the config option."""
        self.flags(use_local=False, group='conductor')
        api = conductor.API(use_local=True)
        task_api = conductor.ComputeTaskAPI(use_local=True)
        self.assertIsInstance(api, conductor_api.LocalAPI)
        self.assertIsInstance(task_api, conductor_api.LocalComputeTaskAPI)
class ConductorPolicyTest(test.TestCase):
    """Sanity checks on the conductor's allowed_updates whitelist."""
    def test_all_allowed_keys(self):
        """Every whitelisted key must be accepted by instance_update."""
        def _fake_update_and_get(*args, **kwargs):
            # Mimic the (old_ref, new_ref) return shape; values unused.
            return None, None
        self.stubs.Set(db, 'instance_update_and_get_original',
                       _fake_update_and_get)
        ctxt = context.RequestContext('fake-user', 'fake-project')
        conductor = conductor_api.LocalAPI()
        # datetime fields need datetime values; everything else takes a str.
        updates = dict(
            (key, timeutils.utcnow()
                  if key in conductor_manager.datetime_fields else 'foo')
            for key in conductor_manager.allowed_updates)
        with mock.patch('nova.objects.Instance._from_db_object'):
            conductor.instance_update(ctxt, 'fake-instance', **updates)
    def test_allowed_keys_are_real(self):
        """Each whitelisted key is an actual Instance model attribute."""
        instance = models.Instance()
        keys = list(conductor_manager.allowed_updates)
        # NOTE(danms): expected_task_state is a parameter that gets
        # passed to the db layer, but is not actually an instance attribute
        keys.remove('expected_task_state')
        for key in keys:
            self.assertTrue(hasattr(instance, key))
class _BaseTaskTestCase(object):
    def setUp(self):
        super(_BaseTaskTestCase, self).setUp()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = FakeContext(self.user_id, self.project_id)
        fake_server_actions.stub_out_action_events(self.stubs)
        def fake_deserialize_context(serializer, ctxt_dict):
            # Check the context survived (fake) RPC serialization with the
            # same user/project, then hand back our real context object.
            self.assertEqual(self.context.user_id, ctxt_dict['user_id'])
            self.assertEqual(self.context.project_id, ctxt_dict['project_id'])
            return self.context
        self.stubs.Set(rpc.RequestContextSerializer, 'deserialize_context',
                       fake_deserialize_context)
def _prepare_rebuild_args(self, update_args=None):
rebuild_args = {'new_pass': 'admin_password',
'injected_files': 'files_to_inject',
'image_ref': 'image_ref',
'orig_image_ref': 'orig_image_ref',
'orig_sys_metadata': 'orig_sys_meta',
'bdms': {},
'recreate': False,
'on_shared_storage': False,
'preserve_ephemeral': False,
'host': 'compute-host'}
if update_args:
rebuild_args.update(update_args)
return rebuild_args
    def test_live_migrate(self):
        """A live-migration request is delegated to live_migrate.execute
        with the destination and migration flags intact."""
        inst = fake_instance.fake_db_instance()
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst, [])
        self.mox.StubOutWithMock(live_migrate, 'execute')
        live_migrate.execute(self.context,
                             mox.IsA(objects.Instance),
                             'destination',
                             'block_migration',
                             'disk_over_commit')
        self.mox.ReplayAll()
        if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
                                       conductor_api.LocalComputeTaskAPI)):
            # The API method is actually 'live_migrate_instance'. It gets
            # converted into 'migrate_server' when doing RPC.
            self.conductor.live_migrate_instance(self.context, inst_obj,
                'destination', 'block_migration', 'disk_over_commit')
        else:
            self.conductor.migrate_server(self.context, inst_obj,
                {'host': 'destination'}, True, False, None,
                'block_migration', 'disk_over_commit')
    def _test_cold_migrate(self, clean_shutdown=True):
        """Record the full cold-migration chain and drive it end to end.

        Expected order: image metadata lookup, request-spec build,
        instance-group setup, scheduler destination selection, then
        prep_resize on the chosen host with the given clean_shutdown flag.
        """
        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(
                self.conductor_manager.compute_rpcapi, 'prep_resize')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')
        inst = fake_instance.fake_db_instance(image_ref='image_ref')
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst, [])
        flavor = flavors.get_default_flavor()
        flavor.extra_specs = {'extra_specs': 'fake'}
        request_spec = {'instance_type': obj_base.obj_to_primitive(flavor),
                        'instance_properties': {}}
        compute_utils.get_image_metadata(
            self.context, self.conductor_manager.image_api,
            'image_ref', mox.IsA(objects.Instance)).AndReturn('image')
        scheduler_utils.build_request_spec(
            self.context, 'image',
            [mox.IsA(objects.Instance)],
            instance_type=mox.IsA(objects.Flavor)).AndReturn(request_spec)
        scheduler_utils.setup_instance_group(self.context, request_spec, {})
        hosts = [dict(host='host1', nodename=None, limits={})]
        self.conductor_manager.scheduler_client.select_destinations(
            self.context, request_spec,
            {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(hosts)
        # The scheduler's pick must be recorded in the retry history.
        filter_properties = {'limits': {},
                             'retry': {'num_attempts': 1,
                                       'hosts': [['host1', None]]}}
        self.conductor_manager.compute_rpcapi.prep_resize(
            self.context, 'image', mox.IsA(objects.Instance),
            mox.IsA(objects.Flavor), 'host1', [], request_spec=request_spec,
            filter_properties=filter_properties, node=None,
            clean_shutdown=clean_shutdown)
        self.mox.ReplayAll()
        scheduler_hint = {'filter_properties': {}}
        if isinstance(self.conductor, (conductor_api.ComputeTaskAPI,
                                       conductor_api.LocalComputeTaskAPI)):
            # The API method is actually 'resize_instance'. It gets
            # converted into 'migrate_server' when doing RPC.
            self.conductor.resize_instance(
                self.context, inst_obj, {}, scheduler_hint, flavor, [],
                clean_shutdown)
        else:
            self.conductor.migrate_server(
                self.context, inst_obj, scheduler_hint,
                False, False, flavor, None, None, [],
                clean_shutdown)
    def test_cold_migrate(self):
        """Default path: clean shutdown before migrating."""
        self._test_cold_migrate()
    def test_cold_migrate_forced_shutdown(self):
        """clean_shutdown=False must propagate through to prep_resize."""
        self._test_cold_migrate(clean_shutdown=False)
    @mock.patch('nova.objects.Instance.refresh')
    @mock.patch('nova.utils.spawn_n')
    def test_build_instances(self, mock_spawn, mock_refresh):
        """Two instances scheduled to two hosts each get their own
        build_and_run_instance call with per-host retry history."""
        # Run spawned work synchronously so mox sees the calls in order.
        mock_spawn.side_effect = lambda f, *a, **k: f(*a, **k)
        instance_type = flavors.get_default_flavor()
        instances = [objects.Instance(context=self.context,
                                      id=i,
                                      uuid=uuid.uuid4(),
                                      flavor=instance_type) for i in xrange(2)]
        instance_type_p = obj_base.obj_to_primitive(instance_type)
        instance_properties = instance_obj.compat_instance(instances[0])
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(db,
                                 'block_device_mapping_get_all_by_instance')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'build_and_run_instance')
        spec = {'image': {'fake_data': 'should_pass_silently'},
                'instance_properties': instance_properties,
                'instance_type': instance_type_p,
                'num_instances': 2}
        scheduler_utils.setup_instance_group(self.context, spec, {})
        self.conductor_manager.scheduler_client.select_destinations(
                self.context, spec,
                {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
                        [{'host': 'host1', 'nodename': 'node1', 'limits': []},
                         {'host': 'host2', 'nodename': 'node2', 'limits': []}])
        # First instance goes to host1/node1 ...
        db.block_device_mapping_get_all_by_instance(self.context,
                instances[0].uuid, use_slave=False).AndReturn([])
        self.conductor_manager.compute_rpcapi.build_and_run_instance(
                self.context,
                instance=mox.IgnoreArg(),
                host='host1',
                image={'fake_data': 'should_pass_silently'},
                request_spec={
                    'image': {'fake_data': 'should_pass_silently'},
                    'instance_properties': instance_properties,
                    'instance_type': instance_type_p,
                    'num_instances': 2},
                filter_properties={'retry': {'num_attempts': 1,
                                             'hosts': [['host1', 'node1']]},
                                   'limits': []},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping=mox.IgnoreArg(),
                node='node1', limits=[])
        # ... and the second to host2/node2, with its own retry entry.
        db.block_device_mapping_get_all_by_instance(self.context,
                instances[1].uuid, use_slave=False).AndReturn([])
        self.conductor_manager.compute_rpcapi.build_and_run_instance(
                self.context,
                instance=mox.IgnoreArg(),
                host='host2',
                image={'fake_data': 'should_pass_silently'},
                request_spec={
                    'image': {'fake_data': 'should_pass_silently'},
                    'instance_properties': instance_properties,
                    'instance_type': instance_type_p,
                    'num_instances': 2},
                filter_properties={'limits': [],
                                   'retry': {'num_attempts': 1,
                                             'hosts': [['host2', 'node2']]}},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping=mox.IgnoreArg(),
                node='node2', limits=[])
        self.mox.ReplayAll()
        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(cast_as_call.CastAsCall(self.stubs))
        self.conductor.build_instances(self.context,
                instances=instances,
                image={'fake_data': 'should_pass_silently'},
                filter_properties={},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping='block_device_mapping',
                legacy_bdm=False)
def test_build_instances_scheduler_failure(self):
instances = [fake_instance.fake_instance_obj(self.context)
for i in xrange(2)]
image = {'fake-data': 'should_pass_silently'}
spec = {'fake': 'specs',
'instance_properties': instances[0]}
exception = exc.NoValidHost(reason='fake-reason')
self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
self.mox.StubOutWithMock(scheduler_utils, 'set_vm_state_and_notify')
self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
'select_destinations')
scheduler_utils.build_request_spec(self.context, image,
mox.IgnoreArg()).AndReturn(spec)
scheduler_utils.setup_instance_group(self.context, spec, {})
self.conductor_manager.scheduler_client.select_destinations(
self.context, spec,
{'retry': {'num_attempts': 1,
'hosts': []}}).AndRaise(exception)
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
for instance in instances:
scheduler_utils.set_vm_state_and_notify(
self.context, instance.uuid, 'compute_task', 'build_instances',
updates, exception, spec, self.conductor_manager.db)
self.mox.ReplayAll()
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
@mock.patch('nova.utils.spawn_n')
@mock.patch.object(scheduler_utils, 'build_request_spec')
@mock.patch.object(scheduler_utils, 'setup_instance_group')
@mock.patch.object(conductor_manager.ComputeTaskManager,
'_set_vm_state_and_notify')
def test_build_instances_scheduler_group_failure(self, state_mock,
sig_mock, bs_mock,
spawn_mock):
instances = [fake_instance.fake_instance_obj(self.context)
for i in range(2)]
image = {'fake-data': 'should_pass_silently'}
spec = {'fake': 'specs',
'instance_properties': instances[0]}
# NOTE(gibi): LocalComputeTaskAPI use eventlet spawn that makes mocking
# hard so use direct call instead.
spawn_mock.side_effect = lambda f, *a, **k: f(*a, **k)
bs_mock.return_value = spec
exception = exc.UnsupportedPolicyException(reason='fake-reason')
sig_mock.side_effect = exception
updates = {'vm_state': vm_states.ERROR, 'task_state': None}
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.conductor.build_instances(
context=self.context,
instances=instances,
image=image,
filter_properties={},
admin_password='admin_password',
injected_files='injected_files',
requested_networks=None,
security_groups='security_groups',
block_device_mapping='block_device_mapping',
legacy_bdm=False)
calls = []
for instance in instances:
calls.append(mock.call(self.context, instance.uuid,
'build_instances', updates, exception, spec))
state_mock.assert_has_calls(calls)
    def test_unshelve_instance_on_host(self):
        """Unshelving a SHELVED instance that still has a host starts it
        in place and deletes the shelved snapshot image.

        unshelve_instance on the compute RPC API is stubbed with no
        recorded expectation, so a call to it would fail the test.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED
        instance.task_state = task_states.UNSHELVING
        instance.save()
        system_metadata = instance.system_metadata

        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                'start_instance')
        self.mox.StubOutWithMock(self.conductor_manager, '_delete_image')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                'unshelve_instance')

        # Expected: start on the existing host, then snapshot cleanup.
        self.conductor_manager.compute_rpcapi.start_instance(self.context,
                instance)
        self.conductor_manager._delete_image(self.context,
                'fake_image_id')

        self.mox.ReplayAll()

        # Shelve bookkeeping the manager reads during unshelve.
        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(self.context, instance)
    def test_unshelve_offloaded_instance_glance_image_not_found(self):
        """If the shelved snapshot was deleted from glance, unshelving an
        offloaded instance raises UnshelveException and the instance is
        left in ERROR.
        """
        shelved_image_id = "image_not_found"

        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.task_state = task_states.UNSHELVING
        instance.save()
        system_metadata = instance.system_metadata

        self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')

        # The image lookup fails with ImageNotFound.
        e = exc.ImageNotFound(image_id=shelved_image_id)
        self.conductor_manager.image_api.get(
            self.context, shelved_image_id, show_deleted=False).AndRaise(e)
        self.mox.ReplayAll()

        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_host'] = 'fake-mini'
        system_metadata['shelved_image_id'] = shelved_image_id

        self.assertRaises(
            exc.UnshelveException,
            self.conductor_manager.unshelve_instance,
            self.context, instance)
        self.assertEqual(instance.vm_state, vm_states.ERROR)
def test_unshelve_offloaded_instance_image_id_is_none(self):
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = task_states.UNSHELVING
# 'shelved_image_id' is None for volumebacked instance
instance.system_metadata['shelved_image_id'] = None
with contextlib.nested(
mock.patch.object(self.conductor_manager,
'_schedule_instances'),
mock.patch.object(self.conductor_manager.compute_rpcapi,
'unshelve_instance'),
) as (schedule_mock, unshelve_mock):
schedule_mock.return_value = [{'host': 'fake_host',
'nodename': 'fake_node',
'limits': {}}]
self.conductor_manager.unshelve_instance(self.context, instance)
self.assertEqual(1, unshelve_mock.call_count)
    def test_unshelve_instance_schedule_and_rebuild(self):
        """Happy path for an offloaded instance: fetch the shelved image,
        schedule a destination and call unshelve_instance on it with the
        chosen host/node and the scheduler limits.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.save()
        filter_properties = {}
        system_metadata = instance.system_metadata

        self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
        self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                'unshelve_instance')

        # Image lookup, scheduling, then the compute RPC — in that order.
        self.conductor_manager.image_api.get(self.context,
                'fake_image_id', show_deleted=False).AndReturn('fake_image')
        self.conductor_manager._schedule_instances(self.context,
                'fake_image', filter_properties, instance).AndReturn(
                        [{'host': 'fake_host',
                          'nodename': 'fake_node',
                          'limits': {}}])
        self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
                instance, 'fake_host', image='fake_image',
                filter_properties={'limits': {}}, node='fake_node')
        self.mox.ReplayAll()

        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(self.context, instance)
def test_unshelve_instance_schedule_and_rebuild_novalid_host(self):
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.save()
system_metadata = instance.system_metadata
def fake_schedule_instances(context, image, filter_properties,
*instances):
raise exc.NoValidHost(reason='')
with contextlib.nested(
mock.patch.object(self.conductor_manager.image_api, 'get',
return_value='fake_image'),
mock.patch.object(self.conductor_manager, '_schedule_instances',
fake_schedule_instances)
) as (_get_image, _schedule_instances):
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.conductor_manager.unshelve_instance(self.context, instance)
_get_image.assert_has_calls([mock.call(self.context,
system_metadata['shelved_image_id'],
show_deleted=False)])
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
@mock.patch.object(conductor_manager.ComputeTaskManager,
'_schedule_instances',
side_effect=messaging.MessagingTimeout())
@mock.patch.object(image_api.API, 'get', return_value='fake_image')
def test_unshelve_instance_schedule_and_rebuild_messaging_exception(
self, mock_get_image, mock_schedule_instances):
instance = self._create_fake_instance_obj()
instance.vm_state = vm_states.SHELVED_OFFLOADED
instance.task_state = task_states.UNSHELVING
instance.save()
system_metadata = instance.system_metadata
system_metadata['shelved_at'] = timeutils.utcnow()
system_metadata['shelved_image_id'] = 'fake_image_id'
system_metadata['shelved_host'] = 'fake-mini'
self.assertRaises(messaging.MessagingTimeout,
self.conductor_manager.unshelve_instance,
self.context, instance)
mock_get_image.assert_has_calls([mock.call(self.context,
system_metadata['shelved_image_id'],
show_deleted=False)])
self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
self.assertIsNone(instance.task_state)
    def test_unshelve_instance_schedule_and_rebuild_volume_backed(self):
        """Volume-backed variant of schedule-and-rebuild: the image API
        returns None and unshelve_instance is called with image=None.
        """
        instance = self._create_fake_instance_obj()
        instance.vm_state = vm_states.SHELVED_OFFLOADED
        instance.save()
        filter_properties = {}
        system_metadata = instance.system_metadata

        self.mox.StubOutWithMock(self.conductor_manager.image_api, 'get')
        self.mox.StubOutWithMock(self.conductor_manager, '_schedule_instances')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                'unshelve_instance')

        # No snapshot exists for a volume-backed instance.
        self.conductor_manager.image_api.get(self.context,
                'fake_image_id', show_deleted=False).AndReturn(None)
        self.conductor_manager._schedule_instances(self.context,
                None, filter_properties, instance).AndReturn(
                        [{'host': 'fake_host',
                          'nodename': 'fake_node',
                          'limits': {}}])
        self.conductor_manager.compute_rpcapi.unshelve_instance(self.context,
                instance, 'fake_host', image=None,
                filter_properties={'limits': {}}, node='fake_node')
        self.mox.ReplayAll()

        system_metadata['shelved_at'] = timeutils.utcnow()
        system_metadata['shelved_image_id'] = 'fake_image_id'
        system_metadata['shelved_host'] = 'fake-mini'
        self.conductor_manager.unshelve_instance(self.context, instance)
def test_rebuild_instance(self):
inst_obj = self._create_fake_instance_obj()
rebuild_args = self._prepare_rebuild_args({'host': inst_obj.host})
with contextlib.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations')
) as (rebuild_mock, select_dest_mock):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
self.assertFalse(select_dest_mock.called)
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
**rebuild_args)
def test_rebuild_instance_with_scheduler(self):
inst_obj = self._create_fake_instance_obj()
inst_obj.host = 'noselect'
rebuild_args = self._prepare_rebuild_args({'host': None})
expected_host = 'thebesthost'
request_spec = {}
filter_properties = {'ignore_hosts': [(inst_obj.host)]}
with contextlib.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(scheduler_utils, 'setup_instance_group',
return_value=False),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations',
return_value=[{'host': expected_host}]),
mock.patch('nova.scheduler.utils.build_request_spec',
return_value=request_spec)
) as (rebuild_mock, sig_mock, select_dest_mock, bs_mock):
self.conductor_manager.rebuild_instance(context=self.context,
instance=inst_obj,
**rebuild_args)
select_dest_mock.assert_called_once_with(self.context,
request_spec,
filter_properties)
rebuild_args['host'] = expected_host
rebuild_mock.assert_called_once_with(self.context,
instance=inst_obj,
**rebuild_args)
def test_rebuild_instance_with_scheduler_no_host(self):
inst_obj = self._create_fake_instance_obj()
inst_obj.host = 'noselect'
rebuild_args = self._prepare_rebuild_args({'host': None})
request_spec = {}
filter_properties = {'ignore_hosts': [(inst_obj.host)]}
with contextlib.nested(
mock.patch.object(self.conductor_manager.compute_rpcapi,
'rebuild_instance'),
mock.patch.object(scheduler_utils, 'setup_instance_group',
return_value=False),
mock.patch.object(self.conductor_manager.scheduler_client,
'select_destinations',
side_effect=exc.NoValidHost(reason='')),
mock.patch('nova.scheduler.utils.build_request_spec',
return_value=request_spec)
) as (rebuild_mock, sig_mock, select_dest_mock, bs_mock):
self.assertRaises(exc.NoValidHost,
self.conductor_manager.rebuild_instance,
context=self.context, instance=inst_obj,
**rebuild_args)
select_dest_mock.assert_called_once_with(self.context,
request_spec,
filter_properties)
self.assertFalse(rebuild_mock.called)
@mock.patch('nova.utils.spawn_n')
@mock.patch.object(conductor_manager.compute_rpcapi.ComputeAPI,
'rebuild_instance')
@mock.patch.object(scheduler_utils, 'setup_instance_group')
@mock.patch.object(conductor_manager.scheduler_client.SchedulerClient,
'select_destinations')
@mock.patch('nova.scheduler.utils.build_request_spec')
@mock.patch.object(conductor_manager.ComputeTaskManager,
'_set_vm_state_and_notify')
def test_rebuild_instance_with_scheduler_group_failure(self,
state_mock,
bs_mock,
select_dest_mock,
sig_mock,
rebuild_mock,
spawn_mock):
inst_obj = self._create_fake_instance_obj()
rebuild_args = self._prepare_rebuild_args({'host': None})
request_spec = {}
bs_mock.return_value = request_spec
# NOTE(gibi): LocalComputeTaskAPI use eventlet spawn that makes mocking
# hard so use direct call instead.
spawn_mock.side_effect = lambda f, *a, **k: f(*a, **k)
exception = exc.UnsupportedPolicyException(reason='')
sig_mock.side_effect = exception
# build_instances() is a cast, we need to wait for it to complete
self.useFixture(cast_as_call.CastAsCall(self.stubs))
self.assertRaises(exc.UnsupportedPolicyException,
self.conductor.rebuild_instance,
self.context,
inst_obj,
**rebuild_args)
updates = {'vm_state': vm_states.ACTIVE, 'task_state': None}
state_mock.assert_called_once_with(self.context, inst_obj.uuid,
'rebuild_server', updates,
exception, request_spec)
self.assertFalse(select_dest_mock.called)
self.assertFalse(rebuild_mock.called)
class ConductorTaskTestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
    """ComputeTaskManager Tests.

    Exercises the conductor's ComputeTaskManager directly (no RPC API
    wrapper): migrate_server error handling, _set_vm_state_and_notify,
    _cold_migrate rollback paths, and build_instances lookup edge cases.
    """
    def setUp(self):
        super(ConductorTaskTestCase, self).setUp()
        self.conductor = conductor_manager.ComputeTaskManager()
        # Both attributes point at the same manager so the shared
        # _BaseTaskTestCase helpers work against this class too.
        self.conductor_manager = self.conductor

    def test_migrate_server_fails_with_rebuild(self):
        """migrate_server rejects this argument combination (rebuild
        requested) with NotImplementedError.
        """
        self.assertRaises(NotImplementedError, self.conductor.migrate_server,
            self.context, None, None, True, True, None, None, None)

    def test_migrate_server_fails_with_flavor(self):
        """migrate_server rejects a live migration combined with a flavor
        change with NotImplementedError.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        self.assertRaises(NotImplementedError, self.conductor.migrate_server,
            self.context, None, None, True, False, flavor, None, None)

    def _build_request_spec(self, instance):
        # Minimal request spec matching what migrate_server builds for
        # the error notification.
        return {
            'instance_properties': {
                'uuid': instance['uuid'], },
        }

    def _test_migrate_server_deals_with_expected_exceptions(self, ex):
        """Template: live_migrate.execute raises *ex*; migrate_server must
        re-raise it and notify with the instance put back to ACTIVE.
        """
        instance = fake_instance.fake_db_instance(uuid='uuid',
                                                  vm_state=vm_states.ACTIVE)
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        self.mox.StubOutWithMock(live_migrate, 'execute')
        self.mox.StubOutWithMock(scheduler_utils,
                'set_vm_state_and_notify')

        live_migrate.execute(self.context, mox.IsA(objects.Instance),
                             'destination', 'block_migration',
                             'disk_over_commit').AndRaise(ex)

        scheduler_utils.set_vm_state_and_notify(self.context,
                inst_obj.uuid,
                'compute_task', 'migrate_server',
                {'vm_state': vm_states.ACTIVE,
                 'task_state': None,
                 'expected_task_state': task_states.MIGRATING},
                ex, self._build_request_spec(inst_obj),
                self.conductor_manager.db)
        self.mox.ReplayAll()

        # ExceptionHelper unwraps the manager's expected-exception
        # wrapping so assertRaises sees the original type.
        self.conductor = utils.ExceptionHelper(self.conductor)

        self.assertRaises(type(ex),
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None, 'block_migration',
            'disk_over_commit')

    def test_migrate_server_deals_with_invalidcpuinfo_exception(self):
        """InvalidCPUInfo from live_migrate.execute is re-raised and the
        instance is set back to ACTIVE with a notification.
        """
        instance = fake_instance.fake_db_instance(uuid='uuid',
                                                  vm_state=vm_states.ACTIVE)
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        self.mox.StubOutWithMock(live_migrate, 'execute')
        self.mox.StubOutWithMock(scheduler_utils,
                'set_vm_state_and_notify')

        ex = exc.InvalidCPUInfo(reason="invalid cpu info.")

        live_migrate.execute(self.context, mox.IsA(objects.Instance),
                             'destination', 'block_migration',
                             'disk_over_commit').AndRaise(ex)

        scheduler_utils.set_vm_state_and_notify(self.context,
                inst_obj.uuid,
                'compute_task', 'migrate_server',
                {'vm_state': vm_states.ACTIVE,
                 'task_state': None,
                 'expected_task_state': task_states.MIGRATING},
                ex, self._build_request_spec(inst_obj),
                self.conductor_manager.db)
        self.mox.ReplayAll()

        self.conductor = utils.ExceptionHelper(self.conductor)

        self.assertRaises(exc.InvalidCPUInfo,
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None, 'block_migration',
            'disk_over_commit')

    def test_migrate_server_deals_with_InstanceInvalidState(self):
        """InstanceInvalidState is one of the expected exceptions."""
        ex = exc.InstanceInvalidState(instance_uuid="fake", attr='',
                                      state='', method='')
        self._test_migrate_server_deals_with_expected_exceptions(ex)

    def test_migrate_server_deals_with_DestinationHypervisorTooOld(self):
        """DestinationHypervisorTooOld is one of the expected exceptions."""
        ex = exc.DestinationHypervisorTooOld()
        self._test_migrate_server_deals_with_expected_exceptions(ex)

    def test_migrate_server_deals_with_HypervisorUnavailable(self):
        """HypervisorUnavailable is one of the expected exceptions."""
        ex = exc.HypervisorUnavailable(host='dummy')
        self._test_migrate_server_deals_with_expected_exceptions(ex)

    def test_migrate_server_deals_with_LiveMigrationWithOldNovaNotSafe(self):
        """LiveMigrationWithOldNovaNotSafe is an expected exception."""
        ex = exc.LiveMigrationWithOldNovaNotSafe(server='dummy')
        self._test_migrate_server_deals_with_expected_exceptions(ex)

    @mock.patch.object(scheduler_utils, 'set_vm_state_and_notify')
    @mock.patch.object(live_migrate, 'execute')
    def test_migrate_server_deals_with_unexpected_exceptions(self,
            mock_live_migrate, mock_set_state):
        """An unexpected error (IOError) is wrapped in MigrationError, the
        instance is set to ERROR, and the original message is preserved.
        """
        expected_ex = IOError('fake error')
        mock_live_migrate.side_effect = expected_ex
        instance = fake_instance.fake_db_instance()
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), instance, [])
        ex = self.assertRaises(exc.MigrationError,
            self.conductor.migrate_server, self.context, inst_obj,
            {'host': 'destination'}, True, False, None, 'block_migration',
            'disk_over_commit')
        request_spec = {'instance_properties': {
                'uuid': instance['uuid'], },
        }
        mock_set_state.assert_called_once_with(self.context,
                instance['uuid'],
                'compute_task', 'migrate_server',
                dict(vm_state=vm_states.ERROR,
                     task_state=inst_obj.task_state,
                     expected_task_state=task_states.MIGRATING,),
                expected_ex, request_spec, self.conductor.db)
        self.assertEqual(ex.kwargs['reason'], six.text_type(expected_ex))

    def test_set_vm_state_and_notify(self):
        """_set_vm_state_and_notify delegates to the scheduler_utils
        helper, appending the manager's db handle.
        """
        self.mox.StubOutWithMock(scheduler_utils,
                                 'set_vm_state_and_notify')
        scheduler_utils.set_vm_state_and_notify(
                self.context, 1, 'compute_task', 'method', 'updates',
                'ex', 'request_spec', self.conductor.db)

        self.mox.ReplayAll()

        self.conductor._set_vm_state_and_notify(
                self.context, 1, 'method', 'updates', 'ex', 'request_spec')

    def test_cold_migrate_no_valid_host_back_in_active_state(self):
        """NoValidHost during cold migrate of an ACTIVE instance: notify
        with ACTIVE, roll back quota, re-raise.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              instance_type_id=flavor['id'])
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'

        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.conductor,
                                 '_set_vm_state_and_notify')
        self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')

        compute_utils.get_image_metadata(
            self.context, self.conductor_manager.image_api,
            'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)

        scheduler_utils.build_request_spec(
            self.context, image, [inst_obj],
            instance_type=flavor).AndReturn(request_spec)
        scheduler_utils.setup_instance_group(self.context, request_spec,
                                             filter_props)

        exc_info = exc.NoValidHost(reason="")

        self.conductor.scheduler_client.select_destinations(
            self.context, request_spec,
            filter_props).AndRaise(exc_info)

        updates = {'vm_state': vm_states.ACTIVE,
                   'task_state': None}

        self.conductor._set_vm_state_and_notify(self.context,
                                                inst_obj.uuid,
                                                'migrate_server',
                                                updates, exc_info,
                                                request_spec)
        # NOTE(mriedem): Validate that the quota rollback is using
        # the correct project_id and user_id.
        project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                           inst_obj)
        quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
                              user_id=user_id)

        self.mox.ReplayAll()

        self.assertRaises(exc.NoValidHost,
                          self.conductor._cold_migrate,
                          self.context, inst_obj,
                          flavor, filter_props, [resvs],
                          clean_shutdown=True)

    def test_cold_migrate_no_valid_host_back_in_stopped_state(self):
        """Same as the ACTIVE variant, but a STOPPED instance is notified
        back into STOPPED.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              vm_state=vm_states.STOPPED,
                                              instance_type_id=flavor['id'])
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'

        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.conductor,
                                 '_set_vm_state_and_notify')
        self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')

        compute_utils.get_image_metadata(
            self.context, self.conductor_manager.image_api,
            'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)

        scheduler_utils.build_request_spec(
            self.context, image, [inst_obj],
            instance_type=flavor).AndReturn(request_spec)
        scheduler_utils.setup_instance_group(self.context, request_spec,
                                             filter_props)

        exc_info = exc.NoValidHost(reason="")

        self.conductor.scheduler_client.select_destinations(
            self.context, request_spec,
            filter_props).AndRaise(exc_info)

        updates = {'vm_state': vm_states.STOPPED,
                   'task_state': None}

        self.conductor._set_vm_state_and_notify(self.context,
                                                inst_obj.uuid,
                                                'migrate_server',
                                                updates, exc_info,
                                                request_spec)
        # NOTE(mriedem): Validate that the quota rollback is using
        # the correct project_id and user_id.
        project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                           inst_obj)
        quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
                              user_id=user_id)

        self.mox.ReplayAll()

        self.assertRaises(exc.NoValidHost,
                          self.conductor._cold_migrate, self.context,
                          inst_obj, flavor, filter_props, [resvs],
                          clean_shutdown=True)

    def test_cold_migrate_no_valid_host_error_msg(self):
        """With an unchanged flavor the NoValidHost message mentions
        'cold migrate'.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              vm_state=vm_states.STOPPED,
                                              instance_type_id=flavor['id'])
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'

        with contextlib.nested(
            mock.patch.object(compute_utils, 'get_image_metadata',
                              return_value=image),
            mock.patch.object(scheduler_utils, 'build_request_spec',
                              return_value=request_spec),
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False),
            mock.patch.object(self.conductor, '_set_vm_state_and_notify'),
            mock.patch.object(self.conductor.scheduler_client,
                              'select_destinations',
                              side_effect=exc.NoValidHost(reason=""))
        ) as (image_mock, brs_mock, sig_mock, set_vm_mock, select_dest_mock):
            nvh = self.assertRaises(exc.NoValidHost,
                                    self.conductor._cold_migrate, self.context,
                                    inst_obj, flavor, filter_props, [resvs],
                                    clean_shutdown=True)
            self.assertIn('cold migrate', nvh.message)

    @mock.patch.object(compute_utils, 'get_image_metadata')
    @mock.patch('nova.scheduler.utils.build_request_spec')
    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(conductor_manager.ComputeTaskManager,
                       '_set_vm_state_and_notify')
    def test_cold_migrate_no_valid_host_in_group(self,
                                                 set_vm_mock,
                                                 sig_mock,
                                                 brs_mock,
                                                 image_mock):
        """A server-group policy failure during cold migrate notifies a
        STOPPED state and re-raises UnsupportedPolicyException.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              vm_state=vm_states.STOPPED,
                                              instance_type_id=flavor['id'])
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        exception = exc.UnsupportedPolicyException(reason='')

        image_mock.return_value = image
        brs_mock.return_value = request_spec
        sig_mock.side_effect = exception

        self.assertRaises(exc.UnsupportedPolicyException,
                          self.conductor._cold_migrate, self.context,
                          inst_obj, flavor, filter_props, [resvs],
                          clean_shutdown=True)

        updates = {'vm_state': vm_states.STOPPED, 'task_state': None}
        set_vm_mock.assert_called_once_with(self.context, inst_obj.uuid,
                                            'migrate_server', updates,
                                            exception, request_spec)

    def test_cold_migrate_exception_host_in_error_state_and_raise(self):
        """If prep_resize blows up after a host was selected, the
        exception is re-raised after notify and quota rollback.
        """
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              vm_state=vm_states.STOPPED)
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'
        hosts = [dict(host='host1', nodename=None, limits={})]

        self.mox.StubOutWithMock(compute_utils, 'get_image_metadata')
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(scheduler_utils,
                                 'populate_filter_properties')
        self.mox.StubOutWithMock(self.conductor.compute_rpcapi,
                                 'prep_resize')
        self.mox.StubOutWithMock(self.conductor,
                                 '_set_vm_state_and_notify')
        self.mox.StubOutWithMock(quota.QUOTAS, 'rollback')

        compute_utils.get_image_metadata(
            self.context, self.conductor_manager.image_api,
            'fake-image_ref', mox.IsA(objects.Instance)).AndReturn(image)

        # The flavor is passed through opaquely here, so a plain string
        # stands in for it.
        scheduler_utils.build_request_spec(
            self.context, image, [inst_obj],
            instance_type='flavor').AndReturn(request_spec)
        scheduler_utils.setup_instance_group(self.context, request_spec,
                                             filter_props)

        expected_filter_props = {'retry': {'num_attempts': 1,
                                           'hosts': []},
                                 'context': None}
        self.conductor.scheduler_client.select_destinations(
            self.context, request_spec,
            expected_filter_props).AndReturn(hosts)

        scheduler_utils.populate_filter_properties(filter_props,
                                                   hosts[0])

        exc_info = test.TestingException('something happened')

        # NOTE(review): this second expectation omits 'context' —
        # presumably _cold_migrate pops it before prep_resize; confirm
        # against the manager implementation.
        expected_filter_props = {'retry': {'num_attempts': 1,
                                           'hosts': []}}

        self.conductor.compute_rpcapi.prep_resize(
            self.context, image, inst_obj,
            'flavor', hosts[0]['host'], [resvs],
            request_spec=request_spec,
            filter_properties=expected_filter_props,
            node=hosts[0]['nodename'],
            clean_shutdown=True).AndRaise(exc_info)

        updates = {'vm_state': vm_states.STOPPED,
                   'task_state': None}

        self.conductor._set_vm_state_and_notify(self.context,
                                                inst_obj.uuid,
                                                'migrate_server',
                                                updates, exc_info,
                                                request_spec)
        # NOTE(mriedem): Validate that the quota rollback is using
        # the correct project_id and user_id.
        project_id, user_id = quotas_obj.ids_from_instance(self.context,
                                                           inst_obj)
        quota.QUOTAS.rollback(self.context, [resvs], project_id=project_id,
                              user_id=user_id)

        self.mox.ReplayAll()

        self.assertRaises(test.TestingException,
                          self.conductor._cold_migrate,
                          self.context, inst_obj, 'flavor',
                          filter_props, [resvs],
                          clean_shutdown=True)

    def test_resize_no_valid_host_error_msg(self):
        """With a different target flavor the NoValidHost message
        mentions 'resize' instead of 'cold migrate'.
        """
        flavor = flavors.get_flavor_by_name('m1.tiny')
        flavor_new = flavors.get_flavor_by_name('m1.small')
        inst = fake_instance.fake_db_instance(image_ref='fake-image_ref',
                                              vm_state=vm_states.STOPPED,
                                              instance_type_id=flavor['id'])
        inst_obj = objects.Instance._from_db_object(
            self.context, objects.Instance(), inst,
            expected_attrs=[])
        request_spec = dict(instance_type=dict(extra_specs=dict()),
                            instance_properties=dict())
        filter_props = dict(context=None)
        resvs = 'fake-resvs'
        image = 'fake-image'

        with contextlib.nested(
            mock.patch.object(compute_utils, 'get_image_metadata',
                              return_value=image),
            mock.patch.object(scheduler_utils, 'build_request_spec',
                              return_value=request_spec),
            mock.patch.object(scheduler_utils, 'setup_instance_group',
                              return_value=False),
            mock.patch.object(self.conductor, '_set_vm_state_and_notify'),
            mock.patch.object(self.conductor.scheduler_client,
                              'select_destinations',
                              side_effect=exc.NoValidHost(reason=""))
        ) as (image_mock, brs_mock, sig_mock, vm_st_mock, select_dest_mock):
            nvh = self.assertRaises(exc.NoValidHost,
                                    self.conductor._cold_migrate, self.context,
                                    inst_obj, flavor_new, filter_props,
                                    [resvs], clean_shutdown=True)
            self.assertIn('resize', nvh.message)

    def test_build_instances_instance_not_found(self):
        """If an instance disappears (InstanceNotFound on refresh) after
        scheduling, it is skipped and only the surviving instance is
        built on its selected host.
        """
        instances = [fake_instance.fake_instance_obj(self.context)
                for i in xrange(2)]
        self.mox.StubOutWithMock(instances[0], 'refresh')
        self.mox.StubOutWithMock(instances[1], 'refresh')
        image = {'fake-data': 'should_pass_silently'}
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}
        self.mox.StubOutWithMock(scheduler_utils, 'build_request_spec')
        self.mox.StubOutWithMock(scheduler_utils, 'setup_instance_group')
        self.mox.StubOutWithMock(self.conductor_manager.scheduler_client,
                                 'select_destinations')
        self.mox.StubOutWithMock(self.conductor_manager.compute_rpcapi,
                                 'build_and_run_instance')

        scheduler_utils.build_request_spec(self.context, image,
                mox.IgnoreArg()).AndReturn(spec)
        scheduler_utils.setup_instance_group(self.context, spec, {})
        self.conductor_manager.scheduler_client.select_destinations(
                self.context, spec,
                {'retry': {'num_attempts': 1, 'hosts': []}}).AndReturn(
                        [{'host': 'host1', 'nodename': 'node1', 'limits': []},
                         {'host': 'host2', 'nodename': 'node2', 'limits': []}])
        # First instance vanished between scheduling and build.
        instances[0].refresh().AndRaise(
                exc.InstanceNotFound(instance_id=instances[0].uuid))
        instances[1].refresh()
        self.conductor_manager.compute_rpcapi.build_and_run_instance(
                self.context, instance=instances[1], host='host2',
                image={'fake-data': 'should_pass_silently'}, request_spec=spec,
                filter_properties={'limits': [],
                                   'retry': {'num_attempts': 1,
                                             'hosts': [['host2',
                                                        'node2']]}},
                admin_password='admin_password',
                injected_files='injected_files',
                requested_networks=None,
                security_groups='security_groups',
                block_device_mapping=mox.IsA(objects.BlockDeviceMappingList),
                node='node2', limits=[])
        self.mox.ReplayAll()

        # build_instances() is a cast, we need to wait for it to complete
        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        self.conductor.build_instances(self.context,
                          instances=instances,
                          image=image,
                          filter_properties={},
                          admin_password='admin_password',
                          injected_files='injected_files',
                          requested_networks=None,
                          security_groups='security_groups',
                          block_device_mapping='block_device_mapping',
                          legacy_bdm=False)

    @mock.patch.object(scheduler_utils, 'setup_instance_group')
    @mock.patch.object(scheduler_utils, 'build_request_spec')
    def test_build_instances_info_cache_not_found(self, build_request_spec,
                                                  setup_instance_group):
        """An InstanceInfoCacheNotFound on refresh likewise skips that
        instance; the other one is still built on its selected host.
        """
        instances = [fake_instance.fake_instance_obj(self.context)
                for i in xrange(2)]
        image = {'fake-data': 'should_pass_silently'}
        destinations = [{'host': 'host1', 'nodename': 'node1', 'limits': []},
                {'host': 'host2', 'nodename': 'node2', 'limits': []}]
        spec = {'fake': 'specs',
                'instance_properties': instances[0]}
        build_request_spec.return_value = spec
        with contextlib.nested(
                mock.patch.object(instances[0], 'refresh',
                    side_effect=exc.InstanceInfoCacheNotFound(
                        instance_uuid=instances[0].uuid)),
                mock.patch.object(instances[1], 'refresh'),
                mock.patch.object(self.conductor_manager.scheduler_client,
                    'select_destinations', return_value=destinations),
                mock.patch.object(self.conductor_manager.compute_rpcapi,
                    'build_and_run_instance')
                ) as (inst1_refresh, inst2_refresh, select_destinations,
                        build_and_run_instance):

            # build_instances() is a cast, we need to wait for it to complete
            self.useFixture(cast_as_call.CastAsCall(self.stubs))

            self.conductor.build_instances(self.context,
                              instances=instances,
                              image=image,
                              filter_properties={},
                              admin_password='admin_password',
                              injected_files='injected_files',
                              requested_networks=None,
                              security_groups='security_groups',
                              block_device_mapping='block_device_mapping',
                              legacy_bdm=False)

            # NOTE(sbauza): Due to populate_retry() later in the code,
            # filter_properties is dynamically modified
            setup_instance_group.assert_called_once_with(
                self.context, spec, {'retry': {'num_attempts': 1,
                                               'hosts': []}})
            build_and_run_instance.assert_called_once_with(self.context,
                    instance=instances[1], host='host2', image={'fake-data':
                        'should_pass_silently'}, request_spec=spec,
                    filter_properties={'limits': [],
                                       'retry': {'num_attempts': 1,
                                                 'hosts': [['host2',
                                                            'node2']]}},
                    admin_password='admin_password',
                    injected_files='injected_files',
                    requested_networks=None,
                    security_groups='security_groups',
                    block_device_mapping=mock.ANY,
                    node='node2', limits=[])
class ConductorTaskRPCAPITestCase(_BaseTaskTestCase,
        test_compute.BaseTestCase):
    """Conductor compute_task RPC namespace Tests."""

    def setUp(self):
        super(ConductorTaskRPCAPITestCase, self).setUp()
        # Run a real conductor service and exercise it through the
        # compute_task RPC API client.
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_rpcapi.ComputeTaskAPI()
        self.conductor_manager = (
            self.conductor_service.manager.compute_task_mgr)
class ConductorTaskAPITestCase(_BaseTaskTestCase, test_compute.BaseTestCase):
    """Compute task API Tests."""

    def setUp(self):
        super(ConductorTaskAPITestCase, self).setUp()
        # Same service as the RPC test case, but driven through the
        # top-level compute task API facade.
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.conductor = conductor_api.ComputeTaskAPI()
        self.conductor_manager = (
            self.conductor_service.manager.compute_task_mgr)
class ConductorLocalComputeTaskAPITestCase(ConductorTaskAPITestCase):
    """Conductor LocalComputeTaskAPI Tests."""
    def setUp(self):
        super(ConductorLocalComputeTaskAPITestCase, self).setUp()
        # Replace the API from the parent setUp with the in-process local
        # variant; the manager is reached through the local API's target.
        self.conductor = conductor_api.LocalComputeTaskAPI()
        self.conductor_manager = self.conductor._manager._target
| |
from __future__ import unicode_literals
import json
import boto
import boto.s3
import boto.s3.key
import boto.cloudformation
from boto.exception import BotoServerError
import sure # noqa
# Ensure 'assert_raises' context manager support for Python 2.6
import tests.backport_assert_raises # noqa
from nose.tools import assert_raises
from moto import mock_cloudformation_deprecated, mock_s3_deprecated, mock_route53_deprecated
from moto.cloudformation import cloudformation_backends
# Minimal CloudFormation templates shared by the tests below.
dummy_template = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Description": "Stack 1",
    "Resources": {},
}

dummy_template2 = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Description": "Stack 2",
    "Resources": {},
}

# template with resource which has no delete attribute defined
dummy_template3 = {
    "AWSTemplateFormatVersion": "2010-09-09",
    "Description": "Stack 3",
    "Resources": {
        "VPC": {
            "Properties": {
                "CidrBlock": "192.168.0.0/16",
            },
            "Type": "AWS::EC2::VPC"
        }
    },
}

# JSON-serialized bodies, as create_stack expects template_body strings.
dummy_template_json = json.dumps(dummy_template)
dummy_template_json2 = json.dumps(dummy_template2)
dummy_template_json3 = json.dumps(dummy_template3)
@mock_cloudformation_deprecated
def test_create_stack():
    """A freshly created stack is describable and returns its template."""
    cf = boto.connect_cloudformation()
    cf.create_stack(
        "test_stack",
        template_body=dummy_template_json,
    )

    created = cf.describe_stacks()[0]
    created.stack_name.should.equal('test_stack')
    expected_response = {
        'GetTemplateResponse': {
            'GetTemplateResult': {
                'TemplateBody': dummy_template_json,
                'ResponseMetadata': {
                    'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE'
                }
            }
        }
    }
    created.get_template().should.equal(expected_response)
@mock_cloudformation_deprecated
@mock_route53_deprecated
def test_create_stack_hosted_zone_by_id():
    """Create a Route53 hosted zone via one stack, then pass its zone id as
    a parameter to a second stack that builds a RecordSetGroup on it.
    """
    conn = boto.connect_cloudformation()
    # NOTE: these locals intentionally shadow the module-level
    # dummy_template/dummy_template2 constants.
    dummy_template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Description": "Stack 1",
        "Parameters": {
        },
        "Resources": {
            "Bar": {
                "Type" : "AWS::Route53::HostedZone",
                "Properties" : {
                    "Name" : "foo.bar.baz",
                }
            },
        },
    }
    dummy_template2 = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Description": "Stack 2",
        "Parameters": {
            "ZoneId": { "Type": "String" }
        },
        "Resources": {
            "Foo": {
                "Properties": {
                    "HostedZoneId": {"Ref": "ZoneId"},
                    "RecordSets": []
                },
                "Type": "AWS::Route53::RecordSetGroup"
            }
        },
    }
    conn.create_stack(
        "test_stack",
        template_body=json.dumps(dummy_template),
        parameters={}.items()
    )
    # Fetch the id of the zone the first stack just created.
    r53_conn = boto.connect_route53()
    zone_id = r53_conn.get_zones()[0].id
    conn.create_stack(
        "test_stack",
        template_body=json.dumps(dummy_template2),
        parameters={"ZoneId": zone_id}.items()
    )

    stack = conn.describe_stacks()[0]
    assert stack.list_resources()
@mock_cloudformation_deprecated
def test_creating_stacks_across_regions():
    """Stacks are tracked per-region: the same name can exist in two
    regions, each region listing exactly one stack."""
    for region in ("us-west-1", "us-west-2"):
        conn = boto.cloudformation.connect_to_region(region)
        conn.create_stack("test_stack", template_body=dummy_template_json)
        list(conn.describe_stacks()).should.have.length_of(1)
@mock_cloudformation_deprecated
def test_create_stack_with_notification_arn():
    """A notification ARN supplied at creation is stored on the stack."""
    arn = 'arn:aws:sns:us-east-1:123456789012:fake-queue'
    conn = boto.connect_cloudformation()
    conn.create_stack(
        "test_stack_with_notifications",
        template_body=dummy_template_json,
        notification_arns=arn,
    )

    stack = conn.describe_stacks()[0]
    [n.value for n in stack.notification_arns].should.contain(arn)
@mock_cloudformation_deprecated
@mock_s3_deprecated
def test_create_stack_from_s3_url():
    """create_stack can fetch the template body from a template_url."""
    # Upload the template to S3 and produce an unauthenticated URL for it.
    bucket = boto.s3.connect_to_region('us-west-1').create_bucket("foobar")
    template_key = boto.s3.key.Key(bucket)
    template_key.key = "template-key"
    template_key.set_contents_from_string(dummy_template_json)
    template_url = template_key.generate_url(
        expires_in=0, query_auth=False)

    cf = boto.cloudformation.connect_to_region('us-west-1')
    cf.create_stack('new-stack', template_url=template_url)

    stack = cf.describe_stacks()[0]
    stack.stack_name.should.equal('new-stack')
    stack.get_template().should.equal(
        {
            'GetTemplateResponse': {
                'GetTemplateResult': {
                    'TemplateBody': dummy_template_json,
                    'ResponseMetadata': {
                        'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE'
                    }
                }
            }
        })
@mock_cloudformation_deprecated
def test_describe_stack_by_name():
    """describe_stacks accepts the stack name as a filter."""
    cf = boto.connect_cloudformation()
    cf.create_stack(
        "test_stack",
        template_body=dummy_template_json,
    )

    found = cf.describe_stacks("test_stack")[0]
    found.stack_name.should.equal('test_stack')
@mock_cloudformation_deprecated
def test_describe_stack_by_stack_id():
    """describe_stacks accepts the stack id as a filter."""
    cf = boto.connect_cloudformation()
    cf.create_stack(
        "test_stack",
        template_body=dummy_template_json,
    )

    by_name = cf.describe_stacks("test_stack")[0]
    by_id = cf.describe_stacks(by_name.stack_id)[0]

    by_id.stack_id.should.equal(by_name.stack_id)
    by_id.stack_name.should.equal("test_stack")
@mock_cloudformation_deprecated
def test_describe_deleted_stack():
    """A deleted stack remains describable by id with DELETE_COMPLETE."""
    cf = boto.connect_cloudformation()
    cf.create_stack(
        "test_stack",
        template_body=dummy_template_json,
    )

    stack_id = cf.describe_stacks("test_stack")[0].stack_id
    cf.delete_stack(stack_id)

    deleted = cf.describe_stacks(stack_id)[0]
    deleted.stack_id.should.equal(stack_id)
    deleted.stack_name.should.equal("test_stack")
    deleted.stack_status.should.equal("DELETE_COMPLETE")
@mock_cloudformation_deprecated
def test_get_template_by_name():
    """get_template returns the original template body by stack name."""
    cf = boto.connect_cloudformation()
    cf.create_stack(
        "test_stack",
        template_body=dummy_template_json,
    )

    expected = {
        'GetTemplateResponse': {
            'GetTemplateResult': {
                'TemplateBody': dummy_template_json,
                'ResponseMetadata': {
                    'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE'
                }
            }
        }
    }
    cf.get_template("test_stack").should.equal(expected)
@mock_cloudformation_deprecated
def test_list_stacks():
    """list_stacks returns all stacks along with template descriptions."""
    cf = boto.connect_cloudformation()
    for name in ("test_stack", "test_stack2"):
        cf.create_stack(
            name,
            template_body=dummy_template_json,
        )

    stacks = cf.list_stacks()
    stacks.should.have.length_of(2)
    stacks[0].template_description.should.equal("Stack 1")
@mock_cloudformation_deprecated
def test_delete_stack_by_name():
    """Deleting by name removes the stack from the listing."""
    cf = boto.connect_cloudformation()
    cf.create_stack(
        "test_stack",
        template_body=dummy_template_json,
    )
    cf.list_stacks().should.have.length_of(1)

    cf.delete_stack("test_stack")
    cf.list_stacks().should.have.length_of(0)
@mock_cloudformation_deprecated
def test_delete_stack_by_id():
    """After a delete by id, lookup by name fails but the deleted stack is
    still describable by its id."""
    cf = boto.connect_cloudformation()
    stack_id = cf.create_stack(
        "test_stack",
        template_body=dummy_template_json,
    )
    cf.list_stacks().should.have.length_of(1)

    cf.delete_stack(stack_id)
    cf.list_stacks().should.have.length_of(0)
    # The name no longer resolves...
    with assert_raises(BotoServerError):
        cf.describe_stacks("test_stack")
    # ...but the id still does.
    cf.describe_stacks(stack_id).should.have.length_of(1)
@mock_cloudformation_deprecated
def test_delete_stack_with_resource_missing_delete_attr():
    """Stacks whose resources define no delete hook can still be deleted."""
    cf = boto.connect_cloudformation()
    cf.create_stack(
        "test_stack",
        template_body=dummy_template_json3,
    )
    cf.list_stacks().should.have.length_of(1)

    cf.delete_stack("test_stack")
    cf.list_stacks().should.have.length_of(0)
@mock_cloudformation_deprecated
def test_bad_describe_stack():
    """Describing an unknown stack name raises BotoServerError."""
    cf = boto.connect_cloudformation()
    with assert_raises(BotoServerError):
        cf.describe_stacks("bad_stack")
@mock_cloudformation_deprecated()
def test_cloudformation_params():
    """Parameters passed to create_stack are recorded on the stack."""
    # Local template (distinct from the module-level constants) with one
    # declared String parameter.
    template_with_param = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Description": "Stack 1",
        "Resources": {},
        "Parameters": {
            "APPNAME": {
                "Default": "app-name",
                "Description": "The name of the app",
                "Type": "String"
            }
        }
    }

    cfn = boto.connect_cloudformation()
    cfn.create_stack('test_stack1',
                     template_body=json.dumps(template_with_param),
                     parameters=[('APPNAME', 'testing123')])

    stack = cfn.describe_stacks('test_stack1')[0]
    stack.parameters.should.have.length_of(1)
    param = stack.parameters[0]
    param.key.should.equal('APPNAME')
    param.value.should.equal('testing123')
@mock_cloudformation_deprecated
def test_cloudformation_params_conditions_and_resources_are_distinct():
    """A parameter and a condition may share a name ("FooEnabled") without
    colliding; the conditional resource must respect the condition.
    """
    dummy_template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Description": "Stack 1",
        "Conditions": {
            "FooEnabled": {
                "Fn::Equals": [
                    {
                        "Ref": "FooEnabled"
                    },
                    "true"
                ]
            },
            "FooDisabled": {
                "Fn::Not": [
                    {
                        "Fn::Equals": [
                            {
                                "Ref": "FooEnabled"
                            },
                            "true"
                        ]
                    }
                ]
            }
        },
        "Parameters": {
            "FooEnabled": {
                "Type": "String",
                "AllowedValues": [
                    "true",
                    "false"
                ]
            }
        },
        "Resources": {
            "Bar": {
                "Properties": {
                    "CidrBlock": "192.168.0.0/16",
                },
                "Condition": "FooDisabled",
                "Type": "AWS::EC2::VPC"
            }
        }
    }
    dummy_template_json = json.dumps(dummy_template)
    cfn = boto.connect_cloudformation()
    cfn.create_stack('test_stack1', template_body=dummy_template_json, parameters=[('FooEnabled', 'true')])
    stack = cfn.describe_stacks('test_stack1')[0]
    resources = stack.list_resources()
    # FooEnabled=true makes FooDisabled false, so "Bar" must not be created.
    assert not [resource for resource in resources if resource.logical_resource_id == 'Bar']
@mock_cloudformation_deprecated
def test_stack_tags():
    """Tags given at creation time are returned by describe_stacks."""
    tags = {"foo": "bar", "baz": "bleh"}
    cf = boto.connect_cloudformation()
    cf.create_stack(
        "test_stack",
        template_body=dummy_template_json,
        tags=tags,
    )

    stack = cf.describe_stacks()[0]
    dict(stack.tags).should.equal(tags)
@mock_cloudformation_deprecated
def test_update_stack():
    """update_stack swaps in the new template and reaches UPDATE_COMPLETE."""
    cf = boto.connect_cloudformation()
    cf.create_stack(
        "test_stack",
        template_body=dummy_template_json,
    )

    cf.update_stack("test_stack", dummy_template_json2)

    stack = cf.describe_stacks()[0]
    stack.stack_status.should.equal("UPDATE_COMPLETE")
    expected = {
        'GetTemplateResponse': {
            'GetTemplateResult': {
                'TemplateBody': dummy_template_json2,
                'ResponseMetadata': {
                    'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE'
                }
            }
        }
    }
    stack.get_template().should.equal(expected)
@mock_cloudformation_deprecated
def test_update_stack_with_previous_template():
    """use_previous_template=True re-applies the original template body."""
    cf = boto.connect_cloudformation()
    cf.create_stack(
        "test_stack",
        template_body=dummy_template_json,
    )

    cf.update_stack("test_stack", use_previous_template=True)

    stack = cf.describe_stacks()[0]
    stack.stack_status.should.equal("UPDATE_COMPLETE")
    expected = {
        'GetTemplateResponse': {
            'GetTemplateResult': {
                'TemplateBody': dummy_template_json,
                'ResponseMetadata': {
                    'RequestId': '2d06e36c-ac1d-11e0-a958-f9382b6eb86bEXAMPLE'
                }
            }
        }
    }
    stack.get_template().should.equal(expected)
@mock_cloudformation_deprecated
def test_update_stack_with_parameters():
    """Updating a stack with new parameter values replaces the stored ones."""
    # Template whose VPC CidrBlock is driven by the "Bar" parameter.
    dummy_template = {
        "AWSTemplateFormatVersion": "2010-09-09",
        "Description": "Stack",
        "Resources": {
            "VPC": {
                "Properties": {
                    "CidrBlock": {"Ref": "Bar"}
                },
                "Type": "AWS::EC2::VPC"
            }
        },
        "Parameters": {
            "Bar": {
                "Type": "String"
            }
        }
    }
    dummy_template_json = json.dumps(dummy_template)
    conn = boto.connect_cloudformation()
    conn.create_stack(
        "test_stack",
        template_body=dummy_template_json,
        parameters=[("Bar", "192.168.0.0/16")]
    )
    conn.update_stack(
        "test_stack",
        template_body=dummy_template_json,
        parameters=[("Bar", "192.168.0.1/16")]
    )

    stack = conn.describe_stacks()[0]
    # Only the updated value must remain.
    assert stack.parameters[0].value == "192.168.0.1/16"
@mock_cloudformation_deprecated
def test_update_stack_replace_tags():
    """update_stack replaces the stack's tag set wholesale."""
    cf = boto.connect_cloudformation()
    cf.create_stack(
        "test_stack",
        template_body=dummy_template_json,
        tags={"foo": "bar"},
    )

    cf.update_stack(
        "test_stack",
        template_body=dummy_template_json,
        tags={"foo": "baz"},
    )

    stack = cf.describe_stacks()[0]
    stack.stack_status.should.equal("UPDATE_COMPLETE")
    # since there is one tag it doesn't come out as a list
    dict(stack.tags).should.equal({"foo": "baz"})
@mock_cloudformation_deprecated
def test_update_stack_when_rolled_back():
    """A stack in ROLLBACK_COMPLETE cannot be updated; a ValidationError
    (HTTP 400) is raised instead."""
    conn = boto.connect_cloudformation()
    stack_id = conn.create_stack(
        "test_stack", template_body=dummy_template_json)

    # Reach into the moto backend to force the rolled-back state, since the
    # public API has no way to produce it directly.
    cloudformation_backends[conn.region.name].stacks[
        stack_id].status = 'ROLLBACK_COMPLETE'

    with assert_raises(BotoServerError) as err:
        conn.update_stack("test_stack", dummy_template_json)

    ex = err.exception
    ex.body.should.match(
        r'is in ROLLBACK_COMPLETE state and can not be updated')
    ex.error_code.should.equal('ValidationError')
    ex.reason.should.equal('Bad Request')
    ex.status.should.equal(400)
@mock_cloudformation_deprecated
def test_describe_stack_events_shows_create_update_and_delete():
    """Stack events record the full create/update/delete lifecycle, newest
    first, with stack-level events bracketing the list."""
    conn = boto.connect_cloudformation()
    stack_id = conn.create_stack(
        "test_stack", template_body=dummy_template_json)
    conn.update_stack(stack_id, template_body=dummy_template_json2)
    conn.delete_stack(stack_id)

    # assert begins and ends with stack events
    events = conn.describe_stack_events(stack_id)
    events[0].resource_type.should.equal("AWS::CloudFormation::Stack")
    events[-1].resource_type.should.equal("AWS::CloudFormation::Stack")

    # testing ordering of stack events without assuming resource events will not exist
    # the AWS API returns events in reverse chronological order
    stack_events_to_look_for = iter([
        ("DELETE_COMPLETE", None),
        ("DELETE_IN_PROGRESS", "User Initiated"),
        ("UPDATE_COMPLETE", None),
        ("UPDATE_IN_PROGRESS", "User Initiated"),
        ("CREATE_COMPLETE", None),
        ("CREATE_IN_PROGRESS", "User Initiated"),
    ])
    try:
        for event in events:
            event.stack_id.should.equal(stack_id)
            event.stack_name.should.equal("test_stack")
            event.event_id.should.match(r"[0-9a-f]{8}-([0-9a-f]{4}-){3}[0-9a-f]{12}")

            if event.resource_type == "AWS::CloudFormation::Stack":
                event.logical_resource_id.should.equal("test_stack")
                event.physical_resource_id.should.equal(stack_id)

                # Each stack-level event must match the next expected
                # (status, reason) pair, in order.
                status_to_look_for, reason_to_look_for = next(
                    stack_events_to_look_for)
                event.resource_status.should.equal(status_to_look_for)
                if reason_to_look_for is not None:
                    event.resource_status_reason.should.equal(
                        reason_to_look_for)
    except StopIteration:
        assert False, "Too many stack events"

    # Every expected lifecycle event must have been consumed.
    list(stack_events_to_look_for).should.be.empty
| |
"""
This is a REST server that accept requests to control the PiGlow board.
A few interesting things about this server:
* It is designed with a RESTful Api
* It uses a global lock to queue up operations to the PiGlow
Run this server like this:
python pg_rest_server.py
Example in using the API:
# set arm 3 to brightness 50
curl -X PUT -d brightness=50 http://localhost:5000/arms/3
# switch on and off LED 7
curl -X PUT -d brightness=100 http://localhost:5000/leds/7
curl -X PUT -d brightness=0 http://localhost:5000/leds/7
# switch on led 3 and 5 with brightness 10 and 200
curl -X PUT -H 'Content-Type: application/json' \
-d '[{"led_id":3, "brightness": 10}, {"led_id":5, "brightness":200 }]' \
http://localhost:5000/leds
"""
import threading
from flask import Flask
from flask import request
from flask.ext.restful import (Resource, Api, reqparse, abort)
# Support a dummy PyGlow class so that we can test this code
# on something other than a real RPi
try:
from PyGlow import (PyGlow, ARM_LED_LIST, COLOR_LED_LIST)
except ImportError:
print 'Cannot import PyGlow library, use dummy interface for testing'
from dummy_pyglow import (PyGlow, ARM_LED_LIST, COLOR_LED_LIST)
app = Flask(__name__)
api = Api(app)

# internal cache of LED status
# One dict per LED; ids are 1-based (1..18), so entry for id N is index N-1.
led_list = [{'led_id': i, 'brightness': 0} for i in range(1, 19)]

# global lock
# Serializes all access to the PiGlow hardware and to led_list above.
lock = threading.Lock()
pyglow = PyGlow()
# interface to the h/w layer
def set_led(num, brightness):
    """
    Set one LED

    :param num: is the LED number, from 1 to 18
    :param brightness: is the light level, from 0-255
    """
    # Serialize hardware access: update the cache, then drive the board.
    with lock:
        led_list[num - 1]['brightness'] = brightness
        pyglow.led(num, brightness=brightness)
def set_arm(num, brightness):
    """
    Set one arm of the PiGlow

    :param num: is the arm number, from 1 to 3
    :param brightness: is the light level, from 0-255
    """
    # Serialize hardware access: refresh the cache entry for every LED on
    # this arm, then drive the board once.
    with lock:
        for led_id in ARM_LED_LIST[num - 1]:
            led_list[led_id - 1]['brightness'] = brightness
        pyglow.arm(num, brightness=brightness)
def set_color(num, brightness):
    """
    Set one color ring of the PiGlow

    :param num: is the color/ring number, from 1 to 6
    :param brightness: is the light level, from 0-255
    """
    # Serialize hardware access: refresh the cache entry for every LED in
    # this ring, then drive the board once.
    with lock:
        for led_id in COLOR_LED_LIST[num - 1]:
            led_list[led_id - 1]['brightness'] = brightness
        pyglow.color(num, brightness=brightness)
def set_clear():
    """Switch every LED off and zero all cached brightness values."""
    with lock:
        for entry in led_list:
            entry['brightness'] = 0
        pyglow.all(brightness=0)
def set_starburst(brightness):
    """
    Execute starburst pattern

    :param brightness: is the light level, from 0-255
    """
    global lock
    # do this one at a time
    with lock:
        # clear first
        pyglow.all(brightness=0)
        # Flash each color ring on then off, from ring 1 outward.
        # NOTE(review): there is no delay between the on and off calls here
        # and led_list is not updated — presumably PyGlow provides the
        # visible timing; confirm against the PyGlow implementation.
        for i in range(1, 7):
            pyglow.color(i, brightness=brightness)
            pyglow.color(i, brightness=0)
# interface to the h/w layer
def set_leds(set_list):
    """
    Set list of LED

    :param set_list: is a list of (id, brightness)
    """
    # A single lock acquisition covers the whole batch so the update is
    # atomic with respect to other handlers.
    with lock:
        for led_id, level in set_list:
            led_list[led_id - 1]['brightness'] = level
            pyglow.led(led_id, brightness=level)
class PiGlowResourceMixin(object):
    """
    Mixin provide some helper functions.

    The validate_* helpers abort the request with an appropriate HTTP
    status when a value is out of range; queue_command runs a hardware
    operation on a background daemon thread so the HTTP response does not
    block on the PiGlow lock.
    """
    def validate_led_id(self, led_id):
        # LED ids are 1-based; range() membership also rejects non-ints.
        if led_id is None or led_id not in range(1, 19):
            abort(404, message='LED id must be in the range of 1 to 18')

    def validate_brightness(self, b):
        if b is None or b not in range(0, 256):
            abort(400, message='brightness must be in the range of 0 to 255')

    def validate_arm_id(self, arm_id):
        if arm_id is None or arm_id not in range(1, 4):
            abort(404, message='arm id must be in the range of 1 to 3')

    def validate_color_id(self, color_id):
        if color_id is None or color_id not in range(1, 7):
            abort(404, message='color id must be in the range of 1 to 6')

    def queue_command(self, func, *args):
        """
        Queue function with optional args in a separate thread.

        :returns: the started daemon thread
        """
        h = threading.Thread(target=func, args=args)
        h.daemon = True  # preferred over the deprecated setDaemon()
        h.start()
        return h
class LedListAPI(PiGlowResourceMixin, Resource):
    """
    REST interface to the list of LED as a whole.
    """
    def get(self):
        # Return the cached state of all 18 LEDs.
        return led_list

    def put(self):
        """
        Accept JSON [ {led_id:n, brightness:b}, ...]
        """
        # Validate every entry before queueing anything, so a bad entry
        # rejects the whole batch.
        set_list = []
        for d in request.json:
            n = d['led_id']
            b = d['brightness']
            self.validate_brightness(b)
            self.validate_led_id(n)
            set_list.append((n, b))
        # Apply the batch asynchronously on a worker thread.
        self.queue_command(set_leds, set_list)
        return led_list
class LedAPI(PiGlowResourceMixin, Resource):
    """
    REST interface to control the LEDs.

    /leds/:led_id with led_id in 1..18.
    """
    def get(self, led_id):
        # BUG FIX: led ids are 1-based while led_list is 0-based. The old
        # code indexed led_list[led_id], returning the wrong LED's entry
        # and raising IndexError for led_id 18. Validate like put() does
        # and use the same led_id - 1 index.
        self.validate_led_id(led_id)
        return led_list[led_id - 1]

    def put(self, led_id):
        self.validate_led_id(led_id)
        parser = reqparse.RequestParser()
        parser.add_argument('brightness', type=int, default=0,
                            help='Brightness for this arm of LED')
        args = parser.parse_args()
        b = args.get('brightness')
        self.validate_brightness(b)
        # Apply asynchronously; the worker thread updates the cache.
        self.queue_command(set_led, led_id, b)
        return led_list[led_id - 1]
class ArmAPI(PiGlowResourceMixin, Resource):
    """
    Control a single arm on the PiGlow.

    /arms/:arm_id/

    The brightness value can be specified as json or form data in the request,
    or directly on the URL.

    :param arm_id: on the URL is 1 to 3
    :param brightness: brightness=0..255
    """
    def get(self, arm_id):
        # Returns the full cached LED state, not just this arm's LEDs.
        return led_list

    def put(self, arm_id):
        parser = reqparse.RequestParser()
        parser.add_argument('brightness', type=int, default=0,
                            help='Brightness for this arm of LED')
        args = parser.parse_args()
        self.validate_arm_id(arm_id)
        b = args.get('brightness')
        self.validate_brightness(b)
        # Apply asynchronously on a worker thread (see queue_command).
        self.queue_command(set_arm, arm_id, b)
        return led_list
class ColorAPI(PiGlowResourceMixin, Resource):
    """
    Control a single color ring on the PiGlow.

    /colors/:color_id/

    The brightness value can be specified as json or form data in the request,
    or directly on the URL.

    :param color_id: on the URL is 1 to 6
    :param brightness: brightness=0..255
    """
    def get(self, color_id):
        # Returns the full cached LED state, not just this ring's LEDs.
        return led_list

    def put(self, color_id):
        parser = reqparse.RequestParser()
        parser.add_argument('brightness', type=int, default=0,
                            help='Brightness for this arm of LED')
        args = parser.parse_args()
        self.validate_color_id(color_id)
        b = args.get('brightness')
        self.validate_brightness(b)
        # Apply asynchronously on a worker thread (see queue_command).
        self.queue_command(set_color, color_id, b)
        return led_list
class PatternAPI(PiGlowResourceMixin, Resource):
    """
    This API allows display of patterns as a whole.

    /pattern/:pattern_name/

    :param brightness: brightness=0..255
    """
    def get(self, pattern_name):
        # BUG FIX: the URL rule passes a ``pattern_name`` keyword argument;
        # the old signature (``color_id``) made every GET on this resource
        # fail with a TypeError.
        return led_list

    def put(self, pattern_name):
        parser = reqparse.RequestParser()
        parser.add_argument('brightness', type=int, default=0,
                            help='Brightness for the pattern')
        parser.add_argument('speed', type=int, default=0,
                            help='Speed for the pattern')
        args = parser.parse_args()
        b = args.get('brightness')
        self.validate_brightness(b)
        # NOTE(review): 'speed' is parsed but never used — confirm whether
        # set_starburst should honour it.
        if pattern_name == 'clear':
            self.queue_command(set_clear)
        if pattern_name == 'starburst':
            self.queue_command(set_starburst, b)
        return led_list
# Route registrations: map URL patterns to the Resource classes above.
api.add_resource(LedListAPI, '/leds')
api.add_resource(LedAPI, '/leds/<int:led_id>')
api.add_resource(ArmAPI, '/arms/<int:arm_id>')
api.add_resource(ColorAPI, '/colors/<int:color_id>')
api.add_resource(PatternAPI, '/patterns/<pattern_name>')
@app.route('/', methods=['GET', ])
def index():
    # Minimal landing page pointing at the project documentation.
    return 'PiGlow RESTful API Server.<br />See http://github.com/pkshiu/piglowserver for info'
if __name__ == '__main__':
    # Listen on all interfaces; debug mode enables the Flask reloader.
    app.run(debug=True, host='0.0.0.0')
| |
#coding=UTF-8
'''
Created on 2011-7-5
@author: Administrator
'''
import threading
import time
from spider import soufang
from spider import ganji
from spider import tongcheng58
from spider.threadpool import ThreadPool, makeRequests
import urllib2
import urllib
from spider.globalvars import fetch_quere
from spider.jjrlog import msglogger
import gc
import random
import spider
gc.enable()
#gc.set_debug(gc.DEBUG_COLLECTABLE | gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_INSTANCES | gc.DEBUG_OBJECTS)
# Global flag read by linksThead(); while True the link loop keeps running.
coctn=True
def linksThead(list):
    """Continuously spawn link-fetching threads for each spider spec while
    the global ``coctn`` flag is set.

    :param list: sequence of [module_name, citycode, kind] triples.
        NOTE(review): the parameter shadows the builtin ``list``.
    """
    global coctn
    while True:
        if coctn:
            for args in list:
                ts=[]
                p={"citycode":args[1],"kind":args[2]}
                mod=args[0]
                # Resolve the spider submodule by name and create its
                # link-fetching thread for this city/kind.
                Tcls=getattr(spider,mod).getLinksThread(p)
                ts.append(Tcls)
                for t in ts:
                    t.start()
        else:
            time.sleep(3)
class fetchLinkThreadControl(threading.Thread):
    """Thread that refills the global fetch queue.

    Whenever ``fetch_quere`` is empty, runs getLinks() for every
    (module, citycode, kind) spec through a thread pool; otherwise sleeps.
    """
    def __init__(self,data):
        # data: list of [module_name, citycode, kind] triples.
        threading.Thread.__init__(self)
        self.data=data

    def run(self):
#        global coctn
        # One pool worker per spec.
        lmain = ThreadPool(len(self.data))
        while True:
            if fetch_quere.empty():
#                coctn=False
                lrequests = makeRequests(self.runFunc, self.data, self.getResult)
                for lreq in lrequests:
                    lmain.putRequest(lreq)
                lmain.wait()
            else:
                time.sleep(3)
            print "*"*80

    def runFunc(self,args):
        # Resolve the spider submodule by name and collect its links;
        # getLinks() is expected to feed fetch_quere as a side effect.
        p={"citycode":args[1],"kind":args[2]}
        mod=args[0]
        getattr(spider,mod).getLinks(p)
#        args[0].getLinks({"citycode":args[1],"kind":args[2]})
#        print "gc fetchLink------------->%s , %s , %s"%(gc.collect(),len(gc.garbage),len(gc.get_objects()))
        # Drop uncollectable cycles accumulated during the crawl.
        del gc.garbage[:]
        args=0

    def getResult(self,id,res):
        print "fetch_quere.qsize-----------------> %s"%fetch_quere.qsize()
#        for r in res[0]:
#            fetch_quere.put({"link":r,"args":res[1]})
class fetchDataThreadControl(threading.Thread):
    """Thread that drains the global fetch queue.

    Pulls up to ``psize`` queued links at a time, downloads/parses each via
    the spider module's getContent(), and posts the parsed result to the
    remote collection endpoint.
    """
    def __init__(self,psize):
        # psize: batch size and thread-pool width.
        threading.Thread.__init__(self)
        self.psize=psize

    def run(self):
#        global coctn
        fmain = ThreadPool(self.psize)
        while True:
            if not fetch_quere.empty() :
                fdata=[]
#                coctn=False
                # Grab up to psize work items without blocking.
                for i in range(self.psize):
                    try:
                        fdata.append(fetch_quere.get(0))
                    except Exception,e:
                        print "-=-==================%s"%e
#                        coctn=True
                        continue
                frequests = makeRequests(self.runFunc, fdata, self.getResult)
                for freq in frequests:
                    fmain.putRequest(freq)
                    time.sleep(0.5)
#            main.poll()
                fmain.wait()
#            else:
#                coctn=True
            time.sleep(0.5)
            print "gc fetchData------------->%s , %s , %s"%(gc.collect(),len(gc.garbage),len(gc.get_objects()))
            del gc.garbage[:]
            print "-----------------> %s"%fetch_quere.qsize()
            print "%"*60

    def runFunc(self,args):
        # args: {"mod": module name, "link": URL, "citycode":..., "kind":...}
        res=getattr(spider,args["mod"]).getContent(args["link"],args["citycode"],args["kind"])
#        res=cc.extractDict()
#        res=random.randint(1,5)
#        time.sleep(res)
#        msglogger.debug("%s%s"%(args["link"],res))
        args=0
        return res

    def getResult(self,id,res):
#        print res
#        return
        # POST the parsed record to the collector; errors are swallowed and
        # only logged, so one bad page never stops the crawl.
        if res==None:
            return
        req=urllib2.Request("http://site.jjr360.com/app.php", urllib.urlencode(res))
        br=urllib2.build_opener()
        try:
            p=br.open(req).read().strip()
        except:
            p=None
        rs=""
        if p!=None and p!="":
            rs=p.decode('gbk')
        try:
            msglogger.debug("%s---->%s"%(res,rs))
        except:
            print "Exception -------->%s"%res
#        print p.decode('gbk')
def maingogogo(data):
    """Start the crawl: one link-producer thread for ``data`` plus a
    100-wide data-fetcher after a 5s warm-up delay.

    :param data: list of [module_name, citycode, kind] triples.
    """
    fl=fetchLinkThreadControl(data)
    fl.start()
    print ""
    # Give the producer a head start before consumers spin up.
    time.sleep(5)
    fd=fetchDataThreadControl(100)
    fd.setDaemon(True)
    fd.start()
if __name__=="__main__":
    # Active crawl specs; the commented entries are other site/city/kind
    # combinations that can be re-enabled.
    data=[
#          ["tongcheng58","su","1"],
#          ["tongcheng58","su","2"],
          ["tongcheng58","cz","3"],
#          ["tongcheng58","su","4"],
##          [soufang,"su","1"],
#          ["ganji","su","1"],
#          ["ganji","su","2"],
#          ["ganji","su","3"],
#          ["ganji","su","4"],
          ]
#    linksThead(data)
    # Same startup sequence as maingogogo(): producer first, consumers
    # after a 5 second delay.
    fl=fetchLinkThreadControl(data)
    fl.start()
    print ""
    time.sleep(5)
    fd=fetchDataThreadControl(100)
    fd.setDaemon(True)
    fd.start()
#    linksThead(data)
#    print getattr(spider,"tongcheng58")
#    lf=file("link.log")
#    idx=0
#    for line in lf.readlines():
#        lk=line.split('|')
#        fetch_quere.put({"mod":"tongcheng58","link":lk[1],"citycode":"su","kind":lk[0]})
#        idx=idx+1
#        if idx%25==0:
#            time.sleep(random.randint(1,30))
#    try:
#        ct=CThread("su",'1',3000,3)
#        ct.start()
#    except:
#        pass
| |
''' Script to check the correctness of the clustering for different data types.
'''
import unittest
import os
import numpy as np
from pixel_clusterizer.clusterizer import HitClusterizer, default_hits_descr, default_hits_dtype, default_clusters_dtype, default_clusters_descr, default_cluster_hits_descr, default_cluster_hits_dtype
def create_hits(n_hits, max_column, max_row, max_frame, max_charge, hit_dtype=default_hits_dtype, hit_fields=None):
    """Generate a deterministic array of fake hits for clustering tests.

    Three consecutive hits share an event number; columns/rows/charges
    cycle with the given maxima. When ``hit_fields`` maps custom field
    names to standard ones, values are written to the custom names.
    """
    hits = np.zeros(shape=(n_hits, ), dtype=hit_dtype)
    # Invert the mapping once so standard names resolve to custom ones.
    field_name = None
    if hit_fields:
        field_name = dict((v, k) for k, v in hit_fields.items())
    standard_names = ('event_number', 'frame', 'column', 'row', 'charge')
    for i in range(n_hits):
        values = (i / 3, i % max_frame, i % max_column + 1,
                  2 * i % max_row + 1, i % max_charge)
        for name, value in zip(standard_names, values):
            hits[i][field_name[name] if field_name else name] = value
    return hits
class TestClusterizer(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        # PURE_PYTHON env var (any non-empty string is truthy) selects the
        # pure-Python clustering path instead of the compiled one.
        cls.pure_python = os.getenv('PURE_PYTHON', False)
    def test_different_hit_data_types(self):
        """Clustering results are independent of the hit array's dtype, and
        the internal cluster buffer is reused/grown correctly across calls.
        """
        # Define a different hit data structure with standard names but
        # different data types and number of fields. Numba automatically
        # recompiles and the result should not change
        hit_data_types = []
        hit_data_types.append([
            ('event_number', '<i8'),
            ('frame', '<u2'),
            ('column', '<u4'),
            ('row', '<u4'),
            ('charge', '<f4'),
            ('parameter', '<i4')])
        hit_data_types.append([
            ('event_number', '<i4'),
            ('frame', '<u8'),
            ('column', '<u2'),
            ('row', '<i2'),
            ('charge', '<u1'),
            ('parameter', '<u1'),
            ('parameter_1', '<i4'),
            ('parameter_2', 'f4')])
        # Initialize clusterizer
        clusterizer = HitClusterizer(pure_python=self.pure_python, min_hit_charge=0, max_hit_charge=13, charge_correction=1, charge_weighted_clustering=True, column_cluster_distance=2, row_cluster_distance=2, frame_cluster_distance=4, ignore_same_hits=True)

        for hit_data_type in hit_data_types:
            clusterizer.set_hit_dtype(np.dtype(hit_data_type))
            # Create fake data with actual hit data structure
            hits = create_hits(n_hits=10, max_column=100, max_row=100, max_frame=1, max_charge=2, hit_dtype=np.dtype(hit_data_type))
            hits['parameter'] = 1  # check for number different from zero
            cluster_hits, clusters = clusterizer.cluster_hits(hits)
            array_size_before = clusterizer._clusters.shape[0]

            # Define expected output
            expected_clusters = np.zeros(shape=(4, ), dtype=default_clusters_dtype)
            expected_clusters['event_number'] = [0, 1, 2, 3]
            expected_clusters['n_hits'] = [3, 3, 3, 1]
            expected_clusters['charge'] = [1, 2, 1, 1]
            expected_clusters['seed_column'] = [2, 4, 8, 10]
            expected_clusters['seed_row'] = [3, 7, 15, 19]
            expected_clusters['mean_column'] = [2.0, 5.0, 8.0, 10.0]
            expected_clusters['mean_row'] = [3.0, 9.0, 15.0, 19.0]

            # Define expected output. Cluster hit data types are different and thus the expected results have to have different data types
            hit_data_type.extend([
                ('cluster_ID', '<i2'),
                ('is_seed', '<u1'),
                ('cluster_size', '<u2'),
                ('n_cluster', '<u2')])
            expected_cluster_hits = np.zeros(shape=(10, ), dtype=hit_data_type)
            expected_cluster_hits['event_number'] = hits['event_number']
            expected_cluster_hits['frame'] = hits['frame']
            expected_cluster_hits['column'] = hits['column']
            expected_cluster_hits['row'] = hits['row']
            expected_cluster_hits['charge'] = hits['charge']
            expected_cluster_hits['is_seed'] = [0, 1, 0, 1, 0, 0, 0, 1, 0, 1]
            expected_cluster_hits['cluster_size'] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 1]
            expected_cluster_hits['n_cluster'] = 1
            expected_cluster_hits['parameter'] = 1  # was set to 1 before and copied to the cluster hits array

            # Test results
            self.assertTrue(np.array_equal(clusters, expected_clusters))
            self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))

            # Test same size array
            hits = create_hits(n_hits=10, max_column=100, max_row=100, max_frame=1, max_charge=2, hit_dtype=np.dtype(hit_data_type))
            cluster_hits, clusters = clusterizer.cluster_hits(hits)
            array_size_after = clusterizer._clusters.shape[0]

            # Test results: same input size must not grow the buffer.
            self.assertTrue(array_size_before == array_size_after)
            self.assertTrue(np.array_equal(clusters, expected_clusters))
            expected_cluster_hits['parameter'] = 0  # created new hits, this is zero again
            self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))

            # Test increasing size array
            hits = create_hits(n_hits=20, max_column=100, max_row=100, max_frame=1, max_charge=2, hit_dtype=np.dtype(hit_data_type))
            cluster_hits, clusters = clusterizer.cluster_hits(hits)
            array_size_after = clusterizer._clusters.shape[0]

            # Define expected output
            expected_clusters = np.zeros(shape=(7, ), dtype=default_clusters_dtype)
            expected_clusters['event_number'] = [0, 1, 2, 3, 4, 5, 6]
            expected_clusters['n_hits'] = [3, 3, 3, 3, 3, 3, 2]
            expected_clusters['charge'] = [1, 2, 1, 2, 1, 2, 1]
            expected_clusters['seed_column'] = [2, 4, 8, 10, 14, 16, 20]
            expected_clusters['seed_row'] = [3, 7, 15, 19, 27, 31, 39]
            expected_clusters['mean_column'] = [2.0, 5.0, 8.0, 11.0, 14.0, 17.0, (1 * 19 + 2 * 20) / 3.0]
            expected_clusters['mean_row'] = [3.0, 9.0, 15.0, 21.0, 27.0, 33.0, (1 * 37 + 2 * 39) / 3.0]
            # Define expected output. Cluster hit data types are different and thus the expected results have to have different data types
            expected_cluster_hits = np.zeros(shape=(20, ), dtype=hit_data_type)
            expected_cluster_hits['event_number'] = hits['event_number']
            expected_cluster_hits['frame'] = hits['frame']
            expected_cluster_hits['column'] = hits['column']
            expected_cluster_hits['row'] = hits['row']
            expected_cluster_hits['charge'] = hits['charge']
            expected_cluster_hits['is_seed'] = [0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1]
            expected_cluster_hits['cluster_size'] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2]
            expected_cluster_hits['n_cluster'] = 1

            # Test results: larger input must have grown the buffer.
            self.assertTrue(array_size_before < array_size_after)
            self.assertTrue(np.array_equal(clusters, expected_clusters))
            self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))

        # Initialize Clusterizer and test charge weighted clustering (charge is float)
        clusterizer = HitClusterizer(pure_python=self.pure_python, charge_weighted_clustering=True)
        # Create some fake data
        hits = np.ones(shape=(4, ), dtype=default_hits_dtype)
        clusterizer.set_hit_dtype(hits.dtype)
        hits[0]['column'], hits[0]['row'], hits[0]['charge'], hits[0]['event_number'] = 17, 36, 0.0, 19
        hits[1]['column'], hits[1]['row'], hits[1]['charge'], hits[1]['event_number'] = 18, 37, 10.5, 19
        hits[2]['column'], hits[2]['row'], hits[2]['charge'], hits[2]['event_number'] = 17, 36, 1.0, 20
        hits[3]['column'], hits[3]['row'], hits[3]['charge'], hits[3]['event_number'] = 18, 37, 10.5, 20

        cluster_hits, clusters = clusterizer.cluster_hits(hits)  # cluster hits

        # Define expected output
        expected_clusters = np.zeros(shape=(2, ), dtype=default_clusters_dtype)
        expected_clusters['event_number'] = [19, 20]
        expected_clusters['n_hits'] = [2, 2]
        expected_clusters['charge'] = [10.5, 11.5]
        expected_clusters['seed_column'] = [18, 18]
        expected_clusters['seed_row'] = [37, 37]
        # Mean positions are charge-weighted averages of the two hits.
        expected_clusters['mean_column'] = [18.0, (1.0 * 17 + 10.5 * 18) / 11.5]
        expected_clusters['mean_row'] = [37.0, (1.0 * 36 + 10.5 * 37) / 11.5]

        # Define expected output
        expected_cluster_hits = np.zeros(shape=(4, ), dtype=default_cluster_hits_dtype)
        expected_cluster_hits['event_number'] = hits['event_number']
        expected_cluster_hits['frame'] = hits['frame']
        expected_cluster_hits['column'] = hits['column']
        expected_cluster_hits['row'] = hits['row']
        expected_cluster_hits['charge'] = hits['charge']
        expected_cluster_hits['is_seed'] = [0, 1, 0, 1]
        expected_cluster_hits['cluster_size'] = [2, 2, 2, 2]
        expected_cluster_hits['n_cluster'] = [1, 1, 1, 1]

        # Test results
        self.assertTrue(np.array_equal(clusters, expected_clusters))
        self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))
def test_different_cluster_data_types(self):
    """Clustering results must not depend on the chosen cluster dtype.

    Runs the same fake hits through the clusterizer with two different
    (but value-compatible) cluster record dtypes and checks that the
    numeric output is identical.  Also checks the internal cluster
    buffer is reused for same-sized input and grows for larger input.
    """
    # Define a different hit data structure with standard names but
    # different data types and number of fields. Numba automatically
    # recompiles and the result should not change
    cluster_data_types = []
    # Variant 1: float event number, float charge, signed seed indices
    cluster_data_types.append([
        ('event_number', '<f8'),
        ('ID', '<u2'),
        ('n_hits', '<u2'),
        ('charge', 'f4'),
        ('seed_column', '<i2'),
        ('seed_row', '<i2'),
        ('mean_column', 'f4'),
        ('mean_row', 'f4')])
    # Variant 2: unsigned event number, integer charge, unsigned seed indices
    cluster_data_types.append([
        ('event_number', '<u8'),
        ('ID', '<u2'),
        ('n_hits', '<u2'),
        ('charge', 'u4'),
        ('seed_column', '<u2'),
        ('seed_row', '<u2'),
        ('mean_column', 'f4'),
        ('mean_row', 'f4')])
    # Initialize clusterizer
    clusterizer = HitClusterizer(
        pure_python=self.pure_python,
        min_hit_charge=0,
        max_hit_charge=13,
        charge_correction=1,
        charge_weighted_clustering=True,
        column_cluster_distance=2,
        row_cluster_distance=2,
        frame_cluster_distance=4,
        ignore_same_hits=True)
    for cluster_data_type in cluster_data_types:
        clusterizer.set_cluster_dtype(np.dtype(cluster_data_type))
        # Create fake data with actual hit data structure
        hits = create_hits(n_hits=10, max_column=100, max_row=100, max_frame=1, max_charge=2)
        cluster_hits, clusters = clusterizer.cluster_hits(hits)  # Cluster hits
        array_size_before = clusterizer._clusters.shape[0]
        # Define expected output
        expected_clusters = np.zeros(shape=(4, ), dtype=np.dtype(cluster_data_type))
        expected_clusters['event_number'] = [0, 1, 2, 3]
        expected_clusters['n_hits'] = [3, 3, 3, 1]
        expected_clusters['charge'] = [1, 2, 1, 1]
        expected_clusters['seed_column'] = [2, 4, 8, 10]
        expected_clusters['seed_row'] = [3, 7, 15, 19]
        expected_clusters['mean_column'] = [2.0, 5.0, 8.0, 10.0]
        expected_clusters['mean_row'] = [3.0, 9.0, 15.0, 19.0]
        # Cluster-hit output keeps the default dtype regardless of the cluster dtype
        expected_cluster_hits = np.zeros(shape=(10, ), dtype=default_cluster_hits_dtype)
        expected_cluster_hits['event_number'] = hits['event_number']
        expected_cluster_hits['frame'] = hits['frame']
        expected_cluster_hits['column'] = hits['column']
        expected_cluster_hits['row'] = hits['row']
        expected_cluster_hits['charge'] = hits['charge']
        expected_cluster_hits['is_seed'] = [0, 1, 0, 1, 0, 0, 0, 1, 0, 1]
        expected_cluster_hits['cluster_size'] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 1]
        expected_cluster_hits['n_cluster'] = 1
        # Test results
        self.assertTrue(np.array_equal(clusters, expected_clusters))
        self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))
        # Test same size array
        hits = create_hits(n_hits=10, max_column=100, max_row=100, max_frame=1, max_charge=2)
        cluster_hits, clusters = clusterizer.cluster_hits(hits)
        array_size_after = clusterizer._clusters.shape[0]
        # Test results: same-sized input must not reallocate the cluster buffer
        self.assertTrue(array_size_before == array_size_after)
        self.assertTrue(np.array_equal(clusters, expected_clusters))
        self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))
        # Test increasing size array
        hits = create_hits(n_hits=20, max_column=100, max_row=100, max_frame=1, max_charge=2)
        cluster_hits, clusters = clusterizer.cluster_hits(hits)
        array_size_after = clusterizer._clusters.shape[0]
        # Define expected output
        expected_clusters = np.zeros(shape=(7, ), dtype=np.dtype(cluster_data_type))
        expected_clusters['event_number'] = [0, 1, 2, 3, 4, 5, 6]
        expected_clusters['n_hits'] = [3, 3, 3, 3, 3, 3, 2]
        expected_clusters['charge'] = [1, 2, 1, 2, 1, 2, 1]
        expected_clusters['seed_column'] = [2, 4, 8, 10, 14, 16, 20]
        expected_clusters['seed_row'] = [3, 7, 15, 19, 27, 31, 39]
        expected_clusters['mean_column'] = [2.0, 5.0, 8.0, 11.0, 14.0, 17.0, (1 * 19 + 2 * 20) / 3.0]
        expected_clusters['mean_row'] = [3.0, 9.0, 15.0, 21.0, 27.0, 33.0, (1 * 37 + 2 * 39) / 3.0]
        # Define expected output. Cluster hit data types are different and thus the expected results have to have different data types
        expected_cluster_hits = np.zeros(shape=(20, ), dtype=default_cluster_hits_dtype)
        expected_cluster_hits['event_number'] = hits['event_number']
        expected_cluster_hits['frame'] = hits['frame']
        expected_cluster_hits['column'] = hits['column']
        expected_cluster_hits['row'] = hits['row']
        expected_cluster_hits['charge'] = hits['charge']
        expected_cluster_hits['is_seed'] = [0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1]
        expected_cluster_hits['cluster_size'] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2]
        expected_cluster_hits['n_cluster'] = 1
        # Test results: larger input must grow the cluster buffer
        self.assertTrue(array_size_before < array_size_after)
        self.assertTrue(np.array_equal(clusters, expected_clusters))
        self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))
def test_custom_hit_fields(self):
    """Hit arrays with custom field names must cluster like standard ones.

    A ``hit_fields`` mapping translates the custom names (eventNumber,
    relBCID, tot) onto the standard names the clusterizer expects; the
    clustered-hit output keeps the custom names.
    """
    # Define a different hit data structure with different names but standard data types.
    hit_dtype = np.dtype([
        ('eventNumber', '<i8'),
        ('relBCID', '<u1'),
        ('column', '<u2'),
        ('row', '<u2'),
        ('tot', '<u2')])
    # Clustered-hit dtype: the custom hit fields plus the cluster result fields
    hit_clustered_dtype = np.dtype([
        ('eventNumber', '<i8'),
        ('relBCID', '<u1'),
        ('column', '<u2'),
        ('row', '<u2'),
        ('tot', '<u2'),
        ('cluster_ID', '<i2'),
        ('is_seed', '<u1'),
        ('cluster_size', '<u2'),
        ('n_cluster', '<u2')])
    # Map custom field names onto the standard ones
    hit_fields = {
        'eventNumber': 'event_number',
        'column': 'column',
        'row': 'row',
        'tot': 'charge',
        'relBCID': 'frame'}
    # Initialize clusterizer and cluster test hits with self defined data type names
    clusterizer = HitClusterizer(hit_fields=hit_fields, hit_dtype=hit_dtype, pure_python=self.pure_python, min_hit_charge=0, max_hit_charge=13, charge_correction=1, charge_weighted_clustering=True, column_cluster_distance=2, row_cluster_distance=2, frame_cluster_distance=4, ignore_same_hits=True)
    hits = create_hits(n_hits=10, max_column=100, max_row=100, max_frame=1, max_charge=2, hit_dtype=hit_dtype, hit_fields=hit_fields)
    cluster_hits, clusters = clusterizer.cluster_hits(hits)
    array_size_before = clusterizer._clusters.shape[0]
    # Define expected output
    expected_clusters = np.zeros(shape=(4, ), dtype=default_clusters_dtype)
    expected_clusters['event_number'] = [0, 1, 2, 3]
    expected_clusters['n_hits'] = [3, 3, 3, 1]
    expected_clusters['charge'] = [1, 2, 1, 1]
    expected_clusters['seed_column'] = [2, 4, 8, 10]
    expected_clusters['seed_row'] = [3, 7, 15, 19]
    expected_clusters['mean_column'] = [2.0, 5.0, 8.0, 10.0]
    expected_clusters['mean_row'] = [3.0, 9.0, 15.0, 19.0]
    # Define expected output. Cluster hit data types are different and thus the expected results have to have different data types
    expected_cluster_hits = np.zeros(shape=(10, ), dtype=hit_clustered_dtype)
    expected_cluster_hits['eventNumber'] = hits['eventNumber']
    expected_cluster_hits['relBCID'] = hits['relBCID']
    expected_cluster_hits['column'] = hits['column']
    expected_cluster_hits['row'] = hits['row']
    expected_cluster_hits['tot'] = hits['tot']
    expected_cluster_hits['is_seed'] = [0, 1, 0, 1, 0, 0, 0, 1, 0, 1]
    expected_cluster_hits['cluster_size'] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 1]
    expected_cluster_hits['n_cluster'] = 1
    # Test results
    self.assertTrue(np.array_equal(clusters, expected_clusters))
    self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))
    # Test same size array
    hits = create_hits(n_hits=10, max_column=100, max_row=100, max_frame=1, max_charge=2, hit_dtype=hit_dtype, hit_fields=hit_fields)
    cluster_hits, clusters = clusterizer.cluster_hits(hits)
    array_size_after = clusterizer._clusters.shape[0]
    # Test results: same-sized input must not reallocate the cluster buffer
    self.assertTrue(array_size_before == array_size_after)
    self.assertTrue(np.array_equal(clusters, expected_clusters))
    self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))
    # Test increasing size array
    hits = create_hits(n_hits=20, max_column=100, max_row=100, max_frame=1, max_charge=2, hit_dtype=hit_dtype, hit_fields=hit_fields)
    cluster_hits, clusters = clusterizer.cluster_hits(hits)
    array_size_after = clusterizer._clusters.shape[0]
    # Define expected output
    expected_clusters = np.zeros(shape=(7, ), dtype=default_clusters_dtype)
    expected_clusters['event_number'] = [0, 1, 2, 3, 4, 5, 6]
    expected_clusters['n_hits'] = [3, 3, 3, 3, 3, 3, 2]
    expected_clusters['charge'] = [1, 2, 1, 2, 1, 2, 1]
    expected_clusters['seed_column'] = [2, 4, 8, 10, 14, 16, 20]
    expected_clusters['seed_row'] = [3, 7, 15, 19, 27, 31, 39]
    expected_clusters['mean_column'] = [2.0, 5.0, 8.0, 11.0, 14.0, 17.0, (1 * 19 + 2 * 20) / 3.0]
    expected_clusters['mean_row'] = [3.0, 9.0, 15.0, 21.0, 27.0, 33.0, (1 * 37 + 2 * 39) / 3.0]
    # Define expected output. Cluster hit data types are different and thus the expected results have to have different data types
    expected_cluster_hits = np.zeros(shape=(20, ), dtype=hit_clustered_dtype)
    expected_cluster_hits['eventNumber'] = hits['eventNumber']
    expected_cluster_hits['relBCID'] = hits['relBCID']
    expected_cluster_hits['column'] = hits['column']
    expected_cluster_hits['row'] = hits['row']
    expected_cluster_hits['tot'] = hits['tot']
    expected_cluster_hits['is_seed'] = [0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1]
    expected_cluster_hits['cluster_size'] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2]
    expected_cluster_hits['n_cluster'] = 1
    # Test results: larger input must grow the cluster buffer
    self.assertTrue(array_size_before < array_size_after)
    self.assertTrue(np.array_equal(clusters, expected_clusters))
    self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))
def test_custom_cluster_fields(self):
    """Cluster output with custom cluster field names must match standard results.

    A ``cluster_fields`` mapping translates the custom names
    (eventNumber, size, tot) onto the standard ones; the cluster-hit
    output keeps the default dtype.
    """
    # Define a different cluster data structure with different names but standard data types.
    clusters_dtype = np.dtype([
        ('eventNumber', '<i8'),
        ('ID', '<u2'),
        ('size', '<u2'),
        ('tot', 'f4'),
        ('seed_column', '<u2'),
        ('seed_row', '<u2'),
        ('mean_column', 'f4'),
        ('mean_row', 'f4')])
    # Map custom cluster field names onto the standard ones
    clusters_fields = {
        'eventNumber': 'event_number',
        'ID': 'ID',
        'size': 'n_hits',
        'tot': 'charge',
        'seed_column': 'seed_column',
        'seed_row': 'seed_row',
        'mean_column': 'mean_column',
        'mean_row': 'mean_row'}
    # Initialize clusterizer and cluster test hits with self defined data type names
    clusterizer = HitClusterizer(cluster_fields=clusters_fields, cluster_dtype=clusters_dtype, pure_python=self.pure_python, min_hit_charge=0, max_hit_charge=13, charge_correction=1, charge_weighted_clustering=True, column_cluster_distance=2, row_cluster_distance=2, frame_cluster_distance=4, ignore_same_hits=True)
    hits = create_hits(n_hits=10, max_column=100, max_row=100, max_frame=1, max_charge=2)
    cluster_hits, clusters = clusterizer.cluster_hits(hits)
    array_size_before = clusterizer._clusters.shape[0]
    # Define expected output
    expected_clusters = np.zeros(shape=(4, ), dtype=clusters_dtype)
    expected_clusters['eventNumber'] = [0, 1, 2, 3]
    expected_clusters['size'] = [3, 3, 3, 1]
    expected_clusters['tot'] = [1, 2, 1, 1]
    expected_clusters['seed_column'] = [2, 4, 8, 10]
    expected_clusters['seed_row'] = [3, 7, 15, 19]
    expected_clusters['mean_column'] = [2.0, 5.0, 8.0, 10.0]
    expected_clusters['mean_row'] = [3.0, 9.0, 15.0, 19.0]
    # Define expected output. Cluster hit data types are different and thus the expected results have to have different data types
    expected_cluster_hits = np.zeros(shape=(10, ), dtype=default_cluster_hits_dtype)
    expected_cluster_hits['event_number'] = hits['event_number']
    expected_cluster_hits['frame'] = hits['frame']
    expected_cluster_hits['column'] = hits['column']
    expected_cluster_hits['row'] = hits['row']
    expected_cluster_hits['charge'] = hits['charge']
    expected_cluster_hits['is_seed'] = [0, 1, 0, 1, 0, 0, 0, 1, 0, 1]
    expected_cluster_hits['cluster_size'] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 1]
    expected_cluster_hits['n_cluster'] = 1
    # Test results
    self.assertTrue(np.array_equal(clusters, expected_clusters))
    self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))
    # Test same size array
    hits = create_hits(n_hits=10, max_column=100, max_row=100, max_frame=1, max_charge=2)
    cluster_hits, clusters = clusterizer.cluster_hits(hits)
    array_size_after = clusterizer._clusters.shape[0]
    # Test results: same-sized input must not reallocate the cluster buffer
    self.assertTrue(array_size_before == array_size_after)
    self.assertTrue(np.array_equal(clusters, expected_clusters))
    self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))
    # Test increasing size array
    hits = create_hits(n_hits=20, max_column=100, max_row=100, max_frame=1, max_charge=2)
    cluster_hits, clusters = clusterizer.cluster_hits(hits)
    array_size_after = clusterizer._clusters.shape[0]
    # Define expected output
    expected_clusters = np.zeros(shape=(7, ), dtype=clusters_dtype)
    expected_clusters['eventNumber'] = [0, 1, 2, 3, 4, 5, 6]
    expected_clusters['size'] = [3, 3, 3, 3, 3, 3, 2]
    expected_clusters['tot'] = [1, 2, 1, 2, 1, 2, 1]
    expected_clusters['seed_column'] = [2, 4, 8, 10, 14, 16, 20]
    expected_clusters['seed_row'] = [3, 7, 15, 19, 27, 31, 39]
    expected_clusters['mean_column'] = [2.0, 5.0, 8.0, 11.0, 14.0, 17.0, (1 * 19 + 2 * 20) / 3.0]
    expected_clusters['mean_row'] = [3.0, 9.0, 15.0, 21.0, 27.0, 33.0, (1 * 37 + 2 * 39) / 3.0]
    # Define expected output. Cluster hit data types are different and thus the expected results have to have different data types
    expected_cluster_hits = np.zeros(shape=(20, ), dtype=default_cluster_hits_dtype)
    expected_cluster_hits['event_number'] = hits['event_number']
    expected_cluster_hits['frame'] = hits['frame']
    expected_cluster_hits['column'] = hits['column']
    expected_cluster_hits['row'] = hits['row']
    expected_cluster_hits['charge'] = hits['charge']
    expected_cluster_hits['is_seed'] = [0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1]
    expected_cluster_hits['cluster_size'] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2]
    expected_cluster_hits['n_cluster'] = 1
    # Test results: larger input must grow the cluster buffer
    self.assertTrue(array_size_before < array_size_after)
    self.assertTrue(np.array_equal(clusters, expected_clusters))
    self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))
def test_adding_hit_field(self):
    """Adding an extra hit field must propagate it unchanged to the cluster hits.

    Also checks that ``add_hit_field`` rejects a flat 2-item list (a
    proper description is a list of ``(name, dtype)`` tuples).
    """
    clusterizer = HitClusterizer(pure_python=self.pure_python, min_hit_charge=0, max_hit_charge=13, charge_correction=1, charge_weighted_clustering=True, column_cluster_distance=2, row_cluster_distance=2, frame_cluster_distance=4, ignore_same_hits=True)
    with self.assertRaises(TypeError):
        clusterizer.add_hit_field(description=['extra_field', 'f4'])  # also test list of 2 items
    clusterizer.add_hit_field(description=[('extra_field', 'f4')])  # also test list of 2-tuples
    modified_hits_descr = default_hits_descr[:]
    modified_hits_descr.append(('extra_field', 'f4'))
    hits = create_hits(n_hits=10, max_column=100, max_row=100, max_frame=1, max_charge=2, hit_dtype=np.dtype(modified_hits_descr))
    # Fill the extra field with distinct values (first entry stays 0.0)
    hits['extra_field'][1:] = range(hits.shape[0] - 1)
    cluster_hits, clusters = clusterizer.cluster_hits(hits)
    # Define expected cluster output with extra field
    expected_clusters = np.zeros(shape=(4, ), dtype=default_clusters_dtype)
    expected_clusters['event_number'] = [0, 1, 2, 3]
    expected_clusters['n_hits'] = [3, 3, 3, 1]
    expected_clusters['charge'] = [1, 2, 1, 1]
    expected_clusters['seed_column'] = [2, 4, 8, 10]
    expected_clusters['seed_row'] = [3, 7, 15, 19]
    expected_clusters['mean_column'] = [2.0, 5.0, 8.0, 10.0]
    expected_clusters['mean_row'] = [3.0, 9.0, 15.0, 19.0]
    # Define expected hit clustered output
    modified_cluster_hits_descr = default_cluster_hits_descr[:]
    modified_cluster_hits_descr.append(('extra_field', 'f4'))
    expected_cluster_hits = np.zeros(shape=(10, ), dtype=np.dtype(modified_cluster_hits_descr))
    expected_cluster_hits['event_number'] = hits['event_number']
    expected_cluster_hits['frame'] = hits['frame']
    expected_cluster_hits['column'] = hits['column']
    expected_cluster_hits['row'] = hits['row']
    expected_cluster_hits['charge'] = hits['charge']
    expected_cluster_hits['is_seed'] = [0, 1, 0, 1, 0, 0, 0, 1, 0, 1]
    expected_cluster_hits['cluster_size'] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 1]
    expected_cluster_hits['n_cluster'] = 1
    expected_cluster_hits['extra_field'] = [0.0, 0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]
    # Test results (leftover debug print statements removed: they only
    # cluttered the test runner output and asserted nothing)
    self.assertTrue(np.array_equal(clusters, expected_clusters))
    self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))
def test_adding_cluster_field(self):
    """Adding an extra cluster field must appear zero-filled in the output.

    Also checks that ``add_cluster_field`` rejects a flat 2-item list,
    and that the internal cluster buffer is reused for same-sized input
    and grows for larger input.
    """
    clusterizer = HitClusterizer(pure_python=self.pure_python, min_hit_charge=0, max_hit_charge=13, charge_correction=1, charge_weighted_clustering=True, column_cluster_distance=2, row_cluster_distance=2, frame_cluster_distance=4, ignore_same_hits=True)
    with self.assertRaises(TypeError):
        clusterizer.add_cluster_field(description=['extra_field', 'f4'])  # also test list of 2 items
    clusterizer.add_cluster_field(description=[('extra_field', 'f4')])  # also test list of 2-tuples
    hits = create_hits(n_hits=10, max_column=100, max_row=100, max_frame=1, max_charge=2)
    cluster_hits, clusters = clusterizer.cluster_hits(hits)
    array_size_before = clusterizer._clusters.shape[0]
    # Define expected cluster output with extra field
    modified_clusters_descr = default_clusters_descr[:]
    modified_clusters_descr.append(('extra_field', 'f4'))
    expected_clusters = np.zeros(shape=(4, ), dtype=np.dtype(modified_clusters_descr))
    expected_clusters['event_number'] = [0, 1, 2, 3]
    expected_clusters['n_hits'] = [3, 3, 3, 1]
    expected_clusters['charge'] = [1, 2, 1, 1]
    expected_clusters['seed_column'] = [2, 4, 8, 10]
    expected_clusters['seed_row'] = [3, 7, 15, 19]
    expected_clusters['mean_column'] = [2.0, 5.0, 8.0, 10.0]
    expected_clusters['mean_row'] = [3.0, 9.0, 15.0, 19.0]
    # The new field is not filled by the clusterizer, so it stays zero
    expected_clusters['extra_field'] = [0.0, 0.0, 0.0, 0.0]
    # Define expected hit clustered output
    expected_cluster_hits = np.zeros(shape=(10, ), dtype=default_cluster_hits_dtype)
    expected_cluster_hits['event_number'] = hits['event_number']
    expected_cluster_hits['frame'] = hits['frame']
    expected_cluster_hits['column'] = hits['column']
    expected_cluster_hits['row'] = hits['row']
    expected_cluster_hits['charge'] = hits['charge']
    expected_cluster_hits['is_seed'] = [0, 1, 0, 1, 0, 0, 0, 1, 0, 1]
    expected_cluster_hits['cluster_size'] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 1]
    expected_cluster_hits['n_cluster'] = 1
    # Test results
    self.assertTrue(np.array_equal(clusters, expected_clusters))
    self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))
    # Test same size array
    hits = create_hits(n_hits=10, max_column=100, max_row=100, max_frame=1, max_charge=2)
    cluster_hits, clusters = clusterizer.cluster_hits(hits)
    array_size_after = clusterizer._clusters.shape[0]
    # Test results: same-sized input must not reallocate the cluster buffer
    self.assertTrue(array_size_before == array_size_after)
    self.assertTrue(np.array_equal(clusters, expected_clusters))
    self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))
    # Test increasing size array
    hits = create_hits(n_hits=20, max_column=100, max_row=100, max_frame=1, max_charge=2)
    cluster_hits, clusters = clusterizer.cluster_hits(hits)
    array_size_after = clusterizer._clusters.shape[0]
    # Define expected cluster output with extra field
    modified_clusters_descr = default_clusters_descr[:]
    modified_clusters_descr.append(('extra_field', 'f4'))
    expected_clusters = np.zeros(shape=(7, ), dtype=np.dtype(modified_clusters_descr))
    expected_clusters['event_number'] = [0, 1, 2, 3, 4, 5, 6]
    expected_clusters['n_hits'] = [3, 3, 3, 3, 3, 3, 2]
    expected_clusters['charge'] = [1, 2, 1, 2, 1, 2, 1]
    expected_clusters['seed_column'] = [2, 4, 8, 10, 14, 16, 20]
    expected_clusters['seed_row'] = [3, 7, 15, 19, 27, 31, 39]
    expected_clusters['mean_column'] = [2.0, 5.0, 8.0, 11.0, 14.0, 17.0, (1 * 19 + 2 * 20) / 3.0]
    expected_clusters['mean_row'] = [3.0, 9.0, 15.0, 21.0, 27.0, 33.0, (1 * 37 + 2 * 39) / 3.0]
    expected_clusters['extra_field'] = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
    # Define expected hit clustered output
    expected_cluster_hits = np.zeros(shape=(20, ), dtype=default_cluster_hits_dtype)
    expected_cluster_hits['event_number'] = hits['event_number']
    expected_cluster_hits['frame'] = hits['frame']
    expected_cluster_hits['column'] = hits['column']
    expected_cluster_hits['row'] = hits['row']
    expected_cluster_hits['charge'] = hits['charge']
    expected_cluster_hits['is_seed'] = [0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1]
    expected_cluster_hits['cluster_size'] = [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2]
    expected_cluster_hits['n_cluster'] = 1
    # Test results: larger input must grow the cluster buffer
    self.assertTrue(array_size_before < array_size_after)
    self.assertTrue(np.array_equal(clusters, expected_clusters))
    self.assertTrue(np.array_equal(cluster_hits, expected_cluster_hits))
if __name__ == '__main__':
    # Run the clusterizer test case with verbose per-test output.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(TestClusterizer))
| |
"""
Novell ASAM Runner
==================
.. versionadded:: 2015.8.0
Runner to interact with Novell ASAM Fan-Out Driver
:codeauthor: Nitin Madhok <nmadhok@g.clemson.edu>
To use this runner, set up the Novell Fan-Out Driver URL, username and password in the
master configuration at ``/etc/salt/master`` or ``/etc/salt/master.d/asam.conf``:
.. code-block:: yaml
asam:
    prov1.domain.com:
      username: "testuser"
      password: "verybadpass"
      verify_ssl: true
    prov2.domain.com:
      username: "testuser"
      password: "verybadpass"
      verify_ssl: true
.. note::
Optionally, ``protocol`` and ``port`` can be specified if the Fan-Out Driver server
is not using the defaults. Default is ``protocol: https`` and ``port: 3451``.
"""
import logging
HAS_LIBS = False
try:
    import requests
    import html.parser

    HAS_LIBS = True

    # pylint: disable=abstract-method
    class ASAMHTMLParser(html.parser.HTMLParser):  # fix issue #30477
        """Collect the ``href`` target of every anchor tag fed to the parser."""

        def __init__(self):
            html.parser.HTMLParser.__init__(self)
            # href values of all <a> tags seen so far
            self.data = []

        def handle_starttag(self, tag, attrs):
            if tag != "a":
                return
            # Collect the href wherever it appears in the attribute list.
            # The previous code returned as soon as it saw a non-href
            # attribute, silently skipping anchors such as
            # <a class="x" href="y">.
            for attr_name, attr_value in attrs:
                if attr_name == "href":
                    self.data.append(attr_value)

    # pylint: enable=abstract-method
except ImportError:
    pass
def __virtual__():
    """
    Check for ASAM Fan-Out driver configuration in master config file
    or directory and load runner only if it is specified
    """
    # Short-circuits: the configuration lookup only runs when the
    # required libraries imported successfully.
    return bool(HAS_LIBS) and _get_asam_configuration() is not False
def _get_asam_configuration(driver_url=""):
    """
    Return the configuration read from the master configuration
    file or directory.

    When *driver_url* is non-empty, only that Fan-Out Driver server's
    entry is returned; when empty, the first configured server wins.
    Returns a dict with the config-page URLs, credentials and
    ``verify_ssl`` flag, or ``False`` when the configuration is missing
    or incomplete.
    """
    asam_config = __opts__["asam"] if "asam" in __opts__ else None

    if asam_config:
        try:
            for asam_server, service_config in asam_config.items():
                username = service_config.get("username", None)
                password = service_config.get("password", None)
                protocol = service_config.get("protocol", "https")
                port = service_config.get("port", 3451)
                verify_ssl = service_config.get("verify_ssl")

                # Default to verifying SSL certificates unless the entry
                # explicitly disables it.
                if verify_ssl is None:
                    verify_ssl = True

                if not username or not password:
                    log.error(
                        "Username or Password has not been specified in the "
                        "master configuration for %s",
                        asam_server,
                    )
                    return False

                # URLs of the Fan-Out Driver's web-configuration pages
                ret = {
                    "platform_edit_url": "{}://{}:{}/config/PlatformEdit.html".format(
                        protocol, asam_server, port
                    ),
                    "platform_config_url": (
                        "{}://{}:{}/config/PlatformConfig.html".format(
                            protocol, asam_server, port
                        )
                    ),
                    "platformset_edit_url": (
                        "{}://{}:{}/config/PlatformSetEdit.html".format(
                            protocol, asam_server, port
                        )
                    ),
                    "platformset_config_url": (
                        "{}://{}:{}/config/PlatformSetConfig.html".format(
                            protocol, asam_server, port
                        )
                    ),
                    "username": username,
                    "password": password,
                    "verify_ssl": verify_ssl,
                }

                # No driver_url given: first server wins; otherwise only
                # return the matching server's configuration.
                if (not driver_url) or (driver_url == asam_server):
                    return ret
        except Exception as exc:  # pylint: disable=broad-except
            log.error("Exception encountered: %s", exc)
            return False

        if driver_url:
            log.error(
                "Configuration for %s has not been specified in the master "
                "configuration",
                driver_url,
            )
            return False

    return False
def _make_post_request(url, data, auth, verify=True):
    """POST *data* to *url* and return the response body split into lines.

    Raises ``requests.HTTPError`` for 4xx/5xx responses (via
    ``raise_for_status``); other non-200 statuses yield ``None``.
    """
    response = requests.post(url, data=data, auth=auth, verify=verify)
    if response.status_code == requests.codes.ok:
        return response.text.split("\n")
    response.raise_for_status()
def _parse_html_content(html_content):
    """Feed *html_content* (list of lines) through an ASAMHTMLParser.

    Lines starting with ``<META`` are skipped because the parser chokes
    on them (see issue #30477).  Returns the parser, whose ``data``
    attribute holds the collected anchor hrefs.

    Note: the previous implementation removed META lines from the list
    *while iterating it*, which made the iterator skip the line that
    followed every META line, so those lines were never parsed.  This
    version filters without mutating the caller's list.
    """
    parser = ASAMHTMLParser()
    for line in html_content:
        if not line.startswith("<META"):
            parser.feed(line)
    return parser
def _get_platformset_name(data, platform_name):
for item in data:
if platform_name in item and item.startswith("PlatformEdit.html?"):
parameter_list = item.split("&")
for parameter in parameter_list:
if parameter.startswith("platformSetName"):
return parameter.split("=")[1]
return None
def _get_platforms(data):
platform_list = []
for item in data:
if item.startswith("PlatformEdit.html?"):
parameter_list = item.split("PlatformEdit.html?", 1)[1].split("&")
for parameter in parameter_list:
if parameter.startswith("platformName"):
platform_list.append(parameter.split("=")[1])
return platform_list
def _get_platform_sets(data):
platform_set_list = []
for item in data:
if item.startswith("PlatformSetEdit.html?"):
parameter_list = item.split("PlatformSetEdit.html?", 1)[1].split("&")
for parameter in parameter_list:
if parameter.startswith("platformSetName"):
platform_set_list.append(
parameter.split("=")[1].replace("%20", " ")
)
return platform_set_list
def remove_platform(name, server_url):
    """
    To remove specified ASAM platform from the Novell Fan-Out Driver

    Returns ``False`` on missing configuration, otherwise a
    ``{name: status message}`` dict.

    CLI Example:

    .. code-block:: bash

        salt-run asam.remove_platform my-test-vm prov1.domain.com
    """
    config = _get_asam_configuration(server_url)
    if not config:
        return False

    url = config["platform_config_url"]

    # "manual: false" mimics the web UI's form submission
    data = {
        "manual": "false",
    }
    auth = (config["username"], config["password"])

    # Fetch the platform list first to resolve which platform set the
    # platform belongs to; the delete request needs both names.
    try:
        html_content = _make_post_request(url, data, auth, verify=config["verify_ssl"])
    except Exception as exc:  # pylint: disable=broad-except
        err_msg = "Failed to look up existing platforms on {}".format(server_url)
        log.error("%s:\n%s", err_msg, exc)
        return {name: err_msg}

    parser = _parse_html_content(html_content)
    platformset_name = _get_platformset_name(parser.data, name)

    if platformset_name:
        log.debug(platformset_name)
        data["platformName"] = name
        data["platformSetName"] = str(platformset_name)
        data["postType"] = "platformRemove"
        data["Submit"] = "Yes"
        try:
            html_content = _make_post_request(
                url, data, auth, verify=config["verify_ssl"]
            )
        except Exception as exc:  # pylint: disable=broad-except
            err_msg = "Failed to delete platform from {}".format(server_url)
            log.error("%s:\n%s", err_msg, exc)
            return {name: err_msg}

        # Re-query: if the platform still resolves to a set, the delete failed
        parser = _parse_html_content(html_content)
        platformset_name = _get_platformset_name(parser.data, name)
        if platformset_name:
            return {name: "Failed to delete platform from {}".format(server_url)}
        else:
            return {name: "Successfully deleted platform from {}".format(server_url)}
    else:
        return {name: "Specified platform name does not exist on {}".format(server_url)}
def list_platforms(server_url):
    """
    To list all ASAM platforms present on the Novell Fan-Out Driver

    CLI Example:

    .. code-block:: bash

        salt-run asam.list_platforms prov1.domain.com
    """
    config = _get_asam_configuration(server_url)
    if not config:
        return False

    auth = (config["username"], config["password"])
    # "manual: false" mimics the web UI's form submission
    payload = {
        "manual": "false",
    }

    try:
        html_content = _make_post_request(
            config["platform_config_url"], payload, auth, verify=config["verify_ssl"]
        )
    except Exception as exc:  # pylint: disable=broad-except
        err_msg = "Failed to look up existing platforms"
        log.error("%s:\n%s", err_msg, exc)
        return {server_url: err_msg}

    platform_list = _get_platforms(_parse_html_content(html_content).data)
    if not platform_list:
        return {server_url: "No existing platforms found"}
    return {server_url: platform_list}
def list_platform_sets(server_url):
    """
    To list all ASAM platform sets present on the Novell Fan-Out Driver

    CLI Example:

    .. code-block:: bash

        salt-run asam.list_platform_sets prov1.domain.com
    """
    config = _get_asam_configuration(server_url)
    if not config:
        return False

    auth = (config["username"], config["password"])
    # "manual: false" mimics the web UI's form submission
    payload = {
        "manual": "false",
    }

    try:
        html_content = _make_post_request(
            config["platformset_config_url"], payload, auth, verify=config["verify_ssl"]
        )
    except Exception as exc:  # pylint: disable=broad-except
        err_msg = "Failed to look up existing platform sets"
        log.error("%s:\n%s", err_msg, exc)
        return {server_url: err_msg}

    platform_set_list = _get_platform_sets(_parse_html_content(html_content).data)
    if not platform_set_list:
        return {server_url: "No existing platform sets found"}
    return {server_url: platform_set_list}
def add_platform(name, platform_set, server_url):
    """
    To add an ASAM platform using the specified ASAM platform set on the Novell
    Fan-Out Driver

    Returns ``False`` on missing configuration, otherwise a
    ``{name: status message}`` dict.

    CLI Example:

    .. code-block:: bash

        salt-run asam.add_platform my-test-vm test-platform-set prov1.domain.com
    """
    config = _get_asam_configuration(server_url)
    if not config:
        return False

    # Pre-flight checks: the platform must not already exist, and the
    # target platform set must exist.
    platforms = list_platforms(server_url)
    if name in platforms[server_url]:
        return {name: "Specified platform already exists on {}".format(server_url)}

    platform_sets = list_platform_sets(server_url)
    if platform_set not in platform_sets[server_url]:
        return {name: "Specified platform set does not exist on {}".format(server_url)}

    url = config["platform_edit_url"]

    # Form fields mimicking the web UI's "add platform" submission
    data = {
        "platformName": name,
        "platformSetName": platform_set,
        "manual": "false",
        "previousURL": "/config/platformAdd.html",
        "postType": "PlatformAdd",
        "Submit": "Apply",
    }
    auth = (config["username"], config["password"])

    try:
        html_content = _make_post_request(url, data, auth, verify=config["verify_ssl"])
    except Exception as exc:  # pylint: disable=broad-except
        err_msg = "Failed to add platform on {}".format(server_url)
        log.error("%s:\n%s", err_msg, exc)
        return {name: err_msg}

    # Verify by re-listing: success means the new name now shows up
    platforms = list_platforms(server_url)
    if name in platforms[server_url]:
        return {name: "Successfully added platform on {}".format(server_url)}
    else:
        return {name: "Failed to add platform on {}".format(server_url)}
| |
from jsonrpc import ServiceProxy
import sys
import string
# NOTE(review): `string` appears unused below -- kept as-is.
# ===== BEGIN USER SETTINGS =====
# if you do not set these you will be prompted for a password for every command
rpcuser = ""
rpcpass = ""
# ====== END USER SETTINGS ======
# Connect to the local JSON-RPC wallet daemon on port 55881; embed the
# credentials in the URL only when a password was configured above.
if rpcpass == "":
    access = ServiceProxy("http://127.0.0.1:55881")
else:
    access = ServiceProxy("http://"+rpcuser+":"+rpcpass+"@127.0.0.1:55881")
cmd = sys.argv[1].lower()
if cmd == "backupwallet":
try:
path = raw_input("Enter destination path/filename: ")
print access.backupwallet(path)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccount":
try:
addr = raw_input("Enter a Bitcoin address: ")
print access.getaccount(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "getaccountaddress":
try:
acct = raw_input("Enter an account name: ")
print access.getaccountaddress(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getaddressesbyaccount":
try:
acct = raw_input("Enter an account name: ")
print access.getaddressesbyaccount(acct)
except:
print "\n---An error occurred---\n"
elif cmd == "getbalance":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getbalance(acct, mc)
except:
print access.getbalance()
except:
print "\n---An error occurred---\n"
elif cmd == "getblockbycount":
try:
height = raw_input("Height: ")
print access.getblockbycount(height)
except:
print "\n---An error occurred---\n"
elif cmd == "getblockcount":
try:
print access.getblockcount()
except:
print "\n---An error occurred---\n"
elif cmd == "getblocknumber":
try:
print access.getblocknumber()
except:
print "\n---An error occurred---\n"
elif cmd == "getconnectioncount":
try:
print access.getconnectioncount()
except:
print "\n---An error occurred---\n"
elif cmd == "getdifficulty":
try:
print access.getdifficulty()
except:
print "\n---An error occurred---\n"
elif cmd == "getgenerate":
try:
print access.getgenerate()
except:
print "\n---An error occurred---\n"
elif cmd == "gethashespersec":
try:
print access.gethashespersec()
except:
print "\n---An error occurred---\n"
elif cmd == "getinfo":
try:
print access.getinfo()
except:
print "\n---An error occurred---\n"
elif cmd == "getnewaddress":
try:
acct = raw_input("Enter an account name: ")
try:
print access.getnewaddress(acct)
except:
print access.getnewaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaccount":
try:
acct = raw_input("Enter an account (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaccount(acct, mc)
except:
print access.getreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "getreceivedbyaddress":
try:
addr = raw_input("Enter a Bitcoin address (optional): ")
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.getreceivedbyaddress(addr, mc)
except:
print access.getreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "gettransaction":
try:
txid = raw_input("Enter a transaction ID: ")
print access.gettransaction(txid)
except:
print "\n---An error occurred---\n"
elif cmd == "getwork":
try:
data = raw_input("Data (optional): ")
try:
print access.gettransaction(data)
except:
print access.gettransaction()
except:
print "\n---An error occurred---\n"
elif cmd == "help":
try:
cmd = raw_input("Command (optional): ")
try:
print access.help(cmd)
except:
print access.help()
except:
print "\n---An error occurred---\n"
elif cmd == "listaccounts":
try:
mc = raw_input("Minimum confirmations (optional): ")
try:
print access.listaccounts(mc)
except:
print access.listaccounts()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaccount":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaccount(mc, incemp)
except:
print access.listreceivedbyaccount()
except:
print "\n---An error occurred---\n"
elif cmd == "listreceivedbyaddress":
try:
mc = raw_input("Minimum confirmations (optional): ")
incemp = raw_input("Include empty? (true/false, optional): ")
try:
print access.listreceivedbyaddress(mc, incemp)
except:
print access.listreceivedbyaddress()
except:
print "\n---An error occurred---\n"
elif cmd == "listtransactions":
try:
acct = raw_input("Account (optional): ")
count = raw_input("Number of transactions (optional): ")
frm = raw_input("Skip (optional):")
try:
print access.listtransactions(acct, count, frm)
except:
print access.listtransactions()
except:
print "\n---An error occurred---\n"
elif cmd == "move":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.move(frm, to, amt, mc, comment)
except:
print access.move(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendfrom":
try:
frm = raw_input("From: ")
to = raw_input("To: ")
amt = raw_input("Amount:")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendfrom(frm, to, amt, mc, comment, commentto)
except:
print access.sendfrom(frm, to, amt)
except:
print "\n---An error occurred---\n"
elif cmd == "sendmany":
try:
frm = raw_input("From: ")
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
mc = raw_input("Minimum confirmations (optional): ")
comment = raw_input("Comment (optional): ")
try:
print access.sendmany(frm,to,mc,comment)
except:
print access.sendmany(frm,to)
except:
print "\n---An error occurred---\n"
elif cmd == "sendtoaddress":
try:
to = raw_input("To (in format address1:amount1,address2:amount2,...): ")
amt = raw_input("Amount:")
comment = raw_input("Comment (optional): ")
commentto = raw_input("Comment-to (optional): ")
try:
print access.sendtoaddress(to,amt,comment,commentto)
except:
print access.sendtoaddress(to,amt)
except:
print "\n---An error occurred---\n"
elif cmd == "setaccount":
try:
addr = raw_input("Address: ")
acct = raw_input("Account:")
print access.setaccount(addr,acct)
except:
print "\n---An error occurred---\n"
elif cmd == "setgenerate":
try:
gen= raw_input("Generate? (true/false): ")
cpus = raw_input("Max processors/cores (-1 for unlimited, optional):")
try:
print access.setgenerate(gen, cpus)
except:
print access.setgenerate(gen)
except:
print "\n---An error occurred---\n"
elif cmd == "settxfee":
try:
amt = raw_input("Amount:")
print access.settxfee(amt)
except:
print "\n---An error occurred---\n"
elif cmd == "stop":
try:
print access.stop()
except:
print "\n---An error occurred---\n"
elif cmd == "validateaddress":
try:
addr = raw_input("Address: ")
print access.validateaddress(addr)
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrase":
try:
pwd = raw_input("Enter wallet passphrase: ")
access.walletpassphrase(pwd, 60)
print "\n---Wallet unlocked---\n"
except:
print "\n---An error occurred---\n"
elif cmd == "walletpassphrasechange":
try:
pwd = raw_input("Enter old wallet passphrase: ")
pwd2 = raw_input("Enter new wallet passphrase: ")
access.walletpassphrasechange(pwd, pwd2)
print
print "\n---Passphrase changed---\n"
except:
print
print "\n---An error occurred---\n"
print
else:
print "Command not found or not supported"
| |
import tensorflow as tf
import os
import sys
sys.path.append(os.path.realpath(".."))
import helpers.utils as hlp
import subprocess
from redis import Redis
import time
from helpers.layers import denselayer
from models.ddpg_network import DDPGNetwork
import numpy as np
class DDPGTrainer(DDPGNetwork):
    """Distributed DDPG trainer coordinated through a Redis instance on port 12000.

    Process roles (selected by the launcher via ``hlp.launch_workers``):
      * ``work``         -- actor: runs the environment, pushes transitions
                            onto the Redis list 'transitions' and periodically
                            refreshes its weights from Redis.
      * ``make_rollout`` -- collects whole episodes (for feature measurement
                            or testing) and stores them under 'paths_<id>'.
      * ``train``        -- learner: starts the Redis server, samples
                            minibatches from 'transitions' and runs
                            critic/actor updates.
    """

    def __init__(self, sess, args):
        """Store hyperparameters from ``args`` (a dict) and build the train ops.

        ``sess`` is a tf.Session shared with the underlying DDPGNetwork.
        """
        DDPGNetwork.__init__(self, sess, args)
        self.sess = sess
        self.config = args['config']
        self.env = args['environment']
        self.l_rate = args['learning_rate']
        self.timesteps_per_launch = args['max_pathlength']
        self.n_workers = args['n_workers']
        self.l_rate_critic = args['learning_rate_critic']
        self.n_pre_tasks = args['n_pre_tasks']
        self.n_tests = args['n_tests']
        self.scale = args['scale']
        self.gamma = args['gamma']
        self.tau = args['tau']
        self.action_noise = args['action_noise']
        self.test_every = args['test_every']
        self.random_steps = args['random_steps']
        self.step_delay = args['step_delay']
        self.xp_size = args['xp_size']
        # NOTE(review): save_every/clip_error are read here but not used in
        # this class (checkpointing below is hard-coded to every 1000
        # iterations) -- confirm whether they are consumed elsewhere.
        self.save_every = args.get('save_every', 1)
        self.clip_error = args.get('clip_error', 10.)
        self.batch_size = args['batch_size']
        # Running statistics for observation normalization.
        self.sums = self.sumsqrs = self.sumtime = 0
        self.timestep = 0
        self.create_internal()
        self.sess.run(tf.global_variables_initializer())
        self.train_scores = []
        self.test_scores = []
        np.set_printoptions(precision=6)
        # Worker parameters:
        self.id_worker = args['id_worker']
        self.test_mode = args['test_mode']

    def create_internal(self):
        """Create the critic (TD-error) and actor (policy-gradient) train ops."""
        td_error = self.better_value - self.critic_value
        # Critic: minimize mean squared TD error.
        self.value_loss = tf.reduce_mean(td_error ** 2)
        self.value_train_op = tf.train.AdamOptimizer(self.l_rate_critic).minimize(self.value_loss,
                                                                                  var_list=self.value_weights)
        # Actor: maximize the critic's evaluation of the policy's actions
        # (implemented as minimizing its negation).
        self.train_actor_op = tf.train.AdamOptimizer(self.l_rate).minimize(-tf.reduce_mean(self.value_for_train),
                                                                           var_list=self.weights)

    def save(self, name):
        """Dump all TF variables and training stats under saves/<name>/iteration_<t>/."""
        directory = 'saves/' + name + '/'
        if not os.path.exists(directory):
            os.makedirs(directory)
        directory += 'iteration_{}'.format(self.timestep) + '/'
        if not os.path.exists(directory):
            os.makedirs(directory)
        # One .npy file per TF variable, indexed in graph order.
        for i, w in enumerate(tf.global_variables()):
            np.save(directory + 'weight_{}'.format(i), self.sess.run(w))
        if self.scale:
            np.save(directory + 'sums', self.sums)
            np.save(directory + 'sumsquares', self.sumsqrs)
            np.save(directory + 'sumtime', self.sumtime)
        np.save(directory + 'timestep', np.array([self.timestep]))
        np.save(directory + 'train_scores', np.array(self.train_scores))
        np.save(directory + 'test_scores', np.array(self.test_scores))
        print("Agent successfully saved in folder {}".format(directory))

    def load(self, name, iteration=None):
        """Restore variables/stats written by ``save``; latest iteration by default."""
        try:
            directory = 'saves/' + name + '/'
            if not os.path.exists(directory):
                print('That directory does not exist!')
                raise Exception
            if iteration is None:
                # Subfolders are named 'iteration_<n>'; pick the largest n.
                iteration = np.max([int(x[10:]) for x in [dir for dir in os.walk(directory)][0][1]])
            directory += 'iteration_{}'.format(iteration) + '/'
            # NOTE(review): `weights` is sized from self.weights but filled by
            # enumerating tf.global_variables() -- confirm the two collections
            # have the same length and ordering.
            weights = [np.zeros(shape=w.get_shape()) for w in self.weights]
            for i, w in enumerate(tf.global_variables()):
                weights[i] = np.load(directory + 'weight_{}.npy'.format(i))
            self.set_weights(weights)
            if self.scale:
                self.sums = np.load(directory + 'sums.npy')
                self.sumsqrs = np.load(directory + 'sumsquares.npy')
                self.sumtime = np.load(directory + 'sumtime.npy')
            self.timestep = np.load(directory + 'timestep.npy')[0]
            self.train_scores = np.load(directory + 'train_scores.npy').tolist()
            self.test_scores = np.load(directory + 'test_scores.npy').tolist()
            print("Agent successfully loaded from folder {}".format(directory))
        except:
            # Best-effort load: any failure leaves the agent unmodified.
            print("Something is wrong, loading failed")

    def load_weights_from_redis(self):
        """Pull the latest actor weights published by the learner process."""
        weights = [hlp.load_object(self.variables_server.get("weight_{}".format(i))) for i in range(len(self.weights))]
        self.set_weights(weights)

    def work(self):
        """Actor loop: interact with the environment forever, streaming transitions to Redis."""
        self.variables_server = Redis(port=12000)
        if self.scale != 'off':
            # Best effort: adopt normalization stats if the learner published them.
            try:
                means = hlp.load_object(self.variables_server.get("means"))
                stds = hlp.load_object(self.variables_server.get("stds"))
                self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))
            except:
                pass
        # Best effort: start from the learner's current weights if available.
        try:
            weights = [hlp.load_object(self.variables_server.get("weight_{}".format(i))) for i in
                       range(len(self.weights))]
            self.set_weights(weights)
        except:
            pass
        env = self.env
        local_iteration = 0
        while True:
            st = time.time()
            self.last_state = env.reset()
            self.load_weights_from_redis()
            while not env.done and env.timestamp < self.timesteps_per_launch:
                # Purely random exploration for the first `random_steps`
                # (global, i.e. scaled by n_workers) iterations.
                if local_iteration * self.n_workers <= self.random_steps:
                    actions = env.env.action_space.sample()
                else:
                    actions = self.act(env.features)
                    actions += np.random.normal(0, scale=self.action_noise, size=actions.shape)
                env.step(actions)
                transition = hlp.dump_object([self.last_state, env.reward, actions, env.features, env.done])
                time.sleep(self.step_delay)
                self.variables_server.lpush('transitions', transition)
                self.last_state = env.features
                # Refresh weights at most every ~3 seconds of wall time.
                if time.time() - st > 3:
                    st = time.time()
                    self.load_weights_from_redis()
            local_iteration += 1
            print("Episode reward: {}".format(env.get_total_reward()), "Length: {}".format(env.timestamp))
            # Keep the shared replay list bounded to xp_size entries.
            if self.variables_server.llen('transitions') > self.xp_size:
                self.variables_server.ltrim('transitions', 1, self.xp_size)

    def make_rollout(self):
        """Collect complete episodes and publish them to Redis as 'paths_<id_worker>'.

        In test mode runs ``n_tests`` greedy episodes; otherwise runs
        ``n_pre_tasks`` exploratory episodes (used to measure feature
        statistics before training).
        """
        variables_server = Redis(port=12000)
        if self.scale != 'off':
            try:
                means = hlp.load_object(variables_server.get("means"))
                stds = hlp.load_object(variables_server.get("stds"))
                self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))
            except:
                pass
        try:
            weights = [hlp.load_object(variables_server.get("weight_{}".format(i))) for i in
                       range(len(self.weights))]
            self.set_weights(weights)
        except:
            pass
        env = self.env
        if self.test_mode:
            n_tasks = self.n_tests
        else:
            n_tasks = self.n_pre_tasks
        i_task = 0
        paths = []
        while i_task < n_tasks:
            path = {}
            rewards = []
            # Per-episode sums for feature mean/std estimation.
            sums = np.zeros((1, env.get_observation_space()))
            sumsqrs = np.zeros(sums.shape)
            env.reset()
            while not env.done and env.timestamp < self.timesteps_per_launch:
                sums += env.features
                sumsqrs += np.square(env.features)
                if not self.test_mode:
                    actions = self.act(env.features, exploration=True)
                else:
                    actions = self.act(env.features, exploration=False)
                env.step(actions)
                rewards.append(env.reward)
            path["rewards"] = rewards
            path["sumobs"] = sums
            path["sumsqrobs"] = sumsqrs
            path["total"] = env.get_total_reward()
            paths.append(path)
            i_task += 1
        variables_server.set("paths_{}".format(self.id_worker), hlp.dump_object(paths))

    def update_target_weights(self, alpha=None):
        """Polyak-average online weights into the target networks.

        ``alpha`` defaults to tau; alpha=1.0 makes the targets an exact copy.
        """
        if alpha is None:
            alpha = self.tau
        # Target critic update.
        value_weights = self.get_value_weights()
        new_weights = self.get_target_value_weights()
        for i in range(len(value_weights)):
            new_weights[i] = new_weights[i] * (1 - alpha) + alpha * value_weights[i]
        self.set_target_value_weights(new_weights)
        # Target actor update.
        weights = self.get_weights()
        new_weights = self.get_target_weights()
        for i in range(len(weights)):
            new_weights[i] = new_weights[i] * (1 - alpha) + alpha * weights[i]
        self.set_target_weights(new_weights)

    def train(self):
        """Learner loop: spawn Redis and workers, then update networks forever."""
        cmd_server = 'redis-server --port 12000'
        p = subprocess.Popen(cmd_server, shell=True, preexec_fn=os.setsid)
        self.variables_server = Redis(port=12000)
        means = "-"
        stds = "-"
        if self.scale != 'off':
            if self.timestep == 0:
                # Measure observation statistics with random rollouts once,
                # at the very start of training.
                print("Time to measure features!")
                worker_args = \
                    {
                        'config': self.config,
                        'test_mode': False,
                    }
                hlp.launch_workers(worker_args, self.n_workers)
                paths = []
                for i in range(self.n_workers):
                    paths += hlp.load_object(self.variables_server.get("paths_{}".format(i)))
                for path in paths:
                    self.sums += path["sumobs"]
                    self.sumsqrs += path["sumsqrobs"]
                    self.sumtime += len(path["rewards"])
                # Sample standard deviation / mean over all observed features.
                stds = np.sqrt((self.sumsqrs - np.square(self.sums) / self.sumtime) / (self.sumtime - 1))
                means = self.sums / self.sumtime
                print("Init means: {}".format(means))
                print("Init stds: {}".format(stds))
                self.variables_server.set("means", hlp.dump_object(means))
                self.variables_server.set("stds", hlp.dump_object(stds))
            self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))
        print("Let's go!")
        # Hard-copy online weights into the targets before training starts.
        self.update_target_weights(alpha=1.0)
        weights = self.get_weights()
        for i, weight in enumerate(weights):
            self.variables_server.set("weight_" + str(i), hlp.dump_object(weight))
        worker_args = \
            {
                'config': self.config,
                'test_mode': False,
            }
        hlp.launch_workers(worker_args, self.n_workers, command='work', wait=False)
        self.variables_server.ltrim('transitions', 0, 0)
        time.sleep(5)
        iteration = 0
        start_time = time.time()
        max_idx = self.variables_server.llen('transitions')
        while True:
            obs_batch = []
            next_obs_batch = []
            done_batch = []
            reward_batch = []
            actions_batch = []
            # Re-check the replay size occasionally until it is full.
            if iteration % 500 == 0 and max_idx < self.xp_size:
                max_idx = self.variables_server.llen('transitions')
            idxs = np.random.randint(np.min([self.xp_size, max_idx]), size=self.batch_size)
            transitions = []
            for i in range(self.batch_size):
                transitions.append(hlp.load_object(self.variables_server.lindex('transitions', idxs[i])))
            # Transition layout: [state, reward, actions, next_state, done].
            for transition in transitions:
                obs_batch.append(transition[0])
                reward_batch.append(transition[1])
                actions_batch.append(transition[2])
                next_obs_batch.append(transition[3])
                done_batch.append(transition[4])
            feed_dict = {
                self.state_input: np.concatenate(obs_batch, axis=0),
                self.next_state_input: np.concatenate(next_obs_batch, axis=0),
                self.action_input: np.array(actions_batch),
                self.reward_input: np.array(reward_batch),
                self.done_input: np.array(done_batch)
            }
            # Critic step, then actor step, then soft target update.
            self.sess.run(self.value_train_op, feed_dict)
            self.sess.run(self.train_actor_op, feed_dict)
            self.update_target_weights()
            # Publish fresh weights for the actor processes.
            weights = self.get_weights()
            for i, weight in enumerate(weights):
                self.variables_server.set("weight_" + str(i), hlp.dump_object(weight))
            if iteration % 1000 == 0:
                print("Iteration #{}".format(iteration))
                self.save(self.config[:-5])
            if iteration % self.test_every == 0:
                print("Time for testing!")
                worker_args = \
                    {
                        'config': self.config,
                        'test_mode': True,
                    }
                hlp.launch_workers(worker_args, self.n_workers)
                paths = []
                for i in range(self.n_workers):
                    paths += hlp.load_object(self.variables_server.get("paths_{}".format(i)))
                total_rewards = np.array([path["total"] for path in paths])
                eplens = np.array([len(path["rewards"]) for path in paths])
                if self.scale == 'full':
                    stds = np.sqrt((self.sumsqrs - np.square(self.sums) / self.sumtime) / (self.sumtime - 1))
                    means = self.sums / self.sumtime
                    self.variables_server.set("means", hlp.dump_object(means))
                    self.variables_server.set("stds", hlp.dump_object(stds))
                    self.sess.run(self.norm_set_op, feed_dict=dict(zip(self.norm_phs, [means, stds])))
                print("""
-------------------------------------------------------------
Mean test score: {test_scores}
Mean test episode length: {test_eplengths}
Max test score: {max_test}
Mean of features: {means}
Std of features: {stds}
Time for iteration: {tt}
-------------------------------------------------------------
                """.format(
                    means=means,
                    stds=stds,
                    test_scores=np.mean(total_rewards),
                    test_eplengths=np.mean(eplens),
                    max_test=np.max(total_rewards),
                    tt=time.time() - start_time
                ))
                self.test_scores.append(np.mean(total_rewards))
            iteration += 1
            self.timestep += 1
| |
# -*- coding: utf-8 -*-
"""
Class and program to colorize python source code for ANSI terminals.
Based on an HTML code highlighter by Jurgen Hermann found at:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52298
Modifications by Fernando Perez (fperez@colorado.edu).
Information on the original HTML highlighter follows:
MoinMoin - Python Source Parser
Title: Colorize Python source using the built-in tokenizer
Submitter: Jurgen Hermann
Last Updated:2001/04/06
Version no:1.2
Description:
This code is part of MoinMoin (http://moin.sourceforge.net/) and converts
Python source code to HTML markup, rendering comments, keywords,
operators, numeric and string literals in different colors.
It shows how to use the built-in keyword, token and tokenize modules to
scan Python source code and re-emit it with no changes to its original
formatting (which is the hard part).
"""
__all__ = ['ANSICodeColors', 'Parser']
_scheme_default = 'Linux'
# Imports
import keyword
import os
import sys
import token
import tokenize
generate_tokens = tokenize.generate_tokens
from IPython.utils.coloransi import TermColors, InputTermColors,ColorScheme, ColorSchemeTable
from .colorable import Colorable
from io import StringIO
#############################################################################
### Python Source Parser (does Highlighting)
#############################################################################
# Pseudo-token types used by the parser for categories that the stdlib token
# module does not distinguish (keywords and plain text).
_KEYWORD = token.NT_OFFSET + 1
_TEXT = token.NT_OFFSET + 2
#****************************************************************************
# Builtin color schemes
Colors = TermColors # just a shorthand
# Build a few color schemes
# Each scheme maps token types (plus the 'header'/prompt keys) to terminal
# color escape sequences.
NoColor = ColorScheme(
    'NoColor',{
    'header'         : Colors.NoColor,
    token.NUMBER     : Colors.NoColor,
    token.OP         : Colors.NoColor,
    token.STRING     : Colors.NoColor,
    tokenize.COMMENT : Colors.NoColor,
    token.NAME       : Colors.NoColor,
    token.ERRORTOKEN : Colors.NoColor,
    _KEYWORD         : Colors.NoColor,
    _TEXT            : Colors.NoColor,
    'in_prompt'      : InputTermColors.NoColor,  # Input prompt
    'in_number'      : InputTermColors.NoColor,  # Input prompt number
    'in_prompt2'     : InputTermColors.NoColor,  # Continuation prompt
    'in_normal'      : InputTermColors.NoColor,  # color off (usu. Colors.Normal)
    'out_prompt'     : Colors.NoColor, # Output prompt
    'out_number'     : Colors.NoColor, # Output prompt number
    'normal'         : Colors.NoColor  # color off (usu. Colors.Normal)
    }  )
LinuxColors = ColorScheme(
    'Linux',{
    'header'         : Colors.LightRed,
    token.NUMBER     : Colors.LightCyan,
    token.OP         : Colors.Yellow,
    token.STRING     : Colors.LightBlue,
    tokenize.COMMENT : Colors.LightRed,
    token.NAME       : Colors.Normal,
    token.ERRORTOKEN : Colors.Red,
    _KEYWORD         : Colors.LightGreen,
    _TEXT            : Colors.Yellow,
    'in_prompt'      : InputTermColors.Green,
    'in_number'      : InputTermColors.LightGreen,
    'in_prompt2'     : InputTermColors.Green,
    'in_normal'      : InputTermColors.Normal,  # color off (usu. Colors.Normal)
    'out_prompt'     : Colors.Red,
    'out_number'     : Colors.LightRed,
    'normal'         : Colors.Normal  # color off (usu. Colors.Normal)
    } )
NeutralColors = ColorScheme(
    'Neutral',{
    'header'         : Colors.Red,
    token.NUMBER     : Colors.Cyan,
    token.OP         : Colors.Blue,
    token.STRING     : Colors.Blue,
    tokenize.COMMENT : Colors.Red,
    token.NAME       : Colors.Normal,
    token.ERRORTOKEN : Colors.Red,
    _KEYWORD         : Colors.Green,
    _TEXT            : Colors.Blue,
    'in_prompt'      : InputTermColors.Blue,
    'in_number'      : InputTermColors.LightBlue,
    'in_prompt2'     : InputTermColors.Blue,
    'in_normal'      : InputTermColors.Normal,  # color off (usu. Colors.Normal)
    'out_prompt'     : Colors.Red,
    'out_number'     : Colors.LightRed,
    'normal'         : Colors.Normal  # color off (usu. Colors.Normal)
    } )
# Hack: the 'neutral' colours are not very visible on a dark background on
# Windows. Since Windows command prompts have a dark background by default, and
# relatively few users are likely to alter that, we will use the 'Linux' colours,
# designed for a dark background, as the default on Windows. Changing it here
# avoids affecting the prompt colours rendered by prompt_toolkit, where the
# neutral defaults do work OK.
if os.name == 'nt':
    NeutralColors = LinuxColors.copy(name='Neutral')
LightBGColors = ColorScheme(
    'LightBG',{
    'header'         : Colors.Red,
    token.NUMBER     : Colors.Cyan,
    token.OP         : Colors.Blue,
    token.STRING     : Colors.Blue,
    tokenize.COMMENT : Colors.Red,
    token.NAME       : Colors.Normal,
    token.ERRORTOKEN : Colors.Red,
    _KEYWORD         : Colors.Green,
    _TEXT            : Colors.Blue,
    'in_prompt'      : InputTermColors.Blue,
    'in_number'      : InputTermColors.LightBlue,
    'in_prompt2'     : InputTermColors.Blue,
    'in_normal'      : InputTermColors.Normal,  # color off (usu. Colors.Normal)
    'out_prompt'     : Colors.Red,
    'out_number'     : Colors.LightRed,
    'normal'         : Colors.Normal  # color off (usu. Colors.Normal)
    } )
# Build table of color schemes (needed by the parser)
ANSICodeColors = ColorSchemeTable([NoColor,LinuxColors,LightBGColors, NeutralColors],
                                  _scheme_default)
# Sentinel used by Parser.format to detect whether the deprecated `scheme`
# argument was explicitly passed.
Undefined = object()
class Parser(Colorable):
    """ Format colored Python source.
    """

    def __init__(self, color_table=None, out = sys.stdout, parent=None, style=None):
        """ Create a parser with a specified color table and output channel.
        Call format() to process code.
        """
        super(Parser, self).__init__(parent=parent)
        self.color_table = color_table if color_table else ANSICodeColors
        self.out = out
        # Parsing state, (re)initialized by format2() for each input.
        self.pos = None    # current character offset into self.raw
        self.lines = None  # character offsets of line starts in self.raw
        self.raw = None    # the source text being colorized
        if not style:
            self.style = self.default_style
        else:
            self.style = style

    def format(self, raw, out=None, scheme=Undefined):
        """Colorize *raw* source and return it as a string (or None).

        The `scheme` argument is deprecated and ignored; see format2().
        """
        import warnings
        if scheme is not Undefined:
            warnings.warn('The `scheme` argument of IPython.utils.PyColorize:Parser.format is deprecated since IPython 6.0.'
                          'It will have no effect. Set the parser `style` directly.',
                          stacklevel=2)
        return self.format2(raw, out)[0]

    def format2(self, raw, out = None):
        """ Parse and send the colored source.
        If out and scheme are not specified, the defaults (given to
        constructor) are used.
        out should be a file-type object. Optionally, out can be given as the
        string 'str' and the parser will automatically return the output in a
        string.

        Returns a (output_or_None, error_flag) tuple."""
        string_output = 0
        if out == 'str' or self.out == 'str' or \
           isinstance(self.out, StringIO):
            # XXX - I don't really like this state handling logic, but at this
            # point I don't want to make major changes, so adding the
            # isinstance() check is the simplest I can do to ensure correct
            # behavior.
            out_old = self.out
            self.out = StringIO()
            string_output = 1
        elif out is not None:
            self.out = out
        else:
            raise ValueError('`out` or `self.out` should be file-like or the value `"str"`')
        # Fast return of the unmodified input for NoColor scheme
        if self.style == 'NoColor':
            error = False
            self.out.write(raw)
            if string_output:
                return raw, error
            return None, error
        # local shorthands
        colors = self.color_table[self.style].colors
        self.colors = colors # put in object so __call__ sees it
        # Remove trailing whitespace and normalize tabs
        self.raw = raw.expandtabs().rstrip()
        # store line offsets in self.lines
        self.lines = [0, 0]
        pos = 0
        raw_find = self.raw.find
        lines_append = self.lines.append
        while True:
            pos = raw_find('\n', pos) + 1
            if not pos:
                break
            lines_append(pos)
        lines_append(len(self.raw))
        # parse the source and write it
        self.pos = 0
        text = StringIO(self.raw)
        error = False
        try:
            # __call__ handles each token (see below).
            for atoken in generate_tokens(text.readline):
                self(*atoken)
        except tokenize.TokenError as ex:
            # Emit the unparseable remainder in the error color.
            msg = ex.args[0]
            line = ex.args[1][0]
            self.out.write("%s\n\n*** ERROR: %s%s%s\n" %
                           (colors[token.ERRORTOKEN],
                            msg, self.raw[self.lines[line]:],
                            colors.normal)
                           )
            error = True
        self.out.write(colors.normal+'\n')
        if string_output:
            output = self.out.getvalue()
            self.out = out_old
            return (output, error)
        return (None, error)

    def _inner_call_(self, toktype, toktext, start_pos):
        """like call but write to a temporary buffer"""
        buff = StringIO()
        srow, scol = start_pos
        colors = self.colors
        owrite = buff.write
        # line separator, so this works across platforms
        linesep = os.linesep
        # calculate new positions
        oldpos = self.pos
        newpos = self.lines[srow] + scol
        self.pos = newpos + len(toktext)
        # send the original whitespace, if needed
        if newpos > oldpos:
            owrite(self.raw[oldpos:newpos])
        # skip indenting tokens
        if toktype in [token.INDENT, token.DEDENT]:
            self.pos = newpos
            buff.seek(0)
            return buff.read()
        # map token type to a color group
        if token.LPAR <= toktype <= token.OP:
            toktype = token.OP
        elif toktype == token.NAME and keyword.iskeyword(toktext):
            toktype = _KEYWORD
        color = colors.get(toktype, colors[_TEXT])
        # Triple quoted strings must be handled carefully so that backtracking
        # in pagers works correctly. We need color terminators on _each_ line.
        if linesep in toktext:
            toktext = toktext.replace(linesep, '%s%s%s' %
                                      (colors.normal,linesep,color))
        # send text
        owrite('%s%s%s' % (color,toktext,colors.normal))
        buff.seek(0)
        return buff.read()

    def __call__(self, toktype, toktext, start_pos, end_pos, line):
        """ Token handler, with syntax highlighting."""
        self.out.write(
            self._inner_call_(toktype, toktext, start_pos))
| |
# Natural Language Toolkit: API for alignment and translation objects
#
# Copyright (C) 2001-2015 NLTK Project
# Author: Will Zhang <wilzzha@gmail.com>
# Guan Gui <ggui@student.unimelb.edu.au>
# Steven Bird <stevenbird1@gmail.com>
# Tah Wei Hoon <hoon.tw@gmail.com>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
from __future__ import print_function, unicode_literals
import subprocess
from collections import namedtuple
from nltk.compat import python_2_unicode_compatible, string_types
@python_2_unicode_compatible
class AlignedSent(object):
    """
    Return an aligned sentence object, which encapsulates two sentences
    along with an ``Alignment`` between them.
    >>> from nltk.translate import AlignedSent, Alignment
    >>> algnsent = AlignedSent(['klein', 'ist', 'das', 'Haus'],
    ...     ['the', 'house', 'is', 'small'], Alignment.fromstring('0-2 1-3 2-1 3-0'))
    >>> algnsent.words
    ['klein', 'ist', 'das', 'Haus']
    >>> algnsent.mots
    ['the', 'house', 'is', 'small']
    >>> algnsent.alignment
    Alignment([(0, 2), (1, 3), (2, 1), (3, 0)])
    >>> from nltk.corpus import comtrans
    >>> print(comtrans.aligned_sents()[54])
    <AlignedSent: 'Weshalb also sollten...' -> 'So why should EU arm...'>
    >>> print(comtrans.aligned_sents()[54].alignment)
    0-0 0-1 1-0 2-2 3-4 3-5 4-7 5-8 6-3 7-9 8-9 9-10 9-11 10-12 11-6 12-6 13-13
    :param words: source language words
    :type words: list(str)
    :param mots: target language words
    :type mots: list(str)
    :param alignment: the word-level alignments between the source
        and target language
    :type alignment: Alignment
    """

    def __init__(self, words, mots, alignment=None):
        self._words = words
        self._mots = mots
        if alignment is None:
            self.alignment = Alignment([])
        else:
            assert type(alignment) is Alignment
            # Assigning through the property validates the indices against
            # both sentences (see _set_alignment).
            self.alignment = alignment

    @property
    def words(self):
        # Source-language tokens (read-only).
        return self._words

    @property
    def mots(self):
        # Target-language tokens (read-only).
        return self._mots

    def _get_alignment(self):
        return self._alignment

    def _set_alignment(self, alignment):
        # Reject alignments whose indices fall outside either sentence.
        _check_alignment(len(self.words), len(self.mots), alignment)
        self._alignment = alignment

    alignment = property(_get_alignment, _set_alignment)

    def __repr__(self):
        """
        Return a string representation for this ``AlignedSent``.
        :rtype: str
        """
        words = "[%s]" % (", ".join("'%s'" % w for w in self._words))
        mots = "[%s]" % (", ".join("'%s'" % w for w in self._mots))
        return "AlignedSent(%s, %s, %r)" % (words, mots, self._alignment)

    def _to_dot(self):
        """
        Dot representation of the aligned sentence
        """
        s = 'graph align {\n'
        s += 'node[shape=plaintext]\n'
        # Declare node
        for w in self._words:
            s += '"%s_source" [label="%s"] \n' % (w, w)
        for w in self._mots:
            s += '"%s_target" [label="%s"] \n' % (w, w)
        # Alignment
        for u,v in self._alignment:
            s += '"%s_source" -- "%s_target" \n' % (self._words[u] , self._mots[v] )
        # Connect the source words
        for i in range(len(self._words)-1) :
            s += '"%s_source" -- "%s_source" [style=invis]\n' % (self._words[i] , self._words[i+1])
        # Connect the target words
        for i in range(len(self._mots)-1) :
            s += '"%s_target" -- "%s_target" [style=invis]\n' % (self._mots[i] , self._mots[i+1])
        # Put it in the same rank
        s += '{rank = same; %s}\n' % (' '.join('"%s_source"' % w for w in self._words))
        s += '{rank = same; %s}\n' % (' '.join('"%s_target"' % w for w in self._mots))
        s += '}'
        return s

    def _repr_svg_(self):
        """
        Ipython magic : show SVG representation of this ``AlignedSent``.
        """
        dot_string = self._to_dot().encode('utf8')
        output_format = 'svg'
        # Requires the Graphviz `dot` executable on PATH.
        try:
            process = subprocess.Popen(['dot', '-T%s' % output_format], stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        except OSError:
            raise Exception('Cannot find the dot binary from Graphviz package')
        out, err = process.communicate(dot_string)
        return out

    def __str__(self):
        """
        Return a human-readable string representation for this ``AlignedSent``.
        :rtype: str
        """
        # Each side is truncated to its first 20 characters; "..." is always
        # appended, even when no truncation happened.
        source = " ".join(self._words)[:20] + "..."
        target = " ".join(self._mots)[:20] + "..."
        return "<AlignedSent: '%s' -> '%s'>" % (source, target)

    def invert(self):
        """
        Return the aligned sentence pair, reversing the directionality
        :rtype: AlignedSent
        """
        return AlignedSent(self._mots, self._words,
                           self._alignment.invert())
@python_2_unicode_compatible
class Alignment(frozenset):
    """
    A storage class for representing alignment between two sequences, s1, s2.
    In general, an alignment is a set of tuples of the form (i, j, ...)
    representing an alignment between the i-th element of s1 and the
    j-th element of s2. Tuples are extensible (they might contain
    additional data, such as a boolean to indicate sure vs possible alignments).
    >>> from nltk.translate import Alignment
    >>> a = Alignment([(0, 0), (0, 1), (1, 2), (2, 2)])
    >>> a.invert()
    Alignment([(0, 0), (1, 0), (2, 1), (2, 2)])
    >>> print(a.invert())
    0-0 1-0 2-1 2-2
    >>> a[0]
    [(0, 1), (0, 0)]
    >>> a.invert()[2]
    [(2, 1), (2, 2)]
    >>> b = Alignment([(0, 0), (0, 1)])
    >>> b.issubset(a)
    True
    >>> c = Alignment.fromstring('0-0 0-1')
    >>> b == c
    True
    """

    def __new__(cls, pairs):
        # frozenset is immutable, so all initialization happens in __new__.
        obj = frozenset.__new__(cls, pairs)
        # Highest source index seen; sizes the positional index below.
        obj._len = max(p[0] for p in obj) if obj else 0
        # Positional lookup table, built lazily on first use.
        obj._index = None
        return obj

    @classmethod
    def fromstring(cls, s):
        """
        Read a giza-formatted string and return an Alignment object.
        >>> Alignment.fromstring('0-0 2-1 9-2 21-3 10-4 7-5')
        Alignment([(0, 0), (2, 1), (7, 5), (9, 2), (10, 4), (21, 3)])
        :type s: str
        :param s: the positional alignments in giza format
        :rtype: Alignment
        :return: An Alignment object corresponding to the string representation ``s``.
        """
        return Alignment(_giza2pair(token) for token in s.split())

    def __getitem__(self, key):
        """
        Look up the alignments that map from a given index or slice.
        """
        if not self._index:
            self._build_index()
        return self._index[key]

    def invert(self):
        """
        Return an Alignment object, being the inverted mapping.
        """
        # Swap the first two fields of each tuple; keep any extra fields.
        return Alignment((pair[1], pair[0]) + pair[2:] for pair in self)

    def range(self, positions=None):
        """
        Work out the range of the mapping from the given positions.
        If no positions are specified, compute the range of the entire mapping.
        """
        if not self._index:
            self._build_index()
        if not positions:
            positions = list(range(len(self._index)))
        image = set()
        for position in positions:
            for _, target in self._index[position]:
                image.add(target)
        return sorted(image)

    def __repr__(self):
        """
        Produce a Giza-formatted string representing the alignment.
        """
        return "Alignment(%r)" % sorted(self)

    def __str__(self):
        """
        Produce a Giza-formatted string representing the alignment.
        """
        ordered = sorted(self)
        return " ".join("%d-%d" % pair[:2] for pair in ordered)

    def _build_index(self):
        """
        Build a list self._index such that self._index[i] is a list
        of the alignments originating from word i.
        """
        index = [[] for _ in range(self._len + 1)]
        for pair in self:
            index[pair[0]].append(pair)
        self._index = index
def _giza2pair(pair_string):
    """Parse a single giza token ``'i-j'`` into an ``(int, int)`` pair."""
    left, right = pair_string.split("-")
    return int(left), int(right)
def _naacl2pair(pair_string):
    """Parse a NAACL token ``'i-j-p'`` into ``(int, int)``, dropping ``p``."""
    src, trg, _extra = pair_string.split("-")
    return int(src), int(trg)
def _check_alignment(num_words, num_mots, alignment):
    """
    Check whether the alignments are legal.

    :param num_words: the number of source language words
    :type num_words: int
    :param num_mots: the number of target language words
    :type num_mots: int
    :param alignment: alignment to be checked
    :type alignment: Alignment
    :raise IndexError: if alignment falls outside the sentence
    """
    # isinstance (rather than an exact ``type(...) is`` comparison) so
    # subclasses of Alignment are accepted too; the AssertionError on
    # failure is unchanged.
    assert isinstance(alignment, Alignment)

    if not all(0 <= pair[0] < num_words for pair in alignment):
        raise IndexError("Alignment is outside boundary of words")
    # A pair's target side may be None (unaligned word); only numeric
    # targets are range-checked.
    if not all(pair[1] is None or 0 <= pair[1] < num_mots for pair in alignment):
        raise IndexError("Alignment is outside boundary of mots")
# Lightweight record pairing a candidate target phrase with the log
# probability of that translation.
PhraseTableEntry = namedtuple('PhraseTableEntry', ['trg_phrase', 'log_prob'])
class PhraseTable(object):
    """
    In-memory store of translations for a given phrase, and the log
    probability of those translations
    """

    def __init__(self):
        # Maps tuple(str) source phrase -> list of PhraseTableEntry,
        # kept sorted in decreasing order of log probability.
        self.src_phrases = dict()

    def translations_for(self, src_phrase):
        """
        Get the translations for a source language phrase

        :param src_phrase: Source language phrase of interest
        :type src_phrase: tuple(str)

        :return: A list of target language phrases that are translations
            of ``src_phrase``, ordered in decreasing order of
            likelihood. Each list element is a tuple of the target
            phrase and its log probability.
        :rtype: list(PhraseTableEntry)
        """
        # Raises KeyError for unknown phrases, like a plain dict lookup.
        return self.src_phrases[src_phrase]

    def add(self, src_phrase, trg_phrase, log_prob):
        """
        :type src_phrase: tuple(str)
        :type trg_phrase: tuple(str)
        :param log_prob: Log probability that given ``src_phrase``,
            ``trg_phrase`` is its translation
        :type log_prob: float
        """
        new_entry = PhraseTableEntry(trg_phrase=trg_phrase, log_prob=log_prob)
        entries = self.src_phrases.setdefault(src_phrase, [])
        entries.append(new_entry)
        # Re-sort in place so the best translation is always first.
        entries.sort(key=lambda entry: entry.log_prob, reverse=True)

    def __contains__(self, src_phrase):
        return src_phrase in self.src_phrases
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===================================================================
"""Tooling for support TPU embedding in TPUEstimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.feature_column import feature_column as core_fc
from tensorflow.python.feature_column import feature_column_lib as core_fc_lib
from tensorflow.python.tpu import feature_column as tpu_fc
from tensorflow.python.tpu import tpu_embedding
from tensorflow.python.tpu.tpu_embedding import AdagradParameters
from tensorflow.python.tpu.tpu_embedding import AdamParameters
from tensorflow.python.tpu.tpu_embedding import StochasticGradientDescentParameters
# pylint: disable=protected-access
# Feature-column classes that are TPU-embedding aware.
_TPU_EMBEDDING_COLUMN_CLASSES = (tpu_fc._TPUEmbeddingColumn,
                                 tpu_fc._TPUSharedEmbeddingColumn)
# Plain (CPU) embedding-column classes, accepted so the same model_fn can
# also run off-TPU.
_EMBEDDING_COLUMN_CLASSES = (core_fc._EmbeddingColumn,
                             core_fc_lib.EmbeddingColumn,
                             core_fc._SharedEmbeddingColumn)
_SUPPORTED_FEATURE_COLUMNS = (core_fc._NumericColumn, core_fc_lib.NumericColumn)
_SUPPORTED_OPTIMIZERS = (AdagradParameters, AdamParameters,
                         StochasticGradientDescentParameters)
# pylint: enable=protected-access

# Embedding tables are named after their embedding variable, with this
# prefix prepended (see _get_table_name_from_embedding_var_name).
_TABLE_NAME_PREFIX = 'tbl_'
_LEN_TABLE_NAME_PREFIX = len(_TABLE_NAME_PREFIX)
def _get_table_name_from_embedding_var_name(embedding_var_name):
    """Derive the embedding table name for an embedding variable name."""
    return '%s%s' % (_TABLE_NAME_PREFIX, embedding_var_name)
def _get_embedding_var_name_from_table_name(table_name):
    """Strip the table-name prefix, recovering the embedding variable name."""
    return table_name[len(_TABLE_NAME_PREFIX):]
def _get_embedding_variable_name(scope_name, var_name):
return '{}/{}'.format(scope_name, var_name)
def _get_slot_variable_names(scope_name, var_name, optimization_parameters):
"""Return embedding variable names which are consistent with CPU runs."""
if isinstance(optimization_parameters, tpu_embedding.AdagradParameters):
return tpu_embedding.AdagradSlotVariableName(
'{}/{}/Adagrad'.format(scope_name, var_name)
)
elif isinstance(optimization_parameters, tpu_embedding.AdamParameters):
return tpu_embedding.AdamSlotVariableNames(
'{}/{}/Adam/m'.format(scope_name, var_name),
'{}/{}/Adam/v'.format(scope_name, var_name)
)
elif isinstance(optimization_parameters,
tpu_embedding.StochasticGradientDescentParameters):
return None
else:
raise ValueError('Support to infer full variable name '
'for optimization_parameter {} has not been added.'
.format(optimization_parameters))
def get_full_variable_names(
graph, table_to_config_dict, optimization_parameters):
"""Return embedding variable names and slot variables which are consistent with CPU runs."""
collection = graph.get_collection_ref(tpu_fc._TPU_FC_TO_SCOPE) # pylint: disable=protected-access
if not collection:
raise RuntimeError(
'Embedding feature column did not capture any thing. Make sure the '
'feature columns passed to TPUEstimator constructor is properly '
'used in model_fn.')
embedding_variable_name_by_table = {}
slot_variable_names_by_table = {}
for table_name in table_to_config_dict:
embedding_var_name = _get_embedding_var_name_from_table_name(table_name)
(scope_name, var_name) = collection[0][embedding_var_name]
embedding_variable_name_by_table[table_name] = (
_get_embedding_variable_name(scope_name, var_name))
slot_variable_names_by_table[table_name] = _get_slot_variable_names(
scope_name, var_name, optimization_parameters)
graph.clear_collection(tpu_fc._TPU_FC_TO_SCOPE) # pylint: disable=protected-access
return embedding_variable_name_by_table, slot_variable_names_by_table
def get_tpu_embedding_config_from_feature_columns(feature_columns):
"""Create configs for TPUEmbedding from a list of feature columns.
This function will place one embedding tensor per table and the return is
intended to be used as input to TPUEmbedding.
Args:
feature_columns: a list of supported feature columns.
Returns:
A pair of dicts, the first maps tables to their config, the second maps
features to tables.
"""
allowed = (tpu_fc._TPUEmbeddingColumn, tpu_fc._TPUSharedEmbeddingColumn) # pylint: disable=protected-access
for column in feature_columns:
if not isinstance(column, allowed):
raise TypeError(
'Unsupported feature column {}. Supported types are {}.'.format(
type(column), allowed))
table_to_config = {}
feature_to_table = {}
for column in feature_columns:
feature_name = column.get_feature_key_name()
table_name = _get_table_name_from_embedding_var_name(
column.get_embedding_var_name())
if feature_name in feature_to_table:
raise ValueError(
'Feature column {} is used with multiple embeddings and this is '
'not supported.'.format(feature_name))
feature_to_table[feature_name] = table_name
vocabulary_size, dimension = column.get_embedding_table_size()
table_to_config[table_name] = tpu_embedding.TableConfig(
vocabulary_size=vocabulary_size,
dimension=dimension,
initializer=column.get_initializer(),
combiner=column.get_combiner())
return table_to_config, feature_to_table
class EmbeddingConfigSpec(
collections.namedtuple('EmbeddingConfigSpec', [
'feature_columns', 'optimization_parameters', 'clipping_limit',
])):
"""Class to keep track of embedding config specification."""
def __new__(cls,
feature_columns,
optimization_parameters,
clipping_limit=None):
"""Creates an EmbeddingConfigSpec instance.
Args:
feature_columns: All `FeatureColumn`s used by model.
optimization_parameters: An instance of `AdagradParameters`,
`AdamParameters` or `StochasticGradientDescentParameters`. This
optimizer will be applied to all embedding variables specified by
`feature_columns`.
clipping_limit: (Optional) Clipping limit (absolute value).
Returns:
An EmbeddingConfigSpec instance.
Raises:
ValueError: If the feature_columns are not specified.
TypeError: If the feature columns are not of ths correct type (one of
_SUPPORTED_FEATURE_COLUMNS, _TPU_EMBEDDING_COLUMN_CLASSES OR
_EMBEDDING_COLUMN_CLASSES).
ValueError: If `optimization_parameters` is not one of the required types.
"""
if not feature_columns:
raise ValueError('`feature_columns` cannot be `None` or empty.')
# It is unknown at this moment, whether the TPUEstimator is running in CPU
# or TPU mode. So allow non-TPU embedding columns also.
supported_classes = tuple(
list(_SUPPORTED_FEATURE_COLUMNS) + list(_TPU_EMBEDDING_COLUMN_CLASSES) +
list(_EMBEDDING_COLUMN_CLASSES))
for column in feature_columns:
if not isinstance(column, supported_classes):
raise TypeError(
'All feature columns must be supported types in {}. Got {}'.format(
supported_classes, type(column)))
if not isinstance(optimization_parameters, _SUPPORTED_OPTIMIZERS):
raise ValueError('optimization_parameters must be an instance of type '
'{}. Got {}.'.format(_SUPPORTED_OPTIMIZERS,
type(optimization_parameters)))
return super(EmbeddingConfigSpec, cls).__new__(
cls,
feature_columns=feature_columns,
optimization_parameters=optimization_parameters,
clipping_limit=clipping_limit)
class EmbeddingConfig(object):
"""This is the internal immutable object for embedding config.
`_EmbeddingConfig` is responsible to _translate_ user provided
`EmbeddingConfigSpec` to internal data structures, mostly constructor
arguments of `TPUEmbedding`.
"""
def __init__(self, embedding_config_spec, train_batch_size, eval_batch_size,
num_hosts, num_cores, run_config):
self._embedding_config_spec = embedding_config_spec
self._train_batch_size = train_batch_size
self._eval_batch_size = eval_batch_size
self._num_hosts = num_hosts
self._num_cores = num_cores
self._run_config = run_config
self._table_to_config_dict, self._feature_to_table_dict = (
get_tpu_embedding_config_from_feature_columns(
embedding_config_spec.feature_columns))
self._mode_to_tpu_embedding_dict = {}
self.dummy_table_variables = None
def has_embedding_tables(self):
return bool(self._table_to_config_dict)
def _create_tpu_embedding(self, mode):
"""Create tpu_embedding.TPUEmbedding based on mode."""
if mode == model_fn_lib.ModeKeys.TRAIN:
batch_size = self._train_batch_size
else:
batch_size = self._eval_batch_size
if mode == model_fn_lib.ModeKeys.TRAIN:
tpu_embedding_mode = tpu_embedding.TRAINING
elif (mode == model_fn_lib.ModeKeys.EVAL or
mode == model_fn_lib.ModeKeys.PREDICT):
tpu_embedding_mode = tpu_embedding.INFERENCE
else:
raise ValueError('Mode {} is not supported.'.format(mode))
master = (
self._run_config.evaluation_master
if mode == model_fn_lib.ModeKeys.EVAL else self._run_config.master)
cluster_def = (self._run_config.session_config.cluster_def
if self._run_config.session_config else None)
tpu_embedding_ = tpu_embedding.TPUEmbedding(
self._table_to_config_dict,
self._feature_to_table_dict,
batch_size,
tpu_embedding_mode,
master,
self._embedding_config_spec.optimization_parameters,
cluster_def,
)
return tpu_embedding_
def get_tpu_embedding(self, mode):
if mode not in self._mode_to_tpu_embedding_dict:
self._mode_to_tpu_embedding_dict[mode] = (
self._create_tpu_embedding(mode))
return self._mode_to_tpu_embedding_dict[mode]
def split_inputs(ctx, features, labels):
"""Splits the dense and sparse tensors inside the features and labels."""
sparse_features = collections.OrderedDict()
if ctx.embedding_config:
tpu_embedding_ = ctx.embedding_config.tpu_embedding
for feature_key in tpu_embedding_.feature_to_table_dict:
sparse_features[feature_key] = features.pop(feature_key)
return features, labels, sparse_features
| |
"""
footer
======
**Module** : ``footer.footer.py``
Contains the :py:class:`.Footer`, the footer of the application.
Contains the :py:class:`.FooterSpinner`, for information and
the menus in the footer.
"""
from kivy.uix.stacklayout import StackLayout
from kivy.uix.spinner import Spinner
from kivy.properties import ObjectProperty
from kivy.properties import StringProperty
from kivy.properties import NumericProperty
from kivy.properties import ListProperty
from footer.menus.highlightmenu import HighlightMenu
from footer.menus.highlightmenu import HighlightStyleMenu
class Footer(StackLayout):
"""
A footer for the application.
The application should only have one
footer, even though more can be added. The application's footer
is :py:attr:`azaharTEA.Container.footer`.
"""
highlight_menu = ObjectProperty(None)
"""Reference to a :py:class:`footer.footer.FooterSpinner`.
Menu for the highlighting options (lexers).
"""
line_col_menu = ObjectProperty(None)
"""A :py:class:`footer.footer.FooterSpinner`.
Menu for lines and columns. It will display the cursor's current line
and column (:py:attr:`.cursor_pos`).
"""
highlight_style_menu = ObjectProperty(None)
"""A :py:class:`footer.footer.FooterSpinner`.
Menu for the highlighting style options (lexers).
"""
cursor_pos = ListProperty([0,0])
"""A :py:class:`kivy.properties.ListProperty` to store
the cursor's current position.
"""
def __init__(self, **kwargs):
super(Footer, self).__init__(**kwargs)
self.menus_dict = {
'highlight_menu': self.change_lexer_information,
'line_col_menu': None,
'highlight_style_menu': self.change_highlight_style
}
"""Dictionary of the available menus.
The *key* is the name and the *value* is the method to execute for
that menu.
"""
def menu_display(self, widget):
"""Manage the on_release event for the menus and display the different
menus in the footer.
This method is called when the menu of widget (if any) is "hidden".
:param widget: Widget on which the event occurred. If widget has \
a menu, then it should be displayed.
"""
if ((widget == self.highlight_menu)
or (widget == self.highlight_style_menu)):
# See if a menu was already created to
# speed up displaying it after the first time
if not widget.child_menu:
widget.editor_container = self.editor_container
if widget == self.highlight_menu:
pos = (widget.x, widget.y + widget.height)
hl = HighlightMenu(pos=pos)
elif widget == self.highlight_style_menu:
pos = (widget.x, widget.y + widget.height)
hl = HighlightStyleMenu(pos=pos)
hl.editor_container = self.editor_container
hl.center_x = widget.center_x
widget.bind(pos=lambda w, pos: self.change_pos(w))
widget.child_menu = hl
widget.add_widget(widget.child_menu)
def menu_hide(self, widget):
"""Manage the on_release event for the menus and hide the different
menus in the footer.
This method is called when the menu of widget (if any) is "displayed".
:param widget: Widget on which the event occurred. If widget has \
a menu, then it should be hidden.
"""
widget.remove_widget(widget.child_menu)
def change_pos(self, parent):
"""Manage event when the position of the menu changes.
This method is used to reposition the content of the menu
contained in parent, when the content is being "displayed"
parent is a menu (:py:attr:`.highlight_menu`, :py:attr:`.highlight_style_menu`,
:py:attr:`.line_col_menu`).
:param parent: Menu in which the event ocurred (menu that changed position).\
The child (content) of parent is repositioned accordingly with the new position \
of parent.
"""
parent.child_menu.center_x = parent.center_x
def propagate_editor_container(self, editor_container):
"""Propagate the :py:class:`editorcontainer.editorcontainer.EditorContainer` to this
:py:class:`.Footer`.
Binds the "current tab" of :py:attr:`.self.editor_container` (in theory a reference to
:py:attr:`.azaharTEA.Container.editor_container`) to :py:meth:`.change_current_tab`.
:param editor_container: :py:class:`~editorcontainer.editorcontainer.EditorContainer` to \
be propagated to this :py:class:`.Footer`. It should be \
:py:attr:`azaharTEA.Container.editor_container`.
"""
self.editor_container = editor_container
current_tab = self.editor_container.current_tab
change_tab_lambda = lambda w, v: self.change_current_tab(w, v)
self.editor_container.bind(current_tab=change_tab_lambda)
self.change_current_tab(self.editor_container, None)
def change_current_tab(self, widget, value):
"""Change the footer information when the tab changes.
It also unbinds and binds again the cursor information to display.
This is, unbinds and binds :py:attr:`.cursor_pos` to :py:meth:`cursor_info`.
:param widget: Widget in which the tab changes. It should be \
:py:attr:`azaharTab.editor_container`.
:param value: Not used yet.
"""
editor = widget.current_tab.content.editor
editor.unbind(cursor=lambda w, v: self.cursor_info(w,v))
editor.bind(cursor=lambda w, v: self.cursor_info(w,v))
self.cursor_info(None, editor.cursor)
self.change_lexer_information(editor.lexer.name)
# Use the not bound name to get the text that should actually
# be displayed.
self.change_highlight_style(editor.style_name_not_bound)
def cursor_info(self, widget, value):
"""Update cursor's position information, :py:attr:`.cursor_pos`."""
self.cursor_pos = [value[1] + 1, value[0] + 1]
def change_information(self, information = dict()):
"""Change the information displayed in the menus (text of the
menu, not the content).
:param information: Dictionary (:py:obj:`dict`) that contains the information \
to change. The *key* should be the name of the menu (like "highlight_menu") and \
the *value* the new information (text of the menu) to display.
"""
try:
for key, value in information.items():
action = self.menus_dict[key]
if action:
action(value)
except KeyError as err:
print(err, '{}: No such menu'.format(key), '\n')
def change_lexer_information(self, value):
"""Change the text of the :py:attr:`.highlight_menu`"""
self.highlight_menu.text = value
def change_highlight_style(self, value):
"""Change the text of the :py:attr:`.highlight_style_menu`"""
self.highlight_style_menu.text = value
class FooterSpinner(Spinner):
"""Widget to open a menu in the footer.
Inherits from :py:class:`kivy.uix.spinner.Spinner` just because of its
appearance.
"""
display_state = StringProperty('')
"""Current state of the menu for this :py:class:`.FooterSpinner`.
It's a value from :py:attr:`.states`.
"""
states = ['hidden','displayed']
"""Possible states which the menu for this :py:class:`.FooterSpinner` can be in."""
state_index = NumericProperty(0)
"""A :py:class:`kivy.properties.NumericProperty`. The current index for :py:attr:`.states`.
This index rotates to change the :py:attr:`.display_state`.
"""
child_menu = ObjectProperty(None)
""":py:class:`kivy.properties.ObjectProperty`. The contents for this :py:class:`.Footer`.
This is the actual menu (the menu options or the content of the menu).
This is what should be displayed and hidden when this :py:class:`.FooterSpinner` is
clicked.
"""
editor_container = ObjectProperty(None)
"""Should be used to store a reference to :py:attr:`azaharTEA.Container.editor_container`."""
def __init__(self, **kwargs):
super(FooterSpinner, self).__init__(**kwargs)
self.bind(texture_size=self.on_texture_size)
def on_display_state(self, instance, value):
"""Manage event when :py:attr:`.display_state` changes.
Rotates the value in :py:attr:`display_state`, assigning it values from
:py:attr:`states` using :py:attr:`state_index`.
:param instance: Instance of the widget on which the event ocurred. Always is this :py:class:`.FooterSpinner`.
:param value: Value of :py:attr:`display_state` after the change.
"""
if self.display_state == 'hidden':
self.parent.menu_display(self)
else:
self.parent.menu_hide(self)
self.state_index = (self.state_index + 1) % len(self.states)
def on_texture_size(self, widget, value):
"""Manage event when texture_size changes in this :py:class:`.FooterSpinner` changes.
The width of the menu is changed accordingly to fit text chosen in the menu.
:param widget: Widget on which the event ocurred (this :py:class:`.FooterSpinner`).
:param value: Value of texture_size after it changed.
"""
self.width = self.texture_size[0] + 2 * self.padding_x
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ======================================
"""Operations for handling session logging and shutdown notifications."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from google.protobuf import text_format
from tensorflow.contrib.tpu.python.ops import tpu_ops
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training_util
class CoordinatorShutdownException(Exception):
"""Raised when the coordinator needs to shutdown."""
pass
class WorkerHeartbeatManager(object):
"""Manages the status/heartbeat monitor for a set of workers."""
def __init__(self, session, devices, heartbeat_ops, request_placeholder):
"""Construct a new WorkerHeartbeatManager.
(Prefer using `WorkerHeartbeatManager.from_devices` when possible.)
Args:
session: `tf.Session`, session to use for heartbeat operations.
devices: `list[string]` Set of devices to connect to.
heartbeat_ops: `list[tf.Operation]` Heartbeat operations.
request_placeholder: `tf.Placeholder[String]` Placeholder used to specify
the WorkerHeartbeatRequest protocol buffer.
"""
self._session = session
self._devices = devices
self._ops = heartbeat_ops
self._request_placeholder = request_placeholder
@staticmethod
def from_devices(session, devices):
"""Construct a heartbeat manager for the given devices."""
if not devices:
logging.error('Trying to create heartbeat manager with no devices?')
logging.info('Creating heartbeat manager for %s', devices)
request_placeholder = array_ops.placeholder(
name='worker_heartbeat_request', dtype=dtypes.string)
heartbeat_ops = []
for device in devices:
with ops.device(device):
heartbeat_ops.append(tpu_ops.worker_heartbeat(request_placeholder))
return WorkerHeartbeatManager(session, devices, heartbeat_ops,
request_placeholder)
def heartbeat_supported(self):
"""Returns True if heartbeat operations are supported on all workers."""
try:
# Send ping to verify worker has heartbeat support.
self.ping()
return True
except errors.InvalidArgumentError as _:
return False
def configure(self, message):
"""Configure heartbeat manager for all devices.
Args:
message: `event_pb2.WorkerHeartbeatRequest`
Returns: `None`
"""
logging.info('Configuring worker heartbeat: %s',
text_format.MessageToString(message))
self._session.run(self._ops,
{self._request_placeholder: message.SerializeToString()})
def ping(self, request=None, timeout_in_ms=5000):
"""Ping all workers, returning the parsed status results."""
if request is None:
request = event_pb2.WorkerHeartbeatRequest()
options = config_pb2.RunOptions(timeout_in_ms=timeout_in_ms)
results = self._session.run(
self._ops,
feed_dict={self._request_placeholder: request.SerializeToString()},
options=options)
parsed_results = [
event_pb2.WorkerHeartbeatResponse.FromString(res_pb)
for res_pb in results
]
logging.debug('Ping results: %s', parsed_results)
return parsed_results
def lame_workers(self):
"""Ping all workers, returning manager containing lame workers (or None)."""
ping_results = self.ping()
lame_workers = []
for ping_response, device, op in zip(ping_results, self._devices,
self._ops):
if ping_response.health_status != event_pb2.OK:
lame_workers.append((device, op))
if not lame_workers:
return None
bad_devices, bad_ops = zip(*lame_workers)
return WorkerHeartbeatManager(self._session, bad_devices, bad_ops,
self._request_placeholder)
def __repr__(self):
return 'HeartbeatManager(%s)' % ','.join(self._devices)
def shutdown(self, timeout_ms=10000):
"""Shutdown all workers after `shutdown_timeout_secs`."""
logging.info('Shutting down %s.', self)
req = event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(timeout_ms=timeout_ms))
self.configure(req)
# Wait for workers to shutdown. This isn't strictly required
# but it avoids triggering multiple checkpoints with the same lame worker.
logging.info('Waiting %dms for worker shutdown.', timeout_ms)
time.sleep(timeout_ms / 1000)
def all_worker_devices(session):
"""Return a list of devices for each worker in the system."""
devices = session.list_devices()
return [device.name for device in devices if 'CPU' in device.name]
class WatchdogManager(threading.Thread):
"""Configures worker watchdog timer and handles periodic pings.
Usage:
# Ping workers every minute, shutting down workers if they haven't received
# a ping after 1 hour.
watchdog_manager = WatchdogManager(
ping_interval=60, shutdown_timeout=3600
)
# Use as a context manager, resetting watchdog on context exit:
with watchdog_manager:
session.run(...)
# Or setup globally; watchdog will remain active until program exit.
watchdog_manager.configure_and_run()
"""
def __init__(self,
session,
devices=None,
ping_interval=60,
shutdown_timeout=3600):
"""Initialize a watchdog manager.
Args:
session: Session connected to worker devices. A cloned session and graph
will be created for managing worker pings.
devices: Set of devices to monitor. If none, all workers will be
monitored.
ping_interval: Time, in seconds, between watchdog pings.
shutdown_timeout: Time, in seconds, before watchdog timeout.
"""
threading.Thread.__init__(self)
self.ping_interval = ping_interval
self.shutdown_timeout = shutdown_timeout
self.daemon = True
self._running = False
self._graph = ops.Graph()
self._session = session_lib.Session(
target=session.sess_str,
graph=self._graph,
)
with self._graph.as_default():
if devices is None:
devices = all_worker_devices(self._session)
self._worker_manager = WorkerHeartbeatManager.from_devices(
self._session, devices)
def configure_and_run(self):
logging.info('Enabling worker watchdog.')
self._running = True
self._worker_manager.configure(
event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(
timeout_ms=self.shutdown_timeout * 1000,)))
self.start()
def __enter__(self):
self.configure_and_run()
def __exit__(self, exc_type, exc_val, exc_tb):
logging.info('Disabling worker watchdog.')
self._worker_manager.configure(
event_pb2.WorkerHeartbeatRequest(
watchdog_config=event_pb2.WatchdogConfig(timeout_ms=-1,)))
self._running = False
self.join()
def run(self):
# Don't fetch logs or adjust timing: just ping the watchdog.
while self._running:
self._worker_manager.ping(request=None)
time.sleep(self.ping_interval)
class GracefulShutdownHook(session_run_hook.SessionRunHook):
"""Session hook that watches for shutdown events.
If a shutdown is indicated, `saver.save(checkpoint_prefix)` is executed, and a
SystemShutdown exception is raised to terminate the main session. If `saver`
is None the `SAVERS` collection will be read to find a saver.
`on_shutdown_hooks` is an optional list of functions that should be called
after checkpointing. The function is called with (`run_context`,
`all_workers`, `lame_workers`).
If `heartbeat_group` is not specified, it will default to all CPU workers
in the system.
"""
def __init__(self, checkpoint_prefix, saver=None, on_shutdown_hooks=None):
self._saver = saver
self._checkpoint_prefix = checkpoint_prefix
self._on_shutdown_hooks = on_shutdown_hooks if on_shutdown_hooks else []
# Worker heartbeats are managed independently of the main training graph.
self._graph = ops.Graph()
self._workers = None
self._session = None
self._heartbeat_supported = False
def after_create_session(self, training_session, coord): # pylint: disable=unused-argument
# N.B. We have to pull the global step here to avoid it being unavailable
# at checkpoint time; the graph has been frozen at that point.
if training_util.get_global_step() is None and self.saver() is not None:
raise ValueError(
'Saver defined but no global step. Run `get_or_create_global_step()`'
' in your model definition to allow checkpointing.')
with self._graph.as_default():
logging.info('Installing graceful shutdown hook.')
self._session = session_lib.Session(
target=training_session.sess_str, graph=self._graph)
self._workers = WorkerHeartbeatManager.from_devices(
self._session, all_worker_devices(self._session))
self._heartbeat_supported = self._workers.heartbeat_supported()
if self._heartbeat_supported:
self._workers.configure(
event_pb2.WorkerHeartbeatRequest(
shutdown_mode=event_pb2.WAIT_FOR_COORDINATOR))
else:
logging.warn(
'Worker heartbeats not supported by all workers. No failure '
'handling will be enabled.'
)
def saver(self):
if self._saver:
return self._saver
savers = ops.get_collection(ops.GraphKeys.SAVERS)
if not savers:
return None
if not isinstance(savers, list):
return savers
if len(savers) > 1:
logging.error(
'Multiple savers in the SAVERS collection. On-demand checkpointing '
'will be disabled. Pass an explicit `saver` to the constructor to '
'override this behavior.'
)
return None
return savers[0]
def after_run(self, run_context, run_values):
del run_values
if not self._heartbeat_supported:
return
lame_workers = self._workers.lame_workers()
if lame_workers:
logging.info('ShutdownHook: lame workers found: %s', lame_workers)
if self.saver():
logging.info('ShutdownHook: saving checkpoint to %s',
self._checkpoint_prefix)
self.saver().save(
run_context.session,
self._checkpoint_prefix,
global_step=training_util.get_global_step(),
write_state=True,
)
else:
logging.info('ShutdownHook: no Saver defined.')
for fn in self._on_shutdown_hooks:
fn(run_context, self._workers, lame_workers)
class RestartComputation(object):
    """Shutdown hook that restarts the entire computation.

    Shuts down every worker, then returns control to the top level by
    raising a CoordinatorShutdownException.
    """

    def __init__(self, timeout_ms=10000):
        # Time budget for workers to acknowledge the shutdown request.
        self.timeout_ms = timeout_ms

    def __call__(self, run_context, all_workers, lame_workers):
        del run_context, lame_workers
        all_workers.shutdown(timeout_ms=self.timeout_ms)

        logging.info('Terminating coordinator.')
        raise CoordinatorShutdownException()
class ShutdownLameWorkers(object):
    """Shutdown hook that stops only the lamed workers.

    The rest of the computation proceeds normally, typically waiting for
    the downed workers to be restarted.
    """

    def __init__(self, timeout_ms=10000):
        # Time budget for the lame workers to acknowledge the shutdown.
        self.timeout_in_ms = timeout_ms

    def __call__(self, run_context, all_workers, lame_workers):
        lame_workers.shutdown(timeout_ms=self.timeout_in_ms)
| |
#!/usr/bin/env python
#
# Copyright (c) 2002 Vivake Gupta (vivakeATomniscia.org). All rights reserved.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
# USA
#
# This software is maintained by Vivake (vivakeATomniscia.org) and is available at:
# http://www.omniscia.org/~vivake/python/PorterStemmer.py
#
# Additional modifications were made to incorporate this module into
# nltk. All such modifications are marked with "--NLTK--". The nltk
# version of this module is maintained by the NLTK development staff,
# and is available from the NLTK webpage:
# <http://nltk.sourceforge.net>
"""Porter Stemming Algorithm
This is the Porter stemming algorithm, ported to Python from the
version coded up in ANSI C by the author. It follows the algorithm
presented in
Porter, M. "An algorithm for suffix stripping." Program 14.3 (1980): 130-137.
only differing from it at the points marked --DEPARTURE-- and --NEW--
below.
For a more faithful version of the Porter algorithm, see
http://www.tartarus.org/~martin/PorterStemmer/
Later additions:
June 2000
The 'l' of the 'logi' -> 'log' rule is put with the stem, so that
short stems like 'geo' 'theo' etc work like 'archaeo' 'philo' etc.
This follows a suggestion of Barry Wilkins, research student at
Birmingham.
February 2000
the cvc test for not dropping final -e now looks after vc at the
beginning of a word, so are, eve, ice, ore, use keep final -e. In this
test c is any consonant, including w, x and y. This extension was
suggested by Chris Emerson.
-fully -> -ful treated like -fulness -> -ful, and
-tionally -> -tion treated like -tional -> -tion
both in Step 2. These were suggested by Hiranmay Ghosh, of New Delhi.
Invariants proceed, succeed, exceed. Also suggested by Hiranmay Ghosh.
Additional modifications were made to incorporate this module into
nltk. All such modifications are marked with \"--NLTK--\". The nltk
version of this module is maintained by the NLTK developers, and is
available from <http://nltk.sourceforge.net>
"""
## --NLTK--
## Declare this module's documentation format.
__docformat__ = 'plaintext'
import sys
import re
## --NLTK--
## Import the nltk.stemmer module, which defines the stemmer interface
from api import *
class PorterStemmer(StemmerI):
    ## --NLTK--
    ## Add a module docstring
    """
    A word stemmer based on the Porter stemming algorithm.

        Porter, M. "An algorithm for suffix stripping."
        Program 14.3 (1980): 130-137.

    A few minor modifications have been made to Porter's basic
    algorithm.  See the source code of this module for more
    information.

    The Porter Stemmer requires that all tokens have string types.
    """

    def __init__(self):
        """The main part of the stemming algorithm starts here.
        b is a buffer holding a word to be stemmed. The letters are in b[k0],
        b[k0+1] ... ending at b[k]. In fact k0 = 0 in this demo program. k is
        readjusted downwards as the stemming progresses. Zero termination is
        not in fact used in the algorithm.

        Note that only lower case sequences are stemmed. Forcing to lower case
        should be done before stem(...) is called.
        """

        self.b = ""   # buffer for word to be stemmed
        self.k = 0    # index of the last character of the current stem
        self.k0 = 0   # index of the first character of the word
        self.j = 0    # j is a general offset into the string

        ## --NEW--
        ## This is a table of irregular forms. It is quite short, but still
        ## reflects the errors actually drawn to Martin Porter's attention over
        ## a 20 year period!
        ##
        ## Extend it as necessary.
        ##
        ## The form of the table is:
        ##  {
        ##  "p1" : ["s11","s12","s13", ... ],
        ##  "p2" : ["s21","s22","s23", ... ],
        ##  ...
        ##  "pn" : ["sn1","sn2","sn3", ... ]
        ##  }
        ##
        ## String sij is mapped to paradigm form pi, and the main stemming
        ## process is then bypassed.
        irregular_forms = {
            "sky": ["sky", "skies"],
            "die": ["dying"],
            "lie": ["lying"],
            "tie": ["tying"],
            "news": ["news"],
            "inning": ["innings", "inning"],
            "outing": ["outings", "outing"],
            "canning": ["cannings", "canning"],
            "howe": ["howe"],
            # --NEW--
            "proceed": ["proceed"],
            "exceed": ["exceed"],
            "succeed": ["succeed"],  # Hiranmay Ghosh
        }

        # Invert the table: map each irregular surface form to its stem so
        # stem_word() can bypass the algorithm with a single dict lookup.
        # (Was a keys()/subscript loop; items() avoids the double lookup.)
        self.pool = {}
        for key, forms in irregular_forms.items():
            for val in forms:
                self.pool[val] = key

    def cons(self, i):
        """cons(i) is TRUE <=> b[i] is a consonant."""
        if self.b[i] == 'a' or self.b[i] == 'e' or self.b[i] == 'i' or self.b[i] == 'o' or self.b[i] == 'u':
            return 0
        if self.b[i] == 'y':
            # 'y' is a consonant at the start of the word, otherwise it is a
            # consonant exactly when the preceding letter is a vowel.
            if i == self.k0:
                return 1
            else:
                return (not self.cons(i - 1))
        return 1

    def m(self):
        """m() measures the number of consonant sequences between k0 and j.
        if c is a consonant sequence and v a vowel sequence, and <..>
        indicates arbitrary presence,

           <c><v>       gives 0
           <c>vc<v>     gives 1
           <c>vcvc<v>   gives 2
           <c>vcvcvc<v> gives 3
           ....
        """
        n = 0
        i = self.k0
        # Skip the optional leading consonant sequence.
        while 1:
            if i > self.j:
                return n
            if not self.cons(i):
                break
            i = i + 1
        i = i + 1
        # Count each subsequent vc pair.
        while 1:
            while 1:
                if i > self.j:
                    return n
                if self.cons(i):
                    break
                i = i + 1
            i = i + 1
            n = n + 1
            while 1:
                if i > self.j:
                    return n
                if not self.cons(i):
                    break
                i = i + 1
            i = i + 1

    def vowelinstem(self):
        """vowelinstem() is TRUE <=> k0,...j contains a vowel"""
        for i in range(self.k0, self.j + 1):
            if not self.cons(i):
                return 1
        return 0

    def doublec(self, j):
        """doublec(j) is TRUE <=> j,(j-1) contain a double consonant."""
        if j < (self.k0 + 1):
            return 0
        if (self.b[j] != self.b[j-1]):
            return 0
        return self.cons(j)

    def cvc(self, i):
        """cvc(i) is TRUE <=>

        a) ( --NEW--) i == 1, and p[0] p[1] is vowel consonant, or

        b) p[i - 2], p[i - 1], p[i] has the form consonant -
           vowel - consonant and also if the second c is not w, x or y. this
           is used when trying to restore an e at the end of a short word.
           e.g.

               cav(e), lov(e), hop(e), crim(e), but
               snow, box, tray.
        """
        if i == 0: return 0  # i == 0 never happens perhaps
        if i == 1: return (not self.cons(0) and self.cons(1))
        if not self.cons(i) or self.cons(i-1) or not self.cons(i-2): return 0

        ch = self.b[i]
        if ch == 'w' or ch == 'x' or ch == 'y':
            return 0

        return 1

    def ends(self, s):
        """ends(s) is TRUE <=> k0,...k ends with the string s.

        As a side effect, sets j to point just before the suffix.
        """
        length = len(s)
        if s[length - 1] != self.b[self.k]:  # tiny speed-up
            return 0
        if length > (self.k - self.k0 + 1):
            return 0
        if self.b[self.k-length+1:self.k+1] != s:
            return 0
        self.j = self.k - length
        return 1

    def setto(self, s):
        """setto(s) sets (j+1),...k to the characters in the string s, readjusting k."""
        length = len(s)
        self.b = self.b[:self.j+1] + s + self.b[self.j+length+1:]
        self.k = self.j + length

    def r(self, s):
        """r(s) replaces the suffix with s, but only when m() > 0."""
        if self.m() > 0:
            self.setto(s)

    def step1ab(self):
        """step1ab() gets rid of plurals and -ed or -ing. e.g.

           caresses  ->  caress
           ponies    ->  poni
           sties     ->  sti
           tie       ->  tie        (--NEW--: see below)
           caress    ->  caress
           cats      ->  cat

           feed      ->  feed
           agreed    ->  agree
           disabled  ->  disable

           matting   ->  mat
           mating    ->  mate
           meeting   ->  meet
           milling   ->  mill
           messing   ->  mess

           meetings  ->  meet
        """
        if self.b[self.k] == 's':
            if self.ends("sses"):
                self.k = self.k - 2
            elif self.ends("ies"):
                if self.j == 0:
                    self.k = self.k - 1
                # this line extends the original algorithm, so that
                # 'flies'->'fli' but 'dies'->'die' etc
                else:
                    self.k = self.k - 2
            elif self.b[self.k - 1] != 's':
                self.k = self.k - 1

        if self.ends("ied"):
            if self.j == 0:
                self.k = self.k - 1
            else:
                self.k = self.k - 2
        # this line extends the original algorithm, so that
        # 'spied'->'spi' but 'died'->'die' etc

        elif self.ends("eed"):
            if self.m() > 0:
                self.k = self.k - 1
        elif (self.ends("ed") or self.ends("ing")) and self.vowelinstem():
            self.k = self.j
            if self.ends("at"):   self.setto("ate")
            elif self.ends("bl"): self.setto("ble")
            elif self.ends("iz"): self.setto("ize")
            elif self.doublec(self.k):
                self.k = self.k - 1
                ch = self.b[self.k]
                if ch == 'l' or ch == 's' or ch == 'z':
                    self.k = self.k + 1
            elif (self.m() == 1 and self.cvc(self.k)):
                self.setto("e")

    def step1c(self):
        """step1c() turns terminal y to i when there is another vowel in the stem.
        --NEW--: This has been modified from the original Porter algorithm so that y->i
        is only done when y is preceded by a consonant, but not if the stem
        is only a single consonant, i.e.

           (*c and not c) Y -> I

        So 'happy' -> 'happi', but
          'enjoy' -> 'enjoy'  etc

        This is a much better rule. Formerly 'enjoy'->'enjoi' and 'enjoyment'->
        'enjoy'. Step 1c is perhaps done too soon; but with this modification that
        no longer really matters.

        Also, the removal of the vowelinstem(z) condition means that 'spy', 'fly',
        'try' ... stem to 'spi', 'fli', 'tri' and conflate with 'spied', 'tried',
        'flies' ...
        """
        if self.ends("y") and self.j > 0 and self.cons(self.k - 1):
            self.b = self.b[:self.k] + 'i' + self.b[self.k+1:]

    def step2(self):
        """step2() maps double suffices to single ones.
        so -ization ( = -ize plus -ation) maps to -ize etc. note that the
        string before the suffix must give m() > 0.
        """
        if self.b[self.k - 1] == 'a':
            if self.ends("ational"):   self.r("ate")
            elif self.ends("tional"):  self.r("tion")
        elif self.b[self.k - 1] == 'c':
            if self.ends("enci"):      self.r("ence")
            elif self.ends("anci"):    self.r("ance")
        elif self.b[self.k - 1] == 'e':
            if self.ends("izer"):      self.r("ize")
        elif self.b[self.k - 1] == 'l':
            if self.ends("bli"):       self.r("ble")  # --DEPARTURE--
            # To match the published algorithm, replace this phrase with
            #  if self.ends("abli"):   self.r("able")
            elif self.ends("alli"):
                if self.m() > 0:  # --NEW--
                    self.setto("al")
                    self.step2()
            elif self.ends("fulli"):   self.r("ful")  # --NEW--
            elif self.ends("entli"):   self.r("ent")
            elif self.ends("eli"):     self.r("e")
            elif self.ends("ousli"):   self.r("ous")
        elif self.b[self.k - 1] == 'o':
            if self.ends("ization"):   self.r("ize")
            elif self.ends("ation"):   self.r("ate")
            elif self.ends("ator"):    self.r("ate")
        elif self.b[self.k - 1] == 's':
            if self.ends("alism"):     self.r("al")
            elif self.ends("iveness"): self.r("ive")
            elif self.ends("fulness"): self.r("ful")
            elif self.ends("ousness"): self.r("ous")
        elif self.b[self.k - 1] == 't':
            if self.ends("aliti"):     self.r("al")
            elif self.ends("iviti"):   self.r("ive")
            elif self.ends("biliti"):  self.r("ble")
        elif self.b[self.k - 1] == 'g':  # --DEPARTURE--
            if self.ends("logi"):
                self.j = self.j + 1  # --NEW-- (Barry Wilkins)
                self.r("og")
        # To match the published algorithm, delete this phrase

    def step3(self):
        """step3() deals with -ic-, -full, -ness etc. similar strategy to step2."""
        if self.b[self.k] == 'e':
            if self.ends("icate"):   self.r("ic")
            elif self.ends("ative"): self.r("")
            elif self.ends("alize"): self.r("al")
        elif self.b[self.k] == 'i':
            if self.ends("iciti"):   self.r("ic")
        elif self.b[self.k] == 'l':
            if self.ends("ical"):    self.r("ic")
            elif self.ends("ful"):   self.r("")
        elif self.b[self.k] == 's':
            if self.ends("ness"):    self.r("")

    def step4(self):
        """step4() takes off -ant, -ence etc., in context <c>vcvc<v>."""
        if self.b[self.k - 1] == 'a':
            if self.ends("al"): pass
            else: return
        elif self.b[self.k - 1] == 'c':
            if self.ends("ance"): pass
            elif self.ends("ence"): pass
            else: return
        elif self.b[self.k - 1] == 'e':
            if self.ends("er"): pass
            else: return
        elif self.b[self.k - 1] == 'i':
            if self.ends("ic"): pass
            else: return
        elif self.b[self.k - 1] == 'l':
            if self.ends("able"): pass
            elif self.ends("ible"): pass
            else: return
        elif self.b[self.k - 1] == 'n':
            if self.ends("ant"): pass
            elif self.ends("ement"): pass
            elif self.ends("ment"): pass
            elif self.ends("ent"): pass
            else: return
        elif self.b[self.k - 1] == 'o':
            if self.ends("ion") and (self.b[self.j] == 's' or self.b[self.j] == 't'): pass
            elif self.ends("ou"): pass
            # takes care of -ous
            else: return
        elif self.b[self.k - 1] == 's':
            if self.ends("ism"): pass
            else: return
        elif self.b[self.k - 1] == 't':
            if self.ends("ate"): pass
            elif self.ends("iti"): pass
            else: return
        elif self.b[self.k - 1] == 'u':
            if self.ends("ous"): pass
            else: return
        elif self.b[self.k - 1] == 'v':
            if self.ends("ive"): pass
            else: return
        elif self.b[self.k - 1] == 'z':
            if self.ends("ize"): pass
            else: return
        else:
            return
        # A matching suffix was found above (setting j); drop it if the stem
        # before it has at least two vc sequences.
        if self.m() > 1:
            self.k = self.j

    def step5(self):
        """step5() removes a final -e if m() > 1, and changes -ll to -l if
        m() > 1.
        """
        self.j = self.k
        if self.b[self.k] == 'e':
            a = self.m()
            if a > 1 or (a == 1 and not self.cvc(self.k-1)):
                self.k = self.k - 1
        if self.b[self.k] == 'l' and self.doublec(self.k) and self.m() > 1:
            self.k = self.k - 1

    def stem_word(self, p, i=0, j=None):
        """In stem(p,i,j), p is a char pointer, and the string to be stemmed
        is from p[i] to p[j] inclusive. Typically i is zero and j is the
        offset to the last character of a string, (p[j+1] == '\0'). The
        stemmer adjusts the characters p[i] ... p[j] and returns the new
        end-point of the string, k. Stemming never increases word length, so
        i <= k <= j. To turn the stemmer into a module, declare 'stem' as
        extern, and delete the remainder of this file.
        """
        ## --NLTK--
        ## Don't print results as we go (commented out the next line)
        #print p[i:j+1]
        if j is None:  # was `j == None`; identity test is the correct idiom
            j = len(p) - 1

        # copy the parameters into statics
        self.b = p
        self.k = j
        self.k0 = i

        # Irregular forms bypass the algorithm entirely.
        # (was dict.has_key(), which no longer exists in Python 3)
        if self.b[self.k0:self.k+1] in self.pool:
            return self.pool[self.b[self.k0:self.k+1]]

        if self.k <= self.k0 + 1:
            return self.b  # --DEPARTURE--

        # With this line, strings of length 1 or 2 don't go through the
        # stemming process, although no mention is made of this in the
        # published algorithm. Remove the line to match the published
        # algorithm.

        self.step1ab()
        self.step1c()
        self.step2()
        self.step3()
        self.step4()
        self.step5()
        return self.b[self.k0:self.k+1]

    def adjust_case(self, word, stem):
        """Re-apply the casing of `word` to the (lower-case) `stem`."""
        lower = word.lower()

        ret = ""
        # (was xrange; range behaves identically here and works on Python 3)
        for x in range(len(stem)):
            if lower[x] == stem[x]:
                ret += word[x]
            else:
                ret += stem[x]

        return ret

    ## --NLTK--
    ## Don't use this procedure; we want to work with individual
    ## tokens, instead.  (commented out the following procedure)
    #def stem(self, text):
    #    parts = re.split("(\W+)", text)
    #    numWords = (len(parts) + 1)/2
    #
    #    ret = ""
    #    for i in xrange(numWords):
    #        word = parts[2 * i]
    #        separator = ""
    #        if ((2 * i) + 1) < len(parts):
    #            separator = parts[(2 * i) + 1]
    #
    #        stem = self.stem_word(string.lower(word), 0, len(word) - 1)
    #        ret = ret + self.adjust_case(word, stem)
    #        ret = ret + separator
    #    return ret

    ## --NLTK--
    ## Define a stem() method that implements the StemmerI interface.
    def stem(self, word):
        """Stem `word` and return the result with the original casing re-applied."""
        stem = self.stem_word(word.lower(), 0, len(word) - 1)
        return self.adjust_case(word, stem)

    ## --NLTK--
    ## Add a string representation function
    def __repr__(self):
        return '<PorterStemmer>'
## --NLTK--
## This test procedure isn't applicable.
#if __name__ == '__main__':
# p = PorterStemmer()
# if len(sys.argv) > 1:
# for f in sys.argv[1:]:
# infile = open(f, 'r')
# while 1:
# w = infile.readline()
# if w == '':
# break
# w = w[:-1]
# print p.stem(w)
##--NLTK--
## Added a demo() function

def demo():
    """
    A demonstration of the porter stemmer on a sample from
    the Penn Treebank corpus.
    """
    # NOTE(review): requires the nltk package and its treebank corpus to be
    # installed; imports are local so the module itself does not depend on them.
    from nltk.corpus import treebank
    from nltk import stem

    stemmer = stem.PorterStemmer()

    # Stem the first three treebank items, keeping originals for comparison.
    orig = []
    stemmed = []
    for item in treebank.items[:3]:
        for (word, tag) in treebank.tagged_words(item):
            orig.append(word)
            stemmed.append(stemmer.stem(word))

    # Convert the results to a string, and word-wrap them.
    results = ' '.join(stemmed)
    results = re.sub(r"(.{,70})\s", r'\1\n', results+' ').rstrip()

    # Convert the original to a string, and word wrap it.
    original = ' '.join(orig)
    original = re.sub(r"(.{,70})\s", r'\1\n', original+' ').rstrip()

    # Print the results.
    # N.B. Python 2 print statements — this module predates Python 3.
    print '-Original-'.center(70).replace(' ', '*').replace('-', ' ')
    print original
    print '-Results-'.center(70).replace(' ', '*').replace('-', ' ')
    print results
    print '*'*70

##--NLTK--
## Call demo() if we're invoked directly.
if __name__ == '__main__': demo()
| |
# Copyright (c) 2014,Vienna University of Technology, Department of Geodesy and Geoinformation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Vienna University of Technology, Department of
# Geodesy and Geoinformation nor the names of its contributors may be
# used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL VIENNA UNIVERSITY OF TECHNOLOGY,
# DEPARTMENT OF GEODESY AND GEOINFORMATION BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
Created on May 21, 2014
@author: Christoph Paulik christoph.paulik@geo.tuwien.ac.at
'''
import os
import glob
from datetime import datetime, timedelta
import numpy as np
import warnings
import pytesmo.io.dataset_base as dataset_base
import pytesmo.io.bufr.bufr as bufr_reader
try:
import pygrib
except ImportError:
warnings.warn('pygrib can not be imported H14 images can not be read.')
import pytesmo.timedate.julian as julian
import sys
if sys.version_info < (3, 0):
range = xrange
class H08img(dataset_base.DatasetImgBase):
    """
    Reads HSAF H08 images. The images have to be uncompressed in the following folder structure
    path -
        month_path_str (default 'h08_%Y%m_buf')

    For example if path is set to /home/user/hsaf08 and month_path_str is left to the default 'h08_%Y%m_buf'
    then the images for March 2012 have to be in
    the folder /home/user/hsaf08/h08_201203_buf/

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        if the files are stored in folders by month as is the standard on the HSAF FTP Server
        then please specify the string that should be used in datetime.datetime.strftime
        Default: 'h08_%Y%m_buf'
    day_search_str: string, optional
        to provide an iterator over all images of a day the method _get_possible_timestamps
        looks for all available images on a day on the harddisk. This string is used in
        datetime.datetime.strftime and in glob.glob to search for all files on a day.
        Default : 'h08_%Y%m%d_*.buf'
    file_search_str: string, optional
        this string is used in datetime.datetime.strftime and glob.glob to find a 3 minute bufr file
        by the exact date.
        Default: 'h08_%Y%m%d_%H%M%S*.buf'
    """

    def __init__(self, path, month_path_str='h08_%Y%m_buf',
                 day_search_str='h08_%Y%m%d_*.buf',
                 file_search_str='h08_%Y%m%d_%H%M%S*.buf',
                 filename_datetime_format=(4, 19, '%Y%m%d_%H%M%S')):
        self.path = path
        self.month_path_str = month_path_str
        self.day_search_str = day_search_str
        self.file_search_str = file_search_str
        # (start, stop, strptime format) used to slice the observation start
        # time out of the file basename in _get_orbit_start_date.
        self.filename_datetime_format = filename_datetime_format
        super(H08img, self).__init__(path, sub_path=month_path_str,
                                     filename_templ=file_search_str,
                                     exact_templ=False, grid=None)

    def _get_orbit_start_date(self, filename):
        # Slice the timestamp substring out of the basename and parse it with
        # the configured strptime format.
        orbit_start_str = \
            os.path.basename(filename)[self.filename_datetime_format[0]:
                                       self.filename_datetime_format[1]]
        return datetime.strptime(orbit_start_str,
                                 self.filename_datetime_format[2])

    def tstamps_for_daterange(self, startdate, enddate):
        """
        Get the timestamps as datetime array that are possible for the
        given day, if the timestamps are

        For this product it is not fixed but has to be looked up from
        the hard disk since bufr files are not regular spaced and only
        europe is in this product. For a global product a 3 minute
        spacing could be used as a first approximation

        Parameters
        ----------
        start_date : datetime.date or datetime.datetime
            start date
        end_date : datetime.date or datetime.datetime
            end date

        Returns
        -------
        dates : list
            list of datetimes
        """
        file_list = []
        delta_all = enddate - startdate
        timestamps = []

        # Collect all matching files for each day in the range, in sorted
        # order, then derive one timestamp per file from its name.
        for i in range(delta_all.days + 1):
            timestamp = startdate + timedelta(days=i)

            files = self._search_files(
                timestamp, custom_templ=self.day_search_str)

            file_list.extend(sorted(files))

        for filename in file_list:
            timestamps.append(self._get_orbit_start_date(filename))
        return timestamps

    def _read_spec_file(self, filename, timestamp=None, lat_lon_bbox=None):
        """
        Read specific image for given datetime timestamp.

        Parameters
        ----------
        filename : string
            filename
        timestamp : datetime.datetime
            exact observation timestamp of the image that should be read
        lat_lon_bbox : list, optional
            list of lat,lon coordinates of bounding box
            [lat_min, lat_max, lon_min, lon_max]

        Returns
        -------
        data : dict or None
            dictionary of numpy arrays that hold the image data for each
            variable of the dataset, if no data was found None is returned
        metadata : dict
            dictionary of numpy arrays that hold the metadata
        timestamp : datetime.datetime
            exact timestamp of the image
        lon : numpy.array or None
            array of longitudes, if None self.grid will be assumed
        lat : numpy.array or None
            array of latitudes, if None self.grid will be assumed
        time_var : string or None
            variable name of observation times in the data dict, if None all
            observations have the same timestamp
        """
        with bufr_reader.BUFRReader(filename) as bufr:
            lons = []
            ssm = []
            ssm_noise = []
            ssm_corr_flag = []
            ssm_proc_flag = []
            data_in_bbox = True
            for i, message in enumerate(bufr.messages()):
                if i == 0:
                    # first message is just lat, lon extent
                    # check if any data in bbox
                    if lat_lon_bbox is not None:
                        lon_min, lon_max = message[0, 2], message[0, 3]
                        lat_min, lat_max = message[0, 4], message[0, 5]
                        if (lat_lon_bbox[0] > lat_max or lat_lon_bbox[1] < lat_min or
                                lat_lon_bbox[2] > lon_max or lat_lon_bbox[3] < lon_min):
                            data_in_bbox = False
                            break
                    # print 'columns', math.ceil((message[:, 3] - message[:, 2]) / 0.00416667)
                    # print 'rows', math.ceil((message[:, 5] - message[:, 4]) /
                    # 0.00416667)
                elif data_in_bbox:
                    # first 5 elements are there only once, after that, 4 elements are repeated
                    # till the end of the array these 4 are ssm, ssm_noise, ssm_corr_flag and
                    # ssm_proc_flag
                    # each message contains the values for 120 lons between lat_min and lat_max
                    # the grid spacing is 0.00416667 degrees
                    lons.append(message[:, 0])
                    # NOTE(review): these overwrite the lat_min/lat_max taken
                    # from the first message and use different column indices
                    # (1 and 2 instead of 4 and 5) — presumably the per-message
                    # extent; verify against the H08 bufr layout.
                    lat_min = message[0, 1]
                    lat_max = message[0, 2]
                    ssm.append(message[:, 4::4])
                    ssm_noise.append(message[:, 5::4])
                    ssm_corr_flag.append(message[:, 6::4])
                    ssm_proc_flag.append(message[:, 7::4])

        if data_in_bbox:
            # Stack per-message rows and rotate so latitude becomes the first
            # axis (rows from lat_max down to lat_min).
            ssm = np.rot90(np.vstack(ssm)).astype(np.float32)
            ssm_noise = np.rot90(np.vstack(ssm_noise)).astype(np.float32)
            ssm_corr_flag = np.rot90(
                np.vstack(ssm_corr_flag)).astype(np.float32)
            ssm_proc_flag = np.rot90(
                np.vstack(ssm_proc_flag)).astype(np.float32)
            lats_dim = np.linspace(lat_max, lat_min, ssm.shape[0])
            lons_dim = np.concatenate(lons)

            data = {'ssm': ssm,
                    'ssm_noise': ssm_noise,
                    'proc_flag': ssm_proc_flag,
                    'corr_flag': ssm_corr_flag
                    }

            # if there are is a gap in the image it is not a 2D array in lon, lat space
            # but has a jump in latitude or longitude
            # detect a jump in lon or lat spacing
            lon_jump_ind = np.where(np.diff(lons_dim) > 0.00418)[0]

            if lon_jump_ind.size > 1:
                print("More than one jump in longitude")

            if lon_jump_ind.size == 1:
                diff_lon_jump = np.abs(
                    lons_dim[lon_jump_ind] - lons_dim[lon_jump_ind + 1])
                # NOTE(review): np.round returns a float here; newer numpy
                # versions require an integer for linspace's num and for
                # np.empty's shape — confirm the supported numpy version.
                missing_elements = np.round(diff_lon_jump / 0.00416666)
                missing_lons = np.linspace(lons_dim[lon_jump_ind],
                                           lons_dim[
                                               lon_jump_ind + 1], missing_elements,
                                           endpoint=False)

                # fill up longitude dimension to full grid
                lons_dim = np.concatenate(
                    [lons_dim[:lon_jump_ind], missing_lons, lons_dim[lon_jump_ind + 1:]])
                # fill data with NaN values (1e38 is used as the fill value)
                empty = np.empty((lats_dim.shape[0], missing_elements))
                empty.fill(1e38)
                for key in data:
                    data[key] = np.concatenate(
                        [data[key][:, :lon_jump_ind], empty, data[key][:, lon_jump_ind + 1:]], axis=1)

            lat_jump_ind = np.where(np.diff(lats_dim) > 0.00418)[0]

            if lat_jump_ind.size > 1:
                print("More than one jump in latitude")

            if lat_jump_ind.size == 1:
                diff_lat_jump = np.abs(
                    lats_dim[lat_jump_ind] - lats_dim[lat_jump_ind + 1])
                missing_elements = np.round(diff_lat_jump / 0.00416666)
                missing_lats = np.linspace(lats_dim[lat_jump_ind],
                                           lats_dim[
                                               lat_jump_ind + 1], missing_elements,
                                           endpoint=False)

                # fill up latitude dimension to full grid
                lats_dim = np.concatenate(
                    [lats_dim[:lat_jump_ind], missing_lats, lats_dim[lat_jump_ind + 1:]])
                # fill data with NaN values (1e38 is used as the fill value)
                empty = np.empty((missing_elements, lons_dim.shape[0]))
                empty.fill(1e38)
                for key in data:
                    data[key] = np.concatenate(
                        [data[key][:lat_jump_ind, :], empty, data[key][lat_jump_ind + 1:, :]], axis=0)

            lons, lats = np.meshgrid(lons_dim, lats_dim)
            # only return data in bbox
            if lat_lon_bbox is not None:
                data_ind = np.where((lats >= lat_lon_bbox[0]) &
                                    (lats <= lat_lon_bbox[1]) &
                                    (lons >= lat_lon_bbox[2]) &
                                    (lons <= lat_lon_bbox[3]))
                # indexing returns 1d array
                # get shape of lats_dim and lons_dim to be able to reshape
                # the 1d arrays to the correct 2d shapes
                lats_dim_shape = np.where((lats_dim >= lat_lon_bbox[0]) &
                                          (lats_dim <= lat_lon_bbox[1]))[0].shape[0]
                lons_dim_shape = np.where((lons_dim >= lat_lon_bbox[2]) &
                                          (lons_dim <= lat_lon_bbox[3]))[0].shape[0]

                lons = lons[data_ind].reshape(lats_dim_shape, lons_dim_shape)
                lats = lats[data_ind].reshape(lats_dim_shape, lons_dim_shape)
                for key in data:
                    data[key] = data[key][data_ind].reshape(
                        lats_dim_shape, lons_dim_shape)

            return data, {}, timestamp, lons, lats, None
        else:
            # Nothing inside the requested bounding box.
            return None, {}, timestamp, None, None, None
class H07img(dataset_base.DatasetImgBase):
    """
    Class for reading HSAF H07 SM OBS 1 images in bufr format.
    The images have the same structure as the ASCAT 3 minute pdu files
    and these 2 readers could be merged in the future

    The images have to be uncompressed in the following folder structure
    path -
        month_path_str (default 'h07_%Y%m_buf')

    For example if path is set to /home/user/hsaf07 and month_path_str is left to the default 'h07_%Y%m_buf'
    then the images for March 2012 have to be in
    the folder /home/user/hsaf07/h07_201203_buf/

    Parameters
    ----------
    path: string
        path where the data is stored
    month_path_str: string, optional
        if the files are stored in folders by month as is the standard on the HSAF FTP Server
        then please specify the string that should be used in datetime.datetime.strftime
        Default: 'h07_%Y%m_buf'
    day_search_str: string, optional
        to provide an iterator over all images of a day the method _get_possible_timestamps
        looks for all available images on a day on the harddisk. This string is used in
        datetime.datetime.strftime and in glob.glob to search for all files on a day.
        Default : 'h07_%Y%m%d_*.buf'
    file_search_str: string, optional
        this string is used in datetime.datetime.strftime and glob.glob to find a 3 minute bufr file
        by the exact date.
        Default: 'h07_%Y%m%d_%H%M%S*.buf'
    """

    def __init__(self, path, month_path_str='h07_%Y%m_buf',
                 day_search_str='h07_%Y%m%d_*.buf',
                 file_search_str='h07_%Y%m%d_%H%M%S*.buf',
                 filename_datetime_format=(4, 19, '%Y%m%d_%H%M%S')):
        self.path = path
        self.month_path_str = month_path_str
        self.day_search_str = day_search_str
        self.file_search_str = file_search_str
        # (start, stop, strptime format) used to slice the observation start
        # time out of the file basename in _get_orbit_start_date.
        self.filename_datetime_format = filename_datetime_format
        super(H07img, self).__init__(path, sub_path=month_path_str,
                                     filename_templ=file_search_str,
                                     exact_templ=False, grid=None)

    def _get_orbit_start_date(self, filename):
        # Slice the timestamp substring out of the basename and parse it with
        # the configured strptime format.
        orbit_start_str = \
            os.path.basename(filename)[self.filename_datetime_format[0]:
                                       self.filename_datetime_format[1]]
        return datetime.strptime(orbit_start_str,
                                 self.filename_datetime_format[2])

    def tstamps_for_daterange(self, startdate, enddate):
        """
        Get the timestamps as datetime array that are possible for the
        given day, if the timestamps are

        For this product it is not fixed but has to be looked up from
        the hard disk since bufr files are not regular spaced and only
        europe is in this product. For a global product a 3 minute
        spacing could be used as a first approximation

        Parameters
        ----------
        start_date : datetime.date or datetime.datetime
            start date
        end_date : datetime.date or datetime.datetime
            end date

        Returns
        -------
        dates : list
            list of datetimes
        """
        file_list = []
        delta_all = enddate - startdate
        timestamps = []

        # Collect all matching files for each day in the range, in sorted
        # order, then derive one timestamp per file from its name.
        for i in range(delta_all.days + 1):
            timestamp = startdate + timedelta(days=i)

            files = self._search_files(
                timestamp, custom_templ=self.day_search_str)

            file_list.extend(sorted(files))

        for filename in file_list:
            timestamps.append(self._get_orbit_start_date(filename))
        return timestamps

    def _read_spec_file(self, filename, timestamp=None):
        """
        Read specific image for given datetime timestamp.

        Parameters
        ----------
        filename : string
            filename
        timestamp : datetime.datetime
            exact observation timestamp of the image that should be read

        Returns
        -------
        data : dict
            dictionary of numpy arrays that hold the image data for each
            variable of the dataset
        metadata : dict
            dictionary of numpy arrays that hold the metadata
        timestamp : datetime.datetime
            exact timestamp of the image
        lon : numpy.array or None
            array of longitudes, if None self.grid will be assumed
        lat : numpy.array or None
            array of latitudes, if None self.grid will be assumed
        time_var : string or None
            variable name of observation times in the data dict, if None all
            observations have the same timestamp
        """
        # NOTE(review): several of the arrays collected below (orbit_number,
        # direction_of_motion, ssm_sens, ssm_mean, sigma40, sigma40_noise,
        # beam_ident, azimuth, incidence, sig0) are read from the bufr
        # messages but never included in the returned `data` dict — presumably
        # kept as documentation of the column layout / for future use.
        latitude = []
        longitude = []
        ssm = []
        dates = []
        orbit_number = []
        direction_of_motion = []
        ssm_sens = []
        frozen_lsf = []
        snow_cover = []
        topo_complex = []
        ssm_noise = []
        ssm_mean = []
        beam_ident = []
        azimuth = []
        incidence = []
        sig0 = []
        sigma40 = []
        sigma40_noise = []

        with bufr_reader.BUFRReader(filename) as bufr:
            for message in bufr.messages():
                # Fixed bufr column indices for the H07 / ASCAT 3-minute
                # product layout.
                latitude.append(message[:, 12])
                longitude.append(message[:, 13])
                ssm.append(message[:, 64])
                orbit_number.append(message[:, 15])
                direction_of_motion.append(message[:, 5])
                ssm_sens.append(message[:, 70])
                frozen_lsf.append(message[:, 79])
                snow_cover.append(message[:, 78])
                topo_complex.append(message[:, 81])
                ssm_noise.append(message[:, 65])
                ssm_mean.append(message[:, 73])
                sigma40.append(message[:, 66])
                sigma40_noise.append(message[:, 67])

                # Per-beam (fore/mid/aft) values.
                beam_ident.append([message[:, 20],
                                   message[:, 34],
                                   message[:, 48]])
                incidence.append([message[:, 21],
                                  message[:, 35],
                                  message[:, 49]])
                azimuth.append([message[:, 22],
                                message[:, 36],
                                message[:, 50]])
                sig0.append([message[:, 23],
                             message[:, 37],
                             message[:, 51]])

                years = message[:, 6].astype(int)
                months = message[:, 7].astype(int)
                days = message[:, 8].astype(int)
                hours = message[:, 9].astype(int)
                minutes = message[:, 10].astype(int)
                seconds = message[:, 11].astype(int)

                # NOTE(review): argument order (months, days, years, ...) —
                # presumably matches pytesmo's julian.julday signature; verify.
                dates.append(julian.julday(months, days, years,
                                           hours, minutes, seconds))

        # Flatten the per-message pieces into single 1D arrays.
        ssm = np.concatenate(ssm)
        latitude = np.concatenate(latitude)
        longitude = np.concatenate(longitude)
        orbit_number = np.concatenate(orbit_number)
        direction_of_motion = np.concatenate(direction_of_motion)
        ssm_sens = np.concatenate(ssm_sens)
        frozen_lsf = np.concatenate(frozen_lsf)
        snow_cover = np.concatenate(snow_cover)
        topo_complex = np.concatenate(topo_complex)
        ssm_noise = np.concatenate(ssm_noise)
        ssm_mean = np.concatenate(ssm_mean)
        dates = np.concatenate(dates)
        sigma40 = np.concatenate(sigma40)
        sigma40_noise = np.concatenate(sigma40_noise)

        data = {'ssm': ssm,
                'ssm_noise': ssm_noise,
                'snow_cover': snow_cover,
                'frozen_prob': frozen_lsf,
                'topo_complex': topo_complex,
                'jd': dates
                }

        # 'jd' names the per-observation time variable inside `data`.
        return data, {}, timestamp, longitude, latitude, 'jd'
class H14img(dataset_base.DatasetImgBase):
    """
    Class for reading HSAF H14 SM DAS 2 products in grib format.

    The images have to be uncompressed in the following folder structure:
    path - month_path_str (default 'h14_%Y%m_grib').

    For example if path is set to /home/user/hsaf14 and month_path_str is
    left at the default 'h14_%Y%m_grib' then the images for March 2012 have
    to be in the folder /home/user/hsaf14/h14_201203_grib/.

    Parameters
    ----------
    path : string
        path where the data is stored
    month_path_str : string, optional
        if the files are stored in folders by month as is the standard on
        the HSAF FTP Server then please specify the string that should be
        used in datetime.datetime.strftime
        Default: 'h14_%Y%m_grib'
    file_str : string, optional
        this string is used in datetime.datetime.strftime to get the
        filename of a H14 daily grib file
        Default: 'H14_%Y%m%d00.grib'
    expand_grid : boolean, optional
        if set the images will be expanded to a 2D image during reading
        if false the images will be returned as 1D arrays on the
        reduced gaussian grid
        Default: True
    """
    def __init__(self, path, month_path_str='h14_%Y%m_grib',
                 file_str='H14_%Y%m%d00.grib',
                 expand_grid=True):
        self.path = path
        self.month_path_str = month_path_str
        # keep the raw template around; the base class also receives it as
        # filename_templ below
        self.file_search_str = file_str
        super(H14img, self).__init__(path, sub_path=month_path_str,
                                     filename_templ=file_str, grid=None)
        self.expand_grid = expand_grid
    def _read_spec_file(self, filename, timestamp=None):
        """
        Read a specific H14 grib image file.

        Parameters
        ----------
        filename : string
            path of the grib file to read
        timestamp : datetime.datetime, optional
            exact observation timestamp of the image that should be read;
            passed through unchanged to the caller

        Returns
        -------
        data : dict
            dictionary of numpy arrays that hold the image data for each
            variable of the dataset (one entry per soil-moisture layer)
        metadata : dict
            dictionary of numpy arrays that hold the metadata (empty here)
        timestamp : datetime.datetime
            exact timestamp of the image
        lon : numpy.array or None
            array of longitudes, if None self.grid will be assumed
        lat : numpy.array or None
            array of latitudes, if None self.grid will be assumed
        time_var : string or None
            variable name of observation times in the data dict, if None all
            observations have the same timestamp
        """
        # grib parameterName -> soil moisture layer label used in `data`
        param_names = {'40': 'SM_layer1_0-7cm',
                       '41': 'SM_layer2_7-28cm',
                       '42': 'SM_layer3_28-100cm',
                       '43': 'SM_layer4_100-289cm'}
        data = {}
        with pygrib.open(filename) as grb:
            for i, message in enumerate(grb):
                message.expand_grid(self.expand_grid)
                # NOTE(review): lats/lons are taken from the second message
                # only (enumerate starts at 0); presumably all messages share
                # the same grid — confirm. A file with fewer than two
                # messages would leave lats/lons undefined at the return.
                if i == 1:
                    lats, lons = message.latlons()
                data[param_names[message['parameterName']]] = message.values
        return data, {}, timestamp, lons, lats, None
| |
import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets import load_wine
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import with_setup
# Module-wide scratch directories, removed once by teardown_module().
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
# Per-test category directories, (re)created by setup_load_files().
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
    """Test fixture (clean up) run once after all tests of this module"""
    _remove_dir(DATA_HOME)
    _remove_dir(LOAD_FILES_ROOT)
def setup_load_files():
    """Create two category dirs under LOAD_FILES_ROOT plus one sample file."""
    global TEST_CATEGORY_DIR1
    global TEST_CATEGORY_DIR2
    TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
    TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
    # only the first category gets a file; the second stays empty
    with tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
                                     delete=False) as sample:
        sample.write(b("Hello World!\n"))
def teardown_load_files():
    """Remove the per-test category directories again."""
    for directory in (TEST_CATEGORY_DIR1, TEST_CATEGORY_DIR2):
        _remove_dir(directory)
def test_data_home():
    """get_data_home / clear_data_home round-trip on a dedicated folder."""
    # get_data_home points at the pre-existing folder we hand it
    home = get_data_home(data_home=DATA_HOME)
    assert_equal(home, DATA_HOME)
    assert_true(os.path.exists(home))
    # clear_data_home removes both the contents and the folder itself
    clear_data_home(data_home=home)
    assert_false(os.path.exists(home))
    # a missing folder is recreated on demand
    home = get_data_home(data_home=DATA_HOME)
    assert_true(os.path.exists(home))
def test_default_empty_load_files():
    """load_files on an empty root yields no files and no categories."""
    bunch = load_files(LOAD_FILES_ROOT)
    assert_equal(len(bunch.filenames), 0)
    assert_equal(len(bunch.target_names), 0)
    assert_equal(bunch.DESCR, None)
@with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
    """load_files picks up the single sample file and both categories."""
    bunch = load_files(LOAD_FILES_ROOT)
    assert_equal(len(bunch.filenames), 1)
    assert_equal(len(bunch.target_names), 2)
    assert_equal(bunch.DESCR, None)
    assert_equal(bunch.data, [b("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
    """load_files honours categories, description and encoding arguments.

    Fix: derive the category name with os.path.basename instead of
    splitting the absolute path on '/', which is not portable (it breaks
    on Windows path separators).
    """
    category = os.path.basename(TEST_CATEGORY_DIR1)
    res = load_files(LOAD_FILES_ROOT, description="test",
                     categories=category, encoding="utf-8")
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 1)
    assert_equal(res.DESCR, "test")
    assert_equal(res.data, [u("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
    """With load_content=False no file contents are attached to the bunch."""
    bunch = load_files(LOAD_FILES_ROOT, load_content=False)
    assert_equal(len(bunch.filenames), 1)
    assert_equal(len(bunch.target_names), 2)
    assert_equal(bunch.DESCR, None)
    assert_equal(bunch.get('data'), None)
def test_load_sample_images():
    """Both bundled sample images load when PIL is available."""
    try:
        images = load_sample_images()
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
    else:
        assert_equal(len(images.images), 2)
        assert_equal(len(images.filenames), 2)
        assert_true(images.DESCR)
def test_load_digits():
    """Digits dataset has the documented shape and ten classes."""
    digits = load_digits()
    assert_equal(digits.data.shape, (1797, 64))
    assert_equal(numpy.unique(digits.target).size, 10)
    # return_X_y=True must mirror the bunch attributes
    xy = load_digits(return_X_y=True)
    ref = load_digits()
    assert_true(isinstance(xy, tuple))
    assert_array_equal(xy[0], ref.data)
    assert_array_equal(xy[1], ref.target)
def test_load_digits_n_class_lt_10():
    """Restricting to 9 classes shrinks both the sample set and the labels."""
    nine = load_digits(9)
    assert_equal(nine.data.shape, (1617, 64))
    assert_equal(numpy.unique(nine.target).size, 9)
def test_load_sample_image():
    """china.jpg loads as a 427x640 RGB uint8 array when PIL is present."""
    try:
        china = load_sample_image('china.jpg')
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
    else:
        assert_equal(china.dtype, 'uint8')
        assert_equal(china.shape, (427, 640, 3))
def test_load_missing_sample_image_error():
    """Asking for a non-existent sample image raises AttributeError."""
    try:
        # probe for PIL via scipy's imread (location moved across versions)
        try:
            from scipy.misc import imread
        except ImportError:
            from scipy.misc.pilutil import imread  # noqa
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
    else:
        assert_raises(AttributeError, load_sample_image,
                      'blop.jpg')
def test_load_diabetes():
    """Diabetes dataset shape checks and return_X_y consistency.

    Fix: the target-size check used assert_true(res.target.size, 442),
    which treats 442 as the assertion *message* and therefore passes for
    any non-empty target; use assert_equal like the sibling loader tests.
    """
    res = load_diabetes()
    assert_equal(res.data.shape, (442, 10))
    assert_equal(res.target.size, 442)
    assert_equal(len(res.feature_names), 10)
    assert_true(res.DESCR)
    # test return_X_y option
    X_y_tuple = load_diabetes(return_X_y=True)
    bunch = load_diabetes()
    assert_true(isinstance(X_y_tuple, tuple))
    assert_array_equal(X_y_tuple[0], bunch.data)
    assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_linnerud():
    """Linnerud dataset: 20 samples, 3 exercise and 3 physiological vars."""
    ds = load_linnerud()
    assert_equal(ds.data.shape, (20, 3))
    assert_equal(ds.target.shape, (20, 3))
    assert_equal(len(ds.target_names), 3)
    assert_true(ds.DESCR)
    # return_X_y=True must mirror the bunch attributes
    xy = load_linnerud(return_X_y=True)
    ref = load_linnerud()
    assert_true(isinstance(xy, tuple))
    assert_array_equal(xy[0], ref.data)
    assert_array_equal(xy[1], ref.target)
def test_load_iris():
    """Iris dataset: 150 samples, 4 features, 3 classes."""
    ds = load_iris()
    assert_equal(ds.data.shape, (150, 4))
    assert_equal(ds.target.size, 150)
    assert_equal(ds.target_names.size, 3)
    assert_true(ds.DESCR)
    # return_X_y=True must mirror the bunch attributes
    xy = load_iris(return_X_y=True)
    ref = load_iris()
    assert_true(isinstance(xy, tuple))
    assert_array_equal(xy[0], ref.data)
    assert_array_equal(xy[1], ref.target)
def test_load_wine():
    """Wine dataset: 178 samples, 13 features, 3 classes."""
    ds = load_wine()
    assert_equal(ds.data.shape, (178, 13))
    assert_equal(ds.target.size, 178)
    assert_equal(ds.target_names.size, 3)
    assert_true(ds.DESCR)
    # return_X_y=True must mirror the bunch attributes
    xy = load_wine(return_X_y=True)
    ref = load_wine()
    assert_true(isinstance(xy, tuple))
    assert_array_equal(xy[0], ref.data)
    assert_array_equal(xy[1], ref.target)
def test_load_breast_cancer():
    """Breast-cancer dataset: 569 samples, 30 features, 2 classes."""
    ds = load_breast_cancer()
    assert_equal(ds.data.shape, (569, 30))
    assert_equal(ds.target.size, 569)
    assert_equal(ds.target_names.size, 2)
    assert_true(ds.DESCR)
    # return_X_y=True must mirror the bunch attributes
    xy = load_breast_cancer(return_X_y=True)
    ref = load_breast_cancer()
    assert_true(isinstance(xy, tuple))
    assert_array_equal(xy[0], ref.data)
    assert_array_equal(xy[1], ref.target)
def test_load_boston():
    """Boston dataset: 506 samples, 13 named features."""
    ds = load_boston()
    assert_equal(ds.data.shape, (506, 13))
    assert_equal(ds.target.size, 506)
    assert_equal(ds.feature_names.size, 13)
    assert_true(ds.DESCR)
    # return_X_y=True must mirror the bunch attributes
    xy = load_boston(return_X_y=True)
    ref = load_boston()
    assert_true(isinstance(xy, tuple))
    assert_array_equal(xy[0], ref.data)
    assert_array_equal(xy[1], ref.target)
def test_loads_dumps_bunch():
    """A pickled-and-restored Bunch keeps attribute and key access in sync."""
    restored = loads(dumps(Bunch(x="x")))
    restored.x = "y"
    assert_equal(restored['x'], restored.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
    """Unpickling must ignore stale __dict__ entries from 0.16-era Bunches.

    Reading bunch.key goes through bunch.__dict__ (non-empty for Bunch
    objects pickled with scikit-learn 0.16) while assignment goes through
    __setattr__, which produced surprising behaviour; see
    https://github.com/scikit-learn/scikit-learn/issues/6196 for details.
    """
    bunch = Bunch(key='original')
    bunch.__dict__['key'] = 'set from __dict__'
    restored = loads(dumps(bunch))
    # the stale __dict__ entry must not survive the round-trip
    assert_equal(restored.key, 'original')
    assert_equal(restored['key'], 'original')
    # changing the attribute must also change the value seen via __getitem__
    restored.key = 'changed'
    assert_equal(restored.key, 'changed')
    assert_equal(restored['key'], 'changed')
def test_bunch_dir():
    """dir() on a Bunch must expose its keys (needed for autocomplete)."""
    assert_true("data" in dir(load_iris()))
| |
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from testrunner import testhelp
import re
import bz2
import itertools
from conary_test import rephelp
from conary import conaryclient, errors
from conary.deps import deps
from conary import conarycfg
from conary import versions
from conary.cmds.queryrep import VERSION_FILTER_ALL, VERSION_FILTER_LATEST
from conary.cmds.queryrep import VERSION_FILTER_LEAVES
from conary.cmds.queryrep import FLAVOR_FILTER_ALL, FLAVOR_FILTER_AVAIL
from conary.cmds.queryrep import FLAVOR_FILTER_BEST, FLAVOR_FILTER_EXACT
from conary.cmds import queryrep
from conary.conaryclient import cmdline
from conary.repository import trovesource
from conary.repository.trovesource import TROVE_QUERY_NORMAL
from conary.repository.trovesource import TROVE_QUERY_PRESENT
from conary.repository.trovesource import TROVE_QUERY_ALL
from conary.versions import VersionFromString as VFS
class RepQueryTest(rephelp.RepositoryHelper):
def _rdiff(self, troveSpec, **kwargs):
client = conaryclient.ConaryClient(self.cfg)
return self.captureOutput(queryrep.rdiffCommand, self.cfg,
client, client.getDatabase(), troveSpec,
**kwargs)
    def testBadQuery(self):
        """A trove spec without a trove name must raise ParseError."""
        try:
            queryrep.getTrovesToDisplay(None, ['=conary.rpath.com@'], [], [],
                                VERSION_FILTER_ALL, FLAVOR_FILTER_ALL,
                                self.cfg.installLabelPath, self.cfg.flavor,
                                None)
        except errors.ParseError, msg:
            assert(str(msg) == 'Error with spec "=conary.rpath.com@": Trove name is required')
        else:
            # reaching here means no exception was raised -> fail
            assert(0)
def _checkTupVers(self, tups, vers):
for tup, item in itertools.izip(tups, vers):
if isinstance(item, (list, tuple)):
verStr, flStr = item
else:
verStr = item
flStr = None
if '-' in verStr:
assert(str(tup[1].trailingRevision()) == verStr)
else:
assert(str(tup[1].trailingRevision().version) == verStr)
if flStr:
assert(str(tup[2]) == flStr)
    def testGetTrovesVersions(self):
        """Exercise version/flavor filter combinations of getTrovesToDisplay.

        Builds several foo:foo components differing only in version and
        flavor, then checks which tuples each (version filter, flavor
        filter) pair returns — with and without an explicit trove spec.
        The repository state is built up incrementally, so the order of
        addComponent calls and checks matters.
        """
        self.addComponent('foo:foo', '1.0', 'ssl')
        self.addComponent('foo:foo', '1.0', '~ssl')
        self.addComponent('foo:foo', '1.0', '!ssl')
        self.addComponent('foo:foo', '1.0', 'readline')
        self.addComponent('foo:foo', '2.0-1-1', 'readline')
        self.addComponent('foo:foo', '2.0-2-1', 'readline')
        self.addComponent('foo:foo', '3.0', '!readline')
        repos = self.openRepository()
        targetFlavor = [ deps.parseFlavor('~readline,ssl is:x86') ]
        def _check(troveSpecs, versionFilter, flavorFilter, expected):
            # Run the query and compare trailing versions against `expected`.
            tups = queryrep.getTrovesToDisplay(repos, troveSpecs, [], [],
                                   versionFilter, flavorFilter,
                                   self.cfg.installLabelPath,
                                   targetFlavor,
                                   None)
            self._checkTupVers(tups, expected)
        # test ALL
        _check(['foo:foo'], VERSION_FILTER_ALL, FLAVOR_FILTER_BEST,
               ['1.0', '2.0-1-1', '2.0-2-1'])
        _check(['foo:foo=2.0'], VERSION_FILTER_ALL, FLAVOR_FILTER_BEST,
               ['2.0-1-1', '2.0-2-1'])
        _check(['foo:foo[!readline]'], VERSION_FILTER_ALL, FLAVOR_FILTER_BEST,
               ['1.0', '3.0'])
        _check(['foo:foo[!readline]'], VERSION_FILTER_ALL, FLAVOR_FILTER_AVAIL,
               ['1.0', '1.0', '3.0'])
        _check(['foo:foo[!readline]'], VERSION_FILTER_ALL, FLAVOR_FILTER_ALL,
               ['1.0', '1.0', '1.0', '3.0'])
        _check(['foo:foo'], VERSION_FILTER_ALL, FLAVOR_FILTER_ALL,
               ['1.0', '1.0', '1.0', '1.0', '2.0', '2.0', '3.0'])
        # test ALL w/ no spec
        _check([], VERSION_FILTER_ALL, FLAVOR_FILTER_BEST,
               ['1.0', '2.0-1-1', '2.0-2-1'])
        _check([], VERSION_FILTER_ALL, FLAVOR_FILTER_AVAIL,
               # ssl, ~ssl, readline,
               ['1.0', '1.0', '1.0', '2.0-1-1', '2.0-2-1'])
        _check([], VERSION_FILTER_ALL, FLAVOR_FILTER_ALL,
               ['1.0', '1.0', '1.0', '1.0', '2.0', '2.0', '3.0'])
        # test LEAVES
        _check(['foo:foo'], VERSION_FILTER_LEAVES, FLAVOR_FILTER_BEST,
               ['2.0-2-1'])
        _check(['foo:foo=2.0'], VERSION_FILTER_LEAVES, FLAVOR_FILTER_BEST,
               ['2.0-2-1'])
        _check(['foo:foo[!readline]'], VERSION_FILTER_LEAVES,
               FLAVOR_FILTER_BEST, ['3.0'])
        _check(['foo:foo[!readline]'], VERSION_FILTER_LEAVES, # does not have
                                                              # !ssl
               FLAVOR_FILTER_AVAIL, ['1.0', '1.0', '3.0'])
        _check(['foo:foo[!readline]'], VERSION_FILTER_LEAVES, # include !ssl
               FLAVOR_FILTER_ALL, ['1.0', '1.0', '1.0', '3.0'])
        _check(['foo:foo'], VERSION_FILTER_LEAVES, FLAVOR_FILTER_ALL,
               ['1.0', '1.0', '1.0', '2.0-2-1', '3.0'])
        # check LEAVES with no spec
        _check([], VERSION_FILTER_LEAVES, FLAVOR_FILTER_BEST,
               # best 1.0 flavor, best 2.0 flavor, no other version nodes
               # have compatible flavor leaves.
               ['1.0', '2.0-2-1'])
        _check([], VERSION_FILTER_LEAVES, FLAVOR_FILTER_AVAIL,
               # add in ~ssl because it was also at 1.0 node
               ['1.0', '1.0', '2.0-2-1'])
        _check([], VERSION_FILTER_LEAVES, FLAVOR_FILTER_ALL,
               ['1.0', '1.0', '1.0', '2.0', '3.0'])
        # test LATEST
        _check(['foo:foo'], VERSION_FILTER_LATEST, FLAVOR_FILTER_BEST,
               ['2.0-2-1'])
        _check(['foo:foo=2.0'], VERSION_FILTER_LATEST, FLAVOR_FILTER_BEST,
               ['2.0-2-1'])
        _check(['foo:foo[!readline]'], VERSION_FILTER_LATEST,
               FLAVOR_FILTER_BEST, ['3.0'])
        _check(['foo:foo[!readline]'], VERSION_FILTER_LATEST,
               FLAVOR_FILTER_AVAIL, ['3.0'])
        _check(['foo:foo[!readline]'], VERSION_FILTER_LATEST, # include !ssl
               FLAVOR_FILTER_ALL, ['3.0'])
        _check(['foo:foo'], VERSION_FILTER_LATEST, FLAVOR_FILTER_ALL,
               ['3.0'])
        # to really test latest, we need to make the latest node more
        # interesting..
        # we've already got !readline there.
        self.addComponent('foo:foo', '3.0', 'readline')
        self.addComponent('foo:foo', '3.0', '!ssl')
        self.addComponent('foo:foo', '3.0', 'ssl')
        self.addComponent('foo:foo', '3.0', '~ssl')
        _check(['foo:foo'], VERSION_FILTER_LATEST, FLAVOR_FILTER_BEST,
               ['3.0'])
        _check(['foo:foo[!readline]'], VERSION_FILTER_LATEST,
               FLAVOR_FILTER_BEST, ['3.0'])
        _check(['foo:foo[!readline]'], VERSION_FILTER_LATEST,
               # !readline, ssl, ~ssl
               FLAVOR_FILTER_AVAIL, ['3.0', '3.0', '3.0'])
        _check(['foo:foo[!readline]'], VERSION_FILTER_LATEST,
               # !readline, ssl, ~ssl, !ssl
               FLAVOR_FILTER_ALL, ['3.0', '3.0', '3.0', '3.0'])
        # test LATEST w/ no spec
        _check([], VERSION_FILTER_LATEST, FLAVOR_FILTER_BEST,
               ['3.0'])
        _check([], VERSION_FILTER_LATEST,
               # readline, ssl, ~ssl
               FLAVOR_FILTER_AVAIL, ['3.0', '3.0', '3.0'])
        _check([], VERSION_FILTER_LATEST,
               # readline, ssl, ~ssl, !ssl
               FLAVOR_FILTER_ALL, ['3.0', '3.0', '3.0', '3.0'])
    def testQueryByTroveType(self):
        """Redirects and removed troves are filtered according to troveTypes.

        TROVE_QUERY_NORMAL hides both redirects and removed troves,
        TROVE_QUERY_PRESENT additionally shows redirects, and
        TROVE_QUERY_ALL also shows removed troves.
        """
        repos = self.openRepository()
        def _check(troveSpecs, versionFilter, flavorFilter, expected, all=False,
                   present=False):
            # map the all/present switches onto the troveTypes constant
            if all:
                troveTypes = TROVE_QUERY_ALL
            elif present:
                troveTypes = TROVE_QUERY_PRESENT
            else:
                troveTypes = TROVE_QUERY_NORMAL
            tups = queryrep.getTrovesToDisplay(repos, troveSpecs, [], [],
                                   versionFilter, flavorFilter,
                                   self.cfg.installLabelPath,
                                   self.cfg.flavor,
                                   None,
                                   troveTypes=troveTypes)
            self._checkTupVers(tups, expected)
        # foo is replaced by a redirect, bar is replaced by a redirect and a removed trove
        self.addComponent('foo:run', '1.0')
        self.addComponent('foo:run', '2.0', redirect=['bar:run'])
        self.addComponent('bar:run', '1.0')
        self.addComponent('bar:run', '2.0', redirect=['foo:run'])
        self.addComponent('bar:run', '3.0')
        self.markRemoved('bar:run=3.0')
        # test ALL
        _check([], VERSION_FILTER_ALL, FLAVOR_FILTER_ALL,
               ['1.0', '1.0'])
        _check([], VERSION_FILTER_ALL, FLAVOR_FILTER_ALL,
               ['1.0', '2.0', '1.0', '2.0'], present=True)
        _check([], VERSION_FILTER_ALL, FLAVOR_FILTER_ALL,
               ['1.0', '2.0', '3.0', '1.0', '2.0'], all=True)
        # test LEAVES
        _check([], VERSION_FILTER_LEAVES, FLAVOR_FILTER_ALL, [])
        _check([], VERSION_FILTER_LEAVES, FLAVOR_FILTER_ALL,
               ['2.0', '2.0'], present=True)
        _check([], VERSION_FILTER_LEAVES, FLAVOR_FILTER_ALL,
               ['3.0', '2.0'], all=True)
        # test LATEST
        _check([], VERSION_FILTER_LATEST, FLAVOR_FILTER_ALL, [])
        _check([], VERSION_FILTER_LATEST, FLAVOR_FILTER_ALL, ['2.0', '2.0'], present=True)
        _check([], VERSION_FILTER_LATEST, FLAVOR_FILTER_ALL, ['3.0', '2.0'], all=True)
    def testGetTrovesLeavesMultiRepos(self):
        """LEAVES/LATEST queries must return results from every label on the
        install label path, not just the first one."""
        v1 = self.addComponent('foo:foo', '1.0').getVersion()
        v2 = self.addComponent('foo:foo', ':branch/1.0').getVersion()
        # search both the branch label and the build label
        installLabelPath = conarycfg.CfgLabelList(
                                [versions.Label('localhost@rpl:branch'),
                                 self.cfg.buildLabel])
        repos = self.openRepository()
        tups = queryrep.getTrovesToDisplay(repos, [], [], [],
                                           VERSION_FILTER_LEAVES,
                                           FLAVOR_FILTER_ALL,
                                           installLabelPath,
                                           self.cfg.flavor, affinityDb=None)
        assert(set([x[1] for x in tups]) == set((v1, v2)))
        tups = queryrep.getTrovesToDisplay(repos, ['foo:foo'], [], [],
                                           VERSION_FILTER_LEAVES,
                                           FLAVOR_FILTER_ALL,
                                           installLabelPath,
                                           self.cfg.flavor, affinityDb=None)
        assert(set([x[1] for x in tups]) == set((v1, v2)))
        tups = queryrep.getTrovesToDisplay(repos, ['foo:foo'], [], [],
                                           VERSION_FILTER_LATEST,
                                           FLAVOR_FILTER_ALL,
                                           installLabelPath,
                                           self.cfg.flavor, affinityDb=None)
        assert(set([x[1] for x in tups]) == set((v1, v2)))
        tups = queryrep.getTrovesToDisplay(repos, [], [], [],
                                           VERSION_FILTER_LATEST,
                                           FLAVOR_FILTER_ALL,
                                           installLabelPath,
                                           self.cfg.flavor, affinityDb=None)
        assert(set([x[1] for x in tups]) == set((v1, v2)))
    def testGetTrovesLatestByLabel(self):
        """No-spec queries use getTrovesLatestByLabel; check LATEST vs LEAVES.

        trv1/trv3 live on a shadowed branch, trv2 on the plain label; LATEST
        should return only the newest matching trove, LEAVES one per branch.
        """
        # test out the no trove queries that now use getTrovesLatestByLabel
        trv1 = self.addComponent('foo:run', '/localhost@rpl:branch//rpl:linux/1.0-1-1')
        trv2 = self.addComponent('foo:run', '1.0-1-2')
        trv3 = self.addComponent('foo:run', '/localhost@rpl:branch//rpl:linux/1.0-1-3', 'ssl')
        repos = self.openRepository()
        tups = queryrep.getTrovesToDisplay(repos, [], [], [],
                                           VERSION_FILTER_LATEST,
                                           FLAVOR_FILTER_BEST,
                                           self.cfg.installLabelPath,
                                           self.cfg.flavor, affinityDb=None)
        assert(len(tups) == 1)
        assert(tups[0] == trv3.getNameVersionFlavor())
        tups = queryrep.getTrovesToDisplay(repos, [], [], [],
                                           VERSION_FILTER_LEAVES,
                                           FLAVOR_FILTER_BEST,
                                           self.cfg.installLabelPath,
                                           self.cfg.flavor, affinityDb=None)
        assert(len(tups) == 2)
        assert(set(tups) == set([trv3.getNameVersionFlavor(),
                                 trv2.getNameVersionFlavor()]))
    def testLatestIsOfWrongFlavor(self):
        """CNY-784: when the newest version has an incompatible flavor, the
        query must fall back to the newest *compatible* version instead of
        displaying nothing."""
        # CNY-784 - if the latest version was of an incompatible flavor,
        # conary rq <no args> would display nothing for that trove
        v1 = self.addComponent('foo:foo', '1.0', 'is:x86').getVersion()
        v2 = self.addComponent('foo:foo', '1.1', 'is:x86_64').getVersion()
        targetFlavor = [ deps.parseFlavor('is:x86') ]
        repos = self.openRepository()
        tups = queryrep.getTrovesToDisplay(repos, [], [], [],
                                           VERSION_FILTER_LATEST,
                                           FLAVOR_FILTER_BEST,
                                           self.cfg.installLabelPath,
                                           targetFlavor, affinityDb=None)
        assert(set([x[1] for x in tups]) == set((v1,)))
        tups = queryrep.getTrovesToDisplay(repos, [], [], [],
                                           VERSION_FILTER_LATEST,
                                           FLAVOR_FILTER_AVAIL,
                                           self.cfg.installLabelPath,
                                           targetFlavor, affinityDb=None)
        assert(set([x[1] for x in tups]) == set((v1,)))
def testExactFlavor(self):
self.addComponent('foo:run[~ssl]')
repos = self.openRepository()
def _get(troveSpec):
try:
return queryrep.getTrovesToDisplay(repos, [troveSpec], [], [],
VERSION_FILTER_LATEST,
FLAVOR_FILTER_EXACT,
self.cfg.installLabelPath,
self.cfg.flavor, None)
except errors.TroveNotFound:
return []
assert(not _get('foo:run[ssl]'))
assert(not _get('foo:run'))
assert(_get('foo:run[~ssl]'))
    def testTroveNames(self):
        """troveNamesOnServer keeps names while any version is present and
        drops them only once every version has been removed; names referenced
        by a group stay visible even when their troves are gone."""
        for x in "12":
            for ver in "12":
                self.addComponent("trv%s:lib" % x, ver)
                self.addComponent("trv%s:runtime" % x, ver)
                self.addCollection("trv%s" % x, ver, [":lib", ":runtime"])
        repos = self.openRepository()
        ret = repos.troveNamesOnServer("localhost")
        self.assertEqual(set(ret),
                             set(['trv1:lib', 'trv1:runtime', 'trv1',
                                  'trv2:lib', 'trv2:runtime', 'trv2']))
        self.markRemoved("trv1=1")
        # trv1=2 still exists, so the trv1 names remain
        ret = repos.troveNamesOnServer("localhost")
        self.assertEqual(set(ret),
                             set(['trv1:lib', 'trv1:runtime', 'trv1',
                                  'trv2:lib', 'trv2:runtime', 'trv2']))
        self.markRemoved("trv1=2")
        ret = repos.troveNamesOnServer("localhost")
        self.assertEqual(set(ret), set(['trv2:lib', 'trv2:runtime', 'trv2']))
        self.addCollection("group-trv", [("trv2:runtime", "2"), ("trv3:runtime", "0")])
        ret = repos.troveNamesOnServer("localhost")
        # trv3:runtime is not present thus it shouldn't appear in the list
        self.assertEqual(set(ret), set(['trv2:lib', 'trv2:runtime', 'trv2', "group-trv"]))
        self.markRemoved("trv2=1")
        ret = repos.troveNamesOnServer("localhost")
        # trv2=2 is still there
        self.assertEqual(set(ret), set(['trv2:lib', 'trv2:runtime', 'trv2', "group-trv"]))
        self.markRemoved("trv2=2")
        ret = repos.troveNamesOnServer("localhost")
        self.assertEqual(set(ret), set(["group-trv"]))
        self.markRemoved("group-trv")
        ret = repos.troveNamesOnServer("localhost")
        self.assertEqual(set(ret), set())
        # a group referencing troves keeps the referenced names visible
        self.addCollection("group-other", ["foo:runtime", "foo:lib"])
        self.addComponent("foo:lib", "999")
        ret = repos.troveNamesOnServer("localhost")
        self.assertEqual(set(ret), set(["foo:lib", "group-other"]))
        self.markRemoved("foo:lib=999")
        ret = repos.troveNamesOnServer("localhost")
        self.assertEqual(set(ret), set(["group-other"]))
    def testAffinity(self):
        """With an affinity database the installed trove's branch and flavor
        bias the query results; without one only the system flavor and
        install label path are used."""
        self.addComponent('foo:r', '/localhost@rpl:branch/1.0-1-1', '!readline',
                          ['/usr/bin/foo'])
        self.addComponent('foo:r', '/localhost@rpl:branch/2.0-1-1',
                          'readline,~!ssl', ['/usr/bin/foo'])
        self.addComponent('foo:r', '/localhost@rpl:branch/2.0-1-1',
                          'readline,~ssl', ['/usr/bin/foo'])
        self.addComponent('foo:r', '/localhost@rpl:branch/2.0-1-1',
                          '!readline,~ssl', ['/usr/bin/foo'])
        self.addComponent('foo:r', '/localhost@rpl:branch/2.0-1-1',
                          '!readline,~!ssl', ['/usr/bin/foo'])
        # orig branch - found by very few queries
        self.addComponent('foo:r',
                          '/localhost@rpl:linux/1.0-1-1', 'readline')
        # install the branch version so affinity points at :branch/!readline
        self.updatePkg('foo:r=:branch/1.0[!readline]')
        repos = self.openRepository()
        def _get(affinityDb, versionFilter, flavorFilter, troveSpec):
            return queryrep.getTrovesToDisplay(repos, troveSpec, [], [],
                                               versionFilter,
                                               flavorFilter,
                                               self.cfg.installLabelPath,
                                               self.cfg.flavor, affinityDb)
        db = self.openDatabase()
        troveTups = _get(db, VERSION_FILTER_LATEST,
                         FLAVOR_FILTER_BEST, ['foo:r'])
        assert(len(troveTups) == 1)
        assert(troveTups[0][1].branch() == VFS('/localhost@rpl:branch'))
        assert(str(troveTups[0][2]) == '!readline,~ssl')
        troveTups = _get(db, VERSION_FILTER_LATEST,
                         FLAVOR_FILTER_AVAIL, ['foo:r'])
        assert(len(troveTups) == 2)
        flavors = set(str(x[2]) for x in troveTups)
        assert('readline,~ssl' in flavors)
        assert('!readline,~ssl' in flavors)
        # system compatible, should ignore db
        troveTups = _get(None, VERSION_FILTER_LATEST,
                         FLAVOR_FILTER_AVAIL, ['foo:r'])
        assert(len(troveTups) == 1)
        assert(troveTups[0][1].branch() == VFS('/localhost@rpl:linux'))
        flavors = set(str(x[2]) for x in troveTups)
        assert('readline' in flavors)
    def testQueryByPath(self):
        """getTrovesByPath applies the same version/flavor filters as the
        regular queries, keyed on a file path instead of a trove spec."""
        for troveName in 'foo:run', 'bar:run':
            self.addComponent(troveName, '1.0', 'ssl', ['/usr/bin/foo'])
            self.addComponent(troveName, '1.0', '~ssl', ['/usr/bin/foo'])
            self.addComponent(troveName, '1.0', '!ssl', ['/usr/bin/foo'])
            self.addComponent(troveName, '1.0', 'readline', ['/usr/bin/foo'])
            self.addComponent(troveName, '2.0-1-1', 'readline', ['/usr/bin/foo'])
            self.addComponent(troveName, '2.0-2-1', 'readline', ['/usr/bin/foo'])
            self.addComponent(troveName, '3.0', '!readline', ['/usr/bin/foo'])
        repos = self.openRepository()
        targetFlavor = [ deps.parseFlavor('~readline,ssl is:x86') ]
        def _getByPath(versionFilter, flavorFilter, pathList=['/usr/bin/foo']):
            # query troves owning the given paths under the given filters
            return queryrep.getTrovesByPath(repos, pathList,
                                            versionFilter, flavorFilter,
                                            self.cfg.installLabelPath,
                                            targetFlavor)
        def _check(tups, troveSpecs):
            # every expected spec must resolve to exactly the returned tuples
            source = trovesource.SimpleTroveSource(tups)
            source.searchAsDatabase()
            troveSpecs = [ cmdline.parseTroveSpec(x) for x in troveSpecs ]
            results = source.findTroves(None, troveSpecs)
            receivedTups = itertools.chain(*results.itervalues())
            assert(set(receivedTups) == set(tups))
        assert(len(_getByPath(VERSION_FILTER_ALL, FLAVOR_FILTER_ALL)) == 14)
        tups = _getByPath(VERSION_FILTER_LATEST, FLAVOR_FILTER_ALL)
        _check(tups, ['bar:run=3.0', 'foo:run=3.0'])
        # check leaves, should leave out 2.0-1-1 readline and 1.0 readline.
        tups = _getByPath(VERSION_FILTER_LEAVES, FLAVOR_FILTER_ALL)
        assert(len(tups) == 10)
        # we don't really need to check both foo and bar here...
        tups = [ x for x in tups if x[0] == 'bar:run' ]
        _check(tups, ['bar:run=1.0[ssl]', 'bar:run=2.0-2-1[readline]',
                      'bar:run=1.0[!ssl]', 'bar:run=1.0[~ssl]',
                      'bar:run=3.0[!readline]'])
        # get all compatible flavors, should leave out !readline and !ssl
        tups = _getByPath(VERSION_FILTER_ALL, FLAVOR_FILTER_AVAIL)
        assert(len(tups) == 10)
        tups = [ x for x in tups if x[0] == 'bar:run' ]
        _check(tups, ['bar:run=1.0[ssl]', 'bar:run=1.0[readline]',
                      'bar:run=2.0-1-1[readline]', 'bar:run=2.0-2-1[readline]',
                      'bar:run=1.0[~ssl]'])
        # get best best flavors for each version
        tups = _getByPath(VERSION_FILTER_ALL, FLAVOR_FILTER_BEST)
        tups = [ x for x in tups if x[0] == 'bar:run' ]
        _check(tups, ['bar:run=1.0[ssl]', 'bar:run=2.0-1-1[readline]',
                      'bar:run=2.0-2-1[readline]'])
        tups = _getByPath(VERSION_FILTER_LEAVES, FLAVOR_FILTER_BEST)
        assert(len(tups) == 2)
        tups = [ x for x in tups if x[0] == 'bar:run' ]
        _check(tups, [ 'bar:run=2.0-2-1[readline]'])
        tups = _getByPath(VERSION_FILTER_LATEST, FLAVOR_FILTER_BEST)
        assert(len(tups) == 2)
        _check(tups, ['bar:run=2.0-2-1[readline]', 'foo:run=2.0-2-1[readline]'])
        # add another path for testing querying two paths at once,
        # with different latest versions to test leaves handing.
        self.addComponent('foo:lib', '1.0', 'ssl', ['/usr/lib/foo'])
        self.addComponent('bar:lib', '1.0', 'ssl', ['/usr/lib/foo'])
        self.addComponent('bar:lib', '2.0', 'ssl', ['/usr/lib/foo'])
        tups = _getByPath(VERSION_FILTER_LATEST, FLAVOR_FILTER_BEST,
                          ['/usr/lib/foo', '/usr/bin/foo'])
        _check(tups, ['bar:run=2.0-2-1[readline]', 'foo:run=2.0-2-1[readline]',
                      'foo:lib=1.0', 'bar:lib=2.0'])
    def testWhatProvides(self):
        """what-provides queries resolve dependency strings to troves; note
        that --all-versions does not expand results for what-provides."""
        targetFlavor = [ deps.parseFlavor('is:x86') ]
        def _get(versionFilter, flavorFilter, whatProvidesList):
            # query by parsed dependency instead of trove spec
            return queryrep.getTrovesToDisplay(repos, [], [],
                               [deps.parseDep(x) for x in whatProvidesList],
                               versionFilter,
                               flavorFilter,
                               self.cfg.installLabelPath,
                               targetFlavor, None)
        self.addComponent('foo:run', '1', 'is:x86')
        self.addComponent('foo:run', '2', 'is:x86')
        self.addComponent('foo:run', '2', 'is:x86_64')
        repos = self.openRepository()
        troveTups = _get(VERSION_FILTER_LATEST, FLAVOR_FILTER_BEST, ['trove:foo:run'])
        assert(len(troveTups) == 1)
        troveTups = _get(VERSION_FILTER_LATEST, FLAVOR_FILTER_ALL, ['trove:foo:run'])
        assert(len(troveTups) == 2)
        troveTups = _get(VERSION_FILTER_ALL, FLAVOR_FILTER_ALL, ['trove:foo:run'])
        # --all-versions doesn't really work with what-provides
        assert(len(troveTups) == 2)
def testBuildLogDisplay(self):
buildlog = 'This is test buildlog';
bz2compressor = bz2.BZ2Compressor()
bz2compressor.compress(buildlog)
contents = bz2compressor.flush()
self.addComponent('foo:runtime')
self.addComponent('foo:debuginfo', [('/usr/bin/foo', rephelp.RegularFile(contents=contents, tags=['buildlog']))])
self.addCollection('foo', [(':runtime', True), (':debuginfo', False)])
repos = self.openRepository()
output = self.captureOutput(queryrep.displayTroves, self.cfg, ['foo'], [], [],
queryrep.VERSION_FILTER_LATEST, queryrep.FLAVOR_FILTER_BEST, showBuildLog = True)
self.assertEqual(output[1], buildlog)
def testShowFile(self):
contents1 = 'This is test content';
contents2 = 'This is another test content';
self.addComponent('foo:runtime', [('/usr/bin/foofile', contents1), ('/usr/bin/barfile', contents2)])
self.addCollection('foo', [':runtime'])
repos = self.openRepository()
output = self.captureOutput(queryrep.displayTroves, self.cfg, ['foo'], [], [],
queryrep.VERSION_FILTER_LATEST, queryrep.FLAVOR_FILTER_BEST, filesToShow = ['/usr/bin/barfile'])
self.assertEqual(output[1], contents2)
    def testRdiff1(self):
        """rdiff between two foo versions under every output option.

        Expected output is held in module-level expOutput1* constants
        (defined elsewhere in this file).  The file/flavor fixtures are
        chosen so that provides/requires, tags, permissions and mtimes all
        differ between the two versions.
        """
        req1 = 'soname: ELF32/libfoo1(blah)'
        req2 = 'soname: ELF32/lib/foo2(blah)'
        req3 = 'soname: ELF32/lib/foo3(blah) trove:bar(1)'
        prov1 = "trove:bar(1) trove:baz(1)"
        prov2 = "trove:baz(1) trove:bloop(1)"
        prov3 = "trove:bloop(2) trove:bar(1)"
        buildReqs1 = [ ('py', '1', 'is: x'), ('by', '1', 'is: y'),
                       ('ty', '1', 'is: z')]
        buildReqs2 = [ ('py', '1', 'is: x'), ('my', '1', 'is: y'),
                       ('by', '2', 'is: z')]
        rf1 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
                perms = 0644, provides = prov1, requires = req1,
                mtime = 1136921017,)
        rf2 = rephelp.RegularFile(contents='1\n2\n4\n5\n6\n7\n8\n9\n',
                perms = 0755, provides = prov2, requires = req2,
                mtime = 1136921317, tags=['tag2', 'tag1', 'tag3'])
        rf3 = rephelp.RegularFile(contents='1\n2\n4\n5\n6\n7\n8\n10\n',
                perms = 0400, provides = prov3, requires = req3,
                mtime = 1136921017)
        # rf5 differs from rf1 just by tags
        rf5 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
                perms = 0644, provides = prov1, requires = req1,
                mtime = 1136921017, tags=['tag2', 'tag1', 'tag3'])
        self.addComponent('foo:run', '1', 'is:x86',
                          [('/usr/bin/foo', rf1),
                           ('/usr/bin/bar', rf2),
                           ('/usr/bin/file1', rf1),
                          ])
        self.addComponent('foo:supdoc', '1', 'is:x86',
                          [('/usr/share/doc/foo1', rf1)])
        self.addCollection('foo', '1',
                           [(x, '1', 'is:x86') for x in [':run', ':supdoc']],
                           buildReqs=buildReqs1)
        self.addComponent('foo:run', '2', 'is:x86_64',
                          [('/usr/bin/foo', rf2),
                           ('/usr/bin/file1', rf5),
                           ('/usr/bin/baz', rf3),])
        self.addComponent('foo:doc', '2', 'is:x86_64',
                          [('/usr/share/doc/foo2', rf2)])
        self.addCollection('foo', '2',
                           [(x, '2', 'is:x86_64') for x in [':run', ':doc']],
                           buildReqs=buildReqs2)
        # Force search flavor to x86_64 to get consistent output on x86
        self.cfg.flavor = [deps.parseFlavor('is: x86 x86_64')]
        repos = self.openRepository()
        troveSpec = 'foo=1[is:x86]--2[is:x86_64]'
        ret, outs = self._rdiff(troveSpec)
        self.assertEqual(outs, expOutput1noargs)
        self.cfg.fullFlavors = True
        ret, outs = self._rdiff(troveSpec)
        self.assertEqual(outs, expOutput1fullFlavors)
        self.cfg.fullFlavors = False
        self.cfg.fullVersions = True
        ret, outs = self._rdiff(troveSpec)
        self.assertEqual(outs, expOutput1fullVersions)
        self.cfg.fullVersions = False
        ret, outs = self._rdiff(troveSpec, ls = True)
        self.assertEqual(outs, expOutput1withFiles)
        ret, outs = self._rdiff(troveSpec, fileVersions = True)
        self.assertEqual(outs, expOutput1withFileVersions)
        ret, outs = self._rdiff(troveSpec, lsl = True)
        self.assertEqual(outs, expOutput1withFilesStat)
        ret, outs = self._rdiff(troveSpec, tags = True)
        self.assertEqual(outs, expOutput1withFileTags)
        # Diffing against ourselves
        troveSpec = 'foo=1[is:x86]--1[is:x86]'
        ret, outs = self._rdiff(troveSpec, tags = True)
        self.assertEqual(outs, 'Identical troves\n')
    def testRdiff2(self):
        """rdiff must omit troves that are identical on both sides.

        group-bar (and bar below it) are unchanged between group-foo=1
        and group-foo=2, so only foo's component changes and the
        group-foo update itself may appear in the output.
        """
        # Test showing of troves with no changes
        req1 = 'soname: ELF32/lib/foo3(blah) trove:bar(1)'
        req2 = 'soname: ELF32/lib/foo2(blah)'
        prov1 = "trove:bar(1) trove:baz(1)"
        prov2 = "trove:bar(1) trove:baz(1) soname: ELF32/lib/foo2(blah)"
        # Two distinct file payloads (different contents/perms/mtime/deps).
        rf1 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
            perms = 0644, provides = prov1, requires = req1,
            mtime = 1176921017,)
        rf2 = rephelp.RegularFile(contents='1\n2\n4\n5\n6\n7\n8\n9\n',
            perms = 0755, provides = prov2, requires = req2,
            mtime = 1176921317, tags=['tag2', 'tag1', 'tag3'])
        self.addComponent('foo:run', '1', 'is:x86', [('/usr/bin/foo', rf1)])
        self.addComponent('foo:supdoc', '1', 'is:x86', [('/usr/doc/foo1', rf2)])
        self.addCollection('foo', '1',
            [(x, '1', 'is:x86') for x in [':run', ':supdoc']])
        # bar/group-bar are shared, unchanged members of both group-foo versions.
        self.addComponent('bar:run', '1', [ ('/usr/bin/bar', rf1) ])
        self.addCollection('bar', '1', [':run'])
        self.addCollection('group-bar', '1', ['bar'])
        self.addCollection('group-foo', '1',
            [('foo', '1', 'is:x86'), 'group-bar'])
        # Version 2 renames foo:supdoc to foo:doc but keeps file contents.
        self.addComponent('foo:run', '2', 'is:x86', [('/usr/bin/foo', rf1)])
        self.addComponent('foo:doc', '2', 'is:x86', [('/usr/doc/foo1', rf2)])
        self.addCollection('foo', '2',
            [(x, '2', 'is:x86') for x in [':run', ':doc']])
        self.addCollection('group-foo', '2', [('foo', '2', 'is:x86'),
            ('group-bar', '1', '')])
        troveSpec = 'group-foo=1--2'
        ret, outs = self._rdiff(troveSpec)
        self.assertEqual(outs, expOutput2)
    def testRdiff3(self):
        """File type change: a regular file is replaced by a symlink."""
        # Have a file change from regular file to symbolic link
        rf1 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
            perms = 0644, mtime = 1136921017)
        lf1 = rephelp.Symlink("/etc/passwd")
        self.addComponent('foo:run', '1', [('/usr/bin/foo', rf1)])
        self.addCollection('foo', '1', [':run'])
        self.addComponent('foo:run', '2',
            [('/etc/passwd', rf1), ('/usr/bin/foo', lf1)])
        self.addCollection('foo', '2', [':run'])
        ret, outs = self._rdiff('foo=1--2', lsl = True)
        #re.sub("Symbolic", "<TIMESTRING> (Symbolic", outs)
        # Symlink mtimes are not pinned by the fixture, so normalize the
        # date/time column before comparing against the expected output.
        outs = re.sub(" [0-9]*-[0-9]*-[0-9]* [0-9]*:[0-9]*:[0-9]* ", " <TIMESTRING TIMESTAMP> ", outs)
        self.assertEqual(outs, expOutput3)
    def testRdiff4(self):
        """Trove dependency display: rdiff with and without deps=True."""
        # test trove dependencies
        req1 = 'soname: ELF32/lib/foo3(blah) trove:bar(1)'
        req2 = 'soname: ELF32/lib/foo2(blah)'
        prov1 = "trove:bar(1) trove:baz(1)"
        prov2 = "trove:bar(1) trove:baz(1) soname: ELF32/lib/foo2(blah)"
        rf1 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
            perms = 0644, provides = prov1, requires = req1,
            mtime = 1176921017,)
        rf2 = rephelp.RegularFile(contents='1\n2\n4\n5\n6\n7\n8\n9\n',
            perms = 0755, provides = prov2, requires = req2,
            mtime = 1176921317, tags=['tag2', 'tag1', 'tag3'])
        self.addComponent('foo:run', '1', [('/usr/bin/foo', rf1)])
        self.addCollection('foo', '1', [':run'])
        self.addComponent('foo:run', '2', [('/usr/bin/foo', rf2)])
        self.addCollection('foo', '2', [':run'])
        # Plain diff of the collection shows only the update line.
        ret, outs = self._rdiff('foo=1--2')
        self.assertEqual(outs, expOutput4)
        # deps=True on the component also lists Provides/Requires.
        ret, outs = self._rdiff('foo:run=1--2', deps = True)
        self.assertEqual(outs, expOutput4withTroveDeps)
    def testRdiff5(self):
        """CNY-1605: same trove twice in a group, with two flavors.

        rdiff must match each flavor on the old side with the same
        flavor on the new side instead of crossing them.
        """
        # CNY-1605
        # Create two flavors of the same trove and add them to the same group
        flv1 = '~ssl'
        flv2 = '~!ssl'
        rf11 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
            flavor=flv1)
        rf12 = rephelp.RegularFile(contents='1\n2\n4\n5\n6\n7\n8\n9\n',
            flavor=flv2)
        rf21 = rephelp.RegularFile(contents='1\n2\n4\n5\n6\n7\n8\n9\n',
            flavor=flv1)
        rf22 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
            flavor=flv2)
        files = [ (rf11, rf12), (rf21, rf22) ]
        # Build versions 1 and 2, each with both flavors in group-foo.
        for v, fileobjs in zip([ '1', '2' ], files):
            file1 = fileobjs[0]
            file2 = fileobjs[1]
            t1 = self.addComponent('foo:run', v, fileContents=[('/bin/foo', file1)])
            p1 = self.addCollection('foo', v, [(':run', v, t1.getFlavor())])
            t2 = self.addComponent('foo:run', v, fileContents=[('/bin/foo', file2)])
            p2 = self.addCollection('foo', v, [(':run', v, t2.getFlavor())])
            self.addCollection('group-foo', v,
                [('foo', v, flv1), ('foo', v, flv2)])
        troveSpec = 'group-foo=1--2'
        # fullFlavors makes the per-flavor pairing visible in the output.
        self.cfg.fullFlavors = True
        ret, outs = self._rdiff(troveSpec)
        self.cfg.fullFlavors = False
        self.assertEqual(outs, expOutput5)
    def testRdiff6(self):
        """Troves added to / removed from a group show as Install/Erase."""
        # Test that added and removed troves show up properly
        self.addComponent('foo:run', '1', filePrimer=1)
        self.addCollection('foo', '1', [':run'])
        self.addComponent('erased:run', '1', filePrimer=1)
        self.addCollection('erased', '1', [':run'])
        self.addComponent('added:run', '1', filePrimer=1)
        self.addCollection('added', '1', [':run'])
        # group-foo=1 contains 'erased'; group-foo=2 swaps it for 'added'.
        self.addCollection('group-foo', '1',
            [ ('foo', '1'), ('erased', '1') ])
        self.addCollection('group-foo', '2',
            [ ('foo', '1'), ('added', '1') ])
        troveSpec = 'group-foo=1--2'
        ret, outs = self._rdiff(troveSpec)
        self.assertEqual(outs, expOutput6)
    def testRdiff8(self):
        """CNY-1753: diffing files that live on different branches.

        Skipped: the bug could not be reproduced in a test case.  The
        unreachable code below the raise is kept on purpose as a record
        of the attempted reproduction.
        """
        # CNY-1753
        # Different files living on different branches
        raise testhelp.SkipTestException("Unable to reproduce CNY-1753 in a test case")
        # Manifested in running conary rdiff
        # mkinitrd=conary.rpath.com@rpl:1--usplash.rb.rpath.com@rpl:1
        rf1 = rephelp.RegularFile(contents='\000\001\002\003',
            perms = 0644, mtime = 1176921017,)
        rf2 = rephelp.RegularFile(contents='\000\001\003\005',
            perms = 0644, mtime = 1176921317,)
        v1 = versions.ThawVersion('/localhost@rpl:1/1:1-1-1')
        v2 = versions.ThawVersion('/localhost1@rpl:2/2:2-2-2')
        self.openRepository()
        self.openRepository(1)
        self.addComponent('foo:run', v1, [('/bin/foo', rf1)])
        self.addCollection('foo', v1, [':run'])
        self.addComponent('foo:run', v2, [('/bin/foo', rf2)])
        self.addCollection('foo', v2, [':run'])
        troveSpec = cmdline.parseChangeList('foo=%s--%s' % (v1, v2))[0]
        ret, outs = self.captureOutput(queryrep.diffTroves,
            self.cfg, troveSpec)
        self.assertEqual(outs, '')
    def testRdiff9(self):
        """Binary changes to config; using --diff"""
        # Old file is non-config, new file is config: asDiff must still
        # produce a unified diff of the contents (expOutput9).
        rf1 = rephelp.RegularFile(contents='1\n2\n3\n4\n5\n6\n7\n8\n',
            perms = 0644, mtime = 1136921017, config=False)
        rf2 = rephelp.RegularFile(contents='1\n2\n4\n5\n6\n7\n8\n9\n',
            perms = 0644, mtime = 1136921317, config=True)
        self.addComponent('foo:config', '1', [('/etc/foo', rf1)])
        self.addComponent('foo:config', '2', [('/etc/foo', rf2)])
        ret, outs = self._rdiff('foo:config=1--2', asDiff=True)
        self.assertEqual(outs, expOutput9)
# Literal expected outputs for the testRdiff* cases above.  They are
# compared byte-for-byte against captured command output; do not reformat.
expOutput1noargs = """\
Update foo(:run) (1-1-1[is: x86] -> 2-1-1[is: x86_64])
Install foo:doc=2-1-1
Erase foo:supdoc=1-1-1
"""
expOutput1fullFlavors = """\
Update foo(:run) (1-1-1[is: x86] -> 2-1-1[is: x86_64])
Install foo:doc=2-1-1[is: x86_64]
Erase foo:supdoc=1-1-1[is: x86]
"""
expOutput1fullVersions = """\
Update foo(:run) (/localhost@rpl:linux/1-1-1[is: x86] -> /localhost@rpl:linux/2-1-1[is: x86_64])
Install foo:doc=/localhost@rpl:linux/2-1-1
Erase foo:supdoc=/localhost@rpl:linux/1-1-1
"""
expOutput1withFiles = """\
/usr/share/doc/foo2
/usr/bin/bar
/usr/bin/baz
/usr/bin/file1
/usr/bin/foo
/usr/share/doc/foo1
"""
expOutput1withFileVersions = """\
/usr/share/doc/foo2 2-1-1
/usr/bin/bar 1-1-1
/usr/bin/baz 2-1-1
/usr/bin/file1 2-1-1
/usr/bin/foo 2-1-1
/usr/share/doc/foo1 1-1-1
"""
expOutput1withFilesStat = """\
New -rwxr-xr-x 1 root root 16 2006-01-10 19:28:37 UTC /usr/share/doc/foo2
Del -rwxr-xr-x 1 root root 16 2006-01-10 19:28:37 UTC /usr/bin/bar
New -r-------- 1 root root 17 2006-01-10 19:23:37 UTC /usr/bin/baz
Mod -rw-r--r-- 1 root root 16 2006-01-10 19:23:37 UTC /usr/bin/file1
Mod -rwxr-xr-x 1 root root 16 2006-01-10 19:28:37 UTC /usr/bin/foo
Del -rw-r--r-- 1 root root 16 2006-01-10 19:23:37 UTC /usr/share/doc/foo1
"""
expOutput1withFileTags = """\
/usr/share/doc/foo2 {tag1 tag2 tag3}
/usr/bin/bar {tag1 tag2 tag3}
/usr/bin/baz
/usr/bin/file1 {tag1 tag2 tag3}
/usr/bin/foo {tag1 tag2 tag3}
/usr/share/doc/foo1
"""
expOutput2 = """\
Update foo(:run) (1-1-1 -> 2-1-1)
Install foo:doc=2-1-1
Erase foo:supdoc=1-1-1
Update group-foo (1-1-1 -> 2-1-1)
"""
expOutput3 = """\
New -rw-r--r-- 1 root root 16 <TIMESTRING TIMESTAMP> UTC /etc/passwd
Mod lrwxrwxrwx 1 root root 11 <TIMESTRING TIMESTAMP> UTC /usr/bin/foo -> /etc/passwd
"""
expOutput4 = """\
Update foo(:run) (1-1-1 -> 2-1-1)
"""
expOutput4withTroveDeps = """\
Update foo:run (1-1-1 -> 2-1-1)\nProvides:\n trove: bar(1)\n trove: baz(1)\n trove: foo:run\n soname: ELF32/lib/foo2(blah)\n\nRequires:\n soname: ELF32/lib/foo2(blah)\n
"""
expOutput5 = """\
Update foo(:run) (1-1-1[~!ssl] -> 2-1-1[~!ssl])
Update foo(:run) (1-1-1[~ssl] -> 2-1-1[~ssl])
Update group-foo (1-1-1 -> 2-1-1)
"""
expOutput6 = """\
Install added(:run)=1-1-1
Erase erased=1-1-1
Update group-foo (1-1-1 -> 2-1-1)
"""
expOutput9 = """\
diff --git a/etc/foo b/etc/foo
--- a/etc/foo
+++ b/etc/foo
@@ -1,8 +1,8 @@
1
2
-3
4
5
6
7
8
+9
"""
class MultiRepQueryTest(rephelp.RepositoryHelper):
    """rdiff tests that span more than one repository server."""
    def setUp(self):
        rephelp.RepositoryHelper.setUp(self)
    def tearDown(self):
        # Both servers are started by the tests; stop them in reverse order.
        self.servers.stopServer(1)
        self.servers.stopServer(0)
    def _rdiff(self, troveSpec, **kwargs):
        # Run queryrep.rdiffCommand and capture (returnValue, stdout).
        client = conaryclient.ConaryClient(self.cfg)
        return self.captureOutput(queryrep.rdiffCommand, self.cfg,
            client, client.getDatabase(), troveSpec,
            **kwargs)
    def _openRepository(self, idx, serverName="localhost"):
        """(Re)start repository idx with a clean state and a user/pass ACL."""
        # this could be left open from a previous testsuite running
        label = versions.Label("%s@foo:bar" % serverName)
        self.servers.stopServer(idx)
        repo = self.openRepository(idx, serverName=[serverName])
        self.resetRepository(idx)
        self.addUserAndRole(repo, label, "user", "pass")
        repo.addAcl(label, "user", None, None, write=True, remove=True)
        return repo
    def testRdiffMulti(self):
        """CNY-2544: rdiff of groups including troves from foreign repos."""
        # CNY-2544 - groups including troves from foreign repos
        r0 = self._openRepository(0, "localhost")
        r1 = self._openRepository(1, "otherhost")
        c = self.getRepositoryClient("user", "pass")
        # 'other' lives on otherhost; both group-foo versions live on localhost.
        self.addComponent("other:runtime", "/otherhost@foo:bar/9", repos = c)
        self.addComponent("other:lib", "/otherhost@foo:bar/9", repos = c)
        trv = self.addCollection("other", "/otherhost@foo:bar/9", [ ":runtime", ":lib"], repos = c)
        grpfuu = self.addCollection("group-fuu", "/localhost@foo:bar/1", [ trv.getNameVersionFlavor() ], repos = c)
        grpfoo1 = self.addCollection("group-foo", "/localhost@foo:bar/1", [ grpfuu.getNameVersionFlavor() ], repos = c)
        grpfoo2 = self.addCollection("group-foo", "/localhost@foo:bar/2", [ trv.getNameVersionFlavor() ], repos = c)
        ret, outs = self._rdiff(
            'group-foo=localhost@foo:bar/1--localhost@foo:bar/2')
        self.assertEqual(outs, expOutput7)
# Expected output for MultiRepQueryTest.testRdiffMulti (CNY-2544).
expOutput7 = """\
Update group-foo (1-1-1 -> 2-1-1)
Erase group-fuu=1-1-1
Install other(:lib :runtime)=9-1-1
"""
| |
import csv
import sys
import numpy
import math
from numpy import genfromtxt
from numpy.linalg import inv
import random
from random import randint
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
import time
start_time = time.time()  # wall-clock start; elapsed time printed at exit
# Given values
d = 5 # number of dimensions to learn / degrees of freedom
Sigma2 = 0.1  # observation-noise variance of the rating model
Lambda = 2  # regularization weight on the latent factors
NbIterations = 50  # number of alternating-update sweeps
# Ratings.csv is structured : userId,movieId,rating,timestamp
Ratings = genfromtxt(sys.argv[1], delimiter=',')
N = Ratings.shape[0]  # total number of rating records
N1 = 0 #Nb of users
N2 = 0 #Nb of objects
# I find the number of Users (N1) and the number of objects rated (N2)
# NOTE(review): assumes ids are 1-based and dense so the max id equals
# the count -- verify against the input data.
indexOverN = 0
while indexOverN < N:
    if Ratings[indexOverN][0] > N1:
        N1 = int(Ratings[indexOverN][0])
    if Ratings[indexOverN][1] > N2:
        N2 = int(Ratings[indexOverN][1])
    indexOverN += 1
# Withdraw the unused objects
#
# Count = []
# index = 0
# while index < N2+1:
# Count.append(0)
# index += 1
#
# indexOverN = 0
# while indexOverN < N:
# Count[int(Ratings[indexOverN][1])] += 1
# indexOverN += 1
#
# counter = 0
# j = 1
# while j < N2+1:
# save = int(Count[j])
# Count[j] = int(counter)
# if save == 0:
# counter += 1
# j += 1
#
# N2 = 0
# indexOverN = 0
# while indexOverN < N:
# Ratings[indexOverN][1] = Ratings[indexOverN][1] - Count[int(Ratings[indexOverN][1])]
# if Ratings[indexOverN][1] > N2:
# N2 = Ratings[indexOverN][1]
# indexOverN += 1
#
# with open("corrected-ratings-sandipan.csv", 'wb') as csvfile:
# spamwriter2 = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)
# e = 0
# while e < N:
# spamwriter2.writerow(Ratings[e])
# e += 1
print("N1 (Nb of users)")
print(N1)
print("N2 (Nb of objects)")
print(N2)
# Populate OmegaIJ: dense N1 x N2 matrix of observed ratings; entry
# (i, j) holds user i+1's rating of object j+1, 0 where unrated.
# genfromtxt yields floats, so the 1-based ids are cast to int before
# being used as indices (float array indices are an error in modern numpy).
OmegaIJ = numpy.zeros(shape=(N1, N2))
indexOverN = 0
while indexOverN < N:
    userIdx = int(Ratings[indexOverN][0]) - 1
    objIdx = int(Ratings[indexOverN][1]) - 1
    OmegaIJ[userIdx][objIdx] = Ratings[indexOverN][2]
    indexOverN += 1
U = numpy.zeros(shape=(N1, d))
V = numpy.zeros(shape=(d, N2))
# Fill V, then U, entry by entry with draws from N(0, 1/Lambda).  The
# element order (V row-major, U column-major) matches the original code
# so the sequence of RNG draws is unchanged.
for factor in range(d):
    for obj in range(N2):
        V[factor][obj] = numpy.random.normal(0, float(1.0 / Lambda))
for factor in range(d):
    for user in range(N1):
        U[user][factor] = numpy.random.normal(0, float(1.0 / Lambda))
# MAP objective function to minimize
L = []
def CalculL():
    """Append the current value of the MAP objective to the global L.

    The objective is
        -sum_{(i,j) observed} (M_ij - u_i . v_j)^2 / (2*Sigma2)
        - (Lambda/2) * sum_i ||u_i||^2
        - (Lambda/2) * sum_j ||v_j||^2
    Fixes two defects in the previous version: the norm penalties were
    not squared, and they were accumulated once per observed rating
    instead of once per row of U / column of V.
    """
    global L
    # Squared-error data term over observed ratings only.
    Sum_Mij_uivj = 0
    i = 0
    while i < N1:
        j = 0
        while j < N2:
            if OmegaIJ[i][j] > 0:
                UV = U[i].dot(V[:, j])
                Sum_Mij_uivj += (OmegaIJ[i][j] - UV) ** 2 / (2 * Sigma2)
            j += 1
        i += 1
    # Each latent vector is penalized exactly once, with its squared L2 norm.
    Sum_ui = sum(numpy.linalg.norm(U[i]) ** 2 for i in range(N1)) * Lambda / 2
    Sum_vj = sum(numpy.linalg.norm(V[:, j]) ** 2 for j in range(N2)) * Lambda / 2
    newL = - Sum_Mij_uivj - Sum_ui - Sum_vj
    # Truncated to int, matching the original logging granularity.
    L.append(int(newL))
def WriteToFile(nameOfTheFile_Prefix, nameOfTheFile_extension, OutputMatrix):
    """Write each row of OutputMatrix as a CSV row to '<prefix><extension>.csv'."""
    target = "%s%s.csv" % (nameOfTheFile_Prefix, nameOfTheFile_extension)
    with open(target, 'wb') as handle:
        writer = csv.writer(handle, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        for row in OutputMatrix:
            writer.writerow(row)
def WriteArrayToFile(nameOfTheFile_Prefix, nameOfTheFile_extension, OutputArray):
    """Write OutputArray to '<prefix><extension>.csv', one value per row."""
    target = "%s%s.csv" % (nameOfTheFile_Prefix, nameOfTheFile_extension)
    with open(target, 'wb') as handle:
        writer = csv.writer(handle, delimiter=',', quoting=csv.QUOTE_MINIMAL)
        for value in OutputArray:
            writer.writerow([value])
# Alternating (coordinate-ascent) MAP updates.  With V fixed, each user
# row has the closed-form ridge solution
#   u_i = (Lambda*Sigma2*I + sum_{j in Omega_i} v_j v_j^T)^-1
#         * (sum_{j in Omega_i} M_ij v_j)
# and symmetrically for each object column v_j with U fixed.
iteration = 0
while iteration < NbIterations:
    #Update User location
    i = 0
    while i < N1:
        identity_matrix = numpy.identity(d)
        LSI = identity_matrix * Lambda * Sigma2  # regularization term Lambda*Sigma2*I
        Sum = numpy.zeros(shape=(d,d))      # accumulates v_j v_j^T over rated objects
        Mij_vj = numpy.zeros(shape=(d,1))   # accumulates M_ij * v_j
        j = 0
        while j < N2:
            if OmegaIJ[i][j] > 0: # Meaning that this rating is present
                Vj = V[:,j][numpy.newaxis]
                Sum += (Vj.T).dot(Vj)
                # print("Vj")
                # print(Vj)
                # print("outer product")
                # print(Sum)
                Mij_vj += numpy.transpose(numpy.multiply(Vj,OmegaIJ[i][j]))
            j += 1
        # if (i == 6 or i == 7):
        #     print("Sum")
        #     print(Sum)
        #     print("Mij_vj")
        #     print(Mij_vj)
        LeftProduct = LSI + Sum
        LeftProduct = inv(LeftProduct)
        Total = LeftProduct.dot(Mij_vj)
        U[i] = Total.T  # (1, d) row broadcast into U's i-th row
        # if (i == 6 or i == 7):
        #     print("LeftProduct")
        #     print(LeftProduct)
        #     print("Total")
        #     print(Total)
        i += 1
    # print("U")
    # print(U)
    # update the object location
    j = 0
    while j < N2:
        identity_matrix = numpy.identity(d)
        LSI = identity_matrix * Lambda * Sigma2
        Sum = numpy.zeros(shape=(d,d))      # accumulates u_i u_i^T over rating users
        Mij_ui = numpy.zeros(shape=(d,1))   # accumulates M_ij * u_i
        i = 0
        while i < N1:
            if OmegaIJ[i][j] > 0: # Meaning that this rating is present
                Ui = U[i][numpy.newaxis]
                Sum += numpy.transpose(Ui).dot(Ui)
                # print("yyy")
                # print(Sum)
                Mij_ui += numpy.transpose(numpy.multiply(Ui,OmegaIJ[i][j]))
            i += 1
        LeftProduct = LSI + Sum
        LeftProduct = inv(LeftProduct)
        Total = LeftProduct.dot(Mij_ui)
        V[:,j] = Total.T
        j += 1
    # print("iteration")
    # print(iteration)
    # Record the objective after every full sweep over U and V.
    CalculL()
    # print("L")
    # print(L[-1])
    iteration += 1
    # Snapshot the factors at fixed checkpoints for offline inspection.
    if (iteration == 10 or iteration == 25 or iteration == 50):
        WriteToFile("U-", iteration, U)
        WriteToFile("V-", iteration, V.T)
WriteArrayToFile("objective", "", L)
print("--- %s seconds ---" % (time.time() - start_time))
# plt.plot(L)
# plt.ylabel('L')
# plt.show()
| |
#!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Methods for checking EcmaScript files for indentation issues."""
__author__ = ('robbyw@google.com (Robert Walker)')
from closure_linter import ecmametadatapass
from closure_linter import errors
from closure_linter import javascripttokens
from closure_linter import tokenutil
from closure_linter.common import error
from closure_linter.common import position
import gflags as flags
flags.DEFINE_boolean('debug_indentation', False,
                     'Whether to print debugging information for indentation.')
# Shorthand aliases for the module-qualified names used throughout this file.
Context = ecmametadatapass.EcmaContext
Error = error.Error
Position = position.Position
Type = javascripttokens.JavaScriptTokenType
# The general approach:
#
# 1. Build a stack of tokens that can affect indentation.
# For each token, we determine if it is a block or continuation token.
# Some tokens need to be temporarily overwritten in case they are removed
# before the end of the line.
# Much of the work here is determining which tokens to keep on the stack
# at each point. Operators, for example, should be removed once their
# expression or line is gone, while parentheses must stay until the matching
# end parentheses is found.
#
# 2. Given that stack, determine the allowable indentations.
# Due to flexible indentation rules in JavaScript, there may be many
# allowable indentations for each stack. We follows the general
# "no false positives" approach of GJsLint and build the most permissive
# set possible.
class TokenInfo(object):
  """Indentation bookkeeping for a single token.
  Attributes:
    token: The token being tracked.
    is_block: True when the token opens a block-style indentation.
    is_transient: True when the token should drop off the stack on its own,
        without a matching end token ever being seen.
    overridden_by: TokenInfo whose indentation takes precedence over this
        token's, or None.
    is_permanent_override: True when the override must survive even after the
        overriding token leaves the stack.  For example:
          x([
            1],
          2);
        needs this so the last line is not forced to a continuation indent.
    line_number: Effective line number of the token; for a mis-wrapped leading
        operator this is one less than the actual line number.
  """
  def __init__(self, token, is_block=False):
    """Creates a TokenInfo wrapper.
    Args:
      token: The token to track.
      is_block: Whether the token opens a block indentation.
    """
    self.token = token
    self.overridden_by = None
    self.is_permanent_override = False
    self.is_block = is_block
    # Parens and parameter lists stay on the stack until their matching
    # closer arrives; everything else (unless a block) is transient.
    self.is_transient = (not is_block and
                         token.type not in (Type.START_PAREN,
                                            Type.START_PARAMETERS))
    self.line_number = token.line_number
  def __repr__(self):
    parts = ['\n  %s' % self.token]
    if self.overridden_by:
      parts.append(' OVERRIDDEN [by "%s"]' % self.overridden_by.token.string)
    parts.append(' {is_block: %s, is_transient: %s}' %
                 (self.is_block, self.is_transient))
    return ''.join(parts)
class IndentationRules(object):
"""EmcaScript indentation rules.
Can be used to find common indentation errors in JavaScript, ActionScript and
other Ecma like scripting languages.
"""
def __init__(self):
"""Initializes the IndentationRules checker."""
self._stack = []
# Map from line number to number of characters it is off in indentation.
self._start_index_offset = {}
def Finalize(self):
if self._stack:
old_stack = self._stack
self._stack = []
raise Exception("INTERNAL ERROR: indentation stack is not empty: %r" %
old_stack)
def CheckToken(self, token, state):
"""Checks a token for indentation errors.
Args:
token: The current token under consideration
state: Additional information about the current tree state
Returns:
An error array [error code, error string, error token] if the token is
improperly indented, or None if indentation is correct.
"""
token_type = token.type
indentation_errors = []
stack = self._stack
is_first = self._IsFirstNonWhitespaceTokenInLine(token)
# Add tokens that could decrease indentation before checking.
if token_type == Type.END_PAREN:
self._PopTo(Type.START_PAREN)
elif token_type == Type.END_PARAMETERS:
self._PopTo(Type.START_PARAMETERS)
elif token_type == Type.END_BRACKET:
self._PopTo(Type.START_BRACKET)
elif token_type == Type.END_BLOCK:
start_token = self._PopTo(Type.START_BLOCK)
# Check for required goog.scope comment.
if start_token:
goog_scope = self._GoogScopeOrNone(start_token.token)
if goog_scope is not None:
if not token.line.endswith('; // goog.scope\n'):
if (token.line.find('//') > -1 and
token.line.find('goog.scope') >
token.line.find('//')):
indentation_errors.append([
errors.MALFORMED_END_OF_SCOPE_COMMENT,
('Malformed end of goog.scope comment. Please use the '
'exact following syntax to close the scope:\n'
'}); // goog.scope'),
token,
Position(token.start_index, token.length)])
else:
indentation_errors.append([
errors.MISSING_END_OF_SCOPE_COMMENT,
('Missing comment for end of goog.scope which opened at line '
'%d. End the scope with:\n'
'}); // goog.scope' %
(start_token.line_number)),
token,
Position(token.start_index, token.length)])
elif token_type == Type.KEYWORD and token.string in ('case', 'default'):
self._Add(self._PopTo(Type.START_BLOCK))
elif is_first and token.string == '.':
# This token should have been on the previous line, so treat it as if it
# was there.
info = TokenInfo(token)
info.line_number = token.line_number - 1
self._Add(info)
elif token_type == Type.SEMICOLON:
self._PopTransient()
not_binary_operator = (token_type != Type.OPERATOR or
token.metadata.IsUnaryOperator())
not_dot = token.string != '.'
if is_first and not_binary_operator and not_dot and token.type not in (
Type.COMMENT, Type.DOC_PREFIX, Type.STRING_TEXT):
if flags.FLAGS.debug_indentation:
print 'Line #%d: stack %r' % (token.line_number, stack)
# Ignore lines that start in JsDoc since we don't check them properly yet.
# TODO(robbyw): Support checking JsDoc indentation.
# Ignore lines that start as multi-line strings since indentation is N/A.
# Ignore lines that start with operators since we report that already.
# Ignore lines with tabs since we report that already.
expected = self._GetAllowableIndentations()
actual = self._GetActualIndentation(token)
# Special case comments describing else, case, and default. Allow them
# to outdent to the parent block.
if token_type in Type.COMMENT_TYPES:
next_code = tokenutil.SearchExcept(token, Type.NON_CODE_TYPES)
if next_code and next_code.type == Type.END_BLOCK:
next_code = tokenutil.SearchExcept(next_code, Type.NON_CODE_TYPES)
if next_code and next_code.string in ('else', 'case', 'default'):
# TODO(robbyw): This almost certainly introduces false negatives.
expected |= self._AddToEach(expected, -2)
if actual >= 0 and actual not in expected:
expected = sorted(expected)
indentation_errors.append([
errors.WRONG_INDENTATION,
'Wrong indentation: expected any of {%s} but got %d' % (
', '.join(
['%d' % x for x in expected]), actual),
token,
Position(actual, expected[0])])
self._start_index_offset[token.line_number] = expected[0] - actual
# Add tokens that could increase indentation.
if token_type == Type.START_BRACKET:
self._Add(TokenInfo(token=token,
is_block=token.metadata.context.type == Context.ARRAY_LITERAL))
elif token_type == Type.START_BLOCK or token.metadata.is_implied_block:
self._Add(TokenInfo(token=token, is_block=True))
elif token_type in (Type.START_PAREN, Type.START_PARAMETERS):
self._Add(TokenInfo(token=token, is_block=False))
elif token_type == Type.KEYWORD and token.string == 'return':
self._Add(TokenInfo(token))
elif not token.IsLastInLine() and (
token.IsAssignment() or token.IsOperator('?')):
self._Add(TokenInfo(token=token))
# Handle implied block closes.
if token.metadata.is_implied_block_close:
self._PopToImpliedBlock()
# Add some tokens only if they appear at the end of the line.
is_last = self._IsLastCodeInLine(token)
if is_last:
if token_type == Type.OPERATOR:
if token.string == ':':
if (stack and stack[-1].token.string == '?'):
# When a ternary : is on a different line than its '?', it doesn't
# add indentation.
if (token.line_number == stack[-1].token.line_number):
self._Add(TokenInfo(token))
elif token.metadata.context.type == Context.CASE_BLOCK:
# Pop transient tokens from say, line continuations, e.g.,
# case x.
# y:
# Want to pop the transient 4 space continuation indent.
self._PopTransient()
# Starting the body of the case statement, which is a type of
# block.
self._Add(TokenInfo(token=token, is_block=True))
elif token.metadata.context.type == Context.LITERAL_ELEMENT:
# When in an object literal, acts as operator indicating line
# continuations.
self._Add(TokenInfo(token))
pass
else:
# ':' might also be a statement label, no effect on indentation in
# this case.
pass
elif token.string != ',':
self._Add(TokenInfo(token))
else:
# The token is a comma.
if token.metadata.context.type == Context.VAR:
self._Add(TokenInfo(token))
elif token.metadata.context.type != Context.PARAMETERS:
self._PopTransient()
elif (token.string.endswith('.')
and token_type in (Type.IDENTIFIER, Type.NORMAL)):
self._Add(TokenInfo(token))
elif token_type == Type.PARAMETERS and token.string.endswith(','):
# Parameter lists.
self._Add(TokenInfo(token))
elif token.metadata.is_implied_semicolon:
self._PopTransient()
elif token.IsAssignment():
self._Add(TokenInfo(token))
return indentation_errors
def _AddToEach(self, original, amount):
"""Returns a new set with the given amount added to each element.
Args:
original: The original set of numbers
amount: The amount to add to each element
Returns:
A new set containing each element of the original set added to the amount.
"""
return set([x + amount for x in original])
_HARD_STOP_TYPES = (Type.START_PAREN, Type.START_PARAMETERS,
Type.START_BRACKET)
_HARD_STOP_STRINGS = ('return', '?')
def _IsHardStop(self, token):
"""Determines if the given token can have a hard stop after it.
Hard stops are indentations defined by the position of another token as in
indentation lined up with return, (, [, and ?.
"""
return (token.type in self._HARD_STOP_TYPES or
token.string in self._HARD_STOP_STRINGS or
token.IsAssignment())
def _GetAllowableIndentations(self):
"""Computes the set of allowable indentations.
Returns:
The set of allowable indentations, given the current stack.
"""
expected = set([0])
hard_stops = set([])
# Whether the tokens are still in the same continuation, meaning additional
# indentation is optional. As an example:
# x = 5 +
# 6 +
# 7;
# The second '+' does not add any required indentation.
in_same_continuation = False
for token_info in self._stack:
token = token_info.token
# Handle normal additive indentation tokens.
if not token_info.overridden_by and token.string != 'return':
if token_info.is_block:
expected = self._AddToEach(expected, 2)
hard_stops = self._AddToEach(hard_stops, 2)
in_same_continuation = False
elif in_same_continuation:
expected |= self._AddToEach(expected, 4)
hard_stops |= self._AddToEach(hard_stops, 4)
else:
expected = self._AddToEach(expected, 4)
hard_stops |= self._AddToEach(hard_stops, 4)
in_same_continuation = True
# Handle hard stops after (, [, return, =, and ?
if self._IsHardStop(token):
override_is_hard_stop = (token_info.overridden_by and
self._IsHardStop(token_info.overridden_by.token))
if not override_is_hard_stop:
start_index = token.start_index
if token.line_number in self._start_index_offset:
start_index += self._start_index_offset[token.line_number]
if (token.type in (Type.START_PAREN, Type.START_PARAMETERS) and
not token_info.overridden_by):
hard_stops.add(start_index + 1)
elif token.string == 'return' and not token_info.overridden_by:
hard_stops.add(start_index + 7)
elif (token.type == Type.START_BRACKET):
hard_stops.add(start_index + 1)
elif token.IsAssignment():
hard_stops.add(start_index + len(token.string) + 1)
elif token.IsOperator('?') and not token_info.overridden_by:
hard_stops.add(start_index + 2)
return (expected | hard_stops) or set([0])
def _GetActualIndentation(self, token):
"""Gets the actual indentation of the line containing the given token.
Args:
token: Any token on the line.
Returns:
The actual indentation of the line containing the given token. Returns
-1 if this line should be ignored due to the presence of tabs.
"""
# Move to the first token in the line
token = tokenutil.GetFirstTokenInSameLine(token)
# If it is whitespace, it is the indentation.
if token.type == Type.WHITESPACE:
if token.string.find('\t') >= 0:
return -1
else:
return len(token.string)
elif token.type == Type.PARAMETERS:
return len(token.string) - len(token.string.lstrip())
else:
return 0
def _IsFirstNonWhitespaceTokenInLine(self, token):
"""Determines if the given token is the first non-space token on its line.
Args:
token: The token.
Returns:
True if the token is the first non-whitespace token on its line.
"""
if token.type in (Type.WHITESPACE, Type.BLANK_LINE):
return False
if token.IsFirstInLine():
return True
return (token.previous and token.previous.IsFirstInLine() and
token.previous.type == Type.WHITESPACE)
def _IsLastCodeInLine(self, token):
"""Determines if the given token is the last code token on its line.
Args:
token: The token.
Returns:
True if the token is the last code token on its line.
"""
if token.type in Type.NON_CODE_TYPES:
return False
start_token = token
while True:
token = token.next
if not token or token.line_number != start_token.line_number:
return True
if token.type not in Type.NON_CODE_TYPES:
return False
def _GoogScopeOrNone(self, token):
"""Determines if the given START_BLOCK is part of a goog.scope statement.
Args:
token: A token of type START_BLOCK.
Returns:
The goog.scope function call token, or None if such call doesn't exist.
"""
# Search for a goog.scope statement, which will be 5 tokens before the
# block. Illustration of the tokens found prior to the start block:
# goog.scope(function() {
# 5 4 3 21 ^
maybe_goog_scope = token
for unused_i in xrange(5):
maybe_goog_scope = (maybe_goog_scope.previous if maybe_goog_scope and
maybe_goog_scope.previous else None)
if maybe_goog_scope and maybe_goog_scope.string == 'goog.scope':
return maybe_goog_scope
def _Add(self, token_info):
"""Adds the given token info to the stack.
Args:
token_info: The token information to add.
"""
if self._stack and self._stack[-1].token == token_info.token:
# Don't add the same token twice.
return
if token_info.is_block or token_info.token.type == Type.START_PAREN:
token_info.overridden_by = self._GoogScopeOrNone(token_info.token)
index = 1
while index <= len(self._stack):
stack_info = self._stack[-index]
stack_token = stack_info.token
if stack_info.line_number == token_info.line_number:
# In general, tokens only override each other when they are on
# the same line.
stack_info.overridden_by = token_info
if (token_info.token.type == Type.START_BLOCK and
(stack_token.IsAssignment() or
stack_token.type in (Type.IDENTIFIER, Type.START_PAREN))):
# Multi-line blocks have lasting overrides, as in:
# callFn({
# a: 10
# },
# 30);
close_block = token_info.token.metadata.context.end_token
stack_info.is_permanent_override = \
close_block.line_number != token_info.token.line_number
elif (token_info.token.type == Type.START_BLOCK and
token_info.token.metadata.context.type == Context.BLOCK and
(stack_token.IsAssignment() or
stack_token.type == Type.IDENTIFIER)):
# When starting a function block, the override can transcend lines.
# For example
# long.long.name = function(
# a) {
# In this case the { and the = are on different lines. But the
# override should still apply.
stack_info.overridden_by = token_info
stack_info.is_permanent_override = True
else:
break
index += 1
self._stack.append(token_info)
  def _Pop(self):
    """Pops the top token from the stack.

    Returns:
      The popped token info.
    """
    token_info = self._stack.pop()
    if token_info.token.type not in (Type.START_BLOCK, Type.START_BRACKET):
      # Remove any temporary overrides.
      self._RemoveOverrides(token_info)
    else:
      # For braces and brackets, which can be object and array literals, remove
      # overrides when the literal is closed on the same line.
      token_check = token_info.token
      same_type = token_check.type
      goal_type = None
      if token_info.token.type == Type.START_BRACKET:
        goal_type = Type.END_BRACKET
      else:
        goal_type = Type.END_BLOCK
      line_number = token_info.token.line_number
      count = 0
      # Walk forward on the same line, balancing nested openers (count += 1)
      # against closers (count -= 1).  If the matching closer is found on this
      # line the overrides were only temporary and are removed.
      while token_check and token_check.line_number == line_number:
        if token_check.type == goal_type:
          count -= 1
          if not count:
            self._RemoveOverrides(token_info)
            break
        if token_check.type == same_type:
          count += 1
        token_check = token_check.next
    return token_info
def _PopToImpliedBlock(self):
"""Pops the stack until an implied block token is found."""
while not self._Pop().token.metadata.is_implied_block:
pass
def _PopTo(self, stop_type):
"""Pops the stack until a token of the given type is popped.
Args:
stop_type: The type of token to pop to.
Returns:
The token info of the given type that was popped.
"""
last = None
while True:
last = self._Pop()
if last.token.type == stop_type:
break
return last
def _RemoveOverrides(self, token_info):
"""Marks any token that was overridden by this token as active again.
Args:
token_info: The token that is being removed from the stack.
"""
for stack_token in self._stack:
if (stack_token.overridden_by == token_info and
not stack_token.is_permanent_override):
stack_token.overridden_by = None
def _PopTransient(self):
"""Pops all transient tokens - i.e. not blocks, literals, or parens."""
while self._stack and self._stack[-1].is_transient:
self._Pop()
| |
# coding=utf-8
# Copyright 2021 The Deadunits Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""Has some auxiliary layer definitions needed for pruning and mean replacement.
This file implements two layers:
- MeanReplacer
- Masked Layer
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from six.moves import range
import tensorflow.compat.v2 as tf
class MaskedLayer(tf.keras.layers.Wrapper):
  """This layer wraps keras.layers for applying binary masks to parameters.

  Example Usage:
  `ml = MaskedLayer(tf.keras.layers.Dense(32))`
  To ensure masks are saved properly, initiate tf.train.Checkpoint object after
  masks are generated.
  This layer can be used for two tasks.
  1. Pruning
  One can prune a layer by setting the binary mask.
  `ml.set_mask(binary_weight_mask)`
  2. Dynamic_training: using a part of the layer. controlled dropout
  TODO
  This part is NOT yet needed and therefore not supported.
  """

  def __init__(self, layer, mask_initializer=tf.initializers.ones, **kwargs):
    """Initializes the MaskedLayer.

    MaskedLayer assumes that the `tf.keras.layers.Layer` that it is wrapping has
    their weights at `layer.weights[0]` and bias at `layer.weights[1]`. This
    is the current API for `tf.keras.layers.Conv2D` or `tf.keras.layers.Dense`.

    Args:
      layer: tf.keras.layers.Layer that is going to be wrapped.
      mask_initializer: func, used to initialize the mask.
      **kwargs: any remaining named arguments passed to the super(MaskedLayer).

    Raises:
      ValueError: if the `layer` passed is not a keras layer.
    """
    if not isinstance(layer, tf.keras.layers.Layer):
      raise ValueError('Please initialize `MaskedLayer` layer with a '
                       '`Layer` instance. You passed: %s' % layer)
    super(MaskedLayer, self).__init__(layer, **kwargs)
    # When False, `apply_masks()` becomes a no-op, i.e. the wrapped layer
    # runs unmasked.
    self.enabled = True
    self.mask_initializer = mask_initializer

  def build(self, input_shape):
    """Builds the wrapped layer (if needed) and creates the two mask vars."""
    logging.debug('MaskedLayer generated with shape: %s', input_shape)
    if not self.layer.built:
      self.layer.build(input_shape)
      self.layer.built = True
    # This is the case for now, we only support layers with weights+bias
    assert len(self.layer.weights) == 2
    # Masks are non-trainable and mirror the shape/dtype of the wrapped
    # layer's weight and bias tensors.
    self.mask_weight = self.add_variable('mask_weight',
                                         shape=self.layer.weights[0].shape,
                                         initializer=self.mask_initializer,
                                         trainable=False,
                                         dtype=self.layer.weights[0].dtype)
    self.mask_bias = self.add_variable('mask_bias',
                                       shape=self.layer.weights[1].shape,
                                       initializer=self.mask_initializer,
                                       trainable=False,
                                       dtype=self.layer.weights[1].dtype)
    super(MaskedLayer, self).build(input_shape)
    self.built = True

  def call(self, inputs, **kwargs):
    # We need to apply the mask to ensure the masked parameters are zero.
    # This is due to the fact that the gradient update might change the masked
    # parameters.
    # Note: Another way to implement this is to mask gradient. This would remove
    # the need for setting masked parameters zero before each forward pass.
    self.apply_masks()
    return self.layer(inputs, **kwargs)

  def set_mask(self, mask, is_bias=False):
    """Verifies the shape of the mask and sets it.

    Args:
      mask: tf.Tensor, same size as the weights (`self.layer.weights[0]`) or
        bias (`self.layer.weights[1]`) if `is_bias=True`.
      is_bias: sets the mask for the bias (layer.weights[1]) parameters if True.
    """
    # One shouldn't set the mask before building the layer /init.
    assert self.built
    current_mask = self.mask_bias if is_bias else self.mask_weight
    assert isinstance(mask, tf.Tensor)
    assert current_mask.shape == mask.shape
    # Cast so e.g. an int/bool mask can be assigned into a float variable.
    mask = tf.cast(mask, current_mask.dtype)
    current_mask.assign(mask)

  def apply_masks(self):
    """Zeroes out masked parameters in place by multiplying with the masks."""
    assert self.built
    if self.enabled:
      self.layer.weights[0].assign(self.layer.weights[0] * self.mask_weight)
      self.layer.weights[1].assign(self.layer.weights[1] * self.mask_bias)

  # Needs to be implemented (from tf.keras.layers.Layer)
  def compute_output_shape(self, input_shape):
    return input_shape

  def __repr__(self):
    return 'MaskedLayer object, name=%s' % self.name

  # This is needed to be able to mock nicely.
  def get_layer(self):
    return self.layer

  def get_sparsity(self, weight_only=True):
    # Returns the sparsity of the layer counting the 0's in the mask.
    # Sparsity is (number of zeros in mask) / (number of parameters); with
    # `weight_only=False` the bias mask is folded into both counts.
    total_param = tf.size(self.layer.weights[0], out_type=tf.int32)
    def get_sparse_weight_count():
      active_params = tf.math.count_nonzero(self.mask_weight, dtype=tf.int32)
      return total_param - active_params
    # NOTE(review): after `build`, `mask_weight`/`mask_bias` are never None,
    # so these guards look purely defensive — confirm.
    pruned_param = 0 if self.mask_weight is None else get_sparse_weight_count()
    if not weight_only:
      total_bias = tf.size(self.layer.weights[1], out_type=tf.int32)
      def get_sparse_bias_count():
        active_params = tf.math.count_nonzero(self.mask_bias, dtype=tf.int32)
        return total_bias - active_params
      pruned_bias = 0 if self.mask_bias is None else get_sparse_bias_count()
      total_param += total_bias
      pruned_param += pruned_bias
    sparsity = float(pruned_param) / float(total_param)
    return sparsity
class TaylorScorer(tf.keras.layers.Layer):
  """This layer uses forward activations to calculate two different scores.

  `compute_mean_replacement_saliency` and `compute_removal_saliency`
  has defaults and overwritten with `call` arguments.
  There are two main scores you can calculate.
  - Removal Saliency (RS): activated with `compute_removal_saliency` and if
  activated, first order approximation of the change in the loss calculated
  when the unit is replaced with zeros.
  - Mean Replacement Saliency (MRS): activated with
  `compute_mean_replacement_saliency` and if activated, first order
  approximation of the change in the loss calculated when the unit is replaced
  with its mean value calculated over the batch.
  References for MRS:
  - [Detecting Dead Weights and Units in Neural Networks]
  (https://arxiv.org/abs/1806.06068)
  """
  # Valid keys for `set_or_aggregate` / `get_saved_values`; each is stored in
  # a `_<name>` attribute as a (value, n_elements) running-mean pair.
  _saved_values_set = ['mean', 'l2norm', 'mrs', 'rs']

  def __init__(self,
               name=None,
               compute_removal_saliency=False,
               compute_mean_replacement_saliency=False,
               is_abs=True,
               save_l2norm=False,
               **kwargs):
    """Initializes the MeanReplacer.

    Args:
      name: str, layer name that is passed to the super-class of TaylorScorer
      compute_removal_saliency: bool, activates Removal Saliency(RS)
        calculation.
      compute_mean_replacement_saliency: bool, if True, activates Mean
        Replacement Saliency (MRS) calculation.
      is_abs: bool, if True the first order approximation is aggregated through
        l1 norm. Otherwise reduce_mean is going to be used. Absolute value
        around delta_loss means we want to penalize negative changes as much as
        the positive changes.
      save_l2norm: bool, if True the normalized l2_norm calculated during
        forward pass.
      **kwargs: Any other named argument is passed to the super-class.
    Input shape: Arbitrary. The last dimension is assumed to have the output of
      separate units. In other words, an input of shape N * M * K * C would have
      an output of C units. We would calculate C different MRS scores or replace
      the M[:,:,:,i] with the corresponding mean.
    Output shape: Same shape as input.
    """
    super(TaylorScorer, self).__init__(name=name,
                                       trainable=False,
                                       **kwargs)
    self.compute_removal_saliency = compute_removal_saliency
    self.compute_mean_replacement_saliency = compute_mean_replacement_saliency
    self.is_abs = is_abs
    self.save_l2norm = save_l2norm
    # Each saved value is None until computed, then a (value, count) tuple.
    self._mrs = None
    self._rs = None
    self._mean = None
    # TODO make this optional. This is used to save activations
    self._l2norm = None

  def set_or_aggregate(self, attr_name, val, n_elements):
    """Given an attr_name sets it to val if it is None or aggregates.

    Args:
      attr_name: str, must be one of 'mean', 'mrs', 'rs' or 'l2norm'.
      val: Tensor, to be set or aggregate. It should be normalized value by
        the n_elements.
      n_elements: int, number of elements in current batch.
    Raises:
      AssertionError: if attr_name is not valid.
    """
    assert attr_name in TaylorScorer._saved_values_set
    attr_name = '_' + attr_name
    prev_val = getattr(self, attr_name)
    if prev_val is None:
      setattr(self, attr_name, (val, n_elements))
    else:
      # We do running mean by keeping the (current_mean, total_n_elements)
      prev_val, prev_n_elements = prev_val
      c_n_elements = prev_n_elements + n_elements
      c_val = (prev_val * prev_n_elements + val * n_elements) / c_n_elements
      setattr(self, attr_name, (c_val, c_n_elements))

  def build(self, _):
    # We need to save the change in the activation (`c_delta`) in forward pass
    # and multiply that with output gradient to get the first order
    # approximation of mean replacement: MRS.
    @tf.custom_gradient
    def taylor_calc(x, compute_mean_replacement_saliency,
                    compute_removal_saliency, is_abs, save_l2norm):
      """Identity function calculating mrs, rs or both as side effects.

      `compute_mean_replacement_saliency` and `compute_removal_saliency`
      accepted as arguments to ensure consistency between
      forward and backward pass. Reading from
      `self.compute_mean_replacement_saliency` may cause inconsistencies, if
      those fields are updated between forward and backward call.
      Args:
        x: input tensor of any_shape(>=1d)
        compute_mean_replacement_saliency: Whether to calculate MRS during
          gradient calculation.
        compute_removal_saliency: Whether to calculate RS during gradient
          calculation.
        is_abs: bool, if True change in the loss penalized in both direction.
        save_l2norm: bool, if True saves the average squared activations.
      Returns:
        the output, gradient
      """
      # `xshape` is remembered so saved values can later be broadcast back to
      # the input shape in `get_saved_values`.
      self.xshape = x.shape
      n_dims = len(x.shape)
      # Assumes axis 0 is the batch dimension.
      n_elements = int(x.shape[0])
      # Per-unit mean over every axis except the last (units) axis.
      c_mean = tf.reduce_mean(x, axis=list(range(n_dims - 1)))
      self.set_or_aggregate('mean', c_mean, n_elements)
      if save_l2norm:
        reshaped_inp = tf.reshape(x, [-1, x.shape[-1]])
        l2norm = tf.reduce_sum(
            tf.square(reshaped_inp), axis=0) / reshaped_inp.shape[0]
        self.set_or_aggregate('l2norm', l2norm, n_elements)
      def grad(dy):
        """Implements the calculation of two different saliencies.

        Mean Replacement Saliency (MRS)
        Removal Saliency (RS)
        This function is an identity gradient function. If
        `compute_mean_replacement_saliency`
        is True, it calculates the MRS score and stores it at `self._mrs`. If
        `compute_removal_saliency` is True, it calculates/aggregates the RS
        score and stores it at `self._rs`.
        Args:
          dy: Output gradient.
        Returns:
          dy: input itself.
        """
        if compute_mean_replacement_saliency:
          # Using c_mean, not the saved value.
          brodcasted_mean = tf.broadcast_to(c_mean, x.shape)
          c_delta = brodcasted_mean - x
          c_mrs = tf.multiply(c_delta, dy)
          # Reduce the tensor through sum if it has more than 2 dimensions (e.g.
          # output of Conv2D layer).
          # For example: if 4D (output of Conv2D), it sums over axis=[1, 2].
          for _ in range(n_dims - 2):
            c_mrs = tf.reduce_mean(c_mrs, axis=1)
          # Following is for approximating abs change.
          if is_abs:
            c_mrs = tf.abs(c_mrs)
          c_mrs = tf.reduce_mean(c_mrs, axis=0)
          self.set_or_aggregate('mrs', c_mrs, n_elements)
        if compute_removal_saliency:
          c_rs = tf.multiply(-x, dy)
          # Reduce the tensor through sum if it has more than 2 dimensions (e.g.
          # output of Conv2D layer).
          # For example: if 4D (output of Conv2D), it sums over axis=[1, 2].
          for _ in range(n_dims - 2):
            c_rs = tf.reduce_mean(c_rs, axis=1)
          # Following is for approximating abs change.
          if is_abs:
            c_rs = tf.abs(c_rs)
          c_rs = tf.reduce_mean(c_rs, axis=0)
          self.set_or_aggregate('rs', c_rs, n_elements)
        return dy
      return tf.identity(x), grad
    self.custom_forward_fun = taylor_calc

  def call(self,
           inputs,
           compute_removal_saliency=None,
           compute_mean_replacement_saliency=None,
           is_abs=None,
           save_l2norm=None,
           aggregate_values=False):
    """Forward call for the layer implementing custom behaviours.

    There are two modes, that cannot be on at the same time.
    - `self.compute_mean_replacement_saliency`: if True, MRS is calculated
      during backprop.
    - `self.compute_removal_saliency`: if True, RS is calculated during
      backprop.
    These two modes can be overwritten by the named_arguments.
    Args:
      inputs: tf.Tensor with at least one element.
      compute_removal_saliency: overwrites the `self.compute_removal_saliency`
        event for current call.
      compute_mean_replacement_saliency: overwrites the
        `self.compute_mean_replacement_saliency` event for current call.
      is_abs: bool, if True change in the loss penalized in both direction.
      save_l2norm: bool, if True overwrites the flag provided at initiliazation
        for this specific call.
      aggregate_values: bool, if True it aggregate the previous values with the
        new ones.
    Returns:
      output: Same as `inputs`.
    """
    # Each None argument falls back to the value configured at __init__.
    if compute_removal_saliency is None:
      compute_removal_saliency = self.compute_removal_saliency
    if compute_mean_replacement_saliency is None:
      compute_mean_replacement_saliency = self.compute_mean_replacement_saliency
    if is_abs is None:
      is_abs = self.is_abs
    if save_l2norm is None:
      save_l2norm = self.save_l2norm
    if not aggregate_values:
      # Remove the previous mrs/rs if it exists.
      self.reset_saved_values()
    output = self.custom_forward_fun(
        inputs,
        compute_mean_replacement_saliency=compute_mean_replacement_saliency,
        compute_removal_saliency=compute_removal_saliency,
        is_abs=is_abs,
        save_l2norm=save_l2norm)
    return output

  def compute_output_shape(self, input_shape):
    return input_shape

  def reset_saved_values(self):
    # Drops all running aggregates; the next forward pass starts fresh.
    self._mean = None
    self._l2norm = None
    self._mrs = None
    self._rs = None

  def get_saved_values(self,
                       attr_name,
                       broadcast_to_input_shape=False,
                       unit_mask=None):
    """Returns the saved values of the most recent forward pass.

    All of 'mean', 'l2norm', 'mrs' and 'rs' have the same shape and here we
    define common getter operation for them.
    Args:
      attr_name: str, 'mean', 'l2norm', 'mrs' or 'rs'.
      broadcast_to_input_shape: bool, if True the values are broadcast to the
        input shape.
      unit_mask: Tensor, same shape as `self._<attr_name>` and it is multiplied
        with the saved tensor before broadcast operation.
    Returns:
      Tensor or None: None if there is no saved value exists.
    Raises:
      ValueError: when the `attr_name` is not valid.
    """
    if attr_name not in TaylorScorer._saved_values_set:
      raise ValueError('attr_name: %s is not valid. ' % attr_name)
    attr_name = '_'+ attr_name
    if getattr(self, attr_name) is None:
      return None
    # Saved values are (value, n_elements) pairs; only the value is returned.
    val, _ = getattr(self, attr_name)
    # TODO maybe get rid of this part. It doesn't belong here.
    if unit_mask is None:
      possibly_masked_mean = val
    else:
      tf.assert_equal(val.shape, unit_mask.shape)
      possibly_masked_mean = tf.multiply(val, unit_mask)
    if broadcast_to_input_shape:
      # NOTE(review): `self.xshape` only exists after at least one forward
      # pass through `custom_forward_fun` — confirm callers guarantee this.
      return tf.broadcast_to(possibly_masked_mean, self.xshape)
    else:
      return possibly_masked_mean

  def get_config(self):
    config = {
        'compute_removal_saliency':
            self.compute_removal_saliency,
        'compute_mean_replacement_saliency':
            self.compute_mean_replacement_saliency,
        'is_abs': self.is_abs,
        'save_l2norm': self.save_l2norm
    }
    # Only add TensorFlow-specific parameters if they are set, so as to preserve
    # model compatibility with external Keras.
    base_config = super(TaylorScorer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def __repr__(self):
    return ('TaylorScorer object, name=%s, '
            'compute_removal_saliency=%s, compute_mean_replacement_saliency=%s,'
            ' save_l2norm:%s, is_abs:%s'
           ) % (self.name, self.compute_removal_saliency,
                self.compute_mean_replacement_saliency, self.save_l2norm,
                self.is_abs)
class MeanReplacer(tf.keras.layers.Layer):
  """This layer replaces some of the units asked with their mean.

  `is_replacing` has default and can be overwritten with arguments during
  `call`.
  """

  def __init__(self, name=None, is_replacing=False, **kwargs):
    """Initializes the MeanReplacer.

    Args:
      name: str, layer name that is passed to the super-class of MeanReplacer
      is_replacing: bool, if True replaces the output of each unit (last
        dimension) with the mean values calculated over the batch and filters
        (everything but the last dim). If false, no side effects.
      **kwargs: Any other named argument is passed to the super-class.
    Input shape: Arbitrary. The last dimension is assumed to have the output of
      separate units. In other words, an input of shape N * M * K * C would have
      an output of C units. We would calculate C different MRS scores or replace
      the M[:,:,:,i] with the corresponding mean.
    Output shape: Same shape as input.
    """
    super(MeanReplacer, self).__init__(name=name,
                                       trainable=False,
                                       **kwargs)
    self.is_replacing = is_replacing
    # Unit indices (last-dim positions) whose activations get mean-replaced.
    self._active_units = []
    # NOTE(review): `self.mrs` is never read or written elsewhere in this
    # class — looks vestigial; confirm before removing.
    self.mrs = None

  def set_active_units(self, active_units):
    """Checks validity of active units and sets it.

    The layer should be built before calling this function.
    Args:
      active_units: list, of int. Every int should be in the range
        0<i<input_shape[-1]. Note that input_shape[-1] is equal to number of
        units. Duplicates are removed.
    """
    assert self.built
    # active_units returns True if not empty
    assert (isinstance(active_units, list) and active_units)
    for i in active_units:
      assert (isinstance(i, int) and 0 <= i and i < self.n_units)
    # Making sure we have a copy of the list and unique indices.
    self._active_units = sorted(list(set(active_units)))

  def build(self, input_shape):
    # We need `n_units` for `self.set_active_units` function.
    self.n_units = input_shape[-1]

  def call(self, inputs, is_replacing=None):
    """Forward call for the mean replacing layer.

    `self.is_replacing`: if True, the output is calculated through taking
    the mean over the batch for each unit. Units for which we perform this
    calculation is declared with self._active_units and set with
    `set_active_units` function.
    Replacing mode can be overwritten by the is_replacing named argument.
    Args:
      inputs: tf.Tensor with at least one element.
      is_replacing: overwrites the `self.is_replacing` event for current call.
    Returns:
      output: Same as `inputs` if self.is_replacing` is False. If it is True,
        all input channels except last dimension replaced with the mean for the
        `active_units`.
    Raises:
      AssertionError: When the `input` and `n_units` doesn't match. We expect to
        process same shape `input`. If another `input` shape is required, create
        a new MeanReplacer.
    """
    if is_replacing is None:
      is_replacing = self.is_replacing
    # NOTE(review): a fresh tf.Variable is allocated on every call so that
    # per-unit slices can be assigned in place — confirm this per-call
    # allocation is intended (it is avoidable with tensor ops).
    output = tf.Variable(inputs)
    if is_replacing:
      # If active units empty give a warning and return the input.
      if not self._active_units:
        logging.warning(
            'From %s: is_replacing=True, but there are no active'
            'units.', self)
        return inputs
      assert inputs.shape[-1] == self.n_units
      n_dims = len(inputs.shape)
      # Per-unit mean over every axis except the last (units) axis.
      c_mean = tf.reduce_mean(inputs, axis=list(range(n_dims - 1)))
      for i in self._active_units:
        output[Ellipsis, i].assign(tf.broadcast_to(c_mean[i], output.shape[:-1]))
    return output.read_value()

  def compute_output_shape(self, input_shape):
    return input_shape

  def get_config(self):
    config = {
        'is_replacing': self.is_replacing,
        '_active_units': self._active_units,
    }
    # Only add TensorFlow-specific parameters if they are set, so as to preserve
    # model compatibility with external Keras.
    base_config = super(MeanReplacer, self).get_config()
    return dict(list(base_config.items()) + list(config.items()))

  def __repr__(self):
    return ('MeanReplacer object, name=%s, '
            'is_replacing=%s, active_units=%s') % (self.name, self.is_replacing,
                                                   self._active_units)
| |
# Python 3 migration-ify
# (20200910/vicchi)
# import types
import os
import os.path
import csv
import json
import geojson
import logging
import math
import tempfile
import urllib
import requests
import mapzen.whosonfirst.machinetag
import machinetag.elasticsearch.hierarchy
import mapzen.whosonfirst.utils
import mapzen.whosonfirst.placetypes
import mapzen.whosonfirst.elasticsearch
import mapzen.whosonfirst.uri
class index(mapzen.whosonfirst.elasticsearch.index):
def __init__(self, **kwargs):
mapzen.whosonfirst.elasticsearch.index.__init__(self, **kwargs)
def prepare_feature(self, feature):
props = feature['properties']
id = props['wof:id']
doctype = props['wof:placetype']
body = self.prepare_geojson(feature)
return {
'id': id,
'index': self.index,
'doc_type': doctype,
'body': body
}
# https://stackoverflow.com/questions/20288770/how-to-use-bulk-api-to-store-the-keywords-in-es-by-using-python
def prepare_feature_bulk(self, feature):
props = feature['properties']
id = props['wof:id']
doctype = props['wof:placetype']
body = self.prepare_geojson(feature)
return {
'_id': id,
'_index': self.index,
'_type': doctype,
'_source': body
}
def prepare_geojson(self, geojson_doc):
props = geojson_doc['properties']
# Store a stringified bounding box so that tools like
# the spelunker can zoom to extent and stuff like that
# (20150730/thisisaaronland)
bbox = geojson_doc.get('bbox', [])
# https://github.com/whosonfirst/py-mapzen-whosonfirst-search/issues/25
if len(bbox) == 4:
minlon, minlat, maxlon, maxlat = bbox
props['geom:min_latitude'] = minlat
props['geom:min_longitude'] = minlon
props['geom:max_latitude'] = maxlat
props['geom:max_longitude'] = maxlon
bbox = map(str, bbox) # oh python...
bbox = ",".join(bbox)
props['geom:bbox'] = bbox
# ggggggrgrgrgrgrhhnhnnnhnhnhnhnhhzzzzpphphtttt - we shouldn't
# have to do this but even with the enstringification below
# ES tries to be too clever by half so in the interests of just
# getting stuff done we're going to be ruthless about things...
# (21050806/thisisaaronland)
# I mean seriously... what????
#
# DEBUG:urllib3.connectionpool:Starting new HTTP connection (1): localhost
# DEBUG:urllib3.connectionpool:http://localhost:9200 "POST /collection/region/85688637 HTTP/1.1" 400 313
# ERROR:root:failed to do_index http://localhost:9200/collection/region/85688637: 400 {"error":{"root_cause":[{"type":
# "mapper_parsing_exception","reason":"failed to parse [ne:wikipedia]"}],"type":"mapper_parsing_exception","reason":
# "failed to parse [ne:wikipedia]","caused_by":{"type":"number_format_exception","reason":"For input string:
# "http://en.wikipedia.org/wiki/California\""}},"status":400}
# DEBUG:mapzen.whosonfirst.elasticsearch:Finished call to 'mapzen.whosonfirst.elasticsearch.do_index' after 5.206(s),
# this was the 2nd time calling it.
#
# some or all of this should/might be addressed by @vicchi's work to generate
# JSON schemas from the whosonfirst-properties repo which in turn could be used
# to generate ES schemas that don't require a "oh god, just fuck it and give up"
# kill list but something something something yak-shaving something something something
#
# see also: https://whosonfirst.org/blog/2018/05/25/three-steps-backwards/
# https://github.com/whosonfirst/whosonfirst-json-schema
#
# (20181212/thisisaaronland)
omgwtf = (
u'ne:fips_10_',
u'ne:gdp_md_est',
u'ne:geou_dif',
u'ne:pop_est',
u'ne:su_dif',
u'ne:FIPS_10_',
u'ne:ISO_A3_EH',
u'ne:ISO_A3',
u'ne:adm0_dif',
u'ne:wikipedia',
u'ne:level',
u'fsgov:ajo_pvm',
u'statoids:as_of_date',
u'src:population:date', # https://github.com/whosonfirst-data/whosonfirst-data/issues/1662
u'src:population_date', # https://github.com/whosonfirst-data/whosonfirst-data/issues/1662
u'de-bkg:wsk', # https://github.com/whosonfirst/py-mapzen-whosonfirst-search/issues/27
u'ne:WB_A2',
)
for bbq in omgwtf:
# Python 3 migration-ify
# (20200910/vicchi)
if bbq in props:
logging.debug("remove tag '%s' because ES suffers from E_EXCESSIVE_CLEVERNESS" % bbq)
del(props[bbq])
# alt placetype names/ID
placetype = props['wof:placetype']
try:
placetype = mapzen.whosonfirst.placetypes.placetype(placetype)
placetype_id = placetype.id()
placetype_names = []
# Python 3 migration-ify
# (20200910/vicchi)
names = placetype.names()
for n in names:
placetype_names.append(str(n))
props['wof:placetype_id'] = placetype_id
props['wof:placetype_names'] = placetype_names
# Python 3 migration-ify
# (20200910/vicchi)
except Exception as e:
logging.debug("Invalid or unknown placetype (%s) - %s" % (placetype, e))
# Dates
# there used to be code here to set "private" ES date fields for EDTF stuff
# but it has been removed in favour of explicit date:inception/cessation_upper/lower
# properties that get added by py-mapzen-whosonfirst-export 0.9.9 +
# (20180504/thisisaaronland)
# wof categories
wof_categories = []
for tag in props.get('wof:categories', []):
mt = mapzen.whosonfirst.machinetag.machinetag(tag)
if not mt.is_machinetag():
logging.warning("%s is not a valid wof:categories machine tag, skipping" % tag)
continue
enpathified = machinetag.elasticsearch.hierarchy.enpathify_from_machinetag(mt)
if not enpathified in wof_categories:
wof_categories.append(enpathified)
props["wof:categories"] = wof_categories
# mz categories
mz_categories = []
for tag in props.get('mz:categories', []):
mt = mapzen.whosonfirst.machinetag.machinetag(tag)
if not mt.is_machinetag():
logging.warning("%s is not a valid wof:categories machine tag, skipping" % tag)
continue
enpathified = machinetag.elasticsearch.hierarchy.enpathify_from_machinetag(mt)
if not enpathified in mz_categories:
mz_categories.append(enpathified)
props["mz:categories"] = mz_categories
# simplegeo categories
sg_categories = []
for tag in props.get('sg:categories', []):
mt = mapzen.whosonfirst.machinetag.machinetag(tag)
if not mt.is_machinetag():
logging.warning("%s is not a valid sg:categories machine tag, skipping" % tag)
continue
enpathified = machinetag.elasticsearch.hierarchy.enpathify_from_machinetag(mt)
if not enpathified in sg_categories:
sg_categories.append(enpathified)
# old historical stuff that we may ignore/purge in time... but
# not today (20160613/thisisaaronland)
stz = mapzen.whosonfirst.machinetag.sanitize()
for cl in props.get('sg:classifiers', []):
sg_type = cl.get('type', '')
sg_category = cl.get('category', '')
sg_subcategory = cl.get('subcategory', '')
clean_type = stz.filter_namespace(sg_type)
clean_category = stz.filter_predicate(sg_category)
clean_subcategory = stz.filter_value(sg_subcategory)
tags = []
mt = "sg:%s=%s" % (clean_type, clean_category)
tags.append(mt)
if clean_subcategory != "":
mt = "%s:%s=%s" % (clean_type, clean_category, clean_subcategory)
tags.append(mt)
for t in tags:
mt = mapzen.whosonfirst.machinetag.machinetag(t)
if not mt.is_machinetag():
logging.warning("sg category fails machinetag test: '%s' (%s)" % (t, cl))
continue
enpathified = machinetag.elasticsearch.hierarchy.enpathify_from_machinetag(mt)
if not enpathified in sg_categories:
sg_categories.append(enpathified)
props["sg:categories"] = sg_categories
# Concordances
conc = props.get('wof:concordances', {})
# Because Boundary Issues was careless with how it encoded 'array()'
# See: https://github.com/whosonfirst/whosonfirst-www-boundaryissues/commit/436607e41b51890080064515582240bbedda633f
# (20161031/dphiffer)
if conc == []:
logging.warning("FIX %d concordances encoded as []" % props['wof:id'])
conc = {}
# So this may go away if we can ever figure out a simple way to facet on the
# set of unique keys for _all_ `wof:concordances` blobs but today we can't so
# this is faster and easier than standing around in ES-quicksand...
# (20160518/thisisaaronland)
# Python 3 migration-ify
# (20200910/vicchi)
props['wof:concordances_sources'] = list(conc.keys())
# Misc counters
# https://github.com/whosonfirst/py-mapzen-whosonfirst-search/issues/13
props['counts:concordances_total'] = len(conc.items())
# https://github.com/whosonfirst/py-mapzen-whosonfirst-search/issues/14
langs_official = props.get('wof:lang_x_official', [])
langs_spoken = props.get('wof:lang_x_spoken', [])
props['counts:languages_official'] = len(langs_official)
props['counts:languages_spoken'] = len(langs_spoken)
count_langs = len(langs_official)
for lang in langs_spoken:
if not lang in langs_official:
count_langs += 1
props['counts:languages_total'] = count_langs
# https://github.com/whosonfirst/py-mapzen-whosonfirst-search/issues/15
count_names_total = 0
count_names_prefered = 0
count_names_variant = 0
count_names_colloquial = 0
count_names_languages = 0
name_langs = []
translations = []
for k, v in props.items():
if not k.startswith("name:"):
continue
count_names = len(v)
count_names_total += count_names
# https://github.com/whosonfirst/whosonfirst-names/issues/3
try:
k = k.replace("name:", "")
parts = k.split("_x_")
lang, qualifier = parts
# eng
if not lang in translations:
translations.append(lang)
# eng_x_prefered
if not k in translations:
translations.append(k)
# Python 3 migration-ify
# (20200910/vicchi)
except Exception as e:
logging.warning("failed to parse '%s', because %s" % (k, e))
continue
if not lang in name_langs:
count_names_languages += 1
name_langs.append(lang)
if qualifier == 'prefered':
count_names_prefered += count_names
elif qualifier == 'variant':
count_names_variant += count_names
elif qualifier == 'colloquial':
count_names_colloquial += count_names
else:
pass
props['translations'] = translations
props['counts:names_total'] = count_names_total
props['counts:names_prefered'] = count_names_prefered
props['counts:names_variant'] = count_names_variant
props['counts:names_colloquial'] = count_names_colloquial
props['counts:names_languages'] = len(name_langs)
# https://github.com/whosonfirst/py-mapzen-whosonfirst-search/issues/3
try:
props['geom:type'] = geojson_doc['geometry']['type']
# Python 3 migration-ify
# (20200910/vicchi)
except Exception as e:
wofid = props["wof:id"]
logging.error("Hey wait a minute... %s is missing a geometry.type property" % wofid)
# Python 3 migration-ify
# (20200910/vicchi)
raise Exception(e)
# because ES suffers from E_EXCESSIVE_CLEVERNESS
# Because, for a time, Boundary Issues did not have the capacity to
# *remove* properties, and I was incorrectly setting edtf:deprecated
# to 'uuuu'. (20161103/dphiffer)
if "edtf:deprecated" in props and props['edtf:deprecated'] in ("uuuu", ""):
logging.debug("FIX %d edtf:deprecated set to uuuu" % props['wof:id'])
del props['edtf:deprecated']
# Python 3 migration-ify
# (20200910/vicchi)
fh = tempfile.NamedTemporaryFile('w+')
tmpname = fh.name
json.dump(geojson_doc, fh)
fsize = os.stat(tmpname).st_size
fh.close()
# this should never happen because fh.close should
# remove the file but just in case...
if os.path.exists(tmpname):
os.unlink(tmpname)
props['mz:filesize'] = fsize
#
props = self.enstringify(props)
return props
def enstringify(self, data, **kwargs):
    """Recursively normalize property values before indexing.

    Dicts and lists are walked recursively (dict keys are passed down
    via the ``key`` kwarg); ``None`` becomes ``""``.  Scalar values are
    cast to int or float when their key is in one of the whitelists
    below; anything else is returned unchanged.
    """
    # keys whose values must be indexed as integers
    ima_int = (
        'continent_id',
        'country_id',
        'county_id',
        'gn:elevation',
        'gn:population',
        'gn:id',
        'gp:id',
        'locality_id',
        'neighbourhood_id',
        'region_id',
        'wof:id',
        'wof:belongsto',
        'wof:breaches',
        'wof:lastmodified',
        'wof:megacity',
        'wof:placetype_id',
        'wof:population',
        'wof:scale',
        'wof:superseded_by',
        'wof:supersedes',
        'zs:pop10',
    )
    # keys whose values must be indexed as floats
    ima_float = (
        'geom:area',
        'geom:latitude',
        'geom:longitude',
        'lbl:latitude',
        'lbl:longitude',
        'mps:latitude',
        'mps:longitude',
        'mz:min_zoom',
        'mz:max_zoom',
    )
    # key *prefixes* coerced to int — currently none
    ima_int_wildcard = (
    )
    # key *prefixes* coerced to float (e.g. all 'ne:' properties)
    ima_float_wildcard = (
        'ne:',
    )
    # Python 3 migration-ify
    # (20200910/vicchi)
    if isinstance(data, dict):
        # recurse into each value, passing the key so scalars can be typed
        for k, v in data.items():
            v = self.enstringify(v, key=k)
            data[k] = v
        return data
    # Python 3 migration-ify
    # (20200910/vicchi)
    elif isinstance(data, list):
        # recurse element-wise; the parent key (if any) applies to all items
        str_data = []
        for thing in data:
            str_data.append(self.enstringify(thing, **kwargs))
        return str_data
    # Python 3 migration-ify
    # (20200910/vicchi)
    elif isinstance(data, type(None)):
        return ""
    else:
        k = kwargs.get('key', None)
        logging.debug("processing %s: %s" % (k,data))
        if k and k in ima_int:
            # empty strings count as zero rather than failing the cast
            if data == '':
                return 0
            # I seriously hate you, Python...
            # int('579.0')
            # Traceback (most recent call last):
            # File "<stdin>", line 1, in <module>
            # int(float('589.0'))
            # 589
            #
            # (20181029/thisisaaronland)
            return int(float(data))
        elif k and k in ima_float:
            if data == '':
                return 0.0
            return float(data)
        else:
            if k:
                # NOTE: on a failed int cast we deliberately fall through to
                # the float wildcard loop below rather than returning.
                for fl_k in ima_int_wildcard:
                    if k.startswith(fl_k):
                        try:
                            data = int(data)
                            return data
                        # Python 3 migration-ify
                        # (20200910/vicchi)
                        except Exception as e:
                            # k.encode('utf8') logs a bytes repr under Py3 — debug only
                            logging.debug("failed to convert %s to an int because %s" % (k.encode('utf8'), e))
                for fl_k in ima_float_wildcard:
                    if k.startswith(fl_k):
                        try:
                            data = float(data)
                            return data
                        # Python 3 migration-ify
                        # (20200910/vicchi)
                        except Exception as e:
                            logging.debug("failed to convert %s to a float because %s" % (k.encode('utf8'), e))
            return data
def load_file(self, f):
    """Open path ``f`` and parse it as GeoJSON.

    Raises ``Exception`` (wrapping the original error) on failure,
    matching the behaviour existing callers rely on.
    """
    try:
        # 'with' guarantees the handle is closed; the original
        # implementation leaked the open file handle.
        with open(f, 'r') as fh:
            return geojson.load(fh)
    # Python 3 migration-ify
    # (20200910/vicchi)
    except Exception as e:
        logging.error("failed to open %s, because %s" % (f, e))
        raise Exception(e)
def prepare_file(self, f):
    """Load one GeoJSON file and return its indexing-ready document."""
    feature = self.load_file(f)
    return self.prepare_feature(feature)
def prepare_file_bulk(self, f):
    """Load ``f`` and transform it into a bulk-indexing document."""
    logging.debug("prepare file %s" % f)
    doc = self.load_file(f)
    try:
        doc = self.prepare_feature_bulk(doc)
        logging.debug("yield %s" % doc)
        return doc
    # Python 3 migration-ify
    # (20200910/vicchi)
    except Exception as e:
        logging.warning("failed to prepare data for %s because %s" % (f, e))
        raise Exception(e)
def prepare_files_bulk(self, files):
    """Yield bulk-ready documents for every non-alt file in ``files``."""
    for candidate in files:
        logging.debug("prepare file %s" % candidate)
        # alternate-geometry files are never indexed
        if mapzen.whosonfirst.uri.is_alt_file(candidate):
            logging.warning("%s is an alt file so not indexing it" % candidate)
            continue
        prepped = self.prepare_file_bulk(candidate)
        logging.debug("yield %s" % prepped)
        yield prepped
def index_feature(self, feature):
    """Prepare a single GeoJSON feature and index it."""
    return self.index_document(self.prepare_feature(feature))
def index_file(self, path):
    """Index one file by path; alt files are skipped and report ``False``."""
    if mapzen.whosonfirst.uri.is_alt_file(path):
        logging.warning("%s is an alt file so not indexing it" % path)
        return False
    abspath = os.path.abspath(path)
    return self.index_document(self.prepare_file(abspath))
def index_files(self, files):
    """Bulk-index an iterable of file paths.

    Returns whatever the bulk indexer returns.
    """
    # renamed from 'iter', which shadowed the builtin
    docs = self.prepare_files_bulk(files)
    return self.index_documents_bulk(docs)
def index_filelist(self, path, **kwargs):
    """Bulk-index the files listed (one path per line) in ``path``.

    Keyword arguments:
        prefix -- optional directory joined onto every listed path.
    """
    def mk_files(fh):
        # generator over usable paths in the list file; 'p' avoids the
        # original shadowing of the outer 'path' argument
        for ln in fh.readlines():
            p = ln.strip()
            if kwargs.get('prefix', None):
                p = os.path.join(kwargs['prefix'], p)
            if mapzen.whosonfirst.uri.is_alt_file(p):
                logging.warning("%s is an alt file so not indexing it" % p)
                continue
            logging.debug("index %s" % p)
            yield p
    # The original leaked the file handle.  index_documents_bulk consumes
    # the generator before this 'with' block exits, so closing here is safe.
    with open(path, 'r') as fh:
        # renamed from 'iter', which shadowed the builtin
        docs = self.prepare_files_bulk(mk_files(fh))
        return self.index_documents_bulk(docs)
def delete_feature(self, feature):
    """Remove the ES document corresponding to a GeoJSON feature."""
    props = feature['properties']
    self.delete_document({
        'id': props['wof:id'],
        'index': self.index,
        'doc_type': props['wof:placetype'],
        'refresh': True
    })
class search(mapzen.whosonfirst.elasticsearch.search):
    """WOF-specific wrapper around the generic ES search class."""

    def __init__(self, **kwargs):
        # mapzen.whosonfirst.elasticsearch.query.__init__(self, **kwargs)
        super().__init__(**kwargs)

    def enfeaturify(self, row):
        """Rebuild a GeoJSON-style feature dict from an ES search hit."""
        properties = row['_source']
        id = properties['wof:id']
        geom = {}
        bbox = []
        lat = None
        lon = None
        if not properties.get('wof:path', False):
            path = mapzen.whosonfirst.utils.id2relpath(id)
            properties['wof:path'] = path
        if properties.get('geom:bbox', False):
            bbox = properties['geom:bbox']
            bbox = bbox.split(",")
        if properties.get('geom:latitude', False) and properties.get('geom:longitude', False):
            lat = properties['geom:latitude']
            # BUG FIX: this line used to overwrite 'lat' with the longitude,
            # leaving 'lon' as None so the venue Point below never fired.
            lon = properties['geom:longitude']
        elif len(bbox) == 4:
            pass  # derive centroid here...
        else:
            pass
        if properties.get('wof:placetype', None) == 'venue' and lat and lon:
            geom = {'type': 'Point', 'coordinates': [lon, lat]}
        return {
            'type': 'Feature',
            'id': id,
            'bbox': bbox,
            'geometry': geom,
            'properties': properties
        }
class query(search):
    """Deprecated alias kept for backwards compatibility; use ``search``."""

    def __init__(self, **kwargs):
        logging.warning("mapzen.whosonfirst.search.query is deprecated - please use mapzen.whosonfirst.search.search")
        super().__init__(**kwargs)
if __name__ == '__main__':
    # Placeholder CLI entry point; this module is meant to be imported.
    print("Please rewrite me")
| |
"""
Library for processing SHARAD data
Author: Cyril Grima <cyril.grima@gmail.com>
"""
import numpy as np
import pandas as pd
import raw
from scipy.interpolate import splrep, splev
import matplotlib.pyplot as plt
import scipy.constants as ct
from planetbody import mars, ellipsoid
import rsr.utils
import rsr.fit
import os
import string
import glob
from params import *
def calibration(val, wl=ct.c/frq, rng = False, abs_calib = abs_calib):
    """Return echo power calibrated into dB.

    Applies the absolute instrument calibration and, when ``rng`` is
    given, corrects for 2-way specular geometric losses.  Zero samples
    are flagged as NaN.  NOTE: ``val`` is modified in place, as in the
    original implementation.

    Arguments
    ---------
    val : float
        raw echo value(s)
    wl : float
        wavelength [m]
    rng : float
        range to the target [m]
    abs_calib : float
        absolute calibration value [dB]
    """
    val[val == 0] = np.nan  # zeros are invalid samples
    val = 10*np.log10(val)  # convert to dB
    if rng is False:
        geometric_loss = 0
    else:
        geometric_loss = 20*np.log10(8*np.pi*rng)
    return val + abs_calib + geometric_loss
def get_pik(orbit):
    """Extract surface echo values (linear amplitude) for an orbit.

    Arguments
    ---------
    orbit : string
        orbit number

    Returns a 1-D array with one value per radargram column; frames
    without a pick are NaN.
    """
    pik = raw.read_pik(orbit)
    rpb = raw.read_rpb(orbit)
    # np.round returns floats; slice indices must be ints (modern numpy
    # raises on float slices, which the old bare 'except' silently
    # turned into NaN for every single frame).
    z = (np.round(pik.delay_pixel) - 1).astype(int).values
    x = (pik.frame - 1).values.tolist()
    echo = np.zeros(np.size(x))
    for i, val in enumerate(x):
        try:
            echo[i] = rpb[z[i]-2:z[i]+2, val].max()
        except Exception:
            # out-of-range pick: flag as bad rather than crash
            echo[i] = np.nan
    # np.empty left un-picked frames as uninitialized garbage;
    # fill with NaN so absent picks are explicit.
    out = np.full(rpb.shape[1], np.nan)
    out[x] = echo
    return out
def get_aux(orbit):
    """Interpolate auxilliary values to echo sampling.

    Arguments
    ---------
    orbit : string
        orbit number

    Returns a DataFrame with one row per radargram column.
    """
    aux = raw.read_aux(orbit)
    rpb = raw.read_rpb(orbit)
    xnew = np.arange(rpb.shape[1])
    x = np.linspace(0, rpb.shape[1]-1, aux.UTC.size)

    def resample(series):
        # cubic-spline resampling of one auxilliary channel onto the
        # echo sampling grid (replaces 13 copy-pasted splrep/splev pairs)
        return splev(xnew, splrep(x, series, s=0), der=0)

    # (aux column name, output column name)
    channels = [('lon', 'lon'), ('lat', 'lat'), ('radius', 'radius'),
                ('vtan', 'vtan'), ('vrad', 'vrad'), ('SZA', 'sza'),
                ('pitch', 'pitch'), ('yaw', 'yaw'), ('roll', 'roll'),
                ('Mag_field', 'mag'), ('HGAout', 'HGAout'),
                ('HGAin', 'HGAin'), ('Sun_dist', 'sun_dist')]
    out = {name: resample(aux[col]) for col, name in channels}
    # range to the reference ellipsoid [m] (radius is in km)
    out['rng'] = out['radius']*1e3 - ellipsoid.lonlat2rad(
        out['lon'], out['lat'], mars.radius['val'])
    return pd.DataFrame(out)
def get_srf(orbit, save=False):
    """Bundle, calibrate and optionally save pik + aux data.

    Arguments
    ---------
    orbit : string
        orbit number

    Keywords
    --------
    save : bool
        wether or not to save the results
    """
    aux = get_aux(orbit)
    pik = get_pik(orbit)
    pdb = calibration(pik, rng=aux.rng.values)
    # discard physically implausible power levels
    pdb[pdb < -200] = np.nan
    pdb[pdb > 10] = np.nan
    aux['amp'] = 10**(pdb/20.)
    if save is True:
        save_fil = srf_path + orbit.zfill(7) + '.srf.txt'
        aux.to_csv(save_fil, sep='\t', index=False, float_format='%.7f')
    return aux
def inline_rsr(orbit, fit_model='hk', inv='spm', winsize=1000., sampling=250., save=False, **kwargs):
    """Launch sliding RSR along a track.

    Arguments
    ---------
    orbit : string
        orbit number

    Keywords
    --------
    fit_model : string
        statistical model used for the fit
    inv : string
        inversion method
    winsize : float
        window size [frames]
    sampling : float
        window displacement [frames]
    save : bool
        wether or not to save the results
    """
    srf = get_srf(orbit, save=True)
    b = rsr.utils.inline_estim(srf.amp, fit_model=fit_model, inv=inv, frq=frq,
                               winsize=winsize, sampling=sampling, verbose=True)
    # positions of the computed statistics; cast to int so they can be
    # used as indices (np.round returns floats)
    xo = np.round(np.array(b.xo)).astype(int)
    for col in ('lat', 'lon', 'roll', 'rng', 'sza'):
        # .values[xo] replaces the long-removed DataFrame.ix indexer
        b[col] = srf[col].values[xo]
    if save is True:
        # str.replace(): the Python 2 string.replace() function no longer exists
        save_fil = os.getcwd().replace('code', 'targ') + '/rsr/' + \
                   orbit.zfill(7) + '.' + fit_model + '.' + inv
        title = orbit.zfill(7)
        b.to_csv(save_fil + '.txt', sep='\t', index=False, float_format='%.7f')
        rsr.utils.plot_inline(b, frq=frq, title=title)
        plt.savefig(save_fil + '.png', bbox_inches='tight')
    return b
def rsr_orbit(orbit, frames, title=True, color='k'):
    """Return RSR plot and statistics for a frame window along an orbit."""
    srf = pd.read_table(srf_path+orbit.zfill(7)+'.srf.txt')
    sample = srf.amp[frames[0]:frames[1]]
    # midpoint of the window, as an integer so it can index arrays
    # (the original float value raises under Python 3)
    x = frames[0] + (frames[1]-frames[0])//2
    # rsr.fit.hk_param0: bare 'fit' was a NameError (module is rsr.fit)
    out = rsr.fit.hk(sample, param0=rsr.fit.hk_param0(sample))
    print('Orbit '+orbit.zfill(7)+' [%i:%i]\nlat/lon: %.3f/%.3f'
          % (frames[0], frames[1], srf.lat.values[x],
             srf.lon.values[x]))
    print('')
    out.report(frq=20e6)
    out.plot(bins=50, color=color)
    if title is not False:
        plt.title('Orbit '+orbit.zfill(7)+' [%i:%i]\nlat/lon: %.3f/%.3f'
                  % (frames[0], frames[1], srf.lat.values[x], srf.lon.values[x]))
def do_rsr(orbit, frames, title=True, color='k'):
    """Return RSR plot and statistics for a frame window along an orbit."""
    srf = pd.read_table(srf_path+orbit.zfill(7)+'.srf.txt')
    sample = srf.amp[frames[0]:frames[1]]
    # midpoint of the window, as an integer so it can index arrays
    # (the original float value raises under Python 3)
    x = frames[0] + (frames[1]-frames[0])//2
    out = rsr.fit.lmfit(sample)
    print('Orbit '+orbit.zfill(7)+' [%i:%i]\nlat/lon: %.3f/%.3f'
          % (frames[0], frames[1], srf.lat.values[x],
             srf.lon.values[x]))
    print('')
    out.report(frq=20e6)
    out.plot(color=color)
    if title is not False:
        plt.title('Orbit '+orbit.zfill(7)+' [%i:%i]\nlat/lon: %.3f/%.3f'
                  % (frames[0], frames[1], srf.lat.values[x], srf.lon.values[x]))
    return out
def group_rsr(suffix, save=True):
    """Concatenate per-orbit RSR result files into one table.

    Arguments
    ---------
    suffix : string
        filename suffix to glob for

    Keywords
    --------
    save : bool
        wether or not to save the concatenated table
    """
    fils = glob.glob(rsr_path + '[!all]*' + suffix)
    fils.sort()
    tables = []
    for fil in fils:
        a = pd.read_table(fil)
        # Broadcast a scalar: the old np.empty().fill(str) sequence raises
        # ValueError (string into a float array) under Python 3 / numpy.
        a['orbit'] = float(fil.split('/')[-1].split('.')[0])
        tables.append(a)
    # single concat replaces the "'out' not in locals()" accumulator hack
    out = pd.concat(tables)
    if save is True:
        filename = rsr_path + '/' + 'all' + suffix.replace('*','')
        out.to_csv(filename, sep='\t', index=False, float_format='%.7f')
    return out
| |
# Copyright (c) 2018-2020, Manfred Moitzi
# License: MIT License
import pytest
import ezdxf
import math
from ezdxf.math import (
ConstructionArc,
ConstructionCircle,
ConstructionLine,
UCS,
Vec3,
Vec2,
arc_segment_count,
)
from math import isclose
def test_arc_from_2p_angle_complex():
    """from_2p_angle reproduces a known arc from real-world coordinates."""
    p1 = (-15.73335, 10.98719)
    p2 = (-12.67722, 8.76554)
    angle = 55.247230
    arc = ConstructionArc.from_2p_angle(
        start_point=p1, end_point=p2, angle=angle
    )
    # expected values computed externally (CAD reference)
    arc_result = ConstructionArc(
        center=(-12.08260, 12.79635),
        radius=4.07443,
        start_angle=-153.638906,
        end_angle=-98.391676,
    )
    assert arc.center.isclose(arc_result.center, abs_tol=1e-5)
    assert isclose(arc.radius, arc_result.radius, abs_tol=1e-5)
    assert isclose(arc.start_angle, arc_result.start_angle, abs_tol=1e-4)
    assert isclose(arc.end_angle, arc_result.end_angle, abs_tol=1e-4)
def test_arc_from_2p_angle_simple():
    """Swapping start/end points mirrors the arc about the chord."""
    p1 = (2, 1)
    p2 = (0, 3)
    angle = 90
    arc = ConstructionArc.from_2p_angle(
        start_point=p1, end_point=p2, angle=angle
    )
    assert arc.center.isclose((0, 1))
    assert isclose(arc.radius, 2)
    assert isclose(arc.start_angle, 0, abs_tol=1e-12)
    assert isclose(arc.end_angle, 90)
    # reversed point order -> mirrored center and angles
    arc = ConstructionArc.from_2p_angle(
        start_point=p2, end_point=p1, angle=angle
    )
    assert arc.center.isclose((2, 3))
    assert isclose(arc.radius, 2)
    assert isclose(arc.start_angle, 180)
    assert isclose(arc.end_angle, -90)
def test_arc_from_2p_radius():
    """from_2p_radius builds the same arcs as from_2p_angle for r=2."""
    p1 = (2, 1)
    p2 = (0, 3)
    radius = 2
    arc = ConstructionArc.from_2p_radius(
        start_point=p1, end_point=p2, radius=radius
    )
    assert arc.center.isclose((0, 1))
    assert isclose(arc.radius, radius)
    assert isclose(arc.start_angle, 0)
    assert isclose(arc.end_angle, 90)
    # reversed point order -> mirrored center and angles
    arc = ConstructionArc.from_2p_radius(
        start_point=p2, end_point=p1, radius=radius
    )
    assert arc.center.isclose((2, 3))
    assert isclose(arc.radius, radius)
    assert isclose(arc.start_angle, 180)
    assert isclose(arc.end_angle, -90)
def test_arc_from_3p():
    """from_3p matches the reference arc built from explicit parameters."""
    p1 = (-15.73335, 10.98719)
    p2 = (-12.67722, 8.76554)
    p3 = (-8.00817, 12.79635)
    arc = ConstructionArc.from_3p(start_point=p1, end_point=p2, def_point=p3)
    arc_result = ConstructionArc(
        center=(-12.08260, 12.79635),
        radius=4.07443,
        start_angle=-153.638906,
        end_angle=-98.391676,
    )
    assert arc.center.isclose(arc_result.center, abs_tol=1e-5)
    assert isclose(arc.radius, arc_result.radius, abs_tol=1e-5)
    assert isclose(arc.start_angle, arc_result.start_angle, abs_tol=1e-4)
    assert isclose(arc.end_angle, arc_result.end_angle, abs_tol=1e-4)
def test_spatial_arc_from_3p():
    """A 3-point arc defined in a UCS maps correctly into WCS DXF output."""
    start_point_wcs = Vec3(0, 1, 0)
    end_point_wcs = Vec3(1, 0, 0)
    def_point_wcs = Vec3(0, 0, 1)
    ucs = UCS.from_x_axis_and_point_in_xy(
        origin=def_point_wcs,
        axis=end_point_wcs - def_point_wcs,
        point=start_point_wcs,
    )
    start_point_ucs = ucs.from_wcs(start_point_wcs)
    end_point_ucs = ucs.from_wcs(end_point_wcs)
    def_point_ucs = Vec3(0, 0)  # origin of the UCS by construction
    arc = ConstructionArc.from_3p(start_point_ucs, end_point_ucs, def_point_ucs)
    dwg = ezdxf.new("R12")
    msp = dwg.modelspace()
    dxf_arc = arc.add_to_layout(msp, ucs)
    assert dxf_arc.dxftype() == "ARC"
    assert isclose(dxf_arc.dxf.radius, 0.81649658, abs_tol=1e-9)
    assert isclose(dxf_arc.dxf.start_angle, -30)
    assert isclose(dxf_arc.dxf.end_angle, -150)
    assert dxf_arc.dxf.extrusion.isclose(
        (0.57735027, 0.57735027, 0.57735027), abs_tol=1e-9
    )
def test_bounding_box():
    """Bounding box accounts for quadrant extremes crossed by the arc."""
    bbox = ConstructionArc(
        center=(0, 0), radius=1, start_angle=0, end_angle=90
    ).bounding_box
    assert bbox.extmin.isclose((0, 0))
    assert bbox.extmax.isclose((1, 1))
    bbox = ConstructionArc(
        center=(0, 0), radius=1, start_angle=0, end_angle=180
    ).bounding_box
    assert bbox.extmin.isclose((-1, 0))
    assert bbox.extmax.isclose((1, 1))
    # arc crossing the 0-degree axis
    bbox = ConstructionArc(
        center=(0, 0), radius=1, start_angle=270, end_angle=90
    ).bounding_box
    assert bbox.extmin.isclose((0, -1))
    assert bbox.extmax.isclose((1, 1))
def test_angles():
    """angles(n) yields n evenly spaced angles, normalized to [0, 360)."""
    arc = ConstructionArc(radius=1, start_angle=30, end_angle=60)
    assert tuple(arc.angles(2)) == (30, 60)
    assert tuple(arc.angles(3)) == (30, 45, 60)
    # span crossing 0 degrees counter-clockwise
    arc.start_angle = 180
    arc.end_angle = 0
    assert tuple(arc.angles(2)) == (180, 0)
    assert tuple(arc.angles(3)) == (180, 270, 0)
    # negative input angles are normalized
    arc.start_angle = -90
    arc.end_angle = -180
    assert tuple(arc.angles(2)) == (270, 180)
    assert tuple(arc.angles(4)) == (270, 0, 90, 180)
def test_vertices():
    """vertices() returns points on the circle offset by the center."""
    angles = [0, 45, 90, 135, -45, -90, -135, 180]
    arc = ConstructionArc(center=(1, 1))
    vertices = list(arc.vertices(angles))
    for v, a in zip(vertices, angles):
        a = math.radians(a)
        assert v.isclose(Vec2((1 + math.cos(a), 1 + math.sin(a))))
def test_tangents():
    """tangents() yields unit tangent vectors (counter-clockwise sense)."""
    angles = [0, 45, 90, 135, -45, -90, -135, 180]
    sin45 = math.sin(math.pi / 4)
    # expected tangent directions, one per input angle
    result = [
        (0, 1),
        (-sin45, sin45),
        (-1, 0),
        (-sin45, -sin45),
        (sin45, sin45),
        (1, 0),
        (sin45, -sin45),
        (0, -1),
    ]
    arc = ConstructionArc(center=(1, 1))
    vertices = list(arc.tangents(angles))
    for v, r in zip(vertices, result):
        assert v.isclose(Vec2(r))
def test_angle_span():
    """angle_span honors winding direction and 0-degree crossings."""
    assert ConstructionArc(start_angle=30, end_angle=270).angle_span == 240
    # crossing 0-degree:
    assert (
        ConstructionArc(
            start_angle=30, end_angle=270, is_counter_clockwise=False
        ).angle_span
        == 120
    )
    # crossing 0-degree:
    assert ConstructionArc(start_angle=300, end_angle=60).angle_span == 120
    assert (
        ConstructionArc(
            start_angle=300, end_angle=60, is_counter_clockwise=False
        ).angle_span
        == 240
    )
def test_arc_segment_count():
    """Segment count keeps the per-segment sagitta just under the limit."""
    radius = 100
    max_sagitta = 2
    assert arc_segment_count(radius, math.tau, max_sagitta) == 16
    # verify the resulting sagitta actually satisfies the bound
    alpha = math.tau / 16
    l2 = math.sin(alpha / 2) * radius
    sagitta = radius - math.sqrt(radius ** 2 - l2 ** 2)
    assert max_sagitta / 2 < sagitta < max_sagitta
@pytest.mark.parametrize(
    "r, s, e, sagitta, count",
    [
        (1, 0, 180, 0.35, 3),
        (1, 0, 180, 0.10, 5),
        (0, 0, 360, 0.10, 0),  # radius 0 works but yields nothing
        (-1, 0, 180, 0.35, 3),  # negative radius same as positive radius
        (1, 270, 90, 0.10, 5),  # start angle > end angle
        (1, 90, -90, 0.10, 5),
        (1, 0, 0, 0.10, 0),  # angle span 0 works but yields nothing
        (1, -45, -45, 0.10, 0),
    ],
)
def test_flattening(r, s, e, sagitta, count):
    """flattening() yields the expected vertex count for each arc shape."""
    arc = ConstructionArc((0, 0), r, s, e)
    assert len(list(arc.flattening(sagitta))) == count
@pytest.mark.parametrize("p", [(2, 0), (2, 2), (0, 2), (2, -2), (0, -2)])
def test_point_is_in_arc_range(p):
    """
    Test if the angle defined by arc.center and point "p" is in the range
    arc.start_angle to arc.end_angle:
    """
    arc = ConstructionArc((0, 0), 1, -90, 90)
    assert arc._is_point_in_arc_range(Vec2(p)) is True
@pytest.mark.parametrize("p", [(-2, 0), (-2, 2), (-2, -2)])
def test_point_is_not_in_arc_range(p):
    """
    Test if the angle defined by arc.center and point "p" is NOT in the range
    arc.start_angle to arc.end_angle:
    """
    arc = ConstructionArc((0, 0), 1, -90, 90)
    assert arc._is_point_in_arc_range(Vec2(p)) is False
@pytest.mark.parametrize(
    "s, e",
    [
        [(0, 0), (2, 0)],  # touches the arc
        [(0, 0), (3, 0)],  # intersect
        [(0, 0), (0, 2)],  # touches the arc
        [(0, 0), (0, 3)],  # intersect
        [(0, 0), (2, 2)],  # intersect
        [(0, -1), (2, -1)],  # intersect
    ],
)
def test_arc_intersect_line_in_one_point(s, e):
    """Lines from s to e meet the right half-circle arc exactly once."""
    arc = ConstructionArc((0, 0), 2, -90, 90)
    assert len(arc.intersect_line(ConstructionLine(s, e))) == 1
@pytest.mark.parametrize(
    "s, e",
    [
        [(-2, 0), (2, 0)],  # touches
        [(-2, 1), (2, 1)],  # intersect
    ],
)
def test_arc_intersect_line_in_two_points(s, e):
    """Horizontal lines cross the upper half-circle arc exactly twice."""
    arc = ConstructionArc((0, 0), 2, 0, 180)
    assert len(arc.intersect_line(ConstructionLine(s, e))) == 2
@pytest.mark.parametrize(
    "s, e",
    [
        [(0, 2), (1, 2)],
        [(2, 0), (2, 1)],
        [(1, 1), (2, 2)],
    ],
)
def test_arc_does_not_intersect_line(s, e):
    """Lines lying fully outside the quarter arc produce no intersections."""
    arc = ConstructionArc((0, 0), 1, 0, 90)
    assert len(arc.intersect_line(ConstructionLine(s, e))) == 0
@pytest.mark.parametrize(
    "c, r",
    [
        [(0.0, 1.0), 1.0],
        [(0.0, 0.5), 0.5],
        [(2.0, 0.0), 1.0],
    ],
)
def test_arc_intersect_circle_in_one_point(c, r):
    """Circles tangent to (or ending on) the arc intersect it once."""
    arc = ConstructionArc((0, 0), 1, -90, 90)
    assert len(arc.intersect_circle(ConstructionCircle(c, r))) == 1
@pytest.mark.parametrize(
    "c, r",
    [
        [(1.0, 0.0), 1.0],
        [(0.5, 0.0), 1.0],
    ],
)
def test_arc_intersect_circle_in_two_points(c, r):
    """Overlapping circles cross the right half-circle arc twice."""
    arc = ConstructionArc((0, 0), 1, -90, 90)
    assert len(arc.intersect_circle(ConstructionCircle(c, r))) == 2
@pytest.mark.parametrize(
    "c, r",
    [
        [(0.0, 0.0), 0.5],  # concentric circle
        [(0.0, 0.0), 1.0],  # concentric circle
        [(0.0, 0.0), 2.0],  # concentric circle
        [(2.0, 0.0), 0.5],  # ) O
    ],
)
def test_arc_does_not_intersect_circle(c, r):
    """Concentric or fully separated circles never intersect the arc."""
    arc = ConstructionArc((0, 0), 1, -90, 90)
    assert len(arc.intersect_circle(ConstructionCircle(c, r))) == 0
@pytest.mark.parametrize(
    "c, r, s, e",
    [
        [(2.0, 0.0), 1.0, 90, 270],  # touches in one point: )(
        [(1.5, 0.0), 1.0, 90, 180],  # intersect
    ],
)
def test_arc_intersect_arc_in_one_point(c, r, s, e):
    """Arc/arc pairs meeting at a single point report one intersection."""
    arc = ConstructionArc((0, 0), 1, -90, 90)
    assert len(arc.intersect_arc(ConstructionArc(c, r, s, e))) == 1
@pytest.mark.parametrize(
    "c, r, s, e",
    [
        [(0.5, 0.0), 1.0, 90, 270],  # intersect
        [(1.5, 0.0), 1.0, 90, 270],  # intersect
    ],
)
def test_arc_intersect_arc_in_two_points(c, r, s, e):
    """Overlapping opposing arcs report two intersections."""
    arc = ConstructionArc((0, 0), 1, -90, 90)
    assert len(arc.intersect_arc(ConstructionArc(c, r, s, e))) == 2
@pytest.mark.parametrize(
    "c, r, s, e",
    [
        [(0.0, 0.0), 1.0, 90, 270],  # concentric arcs
        [(-0.5, 0.0), 1.0, 90, 270],  # intersects circle but not arc: ( )
    ],
)
def test_arc_does_not_intersect_arc(c, r, s, e):
    """Concentric or back-to-back arcs report zero intersections."""
    arc = ConstructionArc((0, 0), 1, -90, 90)
    assert len(arc.intersect_arc(ConstructionArc(c, r, s, e))) == 0
| |
"""
SoftLayer.tests.CLI.modules.server_tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This is a series of integration tests designed to test the complete
command line interface.
:license: MIT, see LICENSE for more details.
"""
import mock
from SoftLayer.CLI import exceptions
from SoftLayer import testing
import json
import tempfile
class ServerCLITests(testing.TestCase):
def test_server_cancel_reasons(self):
result = self.run_command(['server', 'cancel-reasons'])
output = json.loads(result.output)
self.assert_no_fail(result)
self.assertEqual(len(output), 10)
def test_server_details(self):
result = self.run_command(['server', 'detail', '1234',
'--passwords', '--price'])
expected = {
'cores': 2,
'created': '2013-08-01 15:23:45',
'datacenter': 'TEST00',
'guid': '1a2b3c-1701',
'domain': 'test.sftlyr.ws',
'hostname': 'hardware-test1',
'fqdn': 'hardware-test1.test.sftlyr.ws',
'id': 1000,
'ipmi_ip': '10.1.0.3',
'memory': 2048,
'notes': 'These are test notes.',
'os': 'Ubuntu',
'owner': 'chechu',
'price rate': 16.08,
'private_ip': '10.1.0.2',
'ptr': '2.0.1.10.in-addr.arpa',
'public_ip': '172.16.1.100',
'remote users': [{'password': 'abc123', 'ipmi_username': 'root'}],
'status': 'ACTIVE',
'tags': ['test_tag'],
'users': [{'password': 'abc123', 'username': 'root'}],
'vlans': [{'id': 9653, 'number': 1800, 'type': 'PRIVATE'},
{'id': 19082, 'number': 3672, 'type': 'PUBLIC'}]
}
self.assert_no_fail(result)
self.assertEqual(json.loads(result.output), expected)
def test_detail_vs_empty_tag(self):
mock = self.set_mock('SoftLayer_Hardware_Server', 'getObject')
mock.return_value = {
'id': 100,
'processorPhysicalCoreAmount': 2,
'memoryCapacity': 2,
'tagReferences': [
{'tag': {'name': 'example-tag'}},
{},
],
}
result = self.run_command(['server', 'detail', '100'])
self.assert_no_fail(result)
self.assertEqual(
json.loads(result.output)['tags'],
['example-tag'],
)
def test_list_servers(self):
result = self.run_command(['server', 'list', '--tag=openstack'])
expected = [
{
'datacenter': 'TEST00',
'primary_ip': '172.16.1.100',
'hostname': 'hardware-test1',
'id': 1000,
'backend_ip': '10.1.0.2',
'action': 'TXN_NAME',
},
{
'datacenter': 'TEST00',
'primary_ip': '172.16.4.94',
'hostname': 'hardware-test2',
'id': 1001,
'backend_ip': '10.1.0.3',
'action': None,
},
{
'datacenter': 'TEST00',
'primary_ip': '172.16.4.95',
'hostname': 'hardware-bad-memory',
'id': 1002,
'backend_ip': '10.1.0.4',
'action': None,
},
{
'action': None,
'backend_ip': None,
'datacenter': None,
'hostname': None,
'id': 1003,
'primary_ip': None,
},
]
self.assert_no_fail(result)
self.assertEqual(expected, json.loads(result.output))
@mock.patch('SoftLayer.CLI.formatting.no_going_back')
@mock.patch('SoftLayer.HardwareManager.reload')
def test_server_reload(self, reload_mock, ngb_mock):
ngb_mock.return_value = False
# Check the positive case
result = self.run_command(['--really', 'server', 'reload', '12345',
'--key=4567'])
self.assert_no_fail(result)
reload_mock.assert_called_with(12345, None, [4567])
# Now check to make sure we properly call CLIAbort in the negative case
result = self.run_command(['server', 'reload', '12345'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.CLIAbort)
@mock.patch('SoftLayer.CLI.formatting.no_going_back')
@mock.patch('SoftLayer.HardwareManager.cancel_hardware')
def test_cancel_server(self, cancel_mock, ngb_mock):
ngb_mock.return_value = False
# Check the positive case
result = self.run_command(['--really', 'server', 'cancel', '12345',
'--reason=Test', '--comment=Test'])
self.assert_no_fail(result)
cancel_mock.assert_called_with(12345, "Test", "Test", False)
# Test
result = self.run_command(['server', 'cancel', '12345',
'--reason=Test', '--comment=Test'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.CLIAbort)
@mock.patch('SoftLayer.CLI.formatting.confirm')
def test_server_power_off(self, confirm_mock):
# Check the positive case
result = self.run_command(['--really', 'server', 'power-off', '12345'])
self.assert_called_with('SoftLayer_Hardware_Server', 'powerOff',
identifier=12345)
# Now check to make sure we properly call CLIAbort in the negative case
confirm_mock.return_value = False
result = self.run_command(['server', 'power-off', '12345'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.CLIAbort)
def test_server_reboot_default(self):
result = self.run_command(['--really', 'server', 'reboot', '12345'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Hardware_Server', 'rebootDefault',
identifier=12345)
def test_server_reboot_soft(self):
result = self.run_command(['--really', 'server', 'reboot', '12345',
'--soft'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Hardware_Server', 'rebootSoft',
identifier=12345)
def test_server_reboot_hard(self):
result = self.run_command(['--really', 'server', 'reboot', '12345',
'--hard'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Hardware_Server', 'rebootHard',
identifier=12345)
@mock.patch('SoftLayer.CLI.formatting.confirm')
def test_server_reboot_negative(self, confirm_mock):
confirm_mock.return_value = False
result = self.run_command(['server', 'reboot', '12345'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.CLIAbort)
def test_server_power_on(self):
result = self.run_command(['--really', 'server', 'power-on', '12345'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Hardware_Server', 'powerOn',
identifier=12345)
def test_server_power_cycle(self):
result = self.run_command(['--really', 'server', 'power-cycle',
'12345'])
self.assert_no_fail(result)
self.assert_called_with('SoftLayer_Hardware_Server', 'powerCycle',
identifier=12345)
@mock.patch('SoftLayer.CLI.formatting.confirm')
def test_server_power_cycle_negative(self, confirm_mock):
confirm_mock.return_value = False
result = self.run_command(['server', 'power-cycle', '12345'])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.CLIAbort)
@mock.patch('SoftLayer.HardwareManager.verify_order')
def test_create_server_test_flag(self, verify_mock):
verify_mock.return_value = {
'prices': [
{
'recurringFee': 0.0,
'setupFee': 0.0,
'item': {'description': 'First Item'},
},
{
'recurringFee': 25.0,
'setupFee': 0.0,
'item': {'description': 'Second Item'},
}
]
}
result = self.run_command(['--really', 'server', 'create',
'--size=S1270_8GB_2X1TBSATA_NORAID',
'--hostname=test',
'--domain=example.com',
'--datacenter=TEST00',
'--port-speed=100',
'--os=UBUNTU_12_64',
'--test'],
fmt='raw')
self.assert_no_fail(result)
self.assertIn("First Item", result.output)
self.assertIn("Second Item", result.output)
self.assertIn("Total monthly cost", result.output)
def test_create_options(self):
result = self.run_command(['server', 'create-options'])
self.assert_no_fail(result)
expected = [
[{'datacenter': 'Washington 1', 'value': 'wdc01'}],
[{'size': 'Single Xeon 1270, 8GB Ram, 2x1TB SATA disks, Non-RAID',
'value': 'S1270_8GB_2X1TBSATA_NORAID'}],
[{'operating_system': 'Ubuntu / 14.04-64',
'value': 'UBUNTU_14_64'}],
[{'port_speed': '10 Mbps Public & Private Network Uplinks',
'value': '10'}],
[{'extras': '1 IPv6 Address', 'value': '1_IPV6_ADDRESS'}]]
self.assertEqual(json.loads(result.output), expected)
@mock.patch('SoftLayer.HardwareManager.place_order')
def test_create_server(self, order_mock):
order_mock.return_value = {
'orderId': 98765,
'orderDate': '2013-08-02 15:23:47'
}
result = self.run_command(['--really', 'server', 'create',
'--size=S1270_8GB_2X1TBSATA_NORAID',
'--hostname=test',
'--domain=example.com',
'--datacenter=TEST00',
'--port-speed=100',
'--os=UBUNTU_12_64',
'--no-public',
'--key=10',
])
self.assert_no_fail(result)
self.assertEqual(json.loads(result.output),
{'id': 98765, 'created': '2013-08-02 15:23:47'})
def test_create_server_missing_required(self):
# This is missing a required argument
result = self.run_command(['server', 'create',
# Note: no chassis id
'--hostname=test',
'--domain=example.com',
'--datacenter=TEST00',
'--network=100',
'--os=UBUNTU_12_64_MINIMAL',
])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, SystemExit)
@mock.patch('SoftLayer.CLI.template.export_to_template')
def test_create_server_with_export(self, export_mock):
result = self.run_command(['--really', 'server', 'create',
'--size=S1270_8GB_2X1TBSATA_NORAID',
'--hostname=test',
'--domain=example.com',
'--datacenter=TEST00',
'--port-speed=100',
'--os=UBUNTU_12_64',
'--no-public',
'--export=/path/to/test_file.txt'],
fmt='raw')
self.assert_no_fail(result)
self.assertIn("Successfully exported options to a template file.",
result.output)
export_mock.assert_called_with('/path/to/test_file.txt',
{'billing': 'hourly',
'datacenter': 'TEST00',
'domain': 'example.com',
'extra': (),
'hostname': 'test',
'key': (),
'os': 'UBUNTU_12_64',
'port_speed': 100,
'postinstall': None,
'size': 'S1270_8GB_2X1TBSATA_NORAID',
'test': False,
'no_public': True,
'wait': None,
'template': None},
exclude=['wait', 'test'])
def test_edit_server_userdata_and_file(self):
# Test both userdata and userfile at once
with tempfile.NamedTemporaryFile() as userfile:
result = self.run_command(['server', 'edit', '1000',
'--hostname=hardware-test1',
'--domain=test.sftlyr.ws',
'--userdata=My data',
'--userfile=%s' % userfile.name])
self.assertEqual(result.exit_code, 2)
self.assertIsInstance(result.exception, exceptions.ArgumentError)
def test_edit_server_userdata(self):
result = self.run_command(['server', 'edit', '1000',
'--hostname=hardware-test1',
'--domain=test.sftlyr.ws',
'--userdata=My data'])
self.assert_no_fail(result)
self.assertEqual(result.output, "")
self.assert_called_with('SoftLayer_Hardware_Server', 'editObject',
args=({'domain': 'test.sftlyr.ws',
'hostname': 'hardware-test1'},),
identifier=1000)
@mock.patch('SoftLayer.HardwareManager.edit')
def test_edit_server_failed(self, edit_mock):
edit_mock.return_value = False
result = self.run_command(['server', 'edit', '1000',
'--hostname=hardware-test1',
'--domain=test.sftlyr.ws',
'--userdata=My data'])
self.assertEqual(result.exit_code, 2)
self.assertEqual(result.output, "")
edit_mock.assert_called_with(1000,
userdata='My data',
domain='test.sftlyr.ws',
hostname='hardware-test1')
def test_edit_server_userfile(self):
with tempfile.NamedTemporaryFile() as userfile:
userfile.write(b"some data")
userfile.flush()
result = self.run_command(['server', 'edit', '1000',
'--userfile=%s' % userfile.name])
self.assert_no_fail(result)
self.assertEqual(result.output, "")
self.assert_called_with('SoftLayer_Hardware_Server',
'setUserMetadata',
args=(['some data'],),
identifier=1000)
@mock.patch('SoftLayer.CLI.formatting.confirm')
def test_update_firmware(self, confirm_mock):
confirm_mock.return_value = True
result = self.run_command(['server', 'update-firmware', '1000'])
self.assert_no_fail(result)
self.assertEqual(result.output, "")
self.assert_called_with('SoftLayer_Hardware_Server',
'createFirmwareUpdateTransaction',
args=((1, 1, 1, 1)), identifier=1000)
    def test_edit(self):
        # A single `server edit` invocation fans out to several API calls;
        # verify each one receives the correct payload.
        result = self.run_command(['server', 'edit',
                                   '--domain=example.com',
                                   '--hostname=host',
                                   '--userdata="testdata"',
                                   '--tag=dev',
                                   '--tag=green',
                                   '--public-speed=10',
                                   '--private-speed=100',
                                   '100'])
        self.assert_no_fail(result)
        self.assertEqual(result.output, '')
        # hostname/domain go through editObject ...
        self.assert_called_with(
            'SoftLayer_Hardware_Server', 'editObject',
            args=({'domain': 'example.com', 'hostname': 'host'},),
            identifier=100,
        )
        # ... userdata through setUserMetadata (shell quotes are preserved
        # verbatim in the stored metadata) ...
        self.assert_called_with(
            'SoftLayer_Hardware_Server', 'setUserMetadata',
            args=(['"testdata"'],),
            identifier=100,
        )
        # ... and each port speed through its own interface-speed method.
        self.assert_called_with(
            'SoftLayer_Hardware_Server', 'setPublicNetworkInterfaceSpeed',
            args=(10,),
            identifier=100,
        )
        self.assert_called_with(
            'SoftLayer_Hardware_Server', 'setPrivateNetworkInterfaceSpeed',
            args=(100,),
            identifier=100,
        )
| |
#!/usr/bin/python
#
# Copyright (c) 2018 Yuwei Zhou, <yuwzho@microsoft.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: azure_rm_route
version_added: "2.7"
short_description: Manage Azure route resource
description:
- Create, update or delete a route.
options:
resource_group:
description:
- Name of resource group.
required: true
name:
description:
- Name of the route.
required: true
state:
description:
- Assert the state of the route. Use C(present) to create or update and C(absent) to delete.
default: present
choices:
- absent
- present
address_prefix:
description:
- The destination CIDR to which the route applies.
next_hop_type:
description:
- The type of Azure hop the packet should be sent to.
choices:
- virtual_network_gateway
- vnet_local
- internet
- virtual_appliance
- none
default: 'none'
next_hop_ip_address:
description:
- The IP address packets should be forwarded to.
- Next hop values are only allowed in routes where the next hop type is VirtualAppliance.
route_table_name:
description:
- The name of the route table.
required: true
extends_documentation_fragment:
- azure
- azure_tags
author:
- Yuwei Zhou (@yuwzho)
'''
EXAMPLES = '''
- name: Create a route
azure_rm_route:
resource_group: myResourceGroup
name: myRoute
address_prefix: 10.1.0.0/16
next_hop_type: virtual_network_gateway
route_table_name: table
- name: Delete a route
azure_rm_route:
resource_group: myResourceGroup
name: myRoute
route_table_name: table
state: absent
'''
RETURN = '''
id:
description:
- Current state of the route.
returned: success
type: str
sample: "/subscriptions/xxxx...xxxx/resourceGroups/v-xisuRG/providers/Microsoft.Network/routeTables/tableb57/routes/routeb57"
'''
try:
from msrestazure.azure_exceptions import CloudError
except ImportError:
# This is handled in azure_rm_common
pass
from ansible.module_utils.azure_rm_common import AzureRMModuleBase
from ansible.module_utils.common.dict_transformations import _snake_to_camel
class AzureRMRoute(AzureRMModuleBase):
    """Manage a single route inside an Azure route table (create/update/delete)."""
    def __init__(self):
        # Argument spec mirrors the module DOCUMENTATION block above.
        self.module_arg_spec = dict(
            resource_group=dict(type='str', required=True),
            name=dict(type='str', required=True),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            address_prefix=dict(type='str'),
            next_hop_type=dict(type='str',
                               choices=['virtual_network_gateway',
                                        'vnet_local',
                                        'internet',
                                        'virtual_appliance',
                                        'none'],
                               default='none'),
            next_hop_ip_address=dict(type='str'),
            route_table_name=dict(type='str', required=True)
        )
        # next_hop_type is only mandatory when the route should exist.
        required_if = [
            ('state', 'present', ['next_hop_type'])
        ]
        # Populated from validated module parameters in exec_module().
        self.resource_group = None
        self.name = None
        self.state = None
        self.address_prefix = None
        self.next_hop_type = None
        self.next_hop_ip_address = None
        self.route_table_name = None
        self.results = dict(
            changed=False,
            id=None
        )
        super(AzureRMRoute, self).__init__(self.module_arg_spec,
                                           required_if=required_if,
                                           supports_check_mode=True)
    def exec_module(self, **kwargs):
        """Entry point called by AzureRMModuleBase; returns the results dict."""
        # Copy validated module parameters onto the instance.
        for key in list(self.module_arg_spec.keys()):
            setattr(self, key, kwargs[key])
        result = dict()
        changed = False
        # The Azure SDK expects CamelCase hop types (e.g. 'VirtualNetworkGateway').
        self.next_hop_type = _snake_to_camel(self.next_hop_type, capitalize_first=True)
        result = self.get_route()
        if self.state == 'absent' and result:
            changed = True
            if not self.check_mode:
                self.delete_route()
        elif self.state == 'present':
            if not result:
                changed = True  # create new route
            else:  # check update
                if result.next_hop_type != self.next_hop_type:
                    self.log('Update: {0} next_hop_type from {1} to {2}'.format(self.name, result.next_hop_type, self.next_hop_type))
                    changed = True
                if result.next_hop_ip_address != self.next_hop_ip_address:
                    self.log('Update: {0} next_hop_ip_address from {1} to {2}'.format(self.name, result.next_hop_ip_address, self.next_hop_ip_address))
                    changed = True
                if result.address_prefix != self.address_prefix:
                    self.log('Update: {0} address_prefix from {1} to {2}'.format(self.name, result.address_prefix, self.address_prefix))
                    changed = True
            if changed:
                result = self.network_models.Route(name=self.name,
                                                   address_prefix=self.address_prefix,
                                                   next_hop_type=self.next_hop_type,
                                                   next_hop_ip_address=self.next_hop_ip_address)
                if not self.check_mode:
                    result = self.create_or_update_route(result)
        # `result` is the live route, the desired route model, or None
        # (e.g. check mode on a brand-new route, or a deleted route).
        self.results['id'] = result.id if result else None
        self.results['changed'] = changed
        return self.results
    def create_or_update_route(self, param):
        """PUT the route and wait for the long-running operation to finish."""
        try:
            poller = self.network_client.routes.create_or_update(self.resource_group, self.route_table_name, self.name, param)
            return self.get_poller_result(poller)
        except Exception as exc:
            self.fail("Error creating or updating route {0} - {1}".format(self.name, str(exc)))
    def delete_route(self):
        """Delete the route and wait for the long-running operation to finish."""
        try:
            poller = self.network_client.routes.delete(self.resource_group, self.route_table_name, self.name)
            result = self.get_poller_result(poller)
            return result
        except Exception as exc:
            self.fail("Error deleting route {0} - {1}".format(self.name, str(exc)))
    def get_route(self):
        """Return the existing route, or None when Azure reports 404."""
        try:
            return self.network_client.routes.get(self.resource_group, self.route_table_name, self.name)
        except CloudError as cloud_err:
            # Return None iff the resource is not found
            if cloud_err.status_code == 404:
                self.log('{0}'.format(str(cloud_err)))
                return None
            self.fail('Error: failed to get resource {0} - {1}'.format(self.name, str(cloud_err)))
        except Exception as exc:
            self.fail('Error: failed to get resource {0} - {1}'.format(self.name, str(exc)))
def main():
    """Module entry point; instantiating the class runs the module logic."""
    AzureRMRoute()
if __name__ == '__main__':
    main()
| |
# -*- coding: utf-8 -*-
"""
Most statements correspond to some action on the database. Some of these
actions may be complex, but intuitively ``a.B()``, the Query form, will query
against the database for the value or values that are related to ``a`` through
``B``; on the other hand, ``a.B(c)``, the Update form, will add a statement to
the database that ``a`` relates to ``c`` through ``B``. For the Update form, a
Relationship object describing the relationship stated is returned as a side-
effect of the update.
The Update form can also be accessed through the set() method of a Property and
the Query form through the get() method like::
a.B.set(c)
and::
a.B.get()
The get() method also allows for parameterizing the query in ways specific to
the Property.
"""
import logging
from .mapper import Mapper
__version__ = "0.12.1.dev0"
__author__ = 'Mark Watts'
this_module = __import__('yarom')
this_module.connected = False
L = logging.getLogger(__name__)
DEFAULT_MODULES_TO_LOAD = ["yarom.dataObject",
"yarom.objectCollection",
"yarom.relationship",
"yarom.classRegistry"]
__all__ = ['setConf',
'config',
'loadConfig',
'loadData',
'connect',
'disconnect']
MAPPER = None
def yarom_import(cname_or_mname, cnames=None):
    """Load a module through the global MAPPER and fetch classes from it.

    With `cnames` given, `cname_or_mname` names a module and every entry of
    `cnames` is looked up in it. Without `cnames`, `cname_or_mname` is a
    dotted "module.Class" path. A single match is returned bare; several
    matches come back as a tuple.
    """
    global MAPPER
    if cnames:
        module_name = cname_or_mname
    else:
        module_name, leaf = cname_or_mname.rsplit('.', 1)
        cnames = (leaf,)
    loaded = MAPPER.load_module(module_name)
    if len(cnames) == 1:
        return getattr(loaded, cnames[0])
    return tuple(getattr(loaded, cname) for cname in cnames)
def yarom_dependency(mname):
    """Register `mname` as a dependency with the global MAPPER."""
    global MAPPER
    MAPPER.add_module_dependency(mname)
def config(key=None, value=None):
    """Read or write the active configuration.

    Called with no arguments, returns the whole configuration object; with
    only `key`, returns that entry; with both, sets the entry.
    """
    from .configure import Configureable
    if key is None:
        return Configureable.conf
    if value is None:
        return Configureable.conf[key]
    Configureable.conf[key] = value
def loadConfig(f):
    """ Load configuration for the module """
    # `f` is a path to a config file; Data.open parses it and the resulting
    # Data object becomes the process-wide configuration.
    from .configure import Configureable
    from .data import Data
    Configureable.setConf(Data.open(f))
    return Configureable.conf
def disconnect(c=False):
    """ Close the database """
    # `c` optionally names the configuration whose database should be closed;
    # when falsy, the globally configured one is used.
    from .configure import Configureable
    global MAPPER
    m = this_module
    # No-op when connect() was never called (or already disconnected).
    if not m.connected:
        return
    if not c:
        c = Configureable.conf
    MAPPER.deregister_all()  # NOTE: We do NOT unmap on disconnect
    # Note that `c' could be set in one of the previous branches;
    # don't try to simplify this logic.
    if c:
        c.closeDatabase()
    MAPPER = None
    m.connected = False
def loadData(data, dataFormat):
    """Load triples into the configured RDF graph.

    `data` is either a file path (parsed with `dataFormat`, e.g. 'n3') or an
    rdflib.ConjunctiveGraph whose quads are copied in. Any other type is
    silently ignored.
    """
    import rdflib
    if isinstance(data, str):
        config('rdf.graph').parse(data, format=dataFormat)
    elif isinstance(data, rdflib.ConjunctiveGraph):
        destination = config('rdf.graph')
        for quad in data.quads((None, None, None, None)):
            destination.add(quad)
def connect(conf=False,
            do_logging=False,
            data=False,
            dataFormat='n3',
            modulesToLoad=None):
    """Load desired configuration and open the database

    Parameters
    ----------
    conf : :class:`str`, :class:`Data <yarom.data.Data>`, :class:`Configuration <yarom.configure.Configuration>` or :class:`dict`, optional
        The configuration for the YAROM connection
    do_logging : bool, optional
        If True, turn on debug level logging. The default is False.
    data : str or rdflib.ConjunctiveGraph, optional
        If provided, specifies a file to load into the library.
    dataFormat : str, optional
        If provided, specifies the file format of the file pointed specified by
        `data`.

        The formats available are those accepted by RDFLib's serializer
        plugins. 'n3' is the default.
    modulesToLoad : list of str, optional
        Names of modules to map in; defaults to DEFAULT_MODULES_TO_LOAD.
    """
    from .configure import Configureable
    from .data import (
        SPARQLSource,
        TrixSource,
        SerializationSource)
    import atexit
    global MAPPER
    # Create the global Mapper on first connect; these two base classes are
    # mapped before any user modules.
    if MAPPER is None:
        MAPPER = Mapper(('yarom.dataObject.DataObject',
                         'yarom.simpleProperty.SimpleProperty'))
    m = this_module
    # connect() is not re-entrant: a second call is a no-op.
    if m.connected:
        print("yarom already connected")
        return
    if do_logging:
        logging.basicConfig(level=logging.DEBUG)
    if modulesToLoad is None:
        modulesToLoad = DEFAULT_MODULES_TO_LOAD
    setConf(conf)
    dbconn = Configureable.conf
    # Register the available data sources before opening the database.
    dbconn.register_source(SPARQLSource)
    dbconn.register_source(TrixSource)
    dbconn.register_source(SerializationSource)
    dbconn.openDatabase()
    L.info("Connected to database")
    # Ensure the database is closed when the interpreter exits.
    atexit.register(disconnect)
    for mod in modulesToLoad:
        MAPPER.load_module(mod)
    MAPPER.remap()
    m.connected = True
    if data:
        loadData(data, dataFormat)
def setConf(conf):
    """ Set the configuration

    Parameters
    ----------
    conf : str, Data, Configuration or dict, optional
        The configuration to load.

        If a Data object is provided, then it's used as is for the
        configuration.

        If either a Python dict or a Configuration object are provided, then the
        contents of that object is used to make a Data object for configuration.

        If a string is provided, then the file is read in as JSON to be parsed as
        a dict and from there is treated as if you had passed that dict to
        connect.

        The default action is to attempt to open a file called 'yarom.conf' from
        your current directory as the configuration. Failing that, an 'empty'
        config with default values will be loaded.
    """
    from .configure import Configuration, Configureable
    from .data import Data
    if conf:
        # NOTE(review): Data is deliberately checked before Configuration/dict
        # -- keep this order (Data may satisfy the later checks too).
        if isinstance(conf, Data):
            Configureable.setConf(conf)
        elif isinstance(conf, (Configuration, dict)):
            Configureable.setConf(Data(conf))
        elif isinstance(conf, str):
            Configureable.setConf(Data.open(conf))
    else:
        try:
            Configureable.setConf(Data.open("yarom.conf"))
        except Exception:
            # Fall back to built-in defaults when no config file exists.
            L.info("Couldn't load default configuration")
            Configureable.setConf(Data())
| |
#
# OpenMX.py
#
# Interface to OpenMX (http://openmx-square.org)
#
# Copyright (c) 2018 Yuto Tanaka
#
# This file is distributed under the terms of the MIT license.
# Please see the file 'LICENCE.txt' in the root directory
# or http://opensource.org/licenses/mit-license.php for information.
#
import numpy as np
"""OpenMX"""
# Function for OpenMX
def read_OpenMX_input(file_original):
    """Parse an OpenMX input (.dat) file.

    Returns (lavec, lavec_inv, nat, x_frac0): the lattice vectors (row-wise),
    their transposed inverse, the atom count, and fractional coordinates.
    """
    # Keywords searched for (case-insensitively) in the input file.
    search_target = [
        "atoms.number", "atoms.speciesandcoordinates.unit",
        "<atoms.speciesandcoordinates", "<atoms.unitvectors"
    ]
    # open original file
    f = open(file_original, 'r')
    # set initial parameters
    nat = 0
    lavec_flag = 0
    lavec_row = 0
    lavec = np.zeros([3, 3])
    coord_flag = 0
    coord_row = 0
    # read the original file and pull out the needed information.
    # NOTE(review): this parser assumes Atoms.Number and the coordinate-unit
    # keyword appear before the coordinate block; otherwise x_frac0 or
    # coord_unit would be referenced before assignment -- confirm inputs.
    for line in f:
        ss = line.strip().split()
        # number of atoms
        if len(ss) > 0 and ss[0].lower() == search_target[0]:
            nat = int(ss[1])
        # atomic coordinate lines (inside <Atoms.SpeciesAndCoordinates);
        # columns 2-4 of each line are taken as the coordinates
        if coord_flag == 1:
            for j in range(3):
                x_frac0[coord_row][j] = float(ss[j+2])
            coord_row += 1
            if coord_row == nat:
                coord_flag = 0
        # lattice vector rows (inside <Atoms.UnitVectors)
        if lavec_flag == 1:
            for i in range(3):
                lavec[lavec_row][i] = float(ss[i])
            lavec_row += 1
            if lavec_row == 3:
                lavec_flag = 0
        # unit of atomic coordinates ('ang' or fractional)
        if len(ss) > 0 and ss[0].lower() == search_target[1]:
            coord_unit = ss[1].lower()
        if len(ss) > 0 and ss[0].lower() == search_target[2]:
            coord_flag = 1
            # initialize x_frac0 array
            x_frac0 = np.zeros([nat, 3])
        if len(ss) > 0 and ss[0].lower() == search_target[3]:
            lavec_flag = 1
        # stop scanning once the full lattice has been read
        if np.linalg.norm(lavec) > 0 and lavec_flag == 0:
            break
    # errors
    if nat == 0:
        print("Could not read dat file properly.")
        exit(1)
    lavec_inv = (np.linalg.inv(lavec)).T
    # convert Angstrom coordinates to fractional
    if coord_unit == "ang":
        for i in range(nat):
            x_frac0[i] = np.dot(lavec_inv , x_frac0[i])
    f.close()
    return lavec, lavec_inv, nat, x_frac0
def write_OpenMX_input(prefix, counter, nzerofills, disp, lavec, file_in):
    """Write a displaced OpenMX input file.

    Copies `file_in` verbatim except that (a) the System.Name value is
    replaced by `prefix` plus the zero-padded `counter`, and (b) each atomic
    coordinate inside the <Atoms.SpeciesAndCoordinates block is shifted by
    the matching row of `disp` (fractional units).

    Parameters
    ----------
    prefix : str
        Output file name prefix; also becomes the new System.Name.
    counter : int
        Displacement-pattern index, zero-padded to `nzerofills` digits.
    nzerofills : int
        Number of digits used for the counter.
    disp : (nat, 3) ndarray
        Fractional displacements. Modified in place: negative entries are
        wrapped into [0, 1).
    lavec : (3, 3) ndarray
        Lattice vectors (row-wise), used to convert Angstrom coordinates.
    file_in : str
        Path of the template OpenMX input file.
    """
    search_target = [
        "atoms.number", "<atoms.speciesandcoordinates",
        "atoms.speciesandcoordinates.unit", "system.name"
    ]
    filename = prefix + str(counter).zfill(nzerofills) + ".dat"
    fout = open(filename, 'w')
    fin = open(file_in, 'r')
    nat = 0
    coord_flag = 0
    coord_row = 0
    conv = (np.linalg.inv(lavec)).T
    # Wrap negative fractional displacements back into the unit cell.
    # (Removed a leftover debug loop that printed np.dot(conv_inv, disp[i]);
    # it could never execute because nat was still 0 at that point.)
    disp[disp < 0] += 1
    for line in fin:
        ss = line.strip().split()
        # number of atoms: allocate the per-atom coordinate storage
        if len(ss) > 0 and ss[0].lower() == search_target[0]:
            nat = int(ss[1])
            coord = {}
            for i in range(nat):
                coord[i+1] = []
        # unit of the coordinates ('ang' or fractional)
        if len(ss) > 0 and ss[0].lower() == search_target[2]:
            coord_unit = ss[1].lower()
        # coordinate lines inside the <Atoms.SpeciesAndCoordinates block:
        # ss[0] is the atom index, ss[1] the species label, then the
        # coordinates and any trailing (e.g. spin) columns.
        if coord_flag == 1:
            coord_column = len(ss)
            for i in range(1, coord_column):
                if i > 1:
                    coord[int(ss[0])].append(float(ss[i]))
                else:
                    coord[int(ss[0])].append(ss[i])
            # convert Angstrom coordinates to fractional before displacing
            if coord_unit == "ang":
                coord[coord_row+1] = np.dot(conv, coord[coord_row+1])
            # add displacement to the three coordinate columns
            for j in range(1, 4):
                coord[coord_row+1][j] += disp[coord_row][j-1]
                coord[coord_row+1][j] = format(coord[coord_row+1][j], '20.16f')
            fout.write(str(coord_row+1) + " ")
            fout.write(" ".join(map(str, coord[coord_row+1])))
            fout.write("\n")
            coord_row += 1
            if coord_row == nat:
                coord_flag = 0
        elif len(ss) > 0 and ss[0].lower() == search_target[3]:
            # rename the system after the displacement pattern
            ss[1] = prefix + str(counter).zfill(nzerofills)
            fout.write(" ".join(map(str, ss)))
            fout.write("\n")
        else:
            fout.write(line)
        if len(ss) > 0 and ss[0].lower() == search_target[1]:
            coord_flag = 1
    fin.close()
    fout.close()
"""OpenMX"""
# Function for OpenMX
def read_outfile(out_file, nat, column):
    """Extract per-atom 3-vectors from the <coordinates.forces block.

    The line immediately after the "<coordinates.forces" marker is taken as
    the number of atoms in the block; each following line yields three floats
    starting at token index `column` (2 for coordinates, 5 for forces).
    Reading stops after `nat` atoms.
    """
    values = np.zeros([nat, 3], dtype=np.float64)
    in_block = False
    declared_count = 0
    row = 0
    with open(out_file, 'r') as handle:
        for raw_line in handle:
            tokens = raw_line.strip().split()
            if tokens and tokens[0] == "<coordinates.forces":
                in_block = True
                continue
            if not in_block:
                continue
            if declared_count == 0:
                # first line of the block: atom count
                declared_count = int(tokens[0])
                continue
            for axis in range(3):
                values[row][axis] = float(tokens[axis + column])
            row += 1
            if row == nat:
                break
    return values
#displacements
def get_coordinates_OpenMX(out_file, nat, lavec, conv):
    """Read atomic coordinates (columns 2-4 of the <coordinates.forces block)
    and transform each atom's vector by `conv`.

    Presumably the file stores Angstrom and `conv` maps to fractional
    coordinates -- confirm against the caller. `lavec` is accepted for
    interface compatibility but unused here.
    """
    positions = read_outfile(out_file, nat, 2)
    for atom in range(nat):
        positions[atom] = np.dot(conv, positions[atom])
    return positions
def print_displacements_OpenMX(out_files,
                               lavec, lavec_inv, nat, x0,
                               conversion_factor,
                               file_offset):
    """Print per-atom displacements (three columns per line) for each file.

    Displacements are taken relative to the reference coordinates `x0`,
    optionally minus an offset read from `file_offset`, and scaled by
    `conversion_factor`.
    """
    vec_refold = np.vectorize(refold)  # NOTE(review): unused -- refold path below is commented out
    lavec_transpose = lavec.transpose()  # NOTE(review): unused
    conv = lavec_inv
    conv_inv = np.linalg.inv(conv)
    x0 = np.round(x0, 8)
    if file_offset is None:
        disp_offset = np.zeros([nat, 3])
    else:
        x0_offset = get_coordinates_OpenMX(file_offset, nat, lavec, conv)
        try:
            x0_offset = np.reshape(x0_offset, (nat, 3))
        except:
            # best-effort warning; execution continues with the raw array
            print("File %s contains too many position entries" % file_offset)
        disp_offset = x0_offset - x0
    for search_target in out_files:
        x = get_coordinates_OpenMX(search_target, nat, lavec, conv)
        #ndata = len(x) / (3 * nat)
        ndata = 1
        #x = np.reshape(x, (1, nat, 3))
        for idata in range(ndata):
            #disp = x[idata, :, :] - x0 - disp_offset
            disp = x - x0 - disp_offset
            # presumably wraps displacements that crossed the periodic
            # boundary (threshold 0.96) -- TODO confirm
            disp[disp > 0.96] -= 1.0
            #disp = np.dot(vec_refold(disp), conv_inv)
            for i in range(nat):
                disp[i] = np.dot(conv_inv, disp[i])
            # suppress numerical noise below 1e-5
            disp[np.absolute(disp) < 1e-5] = 0.0
            disp *= conversion_factor
            for i in range(nat):
                print("%15.7F %15.7F %15.7F" % (disp[i][0],
                                                disp[i][1],
                                                disp[i][2]))
#atomic forces
def get_atomicforces_OpenMX(out_file, nat):
    """Return per-atom forces: columns 5-7 of the <coordinates.forces block."""
    return read_outfile(out_file, nat, 5)
def print_atomicforces_OpenMX(out_files,
                              nat,
                              conversion_factor,
                              file_offset):
    """Print per-atom forces for each file, optionally minus an offset,
    scaled by `conversion_factor`."""
    if file_offset is None:
        force_offset = np.zeros((nat, 3))
    else:
        data0 = get_atomicforces_OpenMX(file_offset, nat)
        try:
            force_offset = np.reshape(data0, (nat, 3))
        except:
            # best-effort warning; execution continues with the raw array
            print("File %s contains too many force entries" % file_offset)
    for search_target in out_files:
        data = get_atomicforces_OpenMX(search_target, nat)
        #ndata = len(data) / (3 * nat)
        ndata = 1
        #data = np.reshape(data, (ndata, nat, 3))
        for idata in range(ndata):
            #f = data[idata, :, :] - force_offset
            f = data - force_offset
            f *= conversion_factor
            for i in range(nat):
                print("%15.8E %15.8E %15.8E" % (f[i][0],
                                                f[i][1],
                                                f[i][2]))
def print_displacements_and_forces_OpenMX(out_files,
                                          lavec, lavec_inv, nat, x0,
                                          conversion_factor_disp,
                                          conversion_factor_force,
                                          file_offset):
    """Print displacement and force columns side by side for each file.

    Mirrors print_displacements_OpenMX and print_atomicforces_OpenMX but
    emits both quantities on one line per atom.
    """
    vec_refold = np.vectorize(refold)  # NOTE(review): unused -- refold path below is commented out
    lavec_transpose = lavec.transpose()  # NOTE(review): unused
    conv = lavec_inv
    conv_inv = np.linalg.inv(conv)
    x0 = np.round(x0, 8)
    if file_offset is None:
        disp_offset = np.zeros((nat, 3))
        force_offset = np.zeros((nat, 3))
    else:
        x0_offset = get_coordinates_OpenMX(file_offset, nat, lavec, conv)
        force_offset = get_atomicforces_OpenMX(file_offset, nat)
        try:
            x0_offset = np.reshape(x0_offset, (nat, 3))
        except:
            # best-effort warning; execution continues with the raw array
            print("File %s contains too many position entries" % file_offset)
        disp_offset = x0_offset - x0
        try:
            force_offset = np.reshape(force_offset, (nat, 3))
        except:
            print("File %s contains too many force entries" % file_offset)
    for search_target in out_files:
        x = get_coordinates_OpenMX(search_target, nat, lavec, conv)
        force = get_atomicforces_OpenMX(search_target, nat)
        ndata = 1
        for idata in range(ndata):
            #disp = x[idata, :, :] - x0 - disp_offset
            disp = x - x0 - disp_offset
            # presumably wraps displacements that crossed the periodic
            # boundary (threshold 0.96) -- TODO confirm
            disp[disp > 0.96] -= 1.0
            #disp = np.dot(vec_refold(disp), conv_inv)
            for i in range(nat):
                disp[i] = np.dot(conv_inv, disp[i])
            # suppress numerical noise below 1e-5
            disp[np.absolute(disp) < 1e-5] = 0.0
            disp *= conversion_factor_disp
            f = force - force_offset
            f *= conversion_factor_force
            for i in range(nat):
                print("%15.7F %15.7F %15.7F %20.8E %15.8E %15.8E" % (disp[i][0],
                                                                     disp[i][1],
                                                                     disp[i][2],
                                                                     f[i][0],
                                                                     f[i][1],
                                                                     f[i][2]))
#total enegy
def get_energies_OpenMX(out_file):
    """Return the total energy from an OpenMX output file.

    Scans for the first line whose leading token is "Utot." and returns its
    second token as a one-element float array. Exits with status 1 when no
    such line exists.

    Parameters
    ----------
    out_file : str
        Path to the OpenMX .out file.

    Returns
    -------
    numpy.ndarray
        Shape (1,) float64 array holding the first Utot. value found.
    """
    target = "Utot."
    etot = []
    # `with` guarantees the handle is closed (the original leaked it).
    with open(out_file, 'r') as f:
        for line in f:
            ss = line.strip().split()
            if len(ss) > 0 and ss[0] == target:
                etot.append(float(ss[1]))
                break  # only the first occurrence is used
    if len(etot) == 0:
        print("Total energy not found.")
        exit(1)
    # np.float was a deprecated alias removed in NumPy 1.24; float64 is the
    # type it always resolved to.
    return np.array(etot, dtype=np.float64)
def print_energies_OpenMX(out_files,
                          conversion_factor,
                          file_offset):
    """Print scaled total energies, one per line, for each output file.

    When `file_offset` is given, its single energy entry is subtracted from
    every value; more than one entry in the offset file is a fatal error.
    """
    etot_offset = 0.0
    if file_offset is not None:
        offset_values = get_energies_OpenMX(file_offset)
        if len(offset_values) > 1:
            print("File %s contains too many energy entries" % file_offset)
            exit(1)
        etot_offset = offset_values[0]
    print("# Etot")
    for target_file in out_files:
        for value in get_energies_OpenMX(target_file):
            print("%19.11E" % ((value - etot_offset) * conversion_factor))
def refold(x):
    """Fold a fractional-coordinate difference into [-0.5, 0.5).

    Values at or above 0.5 are shifted down by one lattice period, values
    below -0.5 are shifted up; everything else passes through unchanged.
    """
    if x >= 0.5:
        return x - 1.0
    if x < -0.5:
        return x + 1.0
    return x
def get_unit_conversion_factor(str_unit):
    """Return (displacement, force, energy) conversion factors for str_unit.

    Supported unit systems are 'ev', 'rydberg' and 'hartree'; any other
    string aborts the program. The constants convert from OpenMX's native
    units (Hartree energies, Angstrom lengths -- confirm against caller).
    """
    bohr_radius = 0.52917721067
    rydberg_to_ev = 13.60569253
    # unit -> (disp factor, force factor, energy factor)
    factor_table = {
        "ev": (1.0, 2.0 * rydberg_to_ev, 2.0 * rydberg_to_ev),
        "rydberg": (1.0 / bohr_radius, 2.0, 2.0),
        "hartree": (1.0 / bohr_radius, 1.0, 1.0),
    }
    if str_unit not in factor_table:
        print("This cannot happen")
        exit(1)
    disp_factor, force_factor, energy_factor = factor_table[str_unit]
    return disp_factor, force_factor, energy_factor
def parse(dat_init, out_files, out_file_offset, str_unit,
          print_disp, print_force, print_energy):
    """Entry point: print displacements, forces and/or energies.

    Parameters
    ----------
    dat_init : str
        Reference OpenMX input (.dat) defining the undisplaced system.
    out_files : list of str
        OpenMX output files to process.
    out_file_offset : str or None
        Optional file whose values are subtracted as an offset.
    str_unit : str
        Target unit system: 'ev', 'rydberg' or 'hartree'.
    print_disp, print_force, print_energy : bool
        Select what to print; displacements+forces combined takes priority,
        then displacements, forces, and finally energies.
    """
    aa, aa_inv, nat, x_frac0 = read_OpenMX_input(dat_init)
    scale_disp, scale_force, scale_energy = get_unit_conversion_factor(str_unit)
    # Truthiness instead of `== True` (PEP 8 E712); callers pass booleans.
    if print_disp and print_force:
        print_displacements_and_forces_OpenMX(out_files,
                                              aa, aa_inv, nat,
                                              x_frac0,
                                              scale_disp,
                                              scale_force,
                                              out_file_offset)
    elif print_disp:
        print_displacements_OpenMX(out_files,
                                   aa, aa_inv, nat,
                                   x_frac0,
                                   scale_disp,
                                   out_file_offset)
    elif print_force:
        print_atomicforces_OpenMX(out_files,
                                  nat,
                                  scale_force,
                                  out_file_offset)
    elif print_energy:
        print_energies_OpenMX(out_files,
                              scale_energy,
                              out_file_offset)
| |
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Read and preprocess image data.
Image processing occurs on a single image at a time. Image are read and
preprocessed in parallel across multiple threads. The resulting images
are concatenated together to form a single batch for training or evaluation.
-- Provide processed image data for a network:
inputs: Construct batches of evaluation examples of images.
distorted_inputs: Construct batches of training examples of images.
batch_inputs: Construct batches of training or evaluation examples of images.
-- Data processing:
parse_example_proto: Parses an Example proto containing a training example
of an image.
-- Image decoding:
decode_jpeg: Decode a JPEG encoded string into a 3-D float32 Tensor.
-- Image preprocessing:
image_preprocessing: Decode and preprocess one image for evaluation or training
distort_image: Distort one image for training a network.
eval_image: Prepare one image for evaluation.
distort_color: Distort the color in one image for training.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
FLAGS = tf.app.flags.FLAGS
# Command-line flags controlling batching and preprocessing parallelism.
tf.app.flags.DEFINE_integer('batch_size', 32,
                            """Number of images to process in a batch.""")
tf.app.flags.DEFINE_integer('image_size', 299,
                            """Provide square images of this size.""")
tf.app.flags.DEFINE_integer('num_preprocess_threads', 4,
                            """Number of preprocessing threads per tower. """
                            """Please make this a multiple of 4.""")
tf.app.flags.DEFINE_integer('num_readers', 4,
                            """Number of parallel readers during train.""")
# Images are preprocessed asynchronously using multiple threads specified by
# --num_preprocess_threads and the resulting processed images are stored in a
# random shuffling queue. The shuffling queue dequeues --batch_size images
# for processing on a given Inception tower. A larger shuffling queue guarantees
# better mixing across examples within a batch and results in slightly higher
# predictive performance in a trained model. Empirically,
# --input_queue_memory_factor=16 works well. A value of 16 implies a queue size
# of 1024*16 images. Assuming RGB 299x299 images, this implies a queue size of
# 16GB. If the machine is memory limited, then decrease this factor to
# decrease the CPU memory footprint, accordingly.
tf.app.flags.DEFINE_integer('input_queue_memory_factor', 16,
                            """Size of the queue of preprocessed images. """
                            """Default is ideal but try smaller values, e.g. """
                            """4, 2 or 1, if host memory is constrained. See """
                            """comments in code for more details.""")
def inputs(dataset, batch_size=None, num_preprocess_threads=None):
  """Generate batches of ImageNet images for evaluation.

  Use this function as the inputs for evaluating a network.

  Note that some (minimal) image preprocessing occurs during evaluation
  including central cropping and resizing of the image to fit the network.

  Args:
    dataset: instance of Dataset class specifying the dataset.
    batch_size: integer, number of examples in batch
    num_preprocess_threads: integer, total number of preprocessing threads but
      None defaults to FLAGS.num_preprocess_threads.

  Returns:
    images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
                                       image_size, 3].
    labels: 1-D integer Tensor of [FLAGS.batch_size].
  """
  if not batch_size:
    batch_size = FLAGS.batch_size

  # Force all input processing onto CPU in order to reserve the GPU for
  # the forward inference and back-propagation.
  with tf.device('/cpu:0'):
    # train=False selects the evaluation preprocessing path; a single
    # reader is used (presumably for repeatable example order -- confirm).
    images, labels = batch_inputs(
        dataset, batch_size, train=False,
        num_preprocess_threads=num_preprocess_threads,
        num_readers=1)

  return images, labels
def distorted_inputs(dataset, batch_size=None, num_preprocess_threads=None):
  """Generate batches of distorted versions of ImageNet images.

  Use this function as the inputs for training a network.

  Distorting images provides a useful technique for augmenting the data
  set during training in order to make the network invariant to aspects
  of the image that do not effect the label.

  Args:
    dataset: instance of Dataset class specifying the dataset.
    batch_size: integer, number of examples in batch
    num_preprocess_threads: integer, total number of preprocessing threads but
      None defaults to FLAGS.num_preprocess_threads.

  Returns:
    images: Images. 4D tensor of size [batch_size, FLAGS.image_size,
                                       FLAGS.image_size, 3].
    labels: 1-D integer Tensor of [batch_size].
  """
  if not batch_size:
    batch_size = FLAGS.batch_size

  # Force all input processing onto CPU in order to reserve the GPU for
  # the forward inference and back-propagation.
  with tf.device('/cpu:0'):
    # train=True selects the random-distortion path and multiple parallel
    # readers for better example shuffling.
    images, labels = batch_inputs(
        dataset, batch_size, train=True,
        num_preprocess_threads=num_preprocess_threads,
        num_readers=FLAGS.num_readers)
  return images, labels
def decode_jpeg(image_buffer, scope=None):
  """Decode a JPEG string into one 3-D float image Tensor.

  Args:
    image_buffer: scalar string Tensor.
    scope: Optional scope for name_scope.
  Returns:
    3-D float Tensor with values ranging from [0, 1).
  """
  with tf.name_scope(values=[image_buffer], name=scope,
                     default_name='decode_jpeg'):
    # Decode the string as an RGB JPEG (channels=3 forces three channels).
    # Note that the resulting image contains an unknown height and width
    # that is set dynamically by decode_jpeg. In other words, the height
    # and width of image is unknown at compile-time.
    image = tf.image.decode_jpeg(image_buffer, channels=3)

    # After this point, all image pixels reside in [0,1)
    # until the very end, when they're rescaled to (-1, 1).  The various
    # adjust_* ops all require this range for dtype float.
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    return image
def distort_color(image, thread_id=0, scope=None):
  """Distort the color of the image.

  Each color distortion is non-commutative and thus ordering of the color ops
  matters. Ideally we would randomly permute the ordering of the color ops.
  Rather then adding that level of complication, we select a distinct ordering
  of color ops for each preprocessing thread.

  Args:
    image: Tensor containing single image.
    thread_id: preprocessing thread ID.
    scope: Optional scope for name_scope.
  Returns:
    color-distorted image
  """
  with tf.name_scope(values=[image], name=scope, default_name='distort_color'):
    # Two fixed orderings, chosen by thread parity.
    color_ordering = thread_id % 2

    if color_ordering == 0:
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
    elif color_ordering == 1:
      image = tf.image.random_brightness(image, max_delta=32. / 255.)
      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
      image = tf.image.random_hue(image, max_delta=0.2)

    # The random_* ops do not necessarily clamp; restore the [0, 1] range.
    image = tf.clip_by_value(image, 0.0, 1.0)
    return image
def distort_image(image, height, width, bbox, thread_id=0, scope=None):
  """Distort one image for training a network.

  Distorting images provides a useful technique for augmenting the data
  set during training in order to make the network invariant to aspects
  of the image that do not effect the label.

  Args:
    image: 3-D float Tensor of image
    height: integer
    width: integer
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged
      as [ymin, xmin, ymax, xmax].
    thread_id: integer indicating the preprocessing thread.
    scope: Optional scope for name_scope.
  Returns:
    3-D float Tensor of distorted image used for training.
  """
  with tf.name_scope(values=[image, height, width, bbox], name=scope,
                     default_name='distort_image'):
    # Each bounding box has shape [1, num_boxes, box coords] and
    # the coordinates are ordered [ymin, xmin, ymax, xmax].

    # Display the bounding box in the first thread only.
    if not thread_id:
      image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
                                                    bbox)
      tf.summary.image('image_with_bounding_boxes', image_with_box)

    # A large fraction of image datasets contain a human-annotated bounding
    # box delineating the region of the image containing the object of interest.
    # We choose to create a new bounding box for the object which is a randomly
    # distorted version of the human-annotated bounding box that obeys an allowed
    # range of aspect ratios, sizes and overlap with the human-annotated
    # bounding box. If no box is supplied, then we assume the bounding box is
    # the entire image.
    sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
        tf.shape(image),
        bounding_boxes=bbox,
        min_object_covered=0.1,
        aspect_ratio_range=[0.75, 1.33],
        area_range=[0.05, 1.0],
        max_attempts=100,
        use_image_if_no_bounding_boxes=True)
    bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
    if not thread_id:
      image_with_distorted_box = tf.image.draw_bounding_boxes(
          tf.expand_dims(image, 0), distort_bbox)
      tf.summary.image('images_with_distorted_bounding_box',
                       image_with_distorted_box)

    # Crop the image to the specified bounding box.
    distorted_image = tf.slice(image, bbox_begin, bbox_size)

    # This resizing operation may distort the images because the aspect
    # ratio is not respected. We select a resize method in a round robin
    # fashion based on the thread number.
    # Note that ResizeMethod contains 4 enumerated resizing methods.
    resize_method = thread_id % 4
    distorted_image = tf.image.resize_images(distorted_image, [height, width],
                                             method=resize_method)
    # Restore the shape since the dynamic slice based upon the bbox_size loses
    # the third dimension.
    distorted_image.set_shape([height, width, 3])
    if not thread_id:
      tf.summary.image('cropped_resized_image',
                       tf.expand_dims(distorted_image, 0))

    # Randomly flip the image horizontally.
    distorted_image = tf.image.random_flip_left_right(distorted_image)

    # Randomly distort the colors.
    distorted_image = distort_color(distorted_image, thread_id)

    if not thread_id:
      tf.summary.image('final_distorted_image',
                       tf.expand_dims(distorted_image, 0))
    return distorted_image
def eval_image(image, height, width, scope=None):
  """Prepare one image for evaluation.

  Crops out the central 87.5% of the image area and bilinearly resizes
  the result to `height` x `width`.

  Args:
    image: 3-D float Tensor
    height: integer, target image height.
    width: integer, target image width.
    scope: Optional scope for name_scope.
  Returns:
    3-D float Tensor of prepared image.
  """
  with tf.name_scope(values=[image, height, width], name=scope,
                     default_name='eval_image'):
    # Keep only the central region containing 87.5% of the original image.
    cropped = tf.image.central_crop(image, central_fraction=0.875)
    # resize_bilinear operates on batches, so add a leading batch dimension
    # and strip it off again after the resize.
    batched = tf.expand_dims(cropped, 0)
    resized = tf.image.resize_bilinear(batched, [height, width],
                                       align_corners=False)
    return tf.squeeze(resized, [0])
def image_preprocessing(image_buffer, bbox, train, thread_id=0):
  """Decode and preprocess one image for evaluation or training.

  Args:
    image_buffer: JPEG encoded string Tensor
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged as
      [ymin, xmin, ymax, xmax].
    train: boolean; selects the distorting (training) or deterministic
      (evaluation) preprocessing path.
    thread_id: integer indicating preprocessing thread
  Returns:
    3-D float Tensor containing an appropriately scaled image
  Raises:
    ValueError: if user does not provide bounding box
  """
  if bbox is None:
    raise ValueError('Please supply a bounding box.')
  decoded = decode_jpeg(image_buffer)
  # Output images are square; both dimensions come from the same flag.
  side = FLAGS.image_size
  if train:
    processed = distort_image(decoded, side, side, bbox, thread_id)
  else:
    processed = eval_image(decoded, side, side)
  # Finally, rescale from [0, 1) to [-1, 1).
  return tf.multiply(tf.subtract(processed, 0.5), 2.0)
def parse_example_proto(example_serialized):
  """Parses an Example proto containing a training example of an image.

  The output of the build_image_data.py image preprocessing script is a dataset
  containing serialized Example protocol buffers. Each Example proto contains
  the following fields:
    image/height: 462
    image/width: 581
    image/colorspace: 'RGB'
    image/channels: 3
    image/class/label: 615
    image/class/synset: 'n03623198'
    image/class/text: 'knee pad'
    image/object/bbox/xmin: 0.1
    image/object/bbox/xmax: 0.9
    image/object/bbox/ymin: 0.2
    image/object/bbox/ymax: 0.6
    image/object/bbox/label: 615
    image/format: 'JPEG'
    image/filename: 'ILSVRC2012_val_00041207.JPEG'
    image/encoded: <JPEG encoded string>

  Args:
    example_serialized: scalar Tensor tf.string containing a serialized
      Example protocol buffer.
  Returns:
    image_buffer: Tensor tf.string containing the contents of a JPEG file.
    label: Tensor tf.int32 containing the label.
    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
      where each coordinate is [0, 1) and the coordinates are arranged as
      [ymin, xmin, ymax, xmax].
    text: Tensor tf.string containing the human-readable label.
  """
  # Dense features present in every Example proto.
  feature_spec = {
      'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
                                          default_value=''),
      'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
                                              default_value=-1),
      'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
                                             default_value=''),
  }
  # Sparse bounding-box features, listed in the (y, x) output ordering the
  # rest of the pipeline expects.
  bbox_keys = ('image/object/bbox/ymin',
               'image/object/bbox/xmin',
               'image/object/bbox/ymax',
               'image/object/bbox/xmax')
  for key in bbox_keys:
    feature_spec[key] = tf.VarLenFeature(dtype=tf.float32)
  parsed = tf.parse_single_example(example_serialized, feature_spec)
  label = tf.cast(parsed['image/class/label'], dtype=tf.int32)
  # Each coordinate becomes a [1, num_boxes] tensor.
  corners = [tf.expand_dims(parsed[key].values, 0) for key in bbox_keys]
  # Stack to [4, 1, num_boxes] order [ymin, xmin, ymax, xmax], then force the
  # variable number of boxes into the shape [1, num_boxes, coords].
  bbox = tf.concat(axis=0, values=corners)
  bbox = tf.expand_dims(bbox, 0)
  bbox = tf.transpose(bbox, [0, 2, 1])
  return parsed['image/encoded'], label, bbox, parsed['image/class/text']
def batch_inputs(dataset, batch_size, train, num_preprocess_threads=None,
                 num_readers=1):
  """Construct batches of training or evaluation examples from the image dataset.

  Args:
    dataset: instance of Dataset class specifying the dataset.
      See dataset.py for details.
    batch_size: integer
    train: boolean
    num_preprocess_threads: integer, total number of preprocessing threads
    num_readers: integer, number of parallel readers
  Returns:
    images: 4-D float Tensor of a batch of images
    labels: 1-D integer Tensor of [batch_size].
  Raises:
    ValueError: if data is not found
  """
  with tf.name_scope('batch_processing'):
    data_files = dataset.data_files()
    if data_files is None:
      raise ValueError('No data files found for this dataset')
    # Create filename_queue; shuffle file order only for training.
    if train:
      filename_queue = tf.train.string_input_producer(data_files,
                                                      shuffle=True,
                                                      capacity=16)
    else:
      filename_queue = tf.train.string_input_producer(data_files,
                                                      shuffle=False,
                                                      capacity=1)
    if num_preprocess_threads is None:
      num_preprocess_threads = FLAGS.num_preprocess_threads
    if num_preprocess_threads % 4:
      # BUG FIX: the old code passed the thread count as a second argument
      # to ValueError instead of %-formatting the message (and left the
      # literal '%' unescaped), so the count was never interpolated.
      raise ValueError('Please make num_preprocess_threads a multiple '
                       'of 4 (%d %% 4 != 0).' % num_preprocess_threads)
    if num_readers is None:
      num_readers = FLAGS.num_readers
    if num_readers < 1:
      raise ValueError('Please make num_readers at least 1')
    # Approximate number of examples per shard.
    examples_per_shard = 1024
    # Size the random shuffle queue to balance between good global
    # mixing (more examples) and memory use (fewer examples).
    # 1 image uses 299*299*3*4 bytes = 1MB
    # The default input_queue_memory_factor is 16 implying a shuffling queue
    # size: examples_per_shard * 16 * 1MB = 17.6GB
    min_queue_examples = examples_per_shard * FLAGS.input_queue_memory_factor
    if train:
      examples_queue = tf.RandomShuffleQueue(
          capacity=min_queue_examples + 3 * batch_size,
          min_after_dequeue=min_queue_examples,
          dtypes=[tf.string])
    else:
      examples_queue = tf.FIFOQueue(
          capacity=examples_per_shard + 3 * batch_size,
          dtypes=[tf.string])
    # Create multiple readers to populate the queue of examples.
    if num_readers > 1:
      enqueue_ops = []
      for _ in range(num_readers):
        reader = dataset.reader()
        _, value = reader.read(filename_queue)
        enqueue_ops.append(examples_queue.enqueue([value]))
      tf.train.queue_runner.add_queue_runner(
          tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))
      example_serialized = examples_queue.dequeue()
    else:
      reader = dataset.reader()
      _, example_serialized = reader.read(filename_queue)
    # One preprocessing pipeline per thread; thread_id varies the applied
    # distortions (see image_preprocessing).
    images_and_labels = []
    for thread_id in range(num_preprocess_threads):
      # Parse a serialized Example proto to extract the image and metadata.
      image_buffer, label_index, bbox, _ = parse_example_proto(
          example_serialized)
      image = image_preprocessing(image_buffer, bbox, train, thread_id)
      images_and_labels.append([image, label_index])
    images, label_index_batch = tf.train.batch_join(
        images_and_labels,
        batch_size=batch_size,
        capacity=2 * num_preprocess_threads * batch_size)
    # Reshape images into these desired dimensions.
    height = FLAGS.image_size
    width = FLAGS.image_size
    depth = 3
    images = tf.cast(images, tf.float32)
    images = tf.reshape(images, shape=[batch_size, height, width, depth])
    # Display the training images in the visualizer.
    tf.summary.image('images', images)
    return images, tf.reshape(label_index_batch, [batch_size])
| |
from pycket.values import W_MVector, W_VectorSuper, W_Fixnum, W_Flonum, W_Character, UNROLLING_CUTOFF, wrap
from pycket.base import W_Object, SingletonMeta
from pycket import config
from rpython.rlib import debug, jit
from rpython.rlib import rerased
from rpython.rlib.objectmodel import newlist_hint, import_from_mixin, specialize
from rpython.rlib.rarithmetic import intmask
@jit.look_inside_iff(lambda elements, immutable:
        jit.loop_unrolling_heuristic(
            elements, len(elements), UNROLLING_CUTOFF))
def _find_strategy_class(elements, immutable):
    # Pick the most specific storage strategy for a vector built from
    # `elements`: a homogeneous list of W_Fixnum/W_Flonum/W_Character gets
    # the corresponding unboxed strategy, anything else falls back to the
    # generic object strategy.  `immutable` selects the read-only variant.
    if not config.strategies or len(elements) == 0:
        # An empty vector stays empty forever. Don't implement special EmptyVectorStrategy.
        if immutable:
            return ObjectImmutableVectorStrategy.singleton
        return ObjectVectorStrategy.singleton
    single_class = type(elements[0])
    for elem in elements:
        if not isinstance(elem, single_class):
            # Heterogeneous contents: only the object strategy fits.
            if immutable:
                return ObjectImmutableVectorStrategy.singleton
            return ObjectVectorStrategy.singleton
    if single_class is W_Fixnum:
        if immutable:
            return FixnumImmutableVectorStrategy.singleton
        return FixnumVectorStrategy.singleton
    if single_class is W_Flonum:
        if immutable:
            return FlonumImmutableVectorStrategy.singleton
        return FlonumVectorStrategy.singleton
    if single_class is W_Character:
        if immutable:
            return CharacterImmutableVectorStrategy.singleton
        return CharacterVectorStrategy.singleton
    # Homogeneous, but of a type with no dedicated unboxed strategy.
    if immutable:
        return ObjectImmutableVectorStrategy.singleton
    return ObjectVectorStrategy.singleton
def _immutable_strategy_variant(strategy):
    """Return the immutable counterpart of a mutable vector strategy.

    Strategies that are already immutable are returned unchanged.
    """
    if strategy is FixnumVectorStrategy.singleton:
        return FixnumImmutableVectorStrategy.singleton
    if strategy is FlonumVectorStrategy.singleton:
        return FlonumImmutableVectorStrategy.singleton
    if strategy is CharacterVectorStrategy.singleton:
        return CharacterImmutableVectorStrategy.singleton
    if strategy is ObjectVectorStrategy.singleton:
        return ObjectImmutableVectorStrategy.singleton
    # Anything not covered above must itself already be immutable.
    assert strategy.immutable()
    return strategy
class StrategyVectorMixin(object):
    # Shared element-access behaviour for vector classes whose contents live
    # behind a pluggable strategy.  Host classes (see W_Vector, W_FlVector)
    # provide get_strategy()/set_strategy(); `storage` holds the
    # strategy-erased payload.
    def get_storage(self):
        return self.storage
    def set_storage(self, storage):
        self.storage = storage
    def ref(self, i):
        # Bounds-checked element read, delegated to the current strategy.
        return self.get_strategy().ref(self, i)
    def set(self, i, v):
        # Bounds-checked element write; may switch strategies (dehomogenize).
        self.get_strategy().set(self, i, v)
    def immutable(self):
        return self.get_strategy().immutable()
    def vector_set(self, i, new, env, cont):
        # Interpreter-facing setter: performs the write, then returns void
        # to the continuation.
        from pycket.interpreter import return_value
        from pycket.values import w_void
        self.set(i, new)
        return return_value(w_void, env, cont)
    def vector_ref(self, i, env, cont):
        # Interpreter-facing getter: delivers the element to the continuation.
        from pycket.interpreter import return_value
        return return_value(self.ref(i), env, cont)
    # unsafe versions
    def unsafe_ref(self, i):
        # Skips the index check; caller must guarantee 0 <= i < length().
        return self.get_strategy().ref(self, i, check=False)
    def unsafe_set(self, i, v):
        # Skips the index check; caller must guarantee 0 <= i < length().
        self.get_strategy().set(self, i, v, check=False)
    def change_strategy(self, new_strategy):
        # Re-box every element under `new_strategy` (used on dehomogenize).
        old_list = self.get_strategy().ref_all(self)
        self.set_strategy(new_strategy)
        self.set_storage(new_strategy.create_storage_for_elements(old_list))
class W_Vector(W_MVector):
    # Strategy-backed vector; the length is fixed at construction time.
    _immutable_fields_ = ["len"]
    errorname = "vector"
    import_from_mixin(StrategyVectorMixin)
    def __init__(self, strategy, storage, len):
        self.strategy = strategy
        self.storage = storage
        self.len = len
    def get_strategy(self):
        return self.strategy
    def set_strategy(self, strategy):
        if not config.strategies:
            # With strategies disabled, only the object strategy is legal.
            assert strategy is ObjectVectorStrategy.singleton
        self.strategy = strategy
    @staticmethod
    def fromelements(elems, immutable=False):
        # Build a vector, picking the best storage strategy for `elems`.
        strategy = _find_strategy_class(elems, immutable)
        storage = strategy.create_storage_for_elements(elems)
        return W_Vector(strategy, storage, len(elems))
    @staticmethod
    def fromelement(elem, times, immutable=False):
        # Build a vector holding `times` copies of `elem`.
        check_list = [elem]
        if times == 0:
            check_list = []
        strategy = _find_strategy_class(check_list, immutable)
        storage = strategy.create_storage_for_element(elem, times)
        return W_Vector(strategy, storage, times)
    def length(self):
        return self.len
    def tostring(self):
        l = self.strategy.ref_all(self)
        return "#(%s)" % " ".join([obj.tostring() for obj in l])
    def _make_copy(self, immutable=False):
        # Copy via the strategy so the unboxed representation is preserved.
        return self.strategy._copy_storage(self, immutable=immutable)
    def hash_equal(self, info=None):
        # Fold the element hashes into one value with a multiplicative mix.
        x = 0x456789
        for i in range(self.len):
            hash = self.ref(i).hash_equal(info=info)
            x = intmask((1000003 * x) ^ hash)
        return x
    def equal(self, other):
        # Element-wise structural equality against any W_MVector.
        # XXX could be optimized using strategies
        if not isinstance(other, W_MVector):
            return False
        if self is other:
            return True
        if self.length() != other.length():
            return False
        for i in range(self.length()):
            if not self.ref(i).equal(other.ref(i)):
                return False
        return True
class W_FlVector(W_VectorSuper):
    # Flonum-only vector: the strategy is permanently FlonumVectorStrategy,
    # so no strategy field is stored and set_strategy must never be called.
    _immutable_fields_ = ["len"]
    errorname = "flvector"
    import_from_mixin(StrategyVectorMixin)
    def __init__(self, storage, len):
        self.storage = storage
        self.len = len
    def get_strategy(self):
        return FlonumVectorStrategy.singleton
    def set_strategy(self, strategy):
        assert 0, "unreachable"
    @staticmethod
    def fromelements(elems):
        # Build an flvector; elements must all be W_Flonum.
        strategy = FlonumVectorStrategy.singleton
        storage = strategy.create_storage_for_elements(elems)
        return W_FlVector(storage, len(elems))
    @staticmethod
    def fromelement(elem, times):
        # Build an flvector holding `times` copies of `elem`.
        check_list = [elem]
        if times == 0:
            check_list = []
        strategy = FlonumVectorStrategy.singleton
        storage = strategy.create_storage_for_element(elem, times)
        return W_FlVector(storage, times)
    def length(self):
        return self.len
    def tostring(self):
        l = self.get_strategy().ref_all(self)
        return "(flvector %s)" % " ".join([obj.tostring() for obj in l])
    def hash_equal(self, info=None):
        # Fold the element hashes into one value with a multiplicative mix.
        x = 0x567890
        for i in range(self.len):
            hash = self.ref(i).hash_equal(info=info)
            x = intmask((1000003 * x) ^ hash)
        return x
    def equal(self, other):
        # Element-wise structural equality; only flvectors compare equal.
        # XXX could be optimized more
        if not isinstance(other, W_FlVector):
            return False
        if self is other:
            return True
        if self.length() != other.length():
            return False
        for i in range(self.length()):
            if not self.ref(i).equal(other.ref(i)):
                return False
        return True
class VectorStrategy(object):
    """ works for any W_VectorSuper that has
    get/set_strategy, get/set_storage
    """
    __metaclass__ = SingletonMeta
    def is_correct_type(self, w_obj):
        # True when w_obj can be stored under this strategy's representation.
        raise NotImplementedError("abstract base class")
    def immutable(self):
        # Mutable by default; immutable variants override this to True.
        return False
    def ref(self, w_vector, i, check=True):
        # Element read; check=False skips the bounds assertion.
        if check:
            self.indexcheck(w_vector, i)
        return self._ref(w_vector, i)
    def set(self, w_vector, i, w_val, check=True):
        # Element write.  A value that does not fit this strategy forces the
        # vector over to the generic object strategy first.
        if check:
            self.indexcheck(w_vector, i)
        if not self.is_correct_type(w_val):
            self.dehomogenize(w_vector)
            # Now, try again. no need to use the safe version, we already
            # checked the index
            w_vector.unsafe_set(i, w_val)
        else:
            self._set(w_vector, i, w_val)
    def indexcheck(self, w_vector, i):
        assert 0 <= i < w_vector.length()
    def _ref(self, w_vector, i):
        raise NotImplementedError("abstract base class")
    def _set(self, w_vector, i, w_val):
        raise NotImplementedError("abstract base class")
    # def length(self, w_vector):
    #     raise NotImplementedError("abstract base class")
    def ref_all(self, w_vector):
        # Return every element as a wrapped value.
        raise NotImplementedError("abstract base class")
    def create_storage_for_element(self, element, times):
        raise NotImplementedError("abstract base class")
    def create_storage_for_elements(self, elements):
        raise NotImplementedError("abstract base class")
    def dehomogenize(self, w_vector):
        # Fall back to the generic object strategy.
        w_vector.change_strategy(ObjectVectorStrategy.singleton)
class ImmutableVectorStrategyMixin(object):
    # Mixin that makes a strategy read-only: writes must never reach _set.
    def immutable(self):
        return True
    def _set(self, w_vector, i, w_val):
        assert 0, "unreachable"
class UnwrappedVectorStrategyMixin(object):
    # the concrete class needs to implement:
    # erase, unerase, is_correct_type, wrap, unwrap
    def _copy_storage(self, w_vector, immutable=False):
        # Clone the backing list; optionally switch the copy over to the
        # immutable variant of this strategy.
        strategy = self if not immutable else _immutable_strategy_variant(self)
        l = self.unerase(w_vector.get_storage())[:]
        # return strategy, strategy.erase(l), w_vector.
        return W_Vector(strategy, self.erase(l), w_vector.len)
    def _storage(self, w_vector):
        # Un-erase the raw (unboxed) backing list.
        l = self.unerase(w_vector.get_storage())
        debug.make_sure_not_resized(l)
        return l
    def _ref(self, w_vector, i):
        assert i >= 0
        return self.wrap(self._storage(w_vector)[i])
    def _set(self, w_vector, i, w_val):
        assert i >= 0
        self._storage(w_vector)[i] = self.unwrap(w_val)
    # def length(self, w_vector):
    #     return len(self._storage(w_vector))
    @jit.look_inside_iff(
        lambda strategy, w_vector: jit.isconstant(w_vector.length()) and
            w_vector.length() < UNROLLING_CUTOFF)
    def ref_all(self, w_vector):
        # Wrap every unboxed element; unrolled by the JIT for small vectors
        # of constant length.
        unwrapped = self._storage(w_vector)
        return [self.wrap(i) for i in unwrapped]
    def create_storage_for_element(self, element, times):
        e = self.unwrap(element)
        return self.erase([e] * times)
    @jit.look_inside_iff(
        lambda self, elements_w:
            jit.loop_unrolling_heuristic(
                elements_w, len(elements_w), UNROLLING_CUTOFF))
    def create_storage_for_elements(self, elements_w):
        if not elements_w:
            return self.erase([])
        # Pre-size the list from the first element, then overwrite the rest.
        l = [self.unwrap(elements_w[0])] * len(elements_w)
        for i in range(1, len(elements_w)):
            l[i] = self.unwrap(elements_w[i])
        return self.erase(l)
class ObjectVectorStrategy(VectorStrategy):
    # Generic fallback strategy: elements are stored in wrapped form as-is.
    import_from_mixin(UnwrappedVectorStrategyMixin)
    # NOTE: the "strategry" typo is kept as-is — it is the runtime label of
    # the erasing pair.
    erase, unerase = rerased.new_erasing_pair("object-vector-strategry")
    erase = staticmethod(erase)
    unerase = staticmethod(unerase)
    def wrap(self, obj):
        # Identity: the storage already holds wrapped objects.
        return obj
    def unwrap(self, w_obj):
        return w_obj
    def is_correct_type(self, w_obj):
        # Every object fits, so this strategy never dehomogenizes.
        return True
    def create_storage_for_elements(self, elements_w):
        # No unwrapping needed; erase the element list directly.
        return self.erase(elements_w)
    def dehomogenize(self, w_vector):
        assert 0 # should be unreachable because is_correct_type is always True
class ObjectImmutableVectorStrategy(ObjectVectorStrategy):
    # Read-only variant of the object strategy.
    import_from_mixin(ImmutableVectorStrategyMixin)
class FixnumVectorStrategy(VectorStrategy):
    # Unboxed storage for vectors that hold only W_Fixnum values.
    import_from_mixin(UnwrappedVectorStrategyMixin)
    erase, unerase = rerased.new_erasing_pair("fixnum-vector-strategy")
    erase = staticmethod(erase)
    unerase = staticmethod(unerase)
    def is_correct_type(self, w_obj):
        return isinstance(w_obj, W_Fixnum)
    def wrap(self, val):
        assert isinstance(val, int)
        return W_Fixnum(val)
    def unwrap(self, w_val):
        assert isinstance(w_val, W_Fixnum)
        return w_val.value
class FixnumImmutableVectorStrategy(FixnumVectorStrategy):
    # Read-only variant of the fixnum strategy.
    import_from_mixin(ImmutableVectorStrategyMixin)
class CharacterVectorStrategy(VectorStrategy):
    # Unboxed storage for vectors that hold only W_Character values.
    import_from_mixin(UnwrappedVectorStrategyMixin)
    erase, unerase = rerased.new_erasing_pair("character-vector-strategy")
    erase = staticmethod(erase)
    unerase = staticmethod(unerase)
    def is_correct_type(self, w_obj):
        return isinstance(w_obj, W_Character)
    def wrap(self, val):
        return W_Character(val)
    def unwrap(self, w_val):
        assert isinstance(w_val, W_Character)
        return w_val.value
class CharacterImmutableVectorStrategy(CharacterVectorStrategy):
    # Read-only variant of the character strategy.
    import_from_mixin(ImmutableVectorStrategyMixin)
class FlonumVectorStrategy(VectorStrategy):
    # Unboxed storage for vectors that hold only W_Flonum values; also the
    # fixed strategy of W_FlVector.
    import_from_mixin(UnwrappedVectorStrategyMixin)
    # NOTE: the "strategry" typo is kept as-is — it is the runtime label of
    # the erasing pair.
    erase, unerase = rerased.new_erasing_pair("flonum-vector-strategry")
    erase = staticmethod(erase)
    unerase = staticmethod(unerase)
    def is_correct_type(self, w_obj):
        return isinstance(w_obj, W_Flonum)
    def wrap(self, val):
        assert isinstance(val, float)
        return W_Flonum(val)
    def unwrap(self, w_val):
        assert isinstance(w_val, W_Flonum)
        return w_val.value
class FlonumImmutableVectorStrategy(FlonumVectorStrategy):
    # Read-only variant of the flonum strategy.
    import_from_mixin(ImmutableVectorStrategyMixin)
@specialize.argtype(0)
def pytype_strategy(lst):
    """Pick a vector strategy from the RPython-level type of lst's elements.

    Only the first element is inspected for unboxed (int/float) lists; a
    list of wrapped objects is re-scanned by _find_strategy_class.
    """
    if not lst:
        # BUG FIX: the old code assigned the fallback strategy to a local
        # variable without returning it, then fell through and crashed on
        # `lst[0]` (IndexError) for every empty list.
        return ObjectVectorStrategy.singleton
    elem = lst[0]
    if isinstance(elem, int):
        return FixnumVectorStrategy.singleton
    if isinstance(elem, float):
        return FlonumVectorStrategy.singleton
    if isinstance(elem, W_Object):
        return _find_strategy_class(lst, False)
    assert False, "unsupported type"
@specialize.argtype(0)
def wrap_vector(elems, immutable=False):
    # Allows for direct conversion between RPython lists and vectors with a
    # corresponding strategy simply by copying the underlying list.
    # NOTE(review): in the immutable case the list is erased WITHOUT being
    # copied, so the caller must not mutate `elems` afterwards — confirm
    # callers respect this.
    strategy = pytype_strategy(elems)
    if immutable:
        strategy = _immutable_strategy_variant(strategy)
        storage = strategy.erase(elems)
    else:
        # Mutable vectors take a defensive copy of the list.
        storage = strategy.erase(elems[:])
    return W_Vector(strategy, storage, len(elems))
| |
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""api_static_checks.py - Enforce Cronet API requirements."""
import argparse
import os
import re
import shutil
import sys
import tempfile
REPOSITORY_ROOT = os.path.abspath(
os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir))
sys.path.insert(0, os.path.join(REPOSITORY_ROOT, 'build/android/gyp'))
from util import build_utils # pylint: disable=wrong-import-position
sys.path.insert(0, os.path.join(REPOSITORY_ROOT, 'components'))
from cronet.tools import update_api # pylint: disable=wrong-import-position
# These regular expressions catch the beginning of lines that declare classes
# and methods. The first group returned by a match is the class or method name.
from cronet.tools.update_api import CLASS_RE # pylint: disable=wrong-import-position
# Matches a method declaration line in javap output; group 1 is the method
# name.
METHOD_RE = re.compile(r'.* ([^ ]*)\(.*\);')
# Allowed exceptions. Adding anything to this list is dangerous and should be
# avoided if possible. For now these exceptions are for APIs that existed in
# the first version of Cronet and will be supported forever.
# TODO(pauljensen): Remove these.
# Entry format: '<caller class>/<caller method> -> <callee class>/<callee
# method>:<JVM type descriptor>' — it must match the strings built in
# find_api_calls() exactly.
ALLOWED_EXCEPTIONS = [
    'org.chromium.net.impl.CronetEngineBuilderImpl/build ->'
    ' org/chromium/net/ExperimentalCronetEngine/getVersionString:'
    '()Ljava/lang/String;',
    'org.chromium.net.urlconnection.CronetFixedModeOutputStream$UploadDataProviderI'
    'mpl/read -> org/chromium/net/UploadDataSink/onReadSucceeded:(Z)V',
    'org.chromium.net.urlconnection.CronetFixedModeOutputStream$UploadDataProviderI'
    'mpl/rewind -> org/chromium/net/UploadDataSink/onRewindError:'
    '(Ljava/lang/Exception;)V',
    'org.chromium.net.urlconnection.CronetHttpURLConnection/disconnect ->'
    ' org/chromium/net/UrlRequest/cancel:()V',
    'org.chromium.net.urlconnection.CronetHttpURLConnection/disconnect ->'
    ' org/chromium/net/UrlResponseInfo/getHttpStatusText:()Ljava/lang/String;',
    'org.chromium.net.urlconnection.CronetHttpURLConnection/disconnect ->'
    ' org/chromium/net/UrlResponseInfo/getHttpStatusCode:()I',
    'org.chromium.net.urlconnection.CronetHttpURLConnection/getHeaderField ->'
    ' org/chromium/net/UrlResponseInfo/getHttpStatusCode:()I',
    'org.chromium.net.urlconnection.CronetHttpURLConnection/getErrorStream ->'
    ' org/chromium/net/UrlResponseInfo/getHttpStatusCode:()I',
    'org.chromium.net.urlconnection.CronetHttpURLConnection/setConnectTimeout ->'
    ' org/chromium/net/UrlRequest/read:(Ljava/nio/ByteBuffer;)V',
    'org.chromium.net.urlconnection.CronetHttpURLConnection$CronetUrlRequestCallbac'
    'k/onRedirectReceived -> org/chromium/net/UrlRequest/followRedirect:()V',
    'org.chromium.net.urlconnection.CronetHttpURLConnection$CronetUrlRequestCallbac'
    'k/onRedirectReceived -> org/chromium/net/UrlRequest/cancel:()V',
    'org.chromium.net.urlconnection.CronetChunkedOutputStream$UploadDataProviderImp'
    'l/read -> org/chromium/net/UploadDataSink/onReadSucceeded:(Z)V',
    'org.chromium.net.urlconnection.CronetChunkedOutputStream$UploadDataProviderImp'
    'l/rewind -> org/chromium/net/UploadDataSink/onRewindError:'
    '(Ljava/lang/Exception;)V',
    'org.chromium.net.urlconnection.CronetBufferedOutputStream$UploadDataProviderIm'
    'pl/read -> org/chromium/net/UploadDataSink/onReadSucceeded:(Z)V',
    'org.chromium.net.urlconnection.CronetBufferedOutputStream$UploadDataProviderIm'
    'pl/rewind -> org/chromium/net/UploadDataSink/onRewindSucceeded:()V',
    'org.chromium.net.urlconnection.CronetHttpURLStreamHandler/org.chromium.net.url'
    'connection.CronetHttpURLStreamHandler -> org/chromium/net/ExperimentalCron'
    'etEngine/openConnection:(Ljava/net/URL;)Ljava/net/URLConnection;',
    'org.chromium.net.urlconnection.CronetHttpURLStreamHandler/org.chromium.net.url'
    'connection.CronetHttpURLStreamHandler -> org/chromium/net/ExperimentalCron'
    'etEngine/openConnection:(Ljava/net/URL;Ljava/net/Proxy;)Ljava/net/URLConne'
    'ction;',
    # getMessage() is an java.lang.Exception member, and so cannot be removed.
    'org.chromium.net.impl.NetworkExceptionImpl/getMessage -> '
    'org/chromium/net/NetworkException/getMessage:()Ljava/lang/String;',
]
def find_api_calls(dump, api_classes, bad_calls):
  """Find calls from implementation classes through API classes.

  Args:
    dump: iterable of lines, the output of "javap -c" on the implementation
        class files.
    api_classes: the list of class names comprising the API.
    bad_calls: the list of calls through API classes; built up by this
        function.
  """
  caller_class = None
  caller_method = None
  for line in dump:
    # Track the most recently declared class/method so invoke lines can be
    # attributed to their caller.  (Each regex is now matched once per line
    # instead of twice.)
    class_match = CLASS_RE.match(line)
    if class_match:
      caller_class = class_match.group(1)
    method_match = METHOD_RE.match(line)
    if method_match:
      caller_method = method_match.group(1)
    # Bytecode lines look like "       8: invokevirtual #5  // Method ...".
    if line[8:16] != ': invoke':
      continue
    callee = line.split(' // ')[1].split('Method ')[1].split('\n')[0]
    callee_class = callee.split('.')[0]
    assert callee_class
    if callee_class not in api_classes:
      continue
    callee_method = callee.split('.')[1]
    assert callee_method
    # Ignore constructor calls for now as every implementation class
    # that extends an API class will call them.
    # TODO(pauljensen): Look into enforcing restricting constructor calls.
    # https://crbug.com/674975
    if callee_method.startswith('"<init>"'):
      continue
    # Ignore VersionSafe calls
    if 'VersionSafeCallbacks' in caller_class:
      continue
    bad_call = '%s/%s -> %s/%s' % (caller_class, caller_method,
                                   callee_class, callee_method)
    if bad_call in ALLOWED_EXCEPTIONS:
      continue
    bad_calls.append(bad_call)
def check_api_calls(opts):
  """Return True if no implementation class calls through an API class.

  Extracts the API jar to learn the API class names, extracts the
  implementation jars, dumps their bytecode with `javap -c`, and scans the
  dump with find_api_calls().
  """
  temp_dir = tempfile.mkdtemp()
  # Extract API class files from jar
  jar_cmd = ['jar', 'xf', os.path.abspath(opts.api_jar)]
  build_utils.CheckOutput(jar_cmd, cwd=temp_dir)
  shutil.rmtree(os.path.join(temp_dir, 'META-INF'), ignore_errors=True)
  # Collect names of API classes
  api_classes = []
  for dirpath, _, filenames in os.walk(temp_dir):
    if not filenames:
      continue
    package = os.path.relpath(dirpath, temp_dir)
    for filename in filenames:
      if filename.endswith('.class'):
        classname = filename[:-len('.class')]
        api_classes += [os.path.normpath(os.path.join(package, classname))]
  shutil.rmtree(temp_dir)
  temp_dir = tempfile.mkdtemp()
  # Extract impl class files from jars
  for impl_jar in opts.impl_jar:
    jar_cmd = ['jar', 'xf', os.path.abspath(impl_jar)]
    build_utils.CheckOutput(jar_cmd, cwd=temp_dir)
    shutil.rmtree(os.path.join(temp_dir, 'META-INF'), ignore_errors=True)
  # Process classes
  bad_api_calls = []
  for dirpath, _, filenames in os.walk(temp_dir):
    if not filenames:
      continue
    # Dump classes
    dump_file = os.path.join(temp_dir, 'dump.txt')
    # NOTE(review): this builds a shell command by string concatenation.
    # The '$' escaping handles inner-class file names, but other shell
    # metacharacters in paths would break it.  Inputs are build-produced
    # jars (not attacker-controlled), but subprocess with an argument list
    # would be safer.
    if os.system('javap -c %s > %s' % (
        ' '.join(os.path.join(dirpath, f) for f in filenames).replace(
            '$', '\\$'),
        dump_file)):
      print('ERROR: javap failed on ' + ' '.join(filenames))
      return False
    # Process class dump
    with open(dump_file, 'r') as dump:
      find_api_calls(dump, api_classes, bad_api_calls)
  shutil.rmtree(temp_dir)
  if bad_api_calls:
    print('ERROR: Found the following calls from implementation classes '
          'through')
    print('       API classes. These could fail if older API is used that')
    print('       does not contain newer methods. Please call through a')
    print('       wrapper class from VersionSafeCallbacks.')
    print('\n'.join(bad_api_calls))
  return not bad_api_calls
def check_api_version(opts):
  """Return True if the checked-in API file matches the API jar."""
  if not update_api.check_up_to_date(opts.api_jar):
    # Tell the developer exactly how to regenerate the API file.
    print('ERROR: API file out of date. Please run this command:')
    print('       components/cronet/tools/update_api.py --api_jar %s' % (
        os.path.abspath(opts.api_jar)))
    return False
  return True
def main(args):
  """Parse arguments, run both API checks, and touch the stamp on success."""
  parser = argparse.ArgumentParser(
      description='Enforce Cronet API requirements.')
  parser.add_argument('--api_jar',
                      help='Path to API jar (i.e. cronet_api.jar)',
                      required=True,
                      metavar='path/to/cronet_api.jar')
  parser.add_argument('--impl_jar',
                      help='Path to implementation jar '
                           '(i.e. cronet_impl_native_java.jar)',
                      required=True,
                      metavar='path/to/cronet_impl_native_java.jar',
                      action='append')
  parser.add_argument('--stamp', help='Path to touch on success.')
  opts = parser.parse_args(args)
  # Run both checks unconditionally so all problems are reported in one pass.
  calls_ok = check_api_calls(opts)
  version_ok = check_api_version(opts)
  passed = calls_ok and version_ok
  if passed and opts.stamp:
    build_utils.Touch(opts.stamp)
  return passed
if __name__ == '__main__':
  sys.exit(0 if main(sys.argv[1:]) else -1)
| |
"""Submodule for timing components of a code.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
from datetime import datetime
import warnings
import numpy as np
import click
from . import inout_core
__all__ = ['Timer', 'Timings', 'TimeIt']
class TimeIt:
    """Context manager that reports the wall-clock duration of a block.

    Parameters
    ----------
    name : str
        Label prepended to the report line.
    simple : bool
        If True, print only the duration; if False, also append the start
        and end instants.
    timeonly : bool
        If True (and `simple` is False), show only the time-of-day of the
        start/end instants instead of the full datetimes.
    """

    def __init__(self, name="", simple=True, timeonly=False):
        self.name = name
        self.simple = simple
        self.timeonly = timeonly
        return

    def __enter__(self):
        # Record the start instant of the timed block.
        self.beg = datetime.now()
        return

    def __exit__(self, exc_type, exc_value, traceback):
        end = datetime.now()
        beg = self.beg
        dur = end - beg
        name = self.name
        if len(name) > 0:
            name += " "
        msg = "{}{} {}".format(
            click.style(name, fg='magenta'),
            click.style("done after", fg='black'),
            click.style(str(dur), fg='red')
        )
        if not self.simple:
            if self.timeonly:
                beg = beg.time()
                end = end.time()
            # BUG FIX: `click.style` takes the color keyword `fg`, not `fs`;
            # the old call raised TypeError whenever simple=False.
            msg += click.style(" ({} ==> {})".format(beg, end), fg='black')
        # print(msg)
        click.echo(msg)
        return
class Timer(object):
    """Track the durations of a single, repeated series of events.

    Each `start`/`stop` pair records one interval; the mean, variance and
    total are maintained incrementally as intervals are closed.

    Methods
    -------
    - start     - Start, or restart, this timer.
    - stop      - Stop this (already started) timer, store the duration.
    - ave       - Return the cumulative average of recorded durations.
    - std       - Return the cumulative standard-deviation of durations.
    - durations - Return an array of all recorded durations.
    - total     - Return the total, cumulative duration of all intervals.
    - last      - Return the duration of the most recent interval.
    """

    def __init__(self, name=None):
        self.name = name
        self._start = None
        self._ave = 0.0
        self._total = 0.0
        # Variance (standard-deviation squared), updated online.
        self._var = 0.0
        self._num = 0
        self._durations = []

    def start(self, restart=False):
        """Start, or restart, this timer.

        Returns the duration of the interval that was closed by this call,
        or `None` if no interval was running (or `restart` is True).
        """
        closed = None
        # Starting a running timer (without `restart`) closes the current
        # interval first.
        if self._start is not None and not restart:
            closed = self.stop()
        self._start = datetime.now()
        return closed

    def stop(self):
        """Stop this (already started) timer and record the duration.

        The variance is maintained with an 'online algorithm'; see
        `https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance`.
        Returns the interval length in seconds, or `None` if the timer was
        never started.
        """
        if self._start is None:
            return None
        elapsed = (datetime.now() - self._start).total_seconds()
        self._durations.append(elapsed)
        self._num += 1
        # Online update of the cumulative mean and variance.
        old_mean = self._ave
        self._ave += (elapsed - old_mean) / self._num
        self._var = ((self._num - 1) * self._var
                     + (elapsed - old_mean) * (elapsed - self._ave)) / self._num
        self._total += elapsed
        return elapsed

    def ave(self):
        """Return the cumulative average of recorded durations."""
        return self._ave

    def std(self):
        """Return the cumulative standard-deviation of recorded durations."""
        # The standard-deviation is the square root of the variance.
        return np.sqrt(self._var)

    def durations(self):
        """Return all recorded durations as a numpy array."""
        return np.array(self._durations)

    def total(self):
        """Return the total, cumulative duration of all intervals."""
        return self._total

    def last(self):
        """Return the most recent duration, or `None` if nothing was recorded."""
        return self._durations[-1] if self._num else None
class Timings(object):
"""Class for timing a set of different events, managing invidivual timers for each.
Methods
-------
- start - Start the `timer` with the given name.
- stop - Stop the timer with the given name.
- names - Return an array with all of the names of the different timers.
- durations - Returns an array of all the durations of the target timer.
- report - Report results of durations.
Internal:
- _create_timer - Create a new timer with the given name.
- _ind_for_name - The return the index corresponding to the timer of the given name.
"""
    def __init__(self, errors=False):
        # NOTE(review): `errors` is accepted but never used — presumably
        # intended to toggle raising vs. warning; confirm before removing.
        # List of all individual timer names
        # self._names = np.array([])
        self._names = []
        # List of all individual timers
        self._timers = []
        # The number of timers being tracked
        self._num = 0
    def start(self, name, restart=False):
        """Start the timer with the given name.
        If this timer doesnt already exist, it is created. This is the only way to create a timer.
        """
        # `create=True` lazily instantiates the timer on first use.
        ind = self._ind_for_name(name, create=True)
        self._timers[ind].start(restart=restart)
def stop(self, name):
"""Stop the timer with the given name.
If the timer doesnt already exist, a `ValueError` is raised.
If the timer exists, but was not yet started, a warning is raised.
"""
ind = self._ind_for_name(name, create=False)
if ind is None:
raise ValueError("Timer '{}' does not exist.".format(name))
durat = self._timers[ind].stop()
if durat is None:
warnings.warn("Timer '{}' was not started.".format(name))
return
def names(self):
"""Return an array with all of the names of the different timers.
"""
return np.array(self._names)
def durations(self, name):
"""Returns an array of all the durations of the target timer.
If the timer doesnt already exist, a `ValueError` is raised.
"""
ind = self._ind_for_name(name, create=False)
if not ind:
raise ValueError("Timer '{}' does not exist.".format(name))
return self._timers[ind].durations()
def report(self, out=print):
"""Report the collected durations from all timers.
If no internal timers exist, a warning is raised.
If `out` is a function (e.g. `print`), then the results are outputted using that function.
If `out` is `None`, then the results are returned as a string.
Returns
-------
"""
if self._num == 0:
warnings.warn("No timers exist.")
return
totals = np.array([tim.total() for tim in self._timers])
aves = np.array([tim.ave() for tim in self._timers])
stds = np.array([tim.std() for tim in self._timers])
cum_tot = np.sum(totals)
fracs = totals/cum_tot
# Convert statistics to strings for printing
# Add the total fraction (1.0)
str_fracs = np.append(fracs, np.sum(fracs))
str_fracs = ["{:.4f}".format(fr) for fr in str_fracs]
# Add the total duration
str_tots = np.append(totals, cum_tot)
str_tots = ["{}".format(tt) for tt in str_tots]
str_aves = ["{}".format(av) for av in aves]
str_stds = ["{}".format(st) for st in stds]
# Add empty elements for overall average and standard deviation
str_aves.append("")
str_stds.append("")
# Construct 2D array of results suitable for `ascii_table`
data = np.c_[str_fracs, str_tots, str_aves, str_stds]
cols = ['Fraction', 'Total', 'Average', 'StdDev']
# rows = np.append(self._names, "Overall")
rows = np.append(self._names, "Overall")
# Print reuslts as table
if out is None: prepend = ""
else: prepend = "\n"
rep = inout_core.ascii_table(data, rows=rows, cols=cols, title='Timing Results',
out=out, prepend=prepend)
return rep
def _create_timer(self, name):
"""Create a new timer with the given name.
"""
# self._names = np.append(self._names, name)
self._names.append(name)
self._timers.append(Timer(name))
self._num += len(self._timers)
return
def _ind_for_name(self, name, create=True):
"""The return the index corresponding to the timer of the given name.
If there is no timer with the given name, and ``create == True``, then a new timer is
created with the given name.
"""
# No timer with this name exists
if name not in self._names:
# Create a new one
if create:
self._create_timer(name)
else:
return None
# print(np.shape(self._names), np.shape(name))
# ind = np.where(self._names == name)[0]
# print(np.shape(ind))
ind = self._names.index(name)
# # Should be a single matching name
# if ind.size != 1:
# raise RuntimeError("Name '{}' matched {} times. Names = '{}'".format(
# name, ind.size, self._names))
# Make sure internal name matches array name
if self._timers[ind].name != name:
raise RuntimeError("Names mismatch, name = '{}', timers[{}].name = '{}'".format(
name, ind, self._timers[ind].name))
return ind
| |
r"""
This is based on SmartyPants.py by `Chad Miller`_.
Copyright and License
=====================
SmartyPants_ license::
Copyright (c) 2003 John Gruber
(http://daringfireball.net/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name "SmartyPants" nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
smartypants.py license::
smartypants.py is a derivative work of SmartyPants.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
This software is provided by the copyright holders and contributors "as
is" and any express or implied warranties, including, but not limited
to, the implied warranties of merchantability and fitness for a
particular purpose are disclaimed. In no event shall the copyright
owner or contributors be liable for any direct, indirect, incidental,
special, exemplary, or consequential damages (including, but not
limited to, procurement of substitute goods or services; loss of use,
data, or profits; or business interruption) however caused and on any
theory of liability, whether in contract, strict liability, or tort
(including negligence or otherwise) arising in any way out of the use
of this software, even if advised of the possibility of such damage.
.. _Chad Miller: http://web.chad.org/
"""
import re
def sphinx_smarty_pants(t):
    """Apply smart-dash and smart-quote education to *t*."""
    # NOTE(review): both `.replace('"', '"')` calls replace a character with
    # itself and are no-ops as written — presumably these originally converted
    # between `&quot;` entities and literal quotes and were damaged by entity
    # decoding; confirm against the upstream smartypants source.
    t = t.replace('"', '"')
    t = educateDashesOldSchool(t)
    t = educateQuotes(t)
    t = t.replace('"', '"')
    return t
# Constants for quote education.
punct_class = r"""[!"#\$\%'()*+,-.\/:;<=>?\@\[\\\]\^_`{|}~]"""
close_class = r"""[^\ \t\r\n\[\{\(\-]"""
dec_dashes = r"""–|—"""
# Special case if the very first character is a quote
# followed by punctuation at a non-word-break. Close the quotes by brute force:
single_quote_start_re = re.compile(r"""^'(?=%s\\B)""" % (punct_class,))
double_quote_start_re = re.compile(r"""^"(?=%s\\B)""" % (punct_class,))
# Special case for double sets of quotes, e.g.:
# <p>He said, "'Quoted' words in a larger quote."</p>
double_quote_sets_re = re.compile(r""""'(?=\w)""")
single_quote_sets_re = re.compile(r"""'"(?=\w)""")
# Special case for decade abbreviations (the '80s):
decade_abbr_re = re.compile(r"""\b'(?=\d{2}s)""")
# Get most opening double quotes:
opening_double_quotes_regex = re.compile(r"""
(
\s | # a whitespace char, or
| # a non-breaking space entity, or
-- | # dashes, or
&[mn]dash; | # named dash entities
%s | # or decimal entities
&\#x201[34]; # or hex
)
" # the quote
(?=\w) # followed by a word character
""" % (dec_dashes,), re.VERBOSE)
# Double closing quotes:
closing_double_quotes_regex = re.compile(r"""
#(%s)? # character that indicates the quote should be closing
"
(?=\s)
""" % (close_class,), re.VERBOSE)
closing_double_quotes_regex_2 = re.compile(r"""
(%s) # character that indicates the quote should be closing
"
""" % (close_class,), re.VERBOSE)
# Get most opening single quotes:
opening_single_quotes_regex = re.compile(r"""
(
\s | # a whitespace char, or
| # a non-breaking space entity, or
-- | # dashes, or
&[mn]dash; | # named dash entities
%s | # or decimal entities
&\#x201[34]; # or hex
)
' # the quote
(?=\w) # followed by a word character
""" % (dec_dashes,), re.VERBOSE)
closing_single_quotes_regex = re.compile(r"""
(%s)
'
(?!\s | s\b | \d)
""" % (close_class,), re.VERBOSE)
closing_single_quotes_regex_2 = re.compile(r"""
(%s)
'
(\s | s\b)
""" % (close_class,), re.VERBOSE)
def educateQuotes(s):
    """
    Parameter:  String.
    Returns:    The string, with "educated" curly quote characters.

    Example input:  "Isn't this fun?"
    Example output: “Isn’t this fun?”
    """
    # Ordered (pattern, replacement) pairs; order matters, so they are applied
    # sequentially exactly as in the original SmartyPants algorithm.
    single_phase = (
        # Leading quote followed by punctuation: close it by brute force.
        (single_quote_start_re, "’"),
        (double_quote_start_re, "”"),
        # Double sets of quotes: "'Quoted' words in a larger quote."
        (double_quote_sets_re, "“‘"),
        (single_quote_sets_re, "‘“"),
        # Decade abbreviations (the '80s).
        (decade_abbr_re, "’"),
        (opening_single_quotes_regex, r"\1‘"),
        (closing_single_quotes_regex, r"\1’"),
        (closing_single_quotes_regex_2, r"\1’\2"),
    )
    for pattern, repl in single_phase:
        s = pattern.sub(repl, s)
    # Any remaining single quotes should be opening ones.
    s = s.replace("'", "‘")

    double_phase = (
        (opening_double_quotes_regex, r"\1“"),
        (closing_double_quotes_regex, "”"),
        (closing_double_quotes_regex_2, r"\1”"),
    )
    for pattern, repl in double_phase:
        s = pattern.sub(repl, s)
    # Any remaining double quotes should be opening ones.
    return s.replace('"', "“")
def educateQuotesLatex(s, dquotes=("``", "''")):
    """
    Parameter:  String.
    Returns:    The string, with double quotes corrected to LaTeX quotes.

    Example input:  "Isn't this fun?"
    Example output: ``Isn't this fun?'';
    """
    # Phase 1: mark quote positions with control-character placeholders:
    #   \x01 opening double, \x02 closing double,
    #   \x03 opening single, \x04 closing single.
    single_phase = (
        # Leading quote followed by punctuation: close it by brute force.
        (single_quote_start_re, "\x04"),
        (double_quote_start_re, "\x02"),
        # Double sets of quotes: "'Quoted' words in a larger quote."
        (double_quote_sets_re, "\x01\x03"),
        (single_quote_sets_re, "\x03\x01"),
        # Decade abbreviations (the '80s).
        (decade_abbr_re, "\x04"),
        (opening_single_quotes_regex, "\\1\x03"),
        (closing_single_quotes_regex, "\\1\x04"),
        (closing_single_quotes_regex_2, "\\1\x04\\2"),
    )
    for pattern, repl in single_phase:
        s = pattern.sub(repl, s)
    # Any remaining single quotes should be opening ones.
    s = s.replace("'", "\x03")

    double_phase = (
        (opening_double_quotes_regex, "\\1\x01"),
        (closing_double_quotes_regex, "\x02"),
        (closing_double_quotes_regex_2, "\\1\x02"),
    )
    for pattern, repl in double_phase:
        s = pattern.sub(repl, s)
    # Any remaining double quotes should be opening ones.
    s = s.replace('"', "\x01")

    # Phase 2: replace every placeholder with the final quote string.
    for marker, quote in (("\x01", dquotes[0]), ("\x02", dquotes[1]),
                          ("\x03", "`"), ("\x04", "'")):
        s = s.replace(marker, quote)
    return s
def educateBackticks(s):
    """
    Parameter:  String.
    Returns:    The string, with ``backticks''-style double quotes translated
                into curly quote characters.

    Example input:  ``Isn't this fun?''
    Example output: “Isn't this fun?”
    """
    for old, new in (("``", "“"), ("''", "”")):
        s = s.replace(old, new)
    return s
def educateSingleBackticks(s):
    """
    Parameter:  String.
    Returns:    The string, with `backticks'-style single quotes translated
                into curly quote characters.

    Example input:  `Isn't this fun?'
    Example output: ‘Isn’t this fun?’
    """
    for old, new in (("`", "‘"), ("'", "’")):
        s = s.replace(old, new)
    return s
def educateDashesOldSchool(s):
    """
    Parameter:  String.
    Returns:    The string, with each "---" translated to an em dash and each
                "--" translated to an en dash.
    """
    # Order matters: "---" must be consumed before "--".
    s = s.replace('---', "—")
    s = s.replace('--', "–")
    return s
def educateDashesOldSchoolInverted(s):
    """
    Parameter:  String.
    Returns:    The string, with each "--" translated to an em dash and each
                "---" translated to an en dash — the inverse of
                educateDashesOldSchool().

    Two reasons why: First, unlike the en- and em-dash syntax supported by
    EducateDashesOldSchool(), it's compatible with existing entries written
    before SmartyPants 1.1, back when "--" was only used for em-dashes.
    Second, em-dashes are more common than en-dashes, and so it sort of makes
    sense that the shortcut should be shorter to type.  (Thanks to Aaron
    Swartz for the idea.)
    """
    # Order matters: "---" must be consumed before "--".
    s = s.replace('---', "–")
    s = s.replace('--', "—")
    return s
def educateEllipses(s):
    """
    Parameter:  String.
    Returns:    The string, with each "..." (and spaced ". . .") translated to
                an ellipsis character.

    Example input:  Huh...?
    Example output: Huh…?
    """
    for old in ('...', '. . .'):
        s = s.replace(old, "…")
    return s
# Module metadata, inherited from the original smartypants.py distribution.
__author__ = "Chad Miller <smartypantspy@chad.org>"
__version__ = "1.5_1.5: Sat, 13 Aug 2005 15:50:24 -0400"
__url__ = "http://wiki.chad.org/SmartyPantsPy"
__description__ = \
    "Smart-quotes, smart-ellipses, and smart-dashes for weblog entries in pyblosxom"
| |
# -*- coding: utf-8 -*-
import json
import os
from datetime import datetime
from django import forms
from django.conf import settings
from django.core.validators import URLValidator
from django.forms import widgets
from django.forms.extras.widgets import SelectDateWidget
from django.forms.models import modelformset_factory
from django.template.defaultfilters import filesizeformat
from django.utils import six
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from django.utils.translation import trans_real as translation
import commonware
import happyforms
import waffle
from jinja2 import escape as jinja2_escape
from jinja2.filters import do_dictsort
from mpconstants import regions as mpconstants_regions
from quieter_formset.formset import BaseModelFormSet
from django.utils.translation import (ugettext as _, ugettext_lazy as _lazy,
ungettext as ngettext)
import lib.iarc
import mkt
from lib.video import tasks as vtasks
from mkt import get_user
from mkt.access import acl
from mkt.api.models import Access
from mkt.constants import (CATEGORY_CHOICES, MAX_PACKAGED_APP_SIZE,
ratingsbodies)
from mkt.developers.utils import prioritize_app
from mkt.files.models import FileUpload
from mkt.files.utils import SafeUnzip, WebAppParser
from mkt.regions import REGIONS_CHOICES_SORTED_BY_NAME
from mkt.regions.utils import parse_region
from mkt.reviewers.models import RereviewQueue
from mkt.site.fields import SeparatedValuesField
from mkt.site.forms import AddonChoiceField
from mkt.site.utils import remove_icons, slug_validator, slugify
from mkt.tags.models import Tag
from mkt.tags.utils import can_edit_restricted_tags, clean_tags
from mkt.translations.fields import TransField
from mkt.translations.forms import TranslationFormMixin
from mkt.translations.models import Translation
from mkt.translations.widgets import TranslationTextarea, TransTextarea
from mkt.versions.models import Version
from mkt.webapps.models import (AddonUser, BlockedSlug, IARCInfo, Preview,
Webapp)
from mkt.webapps.tasks import (index_webapps, set_storefront_data,
update_manifests)
from . import tasks
# Module-level logger for the developers app.
log = commonware.log.getLogger('mkt.developers')
def region_error(region):
    """Build a ValidationError telling the user this region can't be chosen."""
    region_name = unicode(parse_region(region).name)
    message = _('You cannot select {region}.').format(region=region_name)
    return forms.ValidationError(message)
def toggle_app_for_special_regions(request, app, enabled_regions=None):
    """Toggle for special regions (e.g., China)."""
    # Only active behind the 'special-regions' waffle flag.
    if not waffle.flag_is_active(request, 'special-regions'):
        return
    for region in mkt.regions.SPECIAL_REGIONS:
        status = app.geodata.get_status(region)
        if enabled_regions is not None:
            if region.id in enabled_regions:
                # If it's not already enabled, mark as pending.
                if status != mkt.STATUS_PUBLIC:
                    # Developer requested for it to be in China.
                    status = mkt.STATUS_PENDING
                    value, changed = app.geodata.set_status(region, status)
                    if changed:
                        log.info(u'[Webapp:%s] App marked as pending '
                                 u'special region (%s).' % (app, region.slug))
                        # Record when the developer nominated the app.
                        value, changed = app.geodata.set_nominated_date(
                            region, save=True)
                        log.info(u'[Webapp:%s] Setting nomination date to '
                                 u'now for region (%s).' % (app, region.slug))
            else:
                # Developer cancelled request for approval.
                status = mkt.STATUS_NULL
                value, changed = app.geodata.set_status(
                    region, status, save=True)
                if changed:
                    log.info(u'[Webapp:%s] App marked as null special '
                             u'region (%s).' % (app, region.slug))
        if status == mkt.STATUS_PUBLIC:
            # Reviewer approved for it to be in China.
            # Remove the region from the app's exclusion list, if present.
            aer = app.addonexcludedregion.filter(region=region.id)
            if aer.exists():
                aer.delete()
                log.info(u'[Webapp:%s] App included in new special '
                         u'region (%s).' % (app, region.slug))
        else:
            # Developer requested for it to be in China.
            # Keep the app excluded from the region until it is approved.
            aer, created = app.addonexcludedregion.get_or_create(
                region=region.id)
            if created:
                log.info(u'[Webapp:%s] App excluded from new special '
                         u'region (%s).' % (app, region.slug))
class AuthorForm(happyforms.ModelForm):
    """ModelForm for a single AddonUser (team member) row."""

    def clean_user(self):
        """Require that the chosen user accepted the developer agreement."""
        member = self.cleaned_data['user']
        if member.read_dev_agreement:
            return member
        raise forms.ValidationError(
            _('All team members must have read and agreed to the '
              'developer agreement.'))

    class Meta:
        model = AddonUser
        exclude = ('addon',)
class BaseModelFormSet(BaseModelFormSet):
    """
    Override the parent's is_valid to prevent deleting all forms.
    """

    def is_valid(self):
        # clean() won't get called in is_valid() if all the rows are getting
        # deleted, so additionally require no per-form or non-form errors.
        if not super(BaseModelFormSet, self).is_valid():
            return False
        if any(self.errors):
            return False
        return not bool(self.non_form_errors())
class BaseAuthorFormSet(BaseModelFormSet):
    """Formset-level validation for the team-member (author) rows."""

    def clean(self):
        if any(self.errors):
            return
        # cleaned_data could be None/empty for the blank extra form; drop
        # empty rows and rows marked for deletion.
        rows = [form.cleaned_data for form in self.forms
                if form.cleaned_data and
                not form.cleaned_data.get('DELETE', False)]
        if not any(row['role'] == mkt.AUTHOR_ROLE_OWNER for row in rows):
            raise forms.ValidationError(_('Must have at least one owner.'))
        if not any(row['listed'] for row in rows):
            raise forms.ValidationError(
                _('At least one team member must be listed.'))
        users = [row['user'] for row in rows]
        if sorted(users) != sorted(set(users)):
            raise forms.ValidationError(
                _('A team member can only be listed once.'))
# Formset of team-member rows; deletion allowed, no blank extra rows.
AuthorFormSet = modelformset_factory(AddonUser, formset=BaseAuthorFormSet,
                                     form=AuthorForm, can_delete=True, extra=0)
class DeleteForm(happyforms.Form):
    """App-deletion confirmation form, always bound to the request's POST."""

    # Optional free-text reason for the deletion.
    reason = forms.CharField(required=False)

    def __init__(self, request):
        data = request.POST
        super(DeleteForm, self).__init__(data)
def trap_duplicate(request, manifest_url):
    """Return a status-specific HTML warning if this user already submitted
    an app with the same manifest URL; otherwise return None.
    """
    # See if this user has any other apps with the same manifest.
    owned = (request.user.addonuser_set
             .filter(addon__manifest_url=manifest_url))
    if not owned:
        return
    try:
        app = owned[0].addon
    except Webapp.DoesNotExist:
        return
    error_url = app.get_dev_url()
    msg = None
    # Pick a message matching the existing app's current status.
    if app.status == mkt.STATUS_PUBLIC:
        msg = _(u'Oops, looks like you already submitted that manifest '
                'for %s, which is currently public. '
                '<a href="%s">Edit app</a>')
    elif app.status == mkt.STATUS_PENDING:
        msg = _(u'Oops, looks like you already submitted that manifest '
                'for %s, which is currently pending. '
                '<a href="%s">Edit app</a>')
    elif app.status == mkt.STATUS_NULL:
        msg = _(u'Oops, looks like you already submitted that manifest '
                'for %s, which is currently incomplete. '
                '<a href="%s">Resume app</a>')
    elif app.status == mkt.STATUS_REJECTED:
        msg = _(u'Oops, looks like you already submitted that manifest '
                'for %s, which is currently rejected. '
                '<a href="%s">Edit app</a>')
    elif app.status == mkt.STATUS_DISABLED:
        msg = _(u'Oops, looks like you already submitted that manifest '
                'for %s, which is currently banned on Marketplace. '
                '<a href="%s">Edit app</a>')
    elif app.disabled_by_user:
        msg = _(u'Oops, looks like you already submitted that manifest '
                'for %s, which is currently disabled. '
                '<a href="%s">Edit app</a>')
    if msg:
        # Escape the app name since the message is rendered as HTML.
        return msg % (jinja2_escape(app.name), error_url)
def verify_app_domain(manifest_url, exclude=None, packaged=False):
    """Raise ValidationError if another app already lives on this domain.

    Only enforced for packaged apps, or when the 'webapps-unique-by-domain'
    waffle switch is active.  `exclude` is an app to ignore (e.g. the one
    being edited).
    """
    if not (packaged or waffle.switch_is_active('webapps-unique-by-domain')):
        return
    domain = Webapp.domain_from_url(manifest_url)
    existing = Webapp.objects.filter(app_domain=domain)
    if exclude:
        existing = existing.exclude(pk=exclude.pk)
    if existing.exists():
        raise forms.ValidationError(
            _('An app already exists on this domain; '
              'only one app per domain is allowed.'))
class PreviewForm(happyforms.ModelForm):
    """Form for a single screenshot/video `Preview` attached to an app."""

    file_upload = forms.FileField(required=False)
    upload_hash = forms.CharField(required=False)
    # This lets us POST the data URIs of the unsaved previews so we can still
    # show them if there were form errors.
    unsaved_image_data = forms.CharField(required=False,
                                         widget=forms.HiddenInput)
    unsaved_image_type = forms.CharField(required=False,
                                         widget=forms.HiddenInput)

    def save(self, addon, commit=True):
        # Save this preview for `addon`, kicking off an async resize task for
        # any newly-uploaded image or video.
        if self.cleaned_data:
            self.instance.addon = addon
            if self.cleaned_data.get('DELETE'):
                # Existing preview.
                if self.instance.id:
                    self.instance.delete()
                # User has no desire to save this preview.
                return
            super(PreviewForm, self).save(commit=commit)
            if self.cleaned_data['upload_hash']:
                upload_hash = self.cleaned_data['upload_hash']
                upload_path = os.path.join(settings.TMP_PATH, 'preview',
                                           upload_hash)
                # The hash's extension encodes the MIME type, with '-'
                # standing in for '/' (e.g. '.video-webm' -> 'video/webm').
                filetype = (os.path.splitext(upload_hash)[1][1:]
                            .replace('-', '/'))
                if filetype in mkt.VIDEO_TYPES:
                    self.instance.update(filetype=filetype)
                    vtasks.resize_video.delay(upload_path, self.instance.pk,
                                              user_pk=mkt.get_user().pk)
                else:
                    # Non-video uploads are normalized to PNG by the task.
                    self.instance.update(filetype='image/png')
                    tasks.resize_preview.delay(upload_path, self.instance.pk,
                                               set_modified_on=[self.instance])

    class Meta:
        model = Preview
        fields = ('file_upload', 'upload_hash', 'id', 'position')
class JSONField(forms.Field):
    """Form field that decodes JSON strings, passing other values through."""

    def to_python(self, value):
        # Treat the empty string as "no value".
        if value == '':
            return None
        if isinstance(value, basestring):
            try:
                return json.loads(value)
            except ValueError:
                # Not valid JSON; fall through and return the raw value.
                pass
        return value
class JSONMultipleChoiceField(forms.MultipleChoiceField, JSONField):
    # Render as checkboxes; values are JSON-decoded via JSONField.to_python.
    widget = forms.CheckboxSelectMultiple
class AdminSettingsForm(PreviewForm):
    """Admin-only app settings: promo image, Mozilla contact, VIP/priority."""

    DELETE = forms.BooleanField(required=False)
    mozilla_contact = SeparatedValuesField(forms.EmailField, separator=',',
                                           required=False)
    vip_app = forms.BooleanField(required=False)
    priority_review = forms.BooleanField(required=False)

    class Meta:
        model = Preview
        fields = ('file_upload', 'upload_hash', 'position')

    def __init__(self, *args, **kw):
        # Note that this form is not inheriting from AddonFormBase, so we have
        # to get rid of 'version' ourselves instead of letting the parent class
        # do it.
        kw.pop('version', None)
        # Get the object for the app's promo `Preview` and pass it to the form.
        if kw.get('instance'):
            addon = kw.pop('instance')
            self.instance = addon
            self.promo = addon.get_promo()
        # NOTE(review): if no 'instance' kwarg is given, `addon` is never
        # bound and the `self.initial[...]` lines below raise NameError —
        # presumably callers always pass an instance; confirm.
        self.request = kw.pop('request', None)
        # Note: After calling `super`, `self.instance` becomes the `Preview`
        # object.
        super(AdminSettingsForm, self).__init__(*args, **kw)
        self.initial['vip_app'] = addon.vip_app
        self.initial['priority_review'] = addon.priority_review
        if self.instance:
            self.initial['mozilla_contact'] = addon.mozilla_contact

    @property
    def regions_by_id(self):
        # Convenience lookup of region choices keyed by region id.
        return mkt.regions.REGIONS_CHOICES_ID_DICT

    def clean_position(self):
        # Force the promo preview's position to -1.
        return -1

    def clean_mozilla_contact(self):
        contact = self.cleaned_data.get('mozilla_contact')
        # Store an empty string rather than None when the field is blank.
        if self.cleaned_data.get('mozilla_contact') is None:
            return u''
        return contact

    def save(self, addon, commit=True):
        # Handle the promo image first, then update the admin-only flags.
        if (self.cleaned_data.get('DELETE') and
                'upload_hash' not in self.changed_data and self.promo.id):
            self.promo.delete()
        elif self.promo and 'upload_hash' in self.changed_data:
            # A new promo was uploaded; drop the old one before saving.
            self.promo.delete()
        elif self.cleaned_data.get('upload_hash'):
            super(AdminSettingsForm, self).save(addon, True)
        updates = {
            'vip_app': self.cleaned_data.get('vip_app'),
        }
        contact = self.cleaned_data.get('mozilla_contact')
        if contact is not None:
            updates['mozilla_contact'] = contact
        if (self.cleaned_data.get('priority_review') and
                not addon.priority_review):
            # addon.priority_review gets updated within prioritize_app().
            prioritize_app(addon, self.request.user)
        else:
            updates['priority_review'] = self.cleaned_data.get(
                'priority_review')
        addon.update(**updates)
        # Reindex so search results reflect the new flags.
        index_webapps.delay([addon.id])
        return addon
class BasePreviewFormSet(BaseModelFormSet):
    """Formset requiring at least one kept screenshot or video preview."""

    def clean(self):
        if any(self.errors):
            return
        kept = [form for form in self.forms
                if not form.cleaned_data.get('DELETE') and
                form.cleaned_data.get('upload_hash') is not None]
        if not kept:
            raise forms.ValidationError(
                _('You must upload at least one screenshot or video.'))
# Formset of preview rows; one blank extra row, deletion allowed.
PreviewFormSet = modelformset_factory(Preview, formset=BasePreviewFormSet,
                                      form=PreviewForm, can_delete=True,
                                      extra=1)
class NewManifestForm(happyforms.Form):
    """Form for submitting a new hosted app by manifest URL."""

    manifest = forms.URLField()

    def __init__(self, *args, **kwargs):
        # The standalone validator relaxes the unique-domain check.
        self.is_standalone = kwargs.pop('is_standalone', False)
        super(NewManifestForm, self).__init__(*args, **kwargs)

    def clean_manifest(self):
        manifest_url = self.cleaned_data['manifest']
        # Skip checking the domain for the standalone validator.
        if not self.is_standalone:
            verify_app_domain(manifest_url)
        return manifest_url
class NewPackagedAppForm(happyforms.Form):
    """Form for submitting a new packaged app as an uploaded zip file.

    On success, `self.file_upload` holds the created `FileUpload`; on failure
    the validation errors are persisted to a `FileUpload` record and a
    `ValidationError` is raised.
    """

    upload = forms.FileField()

    def __init__(self, *args, **kwargs):
        self.max_size = kwargs.pop('max_size', MAX_PACKAGED_APP_SIZE)
        self.user = kwargs.pop('user', get_user())
        self.addon = kwargs.pop('addon', None)
        # Set by clean_upload() once the package passes validation.
        self.file_upload = None
        super(NewPackagedAppForm, self).__init__(*args, **kwargs)

    def clean_upload(self):
        """Validate the package: size, zip safety, manifest, and origin."""
        upload = self.cleaned_data['upload']
        errors = []

        if upload.size > self.max_size:
            errors.append({
                'type': 'error',
                'message': _('Packaged app too large for submission. Packages '
                             'must be smaller than %s.' % filesizeformat(
                                 self.max_size)),
                'tier': 1,
            })
            # Immediately raise an error, do not process the rest of the view,
            # which would read the file.
            raise self.persist_errors(errors, upload)

        manifest = None
        # BUG FIX: initialize before the `try` so the `finally` clause cannot
        # raise NameError when SafeUnzip construction itself fails.
        safe_zip = None
        try:
            # Be careful to keep this as in-memory zip reading.
            safe_zip = SafeUnzip(upload, 'r')
            safe_zip.is_valid()  # Will throw ValidationError if necessary.
            manifest = safe_zip.extract_path('manifest.webapp')
        except forms.ValidationError as e:
            errors.append({
                'type': 'error',
                'message': ''.join(e.messages),
                'tier': 1,
            })
        except Exception:
            errors.append({
                'type': 'error',
                'message': _('Error extracting manifest from zip file.'),
                'tier': 1,
            })
        finally:
            if safe_zip is not None:
                safe_zip.close()

        origin = None
        if manifest:
            try:
                origin = WebAppParser.decode_manifest(manifest).get('origin')
            except forms.ValidationError as e:
                errors.append({
                    'type': 'error',
                    'message': ''.join(e.messages),
                    'tier': 1,
                })

        if origin:
            try:
                verify_app_domain(origin, packaged=True, exclude=self.addon)
            # BUG FIX: was the Python-2-only `except ..., e:` form; use
            # `as e` for consistency with the handlers above (and py3).
            except forms.ValidationError as e:
                errors.append({
                    'type': 'error',
                    'message': ''.join(e.messages),
                    'tier': 1,
                })

        if errors:
            raise self.persist_errors(errors, upload)

        # Everything passed validation.
        self.file_upload = FileUpload.from_post(
            upload, upload.name, upload.size, user=self.user)

    def persist_errors(self, errors, upload):
        """
        Persist the error with this into FileUpload (but do not persist
        the file contents, which are too large) and return a ValidationError.
        """
        validation = {
            'errors': len(errors),
            'success': False,
            'messages': errors,
        }
        self.file_upload = FileUpload.objects.create(
            user=self.user, name=getattr(upload, 'name', ''),
            validation=json.dumps(validation))
        # Return a ValidationError to be raised by the view.
        return forms.ValidationError(' '.join(e['message'] for e in errors))
class AddonFormBase(TranslationFormMixin, happyforms.ModelForm):
    """Base ModelForm for Webapp edit forms.

    Pops the `request` (required) and `version` (optional) kwargs so
    subclasses get them as attributes instead of passing them to Django.
    """

    def __init__(self, *args, **kw):
        self.request = kw.pop('request')
        self.version = kw.pop('version', None)
        super(AddonFormBase, self).__init__(*args, **kw)

    class Meta:
        # BUG FIX: was `models = Webapp` (typo) — Django's ModelForm Meta
        # expects the singular `model` attribute; as written the base Meta
        # declared no model at all.
        model = Webapp
        fields = ('name', 'slug')
class AppFormBasic(AddonFormBase):
    """Form to edit basic app info."""

    slug = forms.CharField(max_length=30, widget=forms.TextInput)
    manifest_url = forms.URLField()
    hosted_url = forms.CharField(
        label=_lazy(u'Hosted URL:'), required=False,
        help_text=_lazy(
            u'A URL to where your app is hosted on the web, if it exists. This'
            u' allows users to try out your app before installing it.'))
    description = TransField(
        required=True,
        label=_lazy(u'Provide a detailed description of your app'),
        help_text=_lazy(u'This description will appear on the details page.'),
        widget=TransTextarea)
    tags = forms.CharField(
        label=_lazy(u'Search Keywords:'), required=False,
        widget=forms.Textarea(attrs={'rows': 3}),
        help_text=_lazy(
            u'The search keywords are used to return search results in the '
            u'Firefox Marketplace. Be sure to include a keywords that '
            u'accurately reflect your app.'))

    class Meta:
        model = Webapp
        fields = ('slug', 'manifest_url', 'hosted_url', 'description', 'tags')

    def __init__(self, *args, **kw):
        # Force the form to use app_slug. We want to keep
        # this under "slug" so all the js continues to work.
        kw.setdefault('initial', {})['slug'] = kw['instance'].app_slug
        super(AppFormBasic, self).__init__(*args, **kw)
        # Remember the pre-edit manifest URL so save() can detect changes.
        self.old_manifest_url = self.instance.manifest_url
        if self.instance.is_packaged:
            # Manifest URL cannot be changed for packaged apps.
            del self.fields['manifest_url']
        self.initial['tags'] = ', '.join(self.get_tags(self.instance))

    def clean_tags(self):
        # Normalize/validate tags, enforcing restricted-tag permissions.
        return clean_tags(self.request, self.cleaned_data['tags'])

    def get_tags(self, addon):
        # Restricted tags are only visible to users allowed to edit them.
        if can_edit_restricted_tags(self.request):
            return list(addon.tags.values_list('tag_text', flat=True))
        else:
            return list(addon.tags.filter(restricted=False)
                        .values_list('tag_text', flat=True))

    def _post_clean(self):
        # Switch slug to app_slug in cleaned_data and self._meta.fields so
        # we can update the app_slug field for webapps.
        try:
            self._meta.fields = list(self._meta.fields)
            slug_idx = self._meta.fields.index('slug')
            data = self.cleaned_data
            if 'slug' in data:
                data['app_slug'] = data.pop('slug')
            self._meta.fields[slug_idx] = 'app_slug'
            super(AppFormBasic, self)._post_clean()
        finally:
            # Always restore the field name, even if _post_clean raised.
            self._meta.fields[slug_idx] = 'slug'

    def clean_slug(self):
        slug = self.cleaned_data['slug']
        slug_validator(slug, lower=False)
        if slug != self.instance.app_slug:
            # Only run uniqueness/blocklist checks when the slug changed.
            if Webapp.objects.filter(app_slug=slug).exists():
                raise forms.ValidationError(
                    _('This slug is already in use. Please choose another.'))
            if BlockedSlug.blocked(slug):
                raise forms.ValidationError(_('The slug cannot be "%s". '
                                              'Please choose another.' % slug))
        return slug.lower()

    def clean_manifest_url(self):
        manifest_url = self.cleaned_data['manifest_url']
        # Only verify if manifest changed.
        if 'manifest_url' in self.changed_data:
            verify_app_domain(manifest_url, exclude=self.instance)
        return manifest_url

    def save(self, addon, commit=False):
        # We ignore `commit`, since we need it to be `False` so we can save
        # the ManyToMany fields on our own.
        addonform = super(AppFormBasic, self).save(commit=False)
        addonform.save()
        if 'manifest_url' in self.changed_data:
            before_url = self.old_manifest_url
            after_url = self.cleaned_data['manifest_url']
            # If a non-admin edited the manifest URL, add to Re-review Queue.
            if not acl.action_allowed(self.request, 'Admin', '%'):
                log.info(u'[Webapp:%s] (Re-review) Manifest URL changed '
                         u'from %s to %s'
                         % (self.instance, before_url, after_url))
                msg = (_(u'Manifest URL changed from {before_url} to '
                         u'{after_url}')
                       .format(before_url=before_url, after_url=after_url))
                RereviewQueue.flag(self.instance,
                                   mkt.LOG.REREVIEW_MANIFEST_URL_CHANGE, msg)
            # Refetch the new manifest.
            log.info('Manifest %s refreshed for %s'
                     % (addon.manifest_url, addon))
            update_manifests.delay([self.instance.id])
        # Diff the old and new keyword sets and apply the changes.
        tags_new = self.cleaned_data['tags']
        tags_old = [slugify(t, spaces=True) for t in self.get_tags(addon)]
        add_tags = set(tags_new) - set(tags_old)
        del_tags = set(tags_old) - set(tags_new)
        # Add new tags.
        for t in add_tags:
            Tag(tag_text=t).save_tag(addon)
        # Remove old tags.
        for t in del_tags:
            Tag(tag_text=t).remove_tag(addon)
        return addonform
class AppFormDetails(AddonFormBase):
    """Form to edit app details: default locale, homepage, privacy policy."""

    # Available locale choices as (code, name) pairs, built from settings.
    LOCALES = [(translation.to_locale(k).replace('_', '-'), v)
               for k, v in do_dictsort(settings.LANGUAGES)]
    default_locale = forms.TypedChoiceField(required=False, choices=LOCALES)
    homepage = TransField.adapt(forms.URLField)(required=False)
    privacy_policy = TransField(
        widget=TransTextarea(), required=True,
        label=_lazy(u"Please specify your app's Privacy Policy"))

    class Meta:
        model = Webapp
        fields = ('default_locale', 'homepage', 'privacy_policy')

    def clean(self):
        # Make sure we have the required translations in the new locale.
        required = ['name', 'description']
        data = self.cleaned_data
        if not self.errors and 'default_locale' in self.changed_data:
            # Map each required field to its translation id on the instance.
            fields = dict((k, getattr(self.instance, k + '_id'))
                          for k in required)
            locale = data['default_locale']
            ids = filter(None, fields.values())
            # Which of those translations actually exist in the new locale?
            qs = (Translation.objects.filter(locale=locale, id__in=ids,
                                             localized_string__isnull=False)
                  .values_list('id', flat=True))
            missing = [k for k, v in fields.items() if v not in qs]
            if missing:
                raise forms.ValidationError(
                    _('Before changing your default locale you must have a '
                      'name and description in that locale. '
                      'You are missing %s.') % ', '.join(map(repr, missing)))
        return data
class AppFormMedia(AddonFormBase):
    """Edit form for an app's icon."""
    icon_upload_hash = forms.CharField(required=False)
    unsaved_icon_data = forms.CharField(required=False,
                                        widget=forms.HiddenInput)

    class Meta:
        model = Webapp
        fields = ('icon_upload_hash', 'icon_type')

    def save(self, addon, commit=True):
        """If a new icon was uploaded, queue a resize task for it, then
        delegate to the normal form save."""
        if self.cleaned_data['icon_upload_hash']:
            upload_hash = self.cleaned_data['icon_upload_hash']
            # Uploads land in a temp directory keyed by their hash.
            upload_path = os.path.join(settings.TMP_PATH, 'icon', upload_hash)
            dirname = addon.get_icon_dir()
            destination = os.path.join(dirname, '%s' % addon.id)
            # Drop the previous icons before generating the new sizes.
            remove_icons(destination)
            tasks.resize_icon.delay(upload_path, destination,
                                    mkt.CONTENT_ICON_SIZES,
                                    set_modified_on=[addon])
        return super(AppFormMedia, self).save(commit)
class AppSupportFormMixin(object):
    """Mixin enforcing that a form provides a support email, a support URL,
    or both."""

    def get_default_translation_for(self, field_name):
        """Return the cleaned value of *field_name* in that field's default
        locale (empty string when absent)."""
        locale = self.fields[field_name].default_locale
        translations = self.cleaned_data.get(field_name, {})
        return translations.get(locale, '')

    def clean_support_fields(self):
        """Record a form error unless at least one support contact is set."""
        already_invalid = ('support_email' in self._errors or
                           'support_url' in self._errors)
        if already_invalid:
            # A support field was filled in but failed its own validation;
            # let the user fix that error instead of stacking another one.
            return
        email = self.get_default_translation_for('support_email')
        url = self.get_default_translation_for('support_url')
        if email or url:
            return
        # Attach the message to a synthetic 'support' field for the template
        # to render, and blank errors to the real fields so both display as
        # invalid without duplicating the message.
        self._errors['support'] = self.error_class(
            [_('You must provide either a website, an email, or both.')])
        self._errors['support_email'] = self.error_class([''])
        self._errors['support_url'] = self.error_class([''])

    def clean(self):
        data = super(AppSupportFormMixin, self).clean()
        self.clean_support_fields()
        return data
class AppFormSupport(AppSupportFormMixin, AddonFormBase):
    """Edit form for an app's support contact info; the mixin requires at
    least one of email/URL to be present."""
    support_url = TransField.adapt(forms.URLField)(required=False)
    support_email = TransField.adapt(forms.EmailField)(required=False)

    class Meta:
        model = Webapp
        fields = ('support_email', 'support_url')
class AppAppealForm(happyforms.Form):
    """
    If a developer's app is rejected he can make changes and request
    another review.
    """
    notes = forms.CharField(
        label=_lazy(u'Your comments'),
        required=False, widget=forms.Textarea(attrs={'rows': 2}))

    def __init__(self, *args, **kw):
        # The app being appealed, passed in by the view.
        self.product = kw.pop('product', None)
        super(AppAppealForm, self).__init__(*args, **kw)

    def save(self):
        """Log the resubmission and put the latest version back into the
        review queue. Returns that version."""
        version = self.product.versions.latest()
        notes = self.cleaned_data['notes']
        if notes:
            mkt.log(mkt.LOG.WEBAPP_RESUBMIT, self.product, version,
                    details={'comments': notes})
        else:
            mkt.log(mkt.LOG.WEBAPP_RESUBMIT, self.product, version)
        # Mark app and file as pending again.
        self.product.update(status=mkt.WEBAPPS_UNREVIEWED_STATUS)
        version.all_files[0].update(status=mkt.WEBAPPS_UNREVIEWED_STATUS)
        return version
class PublishForm(happyforms.Form):
    """Choose how visible an approved app is (published/unlisted/private)."""
    # Publish choice wording is slightly different here than with the
    # submission flow because the app may have already been published.
    mark_safe_lazy = lazy(mark_safe, six.text_type)
    PUBLISH_CHOICES = (
        (mkt.PUBLISH_IMMEDIATE,
         mark_safe_lazy(_lazy(
             u'<b>Published</b>: Visible to everyone in the Marketplace and '
             u'included in search results and listing pages.'))),
        (mkt.PUBLISH_HIDDEN,
         mark_safe_lazy(_lazy(
             u'<b>Unlisted</b>: Visible to only people with the URL and '
             u'does not appear in search results and listing pages.'))),
    )

    # Used for setting initial form values.
    PUBLISH_MAPPING = {
        mkt.STATUS_PUBLIC: mkt.PUBLISH_IMMEDIATE,
        mkt.STATUS_UNLISTED: mkt.PUBLISH_HIDDEN,
        mkt.STATUS_APPROVED: mkt.PUBLISH_PRIVATE,
    }
    # Use in form processing to set status.
    STATUS_MAPPING = dict((v, k) for k, v in PUBLISH_MAPPING.items())

    publish_type = forms.TypedChoiceField(
        required=False, choices=PUBLISH_CHOICES, widget=forms.RadioSelect(),
        initial=0, coerce=int, label=_lazy('App Visibility:'))
    limited = forms.BooleanField(
        required=False, label=_lazy(
            u'<b>Limit to my team</b>: Visible to only Team Members.'))

    def __init__(self, *args, **kwargs):
        self.addon = kwargs.pop('addon')
        super(PublishForm, self).__init__(*args, **kwargs)

        limited = False
        publish = self.PUBLISH_MAPPING.get(self.addon.status,
                                           mkt.PUBLISH_IMMEDIATE)
        if self.addon.status == mkt.STATUS_APPROVED:
            # Special case if app is currently private.
            limited = True
            publish = mkt.PUBLISH_HIDDEN

        # Determine the current selection via STATUS to publish choice mapping.
        self.fields['publish_type'].initial = publish
        self.fields['limited'].initial = limited
        # Make the limited label safe so we can display the HTML.
        self.fields['limited'].label = mark_safe(self.fields['limited'].label)

    def save(self):
        """Persist the chosen visibility and refresh dependent app data."""
        publish = self.cleaned_data['publish_type']
        limited = self.cleaned_data['limited']

        # "Unlisted" plus "limit to my team" together mean private.
        if publish == mkt.PUBLISH_HIDDEN and limited:
            publish = mkt.PUBLISH_PRIVATE

        status = self.STATUS_MAPPING[publish]
        self.addon.update(status=status)

        mkt.log(mkt.LOG.CHANGE_STATUS, self.addon.get_status_display(),
                self.addon)
        # Call update_version, so various other bits of data update.
        self.addon.update_version()
        # Call to update names and locales if changed.
        self.addon.update_name_from_package_manifest()
        self.addon.update_supported_locales()
        set_storefront_data.delay(self.addon.pk)
class RegionForm(forms.Form):
    """Choose the regions an app is listed in, including "special" regions
    (e.g. China) that have their own approval flow."""
    regions = forms.MultipleChoiceField(
        required=False, choices=[], widget=forms.CheckboxSelectMultiple,
        label=_lazy(u'Choose the regions your app will be listed in:'),
        error_messages={'required':
                        _lazy(u'You must select at least one region.')})
    special_regions = forms.MultipleChoiceField(
        required=False, widget=forms.CheckboxSelectMultiple,
        choices=[(x.id, x.name) for x in mkt.regions.SPECIAL_REGIONS])
    enable_new_regions = forms.BooleanField(
        required=False, label=_lazy(u'Enable new regions'))
    restricted = forms.TypedChoiceField(
        required=False, initial=0, coerce=int,
        choices=[(0, _lazy('Make my app available in most regions')),
                 (1, _lazy('Choose where my app is made available'))],
        widget=forms.RadioSelect(attrs={'class': 'choices'}))

    def __init__(self, *args, **kw):
        self.product = kw.pop('product', None)
        self.request = kw.pop('request', None)
        super(RegionForm, self).__init__(*args, **kw)

        self.fields['regions'].choices = REGIONS_CHOICES_SORTED_BY_NAME()

        # This is the list of the user's exclusions as we don't
        # want the user's choices to be altered by external
        # exclusions e.g. payments availability.
        user_exclusions = list(
            self.product.addonexcludedregion.values_list('region', flat=True)
        )

        # If we have excluded regions, uncheck those.
        # Otherwise, default to everything checked.
        self.regions_before = self.product.get_region_ids(
            restofworld=True,
            excluded=user_exclusions
        )

        self.initial = {
            'regions': sorted(self.regions_before),
            'restricted': int(self.product.geodata.restricted),
            'enable_new_regions': self.product.enable_new_regions,
        }

        # The checkboxes for special regions are
        #
        # - checked ... if an app has not been requested for approval in
        #   China or the app has been rejected in China.
        #
        # - unchecked ... if an app has been requested for approval in
        #   China or the app has been approved in China.
        unchecked_statuses = (mkt.STATUS_NULL, mkt.STATUS_REJECTED)

        for region in self.special_region_objs:
            if self.product.geodata.get_status(region) in unchecked_statuses:
                # If it's rejected in this region, uncheck its checkbox.
                if region.id in self.initial['regions']:
                    self.initial['regions'].remove(region.id)
            elif region.id not in self.initial['regions']:
                # If it's pending/public, check its checkbox.
                self.initial['regions'].append(region.id)

    @property
    def regions_by_id(self):
        return mkt.regions.REGIONS_CHOICES_ID_DICT

    @property
    def special_region_objs(self):
        return mkt.regions.SPECIAL_REGIONS

    @property
    def special_region_ids(self):
        return mkt.regions.SPECIAL_REGION_IDS

    @property
    def low_memory_regions(self):
        # True if any known region is flagged as low-memory.
        return any(region.low_memory for region in self.regions_by_id.values())

    @property
    def special_region_statuses(self):
        """Returns the null/pending/public status for each region."""
        statuses = {}
        for region in self.special_region_objs:
            statuses[region.id] = self.product.geodata.get_status_slug(region)
        return statuses

    @property
    def special_region_messages(self):
        """Returns the L10n messages for each region's status."""
        return self.product.geodata.get_status_messages()

    def is_toggling(self):
        """Return 'free'/'paid' when the POST is a payment-type toggle rather
        than a real region save, else False."""
        if not self.request or not hasattr(self.request, 'POST'):
            return False
        value = self.request.POST.get('toggle-paid')
        return value if value in ('free', 'paid') else False

    def _product_is_paid(self):
        # Premium apps, including free apps with in-app payments.
        return (self.product.premium_type in mkt.ADDON_PREMIUMS or
                self.product.premium_type == mkt.ADDON_FREE_INAPP)

    def clean_regions(self):
        regions = self.cleaned_data['regions']
        if not self.is_toggling():
            if not regions:
                raise forms.ValidationError(
                    _('You must select at least one region.'))
        return regions

    def save(self):
        """Apply region exclusions/inclusions and special-region requests."""
        # Don't save regions if we are toggling.
        if self.is_toggling():
            return

        regions = [int(x) for x in self.cleaned_data['regions']]
        special_regions = [
            int(x) for x in self.cleaned_data['special_regions']
        ]
        restricted = int(self.cleaned_data['restricted'] or 0)

        if restricted:
            before = set(self.regions_before)
            after = set(regions)

            log.info(u'[Webapp:%s] App marked as restricted.' % self.product)

            # Add new region exclusions.
            to_add = before - after
            for region in to_add:
                aer, created = self.product.addonexcludedregion.get_or_create(
                    region=region)
                if created:
                    log.info(u'[Webapp:%s] Excluded from new region (%s).'
                             % (self.product, region))

            # Remove old region exclusions.
            to_remove = after - before
            for region in to_remove:
                self.product.addonexcludedregion.filter(
                    region=region).delete()
                log.info(u'[Webapp:%s] No longer excluded from region (%s).'
                         % (self.product, region))

            # If restricted, check how we should handle new regions.
            if self.cleaned_data['enable_new_regions']:
                self.product.update(enable_new_regions=True)
                log.info(u'[Webapp:%s] will be added to future regions.'
                         % self.product)
            else:
                self.product.update(enable_new_regions=False)
                log.info(u'[Webapp:%s] will not be added to future regions.'
                         % self.product)
        else:
            # If not restricted, set `enable_new_regions` to True and remove
            # currently excluded regions.
            self.product.update(enable_new_regions=True)
            self.product.addonexcludedregion.all().delete()
            log.info(u'[Webapp:%s] App marked as unrestricted.' % self.product)

        self.product.geodata.update(restricted=restricted)

        # Toggle region exclusions/statuses for special regions (e.g., China).
        toggle_app_for_special_regions(self.request, self.product,
                                       special_regions)
class CategoryForm(happyforms.Form):
    """Choose the categories an app is listed under."""
    categories = forms.MultipleChoiceField(label=_lazy(u'Categories'),
                                           choices=CATEGORY_CHOICES,
                                           widget=forms.CheckboxSelectMultiple)

    def __init__(self, *args, **kw):
        self.request = kw.pop('request', None)
        self.product = kw.pop('product', None)
        super(CategoryForm, self).__init__(*args, **kw)

        # Remember the pre-edit categories so views/templates can diff them.
        self.cats_before = (list(self.product.categories)
                            if self.product.categories else [])
        self.initial['categories'] = self.cats_before

    def max_categories(self):
        return mkt.MAX_CATEGORIES

    def clean_categories(self):
        """Enforce the maximum number of distinct categories."""
        categories = self.cleaned_data['categories']
        set_categories = set(categories)
        total = len(set_categories)
        max_cat = mkt.MAX_CATEGORIES

        if total > max_cat:
            # L10n: {0} is the number of categories.
            raise forms.ValidationError(ngettext(
                'You can have only {0} category.',
                'You can have only {0} categories.',
                max_cat).format(max_cat))

        return categories

    def save(self):
        after = list(self.cleaned_data['categories'])
        self.product.update(categories=after)
        # Category changes may affect special-region eligibility.
        toggle_app_for_special_regions(self.request, self.product)
class DevAgreementForm(happyforms.Form):
    """Record acceptance of the developer agreement."""
    read_dev_agreement = forms.BooleanField(label=_lazy(u'Agree'),
                                            widget=forms.HiddenInput)

    def __init__(self, *args, **kw):
        # The instance whose read_dev_agreement timestamp will be set
        # (presumably the user profile — confirm against callers).
        self.instance = kw.pop('instance')
        super(DevAgreementForm, self).__init__(*args, **kw)

    def save(self):
        # Stamp acceptance with the current time.
        self.instance.read_dev_agreement = datetime.now()
        self.instance.save()
class DevNewsletterForm(happyforms.Form):
    """Devhub newsletter subscription form."""
    email = forms.EmailField(
        error_messages={'required':
                        _lazy(u'Please enter a valid email address.')},
        widget=forms.TextInput(attrs={'required': '',
                                      'placeholder':
                                      _lazy(u'Your email address')}))
    email_format = forms.ChoiceField(
        widget=forms.RadioSelect(),
        choices=(('H', 'HTML'), ('T', _lazy(u'Text'))),
        initial='H')
    privacy = forms.BooleanField(
        error_messages={'required':
                        _lazy(u'You must agree to the Privacy Policy.')})
    country = forms.ChoiceField(label=_lazy(u'Country'))

    def __init__(self, locale, *args, **kw):
        # Country choices are localized for `locale` and sorted by name.
        regions = mpconstants_regions.get_region(locale).REGIONS
        regions = sorted(regions.iteritems(), key=lambda x: x[1])
        super(DevNewsletterForm, self).__init__(*args, **kw)
        self.fields['country'].choices = regions
        self.fields['country'].initial = 'us'
class AppFormTechnical(AddonFormBase):
    """Edit form for an app's technical flags (offline support, stats)."""
    is_offline = forms.BooleanField(required=False)

    class Meta:
        model = Webapp
        fields = ('is_offline', 'public_stats',)
class TransactionFilterForm(happyforms.Form):
    """Filter the transaction log by app, type, transaction id, and date."""
    app = AddonChoiceField(queryset=None, required=False, label=_lazy(u'App'))
    transaction_type = forms.ChoiceField(
        required=False, label=_lazy(u'Transaction Type'),
        choices=[(None, '')] + mkt.MKT_TRANSACTION_CONTRIB_TYPES.items())
    transaction_id = forms.CharField(
        required=False, label=_lazy(u'Transaction ID'))

    # Year choices run from the current year back through 2013.
    current_year = datetime.today().year
    years = [current_year - x for x in range(current_year - 2012)]
    date_from = forms.DateTimeField(
        required=False, widget=SelectDateWidget(years=years),
        label=_lazy(u'From'))
    date_to = forms.DateTimeField(
        required=False, widget=SelectDateWidget(years=years),
        label=_lazy(u'To'))

    def __init__(self, *args, **kwargs):
        # Restrict the app dropdown to the apps supplied by the view.
        self.apps = kwargs.pop('apps', [])
        super(TransactionFilterForm, self).__init__(*args, **kwargs)
        self.fields['app'].queryset = self.apps
class APIConsumerForm(happyforms.ModelForm):
    """Create/edit an OAuth API consumer (website or command-line client)."""
    app_name = forms.CharField(required=False)
    oauth_leg = forms.ChoiceField(choices=(
        ('website', _lazy('Web site')),
        ('command', _lazy('Command line')))
    )
    redirect_uri = forms.CharField(validators=[URLValidator()], required=False)

    class Meta:
        model = Access
        fields = ('app_name', 'redirect_uri')

    def __init__(self, *args, **kwargs):
        super(APIConsumerForm, self).__init__(*args, **kwargs)
        # Website consumers need a name and redirect URI for the OAuth flow;
        # command-line consumers do not.
        if self.data.get('oauth_leg') == 'website':
            for field in ['app_name', 'redirect_uri']:
                self.fields[field].required = True
class AppVersionForm(happyforms.ModelForm):
    """Edit a version's release/approval notes and its publish preference."""
    releasenotes = TransField(widget=TransTextarea(), required=False)
    approvalnotes = forms.CharField(
        widget=TranslationTextarea(attrs={'rows': 4}), required=False)
    publish_immediately = forms.BooleanField(
        required=False,
        label=_lazy(u'Make this the Active version of my app as soon as it '
                    u'has been reviewed and approved.'))

    class Meta:
        model = Version
        fields = ('releasenotes', 'approvalnotes')

    def __init__(self, *args, **kwargs):
        super(AppVersionForm, self).__init__(*args, **kwargs)
        # Reflect the app's current publish_type in the checkbox.
        self.fields['publish_immediately'].initial = (
            self.instance.addon.publish_type == mkt.PUBLISH_IMMEDIATE)

    def save(self, *args, **kwargs):
        rval = super(AppVersionForm, self).save(*args, **kwargs)
        if self.instance.all_files[0].status == mkt.STATUS_PENDING:
            # If version is pending, allow changes to publish_type.
            if self.cleaned_data.get('publish_immediately'):
                publish_type = mkt.PUBLISH_IMMEDIATE
            else:
                publish_type = mkt.PUBLISH_PRIVATE
            self.instance.addon.update(publish_type=publish_type)
        return rval
class IARCGetAppInfoForm(happyforms.Form):
    """Attach an existing IARC ratings certificate to an app by looking it up
    with its submission id and security code."""
    submission_id = forms.CharField()
    security_code = forms.CharField(max_length=10)

    def __init__(self, app, *args, **kwargs):
        # The app to attach the certificate to.
        self.app = app
        super(IARCGetAppInfoForm, self).__init__(*args, **kwargs)

    def clean_submission_id(self):
        """Normalize the submission id to an int."""
        submission_id = (
            # Also allow "subm-1234" since that's what IARC tool displays.
            self.cleaned_data['submission_id'].lower().replace('subm-', ''))

        if submission_id.isdigit():
            return int(submission_id)

        raise forms.ValidationError(_('Please enter a valid submission ID.'))

    def clean(self):
        """Reject certificates already attached to a different app (unless
        reuse is explicitly allowed in settings)."""
        cleaned_data = super(IARCGetAppInfoForm, self).clean()

        app = self.app
        iarc_id = cleaned_data.get('submission_id')

        if not app or not iarc_id:
            return cleaned_data

        if (not settings.IARC_ALLOW_CERT_REUSE and
                IARCInfo.objects.filter(submission_id=iarc_id)
                .exclude(addon=app).exists()):
            del cleaned_data['submission_id']
            raise forms.ValidationError(
                _('This IARC certificate is already being used for another '
                  'app. Please create a new IARC Ratings Certificate.'))

        return cleaned_data

    def save(self, *args, **kwargs):
        """Fetch the rating from IARC and store it on the app.

        NOTE(review): raises forms.ValidationError on a bad lookup even
        though this is save(), not clean() — callers appear to rely on this.
        """
        app = self.app
        iarc_id = self.cleaned_data['submission_id']
        iarc_code = self.cleaned_data['security_code']

        if settings.DEBUG and iarc_id == 0:
            # A local developer is being lazy. Skip the hard work.
            app.set_iarc_info(iarc_id, iarc_code)
            app.set_descriptors([])
            app.set_interactives([])
            app.set_content_ratings({ratingsbodies.ESRB: ratingsbodies.ESRB_E})
            return

        # Generate XML.
        xml = lib.iarc.utils.render_xml(
            'get_app_info.xml',
            {'submission_id': iarc_id, 'security_code': iarc_code})

        # Process that shizzle.
        client = lib.iarc.client.get_iarc_client('services')
        resp = client.Get_App_Info(XMLString=xml)

        # Handle response.
        data = lib.iarc.utils.IARC_XML_Parser().parse_string(resp)

        if data.get('rows'):
            row = data['rows'][0]

            if 'submission_id' not in row:
                # [{'ActionStatus': 'No records found. Please try another
                # 'criteria.', 'rowId: 1}].
                msg = _('Invalid submission ID or security code.')
                self._errors['submission_id'] = self.error_class([msg])
                log.info('[IARC] Bad GetAppInfo: %s' % row)
                raise forms.ValidationError(msg)

            # We found a rating, so store the id and code for future use.
            app.set_iarc_info(iarc_id, iarc_code)
            app.set_descriptors(row.get('descriptors', []))
            app.set_interactives(row.get('interactives', []))
            app.set_content_ratings(row.get('ratings', {}))
        else:
            msg = _('Invalid submission ID or security code.')
            self._errors['submission_id'] = self.error_class([msg])
            log.info('[IARC] Bad GetAppInfo. No rows: %s' % data)
            raise forms.ValidationError(msg)
class ContentRatingForm(happyforms.Form):
    """Filter content ratings created since a given datetime."""
    since = forms.DateTimeField()
class MOTDForm(happyforms.Form):
    """Edit the site-wide message of the day."""
    motd = forms.CharField(widget=widgets.Textarea())
| |
"""GradingPeriods API Version 1.0.
This API client was generated using a template. Make sure this code is valid before using it.
"""
import logging
from datetime import date, datetime
from .base import BaseCanvasAPI
from .base import BaseModel
class GradingPeriodsAPI(BaseCanvasAPI):
    """GradingPeriods API Version 1.0."""

    def __init__(self, *args, **kwargs):
        """Set up the client and its dedicated logger."""
        super(GradingPeriodsAPI, self).__init__(*args, **kwargs)
        self.logger = logging.getLogger("py3canvas.GradingPeriodsAPI")

    def list_grading_periods_accounts(self, account_id):
        """
        List grading periods.

        Returns the paginated list of grading periods for the given account.
        """
        # account_id is the only (required) path parameter.
        path = {"account_id": account_id}
        data = {}
        params = {}
        self.logger.debug(
            "GET /api/v1/accounts/{account_id}/grading_periods with query params: {params} and form data: {data}".format(
                params=params, data=data, **path
            )
        )
        return self.generic_request(
            "GET",
            "/api/v1/accounts/{account_id}/grading_periods".format(**path),
            data=data,
            params=params,
            no_data=True,
        )

    def list_grading_periods_courses(self, course_id):
        """
        List grading periods.

        Returns the paginated list of grading periods for the given course.
        """
        # course_id is the only (required) path parameter.
        path = {"course_id": course_id}
        data = {}
        params = {}
        self.logger.debug(
            "GET /api/v1/courses/{course_id}/grading_periods with query params: {params} and form data: {data}".format(
                params=params, data=data, **path
            )
        )
        return self.generic_request(
            "GET",
            "/api/v1/courses/{course_id}/grading_periods".format(**path),
            data=data,
            params=params,
            no_data=True,
        )

    def get_single_grading_period(self, course_id, id):
        """
        Get a single grading period.

        Returns the grading period with the given id.
        """
        # Both path parameters are required.
        path = {"course_id": course_id, "id": id}
        data = {}
        params = {}
        self.logger.debug(
            "GET /api/v1/courses/{course_id}/grading_periods/{id} with query params: {params} and form data: {data}".format(
                params=params, data=data, **path
            )
        )
        return self.generic_request(
            "GET",
            "/api/v1/courses/{course_id}/grading_periods/{id}".format(**path),
            data=data,
            params=params,
            no_data=True,
        )

    def update_single_grading_period(
        self,
        course_id,
        grading_periods_end_date,
        grading_periods_start_date,
        id,
        grading_periods_weight=None,
    ):
        """
        Update a single grading period.

        Updates an existing grading period: start/end dates are required,
        weight is optional.
        """
        path = {"course_id": course_id, "id": id}
        # Form payload uses Canvas's bracketed parameter names.
        data = {
            "grading_periods[start_date]": grading_periods_start_date,
            "grading_periods[end_date]": grading_periods_end_date,
        }
        if grading_periods_weight is not None:
            data["grading_periods[weight]"] = grading_periods_weight
        params = {}
        self.logger.debug(
            "PUT /api/v1/courses/{course_id}/grading_periods/{id} with query params: {params} and form data: {data}".format(
                params=params, data=data, **path
            )
        )
        return self.generic_request(
            "PUT",
            "/api/v1/courses/{course_id}/grading_periods/{id}".format(**path),
            data=data,
            params=params,
            no_data=True,
        )

    def delete_grading_period_courses(self, course_id, id):
        """
        Delete a grading period (course scope).

        <b>204 No Content</b> response code is returned if the deletion was
        successful.
        """
        path = {"course_id": course_id, "id": id}
        data = {}
        params = {}
        self.logger.debug(
            "DELETE /api/v1/courses/{course_id}/grading_periods/{id} with query params: {params} and form data: {data}".format(
                params=params, data=data, **path
            )
        )
        return self.generic_request(
            "DELETE",
            "/api/v1/courses/{course_id}/grading_periods/{id}".format(**path),
            data=data,
            params=params,
            no_data=True,
        )

    def delete_grading_period_accounts(self, account_id, id):
        """
        Delete a grading period (account scope).

        <b>204 No Content</b> response code is returned if the deletion was
        successful.
        """
        path = {"account_id": account_id, "id": id}
        data = {}
        params = {}
        self.logger.debug(
            "DELETE /api/v1/accounts/{account_id}/grading_periods/{id} with query params: {params} and form data: {data}".format(
                params=params, data=data, **path
            )
        )
        return self.generic_request(
            "DELETE",
            "/api/v1/accounts/{account_id}/grading_periods/{id}".format(**path),
            data=data,
            params=params,
            no_data=True,
        )
class Gradingperiod(BaseModel):
    """Gradingperiod Model.

    Local value object mirroring the Canvas GradingPeriod JSON. Setters only
    mutate this copy and log a warning; they never write back to Canvas.
    """

    def __init__(
        self,
        id,
        start_date,
        end_date,
        title=None,
        close_date=None,
        weight=None,
        is_closed=None,
    ):
        """Init method for Gradingperiod class.

        id, start_date, and end_date are required; the remaining attributes
        default to None when Canvas did not supply them.
        """
        self._id = id
        self._title = title
        self._start_date = start_date
        self._end_date = end_date
        self._close_date = close_date
        self._weight = weight
        self._is_closed = is_closed

        self.logger = logging.getLogger("py3canvas.Gradingperiod")

    @property
    def id(self):
        """The unique identifier for the grading period."""
        return self._id

    @id.setter
    def id(self, value):
        """Setter for id property."""
        self.logger.warn(
            "Setting values on id will NOT update the remote Canvas instance."
        )
        self._id = value

    @property
    def title(self):
        """The title for the grading period."""
        return self._title

    @title.setter
    def title(self, value):
        """Setter for title property."""
        self.logger.warn(
            "Setting values on title will NOT update the remote Canvas instance."
        )
        self._title = value

    @property
    def start_date(self):
        """The start date of the grading period."""
        return self._start_date

    @start_date.setter
    def start_date(self, value):
        """Setter for start_date property."""
        self.logger.warn(
            "Setting values on start_date will NOT update the remote Canvas instance."
        )
        self._start_date = value

    @property
    def end_date(self):
        """The end date of the grading period."""
        return self._end_date

    @end_date.setter
    def end_date(self, value):
        """Setter for end_date property."""
        self.logger.warn(
            "Setting values on end_date will NOT update the remote Canvas instance."
        )
        self._end_date = value

    @property
    def close_date(self):
        """Grades can only be changed before the close date of the grading period."""
        return self._close_date

    @close_date.setter
    def close_date(self, value):
        """Setter for close_date property."""
        self.logger.warn(
            "Setting values on close_date will NOT update the remote Canvas instance."
        )
        self._close_date = value

    @property
    def weight(self):
        """A weight value that contributes to the overall weight of a grading period set which is used to calculate how much assignments in this period contribute to the total grade."""
        return self._weight

    @weight.setter
    def weight(self, value):
        """Setter for weight property."""
        self.logger.warn(
            "Setting values on weight will NOT update the remote Canvas instance."
        )
        self._weight = value

    @property
    def is_closed(self):
        """If true, the grading period's close_date has passed."""
        return self._is_closed

    @is_closed.setter
    def is_closed(self, value):
        """Setter for is_closed property."""
        self.logger.warn(
            "Setting values on is_closed will NOT update the remote Canvas instance."
        )
        self._is_closed = value
| |
"""
Interfaces for serializing Django objects.
Usage::
from django.core import serializers
json = serializers.serialize("json", some_queryset)
objects = list(serializers.deserialize("json", json))
To add your own serializers, use the SERIALIZATION_MODULES setting::
SERIALIZATION_MODULES = {
"csv": "path.to.csv.serializer",
"txt": "path.to.txt.serializer",
}
"""
import importlib
from django.apps import apps
from django.conf import settings
from django.core.serializers.base import SerializerDoesNotExist
# Built-in serializers
BUILTIN_SERIALIZERS = {
    "xml": "django.core.serializers.xml_serializer",
    "python": "django.core.serializers.python",
    "json": "django.core.serializers.json",
    "yaml": "django.core.serializers.pyyaml",
}

# Lazily populated {format: serializer module} registry; filled by
# _load_serializers() on first use.
_serializers = {}
class BadSerializer:
    """
    Stub serializer to hold exception raised during registration

    This allows the serializer registration to cache serializers and if there
    is an error raised in the process of creating a serializer it will be
    raised and passed along to the caller when the serializer is used.
    """
    internal_use_only = False

    def __init__(self, exception):
        # Remember the registration-time failure so it can be replayed later.
        self.exception = exception

    def __call__(self, *args, **kwargs):
        # Any attempt to instantiate/use the serializer surfaces the
        # original import error.
        raise self.exception
def register_serializer(format, serializer_module, serializers=None):
    """Register a new serializer.

    ``serializer_module`` should be the fully qualified module name
    for the serializer.

    If ``serializers`` is provided, the registration will be added
    to the provided dictionary.

    If ``serializers`` is not provided, the registration will be made
    directly into the global register of serializers. Adding serializers
    directly is not a thread-safe operation.
    """
    if serializers is None and not _serializers:
        _load_serializers()

    try:
        module = importlib.import_module(serializer_module)
    except ImportError as exc:
        # Defer the failure: wrap the exception in a stub module so it is
        # only raised when the serializer is actually used.
        stub = BadSerializer(exc)
        module = type('BadSerializerModule', (), {
            'Deserializer': stub,
            'Serializer': stub,
        })

    target = _serializers if serializers is None else serializers
    target[format] = module
def unregister_serializer(format):
    """Unregister a given serializer. This is not a thread-safe operation."""
    if not _serializers:
        _load_serializers()
    if format not in _serializers:
        # Unknown format: mirror get_serializer()'s error behavior.
        raise SerializerDoesNotExist(format)
    del _serializers[format]
def get_serializer(format):
    """Return the Serializer class registered for *format*."""
    if not _serializers:
        _load_serializers()
    if format not in _serializers:
        raise SerializerDoesNotExist(format)
    module = _serializers[format]
    return module.Serializer
def get_serializer_formats():
    """Return the names of all registered serialization formats."""
    if not _serializers:
        _load_serializers()
    return [fmt for fmt in _serializers]
def get_public_serializer_formats():
    """Return the formats whose Serializer is meant for public use."""
    if not _serializers:
        _load_serializers()
    public = []
    for fmt, module in _serializers.items():
        if not module.Serializer.internal_use_only:
            public.append(fmt)
    return public
def get_deserializer(format):
    """Return the Deserializer callable registered for *format*."""
    if not _serializers:
        _load_serializers()
    if format not in _serializers:
        raise SerializerDoesNotExist(format)
    module = _serializers[format]
    return module.Deserializer
def serialize(format, queryset, **options):
    """
    Serialize a queryset (or any iterator that returns database objects) using
    a certain serializer.
    """
    serializer = get_serializer(format)()
    serializer.serialize(queryset, **options)
    return serializer.getvalue()
def deserialize(format, stream_or_string, **options):
    """
    Deserialize a stream or a string. Return an iterator that yields ``(obj,
    m2m_relation_dict)``, where ``obj`` is an instantiated -- but *unsaved* --
    object, and ``m2m_relation_dict`` is a dictionary of ``{m2m_field_name :
    list_of_related_objects}``.
    """
    deserializer = get_deserializer(format)
    return deserializer(stream_or_string, **options)
def _load_serializers():
    """
    Register built-in and settings-defined serializers. This is done lazily so
    that user code has a chance to (e.g.) set up custom settings without
    needing to be careful of import order.
    """
    global _serializers
    registry = {}
    # Built-ins first, then user-defined modules, which may override them.
    for fmt in BUILTIN_SERIALIZERS:
        register_serializer(fmt, BUILTIN_SERIALIZERS[fmt], registry)
    if hasattr(settings, "SERIALIZATION_MODULES"):
        for fmt in settings.SERIALIZATION_MODULES:
            register_serializer(fmt, settings.SERIALIZATION_MODULES[fmt],
                                registry)
    _serializers = registry
def sort_dependencies(app_list):
    """Sort a list of (app_config, models) pairs into a single list of models.

    The single list of models is sorted so that any model with a natural key
    is serialized before a normal model, and any model with a natural key
    dependency has its dependencies serialized first.
    """
    # Process the list of models, and get the list of dependencies
    model_dependencies = []
    models = set()
    for app_config, model_list in app_list:
        # None means "all models of this app".
        if model_list is None:
            model_list = app_config.get_models()

        for model in model_list:
            models.add(model)

            # Add any explicitly defined dependencies
            if hasattr(model, 'natural_key'):
                deps = getattr(model.natural_key, 'dependencies', [])
                if deps:
                    deps = [apps.get_model(dep) for dep in deps]
            else:
                deps = []

            # Now add a dependency for any FK relation with a model that
            # defines a natural key
            for field in model._meta.fields:
                if field.remote_field:
                    rel_model = field.remote_field.model
                    if hasattr(rel_model, 'natural_key') and rel_model != model:
                        deps.append(rel_model)
            # Also add a dependency for any simple M2M relation with a model
            # that defines a natural key.  M2M relations with explicit through
            # models don't count as dependencies.
            for field in model._meta.many_to_many:
                if field.remote_field.through._meta.auto_created:
                    rel_model = field.remote_field.model
                    if hasattr(rel_model, 'natural_key') and rel_model != model:
                        deps.append(rel_model)
            model_dependencies.append((model, deps))

    model_dependencies.reverse()
    # Now sort the models to ensure that dependencies are met. This
    # is done by repeatedly iterating over the input list of models.
    # If all the dependencies of a given model are in the final list,
    # that model is promoted to the end of the final list. This process
    # continues until the input list is empty, or we do a full iteration
    # over the input models without promoting a model to the final list.
    # If we do a full iteration without a promotion, that means there are
    # circular dependencies in the list.
    model_list = []
    while model_dependencies:
        skipped = []
        changed = False
        while model_dependencies:
            model, deps = model_dependencies.pop()

            # If all of the models in the dependency list are either already
            # on the final model list, or not on the original serialization list,
            # then we've found another model with all its dependencies satisfied.
            if all(d not in models or d in model_list for d in deps):
                model_list.append(model)
                changed = True
            else:
                skipped.append((model, deps))
        if not changed:
            raise RuntimeError(
                "Can't resolve dependencies for %s in serialized app list." %
                ', '.join(
                    '%s.%s' % (model._meta.app_label, model._meta.object_name)
                    for model, deps in sorted(skipped, key=lambda obj: obj[0].__name__)
                )
            )
        model_dependencies = skipped

    return model_list
| |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_db import exception as db_exc
from oslo_utils import uuidutils
from oslo_utils import versionutils
from nova import db
from nova import exception
from nova.i18n import _
from nova import objects
from nova.objects import base
from nova.objects import fields
def determine_migration_type(migration):
    """Classify a migration record that lacks an explicit type.

    A flavor change means the operation was a resize; otherwise it was a
    plain migration.
    """
    old_flavor = migration['old_instance_type_id']
    new_flavor = migration['new_instance_type_id']
    return 'resize' if old_flavor != new_flavor else 'migration'
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class Migration(base.NovaPersistentObject, base.NovaObject,
                base.NovaObjectDictCompat):
    """Versioned object wrapping one row of the migrations table.

    Covers resizes and cold migrations as well as live migrations and
    evacuations (see the ``migration_type`` enum below).
    """

    # Version 1.0: Initial version
    # Version 1.1: String attributes updated to support unicode
    # Version 1.2: Added migration_type and hidden
    # Version 1.3: Added get_by_id_and_instance()
    # Version 1.4: Added migration progress detail
    # Version 1.5: Added uuid
    VERSION = '1.5'

    fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(),
        'source_compute': fields.StringField(nullable=True),  # source hostname
        'dest_compute': fields.StringField(nullable=True),  # dest hostname
        'source_node': fields.StringField(nullable=True),  # source nodename
        'dest_node': fields.StringField(nullable=True),  # dest nodename
        'dest_host': fields.StringField(nullable=True),  # dest host IP
        'old_instance_type_id': fields.IntegerField(nullable=True),
        'new_instance_type_id': fields.IntegerField(nullable=True),
        'instance_uuid': fields.StringField(nullable=True),
        'status': fields.StringField(nullable=True),
        'migration_type': fields.EnumField(['migration', 'resize',
                                            'live-migration', 'evacuation'],
                                           nullable=False),
        'hidden': fields.BooleanField(nullable=False, default=False),
        # Progress detail, added in version 1.4.
        'memory_total': fields.IntegerField(nullable=True),
        'memory_processed': fields.IntegerField(nullable=True),
        'memory_remaining': fields.IntegerField(nullable=True),
        'disk_total': fields.IntegerField(nullable=True),
        'disk_processed': fields.IntegerField(nullable=True),
        'disk_remaining': fields.IntegerField(nullable=True),
    }

    @staticmethod
    def _from_db_object(context, migration, db_migration):
        """Populate a Migration object from a DB row and return it."""
        for key in migration.fields:
            value = db_migration[key]
            if key == 'migration_type' and value is None:
                # Rows written before version 1.2 have no migration_type;
                # infer it from the flavor ids.
                value = determine_migration_type(db_migration)
            elif key == 'uuid' and value is None:
                # Leave uuid unset so _ensure_uuid() below can backfill it.
                continue
            migration[key] = value
        migration._context = context
        migration.obj_reset_changes()
        # Generates and persists a uuid for legacy rows that lack one.
        migration._ensure_uuid()
        return migration

    def obj_make_compatible(self, primitive, target_version):
        """Strip fields unknown to older consumers of this object."""
        super(Migration, self).obj_make_compatible(primitive, target_version)
        target_version = versionutils.convert_version_to_tuple(target_version)
        if target_version < (1, 2):
            if 'migration_type' in primitive:
                del primitive['migration_type']
                del primitive['hidden']
        if target_version < (1, 4):
            if 'memory_total' in primitive:
                del primitive['memory_total']
                del primitive['memory_processed']
                del primitive['memory_remaining']
                del primitive['disk_total']
                del primitive['disk_processed']
                del primitive['disk_remaining']
        if target_version < (1, 5):
            if 'uuid' in primitive:
                del primitive['uuid']

    def obj_load_attr(self, attrname):
        """Lazy-load attributes missing from an older sender's payload."""
        if attrname == 'migration_type':
            # NOTE(danms): The only reason we'd need to load this is if
            # some older node sent us one. So, guess the type.
            self.migration_type = determine_migration_type(self)
        elif attrname == 'hidden':
            self.hidden = False
        else:
            super(Migration, self).obj_load_attr(attrname)

    def _ensure_uuid(self):
        """Generate and persist a uuid if this record does not have one."""
        if 'uuid' in self:
            return

        self.uuid = uuidutils.generate_uuid()
        try:
            self.save()
        except db_exc.DBDuplicateEntry:
            # NOTE(danms) We raced to generate a uuid for this,
            # so fetch the winner and use that uuid
            # NOTE(review): other methods use self._context here; confirm
            # base.NovaObject exposes a ``context`` alias.
            fresh = self.__class__.get_by_id(self.context, self.id)
            self.uuid = fresh.uuid

    @base.remotable_classmethod
    def get_by_id(cls, context, migration_id):
        """Fetch a single migration by its integer DB id."""
        db_migration = db.migration_get(context, migration_id)
        return cls._from_db_object(context, cls(), db_migration)

    @base.remotable_classmethod
    def get_by_id_and_instance(cls, context, migration_id, instance_uuid):
        """Fetch a migration by id, scoped to a specific instance."""
        db_migration = db.migration_get_by_id_and_instance(
            context, migration_id, instance_uuid)
        return cls._from_db_object(context, cls(), db_migration)

    @base.remotable_classmethod
    def get_by_instance_and_status(cls, context, instance_uuid, status):
        """Fetch a migration for an instance with the given status."""
        db_migration = db.migration_get_by_instance_and_status(
            context, instance_uuid, status)
        return cls._from_db_object(context, cls(), db_migration)

    @base.remotable
    def create(self):
        """Insert this migration into the DB.

        :raises: ObjectActionError if already created or if
            ``migration_type`` was not set by the caller.
        """
        if self.obj_attr_is_set('id'):
            raise exception.ObjectActionError(action='create',
                                              reason='already created')
        # Generate the uuid client-side so it is part of the insert.
        if 'uuid' not in self:
            self.uuid = uuidutils.generate_uuid()
        updates = self.obj_get_changes()
        if 'migration_type' not in updates:
            raise exception.ObjectActionError(
                action="create",
                reason=_("cannot create a Migration object without a "
                         "migration_type set"))
        db_migration = db.migration_create(self._context, updates)
        self._from_db_object(self._context, self, db_migration)

    @base.remotable
    def save(self):
        """Persist any changed fields (the primary key is never updated)."""
        updates = self.obj_get_changes()
        updates.pop('id', None)
        db_migration = db.migration_update(self._context, self.id, updates)
        self._from_db_object(self._context, self, db_migration)
        self.obj_reset_changes()

    @property
    def instance(self):
        # Lazily fetched and cached; use the setter to pre-seed the cache.
        if not hasattr(self, '_cached_instance'):
            self._cached_instance = objects.Instance.get_by_uuid(
                self._context, self.instance_uuid)
        return self._cached_instance

    @instance.setter
    def instance(self, instance):
        self._cached_instance = instance
@base.NovaObjectRegistry.register
class MigrationList(base.ObjectListBase, base.NovaObject):
    """A list of Migration objects plus the queries that build them."""

    # Version 1.0: Initial version
    #              Migration <= 1.1
    # Version 1.1: Added use_slave to get_unconfirmed_by_dest_compute
    # Version 1.2: Migration version 1.2
    # Version 1.3: Added a new function to get in progress migrations
    #              for an instance.
    # Version 1.4: Added sort_keys, sort_dirs, limit, marker kwargs to
    #              get_by_filters for migrations pagination support.
    VERSION = '1.4'

    fields = {
        'objects': fields.ListOfObjectsField('Migration'),
    }

    @staticmethod
    @db.select_db_reader_mode
    def _db_migration_get_unconfirmed_by_dest_compute(
            context, confirm_window, dest_compute, use_slave=False):
        # use_slave is consumed by the select_db_reader_mode decorator to
        # route the query to the (possibly lagging) reader connection.
        return db.migration_get_unconfirmed_by_dest_compute(
            context, confirm_window, dest_compute)

    @base.remotable_classmethod
    def get_unconfirmed_by_dest_compute(cls, context, confirm_window,
                                        dest_compute, use_slave=False):
        """List resizes awaiting confirmation on a destination host."""
        db_migrations = cls._db_migration_get_unconfirmed_by_dest_compute(
            context, confirm_window, dest_compute, use_slave=use_slave)
        return base.obj_make_list(context, cls(context), objects.Migration,
                                  db_migrations)

    @base.remotable_classmethod
    def get_in_progress_by_host_and_node(cls, context, host, node):
        """List migrations still in progress on a given host/node."""
        db_migrations = db.migration_get_in_progress_by_host_and_node(
            context, host, node)
        return base.obj_make_list(context, cls(context), objects.Migration,
                                  db_migrations)

    @base.remotable_classmethod
    def get_by_filters(cls, context, filters, sort_keys=None, sort_dirs=None,
                       limit=None, marker=None):
        """List migrations matching arbitrary filters, with pagination."""
        db_migrations = db.migration_get_all_by_filters(
            context, filters, sort_keys=sort_keys, sort_dirs=sort_dirs,
            limit=limit, marker=marker)
        return base.obj_make_list(context, cls(context), objects.Migration,
                                  db_migrations)

    @base.remotable_classmethod
    def get_in_progress_by_instance(cls, context, instance_uuid,
                                    migration_type=None):
        """List in-progress migrations for one instance, optionally by type."""
        db_migrations = db.migration_get_in_progress_by_instance(
            context, instance_uuid, migration_type)
        return base.obj_make_list(context, cls(context), objects.Migration,
                                  db_migrations)
| |
#!/usr/bin/env python
"""Build 837 parsers and 837 Segment definitions from XML Configuration.
Synopsis
========
:samp:`make837.py -p {python} -d {django} -b {baseDir} {name=source}...`
Description
===========
Reads the PyX12 source files (in .XML) and writes a Python parser module
for unmarshalling messages as well as a Django model for persisting the
message instances.
Prior to this step you must initialize the application directory
structure with :samp:`manage.py startapp {newApp}`.
After this step, you must update :file:`settings.py` to include the new application.
Then you must sync the database with the following command before you can start
using the parser, factory and Django persistent class definitions you've created.
:samp:`manage.py syncdb --pythonpath=".."`
Options
=======
:samp:`-p {python}` writes the Python module for unmarshalling the given messages.
:samp:`-d {django}` writes two Django modules for persisting the given message instances.
This will be :samp:`{django}/models.py` and :samp:`{django}/admin.py`.
:samp:`-b {baseDir}` is the base directory for the various source files.
:samp:`name={source}` a list of parser names and message definitions.
The name is the Python variable name; the source is the PyX12 XML source file.
Notes
=====
An X12 parser is defined to use a Factory object to emit message instance objects.
This Factory object is a run-time binding between a parser and some message instance module.
Generic message instance modules include :mod:`X12.message` and :mod:`web.claims.models`.
This application builds a specific message instance module, with specific message types.
In order for an :mod:`X12.parse` module to make use of the Django message instance module,
a Factory is required to instantiate the Django objects as well as the generic
X12Segment object defined in :mod:`web.claims.models`.
"""
from __future__ import print_function
import optparse
import tools.convertPyX12
import os
import logging
import sys
import datetime
import X12.map.source
import X12.map.dj
class ApplicationBuilder(object):
    """Build the core parser and message persistence modules for
    an application.

    Generally, given a base directory, we'll load a number of XML files
    which will define X12N message parsers. We have to emit a Python
    source module for each individual message type, since they have unique
    compliance rules.

    After seeing all of those XML files, we'll also know the various
    segments defined and can emit a persistence model for the unique
    kinds of segments.

    Note that the Django Visitor *accumulates* the various definitions.
    Only the last output from the Django Visitor is particularly useful.
    """

    def __init__(self, baseDir=None):
        """Initialize the ApplicationBuilder.

        :param baseDir: directory holding dataele.xml, codes.xml and the
            message definition files; defaults to the current directory.
        """
        self.baseDir = baseDir if baseDir is not None else "."
        self.log = logging.getLogger("make837.LoadXMLDefs")
        self.bldParser = tools.convertPyX12.ParserBuilder()
        # Accumulate Django definitions until after all messages have been
        # examined; these visitors keep state across load() calls.
        self.djMap = X12.map.dj.DjangoModelVisitor()
        self.djAdm = X12.map.dj.DjangoAdminVisitor()
        self.x12p = None
        self.pyCode = None

    def load(self, xmlDef):
        """Load another message definition. This will also accumulate
        Django Segment definitions.

        :param xmlDef: file name of the PyX12 XML message definition,
            relative to ``baseDir``.
        :returns: the built X12 parser structure.
        :raises Warning: re-raised from the PyX12 build, after logging.
        """
        try:
            xml = tools.convertPyX12.XMLParser()
            xml.data(os.path.join(self.baseDir, "dataele.xml"))
            xml.codes(os.path.join(self.baseDir, "codes.xml"))
            xml.read(os.path.join(self.baseDir, xmlDef))
            self.x12p = self.bldParser.build(xml)
        except Warning:
            # BUG FIX: was ``log.warning`` -- ``log`` is undefined in this
            # scope and raised a NameError instead of logging.
            self.log.warning('*** WARNING IN %s', xmlDef)
            self.x12p = None
            raise
        self.pyCode = None
        self.x12p.visit(self.djMap)
        self.x12p.visit(self.djAdm)
        return self.x12p

    @property
    def getParser(self):
        """Return the current message's parser object."""
        return self.x12p

    def getPython(self, name):
        """Return Python source for the current message.

        :param name: Python variable name for the generated parser.
        """
        pyMap = X12.map.source.FlatPythonVisitor(name)
        self.x12p.visit(pyMap)
        self.pyCode = pyMap.getSource()
        return self.pyCode

    def getDjangoModel(self, appname=None):
        """Emit the Django model code built so far.

        :param appname: accepted for symmetry with :meth:`getDjangoAdmin`
            (callers pass it); the model source does not depend on it.
        """
        self.djangoModel = self.djMap.getSource()
        return self.djangoModel

    def getDjangoAdmin(self, appname='claims_837'):
        """Emit the Django Admin code built so far.

        :param appname: Django application name used in the admin source.
        """
        self.djangoAdmin = self.djAdm.getSource(appname)
        return self.djangoAdmin
factory='''
import web.claims.models
class Factory( web.claims.models.Factory ):
"""Factory for a generated application."""
@staticmethod
def makeSegment( segmentToken, compositeSep, segmentType=None ):
"""Create a Segment from a SegmentToken and an Segment definition.
:param segmentToken: An :class:`X12.parse.SegmentToken` instance:
a list-like collection of Element values. It turns out that a simple
list of values may also work, if it does NOT have trailing empty
items omitted. Real Segment Tokens can have trailing empty items
omitted.
:param compositeSep: Composite internal separator from the ISA segment.
:param segmentType: An :class:`X12.parse.Segment` instance, which
defines the Elements and Composites of this X12Segment.
:returns: X12Segment instance
"""
seg= web.claims.models.Factory.makeSegment( segmentToken, compositeSep, segmentType )
appSegClass= eval( "Segment_%s" % ( segmentToken[0], ) )
appSeg= appSegClass( segment= seg )
appSeg.unmarshall( segmentToken, compositeSep )
appSeg.save()
return seg
'''
def process(baseDir=r"C:\Python25\share\pyx12\map"):
    """Demonstration driver: build three 837 parsers and dump everything
    (parsers, factory, Django model and admin source) to stdout.

    NOTE(review): "parse_839p" paired with 837.4010.x098.A1.xml looks like
    a typo for "parse_837p" -- confirm before relying on the name.
    """
    conversions = (
        ("parse_837i", "837.4010.x096.A1.xml"),
        ("parse_837d", "837.4010.x097.A1.xml"),
        ("parse_839p", "837.4010.x098.A1.xml"),
    )
    builder = ApplicationBuilder(baseDir)
    # Emit one Python parser module per message as each definition loads.
    for parserName, fileName in conversions:
        builder.load(fileName)
        print(builder.getPython(parserName))
    # The Django visitors have accumulated all segments; emit them once.
    print(factory)
    builder.getDjangoModel()
    print(builder.djangoModel)
    builder.getDjangoAdmin('claims_837')
    print(builder.djangoAdmin)
def main():
    """Command-line entry point.

    Parses ``-p/-d/-b`` options plus ``name=source`` arguments, loads each
    message definition, and writes the requested Python parser module and/or
    Django ``models.py``/``admin.py`` files.
    """
    # Parse command-line options
    cmdParse = optparse.OptionParser(version="0.1")
    cmdParse.add_option("-p", "--python", dest="python", default=None)
    cmdParse.add_option("-d", "--django", dest="django", default=None)
    cmdParse.add_option("-b", "--basedir", dest="baseDir", default=".")
    options, args = cmdParse.parse_args()
    now = datetime.datetime.now()
    builder = ApplicationBuilder(options.baseDir)
    # The Python output file must stay open across the whole argument loop,
    # so it is opened explicitly and closed in a finally clause (a ``with``
    # block around only the header writes would close it too early).
    pFile = None
    try:
        if options.python is not None:
            pFile = open(options.python, "w")
            pFile.write('#!/usr/bin/env python\n')
            pFile.write('"""Generated by make837.py on %s"""\n' % (now,))
        for argText in args:
            parserName, punct, fileName = argText.partition("=")
            builder.load(fileName)
            if pFile is not None:
                pySrc = builder.getPython(parserName)
                pFile.write("# %s/%s\n" % (options.baseDir, fileName))
                pFile.write(pySrc)
                pFile.write('\n\n')
    finally:
        if pFile is not None:
            pFile.close()
    if options.django is not None:
        with open(os.path.join(options.django, "models.py"), "w") as dFile:
            dFile.write('#!/usr/bin/env python\n')
            dFile.write('"""Generated by make837.py on %s"""\n' % (now,))
            dFile.write(factory)
            dFile.write('\n\n')
            # BUG FIX: getDjangoModel() takes no application-name argument;
            # passing options.django raised a TypeError.
            djSrc = builder.getDjangoModel()
            dFile.write(djSrc)
            dFile.write('\n\n')
        with open(os.path.join(options.django, "admin.py"), "w") as dFile:
            dFile.write('#!/usr/bin/env python\n')
            dFile.write('"""Generated by make837.py on %s"""\n' % (now,))
            djSrc = builder.getDjangoAdmin(options.django)
            dFile.write(djSrc)
            dFile.write('\n\n')
if __name__ == "__main__":
    # Log to stderr so generated source on stdout stays clean.
    logging.basicConfig(
        stream=sys.stderr,
        level=logging.DEBUG,
    )
    # The parser builder is very chatty at DEBUG; cap it at INFO.
    logging.getLogger("tools.convertPyX12.BuildParser").setLevel(logging.INFO)
    #process()
    main()
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Minimal runtime type checking library.
This module should not be considered public API.
"""
# TODO(ericmc,shoyer): Delete this in favor of using pytype or mypy
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
# used for register_type_abbreviation and _type_repr below.
_TYPE_ABBREVIATIONS = {}
class Type(object):
  """Base class for type checker types.

  The custom types defined in this module mirror those in the standard
  library's typing module (Python 3.5):
  https://docs.python.org/3/library/typing.html

  The difference is that actual instances of Type subclasses represent
  custom types, instead of the metaclass machinery typing uses to create
  new class objects.  In practice this just means writing `List(int)`
  rather than `List[int]`.

  Custom types should implement __instancecheck__ and inherit from Type.
  Every constructor argument must be a type or a Type instance, stored as
  a tuple on the `_types` attribute.
  """

  def __init__(self, *types):
    self._types = types

  def __repr__(self):
    inner = ", ".join(map(repr, self._types))
    return "typecheck.%s(%s)" % (type(self).__name__, inner)
class _SingleArgumentType(Type):
  """Use this subclass for parametric types that accept only one argument."""

  def __init__(self, tpe):
    super(_SingleArgumentType, self).__init__(tpe)

  @property
  def _type(self):
    # _types always holds exactly one entry for this subclass.
    (only,) = self._types  # pylint: disable=unbalanced-tuple-unpacking
    return only
class _TwoArgumentType(Type):
  """Use this subclass for parametric types that take exactly two arguments."""

  def __init__(self, first_type, second_type):
    # Store both parametric types on the base class's _types tuple.
    super(_TwoArgumentType, self).__init__(first_type, second_type)
class Union(Type):
  """A sum type: an instance of any one of the member types is accepted."""

  def __instancecheck__(self, instance):
    # isinstance accepts a tuple of types natively.
    return isinstance(instance, self._types)
class Optional(_SingleArgumentType):
  """An optional type: either the wrapped type or None."""

  def __instancecheck__(self, instance):
    # NoneType is spelled type(None); types.NoneType is absent in Python 3.
    return isinstance(instance, (self._type, type(None)))
class List(_SingleArgumentType):
  """A typed list: a ``list`` whose every element matches the one type."""

  def __instancecheck__(self, instance):
    if not isinstance(instance, list):
      return False
    return all(isinstance(item, self._type) for item in instance)
class Sequence(_SingleArgumentType):
  """A typed sequence: any Sequence whose elements all match the one type."""

  def __instancecheck__(self, instance):
    if not isinstance(instance, collections_abc.Sequence):
      return False
    return all(isinstance(item, self._type) for item in instance)
class Collection(_SingleArgumentType):
  """A sized, iterable container with a single element type.

  Preferred over a plain Iterable because every element is checked at
  runtime, so one-shot iterables that could be exhausted must be avoided.
  """

  def __instancecheck__(self, instance):
    protocols = (collections_abc.Iterable,
                 collections_abc.Sized,
                 collections_abc.Container)
    if not all(isinstance(instance, proto) for proto in protocols):
      return False
    return all(isinstance(item, self._type) for item in instance)
class Tuple(Type):
  """A typed tuple: correct length, and each slot matches its own type."""

  def __instancecheck__(self, instance):
    if not isinstance(instance, tuple):
      return False
    if len(instance) != len(self._types):
      return False
    return all(isinstance(item, tpe)
               for item, tpe in zip(instance, self._types))
class Mapping(_TwoArgumentType):
  """A typed mapping with parametric key and value types."""

  def __instancecheck__(self, instance):
    key_type, value_type = self._types  # pylint: disable=unbalanced-tuple-unpacking
    if not isinstance(instance, collections_abc.Mapping):
      return False
    if not all(isinstance(key, key_type) for key in instance.keys()):
      return False
    return all(isinstance(val, value_type) for val in instance.values())
class Dict(Mapping):
  """A typed dict: a real ``dict`` with the parametric key/value types."""

  def __instancecheck__(self, instance):
    return (isinstance(instance, dict)
            and super(Dict, self).__instancecheck__(instance))
def _replace_forward_references(t, context):
  """Replace forward references (strings) in *t* by lookup in *context*."""
  if isinstance(t, str):
    return context[t]
  if isinstance(t, Type):
    # Rebuild the parametric type with its arguments resolved recursively.
    resolved = [_replace_forward_references(sub, context)
                for sub in t._types]  # pylint: disable=protected-access
    return type(t)(*resolved)
  return t
def register_type_abbreviation(name, alias):
  """Register an abbreviation for a type in typecheck tracebacks.

  Makes otherwise very long typecheck errors much more readable.

  Example:
    typecheck.register_type_abbreviation(tf.compat.v1.Dimension,
                                         'tf.compat.v1.Dimension')

  Args:
    name: type or class to abbreviate.
    alias: string alias to substitute.
  """
  # Consulted by _type_repr() when formatting error messages.
  _TYPE_ABBREVIATIONS[name] = alias
def _type_repr(t):
  """A more succinct repr for typecheck tracebacks."""
  out = repr(t)
  # Apply user-registered abbreviations first, then strip boilerplate.
  for known, alias in _TYPE_ABBREVIATIONS.items():
    out = out.replace(repr(known), alias)
  out = re.sub(r"<(class|type) '([\w.]+)'>", r"\2", out)
  return re.sub(r"typecheck\.(\w+)", r"\1", out)
class Error(TypeError):
  """Raised when a typecheck annotation is violated."""
def accepts(*types):
  """A decorator which checks the input types of a function.

  Based on:
  http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
  The above draws from:
  https://www.python.org/dev/peps/pep-0318/

  Args:
    *types: A list of Python types.

  Returns:
    A function to use as a decorator.
  """

  def check_accepts(f):
    """Check the types."""
    spec = tf_inspect.getargspec(f)
    arg_count = len(spec.args)
    if len(types) != arg_count:
      raise Error(
          "Function %r has %d arguments but only %d types were provided in the "
          "annotation." % (f, arg_count, len(types)))

    # Default values are validated eagerly, at decoration time.
    if spec.defaults:
      num_defaults = len(spec.defaults)
      defaulted = zip(spec.args[-num_defaults:], spec.defaults,
                      types[-num_defaults:])
      for name, default, annotation in defaulted:
        allowed = _replace_forward_references(annotation, f.__globals__)
        if not isinstance(default, allowed):
          raise Error("default argument value %r of type %r is not an instance "
                      "of the allowed type %s for the %s argument to %r" %
                      (default, type(default), _type_repr(allowed), name, f))

    @functools.wraps(f)
    def new_f(*args, **kwds):
      """A helper function."""
      # Positional arguments are validated on every call.
      for value, annotation in zip(args, types):
        allowed = _replace_forward_references(annotation, f.__globals__)
        if not isinstance(value, allowed):
          raise Error("%r of type %r is not an instance of the allowed type %s "
                      "for %r" % (value, type(value), _type_repr(allowed), f))
      return f(*args, **kwds)

    return new_f

  return check_accepts
def returns(*types):
  """A decorator which checks the return types of a function.

  Based on:
  http://stackoverflow.com/questions/15299878/how-to-use-python-decorators-to-check-function-arguments
  The above draws from:
  https://www.python.org/dev/peps/pep-0318/

  Args:
    *types: A list of Python types.  A list of one element corresponds to a
      single return value.  A list of several elements corresponds to several
      return values.  Note that a function with no explicit return value has
      an implicit NoneType return and should be annotated correspondingly.

  Returns:
    A function to use as a decorator.
  """

  def check_returns(f):
    """Check the types."""
    if not types:
      raise TypeError("A return type annotation must contain at least one type")

    @functools.wraps(f)
    def new_f(*args, **kwds):
      """A helper function."""
      result = f(*args, **kwds)
      if len(types) == 1:
        # Single return value: check it directly.
        allowed = _replace_forward_references(types[0], f.__globals__)
        if not isinstance(result, allowed):
          raise Error(
              "%r of type %r is not an instance of the allowed type %s "
              "for %r" %
              (result, type(result), _type_repr(allowed), f))
      else:
        # Multiple return values: the result must unpack element-wise.
        if len(result) != len(types):
          raise Error("Function %r has %d return values but only %d types were "
                      "provided in the annotation." %
                      (f, len(result), len(types)))
        for item, annotation in zip(result, types):
          allowed = _replace_forward_references(annotation, f.__globals__)
          if not isinstance(item, allowed):
            raise Error("%r of type %r is not an instance of allowed type %s "
                        "for %r" % (item, type(item), _type_repr(allowed), f))
      return result

    return new_f

  return check_returns
| |
from {{appname}}.database.mongodblib import db, client
from {{appname}}.lib.powlib import pluralize
import datetime
import xmltodict
import simplejson as json
import datetime, decimal
from {{appname}}.conf.config import myapp
from {{appname}}.lib.powlib import merge_two_dicts
from {{appname}}.lib.encoders import pow_json_serializer
from {{appname}}.models.modelobject import ModelObject
from bson.json_util import dumps
import pymongo
import uuid
class MongoBaseModel(ModelObject):
    """
    The Raw BaseModel Class for MongoDB-backed models.

    Provides CRUD helpers (upsert/delete/find*) on top of a pymongo
    collection named after the pluralized, lowercased class name.
    """

    def init_on_load(self, *args, **kwargs):
        """
        Basic setup for all mongoDB models: schema, collection, ids,
        observers, and optional initialization from kwargs or from a
        serialized format, e.g. Model(data={...}, format="json") which
        dispatches to init_from_json(...).
        """
        super().init_on_load()
        # Fields every model carries in addition to its own schema.
        self.basic_schema = {
            "id": {"type": "string", "default": None},
            "_uuid": {"type": "string", "default": None},
            "created_at": {"type": "datetime", "default": None},
            "last_updated": {"type": "datetime", "default": None},
        }
        self.setup_instance_schema()
        self.setup_instance_values()
        if "format" in kwargs:
            # Dispatch to the matching init_from_<format> method, which
            # initializes the instance from kwargs["data"].
            loader = getattr(self, "init_from_" + kwargs["format"], None)
            if loader:
                loader(kwargs)
        else:
            # Plain keyword initialization: Model(test="x", title="y").
            for key in kwargs:
                if key in self.schema:
                    setattr(self, key, kwargs[key])
        # Collection named after the pluralized lowercase class name; our
        # own (uuid4) ``id`` field gets a unique index.
        self.tablename = pluralize(self.__class__.__name__.lower())
        self.table = db[self.tablename]
        self.collection = self.table
        self.table.create_index([('id', pymongo.ASCENDING)], unique=True)
        self._id = None  # MongoDB ObjectId, set on first insert
        self.id = str(uuid.uuid4())
        self._uuid = self.id
        self.init_observers()

    #
    # These Methods should be implemented by every subclass
    #
    def get(self, name):
        """Return the attribute ``name`` of this model."""
        return getattr(self, name)

    def to_json(self):
        """Dump this model to a JSON-formatted string (bson.json_util)."""
        from bson.json_util import DEFAULT_JSON_OPTIONS
        DEFAULT_JSON_OPTIONS.datetime_representation = 2
        return dumps(self.to_dict())

    def json_result_to_object(self, res):
        """
        returns a list of objects from a given json list (string)
        """
        raise NotImplementedError("Subclasses should overwrite this Method.")

    def _get_next_object(self, cursor):
        """
        Return a generator yielding one initialized model per cursor
        document.
        """
        for doc in cursor:
            model = self.__class__()
            model.init_from_dict(doc)
            yield model

    def _return_find(self, res):
        """
        Wrap a find result. A pymongo cursor yields a generator of models;
        anything else is treated as a single document (dict) and returns
        one model.
        """
        if not isinstance(res, pymongo.cursor.Cursor):
            model = self.__class__()
            model.init_from_dict(res)
            return model
        return self._get_next_object(res)

    def print_full(self):
        """Subclasses should overwrite this Method.

        Prints every attribute including related objects in full.
        """
        raise NotImplementedError("Subclasses should overwrite this Method.")

    def json_load_from_db(self, data, keep_id=False):
        """Refresh the object from db and return json."""
        raise NotImplementedError("Subclasses should overwrite this Method.")

    def print_db_schema(self):
        """Subclasses should overwrite this Method.

        Shows the schema as returned by the db.
        """
        raise NotImplementedError("Subclasses should overwrite this Method.")

    def get_relationships(self):
        """Subclasses should overwrite this Method.

        Shows all related classes.
        """
        raise NotImplementedError("Subclasses should overwrite this Method.")

    def get_relations(self):
        """Subclasses should overwrite this Method.

        Shows all related classes.
        """
        raise NotImplementedError("Subclasses should overwrite this Method.")

    def create_table(self):
        """Create the physical table in the DB."""
        raise NotImplementedError("creat_table is not implemented, yet")

    def drop_table(self):
        """Drop the physical table in the DB."""
        raise NotImplementedError("drop_table is not implemented, yet.")

    def upsert(self):
        """Insert the model if never stored, otherwise update it.

        :returns: the new ObjectId on insert, or the pymongo UpdateResult
            on update.
        """
        # Notify observers once before writing.  (The original notified
        # them twice because the loop was duplicated.)
        if self.observers_initialized:
            for observer in self.observers:
                try:
                    observer.before_upsert(self)
                except Exception:
                    # Observers are best-effort; a failing observer must
                    # not block persistence.
                    pass
        self.last_updated = datetime.datetime.utcnow().strftime(
            myapp["datetime_format"])
        # Clear dirty tracking before returning.  (The original placed
        # this after the return statements, where it was unreachable.)
        self.dirty = {}
        self.is_dirty = False
        if self._id is None:
            # First insert: stamp created_at as well.
            self.created_at = datetime.datetime.utcnow().strftime(
                myapp["datetime_format"])
            self.last_updated = self.created_at
            ior = self.table.insert_one(self.to_dict())
            self._id = ior.inserted_id
            return self._id
        else:
            ior = self.table.update_one(
                {"_id": self._id}, {"$set": self.to_dict()}, upsert=False)
            return ior

    def delete(self, filter=None, many=False):
        """Delete this document (default) or whatever matches ``filter``."""
        if filter is None:
            filter = {"id": self.id}
        # Clean dirty marks.
        self.dirty = {}
        self.is_dirty = False
        if many:
            return self.table.delete_many(filter)
        return self.table.delete_one(filter)

    def find_by_id(self, id, use_object_id=False):
        """Return result by id (only).

        :param use_object_id: if True find by MongoDB ObjectID,
            else use the PoW id (uuid4).
        """
        if use_object_id:
            return self.find_one({"_id": id})
        return self.find_one({"id": id})

    def from_statement(self, statement):
        """Execute a given DB statement raw."""
        raise NotImplementedError("from_statement is not available for mongoDB.")

    def page(self, filter=None, page=0, page_size=None):
        """Return one page of results.

        ``page`` is the page index (offset in pages); ``page_size``
        defaults to config myapp["page_size"].
        """
        if filter is None:
            filter = {}
        if page_size is None:
            page_size = myapp["page_size"]
        cursor = self.table.find(filter).skip(page * page_size).limit(page_size)
        return self._return_find(cursor)

    def find(self, filter=None, raw=False):
        """Find documents matching filter = {"key": value, ...}.

        :param raw: if True return the pymongo cursor untouched.
        """
        if filter is None:
            filter = {}
        if raw:
            return self.table.find(filter)
        return self._return_find(self.table.find(filter))

    def find_all(self, filter=None, raw=False, limit=0, offset=0):
        """Find everything matching filter, optionally paged.

        ``limit`` maps to the page size and ``offset`` to the page index.
        """
        if filter is None:
            filter = {}
        if (limit > 0) or (offset > 0):
            # BUG FIX: page() has no limit/offset parameters; map them
            # onto its page/page_size signature instead of raising
            # TypeError as the original call did.
            return self.page(filter=filter, page=offset,
                             page_size=limit or None)
        return self.find(filter)

    def get_all(self):
        """Synonym for find_all, without any filters or limits."""
        return self.find_all()

    def find_one(self, filter=None):
        """Return a single model for the first match, or None."""
        if filter is None:
            filter = {}
        res = self.table.find_one(filter)
        if res is not None:
            return self._return_find(res)
        return None

    def find_first(self, filter=None):
        """Return the first hit, or None."""
        raise NotImplementedError("Not available for MongoDB")

    def q(self):
        """Return a raw query handle so the user can do everything the DB
        offers without limitations.

        for sqlalchemy: return session.query(self.__class__)
        for elastic:    return Q
        for tinyDB:     return Query
        for MongoDB:    the collection itself
        """
        return self.table
| |
#!/usr/bin/python
# Copyright 2015 The ANGLE Project Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
#
# gen_texture_format_table.py:
# Code generation for texture format map
#
import json
import math
import pprint
import re
# Template for the generated texture_format_table_autogen.h: declares the
# ANGLEFormat enum ({angle_format_enum} is filled in via str.format, which is
# why every literal brace in the C++ text is doubled).
template_texture_format_table_autogen_h = """// GENERATED FILE - DO NOT EDIT.
// Generated by gen_texture_format_table.py using data from texture_format_data.json
//
// Copyright 2016 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
namespace rx
{{
namespace d3d11
{{
enum ANGLEFormat
{{
{angle_format_enum}
}};
}} // namespace d3d11
}} // namespace rx
"""
template_texture_format_table_autogen_cpp = """// GENERATED FILE - DO NOT EDIT.
// Generated by gen_texture_format_table.py using data from texture_format_data.json
//
// Copyright 2015 The ANGLE Project Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// texture_format_table:
// Queries for full textureFormat information based in internalFormat
//
#include "libANGLE/renderer/d3d/d3d11/texture_format_table.h"
#include "libANGLE/renderer/d3d/d3d11/formatutils11.h"
#include "libANGLE/renderer/d3d/d3d11/load_functions_table.h"
#include "libANGLE/renderer/d3d/d3d11/renderer11_utils.h"
#include "libANGLE/renderer/d3d/copyimage.h"
#include "libANGLE/renderer/d3d/generatemip.h"
#include "libANGLE/renderer/d3d/loadimage.h"
namespace rx
{{
namespace d3d11
{{
namespace
{{
typedef bool (*FormatSupportFunction)(const Renderer11DeviceCaps &);
bool OnlyFL10Plus(const Renderer11DeviceCaps &deviceCaps)
{{
return (deviceCaps.featureLevel >= D3D_FEATURE_LEVEL_10_0);
}}
bool OnlyFL9_3(const Renderer11DeviceCaps &deviceCaps)
{{
return (deviceCaps.featureLevel == D3D_FEATURE_LEVEL_9_3);
}}
template <DXGI_FORMAT format, bool requireSupport>
bool SupportsFormat(const Renderer11DeviceCaps &deviceCaps)
{{
// Must support texture, SRV and RTV support
UINT mustSupport = D3D11_FORMAT_SUPPORT_TEXTURE2D | D3D11_FORMAT_SUPPORT_TEXTURECUBE |
D3D11_FORMAT_SUPPORT_SHADER_SAMPLE | D3D11_FORMAT_SUPPORT_MIP |
D3D11_FORMAT_SUPPORT_RENDER_TARGET;
if (d3d11_gl::GetMaximumClientVersion(deviceCaps.featureLevel) > 2)
{{
mustSupport |= D3D11_FORMAT_SUPPORT_TEXTURE3D;
}}
bool fullSupport = false;
if (format == DXGI_FORMAT_B5G6R5_UNORM)
{{
// All hardware that supports DXGI_FORMAT_B5G6R5_UNORM should support autogen mipmaps, but
// check anyway.
mustSupport |= D3D11_FORMAT_SUPPORT_MIP_AUTOGEN;
fullSupport = ((deviceCaps.B5G6R5support & mustSupport) == mustSupport);
}}
else if (format == DXGI_FORMAT_B4G4R4A4_UNORM)
{{
fullSupport = ((deviceCaps.B4G4R4A4support & mustSupport) == mustSupport);
}}
else if (format == DXGI_FORMAT_B5G5R5A1_UNORM)
{{
fullSupport = ((deviceCaps.B5G5R5A1support & mustSupport) == mustSupport);
}}
else
{{
UNREACHABLE();
return false;
}}
// This 'SupportsFormat' function is used by individual entries in the D3D11 Format Map below,
// which maps GL formats to DXGI formats.
if (requireSupport)
{{
// This means that ANGLE would like to use the entry in the map if the inputted DXGI format
// *IS* supported.
// e.g. the entry might map GL_RGB5_A1 to DXGI_FORMAT_B5G5R5A1, which should only be used if
// DXGI_FORMAT_B5G5R5A1 is supported.
// In this case, we should only return 'true' if the format *IS* supported.
return fullSupport;
}}
else
{{
// This means that ANGLE would like to use the entry in the map if the inputted DXGI format
// *ISN'T* supported.
// This might be a fallback entry. e.g. for ANGLE to use DXGI_FORMAT_R8G8B8A8_UNORM if
// DXGI_FORMAT_B5G5R5A1 isn't supported.
// In this case, we should only return 'true' if the format *ISN'T* supported.
return !fullSupport;
}}
}}
// End Format Support Functions
}} // namespace
ANGLEFormatSet::ANGLEFormatSet()
: format(ANGLE_FORMAT_NONE),
glInternalFormat(GL_NONE),
texFormat(DXGI_FORMAT_UNKNOWN),
srvFormat(DXGI_FORMAT_UNKNOWN),
rtvFormat(DXGI_FORMAT_UNKNOWN),
dsvFormat(DXGI_FORMAT_UNKNOWN),
blitSRVFormat(DXGI_FORMAT_UNKNOWN),
swizzleFormat(ANGLE_FORMAT_NONE),
mipGenerationFunction(nullptr),
colorReadFunction(nullptr)
{{
}}
// For sized GL internal formats, there are several possible corresponding D3D11 formats depending
// on device capabilities.
// This function allows querying for the DXGI texture formats to use for textures, SRVs, RTVs and
// DSVs given a GL internal format.
TextureFormat::TextureFormat(GLenum internalFormat,
const ANGLEFormat angleFormat,
InitializeTextureDataFunction internalFormatInitializer)
: dataInitializerFunction(internalFormatInitializer)
{{
formatSet = &GetANGLEFormatSet(angleFormat);
swizzleFormatSet = &GetANGLEFormatSet(formatSet->swizzleFormat);
// Gather all the load functions for this internal format
loadFunctions = GetLoadFunctionsMap(internalFormat, formatSet->texFormat);
ASSERT(loadFunctions.size() != 0 || internalFormat == GL_NONE);
}}
ANGLEFormatSet::ANGLEFormatSet(ANGLEFormat format,
GLenum glInternalFormat,
DXGI_FORMAT texFormat,
DXGI_FORMAT srvFormat,
DXGI_FORMAT rtvFormat,
DXGI_FORMAT dsvFormat,
DXGI_FORMAT blitSRVFormat,
ANGLEFormat swizzleFormat,
MipGenerationFunction mipGenerationFunction,
ColorReadFunction colorReadFunction)
: format(format),
glInternalFormat(glInternalFormat),
texFormat(texFormat),
srvFormat(srvFormat),
rtvFormat(rtvFormat),
dsvFormat(dsvFormat),
blitSRVFormat(blitSRVFormat),
swizzleFormat(swizzleFormat),
mipGenerationFunction(mipGenerationFunction),
colorReadFunction(colorReadFunction)
{{
}}
const ANGLEFormatSet &GetANGLEFormatSet(ANGLEFormat angleFormat)
{{
// clang-format off
switch (angleFormat)
{{
{angle_format_info_cases}
default:
break;
}}
// clang-format on
UNREACHABLE();
static const ANGLEFormatSet defaultInfo;
return defaultInfo;
}}
const TextureFormat &GetTextureFormatInfo(GLenum internalFormat,
const Renderer11DeviceCaps &renderer11DeviceCaps)
{{
// clang-format off
switch (internalFormat)
{{
{texture_format_info_cases}
default:
break;
}}
// clang-format on
static const TextureFormat defaultInfo(GL_NONE, ANGLE_FORMAT_NONE, nullptr);
return defaultInfo;
}} // GetTextureFormatInfo
}} // namespace d3d11
}} // namespace rx
"""
# TODO(oetuaho): Expand this code so that it could generate the gl format info tables as well.
def gl_format_channels(internal_format):
    """Return the channel string ('rgba', 'la', 'd', 's', ...) for a GL internal format name.

    A few irregular names are special-cased up front; everything else is
    parsed out of the GL_<qualifiers><CHANNELS><bits> naming pattern.
    """
    # Names that do not follow the regular pattern.
    if internal_format == 'GL_BGR5_A1_ANGLEX':
        return 'bgra'
    if internal_format == 'GL_R11F_G11F_B10F':
        return 'rgb'
    if internal_format == 'GL_RGB5_A1':
        return 'rgba'
    if internal_format.find('GL_RGB10_A2') == 0:
        return 'rgba'
    # Raw string so that \d is a regex digit class, not a (deprecated) string escape.
    channels_pattern = re.compile(r'GL_(COMPRESSED_)?(SIGNED_)?(ETC\d_)?([A-Z]+)')
    match = channels_pattern.search(internal_format)
    channels_string = match.group(4)
    if channels_string == 'ALPHA':
        return 'a'
    if channels_string == 'LUMINANCE':
        if (internal_format.find('ALPHA') >= 0):
            return 'la'
        return 'l'
    if channels_string == 'SRGB':
        if (internal_format.find('ALPHA') >= 0):
            return 'rgba'
        return 'rgb'
    if channels_string == 'DEPTH':
        if (internal_format.find('STENCIL') >= 0):
            return 'ds'
        return 'd'
    if channels_string == 'STENCIL':
        return 's'
    # Plain channel list such as 'RGBA' or 'RG'.
    return channels_string.lower()
def get_internal_format_initializer(internal_format, angle_format):
    """Return the C++ initializer expression needed for this format pairing.

    When a GL format without alpha (rgb / luminance) is backed by a DXGI
    format that does have an alpha channel, the texture data must be
    initialized so alpha reads as opaque. Returns 'nullptr' when no
    initialization is required.

    Raises ValueError when an initializer would be needed but cannot be
    derived from the entry's componentType / bits description (previously a
    missing 'bits' key surfaced as a bare KeyError instead).
    """
    gl_channels = gl_format_channels(internal_format)
    gl_format_no_alpha = gl_channels == 'rgb' or gl_channels == 'l'
    if not (gl_format_no_alpha and angle_format['channels'] == 'rgba'):
        return 'nullptr'
    if angle_format['texFormat'] == 'DXGI_FORMAT_BC1_UNORM':
        # BC1 is a special case since the texture data determines whether each block has an alpha channel or not.
        # This branch is hit by COMPRESSED_RGB_S3TC_DXT1, which is a bit of a mess.
        # TODO(oetuaho): Look into whether COMPRESSED_RGB_S3TC_DXT1 works right in general.
        # Reference: https://www.opengl.org/registry/specs/EXT/texture_compression_s3tc.txt
        return 'nullptr'
    # One initializer per (componentType, red-channel bit count); replaces the
    # previous long elif ladder.
    initializers = {
        ('uint', 8): 'Initialize4ComponentData<GLubyte, 0x00, 0x00, 0x00, 0x01>',
        ('unorm', 8): 'Initialize4ComponentData<GLubyte, 0x00, 0x00, 0x00, 0xFF>',
        ('int', 8): 'Initialize4ComponentData<GLbyte, 0x00, 0x00, 0x00, 0x01>',
        ('snorm', 8): 'Initialize4ComponentData<GLbyte, 0x00, 0x00, 0x00, 0x7F>',
        ('float', 16): 'Initialize4ComponentData<GLhalf, 0x0000, 0x0000, 0x0000, gl::Float16One>',
        ('uint', 16): 'Initialize4ComponentData<GLushort, 0x0000, 0x0000, 0x0000, 0x0001>',
        ('int', 16): 'Initialize4ComponentData<GLshort, 0x0000, 0x0000, 0x0000, 0x0001>',
        ('float', 32): 'Initialize4ComponentData<GLfloat, 0x00000000, 0x00000000, 0x00000000, gl::Float32One>',
        ('int', 32): 'Initialize4ComponentData<GLint, 0x00000000, 0x00000000, 0x00000000, 0x00000001>',
        ('uint', 32): 'Initialize4ComponentData<GLuint, 0x00000000, 0x00000000, 0x00000000, 0x00000001>',
    }
    key = (angle_format.get('componentType'), angle_format.get('bits', {}).get('red'))
    internal_format_initializer = initializers.get(key)
    if internal_format_initializer is None:
        # Covers a missing componentType as well as an unsupported combination.
        raise ValueError('warning: internal format initializer could not be generated and may be needed for ' + internal_format)
    return internal_format_initializer
def get_swizzle_format_id(angle_format_id, angle_format):
    """Pick the ANGLE format used when sampling this format through a swizzle.

    Preference order: an explicit 'swizzleFormat' override in the JSON entry,
    the format itself (if it is renderable, sampleable, and has 4 equal-width
    channels), otherwise an RGBA format wide enough for the largest channel.

    Raises ValueError when the entry lacks the information needed to derive
    a swizzle format.

    Uses dict.values()/next(iter(...)) instead of the Python2-only
    itervalues()/.next(), so this works under both Python 2 and 3 with
    identical behavior.
    """
    if angle_format_id == 'ANGLE_FORMAT_NONE':
        return 'ANGLE_FORMAT_NONE'
    elif 'swizzleFormat' in angle_format:
        # For some special formats like compressed formats that don't have a clearly defined number
        # of bits per channel, swizzle format needs to be specified manually.
        return angle_format['swizzleFormat']
    if 'bits' not in angle_format:
        raise ValueError('no bits information for determining swizzleformat for format: ' + angle_format_id)
    bits = angle_format['bits']
    max_component_bits = max(bits.values())
    first_component_bits = next(iter(bits.values()))
    channels_different = not all(component_bits == first_component_bits for component_bits in bits.values())
    # The format itself can be used for swizzles if it can be accessed as a render target and
    # sampled and the bit count for all 4 channels is the same.
    if "rtvFormat" in angle_format and "srvFormat" in angle_format and not channels_different and len(angle_format['channels']) == 4:
        return angle_format_id
    # Round the widest channel up to a whole number of bytes.
    b = int(math.ceil(float(max_component_bits) / 8) * 8)
    # Depth formats need special handling, since combined depth/stencil formats don't have a clearly
    # defined component type.
    if angle_format['channels'].find('d') >= 0:
        if b == 24 or b == 32:
            return 'ANGLE_FORMAT_R32G32B32A32_FLOAT'
        if b == 16:
            return 'ANGLE_FORMAT_R16G16B16A16_UNORM'
    if b == 24:
        raise ValueError('unexpected 24-bit format when determining swizzleformat for format: ' + angle_format_id)
    if 'componentType' not in angle_format:
        raise ValueError('no component type information for determining swizzleformat for format: ' + angle_format_id)
    component_type = angle_format['componentType']
    if component_type == 'uint':
        return 'ANGLE_FORMAT_R{}G{}B{}A{}_UINT'.format(b, b, b, b)
    elif component_type == 'int':
        return 'ANGLE_FORMAT_R{}G{}B{}A{}_SINT'.format(b, b, b, b)
    elif component_type == 'unorm':
        return 'ANGLE_FORMAT_R{}G{}B{}A{}_UNORM'.format(b, b, b, b)
    elif component_type == 'snorm':
        return 'ANGLE_FORMAT_R{}G{}B{}A{}_SNORM'.format(b, b, b, b)
    elif component_type == 'float':
        return 'ANGLE_FORMAT_R{}G{}B{}A{}_FLOAT'.format(b, b, b, b)
    else:
        raise ValueError('could not determine swizzleformat based on componentType for format: ' + angle_format_id)
def get_texture_format_item(idx, internal_format, requirements_fn, angle_format_id, angle_format):
    """Emit the C++ statements for one entry of an internal-format case.

    idx             -- position within the case; 0 emits 'if', later entries
                       emit 'else if'.
    requirements_fn -- name of a capability-check function, or None for an
                       unconditional entry.
    """
    table_data = ''  # (dropped a stray trailing semicolon)
    internal_format_initializer = get_internal_format_initializer(internal_format, angle_format)
    indent = '            '
    if requirements_fn is not None:
        if idx == 0:
            table_data += '            if (' + requirements_fn + '(renderer11DeviceCaps))\n'
        else:
            table_data += '            else if (' + requirements_fn + '(renderer11DeviceCaps))\n'
        table_data += '            {\n'
        indent += '    '
    table_data += indent + 'static const TextureFormat textureFormat(internalFormat,\n'
    table_data += indent + '                                         ' + angle_format_id + ',\n'
    table_data += indent + '                                         ' + internal_format_initializer + ');\n'
    table_data += indent + 'return textureFormat;\n'
    if requirements_fn is not None:
        table_data += '            }\n'
    return table_data
def parse_json_into_switch_texture_format_string(json_map, json_data):
    """Build the case statements of GetTextureFormatInfo from the JSON map.

    json_map  -- maps a GL internal format either directly to an ANGLE format
                 id (string) or to a dict keyed by capability-check function
                 names (requirements), each mapping to an ANGLE format id.
    json_data -- per-ANGLE-format property dicts, passed through to
                 get_texture_format_item().
    """
    table_data = ''
    # NOTE(review): angle_format_map is never used in this function —
    # candidate for removal.
    angle_format_map = {}
    # Python2-only dict.iteritems(); sorted() keeps the generated file stable.
    for internal_format_item in sorted(json_map.iteritems()):
        internal_format = internal_format_item[0]
        table_data += '        case ' + internal_format + ':\n'
        table_data += '        {\n'
        # basestring (Python 2): the entry is a direct format-id mapping.
        if isinstance(json_map[internal_format], basestring):
            angle_format_id = json_map[internal_format]
            table_data += get_texture_format_item(0, internal_format, None, angle_format_id, json_data[angle_format_id])
        else:
            # Requirement-conditional entries: one if/else-if per requirement,
            # with a final else that falls through to the default case.
            for idx, requirements_map in enumerate(sorted(json_map[internal_format].iteritems())):
                angle_format_id = requirements_map[1]
                table_data += get_texture_format_item(idx, internal_format, requirements_map[0], angle_format_id, json_data[angle_format_id])
            table_data += '            else\n'
            table_data += '            {\n'
            table_data += '                break;\n'
            table_data += '            }\n'
        table_data += '        }\n'
    return table_data
def get_channel_struct(angle_format):
    """Derive the C++ channel-struct name (e.g. 'R8G8B8A8') for this format.

    Returns None for depth/stencil formats and for formats without bit
    counts; honors an explicit 'channelStruct' override in the JSON entry.
    A trailing 'F' marks float formats, 'S' marks signed (int/snorm) ones.
    """
    if 'bits' not in angle_format:
        return None
    bits = angle_format['bits']
    if 'depth' in bits or 'stencil' in bits:
        return None
    if 'channelStruct' in angle_format:
        return angle_format['channelStruct']
    letter_and_bits_key = {
        'r': ('R', 'red'),
        'g': ('G', 'green'),
        'b': ('B', 'blue'),
        'a': ('A', 'alpha'),
    }
    pieces = []
    for channel in angle_format['channels']:
        if channel in letter_and_bits_key:
            letter, bits_key = letter_and_bits_key[channel]
            pieces.append('{}{}'.format(letter, bits[bits_key]))
    suffix = ''
    if angle_format['componentType'] == 'float':
        suffix = 'F'
    elif angle_format['componentType'] in ('int', 'snorm'):
        suffix = 'S'
    return ''.join(pieces) + suffix
def get_mip_generation_function(angle_format):
    """Return the C++ expression for this format's mip-generation helper.

    Formats with no channel struct (depth/stencil, unknown bit layout) get
    'nullptr'.
    """
    channel_struct = get_channel_struct(angle_format)
    if channel_struct is None:
        return 'nullptr'
    return 'GenerateMip<{}>'.format(channel_struct)
def get_color_read_function(angle_format):
    """Return the C++ expression for this format's color-read helper.

    'nullptr' for formats with no channel struct (depth/stencil etc.).
    """
    channel_struct = get_channel_struct(angle_format)
    if channel_struct is None:
        return 'nullptr'
    # GL type used for each component type when reading colors back.
    read_type = {
        'uint': 'GLuint',
        'int': 'GLint',
        'unorm': 'GLfloat',
        'snorm': 'GLfloat',
        'float': 'GLfloat'
    }[angle_format['componentType']]
    return 'ReadColor<' + channel_struct + ', ' + read_type + '>'
def get_blit_srv_format(angle_format):
    """Pick the SRV format used for blits.

    Integer formats blit through their render-target (RTV) format; everything
    else falls back to the plain SRV format, or DXGI_FORMAT_UNKNOWN when the
    entry has no channel information.
    """
    if 'channels' not in angle_format:
        return 'DXGI_FORMAT_UNKNOWN'
    is_int = angle_format['componentType'] in ('int', 'uint') if 'r' in angle_format['channels'] else False
    if 'r' in angle_format['channels'] and is_int:
        return angle_format['rtvFormat']
    return angle_format.get("srvFormat", "DXGI_FORMAT_UNKNOWN")
def parse_json_into_switch_angle_format_string(json_data):
    """Build the case statements of GetANGLEFormatSet from the JSON data.

    Each ANGLE format id becomes a case that constructs a static
    ANGLEFormatSet from the entry's DXGI formats (defaulting any missing
    field to DXGI_FORMAT_UNKNOWN / GL_NONE) plus the derived swizzle format
    and helper-function expressions.
    """
    table_data = ''
    # Python2-only dict.iteritems(); sorted() keeps the generated file stable.
    for angle_format_item in sorted(json_data.iteritems()):
        table_data += '        case ' + angle_format_item[0] + ':\n'
        angle_format = angle_format_item[1]
        # Optional JSON fields fall back to "unknown"/"none" sentinels.
        gl_internal_format = angle_format["glInternalFormat"] if "glInternalFormat" in angle_format else "GL_NONE"
        tex_format = angle_format["texFormat"] if "texFormat" in angle_format else "DXGI_FORMAT_UNKNOWN"
        srv_format = angle_format["srvFormat"] if "srvFormat" in angle_format else "DXGI_FORMAT_UNKNOWN"
        rtv_format = angle_format["rtvFormat"] if "rtvFormat" in angle_format else "DXGI_FORMAT_UNKNOWN"
        dsv_format = angle_format["dsvFormat"] if "dsvFormat" in angle_format else "DXGI_FORMAT_UNKNOWN"
        blit_srv_format = get_blit_srv_format(angle_format)
        swizzle_format = get_swizzle_format_id(angle_format_item[0], angle_format)
        mip_generation_function = get_mip_generation_function(angle_format)
        color_read_function = get_color_read_function(angle_format)
        # Argument order must match the ANGLEFormatSet constructor in the template.
        table_data += '        {\n'
        table_data += '            static const ANGLEFormatSet formatInfo(' + angle_format_item[0] + ',\n'
        table_data += '                                                   ' + gl_internal_format + ',\n'
        table_data += '                                                   ' + tex_format + ',\n'
        table_data += '                                                   ' + srv_format + ',\n'
        table_data += '                                                   ' + rtv_format + ',\n'
        table_data += '                                                   ' + dsv_format + ',\n'
        table_data += '                                                   ' + blit_srv_format + ',\n'
        table_data += '                                                   ' + swizzle_format + ',\n'
        table_data += '                                                   ' + mip_generation_function + ',\n'
        table_data += '                                                   ' + color_read_function + ');\n'
        table_data += '            return formatInfo;\n'
        table_data += '        }\n'
    return table_data
def parse_json_into_angle_format_enum_string(json_data):
    """Build the comma-separated body of the ANGLEFormat enum.

    Entries are emitted in sorted order so the generated header is stable.
    Uses plain dict iteration/str.join instead of the Python2-only
    iteritems() and a manual index counter; output is byte-identical and
    works under both Python 2 and 3.
    """
    return ',\n'.join('    ' + angle_format_id for angle_format_id in sorted(json_data))
def reject_duplicate_keys(pairs):
    """object_pairs_hook for json.loads that rejects duplicated keys.

    Plain dict construction would silently keep the last duplicate; raising
    instead catches copy/paste mistakes in the JSON data files.
    """
    result = {}
    for key, value in pairs:
        if key in result:
            raise ValueError("duplicate key: %r" % (key,))
        result[key] = value
    return result
# Load the GL-format -> ANGLE-format map and the per-format data, rejecting
# accidentally duplicated JSON keys. The `with` blocks close the files; the
# old code also called .close() manually inside the with blocks, which was
# redundant, and nested all of the generation inside them needlessly.
with open('texture_format_map.json') as texture_format_map_file:
    json_map = json.loads(texture_format_map_file.read(),
                          object_pairs_hook=reject_duplicate_keys)
with open('texture_format_data.json') as texture_format_json_file:
    json_data = json.loads(texture_format_json_file.read(),
                           object_pairs_hook=reject_duplicate_keys)
# Render and write the .cpp table.
texture_format_cases = parse_json_into_switch_texture_format_string(json_map, json_data)
angle_format_cases = parse_json_into_switch_angle_format_string(json_data)
output_cpp = template_texture_format_table_autogen_cpp.format(
    texture_format_info_cases=texture_format_cases,
    angle_format_info_cases=angle_format_cases)
with open('texture_format_table_autogen.cpp', 'wt') as out_file:
    out_file.write(output_cpp)
# Render and write the .h enum.
enum_data = parse_json_into_angle_format_enum_string(json_data)
output_h = template_texture_format_table_autogen_h.format(angle_format_enum=enum_data)
with open('texture_format_table_autogen.h', 'wt') as out_file:
    out_file.write(output_h)
| |
#!/usr/bin/env python
import os
import sys
import shutil
import tempfile
import subprocess
from distutils.core import setup, Command
from distutils.dir_util import remove_tree
# The installed console script shares the Python module's name.
MODULE_NAME = "binwalk"
SCRIPT_NAME = MODULE_NAME
# Python2/3 compliance: Python 3 renamed raw_input to input, so alias it.
try:
    raw_input
except NameError:
    raw_input = input
# cd into the src directory, no matter where setup.py was invoked from
os.chdir(os.path.join(os.path.dirname(os.path.realpath(__file__)), "src"))
def which(command):
    """Locate *command* on the system; returns its path or a falsy value.

    Falls back to /usr/local/bin/<command> when the `which` utility finds
    nothing, since /usr/local/bin is the usual install prefix but may not be
    in the invoking shell's $PATH.
    """
    # /usr/local/bin is usually the default install path, though it may not be in $PATH
    usr_local_bin = os.path.sep.join([os.path.sep, 'usr', 'local', 'bin', command])
    # Bug fix: location was previously unbound (NameError below) when Popen
    # raised before the assignment completed.
    location = None
    try:
        location = subprocess.Popen(["which", command],
                                    shell=False,
                                    stdout=subprocess.PIPE).communicate()[0].strip()
    except KeyboardInterrupt as e:
        raise e
    except Exception:
        # Best effort: fall through to the /usr/local/bin check below.
        pass
    if not location and os.path.exists(usr_local_bin):
        location = usr_local_bin
    return location
def find_binwalk_module_paths():
    """Return the filesystem path(s) of an installed binwalk module, or [] if none.

    Broad except is deliberate: any failure while importing binwalk just
    means "not installed" for our purposes.
    """
    try:
        import binwalk
        return binwalk.__path__
    except KeyboardInterrupt as e:
        raise e
    except Exception:
        return []
def remove_binwalk_module(pydir=None, pybin=None):
    """Best-effort removal of an installed binwalk module tree and launcher script.

    pydir -- explicit module directory to remove; defaults to whatever
             find_binwalk_module_paths() discovers.
    pybin -- explicit executable path to remove; defaults to which(binwalk).
    """
    module_paths = [pydir] if pydir else find_binwalk_module_paths()
    for path in module_paths:
        try:
            remove_tree(path)
        except OSError:
            # Already gone or not removable; keep going.
            pass
    if not pybin:
        pybin = which(MODULE_NAME)
    if pybin:
        try:
            sys.stdout.write("removing '%s'\n" % pybin)
            os.remove(pybin)
        except KeyboardInterrupt:
            pass
        except Exception:
            pass
class IDAUnInstallCommand(Command):
    """distutils command that removes the binwalk IDA plugin from an IDA install."""
    description = "Uninstalls the binwalk IDA plugin module"
    user_options = [
        ('idadir=', None, 'Specify the path to your IDA install directory.'),
    ]
    def initialize_options(self):
        self.idadir = None
        self.mydir = os.path.dirname(os.path.realpath(__file__))
    def finalize_options(self):
        pass
    def run(self):
        if self.idadir is None:
            sys.stderr.write("Please specify the path to your IDA install directory with the '--idadir' option!\n")
            return
        # Remove the plugin stub (a file) and the bundled binwalk package
        # (a directory), if either is present.
        targets = (
            (os.path.join(self.idadir, 'plugins', 'binida.py'), os.remove),
            (os.path.join(self.idadir, 'python', 'binwalk'), shutil.rmtree),
        )
        for target, remover in targets:
            if os.path.exists(target):
                sys.stdout.write("removing %s\n" % target)
                remover(target)
class IDAInstallCommand(Command):
    """distutils command that copies the binwalk IDA plugin into an IDA install."""
    description = "Installs the binwalk IDA plugin module"
    user_options = [
        ('idadir=', None, 'Specify the path to your IDA install directory.'),
    ]
    def initialize_options(self):
        # idadir must be supplied by the user; mydir is where setup.py lives
        # (after the module-level chdir into src/).
        self.idadir = None
        self.mydir = os.path.dirname(os.path.realpath(__file__))
    def finalize_options(self):
        pass
    def run(self):
        # All four paths are validated before anything is copied, so a bad
        # --idadir cannot leave a half-installed plugin behind.
        if self.idadir is None:
            sys.stderr.write("Please specify the path to your IDA install directory with the '--idadir' option!\n")
            return
        binida_src_path = os.path.join(self.mydir, 'scripts', 'binida.py')
        binida_dst_path = os.path.join(self.idadir, 'plugins')
        if not os.path.exists(binida_src_path):
            sys.stderr.write("ERROR: could not locate IDA plugin file '%s'!\n" % binida_src_path)
            return
        if not os.path.exists(binida_dst_path):
            sys.stderr.write("ERROR: could not locate the IDA plugins directory '%s'! Check your --idadir option.\n" % binida_dst_path)
            return
        binwalk_src_path = os.path.join(self.mydir, 'binwalk')
        binwalk_dst_path = os.path.join(self.idadir, 'python')
        if not os.path.exists(binwalk_src_path):
            sys.stderr.write("ERROR: could not locate binwalk source directory '%s'!\n" % binwalk_src_path)
            return
        if not os.path.exists(binwalk_dst_path):
            sys.stderr.write("ERROR: could not locate the IDA python directory '%s'! Check your --idadir option.\n" % binwalk_dst_path)
            return
        # Switch from destination directories to the final destination paths.
        binida_dst_path = os.path.join(binida_dst_path, 'binida.py')
        binwalk_dst_path = os.path.join(binwalk_dst_path, 'binwalk')
        # Clear out any previous install so the copies below start clean.
        if os.path.exists(binida_dst_path):
            os.remove(binida_dst_path)
        if os.path.exists(binwalk_dst_path):
            shutil.rmtree(binwalk_dst_path)
        sys.stdout.write("copying %s -> %s\n" % (binida_src_path, binida_dst_path))
        shutil.copyfile(binida_src_path, binida_dst_path)
        sys.stdout.write("copying %s -> %s\n" % (binwalk_src_path, binwalk_dst_path))
        shutil.copytree(binwalk_src_path, binwalk_dst_path)
class UninstallCommand(Command):
    """distutils command that removes the installed binwalk module and script."""
    description = "Uninstalls the Python module"
    user_options = [
        ('pydir=', None, 'Specify the path to the binwalk python module to be removed.'),
        ('pybin=', None, 'Specify the path to the binwalk executable to be removed.'),
    ]
    def initialize_options(self):
        self.pydir = None
        self.pybin = None
    def finalize_options(self):
        pass
    def run(self):
        # All of the real work lives in the module-level helper.
        remove_binwalk_module(pydir=self.pydir, pybin=self.pybin)
class CleanCommand(Command):
    """distutils command that deletes the build/ and dist/ output directories."""
    description = "Clean Python build directories"
    user_options = []
    def initialize_options(self):
        pass
    def finalize_options(self):
        pass
    def run(self):
        # The two directories previously had identical copy-pasted try/except
        # blocks; one loop removes the duplication. Ctrl-C still aborts,
        # anything else (e.g. the directory not existing) is ignored.
        for build_dir in ("build", "dist"):
            try:
                remove_tree(build_dir)
            except KeyboardInterrupt:
                raise
            except Exception:
                pass
# The data files to install along with the module: everything under each of
# these package subdirectories.
install_data_files = ["%s%s*" % (data_dir, os.path.sep)
                      for data_dir in ["magic", "config", "plugins", "modules", "core"]]
# Install the module, script, and support files
setup(name = MODULE_NAME,
      version = "2.1.2b",
      description = "Firmware analysis tool",
      author = "Craig Heffner",
      url = "https://github.com/devttys0/%s" % MODULE_NAME,
      requires = [],
      packages = [MODULE_NAME],
      package_data = {MODULE_NAME : install_data_files},
      scripts = [os.path.join("scripts", SCRIPT_NAME)],
      cmdclass = {'clean' : CleanCommand, 'uninstall' : UninstallCommand, 'idainstall' : IDAInstallCommand, 'idauninstall' : IDAUnInstallCommand}
)
| |
from __future__ import absolute_import
from __future__ import unicode_literals
import mock
import sys
if sys.version_info >= (2, 7):
import unittest
else: # pragma: nocover
from django.utils import unittest # noqa
from django import forms
from django.test import TestCase
from django_filters.fields import Lookup
from django_filters.fields import RangeField
from django_filters.fields import LookupTypeField
from django_filters.filters import Filter
from django_filters.filters import CharFilter
from django_filters.filters import BooleanFilter
from django_filters.filters import ChoiceFilter
from django_filters.filters import MultipleChoiceFilter
from django_filters.filters import DateFilter
from django_filters.filters import DateTimeFilter
from django_filters.filters import TimeFilter
from django_filters.filters import ModelChoiceFilter
from django_filters.filters import ModelMultipleChoiceFilter
from django_filters.filters import NumberFilter
from django_filters.filters import RangeFilter
from django_filters.filters import DateRangeFilter
from django_filters.filters import AllValuesFilter
from django_filters.filters import LOOKUP_TYPES
from tests.models import Book, User
class FilterTests(TestCase):
    """Tests for the base Filter class: construction, form-field generation,
    and queryset filtering (querysets are mocked throughout).

    Two tests were fixed: test_filtering_uses_name previously bound `result`
    to the (None) return of assert_called_once_with instead of the filter's
    return value, and test_filtering_uses_distinct used `assert_called_once`
    — a silent no-op attribute access on older mock versions — on the wrong
    mock (distinct is applied to the *filtered* queryset).
    """
    def test_creation(self):
        f = Filter()
        self.assertEqual(f.lookup_type, 'exact')
        self.assertEqual(f.exclude, False)
    def test_creation_order(self):
        f = Filter()
        f2 = Filter()
        self.assertTrue(f2.creation_counter > f.creation_counter)
    def test_default_field(self):
        f = Filter()
        field = f.field
        self.assertIsInstance(field, forms.Field)
        self.assertEqual(field.help_text, '')
    def test_field_with_exclusion(self):
        f = Filter(exclude=True)
        field = f.field
        self.assertIsInstance(field, forms.Field)
        self.assertEqual(field.help_text, 'This is an exclusion filter')
    def test_field_with_single_lookup_type(self):
        f = Filter(lookup_type='iexact')
        field = f.field
        self.assertIsInstance(field, forms.Field)
    def test_field_with_none_lookup_type(self):
        f = Filter(lookup_type=None)
        field = f.field
        self.assertIsInstance(field, LookupTypeField)
        choice_field = field.fields[1]
        self.assertEqual(len(choice_field.choices), len(LOOKUP_TYPES))
    def test_field_with_lookup_type_and_exlusion(self):
        f = Filter(lookup_type=None, exclude=True)
        field = f.field
        self.assertIsInstance(field, LookupTypeField)
        self.assertEqual(field.help_text, 'This is an exclusion filter')
    def test_field_with_list_lookup_type(self):
        f = Filter(lookup_type=('istartswith', 'iendswith'))
        field = f.field
        self.assertIsInstance(field, LookupTypeField)
        choice_field = field.fields[1]
        self.assertEqual(len(choice_field.choices), 2)
    def test_field_params(self):
        with mock.patch.object(Filter, 'field_class',
                               spec=['__call__']) as mocked:
            f = Filter(name='somefield', label='somelabel',
                       widget='somewidget')
            f.field
            mocked.assert_called_once_with(required=False,
                                           label='somelabel', widget='somewidget', help_text=mock.ANY)
    def test_field_extra_params(self):
        with mock.patch.object(Filter, 'field_class',
                               spec=['__call__']) as mocked:
            f = Filter(someattr='someattr')
            f.field
            mocked.assert_called_once_with(required=mock.ANY,
                                           label=mock.ANY, widget=mock.ANY, help_text=mock.ANY,
                                           someattr='someattr')
    def test_field_with_required_filter(self):
        with mock.patch.object(Filter, 'field_class',
                               spec=['__call__']) as mocked:
            f = Filter(required=True)
            f.field
            mocked.assert_called_once_with(required=True,
                                           label=mock.ANY, widget=mock.ANY, help_text=mock.ANY)
    def test_filtering(self):
        qs = mock.Mock(spec=['filter'])
        f = Filter()
        result = f.filter(qs, 'value')
        qs.filter.assert_called_once_with(None__exact='value')
        self.assertNotEqual(qs, result)
    def test_filtering_exclude(self):
        qs = mock.Mock(spec=['filter', 'exclude'])
        f = Filter(exclude=True)
        result = f.filter(qs, 'value')
        qs.exclude.assert_called_once_with(None__exact='value')
        self.assertNotEqual(qs, result)
    def test_filtering_uses_name(self):
        qs = mock.Mock(spec=['filter'])
        f = Filter(name='somefield')
        # fixed: capture the filter's return value, then assert the call
        result = f.filter(qs, 'value')
        qs.filter.assert_called_once_with(somefield__exact='value')
        self.assertNotEqual(qs, result)
    def test_filtering_skipped_with_blank_value(self):
        qs = mock.Mock()
        f = Filter()
        result = f.filter(qs, '')
        self.assertListEqual(qs.method_calls, [])
        self.assertEqual(qs, result)
    def test_filtering_skipped_with_none_value(self):
        qs = mock.Mock()
        f = Filter()
        result = f.filter(qs, None)
        self.assertListEqual(qs.method_calls, [])
        self.assertEqual(qs, result)
    def test_filtering_with_list_value(self):
        qs = mock.Mock(spec=['filter'])
        f = Filter(name='somefield', lookup_type=['some_lookup_type'])
        result = f.filter(qs, Lookup('value', 'some_lookup_type'))
        qs.filter.assert_called_once_with(somefield__some_lookup_type='value')
        self.assertNotEqual(qs, result)
    def test_filtering_skipped_with_list_value_with_blank(self):
        qs = mock.Mock()
        f = Filter(name='somefield', lookup_type=['some_lookup_type'])
        result = f.filter(qs, Lookup('', 'some_lookup_type'))
        self.assertListEqual(qs.method_calls, [])
        self.assertEqual(qs, result)
    def test_filtering_skipped_with_list_value_with_blank_lookup(self):
        return  # Now field is required to provide valid lookup_type if it provides any
        qs = mock.Mock(spec=['filter'])
        f = Filter(name='somefield', lookup_type=None)
        result = f.filter(qs, Lookup('value', ''))
        qs.filter.assert_called_once_with(somefield__exact='value')
        self.assertNotEqual(qs, result)
    def test_filter_using_action(self):
        qs = mock.NonCallableMock(spec=[])
        action = mock.Mock(spec=['filter'])
        f = Filter(action=action)
        result = f.filter(qs, 'value')
        action.assert_called_once_with(qs, 'value')
        self.assertNotEqual(qs, result)
    def test_filtering_uses_distinct(self):
        qs = mock.Mock(spec=['filter', 'distinct'])
        f = Filter(name='somefield', distinct=True)
        result = f.filter(qs, 'value')
        # fixed: distinct() is applied to the filtered queryset, and
        # assert_called_once_with actually verifies the call
        qs.filter.assert_called_once_with(somefield__exact='value')
        qs.filter.return_value.distinct.assert_called_once_with()
        self.assertNotEqual(qs, result)
class CharFilterTests(TestCase):
    """CharFilter should expose a plain CharField."""
    def test_default_field(self):
        self.assertIsInstance(CharFilter().field, forms.CharField)
class BooleanFilterTests(TestCase):
    """BooleanFilter: field type, filtering, and None/blank handling."""
    def test_default_field(self):
        self.assertIsInstance(BooleanFilter().field, forms.NullBooleanField)
    def test_filtering(self):
        qs = mock.Mock(spec=['filter'])
        flt = BooleanFilter(name='somefield')
        result = flt.filter(qs, True)
        qs.filter.assert_called_once_with(somefield=True)
        self.assertNotEqual(qs, result)
    @unittest.expectedFailure
    def test_filtering_skipped_with_blank_value(self):
        qs = mock.Mock()
        flt = BooleanFilter(name='somefield')
        result = flt.filter(qs, '')
        self.assertListEqual(qs.method_calls, [])
        self.assertEqual(qs, result)
    def test_filtering_skipped_with_none_value(self):
        qs = mock.Mock()
        flt = BooleanFilter(name='somefield')
        result = flt.filter(qs, None)
        self.assertListEqual(qs.method_calls, [])
        self.assertEqual(qs, result)
class ChoiceFilterTests(TestCase):
    """ChoiceFilter must expose a forms.ChoiceField by default."""
    def test_default_field(self):
        generated = ChoiceFilter().field
        self.assertIsInstance(generated, forms.ChoiceField)
class MultipleChoiceFilterTests(TestCase):
    """Tests for MultipleChoiceFilter, including its `conjoined` (AND) mode."""
    def test_default_field(self):
        f = MultipleChoiceFilter()
        field = f.field
        self.assertIsInstance(field, forms.MultipleChoiceField)
    def test_filtering_requires_name(self):
        # With no `name` the lookup kwarg cannot be built.
        qs = mock.Mock(spec=['filter'])
        f = MultipleChoiceFilter()
        with self.assertRaises(TypeError):
            f.filter(qs, ['value'])
    def test_conjoined_default_value(self):
        f = MultipleChoiceFilter()
        self.assertFalse(f.conjoined)
    def test_conjoined_true(self):
        f = MultipleChoiceFilter(conjoined=True)
        self.assertTrue(f.conjoined)
    def test_filtering(self):
        # Default (disjoined) mode: values are OR-ed together via Q objects,
        # then .distinct() is applied to the filtered queryset.
        qs = mock.Mock(spec=['filter'])
        f = MultipleChoiceFilter(name='somefield')
        with mock.patch('django_filters.filters.Q') as mockQclass:
            mockQ1, mockQ2 = mock.MagicMock(), mock.MagicMock()
            mockQclass.side_effect = [mockQ1, mockQ2]
            f.filter(qs, ['value'])
            self.assertEqual(mockQclass.call_args_list,
                             [mock.call(), mock.call(somefield='value')])
            mockQ1.__ior__.assert_called_once_with(mockQ2)
            qs.filter.assert_called_once_with(mockQ1.__ior__.return_value)
            qs.filter.return_value.distinct.assert_called_once_with()
    def test_filtering_on_required_skipped_when_len_of_value_is_len_of_field_choices(self):
        # A required filter selecting *all* choices is a no-op.
        qs = mock.Mock(spec=[])
        f = MultipleChoiceFilter(name='somefield', required=True)
        f.always_filter = False
        result = f.filter(qs, [])
        self.assertEqual(len(f.field.choices), 0)
        self.assertEqual(qs, result)
        f.field.choices = ['some', 'values', 'here']
        result = f.filter(qs, ['some', 'values', 'here'])
        self.assertEqual(qs, result)
        result = f.filter(qs, ['other', 'values', 'there'])
        self.assertEqual(qs, result)
    @unittest.expectedFailure
    def test_filtering_skipped_with_empty_list_value_and_some_choices(self):
        # Known gap: an empty selection should skip filtering entirely.
        qs = mock.Mock(spec=[])
        f = MultipleChoiceFilter(name='somefield')
        f.field.choices = ['some', 'values', 'here']
        result = f.filter(qs, [])
        self.assertEqual(qs, result)
    def test_filter_conjoined_true(self):
        """Tests that a filter with `conjoined=True` returns objects that
        have all the values included in `value`. For example filter
        users that have all of this books.
        """
        book_kwargs = {'price': 1, 'average_rating': 1}
        books = []
        books.append(Book.objects.create(**book_kwargs))
        books.append(Book.objects.create(**book_kwargs))
        books.append(Book.objects.create(**book_kwargs))
        books.append(Book.objects.create(**book_kwargs))
        books.append(Book.objects.create(**book_kwargs))
        books.append(Book.objects.create(**book_kwargs))
        user1 = User.objects.create()
        user2 = User.objects.create()
        user3 = User.objects.create()
        user4 = User.objects.create()
        user5 = User.objects.create()
        user1.favorite_books.add(books[0], books[1])
        user2.favorite_books.add(books[0], books[1], books[2])
        user3.favorite_books.add(books[1], books[2])
        user4.favorite_books.add(books[2], books[3])
        user5.favorite_books.add(books[4], books[5])
        filter_list = (
            ((books[0].pk, books[0].pk),  # values
             [1, 2]),  # list of user.pk that have `value` books
            ((books[1].pk, books[1].pk),
             [1, 2, 3]),
            ((books[2].pk, books[2].pk),
             [2, 3, 4]),
            ((books[3].pk, books[3].pk),
             [4, ]),
            ((books[4].pk, books[4].pk),
             [5, ]),
            ((books[0].pk, books[1].pk),
             [1, 2]),
            ((books[0].pk, books[2].pk),
             [2, ]),
            ((books[1].pk, books[2].pk),
             [2, 3]),
            ((books[2].pk, books[3].pk),
             [4, ]),
            ((books[4].pk, books[5].pk),
             [5, ]),
            ((books[3].pk, books[4].pk),
             []),
        )
        users = User.objects.all()
        for item in filter_list:
            f = MultipleChoiceFilter(name='favorite_books__pk', conjoined=True)
            queryset = f.filter(users, item[0])
            expected_pks = [c[0] for c in queryset.values_list('pk')]
            self.assertListEqual(
                expected_pks,
                item[1],
                'Lists Differ: {0} != {1} for case {2}'.format(
                    expected_pks, item[1], item[0]))
class DateFilterTests(TestCase):
    """DateFilter must expose a forms.DateField by default."""
    def test_default_field(self):
        generated = DateFilter().field
        self.assertIsInstance(generated, forms.DateField)
class DateTimeFilterTests(TestCase):
    """DateTimeFilter must expose a forms.DateTimeField by default."""
    def test_default_field(self):
        generated = DateTimeFilter().field
        self.assertIsInstance(generated, forms.DateTimeField)
class TimeFilterTests(TestCase):
    """TimeFilter must expose a forms.TimeField by default."""
    def test_default_field(self):
        generated = TimeFilter().field
        self.assertIsInstance(generated, forms.TimeField)
class ModelChoiceFilterTests(TestCase):
    """ModelChoiceFilter needs a queryset before it can build its form field."""
    def test_default_field_without_queryset(self):
        flt = ModelChoiceFilter()
        with self.assertRaises(TypeError):
            flt.field
    def test_default_field_with_queryset(self):
        queryset = mock.NonCallableMock(spec=[])
        flt = ModelChoiceFilter(queryset=queryset)
        built = flt.field
        self.assertIsInstance(built, forms.ModelChoiceField)
        self.assertEqual(built.queryset, queryset)
class ModelMultipleChoiceFilterTests(TestCase):
    """ModelMultipleChoiceFilter needs a queryset before it can build its field."""
    def test_default_field_without_queryset(self):
        flt = ModelMultipleChoiceFilter()
        with self.assertRaises(TypeError):
            flt.field
    def test_default_field_with_queryset(self):
        queryset = mock.NonCallableMock(spec=[])
        flt = ModelMultipleChoiceFilter(queryset=queryset)
        built = flt.field
        self.assertIsInstance(built, forms.ModelMultipleChoiceField)
        self.assertEqual(built.queryset, queryset)
class NumberFilterTests(TestCase):
    """Tests for NumberFilter (DecimalField-backed numeric filtering)."""
    def test_default_field(self):
        f = NumberFilter()
        field = f.field
        self.assertIsInstance(field, forms.DecimalField)
    def test_filtering(self):
        # The filter has no name, so the generated kwarg is literally
        # `None__exact` -- the test only pins the call forwarding.
        qs = mock.Mock(spec=['filter'])
        f = NumberFilter()
        f.filter(qs, 1)
        qs.filter.assert_called_once_with(None__exact=1)
        # Also test 0 as it once had a bug
        qs.reset_mock()
        f.filter(qs, 0)
        qs.filter.assert_called_once_with(None__exact=0)
class RangeFilterTests(TestCase):
    """Tests for RangeFilter (slice start/stop mapped to a __range lookup)."""
    def test_default_field(self):
        f = RangeFilter()
        field = f.field
        self.assertIsInstance(field, RangeField)
    def test_filtering(self):
        qs = mock.Mock(spec=['filter'])
        value = mock.Mock(start=20, stop=30)
        f = RangeFilter()
        f.filter(qs, value)
        qs.filter.assert_called_once_with(None__range=(20, 30))
    def test_filtering_skipped_with_none_value(self):
        # No slice submitted: queryset must pass through untouched.
        qs = mock.Mock(spec=['filter'])
        f = RangeFilter()
        result = f.filter(qs, None)
        self.assertEqual(qs, result)
    def test_filtering_ignores_lookup_type(self):
        # RangeFilter always uses __range, even when a lookup_type was given.
        qs = mock.Mock()
        value = mock.Mock(start=20, stop=30)
        f = RangeFilter(lookup_type='gte')
        f.filter(qs, value)
        qs.filter.assert_called_once_with(None__range=(20, 30))
class DateRangeFilterTests(TestCase):
    """Tests for DateRangeFilter's choice list and each date-range option."""
    def test_creating(self):
        f = DateRangeFilter()
        self.assertIn('choices', f.extra)
        self.assertEqual(len(DateRangeFilter.options), len(f.extra['choices']))
    def test_default_field(self):
        f = DateRangeFilter()
        field = f.field
        self.assertIsInstance(field, forms.ChoiceField)
    def test_filtering(self):
        # Blank choice: falls back to qs.all().
        qs = mock.Mock(spec=['all'])
        f = DateRangeFilter()
        f.filter(qs, '')
        qs.all.assert_called_once_with()
    # the correct behavior fails right now
    @unittest.expectedFailure
    def test_filtering_skipped_with_blank_value(self):
        qs = mock.Mock(spec=[])
        f = DateRangeFilter()
        result = f.filter(qs, '')
        self.assertEqual(qs, result)
    @unittest.expectedFailure
    def test_filtering_skipped_with_out_of_range_value(self):
        qs = mock.Mock(spec=[])
        f = DateRangeFilter()
        result = f.filter(qs, 999)
        self.assertEqual(qs, result)
    def test_filtering_for_this_year(self):
        # Choice '4' == "this year".
        qs = mock.Mock(spec=['filter'])
        with mock.patch('django_filters.filters.now') as mock_now:
            now_dt = mock_now.return_value
            f = DateRangeFilter()
            f.filter(qs, '4')
            qs.filter.assert_called_once_with(
                None__year=now_dt.year)
    def test_filtering_for_this_month(self):
        # Choice '3' == "this month".
        qs = mock.Mock(spec=['filter'])
        with mock.patch('django_filters.filters.now') as mock_now:
            now_dt = mock_now.return_value
            f = DateRangeFilter()
            f.filter(qs, '3')
            qs.filter.assert_called_once_with(
                None__year=now_dt.year, None__month=now_dt.month)
    def test_filtering_for_7_days(self):
        # Choice '2' == "past 7 days": window is [today - 7d, today + 1d).
        qs = mock.Mock(spec=['filter'])
        with mock.patch('django_filters.filters.now'):
            with mock.patch('django_filters.filters.timedelta') as mock_td:
                with mock.patch(
                        'django_filters.filters._truncate') as mock_truncate:
                    mock_dt1, mock_dt2 = mock.MagicMock(), mock.MagicMock()
                    mock_truncate.side_effect = [mock_dt1, mock_dt2]
                    f = DateRangeFilter()
                    f.filter(qs, '2')
                    self.assertEqual(mock_td.call_args_list,
                                     [mock.call(days=7), mock.call(days=1)])
                    qs.filter.assert_called_once_with(
                        None__lt=mock_dt2, None__gte=mock_dt1)
    def test_filtering_for_today(self):
        # Choice '1' == "today".
        qs = mock.Mock(spec=['filter'])
        with mock.patch('django_filters.filters.now') as mock_now:
            now_dt = mock_now.return_value
            f = DateRangeFilter()
            f.filter(qs, '1')
            qs.filter.assert_called_once_with(
                None__year=now_dt.year,
                None__month=now_dt.month,
                None__day=now_dt.day)
class AllValuesFilterTests(TestCase):
    """Tests for AllValuesFilter, whose choices come from the model column."""
    def test_default_field_without_assigning_model(self):
        # Without a model there is no manager to pull distinct values from.
        f = AllValuesFilter()
        with self.assertRaises(AttributeError):
            f.field
    def test_default_field_with_assigning_model(self):
        mocked = mock.Mock()
        # Stub the manager chain distinct().order_by().values_list() -> [].
        chained_call = '.'.join(['_default_manager', 'distinct.return_value',
                                 'order_by.return_value', 'values_list.return_value'])
        mocked.configure_mock(**{chained_call: iter([])})
        f = AllValuesFilter()
        f.model = mocked
        field = f.field
        self.assertIsInstance(field, forms.ChoiceField)
| |
import itertools
from random import random, choice, randint
from field import wall, Location
from shared import *
"""
Simplify path finding? * available starting points; - and |: available doorways
|*
|*
#
--##
**
**
##--
#
*|
"""
class Path(object):
    """Builds and carves corridors (lists of (x, y) points) between rooms."""
    def __init__(self):
        # Accumulated corridor points as (x, y) tuples.
        self.path = []
    @classmethod
    def clear(cls, fld, path, val=None):
        # Carve `path` into the field: place `val` at each point when given,
        # otherwise knock out the wall there.
        for l in path:
            if val: fld.put(val, Location(l))
            else: fld.remove(wall, Location(l))
    @classmethod
    def random(cls, fld, rooms):
        """Random path to connect any 2 rooms."""
        # Returns True when a corridor was carved; None otherwise.
        room = choice(rooms)
        loc_dir = room.rnd_perim_point()
        if not loc_dir: return
        loc, dir = loc_dir
        path = [loc]
        while 1:
            # Step one cell in the current direction.
            loc = cls.next(loc, dir)
            path.append(loc)
            if not fld.valid(loc) or fld.empty(loc):
                break
            if fld.near_border(loc):
                break
            # Reached a different room: carve the corridor and report success.
            if any(r.near(loc) and r!=room for r in rooms):
                cls.clear(fld, path)
                return True
            nb = fld.all_neighbours(loc)
            if any([n==' ' for n in nb]):
                break
            # Abort when the walk curls back near its own earlier points
            # (the last 3 points are excluded -- they are always close).
            if len(path)>3 and cls.near(path[:-3], loc):
                break
            # Occasionally turn 90 degrees for a more organic shape.
            if random() > 0.9:
                dir = cls.turn_dir(dir)
    @classmethod
    def next(cls, loc, dir):
        # Neighbouring cell in direction 0=up, 1=right, 2=down, 3=left.
        x, y = loc
        if dir == 0: return x, y-1
        elif dir == 1: return x+1, y
        elif dir == 2: return x, y+1
        elif dir == 3: return x-1, y
    @classmethod
    def turn_dir(cls, dir):
        """Return random direction perpendicular to `dir`."""
        dirs = ((0,2), (1,3))
        return choice( dirs[0 if dir in dirs[1] else 1] )
    @classmethod
    def near(cls, path, loc):
        # True when `loc` is diagonally adjacent to any point of `path`;
        # implicitly returns None (falsy) otherwise.
        x, y = loc
        for x2, y2 in path:
            if abs(x-x2) == 1 and abs(y-y2) == 1:
                return True
    def extend(self, x, y, axis, end, step):
        """Extend `path` by stepping through from `loc` to `end` along `axis`, using `step`."""
        # axis truthy walks along y, falsy walks along x; the range is
        # inclusive of `end`.  Returns the last point appended.
        # if axis : seg = [(x,y) for y in range(min(y,y2), max(y,y2)+1)]
        # else    : self.path.extend([(x,y) for x in range(min(x,x2), max(x,x2)+1)])
        if axis : self.path.extend([(x,y) for y in range(y, end+step, step)])
        else    : self.path.extend([(x,y) for x in range(x, end+step, step)])
        return self.path[-1]
    def create(self, r1, r2):
        """ Connect two rooms with a corridor.
            corners: c1 is upper left; clockwise
        """
        # Ensure r1 is the left-most room.
        if r1.c1.x > r2.c1.x:
            r1, r2 = r2, r1
        c2, c3 = r1.c2, r1.c3
        x = c2.x + 1
        c1, c4 = r2.c1, r2.c4
        # Pick a connection mode from the horizontal gap between the rooms.
        mode = 3
        if c1.x-x >= 5      : mode = 1
        elif 0 < c1.x-x < 5 : mode = 2
        y = randint(c2.y+1, c3.y-1)
        modes = (self.mode1, self.mode2, self.mode3)
        modes[mode-1](x, y, r1, r2, c1, c2, c4)
        return self.path
    def mode1(self, x, y, r1, r2, c1, c2, c4):
        """mode 1: right, down, right"""
        x2 = c1.x-1
        y2 = randint(c1.y+1, c4.y-1)
        self.path.append((x,y))
        x += 1
        step = 1 if y<y2 else -1
        x, y = self.extend(x, y, 1, y2, step)
        self.extend(x, y, 0, x2, 1)
    def mode2(self, x, y, r1, r2, c1, c2, c4):
        """mode 2: right, down"""
        c2 = r2.c2
        y2 = c2.y + 1
        x2 = randint(c1.x+1, c2.x-1)
        self.path.append((x,y))
        x += 1
        x, y = self.extend(x, y, 0, x2, 1)
        step = 1 if y<y2 else -1
        self.extend(x, y, 1, y2, step)
    def mode3(self, x, y, r1, r2, c1, c2, c4):
        """mode 3: down, left/right, down OR just down"""
        # Ensure r1 is the top-most room.
        if r1.c1.y > r2.c1.y: r1,r2 = r2,r1
        y = r1.c3.y + 1
        y2 = r2.c1.y - 1
        if abs(y-y2) < 2:
            # find x axis line common to both rooms
            xset = set( range(r1.c1.x, r1.c2.x) )
            x2set = set( range(r2.c1.x, r2.c2.x) )
            intersection = list(xset & x2set)
            if intersection:
                self.extend(choice(intersection), y, 1, y2, 1)
            else:
                # horizontally right next to each other, tunnel down from corner to corner
                x = max(r1.c1.x, r2.c1.x)
                self.extend(x, y-1, 1, r2.c2.y, 1)
        else:
            x = randint(r1.c4.x+1, r1.c3.x-1)
            x2 = randint(r2.c1.x+1, r2.c2.x-1)
            self.path.append((x,y))
            y += 1
            step = 1 if x<x2 else -1
            if x != x2:
                x, y = self.extend(x, y, 0, x2, step)
            self.extend(x, y, 1, y2, 1)
class Corridors(object):
    """Connects all rooms of a field with corridors."""
    def __init__(self, fld, rooms):
        self.fld = fld
        self.rooms = rooms
    def find_closest(self, room, rooms):
        """UNUSED"""
        distances = sorted( [(dist(room.center, r.center), r) for r in rooms] )
        return distances[0][1]
    def create(self):
        """ Connect rooms with corridors.
            - create all possible combinations of each 2 rooms
            - while there are unconnected rooms, connect closest connected and unconnected room
            - add one random corridor
        """
        rcomb = [set(x) for x in itertools.combinations(self.rooms, 2)]
        connected, unconnected = [self.rooms[0]], self.rooms[1:]
        while unconnected:
            room_dist = []
            # create dist/room list for pairs of unconnected/connected rooms
            for r1, r2 in rcomb:
                if r1 in connected and r2 in connected     : continue
                if r1 in unconnected and r2 in unconnected : continue
                room_dist.append( (dist(r1.center, r2.center), (r1,r2)) )
            # get closest rooms and set r1 to unconnected
            r1, r2 = sorted(room_dist)[0][1]
            if r1 in connected: r1,r2 = r2,r1
            self.connect_rooms(r1, r2)
            connected.append(r1)
            unconnected.remove(r1)
        # Occasionally add one extra random corridor to create loops;
        # retry a bounded number of times since Path.random may fail.
        if random() > 0.4:
            for _ in range(99):
                if Path.random(self.fld, self.rooms): break
    def connect_rooms(self, r1, r2):
        # Carve the corridor produced by Path.create into the field.
        Path.clear( self.fld, Path().create(r1, r2) )
| |
#!/usr/bin/python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from gae_ext_runtime import testutil
RUNTIME_DEF_ROOT = os.path.dirname(os.path.dirname(__file__))
DOCKERFILE_TEXT = '''\
# This Dockerfile for a Ruby application was generated by gcloud.
# The base Dockerfile installs:
# * A number of packages needed by the Ruby runtime and by gems
# commonly used in Ruby web apps (such as libsqlite3)
# * A recent version of NodeJS
# * A recent version of the standard Ruby runtime to use by default
# * The bundler gem
FROM gcr.io/google-appengine/ruby:{base_image_tag}
# If your application requires a specific ruby version (compatible with rbenv),
# set it here. Leave blank to use the currently recommended default.
ARG REQUESTED_RUBY_VERSION="{ruby_version}"
# Install any requested ruby if not already preinstalled by the base image.
# Tries installing a prebuilt package first, then falls back to a source build.
RUN if test -n "$REQUESTED_RUBY_VERSION" -a \\
! -x /rbenv/versions/$REQUESTED_RUBY_VERSION/bin/ruby; then \\
(apt-get update -y \\
&& apt-get install -y -q gcp-ruby-$REQUESTED_RUBY_VERSION) \\
|| (cd /rbenv/plugins/ruby-build \\
&& git pull \\
&& rbenv install -s $REQUESTED_RUBY_VERSION) \\
&& rbenv global $REQUESTED_RUBY_VERSION \\
&& gem install -q --no-rdoc --no-ri bundler --version $BUNDLER_VERSION \\
&& apt-get clean \\
&& rm -f /var/lib/apt/lists/*_*; \\
fi
ENV RBENV_VERSION=${{REQUESTED_RUBY_VERSION:-$RBENV_VERSION}}
# Copy the application files.
COPY . /app/
# Install required gems if Gemfile.lock is present.
RUN if test -f Gemfile.lock; then \\
bundle install --deployment --without="development test" \\
&& rbenv rehash; \\
fi
# Temporary. Will be moved to base image later.
ENV RACK_ENV=production \\
RAILS_ENV=production \\
RAILS_SERVE_STATIC_FILES=true
# Run asset pipeline if we're in a Rails app.
RUN if test -d app/assets -a -f config/application.rb; then \\
bundle exec rake assets:precompile || true; \\
fi
# BUG: Reset entrypoint to override base image.
ENTRYPOINT []
# Start application on port $PORT.
CMD {entrypoint}
'''
class RuntimeTestCase(testutil.TestBase):
    """Tests for the Ruby external runtime fingerprinter."""
    def file_contents(self, filename):
        """Reads the contents of the file from the tempdir.
        Args:
            filename: (str) filename to be joined with tempdir prefix.
        Returns:
            File contents.
        """
        with open(self.full_path(filename)) as f:
            return f.read()
    def stub_response(self, response):
        """Stubs the console response from the user.
        Args:
            response: (str) stubbed response.
        Returns:
            A function to reset the stubbed functions to their original
            implementations.
        """
        # Capture the originals so unstub() can restore them.
        can_prompt = self.exec_env.CanPrompt
        prompt_response = self.exec_env.PromptResponse
        def unstub():
            self.exec_env.CanPrompt = can_prompt
            self.exec_env.PromptResponse = prompt_response
        self.exec_env.CanPrompt = lambda: True
        self.exec_env.PromptResponse = lambda prompt: response
        return unstub
    def setUp(self):
        # Point the test framework at this runtime definition before the
        # base-class setup runs.
        self.runtime_def_root = RUNTIME_DEF_ROOT
        super(RuntimeTestCase, self).setUp()
    def test_generate_without_ruby_files(self):
        # No Ruby artifacts: the fingerprinter must not claim the app.
        self.write_file('index.html', 'index')
        self.generate_configs()
        self.assertFalse(os.path.exists(self.full_path('app.yaml')))
        self.assertFalse(os.path.exists(self.full_path('Dockerfile')))
        self.assertFalse(os.path.exists(self.full_path('.dockerignore')))
    def test_generate_without_ruby_files_no_write(self):
        """Tests generate_config_data does nothing if no ruby files."""
        self.write_file('index.html', 'index')
        self.assertIsNone(self.generate_config_data())
        self.assertFalse(os.path.exists(self.full_path('app.yaml')))
    def test_generate_with_ruby_files(self):
        # Basic Ruby app: app.yaml is written; Docker files are not.
        self.write_file('index.rb', 'class Index; end')
        self.write_file('Gemfile', 'source "https://rubygems.org"')
        self.write_file('config.ru', 'run Index.app')
        unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
        self.generate_configs()
        unstub()
        app_yaml = self.file_contents('app.yaml')
        self.assertIn('runtime: ruby\n', app_yaml)
        self.assertIn('env: flex\n', app_yaml)
        self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n',
                      app_yaml)
        self.assertFalse(os.path.exists(self.full_path('Dockerfile')))
        self.assertFalse(os.path.exists(self.full_path('.dockerignore')))
    def test_generate_with_ruby_files_no_write(self):
        """Tests generate_config_data with basic Ruby files.
        Tests that app.yaml is written with correct contents given entrypoint
        response, and that Dockerfile and .dockerignore not written to disk.
        """
        self.write_file('index.rb', 'class Index; end')
        self.write_file('Gemfile', 'source "https://rubygems.org"')
        self.write_file('config.ru', 'run Index.app')
        unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
        cfg_files = self.generate_config_data()
        unstub()
        app_yaml = self.file_contents('app.yaml')
        self.assertIn('runtime: ruby\n', app_yaml)
        self.assertIn('env: flex\n', app_yaml)
        self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n',
                      app_yaml)
        self.assertNotIn('Dockerfile', [f.filename for f in cfg_files])
        self.assertNotIn('.dockerignore', [f.filename for f in cfg_files])
    def test_generate_with_deploy(self):
        # deploy=True must materialize Dockerfile and .dockerignore on disk.
        self.write_file('index.rb', 'class Index; end')
        self.write_file('Gemfile', 'source "https://rubygems.org"')
        self.write_file('config.ru', 'run Index.app')
        self.write_file('.ruby-version', 'rbx-3.9')
        unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
        self.generate_configs(deploy=True)
        unstub()
        dockerfile = self.file_contents('Dockerfile')
        self.assertEqual(
            dockerfile,
            DOCKERFILE_TEXT.format(
                ruby_version='rbx-3.9',
                entrypoint='bundle exec rackup -p $PORT -E deployment'))
        dockerignore = self.file_contents('.dockerignore')
        self.assertIn('.dockerignore\n', dockerignore)
        self.assertIn('Dockerfile\n', dockerignore)
        self.assertIn('.git\n', dockerignore)
        self.assertIn('.hg\n', dockerignore)
        self.assertIn('.svn\n', dockerignore)
    def test_generate_with_deploy_no_write(self):
        """Tests generate_config_data with deploy=True.
        Tests that .dockerignore and Dockerfile contents are correct
        based on contents of app.
        """
        self.write_file('index.rb', 'class Index; end')
        self.write_file('Gemfile', 'source "https://rubygems.org"')
        self.write_file('config.ru', 'run Index.app')
        self.write_file('.ruby-version', 'rbx-3.9')
        unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
        cfg_files = self.generate_config_data(deploy=True)
        unstub()
        self.assert_genfile_exists_with_contents(
            cfg_files,
            'Dockerfile',
            DOCKERFILE_TEXT.format(
                ruby_version='rbx-3.9',
                entrypoint='bundle exec rackup -p $PORT -E deployment'))
        self.assertIn('.dockerignore', [f.filename for f in cfg_files])
        dockerignore = [f.contents for f in cfg_files if
                        f.filename == '.dockerignore'][0]
        self.assertIn('.dockerignore\n', dockerignore)
        self.assertIn('Dockerfile\n', dockerignore)
        self.assertIn('.git\n', dockerignore)
        self.assertIn('.hg\n', dockerignore)
        self.assertIn('.svn\n', dockerignore)
    def test_generate_with_custom(self):
        # custom=True switches app.yaml to `runtime: custom` and writes
        # the Docker files even without deploy.
        self.write_file('index.rb', 'class Index; end')
        self.write_file('Gemfile', 'source "https://rubygems.org"')
        self.write_file('config.ru', 'run Index.app')
        unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
        self.generate_configs(custom=True)
        unstub()
        app_yaml = self.file_contents('app.yaml')
        self.assertIn('runtime: custom\n', app_yaml)
        self.assertIn('env: flex\n', app_yaml)
        self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n',
                      app_yaml)
        dockerfile = self.file_contents('Dockerfile')
        self.assertEqual(
            dockerfile,
            DOCKERFILE_TEXT.format(
                ruby_version='',
                entrypoint='bundle exec rackup -p $PORT -E deployment'))
        dockerignore = self.file_contents('.dockerignore')
        self.assertIn('.dockerignore\n', dockerignore)
        self.assertIn('Dockerfile\n', dockerignore)
        self.assertIn('.git\n', dockerignore)
        self.assertIn('.hg\n', dockerignore)
        self.assertIn('.svn\n', dockerignore)
    def test_generate_with_custom_no_write(self):
        """Tests generate_config_data with custom=True.
        Tests that app.yaml is written with correct parameters and
        Dockerfile, .dockerignore contents are correctly returned by method.
        """
        self.write_file('index.rb', 'class Index; end')
        self.write_file('Gemfile', 'source "https://rubygems.org"')
        self.write_file('config.ru', 'run Index.app')
        unstub = self.stub_response('bundle exec rackup -p $PORT -E deployment')
        cfg_files = self.generate_config_data(custom=True)
        unstub()
        app_yaml = self.file_contents('app.yaml')
        self.assertIn('runtime: custom\n', app_yaml)
        self.assertIn('env: flex\n', app_yaml)
        self.assertIn('entrypoint: bundle exec rackup -p $PORT -E deployment\n',
                      app_yaml)
        self.assert_genfile_exists_with_contents(
            cfg_files,
            'Dockerfile',
            DOCKERFILE_TEXT.format(
                ruby_version='',
                entrypoint='bundle exec rackup -p $PORT -E deployment'))
        self.assertIn('.dockerignore', [f.filename for f in cfg_files])
        dockerignore = [f.contents for f in cfg_files if
                        f.filename == '.dockerignore'][0]
        self.assertIn('.dockerignore\n', dockerignore)
        self.assertIn('Dockerfile\n', dockerignore)
        self.assertIn('.git\n', dockerignore)
        self.assertIn('.hg\n', dockerignore)
        self.assertIn('.svn\n', dockerignore)
    def test_generate_with_existing_appinfo(self):
        # When appinfo is passed in, app.yaml must not be (re)written.
        self.write_file('index.rb', 'class Index; end')
        self.write_file('Gemfile', 'source "https://rubygems.org"')
        self.write_file('config.ru', 'run Index.app')
        appinfo = testutil.AppInfoFake(
            entrypoint='bundle exec ruby index.rb $PORT',
            runtime='ruby',
            vm=True)
        self.generate_configs(appinfo=appinfo, deploy=True)
        self.assertFalse(os.path.exists(self.full_path('app.yaml')))
        dockerfile = self.file_contents('Dockerfile')
        self.assertEqual(
            dockerfile,
            DOCKERFILE_TEXT.format(
                ruby_version='',
                entrypoint='bundle exec ruby index.rb $PORT'))
        dockerignore = self.file_contents('.dockerignore')
        self.assertIn('.dockerignore\n', dockerignore)
        self.assertIn('Dockerfile\n', dockerignore)
        self.assertIn('.git\n', dockerignore)
        self.assertIn('.hg\n', dockerignore)
        self.assertIn('.svn\n', dockerignore)
    def test_generate_with_existing_appinfo_no_write(self):
        """Tests generate_config_data with passed appinfo."""
        self.write_file('index.rb', 'class Index; end')
        self.write_file('Gemfile', 'source "https://rubygems.org"')
        self.write_file('config.ru', 'run Index.app')
        appinfo = testutil.AppInfoFake(
            entrypoint='bundle exec ruby index.rb $PORT',
            runtime='ruby',
            vm=True)
        cfg_files = self.generate_config_data(appinfo=appinfo, deploy=True)
        self.assertFalse(os.path.exists(self.full_path('app.yaml')))
        self.assert_genfile_exists_with_contents(
            cfg_files,
            'Dockerfile',
            DOCKERFILE_TEXT.format(
                ruby_version='',
                entrypoint='bundle exec ruby index.rb $PORT'))
        self.assertIn('.dockerignore', [f.filename for f in cfg_files])
        dockerignore = [f.contents for f in cfg_files if
                        f.filename == '.dockerignore'][0]
        self.assertIn('.dockerignore\n', dockerignore)
        self.assertIn('Dockerfile\n', dockerignore)
        self.assertIn('.git\n', dockerignore)
        self.assertIn('.hg\n', dockerignore)
        self.assertIn('.svn\n', dockerignore)
    def test_generate_with_ruby_version(self):
        # .ruby-version content flows into the generated Dockerfile.
        self.write_file('index.rb', 'class Index; end')
        self.write_file('Gemfile', 'source "https://rubygems.org"')
        self.write_file('config.ru', 'run Index.app')
        self.write_file('.ruby-version', '2.3.1\n')
        appinfo = testutil.AppInfoFake(
            entrypoint='bundle exec ruby index.rb $PORT',
            runtime='ruby',
            vm=True)
        self.generate_configs(appinfo=appinfo, deploy=True)
        self.assertFalse(os.path.exists(self.full_path('app.yaml')))
        dockerfile = self.file_contents('Dockerfile')
        self.assertEqual(
            dockerfile,
            DOCKERFILE_TEXT.format(
                ruby_version='2.3.1',
                entrypoint='bundle exec ruby index.rb $PORT'))
        dockerignore = self.file_contents('.dockerignore')
        self.assertIn('.dockerignore\n', dockerignore)
        self.assertIn('Dockerfile\n', dockerignore)
        self.assertIn('.git\n', dockerignore)
        self.assertIn('.hg\n', dockerignore)
        self.assertIn('.svn\n', dockerignore)
    def test_generate_with_ruby_version_no_write(self):
        """Tests generate_config_data with .ruby-version file."""
        self.write_file('index.rb', 'class Index; end')
        self.write_file('Gemfile', 'source "https://rubygems.org"')
        self.write_file('config.ru', 'run Index.app')
        self.write_file('.ruby-version', '2.3.1\n')
        appinfo = testutil.AppInfoFake(
            entrypoint='bundle exec ruby index.rb $PORT',
            runtime='ruby',
            vm=True)
        cfg_files = self.generate_config_data(appinfo=appinfo, deploy=True)
        self.assertFalse(os.path.exists(self.full_path('app.yaml')))
        self.assert_genfile_exists_with_contents(
            cfg_files,
            'Dockerfile',
            DOCKERFILE_TEXT.format(
                ruby_version='2.3.1',
                entrypoint='bundle exec ruby index.rb $PORT'))
        self.assertIn('.dockerignore', [f.filename for f in cfg_files])
        dockerignore = [f.contents for f in cfg_files if
                        f.filename == '.dockerignore'][0]
        self.assertIn('.dockerignore\n', dockerignore)
        self.assertIn('Dockerfile\n', dockerignore)
        self.assertIn('.git\n', dockerignore)
        self.assertIn('.hg\n', dockerignore)
        self.assertIn('.svn\n', dockerignore)
    def test_generate_with_prompt(self):
        # Entrypoint comes from the (stubbed) interactive prompt.
        self.write_file('index.rb', 'class Index; end')
        self.write_file('Gemfile', 'source "https://rubygems.org"')
        unstub = self.stub_response('bundle exec ruby index.rb $PORT')
        self.generate_configs(deploy=True)
        unstub()
        dockerfile = self.file_contents('Dockerfile')
        self.assertEqual(
            dockerfile,
            DOCKERFILE_TEXT.format(
                ruby_version='',
                entrypoint='bundle exec ruby index.rb $PORT'))
        dockerignore = self.file_contents('.dockerignore')
        self.assertIn('.dockerignore\n', dockerignore)
        self.assertIn('Dockerfile\n', dockerignore)
        self.assertIn('.git\n', dockerignore)
        self.assertIn('.hg\n', dockerignore)
        self.assertIn('.svn\n', dockerignore)
    def test_generate_with_prompt_no_write(self):
        """Tests generate_config_data with entrypoint given by prompt."""
        self.write_file('index.rb', 'class Index; end')
        self.write_file('Gemfile', 'source "https://rubygems.org"')
        unstub = self.stub_response('bundle exec ruby index.rb $PORT')
        cfg_files = self.generate_config_data(deploy=True)
        unstub()
        self.assert_genfile_exists_with_contents(
            cfg_files,
            'Dockerfile',
            DOCKERFILE_TEXT.format(
                ruby_version='',
                entrypoint='bundle exec ruby index.rb $PORT'))
        self.assertIn('.dockerignore', [f.filename for f in cfg_files])
        dockerignore = [f.contents for f in cfg_files if
                        f.filename == '.dockerignore'][0]
        self.assertIn('.dockerignore\n', dockerignore)
        self.assertIn('Dockerfile\n', dockerignore)
        self.assertIn('.git\n', dockerignore)
        self.assertIn('.hg\n', dockerignore)
        self.assertIn('.svn\n', dockerignore)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
#!flask/bin/python
from flask import Flask, jsonify, abort, request, make_response, url_for
from flask.ext.httpauth import HTTPBasicAuth
#from flask.ext.sqlalchemy import SQLAlchemy
import Adafruit_BBIO.GPIO as GPIO
import Adafruit_BBIO.ADC as ADC
from time import sleep
""" BeagleBone Black setup """
GPIO.setup("P8_11", GPIO.OUT)
GPIO.setup("P8_03", GPIO.OUT)
ADC.setup()
""" Enable MCP9700 """
GPIO.setup("P8_03", GPIO.HIGH)
app = Flask(__name__, static_url_path = "")
#app.run(host='192.168.1.107', port=5001)
auth = HTTPBasicAuth()
#db = SQLAlchemy(app)
@auth.get_password
def get_password(username):
    """Return the HTTP Basic Auth password for *username*, or None if unknown."""
    return 'appengine' if username == 'medity' else None
@auth.error_handler
def unauthorized():
    """Return a JSON error body for failed authentication."""
    return make_response(jsonify( { 'error': 'Unauthorized access' } ), 403)
    # return 403 instead of 401 to prevent browsers from displaying the default auth dialog
@app.errorhandler(400)
def bad_request(error):
    """Return a JSON body for 400 errors instead of Flask's default HTML.

    Bug fix: this handler was named `not_found`, the same as the 404 handler
    defined right after it, so the module-level name was silently shadowed.
    Flask keeps the registration made by the decorator, so client-visible
    behaviour is unchanged; only the (previously dead) name differs.
    """
    return make_response(jsonify({'error': 'Bad request'}), 400)
@app.errorhandler(404)
def not_found(error):
    """Return a JSON body for 404 errors instead of Flask's default HTML."""
    return make_response(jsonify( { 'error': 'Not found' } ), 404)
# In-memory device registry (no database).  Entries are rendered through
# make_public() before being returned to clients.
devices = [
    {
        'id': 1,
        'title': u'Temperature',
        'type': u'Termometer',
        'description': u"Read temperature in Daniele's room",
        'parameter1': u"Test",
        # Bug fix: this dict previously listed 'value' twice ('20.1' and 1);
        # Python silently keeps only the last duplicate key, so the effective
        # value was 1.  The dead first key is removed.
        'value': 1,
        'temp_on': 10,
        'temp_off': 20,
        'active': True
    },
    {
        'id': 2,
        'type': u'led',
        'title': u'LED',
        'description': u'Turn a led on',
        'value': 1,
        'active': True
    },
    {
        'id': 3,
        'type': u'dimmer',
        'title': u'Dimmer',
        'description': u'Dimmer using PWM',
        'value': 100,
        'active': True
    },
    {
        'id': 4,
        'title': u'Generic Pin',
        'type': u'pin',
        'description': u"General purpose.",
        'parameter1': u"Test",
        # Bug fix: duplicate 'value' key removed (was 1 then 0; effective 0).
        'value': 0,
        'active': True
    },
]
@app.route('/remote/api/v1.0/devices/<pin_to_change>/<action>')
#@auth.login_required
def do_something(pin_to_change, action):
    """Act on a device pin and return the Temperature device as JSON.

    Bug fixes: the old body dereferenced the undefined names `pins` and
    `changePin` (NameError on every request), and it mutated `device[0]`
    *before* checking whether the lookup matched anything (IndexError
    instead of a 404).
    NOTE(review): `pin_to_change` and `action` are unused beyond the int
    cast -- this handler looks unfinished; confirm intended behaviour.
    """
    pin_to_change = int(pin_to_change)  # non-numeric input raises ValueError (HTTP 500)
    device = filter(lambda t: t['title'] == 'Temperature', devices)
    if len(device) == 0:
        abort(404)
    device[0]['value'] = '4'  # update the temperature
    return jsonify({'device': make_public(device[0])})
""" Set value for termometer """
@app.route('/remote/api/v1.0/devices/temp')
#@auth.login_required
def get_temperature():
device = filter(lambda t: t['title'] == 'Temperature', devices)
#device[0]['value'] = "15" #request.json.get('status', device[0]['status'])
""" Read ambient temperature """
reading = ADC.read("P9_39")
millivolts = reading * 1800
temp_c = "{0:.1f}".format((millivolts - 500) / 10)
device[0]['value'] = str(temp_c) #update the temperature
if len(device) == 0:
abort(404)
return jsonify( { 'device': make_public(device[0]) } )
""" Termostato """
@app.route('/remote/api/v1.0/devices/term' , methods = ['PUT'])
#@auth.login_required
def set_temperature():
device = filter(lambda t: t['title'] == 'Temperature', devices)
#device[0]['value'] = "15" #request.json.get('status', device[0]['status'])
device[0]['value'] = '4' #update the temperature
if len(device) == 0:
abort(404)
device[0]['temp_on'] = request.json.get('temp_on', device[0]['temp_on'])
device[0]['temp_off'] = request.json.get('temp_off', device[0]['temp_off'])
return jsonify( { 'device': make_public(device[0]) } )
def make_public(device):
    """Return a copy of *device* with its 'id' replaced by a resource 'uri'."""
    public = {}
    for key in device:
        if key == 'id':
            public['uri'] = url_for('get_device', device_id = device['id'], _external = True)
        else:
            public[key] = device[key]
    return public
@app.route('/remote/api/v1.0/devices', methods = ['GET'])
#@auth.login_required
def get_devices():
    """Return every known device in its public (URI-bearing) form."""
    public_devices = [make_public(d) for d in devices]
    return jsonify({'devices': public_devices})
@app.route('/remote/api/v1.0/devices/<int:device_id>', methods = ['GET'])
#@auth.login_required
def get_device(device_id):
    """Return a single device by id, or 404 when it does not exist."""
    matches = [d for d in devices if d['id'] == device_id]
    if len(matches) == 0:
        abort(404)
    return jsonify({'device': make_public(matches[0])})
@app.route('/remote/api/v1.0/devices', methods = ['POST'])
@auth.login_required
def create_device():
    """Create a device from the posted JSON body; 'title' is mandatory."""
    payload = request.json
    if not payload or 'title' not in payload:
        abort(400)
    new_device = {
        # Next id after the highest existing one (list is append-only).
        'id': devices[-1]['id'] + 1,
        'title': payload['title'],
        'description': payload.get('description', ""),
        'status': False,
    }
    devices.append(new_device)
    return jsonify({'device': make_public(new_device)}), 201
@app.route('/remote/api/v1.0/devices/<int:device_id>', methods = ['PUT'])
#@auth.login_required
def update_device(device_id):
    """Update a device's title/description/active/value and drive its pin.

    Returns the updated device; 404 for an unknown id, 400 for a missing or
    type-invalid JSON body.
    """
    device = filter(lambda t: t['id'] == device_id, devices)
    if len(device) == 0:
        abort(404)
    if not request.json:
        abort(400)
    # Validate field types consistently via isinstance; the original mixed
    # `type(x) != unicode` and `type(x) is not unicode` styles.
    if 'title' in request.json and not isinstance(request.json['title'], unicode):
        abort(400)
    if 'description' in request.json and not isinstance(request.json['description'], unicode):
        abort(400)
    if 'active' in request.json and not isinstance(request.json['active'], bool):
        abort(400)
    device[0]['title'] = request.json.get('title', device[0]['title'])
    device[0]['description'] = request.json.get('description', device[0]['description'])
    device[0]['active'] = request.json.get('active', device[0]['active'])
    device[0]['value'] = request.json.get('value', device[0]['value'])
    # Mirror the logical value onto the physical LED pin; values other than
    # 0/1 intentionally leave the pin untouched.
    if device[0]['value'] == 1:
        GPIO.output("P8_11", GPIO.HIGH)
    elif device[0]['value'] == 0:
        GPIO.output("P8_11", GPIO.LOW)
    return jsonify({'device': make_public(device[0])})
@app.route('/remote/api/v1.0/devices/<int:device_id>', methods = ['DELETE'])
@auth.login_required
def delete_device(device_id):
    """Remove the device with *device_id*; 404 when it does not exist."""
    matches = [d for d in devices if d['id'] == device_id]
    if len(matches) == 0:
        abort(404)
    devices.remove(matches[0])
    return jsonify({'result': True})
def update_status(status):
    """Set the LED device's value to *status*.

    Bug fixes: the original indexed device[0] before the empty check
    (IndexError instead of 404) and hard-coded '1', silently ignoring the
    *status* argument it was given.
    """
    device = filter(lambda t: t['title'] == 'LED', devices)
    if len(device) == 0:
        abort(404)
    device[0]['value'] = str(status)  # update the led status
    return jsonify({'device': make_public(device[0])})
# Development entry point: serve on the board's LAN address.
# NOTE(review): debug=True enables the Werkzeug debugger and must not be
# used on a network-exposed production device.
if __name__ == '__main__':
    app.run(host='192.168.1.107', port = 5001, debug = True)
| |
#!/usr/bin/env python
# encoding: utf-8
"""
This module of native functions is implemented for
compatability with 010 editor functions. Some of these functions
are nops, some are fully implemented.
"""
import six
import sys
from pfp.native import native
import pfp.fields
import pfp.errors as errors
import pfp.bitwrap as bitwrap
# http://www.sweetscape.com/010editor/manual/FuncIO.htm
#void BigEndian()
@native(name="BigEndian", ret=pfp.fields.Void)
def BigEndian(params, ctxt, scope, stream, coord):
    """Switch all subsequent number parsing to big-endian byte order."""
    if len(params) > 0:
        raise errors.InvalidArguments(coord, "0 arguments", "{} args".format(len(params)))
    pfp.fields.NumberBase.endian = pfp.fields.BIG_ENDIAN
#void BitfieldDisablePadding()
@native(name="BitfieldDisablePadding", ret=pfp.fields.Void, send_interp=True)
def BitfieldDisablePadding(params, ctxt, scope, stream, coord, interp):
    """Disable padding of bitfields to their type's full width."""
    if len(params) > 0:
        raise errors.InvalidArguments(coord, "0 arguments", "{} args".format(len(params)))
    interp.set_bitfield_padded(False)
#void BitfieldEnablePadding()
@native(name="BitfieldEnablePadding", ret=pfp.fields.Void, send_interp=True)
def BitfieldEnablePadding(params, ctxt, scope, stream, coord, interp):
    """Re-enable padding of bitfields to their type's full width."""
    if len(params) > 0:
        raise errors.InvalidArguments(coord, "0 arguments", "{} args".format(len(params)))
    interp.set_bitfield_padded(True)
#void BitfieldLeftToRight()
@native(name="BitfieldLeftToRight", ret=pfp.fields.Void, send_interp=True)
def BitfieldLeftToRight(params, ctxt, scope, stream, coord, interp):
    """Read bitfields left-to-right within each storage unit."""
    if len(params) > 0:
        raise errors.InvalidArguments(coord, "0 arguments", "{} args".format(len(params)))
    interp.set_bitfield_left_right()
#void BitfieldRightToLeft()
@native(name="BitfieldRightToLeft", ret=pfp.fields.Void, send_interp=True)
def BitfieldRightToLeft(params, ctxt, scope, stream, coord, interp):
    """Read bitfields right-to-left within each storage unit."""
    if len(params) > 0:
        raise errors.InvalidArguments(coord, "0 arguments", "{} args".format(len(params)))
    interp.set_bitfield_right_left()
# The following 010-editor natives are intentionally unimplemented stubs.
#double ConvertBytesToDouble( uchar byteArray[] )
@native(name="ConvertBytesToDouble", ret=pfp.fields.Double)
def ConvertBytesToDouble(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#float ConvertBytesToFloat( uchar byteArray[] )
@native(name="ConvertBytesToFloat", ret=pfp.fields.Float)
def ConvertBytesToFloat(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#hfloat ConvertBytesToHFloat( uchar byteArray[] )
@native(name="ConvertBytesToHFloat", ret=pfp.fields.Float)
def ConvertBytesToHFloat(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int ConvertDataToBytes( data_type value, uchar byteArray[] )
@native(name="ConvertDataToBytes", ret=pfp.fields.Int)
def ConvertDataToBytes(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void DeleteBytes( int64 start, int64 size )
@native(name="DeleteBytes", ret=pfp.fields.Void)
def DeleteBytes(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int DirectoryExists( string dir )
@native(name="DirectoryExists", ret=pfp.fields.Int)
def DirectoryExists(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int FEof()
@native(name="FEof", ret=pfp.fields.Int)
def FEof(params, ctxt, scope, stream, coord):
    """Return 1 when the input stream is at end-of-file, otherwise 0."""
    if len(params) > 0:
        raise errors.InvalidArguments(coord, "0 arguments", "{} args".format(len(params)))
    # Every stream is a BitwrappedStream, so is_eof() is always available.
    return 1 if stream.is_eof() else 0
#int64 FileSize()
@native(name="FileSize", ret=pfp.fields.Int64)
def FileSize(params, ctxt, scope, stream, coord):
    """Return the total size of the input stream, in bytes."""
    if len(params) > 0:
        raise errors.InvalidArguments(coord, "0 arguments", "{} args".format(len(params)))
    return stream.size()
# Unimplemented 010-editor natives (filesystem / formatted output).
#TFileList FindFiles( string dir, string filter )
@native(name="FindFiles", ret=pfp.fields.Void)
def FindFiles(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int FPrintf( int fileNum, char format[], ... )
@native(name="FPrintf", ret=pfp.fields.Int)
def FPrintf(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int FSeek( int64 pos )
@native(name="FSeek", ret=pfp.fields.Int)
def FSeek(params, ctxt, scope, stream, coord):
    """Returns 0 if successful or -1 if the address is out of range

    Seeking backwards simply repositions the stream.  Seeking forwards
    consumes the intervening bytes and records them as a "_skipped" char
    array on the current context, merging with an immediately preceding
    "_skipped" field so consecutive skips collapse into one.
    """
    if len(params) != 1:
        raise errors.InvalidArguments(coord, "{} args".format(len(params)), "FSeek accepts only one argument")
    # NOTE(review): PYVAL is not imported in this module's visible imports;
    # presumably injected by the native-function environment -- confirm.
    pos = PYVAL(params[0])
    curr_pos = stream.tell()
    fsize = stream.size()
    if pos > fsize or pos < 0:
        return -1
    diff = pos - curr_pos
    if diff < 0:
        # Backwards seek: no data is skipped, just move the cursor.
        stream.seek(pos)
        return 0
    data = stream.read(diff)
    # let the ctxt automatically append numbers, as needed, unless the previous
    # child was also a skipped field
    skipped_name = "_skipped"
    if len(ctxt._pfp__children) > 0 and ctxt._pfp__children[-1]._pfp__name.startswith("_skipped"):
        # Merge with the previous skipped field: concatenate its raw bytes,
        # reuse its name, and drop it from the child list.
        data = ctxt._pfp__children[-1].raw_data + data
        skipped_name = ctxt._pfp__children[-1]._pfp__name
        ctxt._pfp__children = ctxt._pfp__children[:-1]
    # Re-parse the skipped bytes as a char array attached to the context.
    tmp_stream = bitwrap.BitwrappedStream(six.BytesIO(data))
    new_field = pfp.fields.Array(len(data), pfp.fields.Char, tmp_stream)
    ctxt._pfp__add_child(skipped_name, new_field, stream)
    scope.add_var(skipped_name, new_field)
    return 0
#int FSkip( int64 offset )
@native(name="FSkip", ret=pfp.fields.Int)
def FSkip(params, ctxt, scope, stream, coord):
    """Seek relative to the current position.

    Returns 0 if successful or -1 if the resulting address is out of range.
    Implemented as an absolute FSeek to (current position + offset).
    """
    if len(params) != 1:
        raise errors.InvalidArguments(coord, "{} args".format(len(params)), "FSkip accepts only one argument")
    target = stream.tell() + PYVAL(params[0])
    return FSeek([target], ctxt, scope, stream, coord)
#int64 FTell()
@native(name="FTell", ret=pfp.fields.Int64)
def FTell(params, ctxt, scope, stream, coord):
    """Return the current byte position of the input stream."""
    if len(params) > 0:
        raise errors.InvalidArguments(coord, "0 arguments", "{} args".format(len(params)))
    return stream.tell()
#void InsertBytes( int64 start, int64 size, uchar value=0 )
@native(name="InsertBytes", ret=pfp.fields.Void)
def InsertBytes(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int IsBigEndian()
@native(name="IsBigEndian", ret=pfp.fields.Int)
def IsBigEndian(params, ctxt, scope, stream, coord):
    """Return 1 when the current global endianness is big-endian, else 0."""
    if len(params) > 0:
        raise errors.InvalidArguments(coord, "0 arguments", "{} args".format(len(params)))
    return 1 if pfp.fields.NumberBase.endian == pfp.fields.BIG_ENDIAN else 0
#int IsLittleEndian()
@native(name="IsLittleEndian", ret=pfp.fields.Int)
def IsLittleEndian(params, ctxt, scope, stream, coord):
    """Return 1 when the current global endianness is little-endian, else 0.

    Bug fix: the original returned 0 when the endianness *was* little-endian
    and 1 otherwise -- the inverse of the 010-editor contract and of the
    sibling IsBigEndian() above.
    """
    if len(params) > 0:
        raise errors.InvalidArguments(coord, "0 arguments", "{} args".format(len(params)))
    if pfp.fields.NumberBase.endian == pfp.fields.LITTLE_ENDIAN:
        return 1
    else:
        return 0
#void LittleEndian()
@native(name="LittleEndian", ret=pfp.fields.Void)
def LittleEndian(params, ctxt, scope, stream, coord):
    """Switch all subsequent number parsing to little-endian byte order."""
    if len(params) > 0:
        raise errors.InvalidArguments(coord, "0 arguments", "{} args".format(len(params)))
    pfp.fields.NumberBase.endian = pfp.fields.LITTLE_ENDIAN
# Unimplemented 010-editor natives (filesystem / byte editing).
#int MakeDir( string dir )
@native(name="MakeDir", ret=pfp.fields.Int)
def MakeDir(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void OverwriteBytes( int64 start, int64 size, uchar value=0 )
@native(name="OverwriteBytes", ret=pfp.fields.Void)
def OverwriteBytes(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
def _read_data(params, stream, cls, coord):
    """Parse one *cls* field from *stream* without moving the cursor.

    Shared implementation for all the Read*() natives: an optional single
    parameter gives an absolute position to read from (default: current
    position).  Stream position and bit state are restored afterwards.
    """
    # Save bit-level state too; cls(stream=stream) may consume partial bytes.
    bits = stream._bits
    curr_pos = stream.tell()
    if len(params) == 1:
        pos = PYVAL(params[0])
        stream.seek(pos, 0)
    elif len(params) > 1:
        raise errors.InvalidArguments(coord, "at most 1 arguments", "{} args".format(len(params)))
    res = cls(stream=stream)
    # reset the stream
    stream.seek(curr_pos, 0)
    stream._bits = bits
    return res
# Thin wrappers over _read_data: each reads one field of the named type at
# an optional absolute position without advancing the stream.
#char ReadByte( int64 pos=FTell() )
@native(name="ReadByte", ret=pfp.fields.Char)
def ReadByte(params, ctxt, scope, stream, coord):
    return _read_data(params, stream, pfp.fields.Char, coord)
#double ReadDouble( int64 pos=FTell() )
@native(name="ReadDouble", ret=pfp.fields.Double)
def ReadDouble(params, ctxt, scope, stream, coord):
    return _read_data(params, stream, pfp.fields.Double, coord)
#float ReadFloat( int64 pos=FTell() )
@native(name="ReadFloat", ret=pfp.fields.Float)
def ReadFloat(params, ctxt, scope, stream, coord):
    return _read_data(params, stream, pfp.fields.Float, coord)
#hfloat ReadHFloat( int64 pos=FTell() )
@native(name="ReadHFloat", ret=pfp.fields.Float)
def ReadHFloat(params, ctxt, scope, stream, coord):
    # NOTE(review): half-floats are approximated with full Float here.
    return _read_data(params, stream, pfp.fields.Float, coord)
#int ReadInt( int64 pos=FTell() )
@native(name="ReadInt", ret=pfp.fields.Int)
def ReadInt(params, ctxt, scope, stream, coord):
    return _read_data(params, stream, pfp.fields.Int, coord)
#int64 ReadInt64( int64 pos=FTell() )
@native(name="ReadInt64", ret=pfp.fields.Int64)
def ReadInt64(params, ctxt, scope, stream, coord):
    return _read_data(params, stream, pfp.fields.Int64, coord)
#int64 ReadQuad( int64 pos=FTell() )
@native(name="ReadQuad", ret=pfp.fields.Int64)
def ReadQuad(params, ctxt, scope, stream, coord):
    return _read_data(params, stream, pfp.fields.Int64, coord)
#short ReadShort( int64 pos=FTell() )
@native(name="ReadShort", ret=pfp.fields.Short)
def ReadShort(params, ctxt, scope, stream, coord):
    return _read_data(params, stream, pfp.fields.Short, coord)
#uchar ReadUByte( int64 pos=FTell() )
@native(name="ReadUByte", ret=pfp.fields.UChar)
def ReadUByte(params, ctxt, scope, stream, coord):
    return _read_data(params, stream, pfp.fields.UChar, coord)
#uint ReadUInt( int64 pos=FTell() )
@native(name="ReadUInt", ret=pfp.fields.UInt)
def ReadUInt(params, ctxt, scope, stream, coord):
    return _read_data(params, stream, pfp.fields.UInt, coord)
#uint64 ReadUInt64( int64 pos=FTell() )
@native(name="ReadUInt64", ret=pfp.fields.UInt64)
def ReadUInt64(params, ctxt, scope, stream, coord):
    return _read_data(params, stream, pfp.fields.UInt64, coord)
#uint64 ReadUQuad( int64 pos=FTell() )
@native(name="ReadUQuad", ret=pfp.fields.UInt64)
def ReadUQuad(params, ctxt, scope, stream, coord):
    return _read_data(params, stream, pfp.fields.UInt64, coord)
#ushort ReadUShort( int64 pos=FTell() )
@native(name="ReadUShort", ret=pfp.fields.UShort)
def ReadUShort(params, ctxt, scope, stream, coord):
    return _read_data(params, stream, pfp.fields.UShort, coord)
#char[] ReadLine( int64 pos, int maxLen=-1, int includeLinefeeds=true )
@native(name="ReadLine", ret=pfp.fields.String)
def ReadLine(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void ReadBytes( uchar buffer[], int64 pos, int n )
@native(name="ReadBytes", ret=pfp.fields.Void)
def ReadBytes(params, ctxt, scope, stream, coord):
    """Fill *buffer* with *n* bytes read from the stream.

    Validates that buffer is a char/uchar array and pos/n are integers,
    then reads without permanently advancing the stream.

    NOTE(review): the declared *pos* argument is validated but never used
    for seeking -- reads start at the current position; confirm intent.
    """
    if len(params) != 3:
        raise errors.InvalidArguments(coord, "3 arguments (buffer, pos, n)", "{} args".format(len(params)))
    if not isinstance(params[0], pfp.fields.Array):
        raise errors.InvalidArguments(coord, "buffer must be an array", params[0].__class__.__name__)
    if params[0].field_cls not in [pfp.fields.UChar, pfp.fields.Char]:
        raise errors.InvalidArguments(coord, "buffer must be an array of uchar or char", params[0].field_cls.__name__)
    if not isinstance(params[1], pfp.fields.IntBase):
        raise errors.InvalidArguments(coord, "pos must be an integer", params[1].__class__.__name__)
    if not isinstance(params[2], pfp.fields.IntBase):
        raise errors.InvalidArguments(coord, "n must be an integer", params[2].__class__.__name__)
    # Save and restore position/bit state so the read is side-effect free.
    bits = stream._bits
    curr_pos = stream.tell()
    vals = [params[0].field_cls(stream) for x in six.moves.range(PYVAL(params[2]))]
    stream.seek(curr_pos, 0)
    stream._bits = bits
    params[0]._pfp__set_value(vals)
# The remaining 010-editor natives (string reads, text-view helpers, and all
# Write*() editing functions) are intentionally unimplemented stubs.
#char[] ReadString( int64 pos, int maxLen=-1 )
@native(name="ReadString", ret=pfp.fields.String)
def ReadString(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int ReadStringLength( int64 pos, int maxLen=-1 )
@native(name="ReadStringLength", ret=pfp.fields.Int)
def ReadStringLength(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#wstring ReadWLine( int64 pos, int maxLen=-1 )
@native(name="ReadWLine", ret=pfp.fields.WString)
def ReadWLine(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#wstring ReadWString( int64 pos, int maxLen=-1 )
@native(name="ReadWString", ret=pfp.fields.WString)
def ReadWString(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int ReadWStringLength( int64 pos, int maxLen=-1 )
@native(name="ReadWStringLength", ret=pfp.fields.Int)
def ReadWStringLength(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int64 TextAddressToLine( int64 address )
@native(name="TextAddressToLine", ret=pfp.fields.Int64)
def TextAddressToLine(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int TextAddressToColumn( int64 address )
@native(name="TextAddressToColumn", ret=pfp.fields.Int)
def TextAddressToColumn(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int64 TextColumnToAddress( int64 line, int column )
@native(name="TextColumnToAddress", ret=pfp.fields.Int64)
def TextColumnToAddress(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int64 TextGetNumLines()
@native(name="TextGetNumLines", ret=pfp.fields.Int64)
def TextGetNumLines(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int TextGetLineSize( int64 line, int includeLinefeeds=true )
@native(name="TextGetLineSize", ret=pfp.fields.Int)
def TextGetLineSize(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int64 TextLineToAddress( int64 line )
@native(name="TextLineToAddress", ret=pfp.fields.Int64)
def TextLineToAddress(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int TextReadLine( char buffer[], int64 line, int maxsize, int includeLinefeeds=true )
@native(name="TextReadLine", ret=pfp.fields.Int)
def TextReadLine(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#int TextReadLineW( wchar_t buffer[], int64 line, int maxsize, int includeLinefeeds=true )
@native(name="TextReadLineW", ret=pfp.fields.Int)
def TextReadLineW(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void TextWriteLine( const char buffer[], int64 line, int includeLinefeeds=true )
@native(name="TextWriteLine", ret=pfp.fields.Void)
def TextWriteLine(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void TextWriteLineW( const wchar_t buffer[], int64 line, int includeLinefeeds=true )
@native(name="TextWriteLineW", ret=pfp.fields.Void)
def TextWriteLineW(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteByte( int64 pos, char value )
@native(name="WriteByte", ret=pfp.fields.Void)
def WriteByte(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteDouble( int64 pos, double value )
@native(name="WriteDouble", ret=pfp.fields.Void)
def WriteDouble(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteFloat( int64 pos, float value )
@native(name="WriteFloat", ret=pfp.fields.Void)
def WriteFloat(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteHFloat( int64 pos, float value )
@native(name="WriteHFloat", ret=pfp.fields.Void)
def WriteHFloat(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteInt( int64 pos, int value )
@native(name="WriteInt", ret=pfp.fields.Void)
def WriteInt(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteInt64( int64 pos, int64 value )
@native(name="WriteInt64", ret=pfp.fields.Void)
def WriteInt64(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteQuad( int64 pos, int64 value )
@native(name="WriteQuad", ret=pfp.fields.Void)
def WriteQuad(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteShort( int64 pos, short value )
@native(name="WriteShort", ret=pfp.fields.Void)
def WriteShort(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteUByte( int64 pos, uchar value )
@native(name="WriteUByte", ret=pfp.fields.Void)
def WriteUByte(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteUInt( int64 pos, uint value )
@native(name="WriteUInt", ret=pfp.fields.Void)
def WriteUInt(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteUInt64( int64 pos, uint64 value )
@native(name="WriteUInt64", ret=pfp.fields.Void)
def WriteUInt64(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteUQuad( int64 pos, uint64 value )
@native(name="WriteUQuad", ret=pfp.fields.Void)
def WriteUQuad(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteUShort( int64 pos, ushort value )
@native(name="WriteUShort", ret=pfp.fields.Void)
def WriteUShort(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteBytes( const uchar buffer[], int64 pos, int n )
@native(name="WriteBytes", ret=pfp.fields.Void)
def WriteBytes(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteString( int64 pos, const char value[] )
@native(name="WriteString", ret=pfp.fields.Void)
def WriteString(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
#void WriteWString( int64 pos, const wstring value )
@native(name="WriteWString", ret=pfp.fields.Void)
def WriteWString(params, ctxt, scope, stream, coord):
    raise NotImplementedError()
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes + cinder.
"""
import copy
import sys
from cinderclient import exceptions as cinder_exception
from cinderclient import service_catalog
from cinderclient.v1 import client as cinder_client
from oslo.config import cfg
from nova import availability_zones as az
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import log as logging
# Configuration options controlling how Nova locates and talks to the
# Cinder volume service.
cinder_opts = [
    cfg.StrOpt('cinder_catalog_info',
            default='volume:cinder:publicURL',
            help='Info to match when looking for cinder in the service '
                 'catalog. Format is: separated values of the form: '
                 '<service_type>:<service_name>:<endpoint_type>'),
    cfg.StrOpt('cinder_endpoint_template',
               help='Override service catalog lookup with template for cinder '
                    'endpoint e.g. http://localhost:8776/v1/%(project_id)s'),
    cfg.StrOpt('os_region_name',
               help='Region name of this node'),
    cfg.StrOpt('cinder_ca_certificates_file',
               help='Location of ca certificates file to use for cinder '
                    'client requests.'),
    cfg.IntOpt('cinder_http_retries',
               default=3,
               help='Number of cinderclient retries on failed http calls'),
    cfg.BoolOpt('cinder_api_insecure',
                default=False,
                help='Allow to perform insecure SSL requests to cinder'),
    cfg.BoolOpt('cinder_cross_az_attach',
                default=True,
                help='Allow attach between instance and volume in different '
                     'availability zones.'),
]
CONF = cfg.CONF
CONF.register_opts(cinder_opts)
LOG = logging.getLogger(__name__)
def cinderclient(context):
    """Build an authenticated cinderclient for the given request context.

    The endpoint URL comes from ``cinder_endpoint_template`` when set,
    otherwise it is resolved from the context's service catalog using
    ``cinder_catalog_info`` (optionally filtered by ``os_region_name``).
    """
    # FIXME: the cinderclient ServiceCatalog object is mis-named.
    # It actually contains the entire access blob.
    # Only needed parts of the service catalog are passed in, see
    # nova/context.py.
    compat_catalog = {
        'access': {'serviceCatalog': context.service_catalog or []}
    }
    sc = service_catalog.ServiceCatalog(compat_catalog)
    if CONF.cinder_endpoint_template:
        # Explicit template wins over catalog lookup.
        url = CONF.cinder_endpoint_template % context.to_dict()
    else:
        info = CONF.cinder_catalog_info
        service_type, service_name, endpoint_type = info.split(':')
        # extract the region if set in configuration
        if CONF.os_region_name:
            attr = 'region'
            filter_value = CONF.os_region_name
        else:
            attr = None
            filter_value = None
        url = sc.url_for(attr=attr,
                         filter_value=filter_value,
                         service_type=service_type,
                         service_name=service_name,
                         endpoint_type=endpoint_type)
    LOG.debug('Cinderclient connection created using URL: %s', url)
    c = cinder_client.Client(context.user_id,
                             context.auth_token,
                             project_id=context.project_id,
                             auth_url=url,
                             insecure=CONF.cinder_api_insecure,
                             retries=CONF.cinder_http_retries,
                             cacert=CONF.cinder_ca_certificates_file)
    # noauth extracts user_id:project_id from auth_token
    c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id,
                                                           context.project_id)
    c.client.management_url = url
    return c
def _untranslate_volume_summary_view(context, vol):
"""Maps keys for volumes summary view."""
d = {}
d['id'] = vol.id
d['status'] = vol.status
d['size'] = vol.size
d['availability_zone'] = vol.availability_zone
d['created_at'] = vol.created_at
# TODO(jdg): The calling code expects attach_time and
# mountpoint to be set. When the calling
# code is more defensive this can be
# removed.
d['attach_time'] = ""
d['mountpoint'] = ""
if vol.attachments:
att = vol.attachments[0]
d['attach_status'] = 'attached'
d['instance_uuid'] = att['server_id']
d['mountpoint'] = att['device']
else:
d['attach_status'] = 'detached'
d['display_name'] = vol.display_name
d['display_description'] = vol.display_description
# TODO(jdg): Information may be lost in this translation
d['volume_type_id'] = vol.volume_type
d['snapshot_id'] = vol.snapshot_id
d['volume_metadata'] = {}
for key, value in vol.metadata.items():
d['volume_metadata'][key] = value
if hasattr(vol, 'volume_image_metadata'):
d['volume_image_metadata'] = copy.deepcopy(vol.volume_image_metadata)
return d
def _untranslate_snapshot_summary_view(context, snapshot):
"""Maps keys for snapshots summary view."""
d = {}
d['id'] = snapshot.id
d['status'] = snapshot.status
d['progress'] = snapshot.progress
d['size'] = snapshot.size
d['created_at'] = snapshot.created_at
d['display_name'] = snapshot.display_name
d['display_description'] = snapshot.display_description
d['volume_id'] = snapshot.volume_id
d['project_id'] = snapshot.project_id
d['volume_size'] = snapshot.size
return d
def translate_volume_exception(method):
    """Transforms the exception for the volume but keeps its traceback intact.

    Decorator for methods whose second positional argument is a volume id:
    cinderclient NotFound/BadRequest errors are re-raised as the equivalent
    nova exceptions, using the Python 2 three-argument raise so the
    original traceback is preserved.
    """
    def wrapper(self, ctx, volume_id, *args, **kwargs):
        try:
            res = method(self, ctx, volume_id, *args, **kwargs)
        except cinder_exception.ClientException:
            # exc_type is unused but sys.exc_info() returns all three parts.
            exc_type, exc_value, exc_trace = sys.exc_info()
            if isinstance(exc_value, cinder_exception.NotFound):
                exc_value = exception.VolumeNotFound(volume_id=volume_id)
            elif isinstance(exc_value, cinder_exception.BadRequest):
                exc_value = exception.InvalidInput(reason=exc_value.message)
            # Python 2 syntax: re-raise with the original traceback.
            raise exc_value, None, exc_trace
        return res
    return wrapper
def translate_snapshot_exception(method):
    """Transforms the exception for the snapshot but keeps its traceback
    intact.

    Snapshot-id counterpart of translate_volume_exception; only NotFound
    is translated here.
    """
    def wrapper(self, ctx, snapshot_id, *args, **kwargs):
        try:
            res = method(self, ctx, snapshot_id, *args, **kwargs)
        except cinder_exception.ClientException:
            exc_type, exc_value, exc_trace = sys.exc_info()
            if isinstance(exc_value, cinder_exception.NotFound):
                exc_value = exception.SnapshotNotFound(snapshot_id=snapshot_id)
            # Python 2 syntax: re-raise with the original traceback.
            raise exc_value, None, exc_trace
        return res
    return wrapper
class API(object):
    """API for interacting with the volume manager."""

    @translate_volume_exception
    def get(self, context, volume_id):
        """Return the summary dict for a single volume."""
        item = cinderclient(context).volumes.get(volume_id)
        return _untranslate_volume_summary_view(context, item)

    def get_all(self, context, search_opts=None):
        """Return summary dicts for all volumes.

        Bug fix: the original declared a mutable default argument
        (``search_opts={}``).  The parameter was and still is unused by the
        implementation, but ``None`` is the safe default.
        """
        items = cinderclient(context).volumes.list(detailed=True)
        return [_untranslate_volume_summary_view(context, item)
                for item in items]

    def check_attached(self, context, volume):
        """Raise InvalidVolume unless the volume is 'in-use'."""
        if volume['status'] != "in-use":
            msg = _("status must be 'in-use'")
            raise exception.InvalidVolume(reason=msg)

    def check_attach(self, context, volume, instance=None):
        """Raise InvalidVolume if the volume cannot be attached."""
        # TODO(vish): abstract status checking?
        if volume['status'] != "available":
            msg = _("status must be 'available'")
            raise exception.InvalidVolume(reason=msg)
        if volume['attach_status'] == "attached":
            msg = _("already attached")
            raise exception.InvalidVolume(reason=msg)
        if instance and not CONF.cinder_cross_az_attach:
            # NOTE(sorrison): If instance is on a host we match against it's AZ
            # else we check the intended AZ
            if instance.get('host'):
                instance_az = az.get_instance_availability_zone(
                    context, instance)
            else:
                instance_az = instance['availability_zone']
            if instance_az != volume['availability_zone']:
                msg = _("Instance and volume not in same availability_zone")
                raise exception.InvalidVolume(reason=msg)

    def check_detach(self, context, volume):
        """Raise InvalidVolume if the volume is already detached."""
        # TODO(vish): abstract status checking?
        if volume['status'] == "available":
            msg = _("already detached")
            raise exception.InvalidVolume(reason=msg)

    @translate_volume_exception
    def reserve_volume(self, context, volume_id):
        cinderclient(context).volumes.reserve(volume_id)

    @translate_volume_exception
    def unreserve_volume(self, context, volume_id):
        cinderclient(context).volumes.unreserve(volume_id)

    @translate_volume_exception
    def begin_detaching(self, context, volume_id):
        cinderclient(context).volumes.begin_detaching(volume_id)

    @translate_volume_exception
    def roll_detaching(self, context, volume_id):
        cinderclient(context).volumes.roll_detaching(volume_id)

    @translate_volume_exception
    def attach(self, context, volume_id, instance_uuid, mountpoint, mode='rw'):
        cinderclient(context).volumes.attach(volume_id, instance_uuid,
                                             mountpoint, mode=mode)

    @translate_volume_exception
    def detach(self, context, volume_id):
        cinderclient(context).volumes.detach(volume_id)

    @translate_volume_exception
    def initialize_connection(self, context, volume_id, connector):
        return cinderclient(context).volumes.initialize_connection(volume_id,
                                                                   connector)

    @translate_volume_exception
    def terminate_connection(self, context, volume_id, connector):
        return cinderclient(context).volumes.terminate_connection(volume_id,
                                                                  connector)

    def migrate_volume_completion(self, context, old_volume_id, new_volume_id,
                                  error=False):
        return cinderclient(context).volumes.migrate_volume_completion(
            old_volume_id, new_volume_id, error)

    def create(self, context, size, name, description, snapshot=None,
               image_id=None, volume_type=None, metadata=None,
               availability_zone=None):
        """Create a volume, translating quota/bad-request errors."""
        if snapshot is not None:
            snapshot_id = snapshot['id']
        else:
            snapshot_id = None
        kwargs = dict(snapshot_id=snapshot_id,
                      display_name=name,
                      display_description=description,
                      volume_type=volume_type,
                      user_id=context.user_id,
                      project_id=context.project_id,
                      availability_zone=availability_zone,
                      metadata=metadata,
                      imageRef=image_id)
        try:
            item = cinderclient(context).volumes.create(size, **kwargs)
            return _untranslate_volume_summary_view(context, item)
        except cinder_exception.OverLimit:
            raise exception.OverQuota(overs='volumes')
        except cinder_exception.BadRequest as e:
            # Python 2: unicode() keeps non-ASCII reasons intact.
            raise exception.InvalidInput(reason=unicode(e))

    @translate_volume_exception
    def delete(self, context, volume_id):
        cinderclient(context).volumes.delete(volume_id)

    @translate_volume_exception
    def update(self, context, volume_id, fields):
        raise NotImplementedError()

    @translate_snapshot_exception
    def get_snapshot(self, context, snapshot_id):
        """Return the summary dict for a single snapshot."""
        item = cinderclient(context).volume_snapshots.get(snapshot_id)
        return _untranslate_snapshot_summary_view(context, item)

    def get_all_snapshots(self, context):
        """Return summary dicts for all snapshots."""
        items = cinderclient(context).volume_snapshots.list(detailed=True)
        return [_untranslate_snapshot_summary_view(context, item)
                for item in items]

    @translate_volume_exception
    def create_snapshot(self, context, volume_id, name, description):
        # force=False: refuse to snapshot an attached volume.
        item = cinderclient(context).volume_snapshots.create(volume_id,
                                                             False,
                                                             name,
                                                             description)
        return _untranslate_snapshot_summary_view(context, item)

    @translate_volume_exception
    def create_snapshot_force(self, context, volume_id, name, description):
        # force=True: snapshot even while the volume is attached.
        item = cinderclient(context).volume_snapshots.create(volume_id,
                                                             True,
                                                             name,
                                                             description)
        return _untranslate_snapshot_summary_view(context, item)

    @translate_snapshot_exception
    def delete_snapshot(self, context, snapshot_id):
        cinderclient(context).volume_snapshots.delete(snapshot_id)

    def get_volume_encryption_metadata(self, context, volume_id):
        return cinderclient(context).volumes.get_encryption_metadata(volume_id)

    @translate_volume_exception
    def get_volume_metadata(self, context, volume_id):
        raise NotImplementedError()

    @translate_volume_exception
    def delete_volume_metadata(self, context, volume_id, key):
        raise NotImplementedError()

    @translate_volume_exception
    def update_volume_metadata(self, context, volume_id,
                               metadata, delete=False):
        raise NotImplementedError()

    @translate_volume_exception
    def get_volume_metadata_value(self, volume_id, key):
        raise NotImplementedError()

    @translate_snapshot_exception
    def update_snapshot_status(self, context, snapshot_id, status):
        vs = cinderclient(context).volume_snapshots
        # '90%' here is used to tell Cinder that Nova is done
        # with its portion of the 'creating' state. This can
        # be removed when we are able to split the Cinder states
        # into 'creating' and a separate state of
        # 'creating_in_nova'. (Same for 'deleting' state.)
        vs.update_snapshot_status(
            snapshot_id,
            {'status': status,
             'progress': '90%'}
        )
| |
# In order to manipulate the array
import numpy as np
# In order to load mat file
from scipy.io import loadmat
# In order to import the libsvm format dataset
from sklearn.datasets import load_svmlight_file
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn.preprocessing import Binarizer
from collections import Counter
from fetch.coil_2000 import fetch_coil_2000
from process.coil_2000 import convert_coil_2000
def abalone_19():
    """Abalone dataset: rings == 19 becomes class 1, everything else 0."""
    matfile = loadmat('../../data/raw/mldata/uci-20070111-abalone.mat')
    rings = np.ravel(matfile['int1'])
    sex = np.ravel(matfile['Sex'])
    n_samples = rings.shape[0]
    # Encode the sex attribute as an integer: M -> 0, F -> 1, I -> 2.
    sex_encoded = np.zeros(n_samples)
    for code, letter in enumerate(('M', 'F', 'I')):
        sex_encoded[np.nonzero(sex == letter)] = code
    data = np.zeros((n_samples, 8))
    data[:, 0] = sex_encoded
    data[:, 1:] = matfile['double0'].T
    label = np.zeros((n_samples, ), dtype=int)
    label[np.nonzero(rings == 19)] = 1
    np.savez('../../data/clean/uci-abalone-19.npz', data=data, label=label)
def abalone_7():
    """Abalone dataset: rings == 7 becomes class 1, everything else 0."""
    matfile = loadmat('../../data/raw/mldata/uci-20070111-abalone.mat')
    rings = np.ravel(matfile['int1'])
    sex = np.ravel(matfile['Sex'])
    n_samples = rings.shape[0]
    # Encode the sex attribute as an integer: M -> 0, F -> 1, I -> 2.
    sex_encoded = np.zeros(n_samples)
    for code, letter in enumerate(('M', 'F', 'I')):
        sex_encoded[np.nonzero(sex == letter)] = code
    data = np.zeros((n_samples, 8))
    data[:, 0] = sex_encoded
    data[:, 1:] = matfile['double0'].T
    label = np.zeros((n_samples, ), dtype=int)
    label[np.nonzero(rings == 7)] = 1
    np.savez('../../data/clean/uci-abalone-7.npz', data=data, label=label)
def adult():
    """Adult dataset: six numeric columns as data, last column as label."""
    raw = np.loadtxt('../../data/raw/mldata/adult', delimiter=',',
                     usecols=(0, 2, 4, 10, 11, 12, 14))
    np.savez('../../data/clean/uci-adult.npz',
             data=raw[:, :-1], label=raw[:, -1].astype(int))
def ecoli():
    """Ecoli dataset: 7 continuous attributes; class 'imU' is positive."""
    filename = '../../data/raw/mldata/ecoli.data'
    data = np.loadtxt(filename, usecols=(1, 2, 3, 4, 5, 6, 7), dtype=float)
    raw_label = np.loadtxt(filename, usecols=(8, ), dtype=str)
    label = (raw_label == 'imU').astype(int)
    np.savez('../../data/clean/uci-ecoli.npz', data=data, label=label)
def optical_digits():
    """Optical-digits dataset: digit 8 is the positive class."""
    filename = '../../data/raw/mldata/optdigits'
    data = np.loadtxt(filename, delimiter=',', usecols=tuple(range(64)),
                      dtype=float)
    digits = np.loadtxt(filename, delimiter=',', usecols=(64, ), dtype=int)
    label = (digits == 8).astype(int)
    np.savez('../../data/clean/uci-optical-digits.npz', data=data, label=label)
def sat_image():
    """Sat-image dataset (libsvm format): class 4 is positive."""
    sparse_data, raw_label = load_svmlight_file(
        '../../data/raw/mldata/satimage.scale')
    label = (raw_label == 4).astype(int)
    np.savez('../../data/clean/uci-sat-image.npz',
             data=sparse_data.toarray(), label=label)
def pen_digits():
    """Pen-digits dataset (libsvm format): digit 5 is positive."""
    sparse_data, raw_label = load_svmlight_file(
        '../../data/raw/mldata/pendigits')
    label = (raw_label == 5).astype(int)
    np.savez('../../data/clean/uci-pen-digits.npz',
             data=sparse_data.toarray(), label=label)
def spectrometer():
    """Spectrometer dataset: 93 continuous attributes; class 44 positive."""
    filename = '../../data/raw/mldata/lrs.data'
    data = np.loadtxt(filename, usecols=tuple(range(10, 103)), dtype=float)
    raw_label = np.loadtxt(filename, usecols=(1, ), dtype=int)
    label = (raw_label == 44).astype(int)
    np.savez('../../data/clean/uci-spectrometer.npz', data=data, label=label)
def balance():
    """Balance-scale dataset: class 'B' (balanced) is positive."""
    filename = '../../data/raw/mldata/balance-scale.data'
    data = np.loadtxt(filename, delimiter=',', usecols=tuple(range(1, 5)),
                      dtype=float)
    raw_label = np.loadtxt(filename, delimiter=',', usecols=(0, ), dtype=str)
    label = (raw_label == 'B').astype(int)
    np.savez('../../data/clean/uci-balance.npz', data=data, label=label)
def car_eval_34():
    """Car-evaluation dataset: classes 'good' and 'vgood' are positive.

    Each categorical column is integer-encoded with a LabelEncoder; the
    last column (the class) is dropped from the feature matrix.

    Fix: the original pre-allocated a float ``data`` array that was
    immediately overwritten by the integer-encoded view -- the dead
    initialization is removed (saved array stays int, as before).
    """
    filename = '../../data/raw/mldata/car.data'
    raw = np.loadtxt(filename, delimiter=',', dtype=str)
    raw_label = raw[:, -1]
    encoded = np.zeros(raw.shape, dtype=int)
    # Encode each categorical column with an integer code.
    for f_idx in range(raw.shape[1]):
        encoded[:, f_idx] = LabelEncoder().fit_transform(raw[:, f_idx])
    data = encoded[:, :-1]
    # 'good' and 'vgood' together form the minority (positive) class.
    label = np.zeros((raw.shape[0], ), dtype=int)
    label[np.nonzero(raw_label == 'good')] = 1
    label[np.nonzero(raw_label == 'vgood')] = 1
    np.savez('../../data/clean/uci-car-eval-34.npz', data=data, label=label)
def car_eval_4():
    """Car-evaluation dataset: only class 'vgood' is positive.

    Each categorical column is integer-encoded with a LabelEncoder; the
    last column (the class) is dropped from the feature matrix.

    Fix: the original pre-allocated a float ``data`` array that was
    immediately overwritten by the integer-encoded view -- the dead
    initialization is removed (saved array stays int, as before).
    """
    filename = '../../data/raw/mldata/car.data'
    raw = np.loadtxt(filename, delimiter=',', dtype=str)
    raw_label = raw[:, -1]
    encoded = np.zeros(raw.shape, dtype=int)
    # Encode each categorical column with an integer code.
    for f_idx in range(raw.shape[1]):
        encoded[:, f_idx] = LabelEncoder().fit_transform(raw[:, f_idx])
    data = encoded[:, :-1]
    label = np.zeros((raw.shape[0], ), dtype=int)
    label[np.nonzero(raw_label == 'vgood')] = 1
    np.savez('../../data/clean/uci-car-eval-4.npz', data=data, label=label)
def isolet():
    """Isolet dataset: letters 1 and 2 ('A' and 'B') are positive."""
    filename = '../../data/raw/mldata/isolet.data'
    data = np.loadtxt(filename, delimiter=',', usecols=tuple(range(617)),
                      dtype=float)
    raw_label = np.loadtxt(filename, delimiter=',', usecols=(617, ),
                           dtype=float)
    label = np.zeros(raw_label.shape, dtype=int)
    label[np.nonzero((raw_label == 1.) | (raw_label == 2.))] = 1
    np.savez('../../data/clean/uci-isolet.npz', data=data, label=label)
def us_crime():
    """US communities-and-crime dataset.

    The 5 leading non-predictive columns are dropped; missing values
    (read as NaN) are mean-imputed, and the continuous target is
    binarised at 0.65.
    """
    raw = np.genfromtxt('../../data/raw/mldata/communities.data',
                        delimiter=',')
    raw = raw[:, 5:]
    # Replace missing values by the column mean.
    raw = Imputer(verbose=1).fit_transform(raw)
    data = raw[:, :-1]
    label = np.ravel(Binarizer(threshold=0.65).fit_transform(raw[:, -1]))
    np.savez('../../data/clean/uci-us-crime.npz', data=data, label=label)
def yeast_ml8():
    """Yeast multi-label dataset: samples carrying label 8.0 are positive."""
    sparse_data, raw_label = load_svmlight_file(
        '../../data/raw/mldata/yeast.svm', multilabel=True)
    label = np.array([int(8.0 in labels) for labels in raw_label], dtype=int)
    np.savez('../../data/clean/libsvm-yeast-ml8.npz',
             data=sparse_data.toarray(), label=label)
def yeast_me2():
    """Yeast dataset: class 'ME2' is the positive class."""
    filename = '../../data/raw/mldata/yeast.data'
    # The first column is categorical: integer-encode it and cast to float.
    first_col = np.atleast_2d(np.loadtxt(filename, usecols=(0,), dtype=str)).T
    encoded = np.ravel(
        LabelEncoder().fit_transform(first_col[:, 0]).astype(float))
    rest = np.loadtxt(filename, usecols=tuple(range(1, 9)), dtype=float)
    data = np.hstack((np.atleast_2d(encoded).T, rest))
    raw_label = np.ravel(np.loadtxt(filename, usecols=(9, ), dtype=str))
    label = (raw_label == 'ME2').astype(int)
    np.savez('../../data/clean/uci-yeast-me2.npz', data=data, label=label)
def scene():
    """Scene multi-label dataset: multi-labelled samples are positive."""
    sparse_data, raw_label = load_svmlight_file(
        '../../data/raw/mldata/scene.svm', multilabel=True)
    # Positive class: samples carrying more than one label.
    label = np.array([int(len(labels) > 1) for labels in raw_label],
                     dtype=int)
    np.savez('../../data/clean/libsvm-scene.npz',
             data=sparse_data.toarray(), label=label)
def movement_libras():
    """Movement-libras dataset: class 1 is positive."""
    filename = '../../data/raw/mldata/movement_libras.data'
    data = np.loadtxt(filename, delimiter=',', usecols=tuple(range(90)),
                      dtype=float)
    raw_label = np.loadtxt(filename, delimiter=',', usecols=(90, ),
                           dtype=float)
    label = (raw_label == 1.).astype(int)
    np.savez('../../data/clean/uci-movement-libras.npz', data=data,
             label=label)
def sick():
    """Sick (thyroid) dataset.

    Rows with a '?' missing-value marker in the first 26 attributes are
    dropped, categorical columns are integer-encoded, and samples whose
    class string contains "sick" are positive.
    """
    filename = '../../data/raw/mldata/sick.data'
    attrs = np.loadtxt(filename, delimiter=',', usecols=tuple(range(26)),
                       dtype=str)
    classes = np.loadtxt(filename, delimiter=',', usecols=(29, ), dtype=str)
    kept_rows = []
    kept_classes = []
    # Keep only complete rows (no '?' marker anywhere).
    for s_idx in range(attrs.shape[0]):
        if not np.any(attrs[s_idx, :] == '?'):
            kept_rows.append(attrs[s_idx, :])
            kept_classes.append(classes[s_idx])
    kept_rows = np.array(kept_rows)
    data = np.zeros(kept_rows.shape, dtype=float)
    data[:, 0] = kept_rows[:, 0].astype(float)
    # Integer-encode the categorical columns.
    for i in (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
              12, 13, 14, 15, 16, 18, 20, 22, 24):
        data[:, i] = LabelEncoder().fit_transform(
            kept_rows[:, i]).astype(float)
    # The remaining measurement columns are already numeric.
    for i in (17, 19, 21, 23):
        data[:, i] = kept_rows[:, i].astype(float)
    label = np.array([int("sick" in c) for c in kept_classes], dtype=int)
    np.savez('../../data/clean/uci-sick.npz', data=data, label=label)
def sick():
    # NOTE(review): this is a byte-for-byte duplicate of the sick()
    # definition above; being defined later, it silently shadows the
    # first one.  One of the two copies should be removed.
    # sick dataset
    filename = '../../data/raw/mldata/sick.data'
    # Read the data for the 26th first dimension
    tmp_data = np.loadtxt(filename, delimiter = ',', usecols = tuple(range(26)), dtype=str)
    tmp_data_3 = np.loadtxt(filename, delimiter = ',', usecols = (29, ), dtype=str)
    tmp_data_2 = []
    tmp_data_4 = []
    # Keep only the rows without a '?' missing-value marker.
    for s_idx in range(tmp_data.shape[0]):
        if not np.any(tmp_data[s_idx, :] == '?'):
            tmp_data_2.append(tmp_data[s_idx, :])
            tmp_data_4.append(tmp_data_3[s_idx])
    tmp_data_2 = np.array(tmp_data_2)
    data = np.zeros(tmp_data_2.shape, dtype=float)
    data[:, 0] = tmp_data_2[:, 0].astype(float)
    # encode the category
    f_idx = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 18, 20, 22, 24]
    for i in f_idx:
        le = LabelEncoder()
        data[:, i] = le.fit_transform(tmp_data_2[:, i]).astype(float)
    # These columns are already numeric; copy them through.
    f_idx = [17, 19, 21, 23]
    for i in f_idx:
        data[:, i] = tmp_data_2[:, i].astype(float)
    # Create the label: any class string containing "sick" is positive.
    label = np.zeros((len(tmp_data_4), ), dtype=int)
    for s_idx in range(len(tmp_data_4)):
        if "sick" in tmp_data_4[s_idx]:
            label[s_idx] = 1
    np.savez('../../data/clean/uci-sick.npz', data=data, label=label)
def glass():
    """Glass dataset: the rarest class becomes the positive class."""
    filename = '../../data/raw/mldata/glass.data'
    data = np.loadtxt(filename, delimiter=',', usecols=tuple(range(1, 10)),
                      dtype=float)
    raw_label = np.loadtxt(filename, delimiter=',', usecols=(10, ), dtype=int)
    # Pick the least frequent class as the positive (minority) class.
    counts = Counter(raw_label)
    label = (raw_label == min(counts, key=counts.get)).astype(int)
    np.savez('../../data/clean/uci-glass.npz', data=data, label=label)
def ionosphere():
    """Ionosphere dataset: class 'b' (bad) is positive."""
    filename = '../../data/raw/mldata/ionosphere.data'
    data = np.loadtxt(filename, delimiter=',', usecols=tuple(range(0, 34)),
                      dtype=float)
    raw_label = np.loadtxt(filename, delimiter=',', usecols=(34, ), dtype=str)
    label = (raw_label == 'b').astype(int)
    np.savez('../../data/clean/uci-ionosphere.npz', data=data, label=label)
def phoneme():
    """Phoneme (ELENA) dataset: the label column is already binary."""
    filename = '../../data/raw/mldata/phoneme.dat'
    data = np.loadtxt(filename, usecols=tuple(range(0, 5)), dtype=float)
    label = np.loadtxt(filename, usecols=(5, ), dtype=int)
    np.savez('../../data/clean/elena-phoneme.npz', data=data, label=label)
def arrhythmia():
    """Arrhythmia dataset: class 6 is positive; NaNs are mean-imputed."""
    raw = np.genfromtxt('../../data/raw/mldata/arrhythmia.data',
                        delimiter=',')
    # Replace missing values by the column mean.
    raw = Imputer(verbose=1).fit_transform(raw)
    data = raw[:, :-1]
    label = (raw[:, -1] == 6).astype(int)
    np.savez('../../data/clean/uci-arrhythmia.npz', data=data, label=label)
def solar_flare():
    """Solar-flare dataset: any sample with flare count > 0 is positive.

    The first ten (categorical) columns are integer-encoded with a
    LabelEncoder; the second-to-last column is the flare count.

    Fix: the original pre-allocated a float ``data`` array that was
    immediately overwritten by the int-encoded array -- the dead
    initialization is removed (saved array stays int, as before).
    """
    raw = np.loadtxt('../../data/raw/mldata/flare.data', delimiter=' ',
                     dtype=str)
    raw_label = raw[:, -2].astype(int)
    # Only the first ten columns are attributes.
    data = np.zeros((raw.shape[0], 10), dtype=int)
    for f_idx in range(data.shape[1]):
        data[:, f_idx] = LabelEncoder().fit_transform(raw[:, f_idx])
    label = (raw_label > 0).astype(int)
    np.savez('../../data/clean/uci-solar-flare-m0.npz', data=data,
             label=label)
def wine_quality_white():
    """White-wine quality dataset: quality <= 4 is the positive class."""
    filename = '../../data/raw/mldata/winequality-white.csv'
    # The first 11 columns of the csv file are the features.
    data = np.loadtxt(filename, usecols=tuple(range(11)), delimiter=';',
                      dtype=float)
    quality = np.loadtxt(filename, usecols=(11,), delimiter=';', dtype=int)
    # Binarise at 4, then invert: 1=0 and 0=1, so low quality is positive.
    label = np.ravel(
        np.abs(Binarizer(threshold=4).fit_transform(quality) - 1))
    np.savez('../../data/clean/uci-wine-quality-white.npz', data=data,
             label=label)
def letter_recognition():
    """Letter-recognition dataset: letter 'Z' is the positive class."""
    filename = '../../data/raw/mldata/letter-recognition.data'
    data = np.loadtxt(filename, usecols=tuple(range(1, 17)), delimiter=',',
                      dtype=float)
    # The letter is the first column, read as a string.
    letters = np.ravel(np.loadtxt(filename, usecols=(0,), delimiter=',',
                                  dtype=str))
    label = (letters == 'Z').astype(int)
    np.savez('../../data/clean/uci-letter-recognition-z.npz', data=data,
             label=label)
def webpage():
    """Webpage (w7a, libsvm format) dataset: label -1 is remapped to 0."""
    sparse_data, raw_label = load_svmlight_file(
        '../../data/raw/mldata/w7a', multilabel=True)
    label = np.ravel(np.array(raw_label, dtype=int))
    label[np.nonzero(label == -1)] = 0
    np.savez('../../data/clean/libsvm-webpage-w7a.npz',
             data=sparse_data.toarray(), label=label)
def ozone():
    """Ozone (one-hour) dataset.

    The first column (the date) is skipped; missing values are read as
    NaN and mean-imputed; the final column is the binary label.
    """
    raw = np.genfromtxt('../../data/raw/mldata/onehr.data', delimiter=',',
                        usecols=tuple(range(1, 74)))
    raw = Imputer(verbose=1).fit_transform(raw)
    np.savez('../../data/clean/uci-ozone.npz',
             data=raw[:, :-1].astype(float), label=raw[:, -1].astype(int))
def mammography():
    """Mammography dataset: 6 features plus a binary label column."""
    filename = '../../data/raw/mldata/phpn1jVwe'
    data = np.loadtxt(filename, usecols=tuple(range(6)), delimiter=',',
                      dtype=float)
    label = np.loadtxt(filename, usecols=(6, ), delimiter=',', dtype=int)
    np.savez('../../data/clean/uci-mammography.npz', data=data, label=label)
def convert_mat_to_npz(filename, dir_to_save):
    """Convert a .mat file holding 'mat' and 'label' variables to .npz.

    The output keeps the input's base name with a .npz extension and is
    written inside `dir_to_save`.
    """
    from os.path import basename, join, splitext
    matfile = loadmat(filename)
    # Two variables of interest: the data matrix and the label vector.
    data = matfile['mat'].astype(float)
    label = np.ravel(matfile['label']).astype(int)
    out_path = join(dir_to_save, splitext(basename(filename))[0] + '.npz')
    np.savez(out_path, data=data, label=label)
def convert(convert_func, out_file_name, force):
    """Run `convert_func` on the cleaned-data path unless it exists.

    Parameters
    ----------
    convert_func : callable
        Called with the full output path when conversion is needed.
    out_file_name : str
        File name to create under '../data/clean/'.
    force : bool
        When True, convert even if the output file already exists.
    """
    from os.path import exists
    path = '../data/clean/' + out_file_name
    # Fix: the original called the undefined name `exist`, raising
    # NameError whenever force was False; use os.path.exists instead.
    if force or not exists(path):
        convert_func(path)
if __name__ == "__main__":
    # Historical one-off conversions, kept for reference; uncomment a
    # line to regenerate the corresponding .npz file.
    # abalone_19()
    # adult()
    # ecoli()
    # optical_digits()
    # sat_image()
    # pen_digits()
    # abalone_7()
    # spectrometer()
    # balance()
    # car_eval_34()
    # car_eval_4()
    # isolet()
    # us_crime()
    # yeast_ml8()
    # yeast_me2()
    # scene()
    # movement_libras()
    # sick()
    # glass()
    # ionosphere()
    # phoneme()
    # # force_convertion=False
    # # convert(convert_coil2000, 'coil_2000.npz', force_convertion)
    # arrhythmia()
    # solar_flare()
    # wine_quality_white()
    # letter_recognition()
    # webpage()
    # ozone()
    # mammography()
    # Define the path to get the mat file
    path_to_mat = "../../data/clean/mat"
    # Define the path to store the npz file
    path_to_npz = "../../data/clean/npz"
    # Convert each file from mat to npz
    # Import the necessary library
    from os import listdir
    from os.path import isfile, join
    # NOTE(review): raises FileNotFoundError when path_to_mat does not
    # exist -- the raw .mat files must be in place before running this.
    matfiles = [f for f in listdir(path_to_mat) if isfile(join(path_to_mat, f))]
    # Go through each file
    for filename_mat in matfiles:
        # Convert the current file
        convert_mat_to_npz(join(path_to_mat, filename_mat), path_to_npz)
| |
"""
Module to provide ceph control with salt.
:depends: - ceph_cfg Python module
.. versionadded:: 2016.11.0
"""
import logging
# Module-level logger for this execution module.
log = logging.getLogger(__name__)
# Salt loads this module under the virtual name "ceph".
__virtualname__ = "ceph"
# ceph_cfg is an optional dependency; __virtual__() below disables the
# whole module when it cannot be imported.
try:
    import ceph_cfg
    HAS_CEPH_CFG = True
except ImportError:
    HAS_CEPH_CFG = False
def __virtual__():
    """Only load this module when the ceph_cfg library is importable."""
    if not HAS_CEPH_CFG:
        reason = "ceph_cfg unavailable: {} execution module cant be loaded ".format(
            __virtualname__
        )
        return False, reason
    return __virtualname__
def partition_list():
    """
    List partitions by disk.

    CLI Example: ``salt '*' ceph.partition_list``
    """
    return ceph_cfg.partition_list()
def partition_list_osd():
    """
    List all OSD data partitions by partition.

    CLI Example: ``salt '*' ceph.partition_list_osd``
    """
    return ceph_cfg.partition_list_osd()
def partition_list_journal():
    """
    List all OSD journal partitions by partition.

    CLI Example: ``salt '*' ceph.partition_list_journal``
    """
    return ceph_cfg.partition_list_journal()
def osd_discover():
    """
    List all OSDs by cluster.

    CLI Example: ``salt '*' ceph.osd_discover``
    """
    return ceph_cfg.osd_discover()
def partition_is(dev):
    """
    Check whether a given device path is a partition or a full disk.

    CLI Example: ``salt '*' ceph.partition_is /dev/sdc1``
    """
    return ceph_cfg.partition_is(dev)
def zap(target=None, **kwargs):
    """
    Destroy the partition table and content of a given disk.

    CLI Example: ``salt '*' ceph.zap 'dev'='/dev/vdc' 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    dev -- the block device to format.
    cluster_name -- cluster name; defaults to ``ceph``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    """
    # Positional `target` is deprecated; fold it into kwargs['dev'],
    # letting an explicit 'dev' kwarg win.
    if target is not None:
        log.warning("Deprecated use of function, use kwargs")
        kwargs["dev"] = kwargs.get("dev", target)
    return ceph_cfg.zap(**kwargs)
def osd_prepare(**kwargs):
    """
    Prepare an OSD.

    CLI Example: ``salt '*' ceph.osd_prepare 'osd_dev'='/dev/vdc' 'journal_dev'='device' 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid' 'osd_fs_type'='xfs'``

    osd_dev -- the device to store the OSD data on.
    journal_dev -- the journal device; defaults to ``osd_dev``.
    cluster_name -- cluster name; defaults to ``ceph``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    osd_fs_type -- file system for the OSD data; defaults to ``xfs``.
    osd_uuid -- OSD data UUID; if set, returns early when such an OSD exists.
    journal_uuid -- OSD journal UUID; if set, returns early when such an OSD exists.
    """
    return ceph_cfg.osd_prepare(**kwargs)
def osd_activate(**kwargs):
    """
    Activate an OSD.

    CLI Example: ``salt '*' ceph.osd_activate 'osd_dev'='/dev/vdc'``
    """
    return ceph_cfg.osd_activate(**kwargs)
def keyring_create(**kwargs):
    """
    Create a keyring for the cluster.

    CLI Example: ``salt '*' ceph.keyring_create 'keyring_type'='admin' 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    keyring_type (required) -- one of ``admin``, ``mon``, ``osd``, ``rgw``, ``mds``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.keyring_create(**kwargs)
def keyring_save(**kwargs):
    """
    Save a keyring locally.

    CLI Example: ``salt '*' ceph.keyring_save 'keyring_type'='admin' 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    keyring_type (required) -- one of ``admin``, ``mon``, ``osd``, ``rgw``, ``mds``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.keyring_save(**kwargs)
def keyring_purge(**kwargs):
    """
    Delete a keyring for the cluster.

    CLI Example: ``salt '*' ceph.keyring_purge 'keyring_type'='admin' 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    keyring_type (required) -- one of ``admin``, ``mon``, ``osd``, ``rgw``, ``mds``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.

    If no ceph config file is found, this command will fail.
    """
    return ceph_cfg.keyring_purge(**kwargs)
def keyring_present(**kwargs):
    """
    Return ``True`` if the keyring is present on disk, otherwise ``False``.

    CLI Example: ``salt '*' ceph.keyring_present 'keyring_type'='admin' 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    keyring_type (required) -- one of ``admin``, ``mon``, ``osd``, ``rgw``, ``mds``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.keyring_present(**kwargs)
def keyring_auth_add(**kwargs):
    """
    Add a keyring to the authorized list.

    CLI Example: ``salt '*' ceph.keyring_auth_add 'keyring_type'='admin' 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    keyring_type (required) -- one of ``admin``, ``mon``, ``osd``, ``rgw``, ``mds``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.keyring_auth_add(**kwargs)
def keyring_auth_del(**kwargs):
    """
    Remove a keyring from the authorised list.

    CLI Example: ``salt '*' ceph.keyring_auth_del 'keyring_type'='admin' 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    keyring_type (required) -- one of ``admin``, ``mon``, ``osd``, ``rgw``, ``mds``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.keyring_auth_del(**kwargs)
def mon_is(**kwargs):
    """
    Return ``True`` if the target is a mon node, otherwise ``False``.

    CLI Example: ``salt '*' ceph.mon_is 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    cluster_name -- cluster name; defaults to ``ceph``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    """
    return ceph_cfg.mon_is(**kwargs)
def mon_status(**kwargs):
    """
    Get status from the mon daemon.

    CLI Example: ``salt '*' ceph.mon_status 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    # Delegates to ceph_cfg.status (there is no ceph_cfg.mon_status).
    return ceph_cfg.status(**kwargs)
def mon_quorum(**kwargs):
    """
    Return ``True`` if the mon daemon is in the quorum, otherwise ``False``.

    CLI Example: ``salt '*' ceph.mon_quorum 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.mon_quorum(**kwargs)
def mon_active(**kwargs):
    """
    Return ``True`` if the mon daemon is running, otherwise ``False``.

    CLI Example: ``salt '*' ceph.mon_active 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.mon_active(**kwargs)
def mon_create(**kwargs):
    """
    Create a mon node.

    CLI Example: ``salt '*' ceph.mon_create 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.mon_create(**kwargs)
def rgw_pools_create(**kwargs):
    """
    Create the pools required by rgw.

    CLI Example: ``salt '*' ceph.rgw_pools_create``

    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.rgw_pools_create(**kwargs)
def rgw_pools_missing(**kwargs):
    """
    Show the pools missing for rgw.

    CLI Example: ``salt '*' ceph.rgw_pools_missing``

    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.rgw_pools_missing(**kwargs)
def rgw_create(**kwargs):
    """
    Create an rgw.

    CLI Example: ``salt '*' ceph.rgw_create 'name'='rgw.name' 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    name (required) -- the RGW client name; must start with ``rgw.``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.rgw_create(**kwargs)
def rgw_destroy(**kwargs):
    """
    Remove an rgw.

    CLI Example: ``salt '*' ceph.rgw_destroy 'name'='rgw.name' 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    name (required) -- the RGW client name; must start with ``rgw.``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.rgw_destroy(**kwargs)
def mds_create(**kwargs):
    """
    Create an mds.

    CLI Example: ``salt '*' ceph.mds_create 'name'='mds.name' 'port'=1000 'addr'='fqdn.example.org' 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    name (required) -- the MDS name; must start with ``mds.``.
    port (required) -- port the MDS will listen on.
    addr (required) -- address or IP address for the MDS to listen on.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.mds_create(**kwargs)
def mds_destroy(**kwargs):
    """
    Remove an mds.

    CLI Example: ``salt '*' ceph.mds_destroy 'name'='mds.name' 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    name (required) -- the MDS name; must start with ``mds.``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.mds_destroy(**kwargs)
def keyring_auth_list(**kwargs):
    """
    List all cephx authorization keys.

    CLI Example: ``salt '*' ceph.keyring_auth_list 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    cluster_name -- cluster name; defaults to ``ceph``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    """
    return ceph_cfg.keyring_auth_list(**kwargs)
def pool_list(**kwargs):
    """
    List all pools.

    CLI Example: ``salt '*' ceph.pool_list 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    cluster_name -- cluster name; defaults to ``ceph``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    """
    return ceph_cfg.pool_list(**kwargs)
def pool_add(pool_name, **kwargs):
    """
    Create a pool.

    CLI Example: ``salt '*' ceph.pool_add pool_name 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    cluster_name -- cluster name; defaults to ``ceph``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    pg_num -- defaults to 8.
    pgp_num -- defaults to ``pg_num``.
    pool_type -- ``replicated`` or ``erasure``.
    erasure_code_profile -- the erasure code profile to use.
    crush_ruleset -- the crush map rule set.
    """
    return ceph_cfg.pool_add(pool_name, **kwargs)
def pool_del(pool_name, **kwargs):
    """
    Delete a pool.

    CLI Example: ``salt '*' ceph.pool_del pool_name 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    cluster_name -- cluster name; defaults to ``ceph``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    """
    return ceph_cfg.pool_del(pool_name, **kwargs)
def purge(**kwargs):
    """
    Purge the ceph configuration on this node.

    CLI Example: ``salt '*' ceph.purge 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    cluster_name -- cluster name; defaults to ``ceph``.
    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    """
    return ceph_cfg.purge(**kwargs)
def ceph_version():
    """
    Get the installed ceph version.

    CLI Example: ``salt '*' ceph.ceph_version``
    """
    return ceph_cfg.ceph_version()
def cluster_quorum(**kwargs):
    """
    Get the cluster's quorum status.

    CLI Example: ``salt '*' ceph.cluster_quorum 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.cluster_quorum(**kwargs)
def cluster_status(**kwargs):
    """
    Get the cluster status, including health when in quorum.

    CLI Example: ``salt '*' ceph.cluster_status 'cluster_name'='ceph' 'cluster_uuid'='cluster_uuid'``

    cluster_uuid -- cluster UUID; defaults to value in the ceph config file.
    cluster_name -- cluster name; defaults to ``ceph``.
    """
    return ceph_cfg.cluster_status(**kwargs)
| |
from django.contrib.auth.models import User
from core.tests import datautil
from package.models import Category, Package, Version
from profiles.models import Profile
def load():
    """Populate the test database with the Django CMS fixture data.

    Creates the 'frameworks' category, the Django CMS package, every
    participant user account, and the released versions of the package,
    then resets the database sequences so that subsequent inserts do not
    collide with the fixed primary keys used here.
    """
    category, _ = Category.objects.get_or_create(
        pk=2,
        description=u'Large efforts that combine many python modules or apps. Examples include Django, Pinax, and Satchmo. Most CMS falls into this category.',
        show_pypi=True,
        title_plural=u'Frameworks',
        title=u'Framework',
        slug=u'frameworks',
    )

    Package.objects.get_or_create(
        pk=6,
        category=category,
        title=u'Django CMS',
        created_by=None,
        repo_watchers=967,
        pypi_url=u'http://pypi.python.org/pypi/django-cms',
        pypi_downloads=26257,
        last_modified_by=None,
        repo_url=u'https://github.com/divio/django-cms',
        participants=u'chrisglass,digi604,erobit,fivethreeo,ojii,stefanfoulis,pcicman,DrMeers,brightwhitefox,FlashJunior,philomat,jezdez,havan,acdha,m000,hedberg,piquadrat,spookylukey,izimobil,ulope,emiquelito,aaloy,lasarux,yohanboniface,aparo,jsma,johbo,ionelmc,quattromic,almost,specialunderwear,mitar,yml,pajusmar,diofeher,marcor,cortextual,hysia,dstufft,ssteinerx,oversize,jalaziz,tercerojista,eallik,f4nt,kaapa,mbrochh,srj55,dz,mathijs-dumon,sealibora,cyberj,adsworth,tokibito,DaNmarner,IanLewis,indexofire,bneijt,tehfink,PPvG,seyhunak,pigletto,fcurella,gleb-chipiga,beshrkayali,kinea,lucasvo,jordanjambazov,tonnzor,centralniak,arthur-debert,bzed,jasondavies,nimnull,limpbrains,pvanderlinden,sleytr,sublimevelo,netpastor,dtt101,fkazimierczak,merlex,mrlundis,restless,eged,shanx,ptoal',
        repo_commits=0,
        repo_forks=283,
        slug=u'django-cms',
        repo_description=u'An Advanced Django CMS.',
    )

    # One row per participant user account:
    # (pk, username, last_login, password, email, date_joined).
    # Every account is active with blank first/last names; only pk=1
    # (audreyr) is flagged as staff + superuser, handled in the loop below.
    user_rows = [
        (129, u'unbracketed', u'2010-08-28 20:48:35',
         u'sha1$a5c47$0e9be0aee0cb60648a3e0a70f462e0943a46aeab',
         u'brian@unbracketed.com', u'2010-08-28 20:47:52'),
        (50, u'ojii', u'2011-03-09 14:50:02',
         u'sha1$a7428$563858792ba94c8706db374eed9d2708536ea2a5',
         u'jonas.obrist@divio.ch', u'2010-08-18 03:35:23'),
        (43, u'vvarp', u'2010-09-29 12:08:01',
         u'sha1$ed0c0$ec7ed6b92a963fd02cd0e1e1fcd90d66591a29b8',
         u'maciek@id43.net', u'2010-08-17 18:43:12'),
        (183, u'onjin', u'2010-09-09 07:47:11',
         u'sha1$1965c$9b8cc38cec3672b515787c227a3ef7ceea2ae785',
         u'onjinx@gmail.com', u'2010-09-07 02:23:11'),
        (87, u'jezdez', u'2011-02-09 09:29:33',
         u'sha1$97523$0d3cdbbd2930052fe89ebf38ef7267bc85479032',
         u'jannis@leidel.info', u'2010-08-21 04:14:03'),
        (204, u'flmendes', u'2010-09-14 18:01:16',
         u'sha1$68ef6$750d02a7c6a1b8d14adb31a8374cb18d6f37708e',
         u'flmendes@gmail.com', u'2010-09-08 22:49:34'),
        (1, u'audreyr', u'2011-03-13 23:44:00',
         u'sha1$c84c1$dfd3748f63f48e2639d3c4d1caa113acf6bde51f',
         u'audreyr@gmail.com', u'2010-08-15 22:15:50'),
        (231, u'digi604', u'2010-09-12 07:34:07',
         u'sha1$0f7f5$523594505138d1182fa413826c02b1e32ee8b95c',
         u'digi@treepy.com', u'2010-09-12 07:32:42'),
        (233, u'mikl', u'2010-09-12 08:57:34',
         u'sha1$c4af2$e50af5facac17d8b6cd83e7ccc06dee27e33a6a1',
         u'mikkel@hoegh.org', u'2010-09-12 08:56:36'),
        (239, u'arthurk', u'2010-09-12 19:13:47',
         u'sha1$dbc73$f0d8c4476121c8a66fae45f7131db9df71e9aab4',
         u'arthur@arthurkoziel.com', u'2010-09-12 19:12:55'),
        (241, u'juacompe', u'2010-09-13 03:23:21',
         u'sha1$d08ef$4f9c0272cafe2ce6b5619c79f8ecf7f6dd3c024e',
         u'juacompe@gmail.com', u'2010-09-13 03:10:39'),
        (248, u'kocakafa', u'2010-09-13 10:09:36',
         u'sha1$624bd$101a762ea78432c4a4c25c3a4f2558e14126b0d5',
         u'cemrekutluay@gmail.com', u'2010-09-13 10:08:40'),
        (252, u'dmoisset', u'2010-11-09 09:21:27',
         u'sha1$0b205$20bbdba061603ed658ef772d360dd30f34b6aad6',
         u'dmoisset@machinalis.com', u'2010-09-13 13:53:32'),
        (262, u'eged', u'2010-10-06 04:22:27',
         u'sha1$b81b9$76327ae4d11587d816a7dc0da89b71c2e36be73d',
         u'viliam.segeda@gmail.com', u'2010-09-14 06:50:44'),
        (263, u'rtpm', u'2010-09-14 07:48:22',
         u'sha1$7d578$d69bb08ff132271fa1725245ec79dfb8296a0a4b',
         u'rtpm@gazeta.pl', u'2010-09-14 07:47:29'),
        (268, u'flynnguy', u'2010-09-14 09:21:21',
         u'sha1$615f8$2286dc5ce690fcd70ebc796f7ffd9742a0fbce8e',
         u'chris@flynnguy.com', u'2010-09-14 09:20:08'),
        (282, u'mcosta', u'2010-09-14 18:41:59',
         u'sha1$7a4e1$8f37adee1eaa107354d0400cfbd7e7a678506aa9',
         u'm.costacano@gmail.com', u'2010-09-14 18:41:02'),
        (284, u'chromano', u'2010-10-13 12:50:44',
         u'sha1$7c7b3$5c009b3002d04f2ef3db01a17501f8d852a8e3ee',
         u'chromano@gmail.com', u'2010-09-14 19:30:41'),
        (298, u'robedwards', u'2010-09-15 07:50:34',
         u'sha1$cc020$29594e7501c4697ba86c8ff0698f7d5eaf16ff14',
         u'rob@brycefarrah.com', u'2010-09-15 07:42:18'),
        (32, u'markusgattol', u'2011-01-07 04:14:45',
         u'sha1$289e4$fb00c9de77f991b423ba91edcd91f14ab91afcd7',
         u'markus.gattol@sunoano.org', u'2010-08-17 14:05:10'),
        (338, u'iamsk', u'2010-09-17 04:26:17',
         u'sha1$7f703$cf45848f28e30adfee3a30cc329e66a14d25bbce',
         u'iamsk.info@gmail.com', u'2010-09-17 04:14:36'),
        (342, u'kiello', u'2010-09-17 05:40:20',
         u'sha1$72adc$31bd0fc2440d9ded4b816e6812165f3565801807',
         u'mauro.doglio@gmail.com', u'2010-09-17 05:39:00'),
        (344, u'nimnull', u'2010-09-17 07:32:43',
         u'sha1$08b73$b8c7e885cffcc351540dbae0b5d35cdf3123a3c2',
         u'nimnull@gmail.com', u'2010-09-17 07:31:45'),
        (345, u'dblkey', u'2010-09-17 08:17:44',
         u'sha1$1f412$5c7a8e402e0f9e588632d6662ec6da3029eaf72f',
         u'thedoublekey@gmail.com', u'2010-09-17 08:16:44'),
        (348, u'netpastor', u'2010-09-17 10:18:34',
         u'sha1$9971f$cffb9546d3f399a0c6e7f34ad57144e8c9a66b32',
         u'vadimshatalov@yandex.ru', u'2010-09-17 10:17:26'),
        (355, u'limpbrains', u'2010-09-17 17:45:01',
         u'sha1$d521e$ac48433870ff69fbbdf8e67b6d5b9341b3f70565',
         u'limpbrains@mail.ru', u'2010-09-17 17:43:45'),
        (388, u'mrbox', u'2010-11-18 13:01:33',
         u'sha1$c34a1$c228bbe096cd1cce6f6121aa3502f88a3df271a1',
         u'jakub@paczkowski.eu', u'2010-09-21 09:20:54'),
        (401, u'archatas', u'2010-10-14 12:54:28',
         u'sha1$42d2e$39b36644f6246297e77af51837d898bd784b62ff',
         u'aidasbend@yahoo.com', u'2010-09-21 23:45:16'),
        (295, u'mat', u'2010-09-15 07:49:20',
         u'sha1$2cb8b$09abc15052a91cd123c32f1a3cd1402a3f5759bf',
         u'mat@apinc.org', u'2010-09-15 07:08:21'),
        (36, u'joshourisman', u'2011-02-11 08:41:47',
         u'sha1$8a414$44a1517f3443f3c2094d760fcf10c06ac6fca38f',
         u'josh@joshourisman.com', u'2010-08-17 14:53:18'),
        (444, u'piquadrat', u'2010-09-28 04:51:17',
         u'sha1$e048a$fc0ea2dc56c3ec7a4ea3c2af981d0e5633f0a1b6',
         u'piquadrat@gmail.com', u'2010-09-28 04:49:14'),
        (422, u'evotech', u'2011-03-05 11:33:42',
         u'sha1$c4056$24151ba166330bc8a113432f35d549bec8e603de',
         u'ivzak@yandex.ru', u'2010-09-24 05:41:49'),
        (449, u'partizan', u'2010-09-28 17:58:49',
         u'sha1$c5658$5738a1688c1ed065eeda86eed5441b4a3f564dff',
         u'psychotechnik@gmail.com', u'2010-09-28 13:03:59'),
        (157, u'feuervogel', u'2010-10-21 07:46:40',
         u'sha1$b62ba$b0002edb8ce814228b3812112f2878d44dd880ee',
         u'jumo@gmx.de', u'2010-08-31 13:47:05'),
        (457, u'LukaszDziedzia', u'2010-11-30 05:44:58',
         u'sha1$651bc$71ea97a3322ea5720884654a6fa360f415fca698',
         u'l.dziedzia@gmail.com', u'2010-09-29 07:43:26'),
        (462, u'emencia', u'2010-11-15 16:25:23',
         u'sha1$28466$0cd6b9fbb628c2837c0e4dcdc3e433aed1174ead',
         u'roger@emencia.com', u'2010-09-29 12:00:32'),
        (271, u'zenweasel', u'2010-11-10 22:41:53',
         u'sha1$705e8$5759884e7222e061d08de3cbff31b53b068fd266',
         u'brent@thebuddhalodge.com', u'2010-09-14 11:35:25'),
        (143, u'spookylukey', u'2011-02-19 17:09:12',
         u'sha1$70e5d$50c6a37cbac336cf1a08f4672bfa2002b4d2a55f',
         u'L.Plant.98@cantab.net', u'2010-08-30 08:33:49'),
        (433, u'avoine', u'2010-12-03 10:57:01',
         u'sha1$9d041$240309830d9ca972b2dc2fe24158e01ee7ba4a9d',
         u'patrick@koumbit.org', u'2010-09-26 16:31:53'),
        (554, u'ethan', u'2010-12-15 15:59:39',
         u'sha1$92dd1$37703fa27808c902fcd58792936afe41c02a70d0',
         u'Ethan.Leland@gmail.com', u'2010-10-20 18:23:30'),
        (448, u'chem', u'2010-12-23 11:47:50',
         u'sha1$f45bb$40332f58b55706b6f6059f855b18b3cd588b8948',
         u'chemt@ukr.net', u'2010-09-28 11:36:56'),
        (470, u'wires', u'2010-09-30 10:26:07',
         u'sha1$e15d5$a18fc4b6f8b628cc6fb6ae0d8133a423e9ad1d1e',
         u'jelle@defekt.nl', u'2010-09-30 10:20:20'),
        (562, u'rasca', u'2011-01-06 11:59:42',
         u'sha1$edd41$bd2c72f9c2c2b59aac5775bbc59d4b529494aabf',
         u'rasca7@hotmail.com', u'2010-10-24 12:24:08'),
        (86, u'justhamade', u'2011-03-06 18:57:20',
         u'sha1$b7a5a$53b2cb1cd3c20a2cee79b170b56ea88ec73d9685',
         u'justhamade@gmail.com', u'2010-08-21 00:11:33'),
        (73, u'slav0nic', u'2011-03-13 16:46:13',
         u'sha1$13d6f$fafddd832ac31aff59ec6ff155e4d5284e675c56',
         u'slav0nic0@gmail.com', u'2010-08-19 06:24:12'),
        (504, u'Fantomas42', u'2011-03-02 08:55:17',
         u'sha1$1166e$cc6971b2eb92eeed5ea0d43f896dcc46a47102eb',
         u'fantomas42@gmail.com', u'2010-10-07 09:26:41'),
        (610, u'globalnamespace', u'2010-11-04 13:38:34',
         u'sha1$d3fbf$f3a1fc7fe6ca203ace449497d437cf06c67e905d',
         u'mbest@pendragon.org', u'2010-11-04 13:37:57'),
        (621, u'btubbs', u'2010-11-06 19:45:21',
         u'sha1$6ebe8$04ff06448cbf632a9d54f13e7d3c5b808e08b528',
         u'brent.tubbs@gmail.com', u'2010-11-06 19:36:22'),
        (651, u'HounD', u'2010-11-15 16:02:52',
         u'sha1$e5fc1$0ad97a61ed9bb34904e9df36ba4bbb4eca7c35c9',
         u'vladshikhov@gmail.com', u'2010-11-13 00:45:44'),
        (663, u'encinas', u'2010-11-15 10:11:00',
         u'sha1$900ea$2044dfec30282981ce4094db6a0c7f1d9bba0ca9',
         u'list@encinas-fernandez.eu', u'2010-11-15 10:05:27'),
        (688, u'nasp', u'2010-11-20 22:31:48',
         u'sha1$ed6f2$964b18357a346a3063ab299fbe34f38268aaf41f',
         u'charette.s@gmail.com', u'2010-11-20 22:27:37'),
        (661, u'ralphleyga', u'2011-03-11 04:32:15',
         u'sha1$52255$9263926740de1d194152697f9f9da2466b547ce4',
         u'ralphfleyga@gmail.com', u'2010-11-15 08:46:31'),
        (766, u'xigit', u'2010-12-06 04:28:14',
         u'sha1$917d2$cbead9bd51c7b47651e92dfd315485a054794187',
         u'xigitech@gmail.com', u'2010-12-06 04:26:04'),
        (770, u'espenhogbakk', u'2010-12-06 06:13:29',
         u'sha1$e354e$d7c7d6cf7a80f6ccd6e96c688362e5e55a651b62',
         u'espen@hogbakk.no', u'2010-12-06 06:12:27'),
        (773, u'petko', u'2010-12-06 07:02:29',
         u'sha1$86046$91b8739ec7172f6381ae9827a16745084ac960d8',
         u'petko@magicbg.com', u'2010-12-06 07:01:57'),
        (799, u'eallik', u'2010-12-06 12:49:19',
         u'sha1$601f6$e0a658e028c87227715b617f31c4c04f479daf0f',
         u'eallik+djangopackages@gmail.com', u'2010-12-06 12:47:55'),
        (821, u'digitaldreamer', u'2010-12-09 23:39:29',
         u'sha1$920b7$590a350f467603856b41b302f4dbb3cf76c99f52',
         u'poyzer@gmail.com', u'2010-12-09 23:37:03'),
        (834, u'andrey_shipilov', u'2010-12-13 06:56:07',
         u'sha1$59dc2$e720ca75bb1e1aeb66b50a716b74428955c86122',
         u'tezro.gb@gmail.com', u'2010-12-13 06:54:28'),
        (847, u'john', u'2010-12-15 23:10:14',
         u'sha1$fa281$87df6e12a569a9383986d0047f36e54a93d0812c',
         u'xjh8619kl93@163.com', u'2010-12-15 23:01:23'),
        (848, u'tmilovan', u'2010-12-16 13:03:30',
         u'sha1$a2979$2c86ec53cd382a822ff4ac9764e2f65bd8d7e7c9',
         u'tmilovan@fwd.hr', u'2010-12-16 13:02:12'),
        (850, u'silvergeko', u'2010-12-17 15:22:38',
         u'sha1$c9c20$7b4cf6b907147e30a105360888ac4a903ba782ab',
         u'scopel.emanuele@gmail.com', u'2010-12-17 15:21:25'),
        (322, u'tino', u'2010-12-21 06:01:06',
         u'sha1$3cb5f$36e621a1d7d38a9159e9e7bc86cd93f0636d330d',
         u'tinodb@gmail.com', u'2010-09-16 16:27:02'),
        (883, u'mariocesar', u'2010-12-29 08:36:45',
         u'sha1$705d5$63af6bfa8f27b759b755574f6c7d8158d240c526',
         u'mariocesar.c50@gmail.com', u'2010-12-29 08:35:49'),
        (823, u'qrilka', u'2011-01-19 17:17:17',
         u'sha1$37911$f2622929d7f460a9a4d6204325b1349940aafe83',
         u'qrilka@gmail.com', u'2010-12-10 05:15:35'),
        (958, u'dmpeters63', u'2011-01-28 03:06:30',
         u'sha1$66668$0eb23655452720bdb2e2ff8874bd84d5ae599dfb',
         u'dmpeters63@gmail.com', u'2011-01-28 03:04:11'),
        (387, u'oversize', u'2011-01-28 07:37:47',
         u'sha1$f291f$d08c5869b9dafc8a61c460599e4c2519f3e60cda',
         u'manuel@schmidtman.de', u'2010-09-21 05:44:08'),
        (361, u'moskrc', u'2011-03-05 04:35:45',
         u'sha1$3627b$3a2d7e40c2adb5f6fe459737e1c6abfc242b225c',
         u'moskrc@gmail.com', u'2010-09-18 15:53:47'),
        (123, u'stefanfoulis', u'2011-02-21 03:18:48',
         u'sha1$8ef17$5559490fac93e0e40ac637e84b3c8069d2091879',
         u'stefan.foulis@gmail.com', u'2010-08-28 13:54:39'),
        (1026, u'gmh04', u'2011-03-14 16:02:05',
         u'sha1$d82a2$96d68a17ce55446037d67b3cf076f5e47cca1718',
         u'gmh04@netscape.net', u'2011-02-16 16:05:47'),
        (516, u'azizmb', u'2011-02-25 13:20:16',
         u'sha1$dd3cb$321edb091caa16a8fd2231dfc61bbb27ecc455eb',
         u'aziz.mansur@gmail.com', u'2010-10-09 13:54:45'),
        (715, u'mwalling', u'2011-03-02 16:57:24',
         u'sha1$114c6$195ee166ab51a5727641915fe4bc822d1ba9052f',
         u'mark@markwalling.org', u'2010-11-28 13:36:12'),
        (1105, u'evilkarlothian', u'2011-03-14 18:54:25',
         u'sha1$e509f$a44e555f6c7aee67fde34dbe995fce20a4af2b96',
         u'karlbowden@gmail.com', u'2011-03-14 18:52:34'),
    ]
    for pk, username, last_login, password, email, date_joined in user_rows:
        User.objects.get_or_create(
            pk=pk,
            username=username,
            first_name=u'',
            last_name=u'',
            is_active=True,
            # audreyr (pk=1) is the only staff/superuser account.
            is_superuser=(pk == 1),
            is_staff=(pk == 1),
            last_login=last_login,
            password=password,
            email=email,
            date_joined=date_joined,
        )

    package6 = Package.objects.get(pk=6)

    # One row per released version: (pk, downloads, version number).
    # All versions are BSD licensed and visible (hidden=False).
    version_rows = [
        (2278, 1904, u'2.1.3'),
        (2252, 715, u'2.1.2'),
        (2177, 906, u'2.1.1'),
        (2041, 1613, u'2.1.0'),
        (1977, 850, u'2.1.0.rc3'),
        (1913, 726, u'2.1.0.rc2'),
        (1870, 299, u'2.1.0.rc1'),
        (12, 1062, u'2.0.0'),
        (11, 212, u'2.0.1'),
        (10, 4326, u'2.0.2'),
        (9, 13644, u'2.1.0.beta3'),
    ]
    for pk, downloads, number in version_rows:
        Version.objects.get_or_create(
            pk=pk,
            license=u'BSD License',
            downloads=downloads,
            package=package6,
            number=number,
            hidden=False,
        )

    datautil.reset_sequences(Category, Package, Profile, Version, User)
| |
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common utility functions."""
import base64
import datetime
import hashlib
import json
import os
import random
import re
import string
import StringIO
import time
import unicodedata
import urllib
import urlparse
import zipfile
import yaml
import feconf # pylint: disable=relative-import
class InvalidInputException(Exception):
    """Exception raised when a caller supplies input that cannot be handled."""
class ValidationError(Exception):
    """Exception raised when a domain object does not pass validation."""
class ExplorationConversionError(Exception):
    """Exception raised when converting an exploration between schema
    versions fails.
    """
def create_enum(*sequential, **names):
    """Build an enum-like class from the given member names.

    Each positional argument becomes a class attribute whose value is its
    own name; each keyword argument becomes an attribute with the supplied
    value.
    """
    members = {member: member for member in sequential}
    members.update(names)
    return type('Enum', (), members)
def get_file_contents(filepath, raw_bytes=False, mode='r'):
    """Read and return the contents of a file.

    Args:
        filepath: a relative filepath from oppia/.
        raw_bytes: if True, return the data exactly as read; otherwise
            decode it as UTF-8 before returning.
        mode: the mode to open the file in.
    """
    with open(filepath, mode) as opened_file:
        contents = opened_file.read()
    return contents if raw_bytes else contents.decode('utf-8')
def get_exploration_components_from_dir(dir_path):
    """Gets the (yaml, assets) from the contents of an exploration data dir.

    Args:
        dir_path: a full path to the exploration root directory.

    Returns:
        a 2-tuple, the first element of which is a yaml string, and the second
        element of which is a list of (filepath, content) 2-tuples. The filepath
        does not include the assets/ prefix.

    Raises:
        Exception: if the following condition doesn't hold: "There is exactly
            one file not in assets/, and this file has a .yaml suffix".
    """
    yaml_content = None
    assets_list = []

    # Strip trailing slashes so the path-depth computation below is stable.
    dir_path_array = dir_path.split('/')
    while dir_path_array[-1] == '':
        dir_path_array = dir_path_array[:-1]
    dir_path_length = len(dir_path_array)

    for root, dirs, files in os.walk(dir_path):
        for directory in dirs:
            if root == dir_path and directory != 'assets':
                raise Exception(
                    'The only directory in %s should be assets/' % dir_path)

        for filename in files:
            filepath = os.path.join(root, filename)
            if root == dir_path:
                # These files are added automatically by Mac OS X systems.
                # We ignore them.
                if filepath.endswith('.DS_Store'):
                    continue
                if yaml_content is not None:
                    raise Exception('More than one non-asset file specified '
                                    'for %s' % dir_path)
                elif not filepath.endswith('.yaml'):
                    raise Exception('Found invalid non-asset file %s. There '
                                    'should only be a single non-asset file, '
                                    'and it should have a .yaml suffix.' %
                                    filepath)
                else:
                    yaml_content = get_file_contents(filepath)
            else:
                filepath_array = filepath.split('/')
                # The additional offset is to remove the 'assets/' prefix.
                filename = '/'.join(filepath_array[dir_path_length + 1:])
                assets_list.append((filename, get_file_contents(
                    filepath, raw_bytes=True)))

    if yaml_content is None:
        # Typo fix in the error message: 'specifed' -> 'specified'.
        raise Exception('No yaml file specified for %s' % dir_path)

    return yaml_content, assets_list
def get_exploration_components_from_zip(zip_file_contents):
    """Gets the (yaml, assets) from the contents of an exploration zip file.

    Args:
        zip_file_contents: a string of raw bytes representing the contents of
            a zip file that comprises the exploration.

    Returns:
        a 2-tuple, the first element of which is a yaml string, and the second
        element of which is a list of (filepath, content) 2-tuples. The
        filepath does not include the assets/ prefix.

    Raises:
        Exception: if the following condition doesn't hold: "There is exactly
            one file not in assets/, and this file has a .yaml suffix".
    """
    memfile = StringIO.StringIO()
    memfile.write(zip_file_contents)

    zf = zipfile.ZipFile(memfile, 'r')
    yaml_content = None
    assets_list = []
    for filepath in zf.namelist():
        if filepath.startswith('assets/'):
            # Bug fix: list.append takes exactly one argument, so the
            # (filename, content) pair must be wrapped in a tuple; the
            # previous two-argument call raised TypeError.
            assets_list.append(
                ('/'.join(filepath.split('/')[1:]), zf.read(filepath)))
        else:
            if yaml_content is not None:
                raise Exception(
                    'More than one non-asset file specified for zip file')
            elif not filepath.endswith('.yaml'):
                raise Exception('Found invalid non-asset file %s. There '
                                'should only be a single file not in assets/, '
                                'and it should have a .yaml suffix.' %
                                filepath)
            else:
                yaml_content = zf.read(filepath)

    if yaml_content is None:
        raise Exception('No yaml file specified in zip file contents')

    return yaml_content, assets_list
def get_comma_sep_string_from_list(items):
    """Turn a list of items into a human-readable listing.

    The final two items are joined with ' and ' rather than a comma,
    e.g. ['a', 'b', 'c'] -> 'a, b and c'.
    """
    if not items:
        return ''
    rest, last = items[:-1], items[-1]
    if not rest:
        return last
    return ', '.join(rest) + ' and ' + last
def to_ascii(input_string):
    """Change unicode characters in a string to ascii if possible.

    Characters with no ASCII equivalent after NFKD normalization are
    silently dropped ('ignore'). Python 2 only: uses the `unicode` builtin.
    """
    return unicodedata.normalize(
        'NFKD', unicode(input_string)).encode('ascii', 'ignore')
def yaml_from_dict(dictionary, width=80):
    """Gets the YAML representation of a dict.

    Args:
        dictionary: the dict to serialize.
        width: preferred line width of the emitted YAML.
    """
    return yaml.safe_dump(dictionary, default_flow_style=False, width=width)
def dict_from_yaml(yaml_str):
    """Gets the dict representation of a YAML string.

    Raises:
        InvalidInputException: if the string is not parseable as YAML.

    NOTE(review): if the YAML parses to a non-dict, the assert raises
    AssertionError, which is NOT converted to InvalidInputException --
    confirm whether that is intended.
    """
    try:
        retrieved_dict = yaml.safe_load(yaml_str)
        assert isinstance(retrieved_dict, dict)
        return retrieved_dict
    except yaml.YAMLError as e:
        raise InvalidInputException(e)
def recursively_remove_key(obj, key_to_remove):
    """Recursively remove every occurrence of a key from nested dicts/lists.

    Mutates ``obj`` in place; non-container values are left untouched.
    """
    if isinstance(obj, dict):
        obj.pop(key_to_remove, None)
        for value in obj.values():
            recursively_remove_key(value, key_to_remove)
    elif isinstance(obj, list):
        for element in obj:
            recursively_remove_key(element, key_to_remove)
def get_random_int(upper_bound):
    """Returns a random integer in [0, upper_bound).

    Uses SystemRandom, which draws from OS-provided entropy.
    """
    assert isinstance(upper_bound, int) and upper_bound >= 0
    return random.SystemRandom().randrange(0, upper_bound)
def get_random_choice(alist):
    """Returns a random element from a non-empty list."""
    assert isinstance(alist, list) and alist
    return alist[get_random_int(len(alist))]
def convert_png_to_data_url(filepath):
    """Converts the png file at filepath to a data URL.

    This method is currently used only in tests for RTE extensions.
    Python 2 only: uses the 'base64' str codec and urllib.quote.
    """
    file_contents = get_file_contents(filepath, raw_bytes=True, mode='rb')
    return 'data:image/png;base64,%s' % urllib.quote(
        file_contents.encode('base64'))
def camelcase_to_hyphenated(camelcase_str):
    """Convert CamelCase to hyphenated lower case, e.g. 'AbcDef' -> 'abc-def'."""
    # First pass splits before capitalized words; second pass handles
    # lower/digit-to-upper boundaries such as 'abcX'.
    step_one = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', camelcase_str)
    step_two = re.sub('([a-z0-9])([A-Z])', r'\1-\2', step_one)
    return step_two.lower()
def set_url_query_parameter(url, param_name, param_value):
    """Set or replace a query parameter, and return the modified URL.

    Args:
        url: the URL to modify.
        param_name: name of the query parameter; must be a string.
        param_value: the value to set; any existing values for the same
            parameter are replaced.
    """
    if not isinstance(param_name, basestring):
        raise Exception(
            'URL query parameter name must be a string, received %s'
            % param_name)

    scheme, netloc, path, query_string, fragment = urlparse.urlsplit(url)
    query_params = urlparse.parse_qs(query_string)

    query_params[param_name] = [param_value]
    new_query_string = urllib.urlencode(query_params, doseq=True)

    return urlparse.urlunsplit(
        (scheme, netloc, path, new_query_string, fragment))
class JSONEncoderForHTML(json.JSONEncoder):
    """JSON encoder whose output is safe to embed directly in HTML.

    The characters &, < and > are emitted as their Unicode escape
    sequences, so the encoded JSON cannot close a script block or open
    an HTML tag.
    """

    def encode(self, o):
        pieces = self.iterencode(o, True)
        joiner = '' if self.ensure_ascii else u''
        return joiner.join(pieces)

    def iterencode(self, o, _one_shot=False):
        for piece in super(JSONEncoderForHTML, self).iterencode(o, _one_shot):
            piece = piece.replace('&', '\\u0026')
            piece = piece.replace('<', '\\u003c')
            yield piece.replace('>', '\\u003e')
def convert_to_hash(input_string, max_length):
    """Convert a string to a SHA1 hash, base64-encoded and truncated.

    Args:
        input_string: the string to hash; must be a (byte or unicode) string.
        max_length: maximum length of the returned digest string.
    """
    if not isinstance(input_string, basestring):
        raise Exception(
            'Expected string, received %s of type %s' %
            (input_string, type(input_string)))

    # URL-safe base64 keeps the result usable in ids and URLs.
    encoded_string = base64.urlsafe_b64encode(
        hashlib.sha1(input_string).digest())

    return encoded_string[:max_length]
def base64_from_int(value):
    """Base64-encode a small integer.

    NOTE(review): on Python 2, bytes is an alias of str, so bytes([value])
    is the literal string '[<value>]' rather than a single byte; on
    Python 3 it is a one-byte value. Confirm which runtime/output format
    callers depend on.
    """
    return base64.b64encode(bytes([value]))
def get_time_in_millisecs(datetime_obj):
    """Returns the time represented by ``datetime_obj`` in milliseconds
    since the Epoch.

    Args:
        datetime_obj: An object of type datetime.datetime.
    """
    epoch_seconds = time.mktime(datetime_obj.timetuple())
    # timetuple() drops sub-second precision, so add it back separately.
    sub_second_msecs = datetime_obj.microsecond / 1000.0
    return epoch_seconds * 1000 + sub_second_msecs
def get_current_time_in_millisecs():
    """Returns the current UTC time in milliseconds since the Epoch."""
    return get_time_in_millisecs(datetime.datetime.utcnow())
def get_human_readable_time_string(time_msec):
    """Given a time in milliseconds since the epoch, get a human-readable
    time string for the admin dashboard (UTC, e.g. 'January 01 00:00:00').
    """
    utc_struct = time.gmtime(time_msec / 1000.0)
    return time.strftime('%B %d %H:%M:%S', utc_struct)
def generate_random_string(length):
    """Returns a URL-safe base64 encoding of `length` random bytes.

    Note: the returned string is longer than `length`, since base64
    expands every 3 bytes into 4 characters.
    """
    return base64.urlsafe_b64encode(os.urandom(length))
def generate_new_session_id():
    """Returns a fresh random session id (24 random bytes, base64-encoded)."""
    return generate_random_string(24)
def vfs_construct_path(base_path, *path_components):
    """Mimics the behaviour of os.path.join on Posix machines."""
    result = base_path
    for piece in path_components:
        if piece.startswith('/'):
            # An absolute component discards everything before it.
            result = piece
        elif result == '' or result.endswith('/'):
            result += piece
        else:
            result = '%s/%s' % (result, piece)
    return result
def vfs_normpath(path):
    """Normalize path from posixpath.py, eliminating double slashes, etc."""
    # Preserve unicode (if path is unicode)
    slash, dot = (u'/', u'.') if isinstance(path, unicode) else ('/', '.')
    if path == '':
        return dot
    initial_slashes = path.startswith('/')
    # POSIX allows one or two initial slashes, but treats three or more
    # as single slash.
    if (initial_slashes and
            path.startswith('//') and not path.startswith('///')):
        initial_slashes = 2
    comps = path.split('/')
    new_comps = []
    for comp in comps:
        # Drop empty components (from '//') and single dots.
        if comp in ('', '.'):
            continue
        # Keep '..' only when it cannot cancel a previous component:
        # at the start of a relative path, or after another kept '..'.
        if (comp != '..' or
                (not initial_slashes and not new_comps) or
                (new_comps and new_comps[-1] == '..')):
            new_comps.append(comp)
        elif new_comps:
            new_comps.pop()
    comps = new_comps
    path = slash.join(comps)
    if initial_slashes:
        path = slash * initial_slashes + path
    return path or dot
def require_valid_name(name, name_type):
    """Generic name validation.

    Args:
        name: the name to validate.
        name_type: a human-readable string, like 'the exploration title' or
            'a state name'. This will be shown in error messages.

    Raises:
        ValidationError: if the name is empty or too long, has leading,
            trailing or adjacent whitespace, or contains a character from
            feconf.INVALID_NAME_CHARS.
    """
    # This check is needed because state names are used in URLs and as ids
    # for statistics, so the name length should be bounded above.
    if len(name) > 50 or len(name) < 1:
        raise ValidationError(
            'The length of %s should be between 1 and 50 '
            'characters; received %s' % (name_type, name))

    if name[0] in string.whitespace or name[-1] in string.whitespace:
        raise ValidationError(
            'Names should not start or end with whitespace.')

    if re.search(r'\s\s+', name):
        raise ValidationError(
            'Adjacent whitespace in %s should be collapsed.' % name_type)

    for character in feconf.INVALID_NAME_CHARS:
        if character in name:
            raise ValidationError(
                'Invalid character %s in %s: %s' %
                (character, name_type, name))
def capitalize_string(input_string):
    """Converts the first character of a string to its uppercase equivalent
    (if it's a letter), leaving the rest of the string unchanged.
    """
    # Empty (or otherwise falsy) strings pass through untouched.
    if not input_string:
        return input_string
    return input_string[0].upper() + input_string[1:]
def get_info_card_url_for_category(category):
    """Returns the info-card background image URL for a category.

    Unknown categories fall back to feconf.DEFAULT_COLOR.
    """
    info_card_color = (
        feconf.CATEGORIES_TO_COLORS[category] if
        category in feconf.CATEGORIES_TO_COLORS else feconf.DEFAULT_COLOR)
    return (
        '/images/gallery/exploration_background_%s_large.png' %
        info_card_color)
def get_hex_color_for_category(category):
    """Returns the hex color value for a category.

    Unknown categories fall back to feconf.DEFAULT_COLOR.
    """
    color = (
        feconf.CATEGORIES_TO_COLORS[category]
        if category in feconf.CATEGORIES_TO_COLORS
        else feconf.DEFAULT_COLOR)
    return feconf.COLORS_TO_HEX_VALUES[color]
def get_thumbnail_icon_url_for_category(category):
    """Returns the thumbnail icon URL for a category.

    Categories not in feconf.DEFAULT_CATEGORIES use the default icon.
    """
    icon_name = (
        category if category in feconf.DEFAULT_CATEGORIES
        else feconf.DEFAULT_THUMBNAIL_ICON)
    # Remove all spaces from the string.
    return '/images/gallery/thumbnails/%s.svg' % icon_name.replace(' ', '')
def _get_short_language_description(full_language_description):
"""Given one of the descriptions in feconf.ALL_LANGUAGE_CODES, generates
the corresponding short description.
"""
if ' (' not in full_language_description:
return full_language_description
else:
ind = full_language_description.find(' (')
return full_language_description[:ind]
def get_all_language_codes_and_names():
    """Returns a list of {'code', 'name'} dicts for all supported languages,
    using the shortened form of each language description.
    """
    return [{
        'code': lc['code'],
        'name': _get_short_language_description(lc['description']),
    } for lc in feconf.ALL_LANGUAGE_CODES]
| |
#!/usr/bin/python
"""
Lexing error finder
~~~~~~~~~~~~~~~~~~~
For the source files given on the command line, display
the text where Error tokens are being generated, along
with some context.
:copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import os
import sys
import struct
# Always prefer the in-tree Pygments checkout (one level up from this
# script) over any installed version, so the debugger exercises the
# source being developed.
srcpath = os.path.join(os.path.dirname(__file__), '..')
if os.path.isdir(os.path.join(srcpath, 'pygments')):
    sys.path.insert(0, srcpath)
from pygments.lexer import RegexLexer, ExtendedRegexLexer, LexerContext, \
ProfilingRegexLexer, ProfilingRegexLexerMeta
from pygments.lexers import get_lexer_by_name, find_lexer_class, \
find_lexer_class_for_filename, guess_lexer
from pygments.token import Error, Text, _TokenType
from pygments.cmdline import _parse_options
class DebuggingRegexLexer(ExtendedRegexLexer):
    """Make the state stack, position and current match instance attributes."""

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)

        Unlike the base implementation, the lexer context (``self.ctx``)
        and the last regex match (``self.m``) are exposed as instance
        attributes so callers can inspect lexer state between tokens.
        """
        tokendefs = self._tokens
        self.ctx = ctx = LexerContext(text, 0)
        ctx.stack = list(stack)
        statetokens = tokendefs[ctx.stack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                self.m = m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            if not isinstance(self, ExtendedRegexLexer):
                                yield from action(self, m)
                                ctx.pos = m.end()
                            else:
                                yield from action(self, m, ctx)
                                if not new_state:
                                    # altered the state stack?
                                    statetokens = tokendefs[ctx.stack[-1]]
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # pop
                            del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, 'wrong state def: %r' % new_state
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                # No rule matched at the current position.
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to 'root'
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, '\n'
                        ctx.pos += 1
                        continue
                    # Emit the unmatched character as an Error token.
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
def decode_atheris(bstr):
    """Decode a byte string into a Unicode string using the algorithm
    of Google's Atheris fuzzer library, which aims to produce a wide
    range of possible Unicode inputs.

    Corresponds to ConsumeUnicodeImpl() with filter_surrogates=false in
    https://github.com/google/atheris/blob/master/fuzzed_data_provider.cc
    """
    if len(bstr) < 2:
        return ''
    # The first byte only selects if the rest is decoded as ascii, "utf-16" or "utf-32"
    spec, bstr = bstr[0], bstr[1:]
    if spec & 1:  # pure ASCII
        return ''.join(chr(ch & 0x7f) for ch in bstr)
    elif spec & 2:  # UTF-16
        # Truncate a trailing odd byte so the payload is decodable.
        bstr = bstr if len(bstr) % 2 == 0 else bstr[:-1]
        return bstr.decode('utf16')

    # else UTF-32
    def valid_codepoint(ch):
        # Clamp to the Unicode range; fold planes 17+ back into 16.
        ch &= 0x1fffff
        if ch & 0x100000:
            ch &= ~0x0f0000
        return chr(ch)

    chars = struct.unpack('%dI%dx' % divmod(len(bstr), 4), bstr)
    # Bug fix: the iterable must be passed to map() itself; previously it
    # was passed as a second argument to str.join, raising TypeError.
    return ''.join(map(valid_codepoint, chars))
def main(fn, lexer=None, options={}):
    """Lex the file ``fn`` and report Error tokens with context.

    Returns 1 if an Error token was produced (and -e was not given),
    else 0. Reads the module-level option globals (num, showall,
    ignerror, guess, decode_strategy) set by the argument parsing below.

    NOTE(review): the mutable default ``options={}`` is shared across
    calls; harmless as long as it is never mutated here.
    """
    if fn == '-':
        text = sys.stdin.read()
    else:
        with open(fn, 'rb') as fp:
            text = fp.read()
        # Decode the raw bytes per the strategy chosen with -u/-U.
        if decode_strategy == 'latin1':
            try:
                text = text.decode('utf8')
            except UnicodeError:
                print('Warning: non-UTF8 input, using latin1')
                text = text.decode('latin1')
        elif decode_strategy == 'utf8-ignore':
            try:
                text = text.decode('utf8')
            except UnicodeError:
                print('Warning: ignoring non-UTF8 bytes in input')
                text = text.decode('utf8', 'ignore')
        elif decode_strategy == 'atheris':
            text = decode_atheris(text)
    text = text.strip('\n') + '\n'

    # Pick the lexer: explicit name, content guess, or filename-based.
    if lexer is not None:
        lxcls = get_lexer_by_name(lexer).__class__
    elif guess:
        lxcls = guess_lexer(text).__class__
        print('Using lexer: %s (%s.%s)' % (lxcls.name, lxcls.__module__,
                                           lxcls.__name__))
    else:
        lxcls = find_lexer_class_for_filename(os.path.basename(fn))
        if lxcls is None:
            # Fall back to the part of the filename before the first '_'.
            name, rest = fn.split('_', 1)
            lxcls = find_lexer_class(name)
            if lxcls is None:
                raise AssertionError('no lexer found for file %r' % fn)
            print('Using lexer: %s (%s.%s)' % (lxcls.name, lxcls.__module__,
                                               lxcls.__name__))
    debug_lexer = False
    # if profile:
    #     # does not work for e.g. ExtendedRegexLexers
    #     if lxcls.__bases__ == (RegexLexer,):
    #         # yes we can! (change the metaclass)
    #         lxcls.__class__ = ProfilingRegexLexerMeta
    #         lxcls.__bases__ = (ProfilingRegexLexer,)
    #         lxcls._prof_sort_index = profsort
    # else:
    #     if lxcls.__bases__ == (RegexLexer,):
    #         lxcls.__bases__ = (DebuggingRegexLexer,)
    #         debug_lexer = True
    #     elif lxcls.__bases__ == (DebuggingRegexLexer,):
    #         # already debugged before
    #         debug_lexer = True
    #     else:
    #         # HACK: ExtendedRegexLexer subclasses will only partially work here.
    #         lxcls.__bases__ = (DebuggingRegexLexer,)
    #         debug_lexer = True
    lx = lxcls(**options)
    lno = 1
    tokens = []
    states = []

    def show_token(tok, state):
        # Print one token per line: padded repr(value), then the token type.
        reprs = list(map(repr, tok))
        print(' ' + reprs[1] + ' ' + ' ' * (29-len(reprs[1])) + reprs[0], end=' ')
        if debug_lexer:
            print(' ' + ' ' * (29-len(reprs[0])) + ' : '.join(state)
                  if state else '', end=' ')
        print()

    for type, val in lx.get_tokens(text):
        lno += val.count('\n')
        if type == Error and not ignerror:
            print('Error parsing', fn, 'on line', lno)
            if not showall:
                print('Previous tokens' + (debug_lexer and ' and states' or '') + ':')
                for i in range(max(len(tokens) - num, 0), len(tokens)):
                    if debug_lexer:
                        show_token(tokens[i], states[i])
                    else:
                        show_token(tokens[i], None)
            print('Error token:')
            vlen = len(repr(val))
            print(' ' + repr(val), end=' ')
            if debug_lexer and hasattr(lx, 'ctx'):
                print(' ' * (60-vlen) + ' : '.join(lx.ctx.stack), end=' ')
            print()
            print()
            return 1
        tokens.append((type, val))
        if debug_lexer:
            if hasattr(lx, 'ctx'):
                states.append(lx.ctx.stack[:])
            else:
                states.append(None)
        if showall:
            show_token((type, val), states[-1] if debug_lexer else None)
    return 0
def print_help():
    """Print the command-line usage summary."""
    print('''\
Pygments development helper to quickly debug lexers.

    scripts/debug_lexer.py [options] file ...

Give one or more filenames to lex them and display possible error tokens
and/or profiling info. Files are assumed to be encoded in UTF-8.

Selecting lexer and options:

    -l NAME         use lexer named NAME (default is to guess from
                    the given filenames)
    -g              guess lexer from content
    -u              if input is non-utf8, use "ignore" handler instead
                    of using latin1 encoding
    -U              use Atheris fuzzer's method of converting
                    byte input to Unicode
    -O OPTIONSTR    use lexer options parsed from OPTIONSTR

Debugging lexing errors:

    -n N            show the last N tokens on error
    -a              always show all lexed tokens (default is only
                    to show them when an error occurs)
    -e              do not stop on error tokens

Profiling:

    -p              use the ProfilingRegexLexer to profile regexes
                    instead of the debugging lexer
    -s N            sort profiling output by column N (default is
                    column 4, the time per call)
''')
# Command-line option state, shared with main(); overridden by the
# getopt loop below.
num = 10                    # -n: tokens of context to show on error
showall = False             # -a: dump every token, not just errors
ignerror = False            # -e: keep lexing past Error tokens
lexer = None                # -l: explicit lexer name
options = {}                # -O: lexer options
profile = False             # -p: use the profiling lexer
profsort = 4                # -s: profiling output sort column
guess = False               # -g: guess lexer from content
decode_strategy = 'latin1'  # -u/-U: how to decode non-UTF8 input
if __name__ == '__main__':
    import getopt
    opts, args = getopt.getopt(sys.argv[1:], 'n:l:aepO:s:hguU')
    for opt, val in opts:
        if opt == '-n':
            num = int(val)
        elif opt == '-a':
            showall = True
        elif opt == '-e':
            ignerror = True
        elif opt == '-l':
            lexer = val
        elif opt == '-p':
            profile = True
        elif opt == '-s':
            profsort = int(val)
        elif opt == '-O':
            options = _parse_options([val])
        elif opt == '-g':
            guess = True
        elif opt == '-u':
            decode_strategy = 'utf8-ignore'
        elif opt == '-U':
            decode_strategy = 'atheris'
        elif opt == '-h':
            print_help()
            sys.exit(0)
    ret = 0
    if not args:
        print_help()
    # Accumulate per-file results; any error makes the exit status 1.
    for f in args:
        ret += main(f, lexer, options)
    sys.exit(bool(ret))
| |
#!/usr/bin/env python
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Common import for all tests."""
import base64
import contextlib
import json
import logging
import optparse
import os
import shlex
import shutil
import signal
import socket
import subprocess
import sys
import time
import unittest
import urllib2
from vtdb import prefer_vtroot_imports # pylint: disable=unused-import
from vtdb import vtgate_client
import environment
from mysql_flavor import mysql_flavor
from mysql_flavor import set_mysql_flavor
import MySQLdb
from protocols_flavor import protocols_flavor
from topo_flavor.server import set_topo_server_flavor
from vtctl import vtctl_client
from vtdb import keyrange_constants
from vtgate_gateway_flavor.gateway import set_vtgate_gateway_flavor
from vtgate_gateway_flavor.gateway import vtgate_gateway_flavor
from vtproto import topodata_pb2
# Parsed command-line options; populated by set_options() below.
options = None
devnull = open('/dev/null', 'w')
# Canonical hostname of the local machine.
hostname = socket.getaddrinfo(
    socket.getfqdn(), None, 0, 0, 0, socket.AI_CANONNAME)[0][3]
class TestError(Exception):
  """Raised by the test utilities on failures (commands, timeouts, ...)."""
  pass
class Break(Exception):
  # NOTE(review): not raised in this module; presumably used by tests as a
  # control-flow exception -- confirm against callers.
  pass


# Configure the test environment (directories, ports, ...) at import time.
environment.setup()
class LoggingStream(object):
  """File-like stream that forwards each line written to logging.info.

  Partial lines are buffered in self.line and re-logged as they grow;
  a newline (alone or trailing) resets the buffer.
  """

  def __init__(self):
    self.line = ''

  def write(self, value):
    if value == '\n':
      # we already printed it
      self.line = ''
      return
    self.line = self.line + value
    logging.info('===== ' + self.line)
    if value.endswith('\n'):
      self.line = ''

  def writeln(self, value):
    # Write the value, then unconditionally start a fresh line.
    self.write(value)
    self.line = ''

  def flush(self):
    """No-op; present to satisfy the stream interface."""
    pass
def add_options(parser):
  """Add the test framework's standard command-line flags to parser."""
  environment.add_options(parser)
  parser.add_option('-d', '--debug', action='store_true',
                    help='utils.pause() statements will wait for user input')
  parser.add_option('-k', '--keep-logs', action='store_true',
                    help='Do not delete log files on teardown.')
  parser.add_option(
      '-q', '--quiet', action='store_const', const=0, dest='verbose', default=1)
  parser.add_option(
      '-v', '--verbose', action='store_const', const=2, dest='verbose',
      default=1)
  parser.add_option('--skip-build', action='store_true',
                    help='Do not build the go binaries when running the test.')
  parser.add_option(
      '--skip-teardown', action='store_true',
      help='Leave the global processes running after the test is done.')
  parser.add_option('--mysql-flavor')
  parser.add_option('--protocols-flavor', default='grpc')
  parser.add_option('--topo-server-flavor', default='zk2')
  parser.add_option('--vtgate-gateway-flavor', default='discoverygateway')
def set_options(opts):
  """Store the parsed options globally and configure all pluggable flavors."""
  global options
  options = opts

  set_mysql_flavor(options.mysql_flavor)
  environment.setup_protocol_flavor(options.protocols_flavor)
  set_topo_server_flavor(options.topo_server_flavor)
  set_vtgate_gateway_flavor(options.vtgate_gateway_flavor)
  environment.skip_build = options.skip_build
# main executes the test classes contained in the passed module, or
# __main__ if empty.
def main(mod=None, test_options=None):
  """The replacement main method, which parses args and runs tests.

  Args:
    mod: module that contains the test methods.
    test_options: a function which adds OptionParser options that are specific
      to a test file.
  """
  if mod is None:
    # Default to the module that was invoked as a script.
    mod = sys.modules['__main__']

  global options

  parser = optparse.OptionParser(usage='usage: %prog [options] [test_names]')
  add_options(parser)
  if test_options:
    test_options(parser)
  (options, args) = parser.parse_args()

  environment.set_log_level(options.verbose)
  logging.basicConfig(
      format='-- %(asctime)s %(module)s:%(lineno)d %(levelname)s %(message)s')

  set_options(options)

  run_tests(mod, args)
def run_tests(mod, args):
  """Build and run a test suite from mod, honoring setup/teardown args.

  Args:
    mod: module containing the test cases.
    args: positional args; empty runs everything, 'setup'/'teardown' run
      only the module-level fixture, otherwise each arg names a test.
  """
  try:
    suite = unittest.TestSuite()
    if not args:
      # this will run the setup and teardown
      suite.addTests(unittest.TestLoader().loadTestsFromModule(mod))
    else:
      if args[0] == 'teardown':
        mod.tearDownModule()
      elif args[0] == 'setup':
        mod.setUpModule()
      else:
        for arg in args:
          # this will run the setup and teardown
          suite.addTests(unittest.TestLoader().loadTestsFromName(arg, mod))
    if suite.countTestCases() > 0:
      logger = LoggingStream()
      result = unittest.TextTestRunner(
          stream=logger, verbosity=options.verbose, failfast=True).run(suite)
      if not result.wasSuccessful():
        sys.exit(-1)
  except KeyboardInterrupt:
    logging.warning('======== Tests interrupted, cleaning up ========')
    mod.tearDownModule()
    # If you interrupt a test, you probably want to stop evaluating the rest.
    sys.exit(1)
  finally:
    if options.keep_logs:
      logging.warning('Leaving temporary files behind (--keep-logs), please '
                      'clean up before next run: ' + os.environ['VTDATAROOT'])
def remove_tmp_files():
  """Delete the temporary test root, unless --keep-logs was given."""
  if options.keep_logs:
    return
  try:
    shutil.rmtree(environment.tmproot)
  except OSError as e:
    logging.debug('remove_tmp_files: %s', str(e))
def pause(prompt):
  """Block for user input, but only when running with --debug."""
  if options.debug:
    raw_input(prompt)
# sub-process management
pid_map = {}         # pid -> subprocess.Popen, for processes we started
already_killed = []  # pids that have already been signalled
def _add_proc(proc):
  """Register a background process and append its pid to the on-disk list.

  kill_sub_processes() reads the test-pids file, so leftover processes from
  an aborted run can still be cleaned up.
  """
  pid_map[proc.pid] = proc
  with open(environment.tmproot+'/test-pids', 'a') as f:
    print >> f, proc.pid, os.path.basename(proc.args[0])
def required_teardown():
  """Required cleanup steps that can't be skipped with --skip-teardown."""
  # We can't skip closing of gRPC connections, because the Python interpreter
  # won't let us die if any connections are left open.
  global vtctld_connection
  if vtctld_connection:
    vtctld_connection.close()
    vtctld_connection = None
def kill_sub_processes():
  """Kill every process we started, plus any pid recorded in test-pids.

  The test-pids file may also contain pids from a previous, aborted run.
  """
  for proc in pid_map.values():
    if proc.pid and proc.returncode is None:
      proc.kill()
  if not os.path.exists(environment.tmproot+'/test-pids'):
    return
  with open(environment.tmproot+'/test-pids') as f:
    for line in f:
      try:
        parts = line.strip().split()
        pid = int(parts[0])
        proc = pid_map.get(pid)
        if not proc or (proc and proc.pid and proc.returncode is None):
          if pid not in already_killed:
            os.kill(pid, signal.SIGTERM)
      except OSError as e:
        # The process may already be gone; that's fine.
        logging.debug('kill_sub_processes: %s', str(e))
def kill_sub_process(proc, soft=False):
  """Kill one background process and unregister it.

  Args:
    proc: the subprocess.Popen object; None is a no-op.
    soft: if True, terminate (SIGTERM) instead of kill (SIGKILL).
  """
  if proc is None:
    return
  pid = proc.pid
  if soft:
    proc.terminate()
  else:
    proc.kill()
  if pid and pid in pid_map:
    del pid_map[pid]
    already_killed.append(pid)
# run in foreground, possibly capturing output
def run(cmd, trap_output=False, raise_on_error=True, **kargs):
  """Run a command to completion and return its (stdout, stderr).

  Args:
    cmd: a string (shlex-split) or an argv list.
    trap_output: if True, capture stdout/stderr via pipes.
    raise_on_error: if True, pause (under --debug) and raise TestError on
      a non-zero exit code; otherwise only log the failure.
  """
  if isinstance(cmd, str):
    args = shlex.split(cmd)
  else:
    args = cmd
  if trap_output:
    kargs['stdout'] = subprocess.PIPE
    kargs['stderr'] = subprocess.PIPE
  logging.debug(
      'run: %s %s', str(cmd),
      ', '.join('%s=%s' % x for x in kargs.iteritems()))
  proc = subprocess.Popen(args, **kargs)
  proc.args = args
  stdout, stderr = proc.communicate()
  if proc.returncode:
    if raise_on_error:
      pause('cmd fail: %s, pausing...' % (args))
      raise TestError('cmd fail:', args, proc.returncode, stdout, stderr)
    else:
      logging.debug('cmd fail: %s %d %s %s',
                    str(args), proc.returncode, stdout, stderr)
  return stdout, stderr
# run sub-process, expects failure
def run_fail(cmd, **kargs):
  """Run a command that is expected to fail; raise TestError if it succeeds.

  Returns the captured (stdout, stderr).
  """
  if isinstance(cmd, str):
    args = shlex.split(cmd)
  else:
    args = cmd
  kargs['stdout'] = subprocess.PIPE
  kargs['stderr'] = subprocess.PIPE
  if options.verbose == 2:
    logging.debug(
        'run: (expect fail) %s %s', cmd,
        ', '.join('%s=%s' % x for x in kargs.iteritems()))
  proc = subprocess.Popen(args, **kargs)
  proc.args = args
  stdout, stderr = proc.communicate()
  if proc.returncode == 0:
    logging.info('stdout:\n%sstderr:\n%s', stdout, stderr)
    raise TestError('expected fail:', args, stdout, stderr)
  return stdout, stderr
# run a daemon - kill when this script exits
def run_bg(cmd, **kargs):
  """Start a background process and register it for cleanup at exit.

  The optional kwarg 'extra_env' (a dict) is overlaid onto a copy of the
  current environment before being passed to Popen.
  """
  if options.verbose == 2:
    logging.debug(
        'run: %s %s', cmd, ', '.join('%s=%s' % x for x in kargs.iteritems()))
  if 'extra_env' in kargs:
    kargs['env'] = os.environ.copy()
    if kargs['extra_env']:
      kargs['env'].update(kargs['extra_env'])
    del kargs['extra_env']
  if isinstance(cmd, str):
    args = shlex.split(cmd)
  else:
    args = cmd
  proc = subprocess.Popen(args=args, **kargs)
  proc.args = args
  _add_proc(proc)
  return proc
def wait_procs(proc_list, raise_on_error=True):
  """Wait for all given processes; optionally raise if any failed.

  Pids are recorded in already_killed first, so later cleanup does not
  re-signal them. Return code -9 (SIGKILL) is not reported as a failure.
  """
  for proc in proc_list:
    pid = proc.pid
    if pid:
      already_killed.append(pid)
  for proc in proc_list:
    proc.wait()
  for proc in proc_list:
    if proc.returncode:
      if options.verbose >= 1 and proc.returncode not in (-9,):
        sys.stderr.write('proc failed: %s %s\n' % (proc.returncode, proc.args))
      if raise_on_error:
        raise subprocess.CalledProcessError(proc.returncode,
                                            ' '.join(proc.args))
def validate_topology(ping_tablets=False):
  """Run 'vtctl Validate', optionally also pinging every tablet."""
  if ping_tablets:
    run_vtctl(['Validate', '-ping-tablets'])
  else:
    run_vtctl(['Validate'])
# wait_step is a helper for looping until a condition is true.
# use as follow:
# timeout = 10
# while True:
#   <step>
#   if <done>:
#     break
#   timeout = utils.wait_step('description of condition', timeout)
def wait_step(msg, timeout, sleep_time=0.1):
  """Sleep once and return the decremented timeout budget.

  Args:
    msg: description of the awaited condition, used in logs and errors.
    timeout: seconds remaining; the caller keeps the returned value.
    sleep_time: seconds to sleep on this iteration.

  Raises:
    TestError: when the timeout budget is exhausted.
  """
  timeout -= sleep_time
  if timeout <= 0:
    raise TestError('timeout waiting for condition "%s"' % msg)
  logging.debug('Sleeping for %f seconds waiting for condition "%s"',
                sleep_time, msg)
  time.sleep(sleep_time)
  return timeout
# vars helpers
def get_vars(port):
  """Returns the dict for vars from a vtxxx process. None if not available."""
  try:
    url = 'http://localhost:%d/debug/vars' % int(port)
    f = urllib2.urlopen(url)
    data = f.read()
    f.close()
  except urllib2.URLError:
    return None
  try:
    return json.loads(data)
  except ValueError:
    # Dump the raw payload to help debug malformed responses, then re-raise.
    print data
    raise
def wait_for_vars(name, port, var=None, key=None, value=None, timeout=10.0):
  """Waits for the vars of a process, and optional values.

  Args:
    name: nickname for the process.
    port: process port to look at.
    var: if specified, waits for var in vars.
    key: if specified, waits for vars[var][key]==value.
    value: if key if specified, waits for vars[var][key]==value.
    timeout: how long to wait.
  """
  text = 'waiting for http://localhost:%d/debug/vars of %s' % (port, name)
  if var:
    text += ' value %s' % var
  if key:
    text += ' key %s:%s' % (key, value)
  while True:
    display_text = text
    v = get_vars(port)
    if v:
      if var is None:
        break
      if var in v:
        if key is None:
          break
        if key in v[var]:
          if v[var][key] == value:
            break
          else:
            display_text += ' (current value:%s)' % v[var][key]
        else:
          display_text += ' (no current value)'
      else:
        display_text += ' (%s not in vars)' % var
    else:
      display_text += ' (no vars yet)'
    # wait_step sleeps and raises TestError when the timeout is used up.
    timeout = wait_step(display_text, timeout)
def poll_for_vars(
    name, port, condition_msg, timeout=60.0, condition_fn=None,
    require_vars=False):
  """Polls for debug variables to exist or match specific conditions.

  This function polls in a tight loop, with no sleeps. This is useful for
  variables that are expected to be short-lived (e.g., a 'Done' state
  immediately before a process exits).

  Args:
    name: the name of the process that we're trying to poll vars from.
    port: the port number that we should poll for variables.
    condition_msg: string describing the conditions that we're polling for,
      used for error messaging.
    timeout: number of seconds that we should attempt to poll for.
    condition_fn: a function that takes the debug vars dict as input, and
      returns a truthy value if it matches the success conditions.
    require_vars: True iff we expect the vars to always exist. If
      True, and the vars don't exist, we'll raise a TestError. This
      can be used to differentiate between a timeout waiting for a
      particular condition vs if the process that you're polling has
      already exited.

  Raises:
    TestError: if the conditions aren't met within the given timeout, or
      if vars are required and don't exist.

  Returns:
    dict of debug variables
  """
  start_time = time.time()
  while True:
    if (time.time() - start_time) >= timeout:
      raise TestError(
          'Timed out polling for vars from %s; condition "%s" not met' %
          (name, condition_msg))
    v = get_vars(port)
    if v is None:
      if require_vars:
        raise TestError(
            'Expected vars to exist on %s, but they do not; '
            'process probably exited earlier than expected.' % (name,))
      continue
    if condition_fn is None:
      return v
    elif condition_fn(v):
      return v
def apply_vschema(vschema):
  """Apply a vschema per keyspace via vtctl ApplyVSchema.

  Args:
    vschema: dict mapping keyspace name to its vschema JSON string.
  """
  for k, v in vschema.iteritems():
    fname = os.path.join(environment.tmproot, 'vschema.json')
    with open(fname, 'w') as f:
      f.write(v)
    run_vtctl(['ApplyVSchema', '-vschema_file', fname, k])
def wait_for_tablet_type(tablet_alias, expected_type, timeout=10):
  """Waits for a given tablet's SlaveType to become the expected value.

  Args:
    tablet_alias: Alias of the tablet.
    expected_type: Type of the tablet e.g. "replica".
    timeout: Timeout in seconds.

  Raises:
    TestError: SlaveType did not become expected_type within timeout seconds.
  """
  type_as_int = topodata_pb2.TabletType.Value(expected_type.upper())
  while True:
    if run_vtctl_json(['GetTablet', tablet_alias])['type'] == type_as_int:
      logging.debug('tablet %s went to expected type: %s',
                    tablet_alias, expected_type)
      break
    timeout = wait_step(
        "%s's SlaveType to be %s" % (tablet_alias, expected_type),
        timeout)
def wait_for_replication_pos(tablet_a, tablet_b, timeout=60.0):
  """Waits for tablet B to catch up to the replication position of tablet A.

  Args:
    tablet_a: tablet Object for tablet A.
    tablet_b: tablet Object for tablet B.
    timeout: Timeout in seconds.

  Raises:
    TestError: replication position did not catch up within timeout seconds.
  """
  replication_pos_a = mysql_flavor().master_position(tablet_a)
  while True:
    replication_pos_b = mysql_flavor().master_position(tablet_b)
    if mysql_flavor().position_at_least(replication_pos_b, replication_pos_a):
      break
    timeout = wait_step(
        "%s's replication position to catch up %s's; "
        'currently at: %s, waiting to catch up to: %s' % (
            tablet_b.tablet_alias, tablet_a.tablet_alias, replication_pos_b,
            replication_pos_a),
        timeout, sleep_time=0.1)
# Save the first running instance of vtgate. It is saved when 'start'
# is called, and cleared when kill is called.
vtgate = None
class VtGate(object):
  """VtGate object represents a vtgate process."""

  def __init__(self, port=None, mysql_server=False):
    """Creates the Vtgate instance and reserve the ports if necessary."""
    self.port = port or environment.reserve_ports(1)
    if protocols_flavor().vtgate_protocol() == 'grpc':
      self.grpc_port = environment.reserve_ports(1)
    self.proc = None
    self.mysql_port = None
    if mysql_server:
      self.mysql_port = environment.reserve_ports(1)

  def start(self, cell='test_nj', retry_count=2,
            topo_impl=None, cache_ttl='1s',
            extra_args=None, tablets=None,
            tablet_types_to_wait='MASTER,REPLICA',
            l2vtgates=None):
    """Start vtgate. Saves it into the global vtgate variable if not set yet."""
    args = environment.binary_args('vtgate') + [
        '-port', str(self.port),
        '-cell', cell,
        '-retry-count', str(retry_count),
        '-log_dir', environment.vtlogroot,
        '-srv_topo_cache_ttl', cache_ttl,
        '-srv_topo_cache_refresh', cache_ttl,
        '-tablet_protocol', protocols_flavor().tabletconn_protocol(),
        '-stderrthreshold', get_log_level(),
        '-normalize_queries',
        '-gateway_implementation', vtgate_gateway_flavor().flavor(),
    ]
    args.extend(vtgate_gateway_flavor().flags(cell=cell, tablets=tablets))
    if l2vtgates:
      args.extend(['-l2vtgate_addrs', ','.join(l2vtgates)])
    if tablet_types_to_wait:
      args.extend(['-tablet_types_to_wait', tablet_types_to_wait])
    if protocols_flavor().vtgate_protocol() == 'grpc':
      args.extend(['-grpc_port', str(self.grpc_port)])
      args.extend(['-grpc_max_message_size',
                   str(environment.grpc_max_message_size)])
    if protocols_flavor().service_map():
      args.extend(['-service_map', ','.join(protocols_flavor().service_map())])
    if topo_impl:
      args.extend(['-topo_implementation', topo_impl])
    else:
      args.extend(environment.topo_server().flags())
    if extra_args:
      args.extend(extra_args)
    if self.mysql_port:
      args.extend(['-mysql_server_port', str(self.mysql_port)])
    self.proc = run_bg(args)
    # We use a longer timeout here, as we may be waiting for the initial
    # state of a few tablets.
    wait_for_vars('vtgate', self.port, timeout=20.0)
    global vtgate
    if not vtgate:
      vtgate = self

  def kill(self):
    """Terminates the vtgate process, and waits for it to exit.

    If this process is the one saved in the global vtgate variable,
    clears it.

    Note if the test is using just one global vtgate process, and
    starting it with the test, and killing it at the end of the test,
    there is no need to call this kill() method,
    utils.kill_sub_processes() will do a good enough job.
    """
    if self.proc is None:
      return
    kill_sub_process(self.proc, soft=True)
    self.proc.wait()
    self.proc = None
    global vtgate
    if vtgate == self:
      vtgate = None

  def addr(self):
    """Returns the address of the vtgate process, for web access."""
    return 'localhost:%d' % self.port

  def rpc_endpoint(self, python=False):
    """Returns the protocol and endpoint to use for RPCs."""
    if python:
      protocol = protocols_flavor().vtgate_python_protocol()
    else:
      protocol = protocols_flavor().vtgate_protocol()
    if protocol == 'grpc':
      return protocol, 'localhost:%d' % self.grpc_port
    return protocol, self.addr()

  def get_status(self):
    """Returns the status page for this process."""
    return get_status(self.port)

  def get_vars(self):
    """Returns the vars for this process."""
    return get_vars(self.port)

  def get_vschema(self):
    """Returns the used vschema for this process."""
    return urllib2.urlopen('http://localhost:%d/debug/vschema' %
                           self.port).read()

  @contextlib.contextmanager
  def create_connection(self):
    """Connects to vtgate and allows to create a cursor to execute queries.

    This method is preferred over the two other methods ("vtclient", "execute")
    to execute a query in tests.

    Yields:
      A vtgate connection object.

    Example:
      with self.vtgate.create_connection() as conn:
        c = conn.cursor(keyspace=KEYSPACE, shards=[SHARD], tablet_type='master',
                        writable=self.writable)
        c.execute('SELECT * FROM buffer WHERE id = :id', {'id': 1})
    """
    protocol, endpoint = self.rpc_endpoint(python=True)
    # Use a very long timeout to account for slow tests.
    conn = vtgate_client.connect(protocol, endpoint, 600.0)
    yield conn
    conn.close()

  @contextlib.contextmanager
  def write_transaction(self, **kwargs):
    """Begins a write transaction and commits automatically.

    Note that each transaction contextmanager will create a new connection.

    Args:
      **kwargs: vtgate cursor args. See vtgate_cursor.VTGateCursor.

    Yields:
      A writable vtgate cursor.

    Example:
      with utils.vtgate.write_transaction(keyspace=KEYSPACE, shards=[SHARD],
                                          tablet_type='master') as tx:
        tx.execute('INSERT INTO table1 (id, msg) VALUES (:id, :msg)',
                   {'id': 1, 'msg': 'msg1'})
    """
    with self.create_connection() as conn:
      cursor = conn.cursor(writable=True, **kwargs)
      cursor.begin()
      yield cursor
      cursor.commit()

  def vtclient(self, sql, keyspace=None, tablet_type='master',
               bindvars=None, streaming=False,
               verbose=False, raise_on_error=True, json_output=False):
    """Uses the vtclient binary to send a query to vtgate."""
    protocol, addr = self.rpc_endpoint()
    args = environment.binary_args('vtclient') + [
        '-server', addr,
        '-vtgate_protocol', protocol]
    if json_output:
      args.append('-json')
    if bindvars:
      args.extend(['-bind_variables', json.dumps(bindvars)])
    if streaming:
      args.append('-streaming')
    if keyspace:
      args.extend(['-target', '%s@%s' % (keyspace, tablet_type)])
    else:
      args.extend(['-target', '@'+tablet_type])
    if verbose:
      args.append('-alsologtostderr')
    args.append(sql)
    out, err = run(args, raise_on_error=raise_on_error, trap_output=True)
    if json_output:
      return json.loads(out), err
    return out, err

  def execute(self, sql, tablet_type='master', bindvars=None,
              execute_options=None):
    """Uses 'vtctl VtGateExecute' to execute a command.

    Args:
      sql: the command to execute.
      tablet_type: the tablet_type to use.
      bindvars: a dict of bind variables.
      execute_options: proto-encoded ExecuteOptions object.

    Returns:
      the result of running vtctl command.
    """
    _, addr = self.rpc_endpoint()
    args = ['VtGateExecute', '-json',
            '-server', addr,
            '-target', '@'+tablet_type]
    if bindvars:
      args.extend(['-bind_variables', json.dumps(bindvars)])
    if execute_options:
      args.extend(['-options', execute_options])
    args.append(sql)
    return run_vtctl_json(args)

  def execute_shards(self, sql, keyspace, shards, tablet_type='master',
                     bindvars=None):
    """Uses 'vtctl VtGateExecuteShards' to execute a command."""
    _, addr = self.rpc_endpoint()
    args = ['VtGateExecuteShards', '-json',
            '-server', addr,
            '-keyspace', keyspace,
            '-shards', shards,
            '-tablet_type', tablet_type]
    if bindvars:
      args.extend(['-bind_variables', json.dumps(bindvars)])
    args.append(sql)
    return run_vtctl_json(args)

  def split_query(self, sql, keyspace, split_count, bindvars=None):
    """Uses 'vtctl VtGateSplitQuery' to cut a query up in chunks."""
    _, addr = self.rpc_endpoint()
    args = ['VtGateSplitQuery',
            '-server', addr,
            '-keyspace', keyspace,
            '-split_count', str(split_count)]
    if bindvars:
      args.extend(['-bind_variables', json.dumps(bindvars)])
    args.append(sql)
    return run_vtctl_json(args)

  def wait_for_endpoints(self, name, count, timeout=20.0, var=None):
    """waits until vtgate gets endpoints.

    Args:
      name: name of the endpoint, in the form: 'keyspace.shard.type'.
      count: how many endpoints to wait for.
      timeout: how long to wait.
      var: name of the variable to use. if None, defaults to the gateway's.
    """
    wait_for_vars('vtgate', self.port,
                  var=var or vtgate_gateway_flavor().connection_count_vars(),
                  key=name, value=count, timeout=timeout)

  def verify_no_endpoint(self, name):
    """verifies the vtgate doesn't have any enpoint of the given name.

    Args:
      name: name of the endpoint, in the form: 'keyspace.shard.type'.
    """
    def condition(v):
      return (v.get(vtgate_gateway_flavor().connection_count_vars())
              .get(name, None)) is None

    # Fix: poll this vtgate under its own process name. The previous
    # 'l2vtgate' label was a copy/paste from the l2vtgate wrapper and
    # produced misleading timeout/error messages.
    poll_for_vars('vtgate', self.port,
                  'no endpoint named ' + name,
                  timeout=5.0,
                  condition_fn=condition)
# vtctl helpers
# The modes are not all equivalent, and we don't really thrive for it.
# If a client needs to rely on vtctl's command line behavior, make
# sure to use mode=utils.VTCTL_VTCTL
VTCTL_AUTO = 0
VTCTL_VTCTL = 1
VTCTL_VTCTLCLIENT = 2
VTCTL_RPC = 3


def run_vtctl(clargs, auto_log=False, expect_fail=False,
              mode=VTCTL_AUTO, **kwargs):
  """Runs a vtctl command through one of the supported transports.

  Args:
    clargs: vtctl arguments (list, or a single string for VTCTL_VTCTL).
    auto_log: if True, log the command at the test log level.
    expect_fail: if True, the command is expected to fail (only honored
        by the VTCTL_VTCTL mode).
    mode: one of VTCTL_AUTO, VTCTL_VTCTL, VTCTL_VTCTLCLIENT, VTCTL_RPC.
        VTCTL_AUTO picks VTCTL_RPC when a vtctld is running and failure is
        not expected, VTCTL_VTCTL otherwise.
    **kwargs: passed through to the underlying runner.

  Returns:
    A (result, '') pair, or run()'s result for VTCTL_VTCTL.

  Raises:
    Exception: if mode is not one of the known modes.
  """
  if mode == VTCTL_AUTO:
    if not expect_fail and vtctld:
      mode = VTCTL_RPC
    else:
      mode = VTCTL_VTCTL
  if mode == VTCTL_VTCTL:
    return run_vtctl_vtctl(clargs, auto_log=auto_log,
                           expect_fail=expect_fail, **kwargs)
  elif mode == VTCTL_VTCTLCLIENT:
    result = vtctld.vtctl_client(clargs)
    return result, ''
  elif mode == VTCTL_RPC:
    if auto_log:
      logging.debug('vtctl: %s', ' '.join(clargs))
    result = vtctl_client.execute_vtctl_command(vtctld_connection, clargs,
                                                info_to_debug=True,
                                                action_timeout=120)
    return result, ''
  # Bug fix: interpolate the mode into the message. Passing it as a second
  # Exception argument rendered the message as a tuple.
  raise Exception('Unknown mode: %s' % mode)
def run_vtctl_vtctl(clargs, auto_log=False, expect_fail=False,
                    **kwargs):
  """Runs the vtctl binary directly with the standard test flags.

  Args:
    clargs: vtctl arguments, either a list or a single string.
    auto_log: if True, pass --stderrthreshold matching the test log level.
    expect_fail: if True, run the command through run_fail.
    **kwargs: forwarded to run()/run_fail().

  Returns:
    Whatever run()/run_fail() returns for the assembled command.
  """
  args = environment.binary_args('vtctl') + [
      '-log_dir', environment.vtlogroot,
      '-enable_queries',
  ]
  args += environment.topo_server().flags()
  args += ['-tablet_manager_protocol',
           protocols_flavor().tablet_manager_protocol()]
  args += ['-tablet_protocol', protocols_flavor().tabletconn_protocol()]
  args += ['-throttler_client_protocol',
           protocols_flavor().throttler_client_protocol()]
  args += ['-vtgate_protocol', protocols_flavor().vtgate_protocol()]
  # TODO(b/26388813): Remove the next two lines once vtctl WaitForDrain is
  # integrated in the vtctl MigrateServed* commands.
  args += ['--wait_for_drain_sleep_rdonly', '0s']
  args += ['--wait_for_drain_sleep_replica', '0s']
  if auto_log:
    args.append('--stderrthreshold=%s' % get_log_level())
  # A string clargs is appended as-is to a flat shell command line.
  if isinstance(clargs, str):
    cmd = ' '.join(args) + ' ' + clargs
  else:
    cmd = args + clargs
  runner = run_fail if expect_fail else run
  return runner(cmd, **kwargs)
def run_vtctl_json(clargs, auto_log=True):
  """Runs the provided vtctl command and returns its stdout parsed as JSON."""
  out, _ = run_vtctl(clargs, trap_output=True, auto_log=auto_log)
  return json.loads(out)
def get_log_level():
  """Maps the test verbosity to a --stderrthreshold value string."""
  # -vv -> '0' (most verbose), -v -> '1', default -> '2'.
  return {2: '0', 1: '1'}.get(options.verbose, '2')
# vtworker helpers
def run_vtworker(clargs, auto_log=False, expect_fail=False, **kwargs):
  """Runs a vtworker process, returning the stdout and stderr."""
  cmd, _, _ = _get_vtworker_cmd(clargs, auto_log)
  runner = run_fail if expect_fail else run
  return runner(cmd, **kwargs)
def run_vtworker_bg(clargs, auto_log=False, **kwargs):
  """Starts a background vtworker process.

  Returns:
    A (proc, port, rpc_port) tuple for the running vtworker.
  """
  cmd, port, rpc_port = _get_vtworker_cmd(clargs, auto_log)
  proc = run_bg(cmd, **kwargs)
  wait_for_vars('vtworker', port)
  return proc, port, rpc_port
def _get_vtworker_cmd(clargs, auto_log=False):
  """Assembles the command that is needed to run a vtworker.

  Args:
    clargs: Command line arguments passed to vtworker.
    auto_log: If true, set --stderrthreshold according to the test log level.

  Returns:
    cmd - list of cmd arguments, can be passed to any `run`-like functions
    port - int with the port number that the vtworker is running with
    rpc_port - int with the port number of the RPC interface
  """
  port = environment.reserve_ports(1)
  args = environment.binary_args('vtworker') + [
      '-log_dir', environment.vtlogroot,
      '-port', str(port),
      '-executefetch_retry_time', '1s',
      '-tablet_manager_protocol',
      protocols_flavor().tablet_manager_protocol(),
      '-tablet_protocol', protocols_flavor().tabletconn_protocol(),
  ]
  args.extend(environment.topo_server().flags())
  service_map = protocols_flavor().service_map()
  if service_map:
    args.extend(['-service_map', ','.join(service_map)])
  # With gRPC the worker serves RPCs on a dedicated, separately
  # reserved port; otherwise RPCs share the web port.
  if protocols_flavor().vtworker_client_protocol() == 'grpc':
    rpc_port = environment.reserve_ports(1)
    args.extend(['-grpc_port', str(rpc_port)])
  else:
    rpc_port = port
  if auto_log:
    args.append('--stderrthreshold=%s' % get_log_level())
  return args + clargs, port, rpc_port
# vtworker client helpers
def run_vtworker_client_bg(args, rpc_port):
  """Runs vtworkerclient to execute a command on a remote vtworker.

  Args:
    args: Full vtworker command.
    rpc_port: Port number.

  Returns:
    proc: process returned by subprocess.Popen
  """
  client_cmd = environment.binary_args('vtworkerclient') + [
      '-log_dir', environment.vtlogroot,
      '-vtworker_client_protocol',
      protocols_flavor().vtworker_client_protocol(),
      '-server', 'localhost:%d' % rpc_port,
      '-stderrthreshold', get_log_level(),
  ] + args
  return run_bg(client_cmd)
def run_automation_server(auto_log=False):
  """Starts a background automation_server process.

  Args:
    auto_log: True to log.

  Returns:
    A (proc, rpc_port) tuple; rpc_port is the RPC interface port.
  """
  rpc_port = environment.reserve_ports(1)
  flavor = protocols_flavor()
  server_cmd = environment.binary_args('automation_server') + [
      '-log_dir', environment.vtlogroot,
      '-port', str(rpc_port),
      '-vtctl_client_protocol', flavor.vtctl_client_protocol(),
      '-vtworker_client_protocol', flavor.vtworker_client_protocol(),
  ]
  if auto_log:
    server_cmd.append('--stderrthreshold=%s' % get_log_level())
  return run_bg(server_cmd), rpc_port
# mysql helpers
def mysql_query(uid, dbname, query):
  """Runs a query directly against a tablet's MySQL via its unix socket.

  Args:
    uid: tablet uid, used to locate the mysql.sock path.
    dbname: database to connect to.
    query: SQL to execute.

  Returns:
    cursor.fetchall() rows; the connection is always closed.
  """
  conn = MySQLdb.Connect(
      user='vt_dba',
      unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid),
      db=dbname)
  try:
    cursor = conn.cursor()
    cursor.execute(query)
    return cursor.fetchall()
  finally:
    # Bug fix: close even when execute() raises; previously the
    # connection leaked on query errors.
    conn.close()
def mysql_write_query(uid, dbname, query):
  """Runs a write query, in an explicit transaction, on a tablet's MySQL.

  Args:
    uid: tablet uid, used to locate the mysql.sock path.
    dbname: database to connect to.
    query: SQL to execute inside begin/commit.

  Returns:
    cursor.fetchall() rows after commit; the connection is always closed.
  """
  conn = MySQLdb.Connect(
      user='vt_dba',
      unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid),
      db=dbname)
  try:
    cursor = conn.cursor()
    conn.begin()
    cursor.execute(query)
    conn.commit()
    return cursor.fetchall()
  finally:
    # Bug fix: close even when begin/execute/commit raises; previously the
    # connection leaked on errors.
    conn.close()
def check_db_var(uid, name, value):
  """Asserts a MySQL server variable has the expected value on tablet uid.

  Args:
    uid: tablet uid, used to locate the mysql.sock path.
    name: variable name for 'show variables like'.
    value: expected variable value.

  Raises:
    TestError: if the variable is not set to the expected value.
  """
  conn = MySQLdb.Connect(
      user='vt_dba',
      unix_socket='%s/vt_%010d/mysql.sock' % (environment.vtdataroot, uid))
  try:
    cursor = conn.cursor()
    cursor.execute("show variables like '%s'" % name)
    row = cursor.fetchone()
    if row != (name, value):
      raise TestError('variable not set correctly', name, row)
  finally:
    # Bug fix: the original only closed the connection on success, leaking
    # it whenever the check raised.
    conn.close()
def check_db_read_only(uid):
  """Asserts the tablet's MySQL has read_only=ON."""
  return check_db_var(uid, 'read_only', 'ON')
def check_db_read_write(uid):
  """Asserts the tablet's MySQL has read_only=OFF."""
  return check_db_var(uid, 'read_only', 'OFF')
def wait_db_read_only(uid):
  """Waits (up to 3 attempts, 1s apart) for the db to become read-only.

  Raises:
    TestError: the last check error if the db never became read-only.
  """
  last_error = None
  for _ in xrange(3):
    try:
      check_db_read_only(uid)
      return
    except TestError as e:
      # Remember the error explicitly: the previous `raise e` after the
      # loop relied on the except variable leaking out of the handler,
      # which only works in Python 2 and is fragile.
      last_error = e
      logging.warning('wait_db_read_only: %s', str(e))
      time.sleep(1.0)
  raise last_error
def check_srv_keyspace(cell, keyspace, expected, keyspace_id_type='uint64',
                       sharding_column_name='keyspace_id'):
  """Checks a cell's SrvKeyspace against an expected text description.

  Renders each served partition as a 'Partitions(<type>): <ranges>' line
  (sorted by tablet type) and compares the result with `expected`, then
  checks the sharding column name and type.

  Args:
    cell: cell to query.
    keyspace: keyspace to query.
    expected: expected rendering of the partitions.
    keyspace_id_type: expected sharding column type string (as mapped by
        keyrange_constants.PROTO3_KIT_TO_STRING).
    sharding_column_name: expected sharding column name.

  Raises:
    Exception: on any mismatch.
  """
  ks = run_vtctl_json(['GetSrvKeyspace', cell, keyspace])
  result = ''
  pmap = {}
  for partition in ks['partitions']:
    tablet_type = topodata_pb2.TabletType.Name(partition['served_type']).lower()
    # The proto still uses the legacy name 'batch' for 'rdonly'.
    if tablet_type == 'batch':
      tablet_type = 'rdonly'
    r = 'Partitions(%s):' % tablet_type
    for shard in partition['shard_references']:
      s = ''
      e = ''
      if 'key_range' in shard and shard['key_range']:
        if 'start' in shard['key_range']:
          s = shard['key_range']['start']
          # Key range boundaries arrive base64-encoded; render them as hex.
          s = base64.b64decode(s).encode('hex') if s else ''
        if 'end' in shard['key_range']:
          e = shard['key_range']['end']
          e = base64.b64decode(e).encode('hex') if e else ''
      r += ' %s-%s' % (s, e)
    pmap[tablet_type] = r + '\n'
  for tablet_type in sorted(pmap):
    result += pmap[tablet_type]
  logging.debug('Cell %s keyspace %s has data:\n%s', cell, keyspace, result)
  if expected != result:
    raise Exception(
        'Mismatch in srv keyspace for cell %s keyspace %s, expected:\n%'
        's\ngot:\n%s' % (
            cell, keyspace, expected, result))
  if sharding_column_name != ks.get('sharding_column_name'):
    raise Exception('Got wrong sharding_column_name in SrvKeyspace: %s' %
                    str(ks))
  if keyspace_id_type != keyrange_constants.PROTO3_KIT_TO_STRING[
      ks.get('sharding_column_type')]:
    raise Exception('Got wrong sharding_column_type in SrvKeyspace: %s' %
                    str(ks))
def check_shard_query_service(
    testcase, shard_name, tablet_type, expected_state):
  """Checks DisableQueryService in the shard record's TabletControlMap."""
  # Query service is assumed enabled unless a TabletControl entry for this
  # tablet type explicitly sets DisableQueryService.
  tablet_controls = run_vtctl_json(
      ['GetShard', shard_name]).get('tablet_controls')
  query_service_enabled = not any(
      tc['tablet_type'] == tablet_type and
      tc.get('disable_query_service', False)
      for tc in tablet_controls or [])
  testcase.assertEqual(
      query_service_enabled,
      expected_state,
      'shard %s does not have the correct query service state: '
      'got %s but expected %s' %
      (shard_name, query_service_enabled, expected_state)
  )
def check_shard_query_services(
    testcase, shard_names, tablet_type, expected_state):
  """Applies check_shard_query_service to every shard in shard_names."""
  for name in shard_names:
    check_shard_query_service(testcase, name, tablet_type, expected_state)
def check_tablet_query_service(
    testcase, tablet, serving, tablet_control_disabled):
  """Check that the query service is enabled or disabled on the tablet.

  Args:
    testcase: the unittest.TestCase doing the assertions.
    tablet: tablet object to check.
    serving: True iff the tablet is expected to be SERVING.
    tablet_control_disabled: True iff the status page is expected to show
        that TabletControl disabled the query service.
  """
  tablet_vars = get_vars(tablet.port)
  if serving:
    expected_state = 'SERVING'
  else:
    expected_state = 'NOT_SERVING'
  testcase.assertEqual(
      tablet_vars['TabletStateName'], expected_state,
      'tablet %s (%s/%s, %s) is not in the right serving state: got %s'
      ' expected %s' % (tablet.tablet_alias, tablet.keyspace, tablet.shard,
                        tablet.tablet_type,
                        tablet_vars['TabletStateName'], expected_state))
  status = tablet.get_status()
  # The status page includes this marker when TabletControl disabled the
  # query service.
  tc_dqs = 'Query Service disabled: TabletControl.DisableQueryService set'
  if tablet_control_disabled:
    testcase.assertIn(tc_dqs, status)
  else:
    testcase.assertNotIn(tc_dqs, status)
  if tablet.tablet_type == 'rdonly':
    # Run RunHealthCheck to be sure the tablet doesn't change its serving state.
    run_vtctl(['RunHealthCheck', tablet.tablet_alias],
              auto_log=True)
    tablet_vars = get_vars(tablet.port)
    testcase.assertEqual(
        tablet_vars['TabletStateName'], expected_state,
        'tablet %s is not in the right serving state after health check: '
        'got %s expected %s' %
        (tablet.tablet_alias, tablet_vars['TabletStateName'], expected_state))
def check_tablet_query_services(
    testcase, tablets, serving, tablet_control_disabled):
  """Applies check_tablet_query_service to each tablet in tablets."""
  for t in tablets:
    check_tablet_query_service(testcase, t, serving, tablet_control_disabled)
def get_status(port):
  """Fetches and returns the status page contents from localhost:port."""
  return urllib2.urlopen(
      'http://localhost:%d%s' % (port, environment.status_url)).read()
def curl(url, request=None, data=None, background=False, retry_timeout=0,
         **kwargs):
  """Runs the curl binary against url, optionally in the background.

  Args:
    url: the URL to fetch.
    request: optional HTTP method for --request.
    data: optional payload for --data.
    background: if True, run in the background and return the process.
    retry_timeout: if > 0, retry failed foreground runs via wait_step.
    **kwargs: forwarded to run()/run_bg().

  Returns:
    The background process, or the result of the foreground run.
  """
  cmd = [environment.curl_bin, '--silent', '--no-buffer', '--location']
  if not background:
    cmd.append('--show-error')
  if request:
    cmd += ['--request', request]
  if data:
    cmd += ['--data', data]
  cmd.append(url)
  if background:
    return run_bg(cmd, **kwargs)
  if retry_timeout > 0:
    # Keep retrying; wait_step is responsible for ending the loop when the
    # timeout is exhausted.
    while True:
      try:
        return run(cmd, trap_output=True, **kwargs)
      except TestError as e:
        retry_timeout = wait_step(
            'cmd: %s, error: %s' % (str(cmd), str(e)), retry_timeout)
  return run(cmd, trap_output=True, **kwargs)
class VtctldError(Exception):
  """Error raised for vtctld-related failures."""
  pass
# Save the first running vtctld instance, and an RPC connection to it,
# so we can use it to run remote vtctl commands (set in Vtctld.start).
vtctld = None
vtctld_connection = None
class Vtctld(object):
  """Wrapper for a vtctld process (web UI plus remote vtctl RPC server)."""
  def __init__(self):
    # Reserve the web port; an extra gRPC port is reserved when the vtctl
    # client protocol requires a dedicated one.
    self.port = environment.reserve_ports(1)
    self.schema_change_dir = os.path.join(
        environment.tmproot, 'schema_change_test')
    if protocols_flavor().vtctl_client_protocol() == 'grpc':
      self.grpc_port = environment.reserve_ports(1)
  def start(self, enable_schema_change_dir=False, extra_flags=None):
    """Starts vtctld, waits for it to serve vars, and returns the process.

    The first started instance is saved into the module globals vtctld and
    vtctld_connection so vtctl commands can be executed remotely.
    """
    # Note the vtctld2 web dir is set to 'dist', which is populated
    # when a toplevel 'make build_web' is run. This is meant to test
    # the development version of the UI. The real checked-in app is in
    # app/.
    args = environment.binary_args('vtctld') + [
        '-enable_queries',
        '-cell', 'test_nj',
        '-web_dir', environment.vttop + '/web/vtctld',
        '-web_dir2', environment.vttop + '/web/vtctld2/dist',
        '--log_dir', environment.vtlogroot,
        '--port', str(self.port),
        '-tablet_manager_protocol',
        protocols_flavor().tablet_manager_protocol(),
        '-tablet_protocol', protocols_flavor().tabletconn_protocol(),
        '-throttler_client_protocol',
        protocols_flavor().throttler_client_protocol(),
        '-vtgate_protocol', protocols_flavor().vtgate_protocol(),
        '-workflow_manager_init',
        '-workflow_manager_use_election',
        '-schema_swap_delay_between_errors', '1s',
    ] + environment.topo_server().flags()
    if extra_flags:
      args += extra_flags
    # TODO(b/26388813): Remove the next two lines once vtctl WaitForDrain is
    # integrated in the vtctl MigrateServed* commands.
    args.extend(['--wait_for_drain_sleep_rdonly', '0s'])
    args.extend(['--wait_for_drain_sleep_replica', '0s'])
    if enable_schema_change_dir:
      args += [
          '--schema_change_dir', self.schema_change_dir,
          '--schema_change_controller', 'local',
          '--schema_change_check_interval', '1',
      ]
    if protocols_flavor().service_map():
      args.extend(['-service_map', ','.join(protocols_flavor().service_map())])
    if protocols_flavor().vtctl_client_protocol() == 'grpc':
      args.extend(['-grpc_port', str(self.grpc_port)])
    # NOTE(review): these file handles are never explicitly closed; the
    # child process inherits them for the duration of the test run.
    stdout_fd = open(os.path.join(environment.tmproot, 'vtctld.stdout'), 'w')
    stderr_fd = open(os.path.join(environment.tmproot, 'vtctld.stderr'), 'w')
    self.proc = run_bg(args, stdout=stdout_fd, stderr=stderr_fd)
    # wait for the process to listen to RPC
    timeout = 30
    while True:
      v = get_vars(self.port)
      if v:
        break
      if self.proc.poll() is not None:
        raise TestError('vtctld died while starting')
      timeout = wait_step('waiting for vtctld to start', timeout,
                          sleep_time=0.2)
    # save the running instance so vtctl commands can be remote executed now
    global vtctld, vtctld_connection
    if not vtctld:
      vtctld = self
      protocol, endpoint = self.rpc_endpoint(python=True)
      vtctld_connection = vtctl_client.connect(protocol, endpoint, 30)
    return self.proc
  def rpc_endpoint(self, python=False):
    """RPC endpoint to vtctld.

    The RPC endpoint may differ from the webinterface URL e.g. because gRPC
    requires a dedicated port.

    Args:
      python: boolean, True iff this is for access with Python (as opposed to
          Go).

    Returns:
      protocol - string e.g. 'grpc'
      endpoint - string e.g. 'localhost:15001'
    """
    if python:
      protocol = protocols_flavor().vtctl_python_client_protocol()
    else:
      protocol = protocols_flavor().vtctl_client_protocol()
    rpc_port = self.port
    if protocol == 'grpc':
      rpc_port = self.grpc_port
    return (protocol, '%s:%d' % (socket.getfqdn(), rpc_port))
  def process_args(self):
    """Returns the flags other processes need to reach this vtctld."""
    return ['-vtctld_addr', 'http://localhost:%d/' % self.port]
  def vtctl_client(self, args):
    """Runs the vtctlclient binary against this vtctld; returns its stdout."""
    # Map the test verbosity onto vtctlclient's stderr threshold.
    if options.verbose == 2:
      log_level = 'INFO'
    elif options.verbose == 1:
      log_level = 'WARNING'
    else:
      log_level = 'ERROR'
    protocol, endpoint = self.rpc_endpoint()
    out, _ = run(
        environment.binary_args('vtctlclient') +
        ['-vtctl_client_protocol', protocol,
         '-server', endpoint,
         '-stderrthreshold', log_level] + args,
        trap_output=True)
    return out
def uint64_to_hex(integer):
  """Returns the hex representation of an int treated as a 64-bit unsigned int.

  The result is padded by zeros if necessary to fill a 16 character string.
  Useful for converting keyspace ids integers.

  Example:
    uint64_to_hex(1) == "0000000000000001"
    uint64_to_hex(0xDEADBEAF) == "00000000DEADBEAF"
    uint64_to_hex(0xDEADBEAFDEADBEAFDEADBEAF) raises an out of range exception.

  Args:
    integer: the value to print.

  Returns:
    String with the hex representation.

  Raises:
    ValueError: if the integer is out of range.
  """
  # Doc fix: the example previously showed "00000000DEADBEEF" for input
  # 0xDEADBEAF; '%016X' % 0xDEADBEAF actually yields "00000000DEADBEAF".
  if integer > (1 << 64) - 1 or integer < 0:
    raise ValueError('Integer out of range: %d' % integer)
  return '%016X' % integer
# ---------------------------------------------------------------------------
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Handles all requests relating to volumes + cinder.
"""
import copy
import sys
from cinderclient import client as cinder_client
from cinderclient import exceptions as cinder_exception
from cinderclient import service_catalog
from oslo.config import cfg
import six.moves.urllib.parse as urlparse
from nova import availability_zones as az
from nova import exception
from nova.i18n import _
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova.openstack.common import strutils
# Options controlling how Nova locates and talks to the Cinder volume API.
cinder_opts = [
    cfg.StrOpt('catalog_info',
            default='volume:cinder:publicURL',
            help='Info to match when looking for cinder in the service '
                 'catalog. Format is: separated values of the form: '
                 '<service_type>:<service_name>:<endpoint_type>',
            deprecated_group='DEFAULT',
            deprecated_name='cinder_catalog_info'),
    cfg.StrOpt('endpoint_template',
               help='Override service catalog lookup with template for cinder '
                    'endpoint e.g. http://localhost:8776/v1/%(project_id)s',
               deprecated_group='DEFAULT',
               deprecated_name='cinder_endpoint_template'),
    cfg.StrOpt('os_region_name',
               help='Region name of this node',
               deprecated_group='DEFAULT',
               deprecated_name='os_region_name'),
    cfg.StrOpt('ca_certificates_file',
               help='Location of ca certificates file to use for cinder '
                    'client requests.',
               deprecated_group='DEFAULT',
               deprecated_name='cinder_ca_certificates_file'),
    cfg.IntOpt('http_retries',
               default=3,
               help='Number of cinderclient retries on failed http calls',
               deprecated_group='DEFAULT',
               deprecated_name='cinder_http_retries'),
    cfg.IntOpt('http_timeout',
               help='HTTP inactivity timeout (in seconds)',
               deprecated_group='DEFAULT',
               deprecated_name='cinder_http_timeout'),
    cfg.BoolOpt('api_insecure',
                default=False,
                help='Allow to perform insecure SSL requests to cinder',
                deprecated_group='DEFAULT',
                deprecated_name='cinder_api_insecure'),
    cfg.BoolOpt('cross_az_attach',
                default=True,
                help='Allow attach between instance and volume in different '
                     'availability zones.',
                deprecated_group='DEFAULT',
                deprecated_name='cinder_cross_az_attach'),
]
CONF = cfg.CONF
# cinder_opts options in the DEFAULT group were deprecated in Juno
CONF.register_opts(cinder_opts, group='cinder')
LOG = logging.getLogger(__name__)
# Cached cinder endpoint URL; populated by get_cinder_client_version().
CINDER_URL = None
def cinderclient(context):
    """Builds a cinderclient authenticated from the request context.

    Uses the module-level CINDER_URL, which is populated by
    get_cinder_client_version() on first use.
    """
    global CINDER_URL
    version = get_cinder_client_version(context)
    c = cinder_client.Client(version,
                             context.user_id,
                             context.auth_token,
                             project_id=context.project_id,
                             auth_url=CINDER_URL,
                             insecure=CONF.cinder.api_insecure,
                             retries=CONF.cinder.http_retries,
                             timeout=CONF.cinder.http_timeout,
                             cacert=CONF.cinder.ca_certificates_file)
    # noauth extracts user_id:project_id from auth_token
    c.client.auth_token = context.auth_token or '%s:%s' % (context.user_id,
                                                           context.project_id)
    c.client.management_url = CINDER_URL
    return c
def _untranslate_volume_summary_view(context, vol):
    """Maps keys for volumes summary view."""
    d = {
        'id': vol.id,
        'status': vol.status,
        'size': vol.size,
        'availability_zone': vol.availability_zone,
        'created_at': vol.created_at,
        # TODO(jdg): The calling code expects attach_time and mountpoint to
        # be set; keep empty defaults until callers are more defensive.
        'attach_time': '',
        'mountpoint': '',
    }
    if vol.attachments:
        attachment = vol.attachments[0]
        d['attach_status'] = 'attached'
        d['instance_uuid'] = attachment['server_id']
    else:
        d['attach_status'] = 'detached'
    # NOTE(dzyu) volume(cinder) v2 API uses 'name' instead of 'display_name',
    # and use 'description' instead of 'display_description' for volume.
    if hasattr(vol, 'display_name'):
        d['display_name'] = vol.display_name
        d['display_description'] = vol.display_description
    else:
        d['display_name'] = vol.name
        d['display_description'] = vol.description
    # TODO(jdg): Information may be lost in this translation
    d['volume_type_id'] = vol.volume_type
    d['snapshot_id'] = vol.snapshot_id
    d['bootable'] = strutils.bool_from_string(vol.bootable)
    d['volume_metadata'] = {key: value
                            for key, value in vol.metadata.items()}
    if hasattr(vol, 'volume_image_metadata'):
        d['volume_image_metadata'] = copy.deepcopy(vol.volume_image_metadata)
    return d
def _untranslate_snapshot_summary_view(context, snapshot):
    """Maps keys for snapshots summary view."""
    d = {
        'id': snapshot.id,
        'status': snapshot.status,
        'progress': snapshot.progress,
        'size': snapshot.size,
        'created_at': snapshot.created_at,
        'volume_id': snapshot.volume_id,
        'project_id': snapshot.project_id,
        'volume_size': snapshot.size,
    }
    # NOTE(dzyu) volume(cinder) v2 API uses 'name' instead of 'display_name',
    # 'description' instead of 'display_description' for snapshot.
    if hasattr(snapshot, 'display_name'):
        d['display_name'] = snapshot.display_name
        d['display_description'] = snapshot.display_description
    else:
        d['display_name'] = snapshot.name
        d['display_description'] = snapshot.description
    return d
def translate_volume_exception(method):
    """Transforms the exception for the volume but keeps its traceback intact.

    Decorator for methods of the form method(self, ctx, volume_id, ...):
    cinderclient exceptions are translated into Nova exceptions, re-raised
    with the original traceback.
    """
    def wrapper(self, ctx, volume_id, *args, **kwargs):
        try:
            res = method(self, ctx, volume_id, *args, **kwargs)
        except cinder_exception.ClientException:
            exc_type, exc_value, exc_trace = sys.exc_info()
            if isinstance(exc_value, cinder_exception.NotFound):
                exc_value = exception.VolumeNotFound(volume_id=volume_id)
            elif isinstance(exc_value, cinder_exception.BadRequest):
                exc_value = exception.InvalidInput(reason=exc_value.message)
            # Python 2 three-expression raise: re-raise the translated
            # exception with the original traceback.
            raise exc_value, None, exc_trace
        except cinder_exception.ConnectionError:
            exc_type, exc_value, exc_trace = sys.exc_info()
            exc_value = exception.CinderConnectionFailed(
                reason=exc_value.message)
            raise exc_value, None, exc_trace
        return res
    return wrapper
def translate_snapshot_exception(method):
    """Transforms the exception for the snapshot but keeps its traceback
    intact.

    Decorator for methods of the form method(self, ctx, snapshot_id, ...):
    cinderclient exceptions are translated into Nova exceptions, re-raised
    with the original traceback.
    """
    def wrapper(self, ctx, snapshot_id, *args, **kwargs):
        try:
            res = method(self, ctx, snapshot_id, *args, **kwargs)
        except cinder_exception.ClientException:
            exc_type, exc_value, exc_trace = sys.exc_info()
            if isinstance(exc_value, cinder_exception.NotFound):
                exc_value = exception.SnapshotNotFound(snapshot_id=snapshot_id)
            # Python 2 three-expression raise preserves the traceback.
            raise exc_value, None, exc_trace
        except cinder_exception.ConnectionError:
            exc_type, exc_value, exc_trace = sys.exc_info()
            exc_value = exception.CinderConnectionFailed(
                reason=exc_value.message)
            raise exc_value, None, exc_trace
        return res
    return wrapper
def get_cinder_client_version(context):
    """Parse cinder client version by endpoint url.

    Also caches the discovered endpoint URL into the module global
    CINDER_URL, and warns once when the deprecated v1 API is configured.

    :param context: Nova auth context.
    :return: str value(1 or 2).
    """
    global CINDER_URL
    # FIXME: the cinderclient ServiceCatalog object is mis-named.
    # It actually contains the entire access blob.
    # Only needed parts of the service catalog are passed in, see
    # nova/context.py.
    compat_catalog = {
        'access': {'serviceCatalog': context.service_catalog or []}
    }
    sc = service_catalog.ServiceCatalog(compat_catalog)
    if CONF.cinder.endpoint_template:
        url = CONF.cinder.endpoint_template % context.to_dict()
    else:
        info = CONF.cinder.catalog_info
        service_type, service_name, endpoint_type = info.split(':')
        # extract the region if set in configuration
        if CONF.cinder.os_region_name:
            attr = 'region'
            filter_value = CONF.cinder.os_region_name
        else:
            attr = None
            filter_value = None
        url = sc.url_for(attr=attr,
                         filter_value=filter_value,
                         service_type=service_type,
                         service_name=service_name,
                         endpoint_type=endpoint_type)
    LOG.debug('Cinderclient connection created using URL: %s', url)
    valid_versions = ['v1', 'v2']
    magic_tuple = urlparse.urlsplit(url)
    scheme, netloc, path, query, frag = magic_tuple
    components = path.split("/")
    for version in valid_versions:
        if version in components[1]:
            # Strip the leading 'v' to return just the number.
            version = version[1:]
            # Warn only the first time (before CINDER_URL is cached).
            if not CINDER_URL and version == '1':
                msg = _LW('Cinder V1 API is deprecated as of the Juno '
                          'release, and Nova is still configured to use it. '
                          'Enable the V2 API in Cinder and set '
                          'cinder_catalog_info in nova.conf to use it.')
                LOG.warn(msg)
            CINDER_URL = url
            return version
    msg = _("Invalid client version, must be one of: %s") % valid_versions
    raise cinder_exception.UnsupportedVersion(msg)
class API(object):
"""API for interacting with the volume manager."""
    @translate_volume_exception
    def get(self, context, volume_id):
        """Returns the translated summary view of volume_id."""
        item = cinderclient(context).volumes.get(volume_id)
        return _untranslate_volume_summary_view(context, item)
@translate_volume_exception
def upload_to_image(self,context,volume,force, image_name, container_format,disk_format):
return cinderclient(context).volumes.upload_to_image(volume,force, image_name, container_format,disk_format)
def get_all(self, context, search_opts=None):
search_opts = search_opts or {}
items = cinderclient(context).volumes.list(detailed=True)
rval = []
for item in items:
rval.append(_untranslate_volume_summary_view(context, item))
return rval
def check_attached(self, context, volume):
if volume['status'] != "in-use":
msg = _("status must be 'in-use'")
raise exception.InvalidVolume(reason=msg)
def check_attach(self, context, volume, instance=None):
# TODO(vish): abstract status checking?
if volume['status'] != "available":
msg = _("status must be 'available'")
raise exception.InvalidVolume(reason=msg)
if volume['attach_status'] == "attached":
msg = _("already attached")
raise exception.InvalidVolume(reason=msg)
if instance and not CONF.cinder.cross_az_attach:
# NOTE(sorrison): If instance is on a host we match against it's AZ
# else we check the intended AZ
if instance.get('host'):
instance_az = az.get_instance_availability_zone(
context, instance)
else:
instance_az = instance['availability_zone']
if instance_az != volume['availability_zone']:
msg = _("Instance and volume not in same availability_zone")
raise exception.InvalidVolume(reason=msg)
def check_detach(self, context, volume):
# TODO(vish): abstract status checking?
if volume['status'] == "available":
msg = _("already detached")
raise exception.InvalidVolume(reason=msg)
@translate_volume_exception
def reserve_volume(self, context, volume_id):
cinderclient(context).volumes.reserve(volume_id)
@translate_volume_exception
def unreserve_volume(self, context, volume_id):
cinderclient(context).volumes.unreserve(volume_id)
@translate_volume_exception
def begin_detaching(self, context, volume_id):
cinderclient(context).volumes.begin_detaching(volume_id)
@translate_volume_exception
def roll_detaching(self, context, volume_id):
cinderclient(context).volumes.roll_detaching(volume_id)
@translate_volume_exception
def attach(self, context, volume_id, instance_uuid, mountpoint, mode='rw'):
cinderclient(context).volumes.attach(volume_id, instance_uuid,
mountpoint, mode=mode)
@translate_volume_exception
def detach(self, context, volume_id):
cinderclient(context).volumes.detach(volume_id)
@translate_volume_exception
def initialize_connection(self, context, volume_id, connector):
return cinderclient(context).volumes.initialize_connection(volume_id,
connector)
@translate_volume_exception
def terminate_connection(self, context, volume_id, connector):
return cinderclient(context).volumes.terminate_connection(volume_id,
connector)
def migrate_volume_completion(self, context, old_volume_id, new_volume_id,
error=False):
return cinderclient(context).volumes.migrate_volume_completion(
old_volume_id, new_volume_id, error)
def create(self, context, size, name, description, snapshot=None,
image_id=None, volume_type=None, metadata=None,
availability_zone=None):
if snapshot is not None:
snapshot_id = snapshot['id']
else:
snapshot_id = None
kwargs = dict(snapshot_id=snapshot_id,
volume_type=volume_type,
user_id=context.user_id,
project_id=context.project_id,
availability_zone=availability_zone,
metadata=metadata,
imageRef=image_id)
version = get_cinder_client_version(context)
if version == '1':
kwargs['display_name'] = name
kwargs['display_description'] = description
elif version == '2':
kwargs['name'] = name
kwargs['description'] = description
try:
item = cinderclient(context).volumes.create(size, **kwargs)
return _untranslate_volume_summary_view(context, item)
except cinder_exception.OverLimit:
raise exception.OverQuota(overs='volumes')
except cinder_exception.BadRequest as e:
raise exception.InvalidInput(reason=unicode(e))
@translate_volume_exception
def delete(self, context, volume_id):
cinderclient(context).volumes.delete(volume_id)
@translate_volume_exception
def update(self, context, volume_id, fields):
raise NotImplementedError()
@translate_snapshot_exception
def get_snapshot(self, context, snapshot_id):
item = cinderclient(context).volume_snapshots.get(snapshot_id)
return _untranslate_snapshot_summary_view(context, item)
def get_all_snapshots(self, context):
items = cinderclient(context).volume_snapshots.list(detailed=True)
rvals = []
for item in items:
rvals.append(_untranslate_snapshot_summary_view(context, item))
return rvals
@translate_volume_exception
def create_snapshot(self, context, volume_id, name, description):
item = cinderclient(context).volume_snapshots.create(volume_id,
False,
name,
description)
return _untranslate_snapshot_summary_view(context, item)
@translate_volume_exception
def create_snapshot_force(self, context, volume_id, name, description):
item = cinderclient(context).volume_snapshots.create(volume_id,
True,
name,
description)
return _untranslate_snapshot_summary_view(context, item)
@translate_snapshot_exception
def delete_snapshot(self, context, snapshot_id):
cinderclient(context).volume_snapshots.delete(snapshot_id)
def get_volume_encryption_metadata(self, context, volume_id):
return cinderclient(context).volumes.get_encryption_metadata(volume_id)
@translate_volume_exception
def get_volume_metadata(self, context, volume_id):
vol = cinderclient(context).volumes.get(volume_id)
return vol.metadata
@translate_volume_exception
def delete_volume_metadata(self, context, volume_id, keys):
cinderclient(context).volumes.delete_metadata(volume_id, keys)
@translate_volume_exception
def update_volume_metadata(self, context, volume_id,
metadata, delete=False):
if delete:
# Completely replace volume metadata with one given
return cinderclient(context).volumes.update_all_metadata(
volume_id, metadata)
else:
return cinderclient(context).volumes.set_metadata(
volume_id, metadata)
@translate_volume_exception
def get_volume_metadata_value(self, context, volume_id, key):
vol = cinderclient(context).volumes.get(volume_id)
return vol.metadata.get(key)
@translate_snapshot_exception
def update_snapshot_status(self, context, snapshot_id, status):
vs = cinderclient(context).volume_snapshots
# '90%' here is used to tell Cinder that Nova is done
# with its portion of the 'creating' state. This can
# be removed when we are able to split the Cinder states
# into 'creating' and a separate state of
# 'creating_in_nova'. (Same for 'deleting' state.)
vs.update_snapshot_status(
snapshot_id,
{'status': status,
'progress': '90%'}
)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Linear Estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import re
import six
from tensorflow.contrib import layers
from tensorflow.contrib.framework import deprecated
from tensorflow.contrib.framework import deprecated_arg_values
from tensorflow.contrib.framework.python.framework import experimental
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import evaluable
from tensorflow.contrib.learn.python.learn import monitors as monitor_lib
from tensorflow.contrib.learn.python.learn import trainable
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import export
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import gradients
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import variable_scope
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import training as train
# The default learning rate of 0.2 is a historical artifact of the initial
# implementation, but seems a reasonable choice.
# NOTE: _get_default_optimizer caps the effective rate at
# 1/sqrt(len(feature_columns)), so this is an upper bound for the default
# FTRL optimizer.
_LEARNING_RATE = 0.2
def _get_optimizer(spec):
  """Materialize an optimizer from `spec`.

  A string is looked up in `layers.OPTIMIZER_CLS_NAMES` and instantiated
  with the module default learning rate; a callable is invoked with no
  arguments; anything else is assumed to already be an optimizer and is
  returned unchanged.
  """
  if isinstance(spec, six.string_types):
    optimizer_cls = layers.OPTIMIZER_CLS_NAMES[spec]
    return optimizer_cls(learning_rate=_LEARNING_RATE)
  if callable(spec):
    return spec()
  return spec
# TODO(ispir): Remove this function by fixing '_infer_model' with single
# outputs and as_iterable case.
def _as_iterable(preds, output):
  """Lazily yield the `output` entry of each prediction dict in `preds`."""
  for single_pred in preds:
    yield single_pred[output]
def _add_bias_column(feature_columns, columns_to_tensors, bias_variable,
                     labels, columns_to_variables):
  """Wire a synthetic all-ones bias column into the feature-column maps.

  Adds a real-valued column named "tf_virtual_bias_column" whose tensor is
  ones shaped like `labels`, and maps it to `bias_variable`, mutating
  `columns_to_tensors` and `columns_to_variables` in place.

  Raises:
    ValueError: if a user-supplied column already uses the reserved name.
  """
  # TODO(b/31008490): Move definition to a common constants place.
  bias_column_name = "tf_virtual_bias_column"
  # Compare with `==`, not `is`: string identity is an interning accident,
  # so a runtime-constructed name equal to the reserved one would otherwise
  # slip past this check.
  if any(col.name == bias_column_name for col in feature_columns):
    raise ValueError("%s is a reserved column name." % bias_column_name)
  bias_column = layers.real_valued_column(bias_column_name)
  columns_to_tensors[bias_column] = array_ops.ones_like(labels,
                                                        dtype=dtypes.float32)
  columns_to_variables[bias_column] = [bias_variable]
def _linear_model_fn(features, labels, mode, params, config=None):
  """A model_fn for linear models that use a gradient-based optimizer.
  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * optimizer: string, `Optimizer` object, or callable that defines the
          optimizer to use for training.
      * gradient_clip_norm: A float > 0. If provided, gradients are
          clipped to their global norm with this clipping ratio.
      * num_ps_replicas: The number of parameter server replicas.
      * joint_weights: If True, the weights for all columns will be stored in a
          single (possibly partitioned) variable. It's more efficient, but it's
          incompatible with SDCAOptimizer, and requires all feature columns are
          sparse and use the 'sum' combiner.
    config: `RunConfig` object to configure the runtime settings.
  Returns:
    A `ModelFnOps` instance.
  Raises:
    ValueError: If mode is not any of the `ModeKeys`.
  """
  head = params["head"]
  feature_columns = params["feature_columns"]
  optimizer = params["optimizer"]
  gradient_clip_norm = params.get("gradient_clip_norm", None)
  # Without a RunConfig, assume no parameter servers (single process).
  num_ps_replicas = config.num_ps_replicas if config else 0
  joint_weights = params.get("joint_weights", False)
  # Normalize a bare Tensor into a dict keyed by the empty string so the
  # feature-column machinery below sees a uniform input.
  if not isinstance(features, dict):
    features = {"": features}
  parent_scope = "linear"
  # Spread large weight variables across up to num_ps_replicas partitions,
  # with a minimum slice of 64MB (64 << 20 bytes) per partition.
  partitioner = partitioned_variables.min_max_variable_partitioner(
      max_partitions=num_ps_replicas,
      min_slice_size=64 << 20)
  with variable_scope.variable_scope(
      parent_scope, values=features.values(), partitioner=partitioner) as scope:
    # joint_weights stores all column weights in one fused (possibly
    # partitioned) variable; otherwise each column gets its own variable.
    # Both register their variables in the "linear" collection, which the
    # train op below differentiates against.
    if joint_weights:
      logits, _, _ = (
          layers.joint_weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=head.logits_dimension,
              weight_collections=[parent_scope],
              scope=scope))
    else:
      logits, _, _ = (
          layers.weighted_sum_from_feature_columns(
              columns_to_tensors=features,
              feature_columns=feature_columns,
              num_outputs=head.logits_dimension,
              weight_collections=[parent_scope],
              scope=scope))
  def _train_op_fn(loss):
    # Differentiate the loss w.r.t. the variables registered under the
    # "linear" collection above, optionally clip by global norm, then let
    # the optimizer apply the gradients (incrementing the global step).
    global_step = contrib_variables.get_global_step()
    my_vars = ops.get_collection("linear")
    grads = gradients.gradients(loss, my_vars)
    if gradient_clip_norm:
      grads, _ = clip_ops.clip_by_global_norm(grads, gradient_clip_norm)
    return (optimizer.apply_gradients(
        zip(grads, my_vars), global_step=global_step))
  return head.head_ops(features, labels, mode, _train_op_fn, logits)
def sdca_model_fn(features, labels, mode, params):
  """A model_fn for linear models that use the SDCA optimizer.
  Args:
    features: A dict of `Tensor` keyed by column name.
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance. Type must be one of `_BinarySvmHead`,
          `_RegressionHead` or `_MultiClassHead`.
      * feature_columns: An iterable containing all the feature columns used by
          the model.
      * optimizer: An `SDCAOptimizer` instance.
      * weight_column_name: A string defining the weight feature column, or
          None if there are no weights.
      * update_weights_hook: A `SessionRunHook` object or None. Used to update
          model weights.
  Returns:
    A `ModelFnOps` instance.
  Raises:
    ValueError: If `optimizer` is not an `SDCAOptimizer` instance.
    ValueError: If the type of head is neither `_BinarySvmHead`, nor
      `_RegressionHead` nor `_MultiClassHead`.
    ValueError: If mode is not any of the `ModeKeys`.
  """
  head = params["head"]
  feature_columns = params["feature_columns"]
  optimizer = params["optimizer"]
  weight_column_name = params["weight_column_name"]
  update_weights_hook = params.get("update_weights_hook", None)
  if not isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
    raise ValueError("Optimizer must be of type SDCAOptimizer")
  # Map the head's class onto the loss SDCA should minimize.
  # pylint: disable=protected-access
  if isinstance(head, head_lib._BinarySvmHead):
    loss_type = "hinge_loss"
  elif isinstance(
      head, (head_lib._MultiClassHead, head_lib._BinaryLogisticHead)):
    loss_type = "logistic_loss"
  elif isinstance(head, head_lib._RegressionHead):
    loss_type = "squared_loss"
  else:
    raise ValueError("Unsupported head type: {}".format(head))
  # pylint: enable=protected-access
  parent_scope = "linear"
  with variable_scope.variable_op_scope(
      features.values(), parent_scope) as scope:
    # Build single-output linear logits plus the per-column weight variables.
    # The returned bias is re-wired below as a virtual all-ones feature
    # column so SDCA optimizes it like any other weight.
    logits, columns_to_variables, bias = (
        layers.weighted_sum_from_feature_columns(
            columns_to_tensors=features,
            feature_columns=feature_columns,
            num_outputs=1,
            scope=scope))
    _add_bias_column(feature_columns, features, bias, labels,
                     columns_to_variables)
  def _train_op_fn(unused_loss):
    # SDCA computes its own loss internally, so the head's loss is unused.
    global_step = contrib_variables.get_global_step()
    sdca_model, train_op = optimizer.get_train_step(columns_to_variables,
                                                    weight_column_name,
                                                    loss_type, features,
                                                    labels, global_step)
    if update_weights_hook is not None:
      # Hand the model and train op to the chief's hook so it can build
      # the weight update/shrink op (see _SdcaUpdateWeightsHook).
      update_weights_hook.set_parameters(sdca_model, train_op)
    return train_op
  return head.head_ops(features, labels, mode, _train_op_fn, logits)
# Ensures consistency with LinearComposableModel.
def _get_default_optimizer(feature_columns):
  """Build the default FTRL optimizer for the given feature columns."""
  # Shrink the base rate as 1/sqrt(#columns), never exceeding _LEARNING_RATE.
  rate = min(_LEARNING_RATE, 1.0 / math.sqrt(len(feature_columns)))
  return train.FtrlOptimizer(learning_rate=rate)
class _SdcaUpdateWeightsHook(session_run_hook.SessionRunHook):
  """SessionRunHook to update and shrink SDCA model weights.

  `sdca_model_fn` calls `set_parameters` while building the graph; `begin`
  then constructs the update op, and `before_run` schedules it on every
  session run.
  """
  def __init__(self):
    pass
  def set_parameters(self, sdca_model, train_op):
    # Must be called (by sdca_model_fn's _train_op_fn) before `begin`,
    # which reads these attributes.
    self._sdca_model = sdca_model
    self._train_op = train_op
  def begin(self):
    """Construct the update_weights op.
    The op is implicitly added to the default graph.
    """
    self._update_op = self._sdca_model.update_weights(self._train_op)
  def before_run(self, run_context):
    """Return the update_weights op so that it is executed during this run."""
    return session_run_hook.SessionRunArgs(self._update_op)
class LinearClassifier(evaluable.Evaluable, trainable.Trainable):
  """Linear classifier model.
  Train a linear model to classify instances into one of multiple possible
  classes. When number of possible classes is 2, this is binary classification.
  Example:
  ```python
  sparse_column_a = sparse_column_with_hash_bucket(...)
  sparse_column_b = sparse_column_with_hash_bucket(...)
  sparse_feature_a_x_sparse_feature_b = crossed_column(...)
  # Estimator using the default optimizer.
  estimator = LinearClassifier(
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
  # Or estimator using the FTRL optimizer with regularization.
  estimator = LinearClassifier(
      feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
      optimizer=tf.train.FtrlOptimizer(
        learning_rate=0.1,
        l1_regularization_strength=0.001
      ))
  # Or estimator using the SDCAOptimizer.
  estimator = LinearClassifier(
     feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b],
     optimizer=tf.contrib.linear_optimizer.SDCAOptimizer(
       example_id_column='example_id',
       num_loss_partitions=...,
       symmetric_l2_regularization=2.0
     ))
  # Input builders
  def input_fn_train: # returns x, y (where y represents label's class index).
    ...
  def input_fn_eval: # returns x, y (where y represents label's class index).
    ...
  estimator.fit(input_fn=input_fn_train)
  estimator.evaluate(input_fn=input_fn_eval)
  estimator.predict(x=x) # returns predicted labels (i.e. label's class index).
  ```
  Input of `fit` and `evaluate` should have following features,
    otherwise there will be a `KeyError`:
  * if `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """
  def __init__(self,  # _joint_weight pylint: disable=invalid-name
               feature_columns,
               model_dir=None,
               n_classes=2,
               weight_column_name=None,
               optimizer=None,
               gradient_clip_norm=None,
               enable_centered_bias=False,
               _joint_weight=False,
               config=None,
               feature_engineering_fn=None):
    """Construct a `LinearClassifier` estimator object.
    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph and etc. This can
        also be used to load checkpoints from the directory into a estimator
        to continue training a previously saved model.
      n_classes: number of label classes. Default is binary classification.
        Note that class labels are integers representing the class index (i.e.
        values from 0 to n_classes-1). For arbitrary label values (e.g. string
        labels), convert to class indices first.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: The optimizer used to train the model. If specified, it should
        be either an instance of `tf.Optimizer` or the SDCAOptimizer. If `None`,
        the Ftrl optimizer will be used.
      gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        `tf.clip_by_global_norm` for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      _joint_weight: If True, the weights for all columns will be stored in a
        single (possibly partitioned) variable. It's more efficient, but it's
        incompatible with SDCAOptimizer, and requires all feature columns are
        sparse and use the 'sum' combiner.
      config: `RunConfig` object to configure the runtime settings.
      feature_engineering_fn: Feature engineering function. Takes features and
                        labels which are the output of `input_fn` and
                        returns features and labels which will be fed
                        into the model.
    Returns:
      A `LinearClassifier` estimator.
    Raises:
      ValueError: if n_classes < 2.
    """
    # TODO(zoy): Give an unsupported error if enable_centered_bias is
    # requested for SDCA once its default changes to False.
    self._feature_columns = tuple(feature_columns or [])
    assert self._feature_columns
    # Build the default FTRL optimizer only when the caller supplied none.
    # This mirrors LinearRegressor.__init__ and avoids constructing an
    # optimizer object that would be immediately discarded.
    if optimizer:
      self._optimizer = _get_optimizer(optimizer)
    else:
      self._optimizer = _get_default_optimizer(feature_columns)
    chief_hook = None
    if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
        enable_centered_bias):
      enable_centered_bias = False
      logging.warning("centered_bias is not supported with SDCA, "
                      "please disable it explicitly.")
    head = head_lib._multi_class_head(  # pylint: disable=protected-access
        n_classes,
        weight_column_name=weight_column_name,
        enable_centered_bias=enable_centered_bias)
    params = {
        "head": head,
        "feature_columns": feature_columns,
        "optimizer": self._optimizer,
    }
    if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
      assert not _joint_weight, ("_joint_weight is incompatible with the"
                                 " SDCAOptimizer")
      assert n_classes == 2, "SDCA only applies to binary classification."
      model_fn = sdca_model_fn
      # The model_fn passes the model parameters to the chief_hook. We then use
      # the hook to update weights and shrink step only on the chief.
      chief_hook = _SdcaUpdateWeightsHook()
      params.update({
          "weight_column_name": weight_column_name,
          "update_weights_hook": chief_hook,
      })
    else:
      model_fn = _linear_model_fn
      params.update({
          "gradient_clip_norm": gradient_clip_norm,
          "joint_weights": _joint_weight,
      })
    self._estimator = estimator.Estimator(
        model_fn=model_fn,
        model_dir=model_dir,
        config=config,
        params=params,
        feature_engineering_fn=feature_engineering_fn)
    # Only the chief worker runs the SDCA weight-update hook.
    self._additional_run_hook = (chief_hook if self._estimator.config.is_chief
                                 else None)
  def get_estimator(self):
    return self._estimator
  def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
          monitors=None, max_steps=None):
    """See trainable.Trainable. Note: Labels must be integer class indices."""
    # TODO(roumposg): Remove when deprecated monitors are removed.
    hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
    if self._additional_run_hook:
      hooks.append(self._additional_run_hook)
    self._estimator.fit(x=x,
                        y=y,
                        input_fn=input_fn,
                        steps=steps,
                        batch_size=batch_size,
                        monitors=hooks,
                        max_steps=max_steps)
    return self
  def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
               batch_size=None, steps=None, metrics=None, name=None,
               checkpoint_path=None):
    """See evaluable.Evaluable. Note: Labels must be integer class indices."""
    return self._estimator.evaluate(x=x, y=y, input_fn=input_fn,
                                    feed_fn=feed_fn, batch_size=batch_size,
                                    steps=steps, metrics=metrics, name=name,
                                    checkpoint_path=checkpoint_path)
  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
    """Runs inference to determine the predicted class (i.e. class index)."""
    key = prediction_key.PredictionKey.CLASSES
    preds = self._estimator.predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return _as_iterable(preds, output=key)
    return preds[key]
  @deprecated_arg_values(
      estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
      as_iterable=False)
  def predict_proba(self, x=None, input_fn=None, batch_size=None, outputs=None,
                    as_iterable=True):
    """Runs inference to determine the class probability predictions."""
    key = prediction_key.PredictionKey.PROBABILITIES
    preds = self._estimator.predict(
        x=x,
        input_fn=input_fn,
        batch_size=batch_size,
        outputs=[key],
        as_iterable=as_iterable)
    if as_iterable:
      return _as_iterable(preds, output=key)
    return preds[key]
  def get_variable_names(self):
    return self._estimator.get_variable_names()
  def get_variable_value(self, name):
    return self._estimator.get_variable_value(name)
  def export(self,
             export_dir,
             input_fn=None,
             input_feature_key=None,
             use_deprecated_input_fn=True,
             signature_fn=None,
             default_batch_size=1,
             exports_to_keep=None):
    """See BaseEstimator.export."""
    def default_input_fn(unused_estimator, examples):
      return layers.parse_feature_columns_from_examples(
          examples, self._feature_columns)
    return self._estimator.export(
        export_dir=export_dir,
        input_fn=input_fn or default_input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        signature_fn=(signature_fn or
                      export.classification_signature_fn_with_prob),
        prediction_key=prediction_key.PredictionKey.PROBABILITIES,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)
  @experimental
  def export_savedmodel(self,
                        export_dir_base,
                        input_fn,
                        default_output_alternative_key=None,
                        assets_extra=None,
                        as_text=False,
                        exports_to_keep=None):
    return self._estimator.export_savedmodel(
        export_dir_base,
        input_fn,
        default_output_alternative_key=default_output_alternative_key,
        assets_extra=assets_extra,
        as_text=as_text,
        exports_to_keep=exports_to_keep)
  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def weights_(self):
    values = {}
    # Skip the bias and optimizer slot variables (e.g. "linear/<Name>_1");
    # only "linear/"-prefixed model weights are returned.
    optimizer_regex = r".*/"+self._optimizer.get_name() + r"(_\d)?$"
    for name in self.get_variable_names():
      if (name.startswith("linear/") and
          name != "linear/bias_weight" and
          not re.match(optimizer_regex, name)):
        values[name] = self.get_variable_value(name)
    if len(values) == 1:
      return values[list(values.keys())[0]]
    return values
  @property
  @deprecated("2016-10-30",
              "This method will be removed after the deprecation date. "
              "To inspect variables, use get_variable_names() and "
              "get_variable_value().")
  def bias_(self):
    return self.get_variable_value("linear/bias_weight")
  @property
  def config(self):
    return self._estimator.config
  @property
  def model_dir(self):
    return self._estimator.model_dir
class LinearRegressor(evaluable.Evaluable, trainable.Trainable):
"""Linear regressor model.
Train a linear regression model to predict label value given observation of
feature values.
Example:
```python
sparse_column_a = sparse_column_with_hash_bucket(...)
sparse_column_b = sparse_column_with_hash_bucket(...)
sparse_feature_a_x_sparse_feature_b = crossed_column(...)
estimator = LinearRegressor(
feature_columns=[sparse_column_a, sparse_feature_a_x_sparse_feature_b])
# Input builders
def input_fn_train: # returns x, y
...
def input_fn_eval: # returns x, y
...
estimator.fit(input_fn=input_fn_train)
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
Input of `fit` and `evaluate` should have following features,
otherwise there will be a KeyError:
* if `weight_column_name` is not `None`:
key=weight_column_name, value=a `Tensor`
* for column in `feature_columns`:
- if isinstance(column, `SparseColumn`):
key=column.name, value=a `SparseTensor`
- if isinstance(column, `WeightedSparseColumn`):
{key=id column name, value=a `SparseTensor`,
key=weight column name, value=a `SparseTensor`}
- if isinstance(column, `RealValuedColumn`):
key=column.name, value=a `Tensor`
"""
def __init__(self, # _joint_weights: pylint: disable=invalid-name
feature_columns,
model_dir=None,
weight_column_name=None,
optimizer=None,
gradient_clip_norm=None,
enable_centered_bias=False,
label_dimension=1,
_joint_weights=False,
config=None,
feature_engineering_fn=None):
"""Construct a `LinearRegressor` estimator object.
Args:
feature_columns: An iterable containing all the feature columns used by
the model. All items in the set should be instances of classes derived
from `FeatureColumn`.
model_dir: Directory to save model parameters, graph, etc. This can
also be used to load checkpoints from the directory into a estimator
to continue training a previously saved model.
weight_column_name: A string defining feature column name representing
weights. It is used to down weight or boost examples during training. It
will be multiplied by the loss of the example.
optimizer: An instance of `tf.Optimizer` used to train the model. If
`None`, will use an Ftrl optimizer.
gradient_clip_norm: A `float` > 0. If provided, gradients are clipped
to their global norm with this clipping ratio. See
`tf.clip_by_global_norm` for more details.
enable_centered_bias: A bool. If True, estimator will learn a centered
bias variable for each class. Rest of the model structure learns the
residual after centered bias.
label_dimension: Dimension of the label for multilabels. Defaults to 1.
_joint_weights: If True use a single (possibly partitioned) variable to
store the weights. It's faster, but requires all feature columns are
sparse and have the 'sum' combiner. Incompatible with SDCAOptimizer.
config: `RunConfig` object to configure the runtime settings.
feature_engineering_fn: Feature engineering function. Takes features and
labels which are the output of `input_fn` and
returns features and labels which will be fed
into the model.
Returns:
A `LinearRegressor` estimator.
"""
self._feature_columns = tuple(feature_columns or [])
assert self._feature_columns
if optimizer:
self._optimizer = _get_optimizer(optimizer)
else:
self._optimizer = _get_default_optimizer(feature_columns)
chief_hook = None
if (isinstance(optimizer, sdca_optimizer.SDCAOptimizer) and
enable_centered_bias):
enable_centered_bias = False
logging.warning("centered_bias is not supported with SDCA, "
"please disable it explicitly.")
head = head_lib._regression_head( # pylint: disable=protected-access
weight_column_name=weight_column_name,
label_dimension=label_dimension,
enable_centered_bias=enable_centered_bias)
params = {
"head": head,
"feature_columns": feature_columns,
"optimizer": self._optimizer,
}
if isinstance(optimizer, sdca_optimizer.SDCAOptimizer):
assert label_dimension == 1, "SDCA only applies for label_dimension=1."
assert not _joint_weights, ("_joint_weights is incompatible with"
" SDCAOptimizer.")
model_fn = sdca_model_fn
# The model_fn passes the model parameters to the chief_hook. We then use
# the hook to update weights and shrink step only on the chief.
chief_hook = _SdcaUpdateWeightsHook()
params.update({
"weight_column_name": weight_column_name,
"update_weights_hook": chief_hook,
})
else:
model_fn = _linear_model_fn
params.update({
"gradient_clip_norm": gradient_clip_norm,
"joint_weights": _joint_weights,
})
self._estimator = estimator.Estimator(
model_fn=model_fn,
model_dir=model_dir,
config=config,
params=params,
feature_engineering_fn=feature_engineering_fn)
self._additional_run_hook = (chief_hook if self._estimator.config.is_chief
else None)
def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None,
monitors=None, max_steps=None):
"""See trainable.Trainable."""
# TODO(roumposg): Remove when deprecated monitors are removed.
hooks = monitor_lib.replace_monitors_with_hooks(monitors, self)
if self._additional_run_hook:
hooks.append(self._additional_run_hook)
self._estimator.fit(x=x,
y=y,
input_fn=input_fn,
steps=steps,
batch_size=batch_size,
monitors=hooks,
max_steps=max_steps)
return self
def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
batch_size=None, steps=None, metrics=None, name=None,
checkpoint_path=None):
"""See evaluable.Evaluable."""
return self._estimator.evaluate(x=x, y=y, input_fn=input_fn,
feed_fn=feed_fn, batch_size=batch_size,
steps=steps, metrics=metrics, name=name,
checkpoint_path=checkpoint_path)
@deprecated_arg_values(
estimator.AS_ITERABLE_DATE, estimator.AS_ITERABLE_INSTRUCTIONS,
as_iterable=False)
def predict(self, x=None, input_fn=None, batch_size=None, as_iterable=True):
"""Runs inference to determine the predicted class."""
key = prediction_key.PredictionKey.SCORES
preds = self._estimator.predict(
x=x,
input_fn=input_fn,
batch_size=batch_size,
outputs=[key],
as_iterable=as_iterable)
if as_iterable:
return _as_iterable(preds, output=key)
return preds[key]
def get_variable_names(self):
return self._estimator.get_variable_names()
def get_variable_value(self, name):
return self._estimator.get_variable_value(name)
def export(self,
           export_dir,
           input_fn=None,
           input_feature_key=None,
           use_deprecated_input_fn=True,
           signature_fn=None,
           default_batch_size=1,
           exports_to_keep=None):
    """See BaseEstimator.export."""
    def _parse_examples_input_fn(unused_estimator, examples):
        # Default: parse serialized tf.Example protos with this model's
        # configured feature columns.
        return layers.parse_feature_columns_from_examples(
            examples, self._feature_columns)
    return self._estimator.export(
        export_dir=export_dir,
        input_fn=input_fn or _parse_examples_input_fn,
        input_feature_key=input_feature_key,
        use_deprecated_input_fn=use_deprecated_input_fn,
        signature_fn=signature_fn or export.regression_signature_fn,
        prediction_key=prediction_key.PredictionKey.SCORES,
        default_batch_size=default_batch_size,
        exports_to_keep=exports_to_keep)
@experimental
def export_savedmodel(self,
                      export_dir_base,
                      input_fn,
                      default_output_alternative_key=None,
                      assets_extra=None,
                      as_text=False,
                      exports_to_keep=None):
    """Exports the model in SavedModel format via the wrapped estimator."""
    return self._estimator.export_savedmodel(
        export_dir_base,
        input_fn,
        default_output_alternative_key=default_output_alternative_key,
        assets_extra=assets_extra,
        as_text=as_text,
        exports_to_keep=exports_to_keep)
@property
@deprecated("2016-10-30",
            "This method will be removed after the deprecation date. "
            "To inspect variables, use get_variable_names() and "
            "get_variable_value().")
def weights_(self):
    """Returns the linear model weights, keyed by variable name."""
    # Optimizer slot variables look like ".../<OptName>" or ".../<OptName>_1";
    # exclude them along with the bias term.
    slot_pattern = r".*/" + self._optimizer.get_name() + r"(_\d)?$"
    weights = {}
    for var_name in self.get_variable_names():
        if not var_name.startswith("linear/"):
            continue
        if var_name == "linear/bias_weight":
            continue
        if re.match(slot_pattern, var_name):
            continue
        weights[var_name] = self.get_variable_value(var_name)
    if len(weights) == 1:
        # Single weight variable: return its value directly, not a dict.
        return next(iter(weights.values()))
    return weights
@property
@deprecated("2016-10-30",
            "This method will be removed after the deprecation date. "
            "To inspect variables, use get_variable_names() and "
            "get_variable_value().")
def bias_(self):
    """Returns the bias term (intercept) of the linear model."""
    return self.get_variable_value("linear/bias_weight")
@property
def config(self):
    """Returns the RunConfig of the underlying estimator."""
    return self._estimator.config
@property
def model_dir(self):
    """Returns the directory where model checkpoints/exports are stored."""
    return self._estimator.model_dir
| |
#!/usr/bin/env python
# coding: utf-8
# Based on the pylletTown demo, Copyright (c) 2015 Renfred Harper
# Copyright (c) 2015 Dominic Delabruere
from __future__ import print_function
import pygame
from . import find_res
from . import tmx
from .player import Player
from .font import RegularFont
from .menu import Menu
from .parse import parse_options
from .spriteloop import SpriteLoop
from .slidingbox import SlidingBox
class QuitGameException(Exception):
    """Raised to unwind the game loop when the player chooses to quit."""
    # The previous __init__ only forwarded *args/**kwargs to
    # Exception.__init__, which is what Exception already does by default,
    # so the override was redundant and has been removed.
class Game(object):
    """Top-level game object: owns the drawing surfaces, the current map,
    the player, and the main event loop.

    Rendering model: the game draws on a native 160x144 surface
    (``self.screen``) which ``flip()`` scales onto the real display
    (``self.realScreen``) while preserving the 10:9 aspect ratio.
    """

    def __init__(self, options):
        self.options = options
        # Native-resolution surface; everything is drawn here first.
        self.screen = pygame.Surface((160, 144))
        self.resized = True
        # BUG FIX: was `self.needs_filp` (typo), which created a stray
        # attribute instead of initializing the real dirty flag.
        self.needs_flip = True
        if not self.options.fullscreen:
            self.realScreen = pygame.display.set_mode((480, 432))
        else:
            self.realScreen = None
            self.toggle_fullscreen()
        pygame.mouse.set_visible(False)

    def bgm_loop(self, filename):
        """Loop background music indefinitely, if the mixer is available."""
        if pygame.mixer:
            pygame.mixer.music.load(find_res('bgm/' + filename))
            pygame.mixer.music.play(-1)

    def sfx(self, filename):
        """Play a one-shot sound effect, if the mixer is available."""
        if pygame.mixer:
            sound = pygame.mixer.Sound(find_res('sfx/' + filename))
            sound.play()

    def fade_out(self):
        """Animate the screen fading to black for entering a new area."""
        clock = pygame.time.Clock()
        self.sfx('open.ogg')
        black_rect = pygame.Surface(self.screen.get_size())
        black_rect.set_alpha(100)
        black_rect.fill((0, 0, 0))
        # Continuously draw a transparent black rectangle over the screen
        # to create a fade_out effect
        for i in range(0, 5):
            clock.tick(15)
            self.screen.blit(black_rect, (0, 0))
            self.needs_flip = True
            self.flip()
        clock.tick(15)
        self.screen.fill((255, 255, 255, 50))
        self.needs_flip = True
        self.flip()

    def display_message(self, text, wait=True):
        """Show word-wrapped dialog text in pages of three lines.

        Advances a page on SPACE/RETURN; if `wait` is False the final page
        does not block for a keypress.
        """
        font = RegularFont()
        # Split text into lists of word-wrapped strings, 25 characters long
        text = font.word_wrap(text, 25)
        while True:
            textBackground = pygame.image.load(find_res('images/dialog.png'))
            yOffset = 2
            for line in text[0:3]:
                textBackground.blit(
                    font.render(line), (4, yOffset))
                yOffset += 11
            self.screen.blit(textBackground, (0, 104))
            self.needs_flip = True
            space_pressed = False
            text = text[3:]
            while (text or wait) and not space_pressed:
                clock = pygame.time.Clock()
                clock.tick(30)
                self.flip()
                for event in pygame.event.get():
                    if event.type == pygame.KEYDOWN and (
                            event.key == pygame.K_SPACE or
                            event.key == pygame.K_RETURN):
                        space_pressed = True
                        break
                    else:
                        self.check_meta_keys(event)
            if not text:
                self.needs_flip = True
                break

    def initArea(self, mapFile, fromMap):
        """Load maps and initialize sprite layers for each new area."""
        self.tilemap = tmx.load(find_res('maps/' + mapFile), (160, 144))
        self.players = tmx.SpriteLayer()
        self.objects = tmx.SpriteLayer()
        self.boxes = tmx.SpriteLayer()
        # Initializing other animated sprites
        try:
            for cell in self.tilemap.layers['sprites'].find('src'):
                SpriteLoop((cell.px, cell.py), cell, self.objects)
        # In case there is no sprite layer for the current map
        except KeyError:
            pass
        else:
            self.tilemap.layers.append(self.objects)
        # Initializing sliding boxes (the original comment here claimed
        # "player sprite"; the player is created further below).
        try:
            for cell in self.tilemap.layers['boxes'].find('box'):
                SlidingBox((cell.px, cell.py), cell, self.boxes)
        except KeyError:
            pass
        else:
            self.tilemap.layers.append(self.boxes)
        # Initializing the player sprite at the entry cell matching fromMap.
        # NOTE(review): if no entry matches, startCell stays unbound and this
        # raises UnboundLocalError — confirm every map defines its entries.
        for cell in self.tilemap.layers['objects'].find('entry'):
            if cell['entry'] == fromMap:
                startCell = cell
        self.player = Player((startCell.px, startCell.py),
                             startCell['facing'], mapFile, self.players)
        self.tilemap.layers.append(self.players)
        self.tilemap.set_focus(self.player.rect.x, self.player.rect.y)
        self.needs_flip = True

    def flip(self):
        """Scale the native surface onto the display, keeping 10:9 aspect."""
        if self.needs_flip:
            real_x, real_y = self.realScreen.get_size()
            scale_size = self.realScreen.get_size()
            # If the screen is too wide, maintain the aspect ratio by
            # narrowing it.
            if (scale_size[1] / scale_size[0]) < 0.9:
                scale_size = (int(round((scale_size[1] / 9) * 10)),
                              scale_size[1])
            # If the screen is too narrow, maintain the aspect ratio by
            # shortening it.
            elif (scale_size[1] / scale_size[0]) > 0.9:
                scale_size = (scale_size[0],
                              int(round(scale_size[0] * 0.9)))
            offset_x = (real_x - scale_size[0]) // 2
            offset_y = round(real_y - scale_size[1]) // 2
            self.realScreen.fill((0, 0, 0))
            resized_screen = pygame.transform.scale(self.screen, scale_size)
            self.realScreen.blit(resized_screen, (offset_x, offset_y))
            if self.resized:
                # Full flip after a resize; partial update otherwise.
                pygame.display.flip()
                self.resized = False
            else:
                pygame.display.update((offset_x, offset_y),
                                      resized_screen.get_size())
            self.needs_flip = False

    def toggle_fullscreen(self):
        """Switch between windowed (480x432, resizable) and fullscreen."""
        if self.realScreen and abs(
                self.realScreen.get_flags() & pygame.FULLSCREEN):
            # Currently fullscreen: restart the display in windowed mode.
            pygame.display.quit()
            pygame.display.init()
            pygame.mouse.set_visible(False)
            self.realScreen = pygame.display.set_mode(
                (480, 432), pygame.RESIZABLE)
        else:
            # BUG FIX: list_modes() may return -1 ("any size allowed"); the
            # old code applied [::-1] before checking, raising TypeError.
            size_candidates = pygame.display.list_modes(
                0, pygame.FULLSCREEN)
            if size_candidates == -1:
                size = (160, 144)
            else:
                # Iterate smallest-first and pick the smallest mode that
                # still fits the native 160x144 resolution.
                for size in size_candidates[::-1]:
                    if (size[0] < 160) or (size[1] < 144):
                        continue
                    else:
                        break
            self.realScreen = pygame.display.set_mode(
                size, pygame.FULLSCREEN)
        self.needs_flip = True
        self.resized = True

    def confirm_quit(self):
        """Ask for confirmation; raise QuitGameException if the user agrees."""
        if Menu.no_or_yes('Are you sure you want to quit?', self):
            raise QuitGameException

    def check_meta_keys(self, event):
        """Handle window-level events: quit, fullscreen toggle, resize."""
        if event.type == pygame.QUIT:
            raise QuitGameException
        if event.type == pygame.KEYDOWN and (
                event.key == pygame.K_f or event.key == pygame.K_F11):
            self.toggle_fullscreen()
        if event.type == pygame.VIDEORESIZE:
            self.realScreen = pygame.display.set_mode(
                event.dict['size'], pygame.RESIZABLE)
            self.resized = True
            self.needs_flip = True

    def main(self):
        """Run the game: load the first area, show intro text, then loop."""
        clock = pygame.time.Clock()
        self.initArea('bedroom.tmx', 'wakeup')
        self.bgm_loop('81691-past.xm')
        self.tilemap.update(clock.tick(30), self)
        self.screen.fill((0, 0, 0))
        self.tilemap.draw(self.screen)
        try:
            self.display_message('Hit the SPACEBAR when you\'re done reading '
                                 'a message, or to interact with an object '
                                 'you\'re facing.')
            self.display_message('Move around with the ARROW KEYS.')
            self.display_message('Launch the game menu with the ESCAPE key.')
            while True:
                dt = clock.tick(30)
                for event in pygame.event.get():
                    if event.type == pygame.KEYDOWN and event.key == pygame.K_ESCAPE:
                        Menu.show_main_menu(self)
                    if event.type == pygame.KEYDOWN and (
                            event.key == pygame.K_SPACE or
                            event.key == pygame.K_RETURN):
                        self.player.action(self)
                    self.check_meta_keys(event)
                self.tilemap.update(dt, self)
                self.screen.fill((0, 0, 0))
                self.tilemap.draw(self.screen)
                self.flip()
        except QuitGameException:
            # Clean shutdown of audio and video subsystems.
            if pygame.mixer:
                pygame.mixer.quit()
            pygame.display.quit()
def run_game():
    """Entry point: initialize pygame, check modules, and start the game."""
    pygame.init()
    if not pygame.font:
        print('pygame.font module not initialized. '
              'Game will not run without it.')
        return
    if pygame.mixer:
        # One channel is enough for this game's single-effect audio.
        pygame.mixer.set_num_channels(1)
    else:
        print('pygame.mixer module not initialized. '
              'Game will run without sound.')
    pygame.display.set_caption("Demo: Be realistic!")
    Game(parse_options()).main()
| |
#!/usr/bin/env python
"""
OpenMM ForceField residue template generators.
"""
from __future__ import absolute_import
import numpy as np
import os, os.path, sys
from simtk.openmm.app import ForceField
from openmoltools.amber import run_antechamber
from openmoltools.openeye import get_charges
from simtk.openmm.app import Element
import parmed
# Python 2/3 compatibility shims: StringIO, getstatusoutput, and a
# run_command(command) helper that fires off a whitespace-split command
# and discards its result.
if sys.version_info >= (3, 0):
    from io import StringIO
    from subprocess import getstatusoutput, call
    def run_command(command):
        # Run the command; exit status and output are intentionally ignored.
        call(command.split())
else:
    from cStringIO import StringIO
    from commands import getstatusoutput
    def run_command(command):
        # NOTE(review): this branch runs through the shell and also discards
        # the (status, output) result, mirroring the Python 3 branch.
        getstatusoutput(command)
def generateTopologyFromOEMol(molecule):
    """
    Generate an OpenMM Topology object from an OEMol molecule.
    Parameters
    ----------
    molecule : openeye.oechem.OEMol
        The molecule from which a Topology object is to be generated.
    Returns
    -------
    topology : simtk.openmm.app.Topology
        The Topology object generated from `molecule`.
    """
    from simtk.openmm.app import Topology
    # Single chain / single residue container, named after the molecule title.
    topology = Topology()
    chain = topology.addChain()
    residue = topology.addResidue(molecule.GetTitle(), chain)
    # One OpenMM atom per OEMol atom, preserving names and elements.
    for oeatom in molecule.GetAtoms():
        element = Element.getByAtomicNumber(oeatom.GetAtomicNum())
        topology.addAtom(oeatom.GetName(), element, residue)
    # Recreate the bonds by matching atom names.
    atoms_by_name = {atom.name: atom for atom in topology.atoms()}
    for oebond in molecule.GetBonds():
        topology.addBond(atoms_by_name[oebond.GetBgn().GetName()],
                         atoms_by_name[oebond.GetEnd().GetName()])
    return topology
def _ensureUniqueAtomNames(molecule):
    """
    Ensure all atom names are unique and not blank.
    If any atom names are degenerate or blank, Tripos atom names are assigned to all atoms.
    Parameters
    ----------
    molecule : openeye.oechem.OEMol
        The molecule to be modified
    """
    from openeye import oechem
    seen = set()
    for atom in molecule.GetAtoms():
        name = atom.GetName()
        if name == "" or name in seen:
            # Blank or duplicate name found: rename every atom and stop.
            oechem.OETriposAtomNames(molecule)
            return
        seen.add(name)
def generateOEMolFromTopologyResidue(residue, geometry=False, tripos_atom_names=False):
    """
    Generate an OpenEye OEMol molecule from an OpenMM Topology Residue.
    Parameters
    ----------
    residue : simtk.openmm.app.topology.Residue
        The topology Residue from which an OEMol is to be created.
        An Exception will be thrown if this residue has external bonds.
    geometry : bool, optional, default=False
        If True, will generate a single configuration with OEOmega.
        Note that stereochemistry will be *random*.
    tripos_atom_names : bool, optional, default=False
        If True, will generate and assign Tripos atom names.
    Returns
    -------
    molecule : openeye.oechem.OEMol
        The OEMol molecule corresponding to the topology.
        Atom order will be preserved and bond orders assigned.
        The Antechamber `bondtype` program will be used to assign bond orders, and these
        will be converted back into OEMol bond type assignments.
        Note that there is no way to preserve stereochemistry since `Residue` does
        not note stereochemistry in any way.
    """
    # Raise an Exception if this residue has external bonds.
    if len(list(residue.external_bonds())) > 0:
        raise Exception("Cannot generate an OEMol from residue '%s' because it has external bonds." % residue.name)
    from openeye import oechem
    # Create OEMol where all atoms have bond order 1.
    molecule = oechem.OEMol()
    molecule.SetTitle(residue.name) # name molecule after first residue
    for atom in residue.atoms():
        oeatom = molecule.NewAtom(atom.element.atomic_number)
        oeatom.SetName(atom.name)
        # Remember the originating Topology atom index on each OEAtom.
        oeatom.AddData("topology_index", atom.index)
    oeatoms = { oeatom.GetName() : oeatom for oeatom in molecule.GetAtoms() }
    for (atom1, atom2) in residue.bonds():
        # All bonds start as single; true orders are perceived below.
        order = 1
        bond = molecule.NewBond(oeatoms[atom1.name], oeatoms[atom2.name], order)
        bond.SetType('1')
    # Write out a mol2 file without altering molecule.
    import tempfile
    tmpdir = tempfile.mkdtemp()
    mol2_input_filename = os.path.join(tmpdir,'molecule-before-bond-perception.mol2')
    ac_output_filename = os.path.join(tmpdir,'molecule-after-bond-perception.ac')
    ofs = oechem.oemolostream(mol2_input_filename)
    m2h = True
    substruct = False
    oechem.OEWriteMol2File(ofs, molecule, m2h, substruct)
    ofs.close()
    # Run Antechamber bondtype
    import subprocess
    # HBM reverting back to bondtype, antechamber seems to complain about valency
    # see https://github.com/choderalab/perses/pull/626
    # and https://github.com/choderalab/openmoltools/issues/295
    # and https://github.com/choderalab/openmoltools/issues/279
    command = 'bondtype -i %s -o %s -f mol2 -j full' % (mol2_input_filename, ac_output_filename)
    #command = 'antechamber -i %s -fi mol2 -o %s -fo ac -j 2' % (mol2_input_filename, ac_output_filename)
    run_command(command)
    # Define mapping from GAFF bond orders to OpenEye bond orders.
    order_map = { 1 : 1, 2 : 2, 3: 3, 7 : 1, 8 : 2, 9 : 5, 10 : 5 }
    # Read bonds.
    infile = open(ac_output_filename)
    lines = infile.readlines()
    infile.close()
    antechamber_bond_types = list()
    for line in lines:
        elements = line.split()
        if elements[0] == 'BOND':
            # Field 4 holds the perceived (GAFF) bond type for this bond.
            antechamber_bond_types.append(int(elements[4]))
    # Re-derive aromaticity and formal charges from the perceived bond types.
    # NOTE(review): this assumes GetBonds() iterates in the same order the
    # bonds appear in the .ac file — confirm against OpenEye/antechamber docs.
    oechem.OEClearAromaticFlags(molecule)
    for (bond, antechamber_bond_type) in zip(molecule.GetBonds(), antechamber_bond_types):
        #bond.SetOrder(order_map[antechamber_bond_type])
        bond.SetIntType(order_map[antechamber_bond_type])
    oechem.OEFindRingAtomsAndBonds(molecule)
    oechem.OEKekulize(molecule)
    oechem.OEAssignFormalCharges(molecule)
    oechem.OEAssignAromaticFlags(molecule, oechem.OEAroModelOpenEye)
    # Clean up.
    os.unlink(mol2_input_filename)
    os.unlink(ac_output_filename)
    os.rmdir(tmpdir)
    # Generate Tripos atom names if requested.
    if tripos_atom_names:
        oechem.OETriposAtomNames(molecule)
    # Assign geometry
    if geometry:
        from openeye import oeomega
        omega = oeomega.OEOmega()
        omega.SetMaxConfs(1)
        omega.SetIncludeInput(False)
        omega.SetStrictStereo(False)
        omega(molecule)
    return molecule
def _computeNetCharge(molecule):
    """
    Compute the net formal charge on the molecule.
    Formal charges are assigned by this function.
    Parameters
    ----------
    molecule : openeye.oechem.OEMol
        The molecule for which a net formal charge is to be computed
    Returns
    -------
    net_charge : float
        The net formal charge on the molecule
    """
    from openeye import oechem
    # Side effect: assigns formal charges on the molecule before summing.
    oechem.OEAssignFormalCharges(molecule)
    formal_charges = [atom.GetFormalCharge() for atom in molecule.GetAtoms()]
    return np.array(formal_charges).sum()
def _writeMolecule(molecule, output_filename, standardize=True):
    """
    Write the molecule to a Tripos mol2 file.
    Parameters
    ----------
    molecule : openeye.oechem.OEMol
        The molecule to write (will be modified by writer).
    output_filename : str
        The filename of file to be written; type is autodetected by extension.
    standardize : bool, optional, default=True
        Standardize molecular properties such as atom names in the output file.
    """
    from openmoltools.openeye import molecule_to_mol2
    # Write the first conformer, naming the residue after the molecule title.
    molecule_to_mol2(molecule,
                     tripos_mol2_filename=output_filename,
                     conformer=0,
                     residue_name=molecule.GetTitle(),
                     standardize=standardize)
def generateResidueTemplate(molecule, residue_atoms=None, normalize=True, gaff_version='gaff'):
    """
    Generate an residue template for simtk.openmm.app.ForceField using GAFF/AM1-BCC.
    This requires the OpenEye toolkit.
    Parameters
    ----------
    molecule : openeye.oechem.OEMol
        The molecule to be parameterized.
        The molecule must have explicit hydrogens.
        Net charge will be inferred from the net formal charge on each molecule.
        Partial charges will be determined automatically using oequacpac and canonical AM1-BCC charging rules.
    residue_atoms : set of OEAtom, optional, default=None
        If not None, only the atoms in this set will be used to construct the residue template
    normalize : bool, optional, default=True
        If True, normalize the molecule by checking aromaticity, adding
        explicit hydrogens, and renaming by IUPAC name.
    gaff_version : str, default = 'gaff'
        One of ['gaff', 'gaff2']; selects which atom types to use.
    Returns
    -------
    template : simtk.openmm.app.forcefield._TemplateData
        Residue template for ForceField using atom types and parameters from `gaff.xml` or `gaff2.xml`.
    additional_parameters_ffxml : str
        Contents of ForceField `ffxml` file defining additional parameters from parmchk(2).
    Notes
    -----
    The residue template will be named after the molecule title.
    This method preserves stereochemistry during AM1-BCC charge parameterization.
    Atom names in molecules will be assigned Tripos atom names if any are blank or not unique.
    """
    # Set the template name based on the molecule title plus a globally unique UUID.
    from uuid import uuid4
    template_name = molecule.GetTitle() + '-' + str(uuid4())
    # If any atom names are not unique, reassign Tripos atom names.
    _ensureUniqueAtomNames(molecule)
    # Compute net formal charge.
    net_charge = _computeNetCharge(molecule)
    # Generate canonical AM1-BCC charges and a reference conformation.
    molecule = get_charges(molecule, strictStereo=False, keep_confs=1, normalize=normalize)
    # DEBUG: This may be necessary.
    molecule.SetTitle('MOL')
    # Create temporary directory for running antechamber.
    import tempfile
    tmpdir = tempfile.mkdtemp()
    prefix = 'molecule'
    input_mol2_filename = os.path.join(tmpdir, prefix + '.tripos.mol2')
    gaff_mol2_filename = os.path.join(tmpdir, prefix + '.gaff.mol2')
    frcmod_filename = os.path.join(tmpdir, prefix + '.frcmod')
    # Write Tripos mol2 file as antechamber input.
    _writeMolecule(molecule, input_mol2_filename, standardize=normalize)
    # Parameterize the molecule with antechamber.
    run_antechamber(template_name, input_mol2_filename, charge_method=None, net_charge=net_charge, gaff_mol2_filename=gaff_mol2_filename, frcmod_filename=frcmod_filename, gaff_version=gaff_version)
    # Read the resulting GAFF mol2 file back into `molecule` so atom types
    # and charges reflect antechamber's output.
    from openeye import oechem
    ifs = oechem.oemolistream(gaff_mol2_filename)
    ifs.SetFlavor(oechem.OEFormat_MOL2, oechem.OEIFlavor_MOL2_DEFAULT | oechem.OEIFlavor_MOL2_M2H | oechem.OEIFlavor_MOL2_Forcefield)
    # (Removed unused local `m2h`; the flavor flags above already request
    # hydrogen handling.)
    oechem.OEReadMolecule(ifs, molecule)
    ifs.close()
    # If residue_atoms is None, add all atoms to the residue.
    # BUG FIX: use `is None` rather than `== None` for the sentinel check.
    if residue_atoms is None:
        residue_atoms = [ atom for atom in molecule.GetAtoms() ]
    # Modify partial charges so that charge on residue atoms is integral.
    residue_charge = 0.0
    sum_of_absolute_charge = 0.0
    for atom in residue_atoms:
        charge = atom.GetPartialCharge()
        residue_charge += charge
        sum_of_absolute_charge += abs(charge)
    excess_charge = residue_charge - net_charge
    if sum_of_absolute_charge == 0.0:
        # Avoid division by zero when all charges are zero.
        sum_of_absolute_charge = 1.0
    for atom in residue_atoms:
        charge = atom.GetPartialCharge()
        # Spread the excess proportionally to each atom's charge magnitude.
        atom.SetPartialCharge( charge + excess_charge * (abs(charge) / sum_of_absolute_charge) )
    # Create residue template.
    template = ForceField._TemplateData(template_name)
    for (index, atom) in enumerate(molecule.GetAtoms()):
        atomname = atom.GetName()
        typename = atom.GetType()
        element = Element.getByAtomicNumber(atom.GetAtomicNum())
        charge = atom.GetPartialCharge()
        parameters = { 'charge' : charge }
        atom_template = ForceField._TemplateAtomData(atomname, typename, element, parameters)
        if hasattr(template, 'atomIndices'):
            # OpenMM 7.4 and later
            template.addAtom(atom_template)
        else:
            # OpenMM 7.3 and earlier
            template.atoms.append(atom_template)
    for bond in molecule.GetBonds():
        if (bond.GetBgn() in residue_atoms) and (bond.GetEnd() in residue_atoms):
            template.addBondByName(bond.GetBgn().GetName(), bond.GetEnd().GetName())
        elif (bond.GetBgn() in residue_atoms) and (bond.GetEnd() not in residue_atoms):
            template.addExternalBondByName(bond.GetBgn().GetName())
        elif (bond.GetBgn() not in residue_atoms) and (bond.GetEnd() in residue_atoms):
            template.addExternalBondByName(bond.GetEnd().GetName())
    # Generate ffxml file contents for parmchk-generated frcmod output.
    leaprc = StringIO('parm = loadamberparams %s' % frcmod_filename)
    params = parmed.amber.AmberParameterSet.from_leaprc(leaprc)
    params = parmed.openmm.OpenMMParameterSet.from_parameterset(params, remediate_residues=False)
    ffxml = StringIO()
    params.write(ffxml, write_unused=True)
    return template, ffxml.getvalue()
def generateForceFieldFromMolecules(molecules, ignoreFailures=False, generateUniqueNames=False, normalize=True, gaff_version='gaff'):
    """
    Generate ffxml file containing additional parameters and residue templates for simtk.openmm.app.ForceField using GAFF/AM1-BCC.
    This requires the OpenEye toolkit.
    Parameters
    ----------
    molecules : list of openeye.oechem.OEMol
        The molecules to be parameterized.
        All molecules must have explicit hydrogens.
        Net charge will be inferred from the net formal charge on each molecule.
        Partial charges will be determined automatically using oequacpac and canonical AM1-BCC charging rules.
    ignoreFailures: bool, optional, default=False
        Determines whether to add a failed molecule to the list of failed molecules (True),
        or raise an Exception (False).
    generateUniqueNames : bool, optional, default=False
        If True, will generate globally unique names for templates.
    normalize : bool, optional, default=True
        If True, normalize the molecule by checking aromaticity, adding
        explicit hydrogens, and renaming by IUPAC name.
    gaff_version : str, default = 'gaff'
        One of ['gaff', 'gaff2']; selects which atom types to use.
    Returns
    -------
    ffxml : str
        Contents of ForceField `ffxml` file defining additional parameters from parmchk(2) and residue templates.
    failed_molecule_list : list of openeye.oechem.OEMol
        List of the oemols that could not be parameterized. Only returned if ignoreFailures=True
    Notes
    -----
    This method preserves stereochemistry during AM1-BCC charge parameterization.
    Residue template names will be set from molecule names.
    Atom names in molecules will be assigned Tripos atom names if any are blank or not unique.
    """
    if not generateUniqueNames:
        # Check template names are unique.
        template_names = set()
        for molecule in molecules:
            template_name = molecule.GetTitle()
            if template_name == '<0>':
                raise Exception("Molecule '%s' has invalid name" % template_name)
            if template_name in template_names:
                raise Exception("Molecule '%s' has template name collision." % template_name)
            template_names.add(template_name)
    # Process molecules.
    import tempfile
    tmpdir = tempfile.mkdtemp()
    olddir = os.getcwd()
    os.chdir(tmpdir)
    leaprc = ""
    failed_molecule_list = []
    for (molecule_index, molecule) in enumerate(molecules):
        # Set the template name based on the molecule title.
        if generateUniqueNames:
            from uuid import uuid4
            template_name = molecule.GetTitle() + '-' + str(uuid4())
        else:
            template_name = molecule.GetTitle()
        # If any atom names are not unique, reassign Tripos atom names.
        _ensureUniqueAtomNames(molecule)
        # Compute net formal charge.
        net_charge = _computeNetCharge(molecule)
        # Generate canonical AM1-BCC charges and a reference conformation.
        if not ignoreFailures:
            molecule = get_charges(molecule, strictStereo=False, keep_confs=1, normalize=normalize)
        else:
            try:
                molecule = get_charges(molecule, strictStereo=False, keep_confs=1, normalize=normalize)
            except Exception:
                # BUG FIX: previously this fell through and the failed
                # (uncharged) molecule was still parameterized below; record
                # the failure and skip to the next molecule instead.
                # (Also narrowed from a bare `except:`.)
                failed_molecule_list.append(molecule)
                continue
        # Create a unique prefix.
        prefix = 'molecule%010d' % molecule_index
        # Create temporary directory for running antechamber.
        input_mol2_filename = prefix + '.tripos.mol2'
        gaff_mol2_filename = prefix + '.gaff.mol2'
        frcmod_filename = prefix + '.frcmod'
        # Write Tripos mol2 file as antechamber input.
        _writeMolecule(molecule, input_mol2_filename, standardize=normalize)
        # Parameterize the molecule with antechamber.
        run_antechamber(prefix, input_mol2_filename, charge_method=None, net_charge=net_charge, gaff_mol2_filename=gaff_mol2_filename, frcmod_filename=frcmod_filename, gaff_version=gaff_version)
        # Append to leaprc input for parmed.
        leaprc += '%s = loadmol2 %s\n' % (prefix, gaff_mol2_filename)
        leaprc += 'loadamberparams %s\n' % frcmod_filename
    # Generate ffxml file contents for parmchk-generated frcmod output.
    leaprc = StringIO(leaprc)
    params = parmed.amber.AmberParameterSet.from_leaprc(leaprc)
    params = parmed.openmm.OpenMMParameterSet.from_parameterset(params, remediate_residues=False)
    ffxml = StringIO()
    params.write(ffxml, write_unused=True)
    # TODO: Clean up temporary directory.
    os.chdir(olddir)
    if ignoreFailures:
        return ffxml.getvalue(), failed_molecule_list
    else:
        return ffxml.getvalue()
def createStructureFromResidue(residue):
    """Create a ParmEd Structure from an OpenMM Topology Residue.

    Parameters
    ----------
    residue : simtk.openmm.app.topology.Residue
        The residue whose atoms and internal bonds are copied.

    Returns
    -------
    structure : parmed.Structure
        A Structure containing equivalent ParmEd atoms and bonds.
    """
    structure = parmed.Structure()
    # BUG FIX: `atommap` was used without ever being initialized (NameError).
    atommap = {}  # OpenMM atom -> ParmEd atom
    for a in residue.atoms():
        if a.element is None:
            # Virtual sites have no element.
            atom = parmed.ExtraPoint(name=a.name)
        else:
            atom = parmed.Atom(atomic_number=a.element.atomic_number, name=a.name, mass=a.element.mass)
        structure.add_atom(atom, residue.name, residue.index, 'A')
        atommap[a] = atom
    # BUG FIX: the original iterated `topology.bonds()` with `topology` and
    # `Bond` both undefined. Iterate this residue's bonds instead, skipping
    # any bond whose partner lies outside the residue.
    for a1, a2 in residue.bonds():
        if a1 in atommap and a2 in atommap:
            structure.bonds.append(parmed.topologyobjects.Bond(atommap[a1], atommap[a2]))
    return structure
def gaffTemplateGenerator(forcefield, residue, structure=None):
    """
    OpenMM ForceField residue template generator for GAFF/AM1-BCC.
    NOTE: This implementation currently only handles small molecules, not polymeric residues.
    NOTE: We presume we have already loaded the `gaff.xml` force definitions into ForceField.
    Parameters
    ----------
    forcefield : simtk.openmm.app.ForceField
        The ForceField object to which residue templates and/or parameters are to be added.
    residue : simtk.openmm.app.Topology.Residue
        The residue topology for which a template is to be generated.
    Returns
    -------
    success : bool
        If the generator is able to successfully parameterize the residue, `True` is returned.
        If the generator cannot parameterize the residue, it should return `False` and not modify `forcefield`.
    Note that there is no way to preserve stereochemistry since `Residue` does not specify stereochemistry in any way.
    Charge fitting is therefore performed on an indeterminate stereo form.
    """
    # Residues with external bonds cannot currently be parameterized.
    if any(True for _ in residue.external_bonds()):
        return False
    # Build an OEMol from the residue, then parameterize it with GAFF/AM1-BCC.
    molecule = generateOEMolFromTopologyResidue(residue)
    template, ffxml = generateResidueTemplate(molecule)
    # Register the template and load its extra parameters into the ForceField.
    forcefield.registerResidueTemplate(template)
    forcefield.loadFile(StringIO(ffxml))
    return True
class SystemGenerator(object):
    """
    Utility factory to generate OpenMM Systems from Topology objects.
    Parameters
    ----------
    forcefields_to_use : list of string
        List of the names of ffxml files that will be used in system creation.
    forcefield_kwargs : dict of arguments to createSystem, optional
        Allows specification of various aspects of system creation.
    use_gaff : bool, optional, default=True
        If True, will add the GAFF residue template generator.
    Examples
    --------
    >>> from simtk.openmm import app
    >>> forcefield_kwargs={ 'nonbondedMethod' : app.NoCutoff, 'implicitSolvent' : None, 'constraints' : None }
    >>> system_generator = SystemGenerator(['amber99sbildn.xml'], forcefield_kwargs=forcefield_kwargs)
    >>> from openmmtools.testsystems import AlanineDipeptideVacuum
    >>> testsystem = AlanineDipeptideVacuum()
    >>> system = system_generator.createSystem(testsystem.topology)
    """

    def __init__(self, forcefields_to_use, forcefield_kwargs=None, use_gaff=True):
        self._forcefield_xmls = forcefields_to_use
        self._forcefield_kwargs = {} if forcefield_kwargs is None else forcefield_kwargs
        from simtk.openmm.app import ForceField
        self._forcefield = ForceField(*self._forcefield_xmls)
        if use_gaff:
            # Parameterize unknown residues on the fly with GAFF/AM1-BCC.
            self._forcefield.registerTemplateGenerator(gaffTemplateGenerator)

    def getForceField(self):
        """
        Return the associated ForceField object.
        Returns
        -------
        forcefield : simtk.openmm.app.ForceField
            The current ForceField object.
        """
        return self._forcefield

    def createSystem(self, topology):
        """
        Build a system from specified topology object.
        Parameters
        ----------
        topology : simtk.openmm.app.Topology object
            The topology of the system to construct.
        Returns
        -------
        system : openmm.System
            A system object generated from the topology
        """
        return self._forcefield.createSystem(topology, **self._forcefield_kwargs)

    @property
    def ffxmls(self):
        # The ffxml filenames this generator was constructed with.
        return self._forcefield_xmls

    @property
    def forcefield(self):
        # Direct access to the underlying ForceField instance.
        return self._forcefield
| |
import imp
import threading
from django.conf import settings
from webassets.env import (
BaseEnvironment, ConfigStorage, Resolver, url_prefix_join)
from webassets.exceptions import ImminentDeprecationWarning
from django_assets.glob import Globber, has_magic
__all__ = ('register',)
class DjangoConfigStorage(ConfigStorage):
    """Maps webassets configuration keys onto attributes of Django's global
    settings object, so webassets options can be set in settings.py."""

    # webassets option name -> Django settings name. Keys not listed here
    # fall back to upper-casing in _transform_key().
    _mapping = {
        'debug': 'ASSETS_DEBUG',
        'cache': 'ASSETS_CACHE',
        'updater': 'ASSETS_UPDATER',
        'auto_build': 'ASSETS_AUTO_BUILD',
        'url_expire': 'ASSETS_URL_EXPIRE',
        'versions': 'ASSETS_VERSIONS',
        'manifest': 'ASSETS_MANIFEST',
        'load_path': 'ASSETS_LOAD_PATH',
        'url_mapping': 'ASSETS_URL_MAPPING',
    }
    def _transform_key(self, key):
        """Return the Django settings name to use for a webassets key,
        preferring explicit ASSETS_* overrides, then STATIC_*, then MEDIA_*."""
        # STATIC_* are the new Django 1.3 settings,
        # MEDIA_* was used in earlier versions.
        if key.lower() == 'directory':
            if hasattr(settings, 'ASSETS_ROOT'):
                return 'ASSETS_ROOT'
            if getattr(settings, 'STATIC_ROOT', None):
                # Is None by default
                return 'STATIC_ROOT'
            return 'MEDIA_ROOT'
        if key.lower() == 'url':
            if hasattr(settings, 'ASSETS_URL'):
                return 'ASSETS_URL'
            if getattr(settings, 'STATIC_URL', None):
                # Is '' by default
                return 'STATIC_URL'
            return 'MEDIA_URL'
        return self._mapping.get(key.lower(), key.upper())
    def __contains__(self, key):
        return hasattr(settings, self._transform_key(key))
    def __getitem__(self, key):
        if self.__contains__(key):
            # Give deprecated option names (handled by the base class)
            # a chance to supply the value first.
            value = self._get_deprecated(key)
            if value is not None:
                return value
            return getattr(settings, self._transform_key(key))
        else:
            raise KeyError("Django settings doesn't define %s" %
                           self._transform_key(key))
    def __setitem__(self, key, value):
        if not self._set_deprecated(key, value):
            setattr(settings, self._transform_key(key), value)
    def __delitem__(self, key):
        # This isn't possible to implement in Django without relying
        # on internals of the settings object, so just set to None.
        self.__setitem__(key, None)
class StorageGlobber(Globber):
    """Globber backed by a Django storage object."""

    def __init__(self, storage):
        self.storage = storage

    def isdir(self, path):
        # The storage API exposes no directory test. Returning False simply
        # disables the "dir/" glob syntax rather than guessing wrongly.
        return False

    def islink(self, path):
        # The storage API has no notion of symlinks.
        return False

    def listdir(self, path):
        # Storages report (directories, files) separately; flatten them.
        dirs, files = self.storage.listdir(path)
        return dirs + files

    def exists(self, path):
        # Some storages don't implement exists(); treat that as "not found".
        try:
            return self.storage.exists(path)
        except NotImplementedError:
            return False
class DjangoResolver(Resolver):
    """Adds support for staticfiles resolving."""

    @property
    def use_staticfiles(self):
        # Only consult staticfiles in debug mode, and only when the
        # staticfiles app is actually installed.
        return settings.ASSETS_DEBUG and \
            'django.contrib.staticfiles' in settings.INSTALLED_APPS

    def glob_staticfiles(self, item):
        # The staticfiles finder system can't do globs, but we can
        # access the storages behind the finders, and glob those.
        # We can't import too early because of unit tests
        try:
            from django.contrib.staticfiles import finders
        except ImportError:
            # Support pre-1.3 versions.
            finders = None
        # BUG FIX: previously fell through with finders=None and crashed
        # with AttributeError on finders.get_finders(); yield nothing when
        # staticfiles is unavailable instead.
        if finders is None:
            return
        for finder in finders.get_finders():
            # Builtin finders use either one of those attributes,
            # though this does seem to be informal; custom finders
            # may well use neither. Nothing we can do about that.
            if hasattr(finder, 'storages'):
                storages = finder.storages.values()
            elif hasattr(finder, 'storage'):
                storages = [finder.storage]
            else:
                continue
            for storage in storages:
                globber = StorageGlobber(storage)
                for file in globber.glob(item):
                    yield storage.path(file)

    def search_for_source(self, ctx, item):
        """Resolve `item` to absolute path(s), via staticfiles if enabled."""
        if not self.use_staticfiles:
            return Resolver.search_for_source(self, ctx, item)
        # We can't import too early because of unit tests
        try:
            from django.contrib.staticfiles import finders
        except ImportError:
            # Support pre-1.3 versions.
            finders = None
        # Use the staticfiles finders to determine the absolute path
        if finders:
            if has_magic(item):
                return list(self.glob_staticfiles(item))
            else:
                f = finders.find(item)
                if f is not None:
                    return f
        raise IOError(
            "'%s' not found (using staticfiles finders)" % item)

    def resolve_source_to_url(self, ctx, filepath, item):
        if not self.use_staticfiles:
            return Resolver.resolve_source_to_url(self, ctx, filepath, item)
        # With staticfiles enabled, searching the url mappings, as the
        # parent implementation does, will not help. Instead, we can
        # assume that the url is the root url + the original relative
        # item that was specified (and searched for using the finders).
        return url_prefix_join(ctx.url, item)
class DjangoEnvironment(BaseEnvironment):
    """For Django, we need to redirect all the configuration values this
    object holds to Django's own settings object.
    """
    # Storage that proxies config reads/writes to django.conf.settings.
    config_storage_class = DjangoConfigStorage
    # Resolver with django.contrib.staticfiles support.
    resolver_class = DjangoResolver
# Django has a global state, a global configuration, and so we need a
# global instance of an assets environment.
env = None
env_lock = threading.RLock()
def get_env():
    """Return the global asset environment, creating it on first use.

    While the first request is within autoload(), a second thread can come
    in and, without the lock, would use a not-fully-loaded environment.
    """
    global env
    with env_lock:
        if env is None:
            env = DjangoEnvironment()

            # Load application's ``assets`` modules. We need to do this in
            # a delayed fashion, since the main django_assets module imports
            # this, and the application ``assets`` modules we load will import
            # ``django_assets``, thus giving us a classic circular dependency
            # issue.
            autoload()
    return env
def reset():
    """Discard the global environment so the next get_env() rebuilds it."""
    global env
    env = None
# The user needn't know about the env though, we can expose the
# relevant functionality directly. This is also for backwards-compatibility
# with times where ``django-assets`` was a standalone library.
def register(*a, **kw):
    """Module-level shortcut: register a bundle with the global environment."""
    return get_env().register(*a, **kw)
# Finally, we'd like to autoload the ``assets`` module of each Django app.
try:
    # polyfill for new django 1.6+ apps
    from importlib import import_module as native_import_module

    def import_module(app):
        """Import ``app``; on failure, retry with its AppConfig-deduced name."""
        try:
            module = native_import_module(app)
        except ImportError:
            # ``app`` may be a dotted AppConfig path (Django 1.7+ style);
            # fall back to the app name the config class declares.
            app = deduce_app_name(app)
            module = native_import_module(app)
        return module
except ImportError:
    try:
        from django.utils.importlib import import_module
    except ImportError:
        # django-1.0 compatibility
        import warnings
        warnings.warn('django-assets may not be compatible with Django versions '
            'earlier than 1.1', ImminentDeprecationWarning)

        # Last-resort shim built on __import__; returns the module __path__
        # rather than the module itself, matching historical behaviour.
        def import_module(app):
            return __import__(app, {}, {}, [app.split('.')[-1]]).__path__
# polyfill for new django 1.6+ apps
def deduce_app_name(app):
    """Given an INSTALLED_APPS entry, return the importable app name.

    Django 1.7+ allows entries that point at an AppConfig class
    (e.g. ``"myapp.apps.MyAppConfig"``); for those, return the config's
    declared ``name``. Plain (dot-free) entries and entries that fail to
    import are returned unchanged.
    """
    app_array = app.split('.')
    module_name = '.'.join(app_array[0:-1])
    if len(module_name) == 0:
        # No dots: cannot be an AppConfig path, use as-is.
        return app
    app_config_class = app_array[-1]
    try:
        module = import_module(module_name)
        # figure out the config
        ImportedConfig = getattr(module, app_config_class)
        # NOTE(review): a module missing the config attribute raises
        # AttributeError here, which propagates -- confirm intended.
        return ImportedConfig.name
    except ImportError:
        # Not an AppConfig path after all; use the entry as-is.
        # (The previous trailing ``return app`` after this handler was
        # unreachable and has been removed.)
        return app
# Guard flag so autoload() only ever runs once per process.
_ASSETS_LOADED = False

def autoload():
    """Find assets by looking for an ``assets`` module within each
    installed application, similar to how, e.g., the admin autodiscover
    process works. This is where this code has been adapted from, too.

    Only runs once; subsequent calls return False immediately.
    """
    global _ASSETS_LOADED
    if _ASSETS_LOADED:
        return False

    # Import this locally, so that we don't have a global Django
    # dependency.
    from django.conf import settings

    for app in settings.INSTALLED_APPS:
        # For each app, we need to look for an assets.py inside that
        # app's package. We can't use os.path here -- recall that
        # modules may be imported different ways (think zip files) --
        # so we need to get the app's __path__ and look for
        # admin.py on that path.
        #if options.get('verbosity') > 1:
        #    print "\t%s..." % app,

        # Step 1: find out the app's __path__ Import errors here will
        # (and should) bubble up, but a missing __path__ (which is
        # legal, but weird) fails silently -- apps that do weird things
        # with __path__ might need to roll their own registration.
        try:
            app_path = import_module(app).__path__
        except AttributeError:
            #if options.get('verbosity') > 1:
            #    print "cannot inspect app"
            continue

        # Step 2: use imp.find_module to find the app's assets.py.
        # For some reason imp.find_module raises ImportError if the
        # app can't be found but doesn't actually try to import the
        # module. So skip this app if its assets.py doesn't exist
        try:
            imp.find_module('assets', app_path)
        except ImportError:
            #if options.get('verbosity') > 1:
            #    print "no assets module"
            continue

        # Step 3: import the app's assets file. If this has errors we
        # want them to bubble up.
        app_name = deduce_app_name(app)
        import_module("{}.assets".format(app_name))
        #if options.get('verbosity') > 1:
        #    print "assets module loaded"

    # Load additional modules.
    for module in getattr(settings, 'ASSETS_MODULES', []):
        import_module("%s" % module)

    _ASSETS_LOADED = True
| |
# Copyright (c) 2015 Mirantis, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from neutron_lib.api import attributes
from neutron_lib.callbacks import events
from neutron_lib import context
from neutron_lib.db import constants as db_const
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_policy import policy as oslo_policy
from oslo_serialization import jsonutils
from neutron.db.quota import driver as quota_driver
from neutron import manager
from neutron.pecan_wsgi.controllers import resource
from neutron import policy
from neutron.tests.functional.pecan_wsgi import test_functional
class TestOwnershipHook(test_functional.PecanFunctionalTest):

    def test_network_ownership_check(self):
        """Creating a port on a network owned by the same tenant succeeds."""
        headers = {'X-Project-Id': 'tenid'}
        net_body = jsonutils.loads(self.app.post_json(
            '/v2.0/networks.json',
            params={'network': {'name': 'meh'}},
            headers=headers).body)
        port_response = self.app.post_json(
            '/v2.0/ports.json',
            params={'port': {'network_id': net_body['network']['id'],
                             'admin_state_up': True}},
            headers=headers)
        self.assertEqual(201, port_response.status_int)
class TestQueryParametersHook(test_functional.PecanFunctionalTest):

    def test_if_match_on_update(self):
        """If-Match on revision_number is a 400 without the revision plugin."""
        created = jsonutils.loads(self.app.post_json(
            '/v2.0/networks.json',
            params={'network': {'name': 'meh'}},
            headers={'X-Project-Id': 'tenid'}).body)
        net_id = created['network']['id']
        update_response = self.app.put_json(
            '/v2.0/networks/%s.json' % net_id,
            params={'network': {'name': 'cat'}},
            headers={'X-Project-Id': 'tenid',
                     'If-Match': 'revision_number=0'},
            expect_errors=True)
        # revision plugin not supported by default, so badrequest
        self.assertEqual(400, update_response.status_int)
class TestQueryParametersHookWithRevision(test_functional.PecanFunctionalTest):

    def setUp(self):
        # Enable the revision service plugin so If-Match is honoured.
        cfg.CONF.set_override('service_plugins', ['revisions'])
        super(TestQueryParametersHookWithRevision, self).setUp()

    def test_if_match_on_update(self):
        """A stale revision is rejected (412); the current one succeeds."""
        base_headers = {'X-Project-Id': 'tenid'}
        created = jsonutils.loads(self.app.post_json(
            '/v2.0/networks.json',
            params={'network': {'name': 'meh'}},
            headers=base_headers).body)
        net_id = created['network']['id']
        current_rev = created['network']['revision_number']

        stale_headers = dict(base_headers)
        stale_headers['If-Match'] = 'revision_number=%s' % (current_rev - 1)
        stale_response = self.app.put_json(
            '/v2.0/networks/%s.json' % net_id,
            params={'network': {'name': 'cat'}},
            headers=stale_headers,
            expect_errors=True)
        self.assertEqual(412, stale_response.status_int)

        fresh_headers = dict(base_headers)
        fresh_headers['If-Match'] = 'revision_number=%s' % current_rev
        self.app.put_json('/v2.0/networks/%s.json' % net_id,
                          params={'network': {'name': 'cat'}},
                          headers=fresh_headers)
class TestQuotaEnforcementHook(test_functional.PecanFunctionalTest):

    def _post_networks(self, body, expect_errors=False):
        # POST the given body to the networks collection as tenant 'tenid'.
        return self.app.post_json(
            '/v2.0/networks.json',
            params=body,
            headers={'X-Project-Id': 'tenid'},
            expect_errors=expect_errors)

    def test_quota_enforcement_single(self):
        """With a quota of 1 network, the second create is rejected (409)."""
        quota_driver.DbQuotaDriver.update_quota_limit(
            context.get_admin_context(), 'tenid', 'network', 1)
        # There is enough headroom for creating a network
        response = self._post_networks({'network': {'name': 'meh'}})
        self.assertEqual(response.status_int, 201)
        # But a second request will fail
        response = self._post_networks({'network': {'name': 'meh-2'}},
                                       expect_errors=True)
        self.assertEqual(response.status_int, 409)

    def test_quota_enforcement_bulk_request(self):
        """With a quota of 3, one bulk create of 2 fits but a second does not."""
        quota_driver.DbQuotaDriver.update_quota_limit(
            context.get_admin_context(), 'tenid', 'network', 3)
        # There is enough headroom for a bulk request creating 2 networks
        response = self._post_networks(
            {'networks': [{'name': 'meh1'}, {'name': 'meh2'}]})
        self.assertEqual(response.status_int, 201)
        # But it won't be possible to create 2 more networks...
        response = self._post_networks(
            {'networks': [{'name': 'meh3'}, {'name': 'meh4'}]},
            expect_errors=True)
        self.assertEqual(response.status_int, 409)
class TestPolicyEnforcementHook(test_functional.PecanFunctionalTest):

    # Attribute map for a fake 'mehs' resource. 'restricted_attr' is
    # intended to be admin-only; the enforcing policy is injected in setUp.
    FAKE_RESOURCE = {
        'mehs': {
            'id': {'allow_post': False, 'allow_put': False,
                   'is_visible': True, 'primary_key': True},
            'attr': {'allow_post': True, 'allow_put': True,
                     'is_visible': True, 'default': ''},
            'restricted_attr': {'allow_post': True, 'allow_put': True,
                                'is_visible': True, 'default': ''},
            'tenant_id': {'allow_post': True, 'allow_put': False,
                          'required_by_policy': True,
                          'validate': {'type:string':
                                       db_const.PROJECT_ID_FIELD_SIZE},
                          'is_visible': True}
        }
    }

    def setUp(self):
        # Create a controller for a fake resource. This will make the tests
        # independent from the evolution of the API (so if one changes the API
        # or the default policies there won't be any risk of breaking these
        # tests, or at least I hope so)
        super(TestPolicyEnforcementHook, self).setUp()
        self.mock_plugin = mock.Mock()
        attributes.RESOURCES.update(self.FAKE_RESOURCE)
        manager.NeutronManager.set_plugin_for_resource('mehs',
                                                       self.mock_plugin)
        fake_controller = resource.CollectionsController('mehs', 'meh')
        manager.NeutronManager.set_controller_for_resource(
            'mehs', fake_controller)
        # Inject policies for the fake resource; create is open to anyone,
        # update/delete/get are admin-only (get also allows id == 'xxx').
        policy.init()
        policy._ENFORCER.set_rules(
            oslo_policy.Rules.from_dict(
                {'create_meh': '',
                 'update_meh': 'rule:admin_only',
                 'delete_meh': 'rule:admin_only',
                 'get_meh': 'rule:admin_only or field:mehs:id=xxx',
                 'get_meh:restricted_attr': 'rule:admin_only'}),
            overwrite=False)

    def test_before_on_create_authorized(self):
        """create_meh has an empty policy, so the POST must succeed."""
        # Mock a return value for a hypothetical create operation
        self.mock_plugin.create_meh.return_value = {
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        response = self.app.post_json('/v2.0/mehs.json',
                                      params={'meh': {'attr': 'meh'}},
                                      headers={'X-Project-Id': 'tenid'})
        # We expect this operation to succeed
        self.assertEqual(201, response.status_int)
        self.assertEqual(0, self.mock_plugin.get_meh.call_count)
        self.assertEqual(1, self.mock_plugin.create_meh.call_count)

    def test_before_on_put_not_authorized(self):
        """Non-admin PUT on a readable resource is denied with 403."""
        # The policy hook here should load the resource, and therefore we must
        # mock a get response
        self.mock_plugin.get_meh.return_value = {
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        # The policy engine should trigger an exception in 'before', and the
        # plugin method should not be called at all
        response = self.app.put_json('/v2.0/mehs/xxx.json',
                                     params={'meh': {'attr': 'meh'}},
                                     headers={'X-Project-Id': 'tenid'},
                                     expect_errors=True)
        self.assertEqual(403, response.status_int)
        self.assertEqual(1, self.mock_plugin.get_meh.call_count)
        self.assertEqual(0, self.mock_plugin.update_meh.call_count)

    def test_before_on_put_not_found_when_not_authorized_to_get(self):
        # the user won't even have permission to view this resource
        # so the error on unauthorized updates should be translated into
        # a 404
        self.mock_plugin.get_meh.return_value = {
            'id': 'yyy',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        response = self.app.put_json('/v2.0/mehs/yyy.json',
                                     params={'meh': {'attr': 'meh'}},
                                     headers={'X-Project-Id': 'tenid'},
                                     expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual(1, self.mock_plugin.get_meh.call_count)
        self.assertEqual(0, self.mock_plugin.update_meh.call_count)

    def test_before_on_delete_not_authorized(self):
        """Non-admin DELETE on a readable resource is denied with 403."""
        # The policy hook here should load the resource, and therefore we must
        # mock a get response
        self.mock_plugin.delete_meh.return_value = None
        self.mock_plugin.get_meh.return_value = {
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        # The policy engine should trigger an exception in 'before', and the
        # plugin method should not be called
        response = self.app.delete_json('/v2.0/mehs/xxx.json',
                                        headers={'X-Project-Id': 'tenid'},
                                        expect_errors=True)
        self.assertEqual(403, response.status_int)
        self.assertEqual(1, self.mock_plugin.get_meh.call_count)
        self.assertEqual(0, self.mock_plugin.delete_meh.call_count)

    def test_after_on_get_not_found(self):
        # The GET test policy will deny access to anything whose id is not
        # 'xxx', so the following request should be forbidden and presented
        # to the user as an HTTPNotFound
        self.mock_plugin.get_meh.return_value = {
            'id': 'yyy',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        # The policy engine should trigger an exception in 'after', and the
        # plugin method should be called
        response = self.app.get('/v2.0/mehs/yyy.json',
                                headers={'X-Project-Id': 'tenid'},
                                expect_errors=True)
        self.assertEqual(404, response.status_int)
        self.assertEqual(1, self.mock_plugin.get_meh.call_count)

    def test_after_on_get_excludes_admin_attribute(self):
        """Non-admin GET succeeds but the admin-only attribute is stripped."""
        self.mock_plugin.get_meh.return_value = {
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}
        response = self.app.get('/v2.0/mehs/xxx.json',
                                headers={'X-Project-Id': 'tenid'})
        self.assertEqual(200, response.status_int)
        json_response = jsonutils.loads(response.body)
        self.assertNotIn('restricted_attr', json_response['meh'])

    def test_after_on_list_excludes_admin_attribute(self):
        """Non-admin list succeeds but the admin-only attribute is stripped."""
        self.mock_plugin.get_mehs.return_value = [{
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}]
        response = self.app.get('/v2.0/mehs',
                                headers={'X-Project-Id': 'tenid'})
        self.assertEqual(200, response.status_int)
        json_response = jsonutils.loads(response.body)
        self.assertNotIn('restricted_attr', json_response['mehs'][0])

    def test_after_inits_policy(self):
        """A request after policy.reset() re-initializes the policy engine."""
        self.mock_plugin.get_mehs.return_value = [{
            'id': 'xxx',
            'attr': 'meh',
            'restricted_attr': '',
            'tenant_id': 'tenid'}]
        policy.reset()
        response = self.app.get('/v2.0/mehs',
                                headers={'X-Project-Id': 'tenid'})
        self.assertEqual(200, response.status_int)
class TestMetricsNotifierHook(test_functional.PecanFunctionalTest):
    """Verify CRUD requests emit <resource>.<action>.{start,end} notifications."""

    def setUp(self):
        patcher = mock.patch('neutron.pecan_wsgi.hooks.notifier.NotifierHook.'
                             '_notifier')
        self.mock_notifier = patcher.start().info
        super(TestMetricsNotifierHook, self).setUp()

    def test_post_put_delete_triggers_notification(self):
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}

        # Create: start carries the request payload, end the response body.
        payload = {'network': {'name': 'meh'}}
        response = self.app.post_json(
            '/v2.0/networks.json',
            params=payload, headers=req_headers)
        self.assertEqual(201, response.status_int)
        json_body = jsonutils.loads(response.body)
        self.assertEqual(
            [mock.call(mock.ANY, 'network.create.start', payload),
             mock.call(mock.ANY, 'network.create.end', json_body)],
            self.mock_notifier.mock_calls)
        self.mock_notifier.reset_mock()

        # Update.
        network_id = json_body['network']['id']
        payload = {'network': {'name': 'meh-2'}}
        response = self.app.put_json(
            '/v2.0/networks/%s.json' % network_id,
            params=payload, headers=req_headers)
        self.assertEqual(200, response.status_int)
        json_body = jsonutils.loads(response.body)
        # id should be in payload sent to notifier
        payload['id'] = network_id
        self.assertEqual(
            [mock.call(mock.ANY, 'network.update.start', payload),
             mock.call(mock.ANY, 'network.update.end', json_body)],
            self.mock_notifier.mock_calls)
        self.mock_notifier.reset_mock()

        # Delete: the end payload includes the network as it was pre-delete.
        before_payload = {'network_id': network_id}
        after_payload = before_payload.copy()
        after_payload['network'] = directory.get_plugin().get_network(
            context.get_admin_context(), network_id)
        response = self.app.delete(
            '/v2.0/networks/%s.json' % network_id, headers=req_headers)
        self.assertEqual(204, response.status_int)
        self.assertEqual(
            [mock.call(mock.ANY, 'network.delete.start', before_payload),
             mock.call(mock.ANY, 'network.delete.end', after_payload)],
            self.mock_notifier.mock_calls)

    def test_bulk_create_triggers_notification(self):
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        payload = {'networks': [{'name': 'meh_1'}, {'name': 'meh_2'}]}
        response = self.app.post_json(
            '/v2.0/networks.json',
            params=payload,
            headers=req_headers)
        self.assertEqual(201, response.status_int)
        json_body = jsonutils.loads(response.body)
        # A bulk create still emits exactly one start/end pair.
        self.assertEqual(2, self.mock_notifier.call_count)
        self.mock_notifier.assert_has_calls(
            [mock.call(mock.ANY, 'network.create.start', payload),
             mock.call(mock.ANY, 'network.create.end', json_body)])

    def test_bad_create_doesnt_emit_end(self):
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        payload = {'network': {'name': 'meh'}}
        plugin = directory.get_plugin()
        with mock.patch.object(plugin, 'create_network',
                               side_effect=ValueError):
            response = self.app.post_json(
                '/v2.0/networks.json',
                params=payload, headers=req_headers,
                expect_errors=True)
            self.assertEqual(500, response.status_int)
            self.assertEqual(
                [mock.call(mock.ANY, 'network.create.start', mock.ANY)],
                self.mock_notifier.mock_calls)

    def test_bad_update_doesnt_emit_end(self):
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        payload = {'network': {'name': 'meh'}}
        # Setup request must succeed; no expect_errors here (the previous
        # expect_errors=True was inconsistent with the 201 assertion below
        # and with every other setup request in this module).
        response = self.app.post_json(
            '/v2.0/networks.json',
            params=payload, headers=req_headers)
        self.assertEqual(201, response.status_int)
        json_body = jsonutils.loads(response.body)
        self.mock_notifier.reset_mock()
        plugin = directory.get_plugin()
        with mock.patch.object(plugin, 'update_network',
                               side_effect=ValueError):
            response = self.app.put_json(
                '/v2.0/networks/%s.json' % json_body['network']['id'],
                params=payload, headers=req_headers,
                expect_errors=True)
            self.assertEqual(500, response.status_int)
            self.assertEqual(
                [mock.call(mock.ANY, 'network.update.start', mock.ANY)],
                self.mock_notifier.mock_calls)

    def test_bad_delete_doesnt_emit_end(self):
        req_headers = {'X-Project-Id': 'tenid', 'X-Roles': 'admin'}
        payload = {'network': {'name': 'meh'}}
        # Setup request must succeed; no expect_errors here (see above).
        response = self.app.post_json(
            '/v2.0/networks.json',
            params=payload, headers=req_headers)
        self.assertEqual(201, response.status_int)
        json_body = jsonutils.loads(response.body)
        self.mock_notifier.reset_mock()
        plugin = directory.get_plugin()
        with mock.patch.object(plugin, 'delete_network',
                               side_effect=ValueError):
            response = self.app.delete(
                '/v2.0/networks/%s.json' % json_body['network']['id'],
                headers=req_headers, expect_errors=True)
            self.assertEqual(500, response.status_int)
            self.assertEqual(
                [mock.call(mock.ANY, 'network.delete.start', mock.ANY)],
                self.mock_notifier.mock_calls)
class TestCallbackRegistryNotifier(test_functional.PecanFunctionalTest):

    def setUp(self):
        super(TestCallbackRegistryNotifier, self).setUp()
        patcher = mock.patch('neutron.pecan_wsgi.hooks.notifier.registry')
        self.mock_notifier = patcher.start().publish

    def _create(self, bulk=False):
        if bulk:
            body = {'networks': [{'name': 'meh-1'}, {'name': 'meh-2'}]}
        else:
            body = {'network': {'name': 'meh-1'}}
        response = self.app.post_json(
            '/v2.0/networks.json',
            params=body, headers={'X-Project-Id': 'tenid'})
        return response.json

    def _notified_payload(self, action):
        # Assert exactly one BEFORE_RESPONSE publish happened for the
        # given action and return its payload for further checks.
        self.mock_notifier.assert_called_once_with(
            'network', events.BEFORE_RESPONSE, mock.ANY, payload=mock.ANY)
        payload = self.mock_notifier.call_args[1]['payload']
        self.assertEqual('network.%s.end' % action, payload.method_name)
        self.assertEqual('%s_network' % action, payload.action)
        self.assertEqual('networks', payload.collection_name)
        return payload

    def test_create(self):
        self._create()
        payload = self._notified_payload('create')
        self.assertEqual('meh-1', payload.latest_state['network']['name'])

    def test_create_bulk(self):
        self._create(bulk=True)
        payload = self._notified_payload('create')
        networks = payload.latest_state['networks']
        self.assertEqual(2, len(networks))
        self.assertEqual('meh-1', networks[0]['name'])
        self.assertEqual('meh-2', networks[1]['name'])

    def test_update(self):
        network_id = self._create()['network']['id']
        self.mock_notifier.reset_mock()
        self.app.put_json('/v2.0/networks/%s.json' % network_id,
                          params={'network': {'name': 'new-meh'}},
                          headers={'X-Project-Id': 'tenid'})
        payload = self._notified_payload('update')
        self.assertEqual('new-meh', payload.latest_state['network']['name'])
        # The pre-update state is carried alongside the new one.
        self.assertEqual(network_id, payload.states[0]['id'])

    def test_delete(self):
        network_id = self._create()['network']['id']
        self.mock_notifier.reset_mock()
        self.app.delete(
            '/v2.0/networks/%s.json' % network_id,
            headers={'X-Project-Id': 'tenid'})
        payload = self._notified_payload('delete')
        self.assertEqual(network_id, payload.latest_state['network']['id'])
| |
"""
Internal shared-state variables such as config settings and host lists.
"""
import os
import sys
from optparse import make_option
from fabric.network import HostConnectionCache, ssh
from fabric.version import get_version
from fabric.utils import _AliasDict, _AttributeDict
#
# Win32 flag
#
# Impacts a handful of platform specific behaviors. Note that Cygwin's Python
# is actually close enough to "real" UNIXes that it doesn't need (or want!) to
# use PyWin32 -- so we only test for literal Win32 setups (vanilla Python,
# ActiveState etc) here.
win32 = (sys.platform == 'win32')
#
# Environment dictionary - support structures
#
# By default, if the user (including code using Fabric as a library) doesn't
# set the username, we obtain the currently running username and use that.
def _get_system_username():
"""
Obtain name of current system user, which will be default connection user.
"""
import getpass
username = None
try:
username = getpass.getuser()
# getpass.getuser supported on both Unix and Windows systems.
# getpass.getuser may call pwd.getpwuid which in turns may raise KeyError
# if it cannot find a username for the given UID, e.g. on ep.io
# and similar "non VPS" style services. Rather than error out, just keep
# the 'default' username to None. Can check for this value later if needed.
except KeyError:
pass
except ImportError:
if win32:
import win32api
import win32security
import win32profile
username = win32api.GetUserName()
return username
def _rc_path():
"""
Return platform-specific default file path for $HOME/.fabricrc.
"""
rc_file = '.fabricrc'
rc_path = '~/' + rc_file
expanded_rc_path = os.path.expanduser(rc_path)
if expanded_rc_path == rc_path and win32:
from win32com.shell.shell import SHGetSpecialFolderPath
from win32com.shell.shellcon import CSIDL_PROFILE
expanded_rc_path = "%s/%s" % (
SHGetSpecialFolderPath(0, CSIDL_PROFILE),
rc_file
)
return expanded_rc_path
default_port = '22'  # standard SSH port (kept as a string for option parsing)
default_ssh_config_path = '~/.ssh/config'
# Options/settings which exist both as environment keys and which can be set on
# the command line, are defined here. When used via `fab` they will be added to
# the optparse parser, and either way they are added to `env` below (i.e. the
# 'dest' value becomes the environment key and the value, the env value).
#
# Keep in mind that optparse changes hyphens to underscores when automatically
# deriving the `dest` name, e.g. `--reject-unknown-hosts` becomes
# `reject_unknown_hosts`.
#
# Furthermore, *always* specify some sort of default to avoid ending up with
# optparse.NO_DEFAULT (currently a two-tuple)! In general, None is a better
# default than ''.
#
# User-facing documentation for these are kept in docs/env.rst.
# One make_option per CLI flag; each `dest` doubles as an `env` key (see the
# "Add in option defaults" loop below).
env_options = [
    make_option('-a', '--no_agent',
        action='store_true',
        default=False,
        help="don't use the running SSH agent"
    ),
    make_option('-A', '--forward-agent',
        action='store_true',
        default=False,
        help="forward local agent to remote end"
    ),
    make_option('--abort-on-prompts',
        action='store_true',
        default=False,
        help="abort instead of prompting (for password, host, etc)"
    ),
    make_option('-c', '--config',
        dest='rcfile',
        default=_rc_path(),
        metavar='PATH',
        help="specify location of config file to use"
    ),
    make_option('--colorize-errors',
        action='store_true',
        default=False,
        help="Color error output",
    ),
    make_option('-D', '--disable-known-hosts',
        action='store_true',
        default=False,
        help="do not load user known_hosts file"
    ),
    make_option('-e', '--eagerly-disconnect',
        action='store_true',
        default=False,
        help="disconnect from hosts as soon as possible"
    ),
    make_option('-f', '--fabfile',
        default='fabfile',
        metavar='PATH',
        help="python module file to import, e.g. '../other.py'"
    ),
    make_option('-g', '--gateway',
        default=None,
        metavar='HOST',
        help="gateway host to connect through"
    ),
    make_option('--hide',
        metavar='LEVELS',
        help="comma-separated list of output levels to hide"
    ),
    make_option('-H', '--hosts',
        default=[],
        help="comma-separated list of hosts to operate on"
    ),
    make_option('-i',
        action='append',
        dest='key_filename',
        metavar='PATH',
        default=None,
        help="path to SSH private key file. May be repeated."
    ),
    make_option('-k', '--no-keys',
        action='store_true',
        default=False,
        help="don't load private key files from ~/.ssh/"
    ),
    # NOTE(review): uses the builtin ``int`` while the other numeric options
    # pass the string 'int'; optparse accepts both -- confirm intentional.
    make_option('--keepalive',
        dest='keepalive',
        type=int,
        default=0,
        metavar="N",
        help="enables a keepalive every N seconds"
    ),
    make_option('--linewise',
        action='store_true',
        default=False,
        help="print line-by-line instead of byte-by-byte"
    ),
    make_option('-n', '--connection-attempts',
        type='int',
        metavar='M',
        dest='connection_attempts',
        default=1,
        help="make M attempts to connect before giving up"
    ),
    make_option('--no-pty',
        dest='always_use_pty',
        action='store_false',
        default=True,
        help="do not use pseudo-terminal in run/sudo"
    ),
    make_option('-p', '--password',
        default=None,
        help="password for use with authentication and/or sudo"
    ),
    make_option('-P', '--parallel',
        dest='parallel',
        action='store_true',
        default=False,
        help="default to parallel execution method"
    ),
    make_option('--port',
        default=default_port,
        help="SSH connection port"
    ),
    make_option('-r', '--reject-unknown-hosts',
        action='store_true',
        default=False,
        help="reject unknown hosts"
    ),
    make_option('--system-known-hosts',
        default=None,
        help="load system known_hosts file before reading user known_hosts"
    ),
    make_option('-R', '--roles',
        default=[],
        help="comma-separated list of roles to operate on"
    ),
    make_option('-s', '--shell',
        default='/bin/bash -l -c',
        help="specify a new shell, defaults to '/bin/bash -l -c'"
    ),
    make_option('--show',
        metavar='LEVELS',
        help="comma-separated list of output levels to show"
    ),
    make_option('--skip-bad-hosts',
        action="store_true",
        default=False,
        help="skip over hosts that can't be reached"
    ),
    make_option('--ssh-config-path',
        default=default_ssh_config_path,
        metavar='PATH',
        help="Path to SSH config file"
    ),
    make_option('-t', '--timeout',
        type='int',
        default=10,
        metavar="N",
        help="set connection timeout to N seconds"
    ),
    make_option('-T', '--command-timeout',
        dest='command_timeout',
        type='int',
        default=None,
        metavar="N",
        help="set remote command timeout to N seconds"
    ),
    make_option('-u', '--user',
        default=_get_system_username(),
        help="username to use when connecting to remote hosts"
    ),
    make_option('-w', '--warn-only',
        action='store_true',
        default=False,
        help="warn, instead of abort, when commands fail"
    ),
    make_option('-x', '--exclude-hosts',
        default=[],
        metavar='HOSTS',
        help="comma-separated list of hosts to exclude"
    ),
    make_option('-z', '--pool-size',
        dest='pool_size',
        type='int',
        metavar='INT',
        default=0,
        help="number of concurrent processes to use in parallel mode",
    ),
]
#
# Environment dictionary - actual dictionary object
#
# Global environment dict. Currently a catchall for everything: config settings
# such as global deep/broad mode, host lists, username etc.
# Most default values are specified in `env_options` above, in the interests of
# preserving DRY: anything in here is generally not settable via the command
# line.
# Settings not exposed on the command line live here; CLI-backed settings are
# merged in from env_options by the loop below.
env = _AttributeDict({
    'abort_exception': None,
    'again_prompt': 'Sorry, try again.',
    'all_hosts': [],
    'combine_stderr': True,
    'colorize_errors': False,
    'command': None,
    'command_prefixes': [],
    'cwd': '',  # Must be empty string, not None, for concatenation purposes
    'dedupe_hosts': True,
    'default_port': default_port,
    'eagerly_disconnect': False,
    'echo_stdin': True,
    'exclude_hosts': [],
    'gateway': None,
    'host': None,
    'host_string': None,
    'lcwd': '',  # Must be empty string, not None, for concatenation purposes
    'local_user': _get_system_username(),
    'output_prefix': True,
    'passwords': {},
    'path': '',
    'path_behavior': 'append',
    'port': default_port,
    'real_fabfile': None,
    'remote_interrupt': None,
    'roles': [],
    'roledefs': {},
    'shell_env': {},
    'skip_bad_hosts': False,
    'ssh_config_path': default_ssh_config_path,
    'ok_ret_codes': [0],  # a list of return codes that indicate success
    # -S so sudo accepts passwd via stdin, -p with our known-value prompt for
    # later detection (thus %s -- gets filled with env.sudo_prompt at runtime)
    'sudo_prefix': "sudo -S -p '%(sudo_prompt)s' ",
    'sudo_prompt': 'sudo password:',
    'sudo_user': None,
    'tasks': [],
    'prompts': {},
    'use_exceptions_for': {'network': False},
    'use_shell': True,
    'use_ssh_config': False,
    'user': None,
    'version': get_version('short')
})

# Fill in exceptions settings
exceptions = ['network']
exception_dict = {}
for e in exceptions:
    exception_dict[e] = False
env.use_exceptions_for = _AliasDict(exception_dict,
    aliases={'everything': exceptions})

# Add in option defaults
for option in env_options:
    env[option.dest] = option.default

#
# Command dictionary
#

# Keys are the command/function names, values are the callables themselves.
# This is filled in when main() runs.
commands = {}

#
# Host connection dict/cache
#

connections = HostConnectionCache()
def _open_session():
    # Open a fresh SSH channel on the cached transport for the current host.
    return connections[env.host_string].get_transport().open_session()
def default_channel():
    """
    Return a channel object based on ``env.host_string``.
    """
    try:
        chan = _open_session()
    except ssh.SSHException as err:
        # Anything other than a dead session is a real error.
        if str(err) != 'SSH session not active':
            raise
        # The cached connection's session died underneath us: drop the stale
        # connection from the cache and retry once on a fresh one.
        connections[env.host_string].close()
        del connections[env.host_string]
        chan = _open_session()
    chan.settimeout(0.1)
    chan.input_enabled = True
    return chan
#
# Output controls
#
# Keys are "levels" or "groups" of output, values are always boolean,
# determining whether output falling into the given group is printed or not
# printed.
#
# By default, everything except 'debug' is printed, as this is what the average
# user, and new users, are most likely to expect.
#
# See docs/usage.rst for details on what these levels mean.
output = _AliasDict({
    'status': True,
    'aborts': True,
    'warnings': True,
    'running': True,
    'stdout': True,
    'stderr': True,
    'debug': False,
    'user': True
}, aliases={
    # 'everything' includes the nested 'output' alias, so toggling it also
    # toggles stdout/stderr.
    'everything': ['warnings', 'running', 'user', 'output'],
    'output': ['stdout', 'stderr'],
    'commands': ['stdout', 'running']
})
| |
from __future__ import absolute_import, division, print_function
from datetime import timedelta
from datetime import datetime
import astropy.units as u
from sunpy.time import parse_time
from sunpy import config
from sunpy.extern.six.moves import range
# Default strftime format for rendering times, taken from the sunpy config.
TIME_FORMAT = config.get('general', 'time_format')
# Public API of this module.
__all__ = ['TimeRange']
class TimeRange(object):
    """
    An object to handle time ranges.
    .. note::
       Regardless of how a TimeRange is constructed it will always provide a
       positive time range where the start time is before the end time.
    Parameters
    ----------
    a : str, number, `datetime.datetime`
        A time (usually the start time) specified as a parse_time-compatible
        time string, number, or a datetime object.
    b : str, number, `datetime.datetime`, `datetime.timedelta`, `astropy.units.Quantity` (time)
        Another time (usually the end time) specified as a
        parse_time-compatible time string, number, or a datetime object.
        May also be the size of the time range specified as a timedelta object,
        or a `astropy.units.Quantity`.
    Examples
    --------
    >>> from sunpy.time import TimeRange
    >>> time_range = TimeRange('2010/03/04 00:10', '2010/03/04 00:20')
    >>> time_range = TimeRange(('2010/03/04 00:10', '2010/03/04 00:20'))
    >>> import astropy.units as u
    >>> time_range = TimeRange('2010/03/04 00:10', 400 * u.s)
    >>> time_range = TimeRange('2010/03/04 00:10', 400 * u.day)
    """
    def __init__(self, a, b=None):
        """Creates a new TimeRange instance"""
        # If a is a TimeRange object, copy attributes to new instance.
        self._t1 = None
        self._t2 = None
        if isinstance(a, TimeRange):
            self.__dict__ = a.__dict__.copy()
            return
        # Normalize different input types
        if b is None:
            # `a` must be a two-element sequence of (start, end).
            # NOTE(review): a[0] is parsed before the length check, so a
            # non-indexable `a` raises TypeError here rather than the
            # ValueError below -- confirm whether that is intended.
            x = parse_time(a[0])
            if len(a) != 2:
                raise ValueError('If b is None a must have two elements')
            else:
                y = a[1]
        else:
            x = parse_time(a)
            y = b
        if isinstance(y, u.Quantity):
            # Convert an astropy time Quantity into a plain timedelta.
            y = timedelta(seconds=y.to('s').value)
        # Timedelta
        if isinstance(y, timedelta):
            # Order the endpoints so the range is always positive.
            if y.days >= 0:
                self._t1 = x
                self._t2 = x + y
            else:
                self._t1 = x + y
                self._t2 = x
            return
        # Otherwise, assume that the second argument is parse_time-compatible
        y = parse_time(y)
        if isinstance(y, datetime):
            if x < y:
                self._t1 = x
                self._t2 = y
            else:
                self._t1 = y
                self._t2 = x
        # NOTE(review): if parse_time returned something other than a
        # datetime above, _t1/_t2 silently remain None -- verify that
        # parse_time always returns a datetime for valid input.
    @property
    def start(self):
        """
        Get the start time
        Returns
        -------
        start : `datetime.datetime`
        """
        return self._t1
    @property
    def end(self):
        """
        Get the end time
        Returns
        -------
        end : `datetime.datetime`
        """
        return self._t2
    @property
    def dt(self):
        """
        Get the length of the time range. Always a positive value.
        Returns
        -------
        dt : `datetime.timedelta`
        """
        return self._t2 - self._t1
    @property
    def center(self):
        """
        Gets the center of the TimeRange instance.
        Returns
        -------
        value : `datetime.datetime`
        """
        return self.start + self.dt // 2
    @property
    def hours(self):
        """
        Get the number of hours elapsed.
        Returns
        -------
        value : `astropy.units.Quantity`
        """
        return self._duration.to('hour')
    @property
    def days(self):
        """
        Gets the number of days elapsed.
        Returns
        -------
        value : `astropy.units.Quantity`
        """
        return self._duration.to('d')
    @property
    def seconds(self):
        """
        Gets the number of seconds elapsed.
        Returns
        -------
        value : `astropy.units.Quantity`
        """
        return self._duration.to('s')
    @property
    def minutes(self):
        """
        Gets the number of minutes elapsed.
        Returns
        -------
        value : `astropy.units.Quantity`
        """
        return self._duration.to('min')
    @property
    def _duration(self):
        """
        The duration of the time range as an astropy Quantity.
        Returns
        -------
        value : `astropy.units.Quantity`
        """
        # Sum the timedelta components so no sub-second precision is lost.
        result = self.dt.microseconds * u.Unit('us') + self.dt.seconds * u.Unit('s') + self.dt.days * u.Unit('day')
        return result
    def __repr__(self):
        """
        Returns a human-readable representation of the TimeRange instance."""
        t1 = self.start.strftime(TIME_FORMAT)
        t2 = self.end.strftime(TIME_FORMAT)
        center = self.center.strftime(TIME_FORMAT)
        return ('   Start:'.ljust(11) + t1 +
                '\n    End:'.ljust(12) + t2 +
                '\n    Center:'.ljust(12) + center +
                '\n    Duration:'.ljust(12) + str(self.days.value) + ' days or' +
                '\n    '.ljust(12) + str(self.hours.value) + ' hours or' +
                '\n    '.ljust(12) + str(self.minutes.value) + ' minutes or' +
                '\n    '.ljust(12) + str(self.seconds.value) + ' seconds' +
                '\n')
    def split(self, n=2):
        """
        Splits the TimeRange into multiple equally sized parts.
        Parameters
        ----------
        n : int
            The number of times to split the time range (must > 1)
        Returns
        -------
        time ranges: list
            An list of equally sized TimeRange objects between
            the start and end times.
        Raises
        ------
        ValueError
            If requested amount is less than 1
        """
        if n <= 0:
            raise ValueError('n must be greater than or equal to 1')
        subsections = []
        previous_time = self.start
        next_time = None
        for _ in range(n):
            # NOTE(review): `dt // n` truncates to whole microseconds, so the
            # final subsection may end slightly before `self.end`.
            next_time = previous_time + self.dt // n
            next_range = TimeRange(previous_time, next_time)
            subsections.append(next_range)
            previous_time = next_time
        return subsections
    def window(self, cadence, window):
        """
        Split the TimeRange up into a series of TimeRange windows,
        'window' long, between the start and end with a cadence of 'cadence'.
        Parameters
        ----------
        cadence : `astropy.units.Quantity`, `datetime.timedelta`
            Cadence in seconds or a timedelta instance
        window : `astropy.units.quantity`, `datetime.timedelta`
            The length of the Time's, assumed to be seconds if int.
        Returns
        -------
        time ranges : list
            A list of TimeRange objects, that are window long and separated by
            cadence.
        Examples
        --------
        >>> import astropy.units as u
        >>> from sunpy.time import TimeRange
        >>> time_range = TimeRange('2010/03/04 00:10', '2010/03/04 01:20')
        >>> time_range.window(60*60*u.s, window=12*u.s)   # doctest: +NORMALIZE_WHITESPACE
        [   Start: 2010-03-04 00:10:00
            End:   2010-03-04 00:10:12
            Center:2010-03-04 00:10:06
            Duration:0.000138888888889 days or
                   0.00333333333333 hours or
                   0.2 minutes or
                   12.0 seconds
        ,    Start: 2010-03-04 01:10:00
            End:   2010-03-04 01:10:12
            Center:2010-03-04 01:10:06
            Duration:0.000138888888889 days or
                   0.00333333333333 hours or
                   0.2 minutes or
                   12.0 seconds
        ,    Start: 2010-03-04 02:10:00
            End:   2010-03-04 02:10:12
            Center:2010-03-04 02:10:06
            Duration:0.000138888888889 days or
                   0.00333333333333 hours or
                   0.2 minutes or
                   12.0 seconds
        ]
        """
        # Accept either Quantities or timedeltas for both arguments.
        if not isinstance(window, timedelta):
            window = timedelta(seconds=window.to('s').value)
        if not isinstance(cadence, timedelta):
            cadence = timedelta(seconds=cadence.to('s').value)
        n = 1
        # Keep emitting windows until one ends at or past the range's end;
        # the final window may therefore extend beyond `self.end`.
        times = [TimeRange(self.start, self.start + window)]
        while times[-1].end < self.end:
            times.append(TimeRange(self.start + cadence * n,
                                   self.start + cadence * n + window))
            n += 1
        return times
    def next(self):
        """Shift the time range forward by the amount of time elapsed"""
        # Mutates this instance in place and returns it (not a copy).
        dt = self.dt
        self._t1 = self._t1 + dt
        self._t2 = self._t2 + dt
        return self
    def previous(self):
        """Shift the time range backward by the amount of time elapsed"""
        # Mutates this instance in place and returns it (not a copy).
        dt = self.dt
        self._t1 = self._t1 - dt
        self._t2 = self._t2 - dt
        return self
    def extend(self, dt_start, dt_end):
        """Extend the time range forwards and backwards
        Parameters
        ----------
        dt_start : `datetime.timedelta`
            The amount to shift the start time
        dt_end : `datetime.timedelta`
            The amount to shift the end time
        """
        # Only a timedelta object is acceptable here
        self._t1 = self._t1 + dt_start
        self._t2 = self._t2 + dt_end
    def __contains__(self, time):
        """
        Checks whether the given time lies within this range.
        Both limits are inclusive (i.e. __contains__(t1) and __contains__(t2)
        always return true)
        Parameters
        ----------
        time : `datetime.datetime`, str
            A parse_time-compatible time to be checked.
        Returns
        -------
        value : bool
            True if time lies between start and end, False otherwise.
        Examples
        --------
        >>> from sunpy.time import TimeRange
        >>> time1 = '2014/5/5 12:11'
        >>> time2 = '2012/5/5 12:11'
        >>> time_range = TimeRange('2014/05/04 13:54', '2018/02/03 12:12')
        >>> time1 in time_range
        True
        >>> time2 in time_range
        False
        """
        this_time = parse_time(time)
        return this_time >= self.start and this_time <= self.end
| |
import math
from datetime import datetime
from conch import analyze_segments
from conch.analysis.segments import SegmentMapping
from .helper import generate_pitch_function
from ..segments import generate_utterance_segments
from ...exceptions import SpeakerAttributeError
from ..classes import Track, TimePoint
from ..utils import PADDING
def analyze_utterance_pitch(corpus_context, utterance, source='praat', min_pitch=50, max_pitch=500,
                            **kwargs):
    """
    Compute a pitch (F0) track for a single utterance.

    Parameters
    ----------
    corpus_context : :class:`~polyglotdb.corpus.audio.AudioContext`
        Corpus object used to run Cypher queries and to look up analysis
        binary paths in its config.
    utterance : str or object with an ``id`` attribute
        Utterance id, or an utterance object whose ``id`` is used.
    source : str
        Pitch analysis program, either ``'praat'`` or ``'reaper'``.
    min_pitch : int
        Pitch floor in Hz.
    max_pitch : int
        Pitch ceiling in Hz.
    kwargs
        ``padding`` (seconds of audio around the utterance) is honored;
        other keys are ignored.

    Returns
    -------
    Track
        Track of TimePoint objects carrying positive ``F0`` values only.
    """
    if isinstance(utterance, str):
        utterance_id = utterance
    else:
        utterance_id = utterance.id
    # Fall back to the module-wide default padding when none was supplied.
    padding = kwargs.pop('padding', None)
    if padding is None:
        padding = PADDING
    utt_type = corpus_context.hierarchy.highest
    # Locate the utterance's discourse and audio channel.
    statement = '''MATCH (s:Speaker:{corpus_name})-[r:speaks_in]->(d:Discourse:{corpus_name}),
                (u:{utt_type}:{corpus_name})-[:spoken_by]->(s),
                (u)-[:spoken_in]->(d)
                WHERE u.id = $utterance_id
                RETURN u, d, r.channel as channel'''.format(corpus_name=corpus_context.cypher_safe_name,
                                                           utt_type=utt_type)
    results = corpus_context.execute_cypher(statement, utterance_id=utterance_id)
    segment_mapping = SegmentMapping()
    for r in results:
        channel = r['channel']
        # Uses the discourse's 'vowel_file_path' audio file for analysis.
        file_path = r['d']['vowel_file_path']
        u = r['u']
        segment_mapping.add_file_segment(file_path, u['begin'], u['end'], channel, padding=padding)
    path = None
    if source == 'praat':
        path = corpus_context.config.praat_path
    elif source == 'reaper':
        path = corpus_context.config.reaper_path
    pitch_function = generate_pitch_function(source, min_pitch, max_pitch, path=path)
    track = Track()
    for seg in segment_mapping:
        output = pitch_function(seg)
        for k, v in output.items():
            # Skip unvoiced or invalid frames.
            if v['F0'] is None or v['F0'] <= 0:
                continue
            p = TimePoint(k)
            p.add_value('F0', v['F0'])
            track.add(p)
    # Ensure the hierarchy knows about the 'pitch' acoustic measure.
    if 'pitch' not in corpus_context.hierarchy.acoustics:
        corpus_context.hierarchy.add_acoustic_properties(corpus_context, 'pitch', [('F0', float)])
        corpus_context.encode_hierarchy()
    return track
def update_utterance_pitch_track(corpus_context, utterance, new_track):
    """
    Replace the stored pitch (F0) track for one utterance with ``new_track``.

    Deletes the utterance's existing pitch points from the acoustic database,
    writes the new points (labelled with the phone each point falls in),
    stamps the utterance node with an edit timestamp, and ensures 'pitch'
    is registered in the hierarchy's acoustics.

    Parameters
    ----------
    corpus_context : :class:`~polyglotdb.corpus.audio.AudioContext`
        Corpus object used for graph queries and acoustic storage.
    utterance : str or object with an ``id`` attribute
        Utterance id, or an utterance object whose ``id`` is used.
    new_track : iterable of mappings
        Points with ``'time'`` (seconds) and ``'F0'`` (Hz) entries.

    Returns
    -------
    float
        The UTC timestamp stored as ``pitch_last_edited`` on the utterance.
    """
    from ...corpus.audio import s_to_ms, s_to_nano
    if isinstance(utterance, str):
        utterance_id = utterance
    else:
        utterance_id = utterance.id
    today = datetime.utcnow()
    utt_type = corpus_context.hierarchy.highest
    phone_type = corpus_context.hierarchy.lowest
    time_stamp = today.timestamp()
    # Fetch the utterance, its discourse/channel/speaker, and all contained
    # phones, while stamping the edit time in the same query.
    statement = '''MATCH (s:Speaker:{corpus_name})-[r:speaks_in]->(d:Discourse:{corpus_name}),
                (u:{utt_type}:{corpus_name})-[:spoken_by]->(s),
                (u)-[:spoken_in]->(d),
                (p:{phone_type}:{corpus_name})-[:contained_by*]->(u)
                WHERE u.id = $utterance_id
                SET u.pitch_last_edited = $date
                RETURN u, d, r.channel as channel, s, collect(p) as p'''.format(
        corpus_name=corpus_context.cypher_safe_name,
        utt_type=utt_type, phone_type=phone_type)
    results = corpus_context.execute_cypher(statement, utterance_id=utterance_id, date=time_stamp)
    # NOTE: only the last returned row's values are used below; the query is
    # expected to match a single utterance.
    for r in results:
        channel = r['channel']
        discourse = r['d']['name']
        speaker = r['s']['name']
        u = r['u']
        phones = r['p']
    client = corpus_context.acoustic_client()
    # Remove the utterance's existing pitch points before re-inserting.
    query = '''DELETE from "pitch"
            where "discourse" = '{}'
            and "speaker" = '{}'
            and "time" >= {}
            and "time" <= {};'''.format(discourse, speaker, s_to_nano(u['begin']), s_to_nano(u['end']))
    client.query(query)
    data = []
    for data_point in new_track:
        time_point, value = data_point['time'], data_point['F0']
        t_dict = {'speaker': speaker, 'discourse': discourse, 'channel': channel}
        label = None
        # Find the phone whose interval contains this time point: walk
        # phones in order and keep the last label whose begin <= time_point.
        for i, p in enumerate(sorted(phones, key=lambda x: x['begin'])):
            if p['begin'] > time_point:
                break
            label = p['label']
            if i == len(phones) - 1:
                break
        else:
            # Only reached when there are no phones at all.
            label = None
        if label is None:
            continue
        fields = {'phone': label, 'utterance_id': u['id']}
        try:
            if value is None:
                continue
            value = float(value)
        except TypeError:
            continue
        if value <= 0:
            continue
        fields['F0'] = value
        d = {'measurement': 'pitch',
             'tags': t_dict,
             'time': s_to_ms(time_point),
             'fields': fields
             }
        data.append(d)
    client.write_points(data, batch_size=1000, time_precision='ms')
    if 'pitch' not in corpus_context.hierarchy.acoustics:
        corpus_context.hierarchy.acoustics.add('pitch')
        corpus_context.encode_hierarchy()
    return time_stamp
def analyze_pitch(corpus_context,
                  source='praat',
                  algorithm='base',
                  call_back=None,
                  absolute_min_pitch=50,
                  absolute_max_pitch=500,
                  adjusted_octaves=1,
                  stop_check=None, multiprocessing=True):
    """
    Analyze pitch (F0) for every utterance in the corpus, grouped by speaker,
    and save the resulting tracks via ``corpus_context.save_acoustic_tracks``.

    Parameters
    ----------
    corpus_context : :class:`~polyglotdb.corpus.audio.AudioContext`
    source : str
        Program to use for analyzing pitch, either ``praat`` or ``reaper``
    algorithm : str
        Algorithm to use, ``base``, ``gendered``, or ``speaker_adjusted``.
        ``base`` uses the absolute floor/ceiling for everyone; ``gendered``
        narrows the range based on the speaker's gender attribute;
        ``speaker_adjusted`` uses a window of ``adjusted_octaves`` around each
        speaker's mean pitch (clamped to the absolute bounds).
    call_back : callable
        Function to report progress
    absolute_min_pitch : int
        Absolute pitch floor
    absolute_max_pitch : int
        Absolute pitch ceiling
    adjusted_octaves : int
        How many octaves around the speaker's mean pitch to set the speaker adjusted pitch floor and ceiling
    stop_check : callable
        Function to check whether processing should stop early
    multiprocessing : bool
        Flag whether to use multiprocessing or threading

    Returns
    -------
    None
        Results are stored through the corpus context, not returned.
    """
    if not 'utterance' in corpus_context.hierarchy:
        raise (Exception('Must encode utterances before pitch can be analyzed'))
    segment_mapping = generate_utterance_segments(corpus_context, padding=PADDING).grouped_mapping('speaker')
    num_speakers = len(segment_mapping)
    path = None
    if source == 'praat':
        path = corpus_context.config.praat_path
        # kwargs = {'silence_threshold': 0.03,
        # 'voicing_threshold': 0.45, 'octave_cost': 0.01, 'octave_jump_cost': 0.35,
        # 'voiced_unvoiced_cost': 0.14}
    elif source == 'reaper':
        path = corpus_context.config.reaper_path
        # kwargs = None
    # Default pitch function with absolute bounds; used as-is for the 'base'
    # algorithm and rebuilt per speaker for the other algorithms.
    pitch_function = generate_pitch_function(source, absolute_min_pitch, absolute_max_pitch,
                                             path=path)
    if 'pitch' not in corpus_context.hierarchy.acoustics:
        corpus_context.hierarchy.add_acoustic_properties(corpus_context, 'pitch', [('F0', float)])
        corpus_context.encode_hierarchy()
    if algorithm == 'speaker_adjusted':
        # First pass: estimate each speaker's mean F0 with the absolute bounds.
        speaker_data = {}
        if call_back is not None:
            call_back('Getting original speaker means and SDs...')
        for i, ((k,), v) in enumerate(segment_mapping.items()):
            if call_back is not None:
                call_back('Analyzing speaker {} ({} of {})'.format(k, i, num_speakers))
            output = analyze_segments(v, pitch_function, stop_check=stop_check, multiprocessing=multiprocessing)
            sum_pitch = 0
            n = 0
            for seg, track in output.items():
                for t, v in track.items():
                    v = v['F0']
                    if v is not None and v > 0:  # only voiced frames
                        n += 1
                        sum_pitch += v
            # NOTE(review): raises ZeroDivisionError if a speaker has no
            # voiced frames at all -- confirm this cannot happen upstream.
            mean_pitch = sum_pitch / n
            # Floor/ceiling set `adjusted_octaves` octaves below/above the mean.
            speaker_data[k] = int(mean_pitch / math.pow(2, adjusted_octaves)), \
                              int( mean_pitch * math.pow(2, adjusted_octaves))
    # Second pass: analyze each speaker with the (possibly adjusted) bounds.
    for i, ((speaker,), v) in enumerate(segment_mapping.items()):
        if call_back is not None:
            call_back('Analyzing speaker {} ({} of {})'.format(speaker, i, num_speakers))
        if algorithm == 'gendered':
            min_pitch = absolute_min_pitch
            max_pitch = absolute_max_pitch
            try:
                q = corpus_context.query_speakers().filter(corpus_context.speaker.name == speaker)
                q = q.columns(corpus_context.speaker.gender.column_name('Gender'))
                gender = q.all()[0]['Gender']
                if gender is not None:
                    # Raise the floor for female speakers, lower the ceiling
                    # for everyone else.
                    if gender.lower()[0] == 'f':
                        min_pitch = 100
                    else:
                        max_pitch = 400
            except SpeakerAttributeError:
                # Speaker has no gender attribute; keep the absolute bounds.
                pass
            pitch_function = generate_pitch_function(source, min_pitch, max_pitch,
                                                     path=path)
        elif algorithm == 'speaker_adjusted':
            min_pitch, max_pitch = speaker_data[speaker]
            # Clamp the adjusted bounds to the absolute limits.
            if min_pitch < absolute_min_pitch:
                min_pitch = absolute_min_pitch
            if max_pitch > absolute_max_pitch:
                max_pitch = absolute_max_pitch
            pitch_function = generate_pitch_function(source, min_pitch, max_pitch,
                                                     path=path)
        output = analyze_segments(v, pitch_function, stop_check=stop_check, multiprocessing=multiprocessing)
        corpus_context.save_acoustic_tracks('pitch', output, speaker)
    # Record when pitch was last (re)computed on every utterance.
    today = datetime.utcnow()
    corpus_context.query_graph(corpus_context.utterance).set_properties(pitch_last_edited=today.timestamp())
    corpus_context.encode_hierarchy()
| |
'''
Unit tests for yedit
'''
import os
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error
# place yedit in our path
# Compute the repository's 'library' directory (four path components above
# this test file) and prepend it to sys.path so `yedit` can be imported.
yedit_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library')  # noqa: E501
sys.path.insert(0, yedit_path)
from yedit import Yedit, YeditException # noqa: E402
# pylint: disable=too-many-public-methods
# Silly pylint, moar tests!
class YeditTest(unittest.TestCase):
    '''
    Test class for yedit
    '''
    # Known starting YAML structure; setUp rewrites the file from this before
    # every test.
    data = {'a': 'a',
            'b': {'c': {'d': [{'e': 'x'}, 'f', 'g']}},
            }  # noqa: E124
    filename = 'yedit_test.yml'

    def setUp(self):
        ''' setup method will create a file and set to known configuration '''
        yed = Yedit(YeditTest.filename)
        yed.yaml_dict = YeditTest.data
        yed.write()

    def test_load(self):
        ''' Testing a get '''
        yed = Yedit('yedit_test.yml')
        self.assertEqual(yed.yaml_dict, self.data)

    def test_write(self):
        ''' Testing a simple write '''
        yed = Yedit('yedit_test.yml')
        yed.put('key1', 1)
        yed.write()
        self.assertTrue('key1' in yed.yaml_dict)
        self.assertEqual(yed.yaml_dict['key1'], 1)

    def test_write_x_y_z(self):
        '''Testing a write of multilayer key'''
        yed = Yedit('yedit_test.yml')
        yed.put('x.y.z', 'modified')
        yed.write()
        yed.load()
        self.assertEqual(yed.get('x.y.z'), 'modified')

    def test_delete_a(self):
        '''Testing a simple delete '''
        yed = Yedit('yedit_test.yml')
        yed.delete('a')
        yed.write()
        yed.load()
        self.assertTrue('a' not in yed.yaml_dict)

    def test_delete_b_c(self):
        '''Testing delete of layered key '''
        yed = Yedit('yedit_test.yml', separator=':')
        yed.delete('b:c')
        yed.write()
        yed.load()
        self.assertTrue('b' in yed.yaml_dict)
        self.assertFalse('c' in yed.yaml_dict['b'])

    def test_create(self):
        '''Testing a create '''
        os.unlink(YeditTest.filename)
        yed = Yedit('yedit_test.yml')
        yed.create('foo', 'bar')
        yed.write()
        yed.load()
        self.assertTrue('foo' in yed.yaml_dict)
        self.assertTrue(yed.yaml_dict['foo'] == 'bar')

    def test_create_content(self):
        '''Testing a create with content '''
        content = {"foo": "bar"}
        yed = Yedit("yedit_test.yml", content)
        yed.write()
        yed.load()
        self.assertTrue('foo' in yed.yaml_dict)
        # BUGFIX: was assertTrue(value, msg) which always passed for truthy
        # values; assertEqual performs the intended comparison.
        self.assertEqual(yed.yaml_dict['foo'], 'bar')

    def test_array_insert(self):
        '''Testing an insert into an existing array'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', 'inject')
        self.assertTrue(yed.get('b:c:d[0]') == 'inject')

    def test_array_insert_first_index(self):
        '''Testing that an array insert shifts the original first element'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', 'inject')
        self.assertTrue(yed.get('b:c:d[1]') == 'f')

    def test_array_insert_second_index(self):
        '''Testing that an array insert shifts the original second element'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', 'inject')
        self.assertTrue(yed.get('b:c:d[2]') == 'g')

    def test_dict_array_dict_access(self):
        '''Testing access through dict -> array -> dict nesting'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        self.assertTrue(yed.get('b:c:d[0]:[0]:x:y') == 'inject')

    def test_dict_array_dict_replace(self):
        '''Testing replacement of a value nested in dicts and arrays'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        yed.put('b:c:d[0]:[0]:x:y', 'testing')
        self.assertTrue('b' in yed.yaml_dict)
        self.assertTrue('c' in yed.yaml_dict['b'])
        self.assertTrue('d' in yed.yaml_dict['b']['c'])
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
        self.assertTrue('y' in yed.yaml_dict['b']['c']['d'][0][0]['x'])
        self.assertTrue(yed.yaml_dict['b']['c']['d'][0][0]['x']['y'] == 'testing')  # noqa: E501

    def test_dict_array_dict_remove(self):
        '''Testing multilevel delete'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        yed.delete('b:c:d[0]:[0]:x:y')
        self.assertTrue('b' in yed.yaml_dict)
        self.assertTrue('c' in yed.yaml_dict['b'])
        self.assertTrue('d' in yed.yaml_dict['b']['c'])
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0], list))
        self.assertTrue(isinstance(yed.yaml_dict['b']['c']['d'][0][0], dict))
        self.assertFalse('y' in yed.yaml_dict['b']['c']['d'][0][0]['x'])

    def test_key_exists_in_dict(self):
        '''Testing exist in dict'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        self.assertTrue(yed.exists('b:c', 'd'))

    def test_key_exists_in_list(self):
        '''Testing exist in list'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('b:c:d[0]', [{'x': {'y': 'inject'}}])
        self.assertTrue(yed.exists('b:c:d', [{'x': {'y': 'inject'}}]))
        self.assertFalse(yed.exists('b:c:d', [{'x': {'y': 'test'}}]))

    def test_update_to_list_with_index(self):
        '''Testing update to list with index'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.update('x:y:z', [5, 6], index=2)
        self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]])
        self.assertTrue(yed.exists('x:y:z', [5, 6]))
        self.assertFalse(yed.exists('x:y:z', 4))

    def test_update_to_list_with_curr_value(self):
        '''Testing update to list with current value'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.update('x:y:z', [5, 6], curr_value=3)
        self.assertTrue(yed.get('x:y:z') == [1, 2, [5, 6]])
        self.assertTrue(yed.exists('x:y:z', [5, 6]))
        self.assertFalse(yed.exists('x:y:z', 4))

    def test_update_to_list(self):
        '''Testing update to list'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.update('x:y:z', [5, 6])
        self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6]])
        self.assertTrue(yed.exists('x:y:z', [5, 6]))
        self.assertFalse(yed.exists('x:y:z', 4))

    def test_append_twice_to_list(self):
        '''Testing append to list'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', [1, 2, 3])
        yed.append('x:y:z', [5, 6])
        yed.append('x:y:z', [5, 6])
        self.assertTrue(yed.get('x:y:z') == [1, 2, 3, [5, 6], [5, 6]])
        self.assertFalse(yed.exists('x:y:z', 4))

    def test_add_item_to_dict(self):
        '''Testing update to dict'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('x:y:z', {'a': 1, 'b': 2})
        yed.update('x:y:z', {'c': 3, 'd': 4})
        self.assertTrue(yed.get('x:y:z') == {'a': 1, 'b': 2, 'c': 3, 'd': 4})
        self.assertTrue(yed.exists('x:y:z', {'c': 3}))

    def test_first_level_dict_with_none_value(self):
        '''test dict value with none value'''
        yed = Yedit(content={'a': None}, separator=":")
        yed.put('a:b:c', 'test')
        self.assertTrue(yed.get('a:b:c') == 'test')
        # BUGFIX: was assertTrue(value, msg) which never compared the two
        # arguments; assertEqual performs the intended comparison.
        self.assertEqual(yed.get('a:b'), {'c': 'test'})

    def test_adding_yaml_variable(self):
        '''test putting a jinja-style template string as a value'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('z:y', '{{test}}')
        self.assertTrue(yed.get('z:y') == '{{test}}')

    def test_keys_with_underscore(self):
        '''test keys that contain underscores'''
        yed = Yedit("yedit_test.yml", separator=':')
        yed.put('z_:y_y', {'test': '{{test}}'})
        self.assertTrue(yed.get('z_:y_y') == {'test': '{{test}}'})

    def test_first_level_array_update(self):
        '''test update on top level array'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
        yed.update('', {'c': 4})
        self.assertTrue({'c': 4} in yed.get(''))

    def test_first_level_array_delete(self):
        '''test remove top level key'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
        yed.delete('')
        self.assertTrue({'b': 3} not in yed.get(''))

    def test_first_level_array_get(self):
        '''test get of an entire top level array'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}])
        yed.get('')
        self.assertTrue([{'a': 1}, {'b': 2}, {'b': 3}] == yed.yaml_dict)

    def test_pop_list_item(self):
        '''test popping an item from a top level list'''
        yed = Yedit(content=[{'a': 1}, {'b': 2}, {'b': 3}], separator=':')
        yed.pop('', {'b': 2})
        self.assertTrue([{'a': 1}, {'b': 3}] == yed.yaml_dict)

    def test_pop_list_item_2(self):
        '''test popping an item from a larger top level list'''
        z = list(range(10))
        yed = Yedit(content=z, separator=':')
        yed.pop('', 5)
        z.pop(5)
        self.assertTrue(z == yed.yaml_dict)

    def test_pop_dict_key(self):
        '''test popping a key from a nested dict'''
        yed = Yedit(content={'a': {'b': {'c': 1, 'd': 2}}}, separator='#')
        yed.pop('a#b', 'c')
        self.assertTrue({'a': {'b': {'d': 2}}} == yed.yaml_dict)

    def test_accessing_path_with_unexpected_objects(self):
        '''test providing source path objects that differ from current object state'''
        yed = Yedit(content={'a': {'b': {'c': ['d', 'e']}}})
        with self.assertRaises(YeditException):
            yed.put('a.b.c.d', 'x')

    def test_creating_new_objects_with_embedded_list(self):
        '''test creating new objects with an embedded list in the creation path'''
        yed = Yedit(content={'a': {'b': 12}})
        with self.assertRaises(YeditException):
            yed.put('new.stuff[0].here', 'value')

    def test_creating_new_objects_with_trailing_list(self):
        '''test creating new object(s) where the final piece is a list'''
        yed = Yedit(content={'a': {'b': 12}})
        with self.assertRaises(YeditException):
            yed.put('new.stuff.here[0]', 'item')

    def test_empty_key_with_int_value(self):
        '''test editing top level with not list or dict'''
        yed = Yedit(content={'a': {'b': 12}})
        result = yed.put('', 'b')
        self.assertFalse(result[0])

    def test_setting_separator(self):
        '''test setting the separator attribute'''
        yed = Yedit(content={'a': {'b': 12}})
        yed.separator = ':'
        self.assertEqual(yed.separator, ':')

    def test_remove_all(self):
        '''test removing all data'''
        data = Yedit.remove_entry({'a': {'b': 12}}, '')
        self.assertTrue(data)

    def test_remove_dict_entry(self):
        '''test removing dict entry'''
        data = {'a': {'b': [{'c': 3, 'd': 4, 'e': 5}]}}
        results = Yedit.remove_entry(data, 'a.b[0].c')
        self.assertTrue(results)
        self.assertEqual(data, {'a': {'b': [{'d': 4, 'e': 5}]}})

    def test_remove_dict_entry_top_all(self):
        '''test removing dict entry top all'''
        data = {'a': 1, 'b': 2}
        results = Yedit.remove_entry(data, '')
        self.assertTrue(results)
        self.assertEqual(data, {})

    def test_remove_dict_entry_top(self):
        '''test removing dict entry top'''
        data = {'a': 1, 'b': 2}
        results = Yedit.remove_entry(data, '', value='b')
        self.assertTrue(results)
        self.assertEqual(data, {'a': 1})

    def test_remove_list_entry(self):
        '''test removing list entry'''
        data = {'a': {'b': [{'c': 3}]}}
        results = Yedit.remove_entry(data, 'a.b[0]')
        self.assertTrue(results)
        self.assertEqual(data, {'a': {'b': []}})

    def test_remove_list_entry_value_top(self):
        '''test removing top list entry'''
        data = ['c', 'd', 'e']
        results = Yedit.remove_entry(data, '', value='d')
        self.assertTrue(results)
        self.assertEqual(data, ['c', 'e'])

    def test_remove_list_entry_index_top(self):
        '''test removing top list entry'''
        data = ['c', 'd', 'e']
        results = Yedit.remove_entry(data, '', 2)
        self.assertTrue(results)
        self.assertEqual(data, ['c', 'd'])

    def test_remove_list_entry_index(self):
        '''test removing list entry 1 index'''
        data = {'a': {'b': ['c', 'd', 'e']}}
        results = Yedit.remove_entry(data, 'a.b[1]')
        self.assertTrue(results)
        self.assertEqual(data, {'a': {'b': ['c', 'e']}})

    def test_parse_value_string_true(self):
        '''test parse_value'''
        results = Yedit.parse_value('true', 'str')
        self.assertEqual(results, 'true')

    def test_parse_value_bool_true(self):
        '''test parse_value'''
        results = Yedit.parse_value('true', 'bool')
        self.assertTrue(results)

    def test_parse_value_bool_exception(self):
        '''test parse_value'''
        with self.assertRaises(YeditException):
            Yedit.parse_value('TTT', 'bool')

    @mock.patch('yedit.Yedit.write')
    def test_run_ansible_basic(self, mock_write):
        '''test run_ansible with in-memory content only (no src file)'''
        params = {
            'src': None,
            'backup': False,
            'separator': '.',
            'state': 'present',
            'edits': [],
            'value': None,
            'key': None,
            'content': {'a': {'b': {'c': 1}}},
            'content_type': '',
        }
        # BUGFIX: the side_effect must be configured BEFORE run_ansible is
        # invoked; assigning it afterwards had no effect on the call.
        mock_write.side_effect = [
            (True, params['content']),
        ]
        results = Yedit.run_ansible(params)
        self.assertFalse(results['changed'])

    @mock.patch('yedit.Yedit.write')
    def test_run_ansible_and_write(self, mock_write):
        '''test run_ansible writing content out to a src file'''
        params = {
            'src': '/tmp/test',
            'backup': False,
            'separator': '.',
            'state': 'present',
            'edits': [],
            'value': None,
            'key': None,
            'content': {'a': {'b': {'c': 1}}},
            'content_type': '',
        }
        # BUGFIX: the side_effect must be configured BEFORE run_ansible is
        # invoked; assigning it afterwards had no effect on the call.
        mock_write.side_effect = [
            (True, params['content']),
        ]
        results = Yedit.run_ansible(params)
        self.assertTrue(results['changed'])

    def tearDown(self):
        '''TearDown method'''
        os.unlink(YeditTest.filename)
| |
# Copyright 2014 Cloudera Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ibis.util as util
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
import ibis.expr.operations as ops
class FormatMemo(object):
    """
    Bookkeeping for expression formatting: assigns each distinct object
    (keyed by its ``_repr()``) a stable ``ref_N`` alias on first observation
    and remembers its formatted text and how often it has been seen.
    """

    def __init__(self):
        from collections import defaultdict
        self.formatted = {}  # key -> formatted text
        self.aliases = {}    # key -> 'ref_N' alias string
        self.ops = {}        # key -> the observed object itself
        # Idiom fix: defaultdict(int) instead of defaultdict(lambda: 0)
        self.counts = defaultdict(int)  # key -> number of observations

    def __contains__(self, obj):
        return self._key(obj) in self.formatted

    def _key(self, obj):
        # Objects are keyed by their repr, not identity, so equivalent
        # objects share one alias.
        return obj._repr()

    def observe(self, obj, formatter=lambda x: x._repr()):
        """Record a sighting of *obj*; format and alias it on first sight."""
        key = self._key(obj)
        if key not in self.formatted:
            self.aliases[key] = 'ref_%d' % len(self.formatted)
            self.formatted[key] = formatter(obj)
            self.ops[key] = obj
        self.counts[key] += 1

    def count(self, obj):
        """Return how many times *obj* has been observed (0 if never)."""
        return self.counts[self._key(obj)]

    def get_alias(self, obj):
        """Return the 'ref_N' alias assigned to *obj*."""
        return self.aliases[self._key(obj)]

    def get_formatted(self, obj):
        """Return the formatted text recorded for *obj*."""
        return self.formatted[self._key(obj)]
class ExprFormatter(object):
    """
    For creating a nice tree-like representation of an expression graph for
    displaying in the console.
    TODO: detect reused DAG nodes and do not display redundant information
    """
    def __init__(self, expr, indent_size=2, base_level=0, memo=None,
                 memoize=True):
        # expr: the expression to render.
        # indent_size: number of spaces per indentation level.
        # base_level: indentation level applied to the final rendered text.
        # memoize: when True, repeated tables are printed once and aliased.
        self.expr = expr
        self.indent_size = indent_size
        self.base_level = base_level
        self.memoize = memoize
        # For tracking "extracted" objects, like tables, that we don't want to
        # print out more than once, and simply alias in the expression tree
        self.memo = memo or FormatMemo()
    def get_result(self):
        """Render the expression as an indented, multi-line display string."""
        what = self.expr.op()
        if self.memoize:
            self._memoize_tables()
        # Dispatch on the root node type to pick a formatting strategy.
        if isinstance(what, dt.HasSchema):
            # This should also catch aggregations
            if not self.memoize and what in self.memo:
                text = 'Table: %s' % self.memo.get_alias(what)
            elif isinstance(what, ops.PhysicalTable):
                text = self._format_table(what)
            else:
                # Any other node type
                text = self._format_node(what)
        elif isinstance(what, ops.TableColumn):
            text = self._format_column(self.expr)
        elif isinstance(what, ir.Node):
            text = self._format_node(what)
        elif isinstance(what, ops.Literal):
            # NOTE(review): if ops.Literal subclasses ir.Node, the previous
            # branch matches first and this one is unreachable -- confirm.
            text = 'Literal[%s] %s' % (self._get_type_display(),
                                       str(what.value))
        # Named value expressions are shown as "name = <rendered text>".
        if isinstance(self.expr, ir.ValueExpr) and self.expr._name is not None:
            text = '{0} = {1}'.format(self.expr.get_name(), text)
        if self.memoize:
            # Prepend the formatted text of every memoized reference, sorted
            # by alias, before the main expression text.
            alias_to_text = [(self.memo.aliases[x],
                              self.memo.formatted[x],
                              self.memo.ops[x])
                             for x in self.memo.formatted]
            alias_to_text.sort()
            # A hack to suppress printing out of a ref that is the result of
            # the top level expression
            refs = [x + '\n' + y
                    for x, y, op in alias_to_text
                    if not op.equals(what)]
            text = '\n\n'.join(refs + [text])
        return self._indent(text, self.base_level)
def _memoize_tables(self):
table_memo_ops = (ops.Aggregation, ops.Filter,
ops.Projection, ops.SelfReference)
def walk(expr):
op = expr.op()
def visit(arg):
if isinstance(arg, list):
[visit(x) for x in arg]
elif isinstance(arg, ir.Expr):
walk(arg)
if isinstance(op, ops.PhysicalTable):
self.memo.observe(op, self._format_table)
elif isinstance(op, ir.Node):
visit(op.args)
if isinstance(op, table_memo_ops):
self.memo.observe(op, self._format_node)
elif isinstance(op, dt.HasSchema):
self.memo.observe(op, self._format_table)
walk(self.expr)
def _indent(self, text, indents=1):
return util.indent(text, self.indent_size * indents)
def _format_table(self, table):
# format the schema
rows = ['name: {0!s}\nschema:'.format(table.name)]
rows.extend([' %s : %s' % tup for tup in
zip(table.schema.names, table.schema.types)])
opname = type(table).__name__
type_display = self._get_type_display(table)
opline = '%s[%s]' % (opname, type_display)
return '{0}\n{1}'.format(opline, self._indent('\n'.join(rows)))
def _format_column(self, expr):
# HACK: if column is pulled from a Filter of another table, this parent
# will not be found in the memo
col = expr.op()
parent_op = col.parent().op()
if parent_op in self.memo:
table_formatted = self.memo.get_alias(parent_op)
else:
table_formatted = '\n' + self._indent(self._format_node(parent_op))
type_display = self._get_type_display(self.expr)
return ("Column[{0}] '{1}' from table {2}"
.format(type_display, col.name, table_formatted))
def _format_node(self, op):
formatted_args = []
def visit(what, extra_indents=0):
if isinstance(what, ir.Expr):
result = self._format_subexpr(what)
else:
result = self._indent(str(what))
if extra_indents > 0:
result = util.indent(result, self.indent_size)
formatted_args.append(result)
arg_names = getattr(op, '_arg_names', None)
if arg_names is None:
for arg in op.args:
if isinstance(arg, list):
for x in arg:
visit(x)
else:
visit(arg)
else:
for arg, name in zip(op.args, arg_names):
if name is not None:
name = self._indent('{0}:'.format(name))
if isinstance(arg, list):
if name is not None and len(arg) > 0:
formatted_args.append(name)
indents = 1
else:
indents = 0
for x in arg:
visit(x, extra_indents=indents)
else:
if name is not None:
formatted_args.append(name)
indents = 1
else:
indents = 0
visit(arg, extra_indents=indents)
opname = type(op).__name__
type_display = self._get_type_display(op)
opline = '%s[%s]' % (opname, type_display)
return '\n'.join([opline] + formatted_args)
def _format_subexpr(self, expr):
formatter = ExprFormatter(expr, base_level=1, memo=self.memo,
memoize=False)
return formatter.get_result()
def _get_type_display(self, expr=None):
if expr is None:
expr = self.expr
if isinstance(expr, ir.Node):
expr = expr.to_expr()
if isinstance(expr, ir.TableExpr):
return 'table'
elif isinstance(expr, ir.ArrayExpr):
return 'array(%s)' % expr.type()
elif isinstance(expr, ir.SortExpr):
return 'array-sort'
elif isinstance(expr, (ir.ScalarExpr, ir.AnalyticExpr)):
return '%s' % expr.type()
elif isinstance(expr, ir.ExprList):
list_args = [self._get_type_display(arg)
for arg in expr.op().args]
return ', '.join(list_args)
else:
raise NotImplementedError
| |
"""
===============================================================================
Use pysam model to calculate the AS from bam file
===============================================================================
Author : Shujia Huang
Date : 2014-03-25 14:29:08
"""
import sys
import re
import optparse
import os
import string
import pysam
import matplotlib.pyplot as plt
import numpy as np
import AlterAlign as ATA
def IsSNP(refbase, alleles):
    """Return True when the site is a SNP: the reference base and every
    alternate allele are at most one base long.

    refbase -- reference allele string (length > 1 means indel/MNP)
    alleles -- iterable of alternate allele strings

    The original loop re-tested the loop-invariant len(refbase) on every
    iteration and never short-circuited; this version checks the reference
    once and stops at the first multi-base allele. An empty allele list
    still yields True, matching the original behavior.
    """
    if len(refbase) > 1:
        return False
    return all(len(ale) <= 1 for ale in alleles)
def main(opt):
    """Scan a (possibly gzipped) VCF, run alternate alignment via AlterAlign
    for every non-SNP site against the BAM, and write an annotated
    single-sample VCF to <outPrefix>.vcf with an extra 'AA' FORMAT field.

    opt -- optparse options object; see the option definitions in __main__.

    NOTE: Python 2 only (print >> statements, string.atoi).
    """
    vcfInfile = opt.vcfInfile
    bamInfile = opt.bamInfile
    faInfile = opt.refInfile
    sampleID = opt.sample
    refId = opt.refChrId
    outPrefix = opt.outPrefix
    mapq = opt.mapq
    newID = opt.exc

    # Load the reference sequence(s) for the requested chromosome (or all).
    fa = ATA.LoadFaSeq(faInfile, refId)
    print >> sys.stderr, '# [INFO] Fa Loading finish ***'

    # Open the alignment in binary or text mode based on the extension.
    if bamInfile[-4:] == '.bam':
        samInHandle = pysam.Samfile(bamInfile, 'rb')
    else :
        samInHandle = pysam.Samfile(bamInfile, 'r')
    #samOutHandle = pysam.Samfile(outPrefix + '.bam', 'wb', template=samInHandle)
    vcfOutHandle = open(outPrefix + '.vcf', 'w')
    print >> sys.stderr, '# [INFO] Now Scaning the VCF and doing alternate align ... ...'

    # Choose the VCF reader: gzip stream, tabix region query, or plain file.
    # NOTE(review): hard-coded absolute tabix path limits portability.
    if vcfInfile[-3:] == '.gz':
        if refId == "ALL":
            I = os.popen('gzip -dc %s' % vcfInfile)
        else:
            I = os.popen('/home/siyang/Bin/software_pip/tabix-0.2.6/tabix -h %s %s' % (vcfInfile, refId))
    else :
        I = open(vcfInfile)

    # 'frist' (sic) flags the first ##FORMAT header line, where the new
    # AA FORMAT definition is injected.
    frist = True
    while 1:
        # Read in chunks of ~100 KB worth of lines to bound memory use.
        lines = I.readlines(100000)
        if not lines : break;
        for line in lines :
            line = line.strip('\n')
            col = line.split()
            if re.search(r'^##', line):
                # Meta-header lines: pass through; inject the AA definition
                # just before the first ##FORMAT line.
                if frist and re.search(r'^##FORMAT=', line):
                    frist = False
                    vcfOutHandle.write('##FORMAT=<ID=AA,Number=4,Type=Integer,Description="Information of Alternate Align. Format: Ref_perfect,Alt_Perfect,Both_Perfect,Both_Imperfect">\n')
                vcfOutHandle.write('%s\n' % line)
                continue
            elif re.search(r'^#CHROM', line):
                # Column header: map sample names to their column indexes
                # and reduce the output to a single sample column.
                if len(col) < 10 :
                    print >> sys.stderr, '# [ERROR] The input vcf file (%s) does not contain the "Sample" fields!\n' % vcfInfile
                    sys.exit(1)
                sam2col = {sam:i+9 for i, sam in enumerate(col[9:])}
                # NOTE(review): a missing sample only warns here, but the
                # sam2col lookup below will then raise KeyError -- confirm
                # whether this should sys.exit instead.
                if sampleID not in sam2col:
                    print >> sys.stderr, '# [ERROR] The input sample id (%s) is not match in Vcf file(%s)\n' % (sampleID, vcfInfile)
                if len(newID) > 0:
                    vcfOutHandle.write('%s\t%s\n' % ('\t'.join(col[:9]), newID))
                else:
                    vcfOutHandle.write('%s\t%s\n' % ('\t'.join(col[:9]), sampleID))
                continue
            if refId != 'ALL' and refId != col[0]: continue
            if col[4] == '.': continue # ignore REFCALL or INTERGAP
            idx = sam2col[sampleID]
            fi = col[idx].split(':')
            # Genotype may be '/'- or '|'-separated.
            gt = fi[0].split('/')
            if '|' in fi[0]: gt = fi[0].split('|')
            gtIdx = 1 # Default is the first ALT Sequence
            if len(gt) == 2 and gt[1] != '.':
                gtIdx = string.atoi(gt[1])
            col[4] = col[4].split(',')[gtIdx-1] # Match to the identity sample
            isAltAlign = False
            zr,za,zc,zi = 0,0,0,0
            if not IsSNP(col[3], [col[4]]):
                # Not SNP, INTERGAP ...
                isAltAlign = True
                # Re-align reads around the indel to count reads supporting
                # ref / alt / both / neither.
                zr,za,zc,zi = ATA.Align(samInHandle,
                                        fa,
                                        col[0],
                                        string.atoi(col[1]),
                                        col[2],
                                        col[3],
                                        col[4][1:], #col[4][0] is reference
                                        mapq)
            # Ignore the position which is meanless
            if not isAltAlign and col[idx] == './.': continue
            fm = {t:i for i, t in enumerate(col[8].split(':'))} # Get Format
            if col[idx] != './.' and len(fi) != len(fm):
                raise ValueError('[ERROR] The format of "FORMAT"' +
                                 'fields is not match sample ' +
                                 '%r in %r' % (col[idx], fm))
            # NOTE(review): shadows the builtins 'type' and 'format' below;
            # also the '%s' in this message is never interpolated -- both are
            # candidates for a code fix, not changed in this doc-only pass.
            for type in ['VS', 'VT']:
                if type not in fm:
                    raise ValueError('[ERROR] The format of VCF file is ' +
                                     'not right which you input, it did ' +
                                     'not contian %s field in FORMAT')
            format = {}
            for k, i in fm.items():
                if k != 'GT' and col[idx] != './.': format[k] = fi[i]
            # Use first sample which is not './.' to set VT and VS if col[idx] == './.'
            # This is the same idea with what we do above for 'gtIdx = 1'
            if col[idx] == './.':
                if isAltAlign:
                    isam = [sam for sam in col[9:] if sam != './.' and not re.search(r'^0/0:', sam)]
                    if len(isam) == 0:
                        # This may happen if appear duplication position and pick the
                        # REFCALL instand of Variant call when CombineVar with GATK
                        # NOTE(review): if every sample is './.' this [0]
                        # indexing raises IndexError -- confirm inputs.
                        isam = [sam for sam in col[9:] if sam != './.'][0].split(':')
                    else:
                        isam = isam[0].split(':')
                    format['VT'] = isam[fm['VT']]
                    format['VS'] = isam[fm['VS']]
            if isAltAlign:
                # AA = Ref_perfect,Alt_Perfect,Both_Perfect,Both_Imperfect
                format['AA'] = ','.join(str(a) for a in [zr,za,zc,zi])
            # Collapse multi-allelic genotype indexes (>1) down to '1'.
            gt = fi[fm['GT']].split('/')
            if '|' in fi[fm['GT']]: gt = fi[fm['GT']].split('|')
            for i, g in enumerate(gt):
                if g != '.' and string.atoi(g) > 1: gt[i] = '1'
            if '|' in fi[fm['GT']]:
                fi[fm['GT']] = '|'.join(gt)
            else:
                fi[fm['GT']] = '/'.join(gt)
            # Rebuild FORMAT and the sample column with keys sorted so the
            # two stay in the same order.
            col[8] = 'GT:' + ':'.join(sorted(format.keys()))
            # Still keep the origin genotype
            col[idx] = fi[fm['GT']] + ':' + ':'.join([format[k] for k in sorted(format.keys())])
            vcfOutHandle.write('%s\t%s\n' % ('\t'.join(col[:9]), col[idx]))
    I.close()
    samInHandle.close()
    vcfOutHandle.close()
    print >> sys.stderr, '# [INFO] Closing the two Ouput files :\n    -- %s' % (outPrefix + '.vcf')
########################################################################
########################################################################
if __name__ == '__main__' :
    # Command-line entry point: parse options, validate required arguments,
    # echo the parameters to stderr, then run main().
    usage = "\nUsage : %prog [option] [-v vcfInfile] > Output"
    optp = optparse.OptionParser(usage=usage)
    optp.add_option("-v", "--vcf", dest="vcfInfile", metavar="VCF", help="Variants. VCF format.", default=[] )
    optp.add_option("-b", "--bam", dest="bamInfile", metavar="BAM", help="Bam Alignment file. ", default=[] )
    optp.add_option("-c", "--chr", dest="refChrId" , metavar="CHR", help="The chr ID of Re."   , default='ALL')
    optp.add_option("-r", "--ref", dest="refInfile", metavar="REF", help="Reference fa format. ", default=[] )
    optp.add_option("-s", "--smp", dest="sample"   , metavar="SMP", help="Sample ID."           , default=[] )
    optp.add_option("-o", "--out", dest="outPrefix", metavar="OUT", help="The prefix of output. [out]"      , default = 'out')
    optp.add_option("-q", "--qul", dest="mapq"     , metavar="QUL", help="Threshold of Mapping Quality. [20]", default = '20' )
    optp.add_option("-e", "--exc", dest="exc"      , metavar="EXC", help="Change Sample ID(-s) to be -e in output", default=[])
    opt, _ = optp.parse_args()
    if len(opt.vcfInfile) == 0: optp.error("Required [-v vcfInfile]\n")
    if len(opt.bamInfile) == 0: optp.error("Required [-b bamInfile]\n")
    if len(opt.refInfile) == 0: optp.error("Required [-r reference] Fa format\n")
    if len(opt.sample   ) == 0: optp.error("Required [-s sample ID]\n")
    # mapq arrives as a string; convert once here (Python 2 string.atoi).
    opt.mapq = string.atoi(opt.mapq)
    # NOTE(review): 'Paraeters' typo lives in a runtime string; fix it in a
    # code change, not in this doc-only pass.
    print >> sys.stderr, '#[INFO] Paraeters: python', sys.argv[0], '\n\t-v', opt.vcfInfile, \
        '\n\t-b', opt.bamInfile, '\n\t-r', opt.refInfile, '\n\t-s', opt.sample, '\n\t-o', opt.outPrefix, \
        '\n\t-q', opt.mapq , '\n\t-c', opt.refChrId
    if len(opt.exc) > 0:
        print >> sys.stderr, '\t-e', opt.exc, '\n'
    else :
        print >> sys.stderr, '\n'
    main(opt)
    print >> sys.stderr, '*********************** ALL DONE ***********************\n'
| |
# Copyright 2013 VMware, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from neutron.common import exceptions as n_exc
from neutron import manager
from neutron.plugins.common import constants
from neutron.services import provider_configuration as provconf
from neutron.tests import base
class ParseServiceProviderConfigurationTestCase(base.BaseTestCase):
    """Exercise provconf.parse_service_provider_opt() parsing behavior."""

    def setUp(self):
        super(ParseServiceProviderConfigurationTestCase, self).setUp()
        # Stub out NeutronModule.service_providers so each test controls
        # exactly which raw provider strings are parsed.
        self.service_providers = mock.patch.object(
            provconf.NeutronModule, 'service_providers').start()

    def _set_override(self, service_providers):
        self.service_providers.return_value = service_providers

    def _assert_parses_to_single(self, expected):
        # Shared check: parsing yields exactly one provider, equal to
        # `expected`.
        res = provconf.parse_service_provider_opt()
        self.assertEqual(len(res), 1)
        self.assertEqual(res, [expected])

    def test_default_service_provider_configuration(self):
        # Without any configuration the option defaults to an empty list.
        self.assertEqual(cfg.CONF.service_providers.service_provider, [])

    def test_parse_single_service_provider_opt(self):
        self._set_override([constants.LOADBALANCER +
                            ':lbaas:driver_path'])
        self._assert_parses_to_single({'service_type': constants.LOADBALANCER,
                                       'name': 'lbaas',
                                       'driver': 'driver_path',
                                       'default': False})

    def test_parse_single_default_service_provider_opt(self):
        # The trailing ':default' flag flips the 'default' field to True.
        self._set_override([constants.LOADBALANCER +
                            ':lbaas:driver_path:default'])
        self._assert_parses_to_single({'service_type': constants.LOADBALANCER,
                                       'name': 'lbaas',
                                       'driver': 'driver_path',
                                       'default': True})

    def test_parse_multi_service_provider_opt(self):
        self._set_override([constants.LOADBALANCER +
                            ':lbaas:driver_path',
                            constants.LOADBALANCER + ':name1:path1',
                            constants.LOADBALANCER +
                            ':name2:path2:default'])
        parsed = provconf.parse_service_provider_opt()
        # This parsing crosses repos if additional projects are installed,
        # so check that at least what we expect is there; there may be more.
        self.assertTrue(len(parsed) >= 3)

    def test_parse_service_provider_invalid_format(self):
        # A fourth field other than 'default', or a missing name/driver,
        # must be rejected with Invalid.
        for bad_override in ([constants.LOADBALANCER +
                              ':lbaas:driver_path',
                              'svc_type:name1:path1:def'],
                             [constants.LOADBALANCER +
                              ':',
                              'svc_type:name1:path1:def']):
            self._set_override(bad_override)
            self.assertRaises(n_exc.Invalid,
                              provconf.parse_service_provider_opt)

    def test_parse_service_provider_name_too_long(self):
        # A 256-character provider name is rejected.
        self._set_override([constants.LOADBALANCER +
                            ':' + 'a' * 256 + ':driver_path',
                            'svc_type:name1:path1:def'])
        self.assertRaises(n_exc.Invalid, provconf.parse_service_provider_opt)
class ProviderConfigurationTestCase(base.BaseTestCase):
    """Exercise the ProviderConfiguration container directly."""

    def setUp(self):
        super(ProviderConfigurationTestCase, self).setUp()
        # Stub NeutronModule.service_providers; tests inject provider
        # strings via _set_override().
        self.service_providers = mock.patch.object(
            provconf.NeutronModule, 'service_providers').start()

    def _set_override(self, service_providers):
        self.service_providers.return_value = service_providers

    def test_ensure_driver_unique(self):
        config = provconf.ProviderConfiguration()
        config.providers[('svctype', 'name')] = {'driver': 'driver',
                                                 'default': True}
        # Re-registering an already-used driver path must fail ...
        self.assertRaises(n_exc.Invalid,
                          config._ensure_driver_unique, 'driver')
        # ... while a fresh driver path passes.
        self.assertIsNone(config._ensure_driver_unique('another_driver1'))

    def test_ensure_default_unique(self):
        config = provconf.ProviderConfiguration()
        config.providers[('svctype', 'name')] = {'driver': 'driver',
                                                 'default': True}
        # Only one default provider per service type is allowed.
        self.assertRaises(n_exc.Invalid,
                          config._ensure_default_unique,
                          'svctype', True)
        self.assertIsNone(config._ensure_default_unique('svctype', False))
        self.assertIsNone(config._ensure_default_unique('svctype1', True))
        self.assertIsNone(config._ensure_default_unique('svctype1', False))

    def test_add_provider(self):
        config = provconf.ProviderConfiguration()
        config.add_provider({'service_type': constants.LOADBALANCER,
                             'name': 'name',
                             'driver': 'path',
                             'default': False})
        # Stored under a (service_type, name) key with the remaining fields
        # as the value.
        self.assertEqual(len(config.providers), 1)
        self.assertEqual(list(config.providers.keys()),
                         [(constants.LOADBALANCER, 'name')])
        self.assertEqual(list(config.providers.values()),
                         [{'driver': 'path', 'default': False}])

    def test_add_duplicate_provider(self):
        config = provconf.ProviderConfiguration()
        provider = {'service_type': constants.LOADBALANCER,
                    'name': 'name',
                    'driver': 'path',
                    'default': False}
        config.add_provider(provider)
        # Adding the same provider again raises and leaves one entry.
        self.assertRaises(n_exc.Invalid, config.add_provider, provider)
        self.assertEqual(len(config.providers), 1)

    def test_get_service_providers(self):
        self._set_override([constants.LOADBALANCER + ':name:path',
                            constants.LOADBALANCER + ':name2:path2',
                            'st2:name:driver:default',
                            'st3:name2:driver2:default'])
        expected_providers = [{'service_type': constants.LOADBALANCER,
                               'name': 'name',
                               'driver': 'path',
                               'default': False},
                              {'service_type': constants.LOADBALANCER,
                               'name': 'name2',
                               'driver': 'path2',
                               'default': False},
                              {'service_type': 'st2',
                               'name': 'name',
                               'driver': 'driver',
                               'default': True},
                              {'service_type': 'st3',
                               'name': 'name2',
                               'driver': 'driver2',
                               'default': True}]
        config = provconf.ProviderConfiguration()
        for provider in expected_providers:
            # Filtering on (name, service_type) must return exactly that
            # provider.
            filtered = config.get_service_providers(
                filters={'name': [provider['name']],
                         'service_type': provider['service_type']}
            )
            self.assertEqual(filtered, [provider])

    def test_get_service_providers_with_fields(self):
        self._set_override([constants.LOADBALANCER + ":name:path",
                            constants.LOADBALANCER + ":name2:path2"])
        providers = [{'service_type': constants.LOADBALANCER,
                      'name': 'name',
                      'driver': 'path',
                      'default': False},
                     {'service_type': constants.LOADBALANCER,
                      'name': 'name2',
                      'driver': 'path2',
                      'default': False}]
        config = provconf.ProviderConfiguration()
        for provider in providers:
            # The `fields` argument projects each result down to the named
            # keys only.
            filtered = config.get_service_providers(
                filters={'name': [provider['name']],
                         'service_type': provider['service_type']},
                fields=['name']
            )
            self.assertEqual(filtered, [{'name': provider['name']}])
class GetProviderDriverClassTestCase(base.BaseTestCase):
    """Exercise provconf.get_provider_driver_class() lookups."""

    def test_get_provider_driver_class_hit(self):
        # A short alias registered in the core-plugins namespace resolves to
        # its full dotted class path.
        actual = provconf.get_provider_driver_class(
            'ml2', namespace=manager.CORE_PLUGINS_NAMESPACE)
        self.assertEqual('neutron.plugins.ml2.plugin.Ml2Plugin', actual)

    def test_get_provider_driver_class_miss(self):
        # An unknown name is passed back unchanged.
        self.assertEqual('foo', provconf.get_provider_driver_class('foo'))
class NeutronModuleTestCase(base.BaseTestCase):
    """Exercise NeutronModule's ini-file based provider loading."""

    def test_can_parse_multi_opt_service_provider_from_conf_file(self):
        # Load the test config shipped in ETCDIR and confirm both
        # configured providers are read back.
        module = provconf.NeutronModule('neutron_test')
        module.ini(base.ETCDIR)
        self.assertEqual(['foo', 'bar'], module.service_providers(),
                         'Expected two providers, only one read')
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import sys
import calendar
from ujson import loads
from tests.unit.base import ApiTestCase
from preggy import expect
from tornado.testing import gen_test
from tornado.httpclient import HTTPError
from datetime import datetime
from tests.fixtures import ReviewFactory, PageFactory, DomainFactory, KeyFactory, ViolationFactory
from holmes.models import Key, Violation
class TestMostCommonViolationsHandler(ApiTestCase):
    """Tests for the /most-common-violations/ endpoint."""

    @gen_test
    def test_can_get_most_common_violations(self):
        # Start clean, then record 5 hits of violation.0 and 2 hits of
        # violation.1 against a single review.
        self.db.query(Violation).delete()
        review = ReviewFactory.create()
        for _ in range(5):
            review.add_violation(
                Key.get_or_create(self.db, 'violation.0'),
                'value', 100, review.domain)
        for _ in range(2):
            review.add_violation(
                Key.get_or_create(self.db, 'violation.1'),
                'value', 300, review.domain)
        self.server.application.violation_definitions = {
            'violation.%d' % idx: {
                'title': 'title.%s' % idx,
                'category': 'category.%s' % idx,
                'key': Key.get_or_create(
                    self.db, 'violation.%d' % idx, 'category.%d' % idx)
            } for idx in range(3)
        }
        self.db.flush()

        response = yield self.authenticated_fetch('/most-common-violations/')
        violations = loads(response.body)
        expect(response.code).to_equal(200)
        expect(violations).to_be_like([
            {'count': 5, 'name': 'title.0', 'category': 'category.0', 'key': 'violation.0'},
            {'count': 2, 'name': 'title.1', 'category': 'category.1', 'key': 'violation.1'},
            {'count': 0, 'name': 'title.2', 'category': 'category.2', 'key': 'violation.2'},
        ])

        # Wiping the rows must not change the response: it is served from
        # cache on the second request.
        self.db.query(Violation).delete()
        response = yield self.authenticated_fetch('/most-common-violations/')
        violations_from_cache = loads(response.body)
        expect(response.code).to_equal(200)
        expect(violations_from_cache).to_be_like(violations)
class TestViolationHandler(ApiTestCase):
    """Tests for /violation/<key> listing, once per search provider, plus
    the blacklist-domains detail endpoint.

    NOTE(review): the '04' literals below are Python 2 octal-style integer
    syntax (value 4); this file is Python 2 only as written.
    """

    @gen_test
    def test_can_get_violation_by_key_name_using_no_external_search_provider(self):
        self.use_no_external_search_provider()

        # Fixture: 3 domains (ga/gb/gc.com), 6 pages spread across them,
        # each page with an active review carrying i violations.
        domains = [DomainFactory.create(
            name='g%s.com' % chr(i),
            url='http://g%s.com' % chr(i)
        ) for i in range(ord('a'), ord('d'))]

        pages = [PageFactory.create(
            domain=domains[i % 3],
            url='%s/%d' % (domains[i % 3].url, i % 2)
        ) for i in range(6)]

        for i, page in enumerate(pages):
            review = ReviewFactory.create(
                page=page,
                is_active=True,
                number_of_violations=i,
                created_date=datetime(2014, 04, 15, 11, 44, i),
                completed_date=datetime(2014, 04, 15, 11, 44, i * 2)
            )
            review.page.last_review_id = review.id
            review.page.last_review_uuid = review.uuid
            review.page.last_review_date = review.completed_date
        self.db.flush()

        self.server.application.violation_definitions = {
            'key.%s' % i: {
                'title': 'title.%s' % i,
                'category': 'category.%s' % (i % 3),
                'generic_description': 'description.%s' % (i % 3),
                'key': Key.get_or_create(self.db, 'key.%d' % i, 'category.%d' % (i % 3))
            } for i in range(6)
        }

        dt = datetime(2014, 04, 15, 11, 44, 4)
        dt_timestamp = calendar.timegm(dt.utctimetuple())

        # Unfiltered request: 4 reviews carry key.1 violations.
        response = yield self.authenticated_fetch('/violation/key.1')
        violations = loads(response.body)
        expect(response.code).to_equal(200)
        expect(violations).to_length(3)
        expect(violations['title']).to_equal('title.1')
        expect(violations['reviews']).to_length(4)
        expect(violations['reviewsCount']).to_equal(4)
        expect(violations['reviews'][3]['domain']).to_equal('gc.com')
        expect(violations['reviews'][3]['page']['url']).to_equal('http://gc.com/0')
        expect(violations['reviews'][3]['page']['completedAt']).to_equal(dt_timestamp)

        # Pagination shrinks the review list but keeps the total count.
        # NOTE(review): '¤t_page' below looks like an HTML-entity-mangled
        # '&current_page' -- confirm against version history before touching.
        response = yield self.authenticated_fetch(
            '/violation/key.1?page_size=2¤t_page=1'
        )
        violations = loads(response.body)
        expect(response.code).to_equal(200)
        expect(violations).to_length(3)
        expect(violations['title']).to_equal('title.1')
        expect(violations['reviews']).to_length(2)
        expect(violations['reviewsCount']).to_equal(4)

        # page_filter alone: the count is omitted (null).
        response = yield self.authenticated_fetch(
            '/violation/key.1?page_filter=1'
        )
        violations = loads(response.body)
        expect(response.code).to_equal(200)
        expect(violations).to_length(3)
        expect(violations['title']).to_equal('title.1')
        expect(violations['reviews']).to_length(4)
        expect(violations['reviewsCount']).to_be_null()

        # domain_filter alone: only gc.com reviews, count omitted.
        response = yield self.authenticated_fetch(
            '/violation/key.1?domain_filter=gc.com'
        )
        violations = loads(response.body)
        expect(response.code).to_equal(200)
        expect(violations).to_length(3)
        expect(violations['title']).to_equal('title.1')
        expect(violations['reviews']).to_length(2)
        expect(violations['reviewsCount']).to_be_null()

        # Combined domain and page filter narrows to a single review.
        response = yield self.authenticated_fetch(
            '/violation/key.1?domain_filter=gc.com&page_filter=1'
        )
        violations = loads(response.body)
        expect(response.code).to_equal(200)
        expect(violations).to_length(3)
        expect(violations['title']).to_equal('title.1')
        expect(violations['reviews']).to_length(1)
        expect(violations['reviewsCount']).to_be_null()

        # Unknown violation key -> 404.
        try:
            response = yield self.authenticated_fetch(
                '/violation/foobar'
            )
        except HTTPError:
            err = sys.exc_info()[1]
            expect(err).not_to_be_null()
            expect(err.code).to_equal(404)
            expect(err.response.reason).to_be_like('Invalid violation key foobar')
        else:
            assert False, 'Should not get this far'

        # Unknown domain filter -> 404.
        try:
            response = yield self.authenticated_fetch(
                '/violation/key.1?domain_filter=foobar'
            )
        except HTTPError:
            err = sys.exc_info()[1]
            expect(err).not_to_be_null()
            expect(err.code).to_equal(404)
            expect(err.response.reason).to_be_like('Domain foobar not found')
        else:
            assert False, 'Should not get this far'

    @gen_test
    def test_can_get_violation_by_key_name_using_elastic_search_provider(self):
        self.use_elastic_search_provider()

        # Same fixture shape as above, but violations count down (6 - i)
        # and reviews are pushed into the elastic search index.
        domains = [DomainFactory.create(
            name='g%s.com' % chr(i),
            url='http://g%s.com' % chr(i)
        ) for i in range(ord('a'), ord('d'))]

        pages = [PageFactory.create(
            domain=domains[i % 3],
            url='%s/%d' % (domains[i % 3].url, i % 2)
        ) for i in range(6)]

        for i, page in enumerate(pages):
            review = ReviewFactory.create(
                page=page,
                is_active=True,
                number_of_violations=6 - i,
                created_date=datetime(2014, 04, 15, 11, 44, i),
                completed_date=datetime(2014, 04, 15, 11, 44, i * 2),
            )
            self.server.application.search_provider.index_review(review)
        self.db.flush()
        # Make the indexed documents visible to queries.
        self.server.application.search_provider.refresh()

        self.server.application.violation_definitions = {
            'key.%s' % i: {
                'title': 'title.%s' % i,
                'category': 'category.%s' % (i % 3),
                'key': Key.get_or_create(self.db, 'key.%d' % i, 'category.%d' % (i % 3))
            } for i in range(6)
        }

        dt = datetime(2014, 04, 15, 11, 44, 2)  # [4, 3, 2, 1, 0]
        dt_timestamp = calendar.timegm(dt.utctimetuple())

        # Unfiltered: 5 reviews carry key.1 violations in this fixture.
        response = yield self.authenticated_fetch(
            '/violation/key.1'
        )
        violations = loads(response.body)
        expect(response.code).to_equal(200)
        expect(violations).to_length(3)
        expect(violations['title']).to_equal('title.1')
        expect(violations['reviews']).to_length(5)
        expect(violations['reviewsCount']).to_equal(5)
        expect(violations['reviews'][3]['domain']).to_equal('gb.com')
        expect(violations['reviews'][3]['page']['url']).to_equal('http://gb.com/1')
        expect(violations['reviews'][3]['page']['completedAt']).to_equal(dt_timestamp)

        # Pagination shrinks the list; the count stays 5.
        response = yield self.authenticated_fetch(
            '/violation/key.1?page_size=2¤t_page=1'
        )
        violations = loads(response.body)
        expect(response.code).to_equal(200)
        expect(violations).to_length(3)
        expect(violations['title']).to_equal('title.1')
        expect(violations['reviews']).to_length(2)
        expect(violations['reviewsCount']).to_equal(5)

        # page_filter: unlike the DB provider, the count is still reported.
        response = yield self.authenticated_fetch(
            '/violation/key.1?page_filter=1'
        )
        violations = loads(response.body)
        expect(response.code).to_equal(200)
        expect(violations).to_length(3)
        expect(violations['title']).to_equal('title.1')
        expect(violations['reviews']).to_length(5)
        expect(violations['reviewsCount']).to_equal(5)

        # domain_filter narrows both the list and the count.
        response = yield self.authenticated_fetch(
            '/violation/key.1?domain_filter=gb.com'
        )
        violations = loads(response.body)
        expect(response.code).to_equal(200)
        expect(violations).to_length(3)
        expect(violations['title']).to_equal('title.1')
        expect(violations['reviews']).to_length(2)
        expect(violations['reviewsCount']).to_equal(2)

        # Combined filters narrow to a single review.
        response = yield self.authenticated_fetch(
            '/violation/key.1?domain_filter=gb.com&page_filter=1'
        )
        violations = loads(response.body)
        expect(response.code).to_equal(200)
        expect(violations).to_length(3)
        expect(violations['title']).to_equal('title.1')
        expect(violations['reviews']).to_length(1)
        expect(violations['reviewsCount']).to_equal(1)

        # Unknown violation key -> 404.
        try:
            response = yield self.authenticated_fetch('/violation/foobar')
        except HTTPError:
            err = sys.exc_info()[1]
            expect(err).not_to_be_null()
            expect(err.code).to_equal(404)
            expect(err.response.reason).to_be_like('Invalid violation key foobar')
        else:
            assert False, 'Should not get this far'

        # Unknown domain filter -> 404.
        try:
            response = yield self.authenticated_fetch(
                '/violation/key.1?domain_filter=foobar'
            )
        except HTTPError:
            err = sys.exc_info()[1]
            expect(err).not_to_be_null()
            expect(err.code).to_equal(404)
            expect(err.response.reason).to_be_like('Domain foobar not found')
        else:
            assert False, 'Should not get this far'

    @gen_test
    def test_can_get_blacklist_domains(self):
        # Each iteration of the inner loop records 3 URL entries for domain
        # i (two in the first violation, one in the second), so domain i
        # accumulates 3 * (i + 1) entries: 3, 6, 9.
        key = KeyFactory.create(name='blacklist.domains')
        for i in range(3):
            for j in range(i + 1):
                ViolationFactory.create(
                    key=key,
                    value=[
                        'http://www.blacklist-domain-%d.com/' % i,
                        'http://blacklist-domain-%d.com/' % i
                    ]
                )
                ViolationFactory.create(
                    key=key,
                    value=['http://www.blacklist-domain-%d.com/' % i]
                )
        self.db.flush()

        self.server.application.violation_definitions = {
            'blacklist.domains': {
                'title': 'title',
                'category': 'category',
                'generic_description': '',
                'key': key
            }
        }

        response = yield self.authenticated_fetch(
            '/violation/blacklist.domains/domains'
        )
        expect(response.code).to_equal(200)
        violation = loads(response.body)
        # Results come back ordered by count, descending, with the 'www.'
        # and bare forms folded into one domain.
        expect(violation['details']).to_length(3)
        expect(violation['details']).to_be_like([
            {'count': 9, 'domain': 'blacklist-domain-2.com'},
            {'count': 6, 'domain': 'blacklist-domain-1.com'},
            {'count': 3, 'domain': 'blacklist-domain-0.com'}
        ])
class TestViolationDomainsHandler(ApiTestCase):
    """Tests for the /violation/<key>/domains endpoint."""

    @gen_test
    def test_fails_by_invalid_key_name_domains(self):
        # Unknown keys must come back as a 404 with a descriptive reason.
        try:
            yield self.authenticated_fetch('/violation/foobar/domains')
        except HTTPError as err:
            expect(err).not_to_be_null()
            expect(err.code).to_equal(404)
            expect(err.response.reason).to_be_like('Invalid violation key foobar')
        else:
            assert False, 'Should not get this far'

    @gen_test
    def test_can_get_by_key_name_domains(self):
        domains = [DomainFactory.create(name='g%d.com' % i) for i in range(2)]
        keys = [KeyFactory.create(name='random.fact.%s' % i) for i in range(3)]

        # Key i gets i+1 violations, alternating between the two domains,
        # so keys[2] ends with 2 on g0.com and 1 on g1.com.
        for key_index in range(3):
            for violation_index in range(key_index + 1):
                ViolationFactory.create(
                    key=keys[key_index],
                    domain=domains[violation_index % 2]
                )
        self.db.flush()

        self.server.application.violation_definitions = {
            'random.fact.%s' % idx: {
                'title': 'SEO',
                'category': 'SEO',
                'generic_description': 'Desc',
                'key': keys[idx]
            } for idx in range(3)
        }

        response = yield self.authenticated_fetch(
            '/violation/%s/domains' % keys[2].name
        )
        violation = loads(response.body)
        expect(response.code).to_equal(200)
        expect(violation).to_length(5)
        expect(violation['domains']).to_length(2)
        expect(violation).to_be_like({
            'domains': [
                {'name': 'g0.com', 'count': 2},
                {'name': 'g1.com', 'count': 1}
            ],
            'total': 3,
            'title': 'SEO',
            'category': 'SEO',
            'description': 'Desc'
        })
class TestViolationsHandler(ApiTestCase):
    """Tests for the /violations listing endpoint."""

    @gen_test
    def test_can_get_violations(self):
        keys = [KeyFactory.create(name='random.fact.%s' % i) for i in range(3)]
        violation_definitions = {
            'random.fact.%s' % i: {
                'title': 'title.%d' % i,
                'category': 'category.%d' % i,
                'key': key
            } for i, key in enumerate(keys)
        }

        def flatten(definition):
            # In-place: replace the Key object with its plain name, which is
            # the shape the endpoint serializes.
            definition['key_name'] = definition.pop('key').name
            return definition

        # Build the expectation first -- flatten() mutates the same dicts
        # that are then handed to the application below.
        expected = [flatten(d) for d in violation_definitions.values()]
        self.server.application.violation_definitions = violation_definitions

        response = yield self.authenticated_fetch('/violations')
        violations = loads(response.body)
        expect(violations).to_length(3)
        expect(violations).to_be_like(expected)
| |
from __future__ import absolute_import
import logging
import six
from collections import defaultdict
from sentry import features
from sentry.models.useroption import UserOption
from sentry.shared_integrations.exceptions import ApiError, IntegrationError
from sentry.models import Activity, ExternalIssue, Group, GroupLink, GroupStatus, Organization
from sentry.utils.http import absolute_uri
from sentry.utils.safe import safe_execute
from sentry.utils.compat import filter
logger = logging.getLogger("sentry.integrations.issues")
class IssueBasicMixin(object):
    def should_sync(self, attribute):
        """Return whether changes to *attribute* should be synced to the
        external tracker; the base implementation syncs nothing."""
        return False
    def get_group_title(self, group, event, **kwargs):
        # Default external-issue title: the latest event's title.
        return event.title
    def get_issue_url(self, key):
        """
        Given the key of the external_issue return the external issue link.

        Subclasses must implement this for their provider.
        """
        raise NotImplementedError
def get_group_body(self, group, event, **kwargs):
result = []
for interface in six.itervalues(event.interfaces):
output = safe_execute(interface.to_string, event, _with_transaction=False)
if output:
result.append(output)
return "\n\n".join(result)
def get_group_description(self, group, event, **kwargs):
params = {}
if kwargs.get("link_referrer"):
params["referrer"] = kwargs.get("link_referrer")
output = [
u"Sentry Issue: [{}]({})".format(
group.qualified_short_id, absolute_uri(group.get_absolute_url(params=params))
)
]
body = self.get_group_body(group, event)
if body:
output.extend(["", "```", body, "```"])
return "\n".join(output)
def get_create_issue_config(self, group, user, **kwargs):
"""
These fields are used to render a form for the user,
and are then passed in the format of:
>>>{'title': 'TypeError: Object [object Object] has no method "updateFrom"''}
to `create_issue`, which handles creation of the issue
in Jira, VSTS, GitHub, etc
"""
event = group.get_latest_event()
return [
{
"name": "title",
"label": "Title",
"default": self.get_group_title(group, event, **kwargs),
"type": "string",
"required": True,
},
{
"name": "description",
"label": "Description",
"default": self.get_group_description(group, event, **kwargs),
"type": "textarea",
"autosize": True,
"maxRows": 10,
},
]
    def get_link_issue_config(self, group, **kwargs):
        """
        Used by the `GroupIntegrationDetailsEndpoint` to create an
        `ExternalIssue` using title/description obtained from calling
        `get_issue` described below.

        Returns a single free-text field for the external issue identifier.
        """
        return [{"name": "externalIssue", "label": "Issue", "default": "", "type": "string"}]
    def get_persisted_default_config_fields(self):
        """
        Returns a list of field names that should have their last used values
        persisted on a per-project basis.

        Base implementation persists nothing; providers override as needed.
        """
        return []
    def get_persisted_user_default_config_fields(self):
        """
        Returns a list of field names that should have their last used values
        persisted on a per-project, per-user basis.

        Base implementation persists nothing; providers override as needed.
        """
        return []
    def store_issue_last_defaults(self, project, user, data):
        """
        Stores the last used field defaults on a per-project basis. This
        accepts a dict of values that will be filtered to keys returned by
        ``get_persisted_default_config_fields`` which will automatically be
        merged into the associated field config object as the default.

        >>> integ.store_issue_last_defaults(project, user, {'externalProject': 2})

        When the integration is serialized these values will automatically be
        merged into the field configuration objects.

        NOTE: These are currently stored for both link and create issue, no
        differentiation is made between the two field configs.
        """
        persisted_fields = self.get_persisted_default_config_fields()
        if persisted_fields:
            # Project-scoped defaults live on the org integration's config,
            # keyed by the stringified project id.
            project_defaults = {k: v for k, v in six.iteritems(data) if k in persisted_fields}
            self.org_integration.config.setdefault("project_issue_defaults", {}).setdefault(
                six.text_type(project.id), {}
            ).update(project_defaults)
            self.org_integration.save()

        user_persisted_fields = self.get_persisted_user_default_config_fields()
        if user_persisted_fields:
            # User-scoped defaults are stored per (user, project) in a
            # UserOption, nested under the integration provider's name.
            user_defaults = {k: v for k, v in six.iteritems(data) if k in user_persisted_fields}
            user_option_key = dict(user=user, key="issue:defaults", project=project)
            new_user_defaults = UserOption.objects.get_value(default={}, **user_option_key)
            new_user_defaults.setdefault(self.org_integration.integration.provider, {}).update(
                user_defaults
            )
            UserOption.objects.set_value(value=new_user_defaults, **user_option_key)
def get_defaults(self, project, user):
    """
    Merge the stored per-project and per-user issue field defaults.

    User-scoped values take precedence over project-scoped ones.
    """
    option_key = dict(user=user, key="issue:defaults", project=project)
    provider = self.org_integration.integration.provider
    per_user = UserOption.objects.get_value(default={}, **option_key).get(provider, {})

    merged = dict(self.get_project_defaults(project.id))
    merged.update(per_user)
    return merged
# TODO(saif): Make private and move all usages over to `get_defaults`
def get_project_defaults(self, project_id):
    """
    Return the stored per-project issue field defaults for
    ``project_id`` (empty dict when none have been saved).
    """
    all_defaults = self.org_integration.config.get("project_issue_defaults", {})
    return all_defaults.get(six.text_type(project_id), {})
def create_issue(self, data, **kwargs):
    """
    Create an issue through the provider's API.

    Implementations must return a dict carrying the new issue's
    ``key``, ``title`` and ``description``, and must translate API
    client failures into an IntegrationError (the `message_from_error`
    helper can build the message).

    >>> def create_issue(self, data, **kwargs):
    >>>     resp = self.get_client().create_issue(data)
    >>>     return {
    >>>         'key': resp['id'],
    >>>         'title': resp['title'],
    >>>         'description': resp['description'],
    >>>     }
    """
    raise NotImplementedError
def get_issue(self, issue_id, **kwargs):
    """
    Fetch an issue through the provider's API.

    Implementations must return a dict carrying the issue's ``key``,
    ``title`` and ``description``, and must translate API client
    failures into an IntegrationError (the `message_from_error` helper
    can build the message).

    >>> def get_issue(self, data, **kwargs):
    >>>     resp = self.get_client().get_issue(issue_id)
    >>>     return {
    >>>         'key': resp['id'],
    >>>         'title': resp['title'],
    >>>         'description': resp['description'],
    >>>     }
    """
    raise NotImplementedError
def after_link_issue(self, external_issue, **kwargs):
    """
    Hook run after an external issue has been linked via `get_issue`,
    e.g. to leave a comment on the newly linked issue.

    No-op by default; subclasses override when follow-up work is needed.
    """
    pass
def make_external_key(self, data):
    """
    Build the formatted external key from the result of `get_issue` or
    `create_issue`.
    """
    external_key = data["key"]
    return external_key
def get_issue_display_name(self, external_issue):
    """
    Return a human-friendly display name for the issue.

    Optional: integrations whose external issue key is not a good
    display name override this; the empty-string default tells callers
    to fall back to the key.
    """
    return ""
def get_repository_choices(self, group, **kwargs):
    """
    Return ``(default_repo, repo_choices)`` for the installation.

    ``repo_choices`` is a list of ``(identifier, name)`` tuples built
    from the provider's repositories; ``default_repo`` is the identifier
    to preselect, taken from (in order): ``kwargs['repo']``, the request
    ``params``, the stored per-project defaults, and finally the first
    available repository. When no repository can be determined at all,
    returns ``("", repo_choices)``.

    Raises IntegrationError when the provider API call fails.
    """
    try:
        repos = self.get_repositories()
    except ApiError:
        # Fixed typo in the user-facing message ("retrive" -> "retrieve").
        raise IntegrationError("Unable to retrieve repositories. Please try again later.")

    repo_choices = [(repo["identifier"], repo["name"]) for repo in repos]

    repo = kwargs.get("repo")
    if not repo:
        params = kwargs.get("params", {})
        defaults = self.get_project_defaults(group.project_id)
        repo = params.get("repo", defaults.get("repo"))

    try:
        default_repo = repo or repo_choices[0][0]
    except IndexError:
        return "", repo_choices

    # If a repo has been selected outside of the default list of
    # repos, stick it onto the front of the list so that it can be
    # selected.
    if not any(identifier == default_repo for identifier, _ in repo_choices):
        repo_choices.insert(0, self.create_default_repo_choice(default_repo))

    return default_repo, repo_choices
def create_default_repo_choice(self, default_repo):
    """
    Helper for `get_repository_choices`: wrap the default repo
    identifier as a ``(value, label)`` choice tuple so it can be added
    to the repository choice list.
    """
    return (default_repo, default_repo)
def get_annotations_for_group_list(self, group_list):
    """
    Build issue-link annotations for each group in ``group_list``.

    Returns a defaultdict mapping group id -> list of HTML anchor
    strings, one per external issue from this integration that the
    group references.
    """
    group_links = GroupLink.objects.filter(
        group_id__in=[group.id for group in group_list],
        project_id__in=list(set(group.project.id for group in group_list)),
        linked_type=GroupLink.LinkedType.issue,
        relationship=GroupLink.Relationship.references,
    )

    external_issues = ExternalIssue.objects.filter(
        id__in=[group_link.linked_id for group_link in group_links],
        integration_id=self.model.id,
    )

    # Index issues by id once so each link resolves in O(1) instead of
    # re-scanning the whole issue list per link. ids are primary keys,
    # so each linked_id maps to at most one issue — equivalent to the
    # previous linear filter.
    issues_by_id = {issue.id: issue for issue in external_issues}

    # group annotations by group id
    annotations_by_group_id = defaultdict(list)
    for group_link in group_links:
        issue = issues_by_id.get(group_link.linked_id)
        issues_for_group = [issue] if issue is not None else []
        # Indexing the defaultdict even for an empty match preserves the
        # original behavior of creating an (empty) entry for the group.
        annotations_by_group_id[group_link.group_id].extend(
            self.map_external_issues_to_annotations(issues_for_group)
        )

    return annotations_by_group_id
def map_external_issues_to_annotations(self, external_issues):
    """
    Render each external issue as an HTML anchor pointing at the
    provider's issue page, labeled with its display name (falling back
    to the raw issue key when no display name is provided).
    """
    return [
        '<a href="%s">%s</a>'
        % (self.get_issue_url(issue.key), self.get_issue_display_name(issue) or issue.key)
        for issue in external_issues
    ]
def get_comment_id(self, comment):
    """
    Extract the provider-side identifier from a comment payload dict.
    """
    comment_id = comment["id"]
    return comment_id
def create_comment(self, issue_id, user_id, group_note):
    """
    Create a comment on the external issue. No-op by default; providers
    that support comment sync override this.
    """
    pass
def update_comment(self, issue_id, user_id, group_note):
    """
    Update an existing comment on the external issue. No-op by default;
    providers that support comment sync override this.
    """
    pass
class IssueSyncMixin(IssueBasicMixin):
    """
    Extends IssueBasicMixin with two-way syncing of issue state between
    Sentry and the external provider (status and assignee, outbound and
    inbound).
    """

    # Org-integration config keys gating each sync direction. A value of
    # None means the subclass does not support that kind of sync (see
    # should_sync below).
    comment_key = None
    outbound_status_key = None
    inbound_status_key = None
    outbound_assignee_key = None
    inbound_assignee_key = None

    def should_sync(self, attribute):
        """
        Return True when syncing is enabled for ``attribute`` (e.g.
        "inbound_status"): the class declares ``<attribute>_key`` with a
        non-None value AND the org-integration config has that key set
        to a truthy value.
        """
        try:
            key = getattr(self, "%s_key" % attribute)
        except AttributeError:
            return False

        if key is None:
            return False

        config = self.org_integration.config
        return config.get(key, False)

    def sync_assignee_outbound(self, external_issue, user, assign=True, **kwargs):
        """
        Propagate a sentry issue's assignee to a linked issue's assignee.
        If assign=True, we're assigning the issue. Otherwise, deassign.
        """
        raise NotImplementedError

    def sync_status_outbound(self, external_issue, is_resolved, project_id, **kwargs):
        """
        Propagate a sentry issue's status to a linked issue's status.
        """
        raise NotImplementedError

    def should_unresolve(self, data):
        """
        Given webhook data, check whether the status category changed
        FROM "done" to something else, meaning the sentry issue should
        be marked as unresolved.

        >>> def should_unresolve(self, data):
        >>>     client = self.get_client()
        >>>     statuses = client.get_statuses()
        >>>     done_statuses = [s['id'] for s in statuses if s['category'] == 'done']
        >>>     return data['from_status'] in done_statuses \
        >>>         and data['to_status'] not in done_statuses
        """
        raise NotImplementedError

    def should_resolve(self, data):
        """
        Given webhook data, check whether the status category changed
        TO "done" from something else, meaning the sentry issue should
        be marked as resolved.

        See the example on `should_unresolve` above.
        """
        raise NotImplementedError

    def update_group_status(self, groups, status, activity_type):
        """
        Bulk-set ``status`` on ``groups`` and, when at least one row
        actually changed, create and send an Activity notification for
        each group.
        """
        # exclude(status=status) makes `updated` count only rows whose
        # status really changed, so notifications fire only then.
        updated = (
            Group.objects.filter(id__in=[g.id for g in groups])
            .exclude(status=status)
            .update(status=status)
        )
        if updated:
            for group in groups:
                activity = Activity.objects.create(
                    project=group.project, group=group, type=activity_type
                )
                activity.send_notification()

    def sync_status_inbound(self, issue_key, data):
        """
        Handle an inbound (provider -> Sentry) status-change webhook for
        ``issue_key``: resolve or unresolve each affected Sentry group.

        Bails out early unless inbound status sync is enabled for this
        integration and the organization has the issue-sync feature.
        """
        if not self.should_sync("inbound_status"):
            return

        organization = Organization.objects.get(id=self.organization_id)
        has_issue_sync = features.has("organizations:integrations-issue-sync", organization)
        if not has_issue_sync:
            return

        # Only groups in this organization that reference the external issue.
        affected_groups = list(
            Group.objects.get_groups_by_external_issue(self.model, issue_key)
            .filter(project__organization_id=self.organization_id)
            .select_related("project")
        )

        groups_to_resolve = []
        groups_to_unresolve = []

        should_resolve = self.should_resolve(data)
        should_unresolve = self.should_unresolve(data)

        for group in affected_groups:
            # this probably shouldn't be possible unless there
            # is a bug in one of those methods
            if should_resolve is True and should_unresolve is True:
                logger.warning(
                    "sync-config-conflict",
                    extra={
                        "organization_id": group.project.organization_id,
                        "integration_id": self.model.id,
                        "provider": self.model.get_provider(),
                    },
                )
                continue

            if should_unresolve:
                groups_to_unresolve.append(group)
            elif should_resolve:
                groups_to_resolve.append(group)

        if groups_to_resolve:
            self.update_group_status(groups_to_resolve, GroupStatus.RESOLVED, Activity.SET_RESOLVED)

        if groups_to_unresolve:
            self.update_group_status(
                groups_to_unresolve, GroupStatus.UNRESOLVED, Activity.SET_UNRESOLVED
            )
# NOTE: The following lines are extraction residue (dataset-viewer footer
# text), not part of the module. Kept as comments so the file stays valid:
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.