gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
## ParseMaster, version 1.0 (pre-release) (2005/05/12) x6
## Copyright 2005, Dean Edwards
## Web: http://dean.edwards.name/
##
## This software is licensed under the CC-GNU LGPL
## Web: http://creativecommons.org/licenses/LGPL/2.1/
##
## Ported to Python by Florian Schulze
import os, re, sys
# a multi-pattern parser
class Pattern:
    """One parser rule: a regex source string, its replacement, and the
    number of capture groups the rule contributes to the combined regex."""

    def __init__(self, expression, replacement, length):
        """Store the rule's regex text, its replacement (string, group
        index or callable) and its total group count."""
        self.expression = expression
        self.replacement = replacement
        self.length = length

    def __str__(self):
        """Render the rule as a parenthesised alternative so it can be
        OR-ed together with the other rules."""
        return "(%s)" % self.expression
class Patterns(list):
    """A list of Pattern rules; str() joins them into a single
    alternation regex (rule1)|(rule2)|... preserving registration order."""

    def __str__(self):
        parts = []
        for rule in self:
            parts.append(str(rule))
        return "|".join(parts)
class ParseMaster:
    """A multi-pattern regex search-and-replace engine (Python 2 port).

    Rules are registered with add() and applied in one pass by execute():
    all rule expressions are combined into a single alternation, so earlier
    rules take precedence.  A replacement may be a string (with $1..$9
    back-references), a bare group index, or a callable; by default the
    match is deleted.
    """
    # constants (indices into a rule tuple; kept for API compatibility)
    EXPRESSION = 0
    REPLACEMENT = 1
    LENGTH = 2
    # counts capture groups in an expression (every "(" after escapes are stripped)
    GROUPS = re.compile(r"""\(""", re.M)#g
    # does a replacement string contain "$n" back-references at its start?
    SUB_REPLACE = re.compile(r"""\$\d""", re.M)
    # is the replacement exactly "$n" (a bare group lookup)?
    INDEXED = re.compile(r"""^\$\d+$""", re.M)
    # trims a quote-concatenation artefact from built replacement strings
    TRIM = re.compile(r"""(['"])\1\+(.*)\+\1\1$""", re.M)
    # backslash escape sequences (removed before counting groups)
    ESCAPE = re.compile(r"""\\.""", re.M)#g
    #QUOTE = re.compile(r"""'""", re.M)
    # text scheduled for deletion is wrapped in \x01 sentinel characters
    DELETED = re.compile("""\x01[^\x01]*\x01""", re.M)#g

    def __init__(self):
        # private
        self._patterns = Patterns()  # patterns stored by index
        self._escaped = []           # chars stashed by _escape(), restored by _unescape()
        self.ignoreCase = False      # when True, compile the combined regex with re.I
        self.escapeChar = None       # e.g. "\\" to protect escape pairs during execute()

    def DELETE(self, match, offset):
        """Default replacement: wrap the match in \\x01 markers; execute()
        strips everything between markers at the end."""
        return "\x01" + match.group(offset) + "\x01"

    def _repl(self, a, o, r, i):
        """Expand "$1".."$i" in replacement template r, reading groups of
        match a relative to group offset o (highest index first so "$12"
        is not clobbered by "$1")."""
        while (i):
            m = a.group(o+i-1)
            if m is None:
                s = ""
            else:
                s = m
            r = r.replace("$" + str(i), s)
            i = i - 1
        r = ParseMaster.TRIM.sub("$1", r)
        return r

    # public
    def add(self, expression="^$", replacement=None):
        """Register a rule.  `replacement` may be None (delete the match),
        a plain string, a "$n" lookup, or a template with back-references."""
        if replacement is None:
            replacement = self.DELETE
        # count the number of sub-expressions
        # - add one because each pattern is itself a sub-expression
        length = len(ParseMaster.GROUPS.findall(self._internalEscape(str(expression)))) + 1
        # does the pattern deal with sub-expressions?
        if (isinstance(replacement, str) and ParseMaster.SUB_REPLACE.match(replacement)):
            # a simple lookup? (e.g. "$2")
            if (ParseMaster.INDEXED.match(replacement)):
                # store the index (used for fast retrieval of matched strings)
                replacement = int(replacement[1:]) - 1
            else: # a complicated lookup (e.g. "Hello $2 $1")
                # build a function to do the lookup
                # (bind r and i now so later add() calls don't change them)
                i = length
                r = replacement
                replacement = lambda a,o: self._repl(a,o,r,i)
        # pass the modified arguments
        self._patterns.append(Pattern(expression, replacement, length))

    # execute the global replacement
    def execute(self, string):
        """Apply all registered rules to `string` in one combined pass and
        return the transformed string."""
        if self.ignoreCase:
            r = re.compile(str(self._patterns), re.I | re.M)
        else:
            r = re.compile(str(self._patterns), re.M)
        string = self._escape(string, self.escapeChar)
        string = r.sub(self._replacement, string)
        string = self._unescape(string, self.escapeChar)
        string = ParseMaster.DELETED.sub("", string)
        return string

    # clear the patterns collections so that this object may be re-used
    def reset(self):
        self._patterns = Patterns()

    # this is the global replace function (it's quite complicated)
    def _replacement(self, match):
        """Determine which rule matched by walking cumulative group offsets,
        then apply that rule's replacement (callable, group index or string)."""
        i = 1
        # loop through the patterns
        for pattern in self._patterns:
            if match.group(i) is not None:
                replacement = pattern.replacement
                if callable(replacement):
                    return replacement(match, i)
                # NOTE: `long` makes this Python 2 only
                elif isinstance(replacement, (int, long)):
                    return match.group(replacement+i)
                else:
                    return replacement
            else:
                i = i+pattern.length

    # encode escaped characters
    def _escape(self, string, escapeChar=None):
        """Replace each escapeChar+<char> pair with a bare escapeChar,
        stashing <char> in order for _unescape() to restore."""
        def repl(match):
            char = match.group(1)
            self._escaped.append(char)
            return escapeChar
        if escapeChar is None:
            return string
        r = re.compile("\\"+escapeChar+"(.)", re.M)
        result = r.sub(repl, string)
        return result

    # decode escaped characters
    def _unescape(self, string, escapeChar=None):
        """Restore the characters stashed by _escape(), in order."""
        def repl(match):
            try:
                #result = eval("'"+escapeChar + self._escaped.pop(0)+"'")
                result = escapeChar + self._escaped.pop(0)
                return result
            except IndexError:
                return escapeChar
        if escapeChar is None:
            return string
        r = re.compile("\\"+escapeChar, re.M)
        result = r.sub(repl, string)
        return result

    def _internalEscape(self, string):
        # strip escape sequences so escaped parentheses are not counted as groups
        return ParseMaster.ESCAPE.sub("", string)
## packer, version 2.0 (2005/04/20)
## Copyright 2004-2005, Dean Edwards
## License: http://creativecommons.org/licenses/LGPL/2.1/
## Ported to Python by Florian Schulze
## http://dean.edwards.name/packer/
class JavaScriptPacker:
    """Python port of Dean Edwards' JavaScript "packer" (Python 2 era code).

    Compresses JavaScript source (whitespace/comment removal, identifier
    shortening) and optionally base-N encodes its keywords, emitting a
    self-unpacking eval() wrapper.
    """
    def __init__(self):
        self._basicCompressionParseMaster = self.getCompressionParseMaster(False)
        self._specialCompressionParseMaster = self.getCompressionParseMaster(True)

    def basicCompression(self, script):
        """Whitespace/comment compression without ';;;' line stripping."""
        return self._basicCompressionParseMaster.execute(script)

    def specialCompression(self, script):
        """Compression that additionally removes ';;; ...' debug lines."""
        return self._specialCompressionParseMaster.execute(script)

    def getCompressionParseMaster(self, specialChars):
        """Build the ParseMaster that performs source-level compression.

        specialChars: when True, also strip lines starting with ';;;'.
        """
        IGNORE = "$1"
        parser = ParseMaster()
        parser.escapeChar = '\\'
        # protect strings
        parser.add(r"""'[^']*?'""", IGNORE)
        parser.add(r'"[^"]*?"', IGNORE)
        # remove comments
        parser.add(r"""//[^\n\r]*?[\n\r]""")
        parser.add(r"""/\*[^*]*?\*+([^/][^*]*?\*+)*?/""")
        # protect regular expressions
        parser.add(r"""\s+(\/[^\/\n\r\*][^\/\n\r]*\/g?i?)""", "$2")
        parser.add(r"""[^\w\$\/'"*)\?:]\/[^\/\n\r\*][^\/\n\r]*\/g?i?""", IGNORE)
        # remove: ;;; doSomething();
        if specialChars:
            parser.add(""";;;[^\n\r]+[\n\r]""")
        # remove redundant semi-colons
        parser.add(r""";+\s*([};])""", "$2")
        # remove white-space
        parser.add(r"""(\b|\$)\s+(\b|\$)""", "$2 $3")
        parser.add(r"""([+\-])\s+([+\-])""", "$2 $3")
        parser.add(r"""\s+""", "")
        return parser

    def getEncoder(self, ascii):
        """Return a function mapping an integer to a short keyword string.

        ascii <= 10 uses decimal digits, <= 36 base36, <= 62 base62,
        anything larger uses high-ASCII (base95) characters.
        """
        mapping = {}
        base = ord('0')
        mapping.update(dict([(i, chr(i+base)) for i in range(10)]))
        base = ord('a')
        mapping.update(dict([(i+10, chr(i+base)) for i in range(26)]))
        base = ord('A')
        mapping.update(dict([(i+36, chr(i+base)) for i in range(26)]))
        base = 161
        mapping.update(dict([(i+62, chr(i+base)) for i in range(95)]))
        # zero encoding
        # characters: 0123456789
        def encode10(charCode):
            return str(charCode)
        # inherent base36 support
        # characters: 0123456789abcdefghijklmnopqrstuvwxyz
        def encode36(charCode):
            l = []
            remainder = charCode
            while 1:
                result, remainder = divmod(remainder, 36)
                l.append(mapping[remainder])
                if not result:
                    break
                remainder = result
            l.reverse()
            return "".join(l)
        # hitch a ride on base36 and add the upper case alpha characters
        # characters: 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ
        def encode62(charCode):
            l = []
            remainder = charCode
            while 1:
                result, remainder = divmod(remainder, 62)
                l.append(mapping[remainder])
                if not result:
                    break
                remainder = result
            l.reverse()
            return "".join(l)
        # use high-ascii values
        def encode95(charCode):
            l = []
            remainder = charCode
            while 1:
                result, remainder = divmod(remainder, 95)
                l.append(mapping[remainder+62])
                if not result:
                    break
                remainder = result
            l.reverse()
            return "".join(l)
        if ascii <= 10:
            return encode10
        elif ascii <= 36:
            return encode36
        elif ascii <= 62:
            return encode62
        return encode95

    def escape(self, script):
        """Escape backslashes, single quotes and newlines so the packed
        source can be embedded in a single-quoted JS string literal."""
        script = script.replace("\\","\\\\")
        script = script.replace("'","\\'")
        script = script.replace('\n','\\n')
        #return re.sub(r"""([\\'](?!\n))""", "\\$1", script)
        return script

    def escape95(self, script):
        """Hex-escape characters above \\xa1 (those the base95 encoder
        emits) so they survive embedding in the packed string."""
        result = []
        for x in script:
            if x>'\xa1':
                x = "\\x%0x" % ord(x)
            result.append(x)
        return "".join(result)

    def encodeKeywords(self, script, encoding, fastDecode):
        """Replace every word in `script` with a base-`encoding` code and
        wrap the result in a self-decoding bootstrap."""
        # escape high-ascii values already in the script (i.e. in strings)
        if (encoding > 62):
            script = self.escape95(script)
        # create the parser
        parser = ParseMaster()
        encode = self.getEncoder(encoding)
        # for high-ascii, don't encode single character low-ascii
        if encoding > 62:
            regexp = r"""\w\w+"""
        else:
            regexp = r"""\w+"""
        # build the word list
        keywords = self.analyze(script, regexp, encode)
        encoded = keywords['encoded']
        # encode
        def repl(match, offset):
            return encoded.get(match.group(offset), "")
        parser.add(regexp, repl)
        # if encoded, wrap the script in a decoding function
        script = parser.execute(script)
        script = self.bootStrap(script, keywords, encoding, fastDecode)
        return script

    def analyze(self, script, regexp, encode):
        """Count word frequencies in `script` and assign each word a code.

        Returns {'sorted': words by frequency, 'encoded': word->code,
        'protected': indices of words that collide with their own code}.
        """
        # analyse
        # retreive all words in the script
        regexp = re.compile(regexp, re.M)
        all = regexp.findall(script)
        sorted = [] # list of words sorted by frequency
        encoded = {} # dictionary of word->encoding
        protected = {} # instances of "protected" words
        if all:
            unsorted = []
            _protected = {}
            values = {}
            count = {}
            all.reverse()
            for word in all:
                word = "$"+word
                if word not in count:
                    count[word] = 0
                    j = len(unsorted)
                    unsorted.append(word)
                    # make a dictionary of all of the protected words in this script
                    # these are words that might be mistaken for encoding
                    values[j] = encode(j)
                    _protected["$"+values[j]] = j
                count[word] = count[word] + 1
            # prepare to sort the word list, first we must protect
            # words that are also used as codes. we assign them a code
            # equivalent to the word itself.
            # e.g. if "do" falls within our encoding range
            # then we store keywords["do"] = "do";
            # this avoids problems when decoding
            sorted = [None] * len(unsorted)
            for word in unsorted:
                if word in _protected and isinstance(_protected[word], int):
                    sorted[_protected[word]] = word[1:]
                    protected[_protected[word]] = True
                    count[word] = 0
            # Python 2 cmp-style sort: most frequent words first
            unsorted.sort(lambda a,b: count[b]-count[a])
            j = 0
            for i in range(len(sorted)):
                if sorted[i] is None:
                    sorted[i] = unsorted[j][1:]
                    j = j + 1
                encoded[sorted[i]] = values[i]
        return {'sorted': sorted, 'encoded': encoded, 'protected': protected}

    def encodePrivate(self, charCode):
        """Encode a private (single-underscore) identifier index as _<n>."""
        return "_"+str(charCode)

    def encodeSpecialChars(self, script):
        """Shorten $-prefixed identifiers and renumber _-prefixed ones."""
        parser = ParseMaster()
        # replace: $name -> n, $$name -> $$na
        def repl(match, offset):
            #print offset, match.groups()
            length = len(match.group(offset + 2))
            start = length - max(length - len(match.group(offset + 3)), 0)
            return match.group(offset + 1)[start:start+length] + match.group(offset + 4)
        parser.add(r"""((\$+)([a-zA-Z\$_]+))(\d*)""", repl)
        # replace: _name -> _0, double-underscore (__name) is ignored
        regexp = r"""\b_[A-Za-z\d]\w*"""
        # build the word list
        keywords = self.analyze(script, regexp, self.encodePrivate)
        # quick ref
        encoded = keywords['encoded']
        def repl(match, offset):
            return encoded.get(match.group(offset), "")
        parser.add(regexp, repl)
        return parser.execute(script)

    # build the boot function used for loading and decoding
    def bootStrap(self, packed, keywords, encoding, fastDecode):
        """Wrap the packed source in the eval()-able unpacking function."""
        ENCODE = re.compile(r"""\$encode\(\$count\)""")
        # $packed: the packed script
        #packed = self.escape(packed)
        #packed = [packed[x*10000:(x+1)*10000] for x in range((len(packed)/10000)+1)]
        #packed = "'" + "'+\n'".join(packed) + "'\n"
        packed = "'" + self.escape(packed) + "'"
        # $count: number of words contained in the script
        count = len(keywords['sorted'])
        # $ascii: base for encoding
        ascii = min(count, encoding) or 1
        # $keywords: list of words contained in the script
        for i in keywords['protected']:
            keywords['sorted'][i] = ""
        # convert from a string to an array
        keywords = "'" + "|".join(keywords['sorted']) + "'.split('|')"
        encoding_functions = {
            10: """ function($charCode) {
return $charCode;
}""",
            36: """ function($charCode) {
return $charCode.toString(36);
}""",
            62: """ function($charCode) {
return ($charCode < _encoding ? "" : arguments.callee(parseInt($charCode / _encoding))) +
(($charCode = $charCode % _encoding) > 35 ? String.fromCharCode($charCode + 29) : $charCode.toString(36));
}""",
            95: """ function($charCode) {
return ($charCode < _encoding ? "" : arguments.callee($charCode / _encoding)) +
String.fromCharCode($charCode % _encoding + 161);
}"""
        }
        # $encode: encoding function (used for decoding the script)
        encode = encoding_functions[encoding]
        encode = encode.replace('_encoding',"$ascii")
        encode = encode.replace('arguments.callee', "$encode")
        if ascii > 10:
            inline = "$count.toString($ascii)"
        else:
            inline = "$count"
        # $decode: code snippet to speed up decoding
        if fastDecode:
            # create the decoder
            decode = r"""// does the browser support String.replace where the
// replacement value is a function?
if (!''.replace(/^/, String)) {
// decode all the values we need
while ($count--) $decode[$encode($count)] = $keywords[$count] || $encode($count);
// global replacement function
$keywords = [function($encoded){return $decode[$encoded]}];
// generic match
$encode = function(){return'\\w+'};
// reset the loop counter - we are now doing a global replace
$count = 1;
}"""
            if encoding > 62:
                decode = decode.replace('\\\\w', "[\\xa1-\\xff]")
            else:
                # perform the encoding inline for lower ascii values
                if ascii < 36:
                    decode = ENCODE.sub(inline, decode)
            # special case: when $count==0 there ar no keywords. i want to keep
            # the basic shape of the unpacking funcion so i'll frig the code...
            if not count:
                # BUG FIX: was `raise NotImplemented` — NotImplemented is a
                # constant, not an exception class, so raising it fails with
                # a TypeError instead of the intended exception.
                raise NotImplementedError(
                    "fastDecode with zero keywords is not supported")
                #) $decode = $decode.replace(/(\$count)\s*=\s*1/, "$1=0");
        # boot function
        unpack = r"""function($packed, $ascii, $count, $keywords, $encode, $decode) {
while ($count--)
if ($keywords[$count])
$packed = $packed.replace(new RegExp("\\b" + $encode($count) + "\\b", "g"), $keywords[$count]);
return $packed;
}"""
        if fastDecode:
            # insert the decoder
            #unpack = re.sub(r"""\{""", "{" + decode + ";", unpack)
            unpack = unpack.replace('{', "{" + decode + ";", 1)
        if encoding > 62: # high-ascii
            # get rid of the word-boundaries for regexp matches
            unpack = re.sub(r"""'\\\\b'\s*\+|\+\s*'\\\\b'""", "", unpack)
        if ascii > 36 or encoding > 62 or fastDecode:
            # insert the encode function
            #unpack = re.sub(r"""\{""", "{$encode=" + encode + ";", unpack)
            unpack = unpack.replace('{', "{$encode=" + encode + ";", 1)
        else:
            # perform the encoding inline
            unpack = ENCODE.sub(inline, unpack)
        # pack the boot function too
        unpack = self.pack(unpack, 0, False, True)
        # arguments
        params = [packed, str(ascii), str(count), keywords]
        if fastDecode:
            # insert placeholders for the decoder
            params.extend(['0', "{}"])
        # the whole thing
        return "eval(" + unpack + "(" + ",".join(params) + "))\n"

    def pack(self, script, encoding=62, fastDecode=True, specialChars=True, compaction=True):
        """Compress `script` and return the packed JavaScript source.

        encoding: 0 disables keyword encoding; otherwise the encoder base.
        fastDecode: emit the fast in-browser decoder snippet.
        specialChars: shorten $/_-prefixed identifiers (implies compression).
        compaction: basic whitespace/comment compression (when not specialChars).
        """
        script = script+"\n"
        self._encoding = encoding
        self._fastDecode = fastDecode
        if specialChars:
            script = self.specialCompression(script)
            script = self.encodeSpecialChars(script)
        else:
            if compaction:
                script = self.basicCompression(script)
        if encoding:
            script = self.encodeKeywords(script, encoding, fastDecode)
        return script
def pack(paths, result_file, base_path=None):
    """Concatenate the JavaScript files in `paths` (each optionally prefixed
    with `base_path`), pack the combined source, and write it to `result_file`.

    Fixes over the original: input and output file handles are closed
    deterministically (they were previously leaked), and the sources are
    joined once instead of repeated string concatenation.
    """
    if base_path is None:
        base_path = ''
    packer = JavaScriptPacker()
    scripts = []
    for path in paths:
        if base_path:
            path = base_path + path
        f = open(path)
        try:
            scripts.append(f.read())
        finally:
            f.close()
    result = packer.pack(''.join(scripts), compaction=True, encoding=62,
                         fastDecode=True, specialChars=False)
    out = open(result_file, 'w')
    try:
        out.write(result)
    finally:
        out.close()
def run1():
    """Regression driver: packs each fixture and compares with the expected
    output (inline string or file on disk), printing ERROR on mismatch.

    Each entry is (script_or_filename, encoding, fastDecode, specialChars,
    expected_or_filename).  Python 2 only (print statements).
    """
    test_scripts = []
    test_scripts.append(("""// -----------------------------------------------------------------------
// public interface
// -----------------------------------------------------------------------
cssQuery.toString = function() {
return "function cssQuery() {\n [version " + version + "]\n}";
};""", 0, False, False, """cssQuery.toString=function(){return"function cssQuery() {\n [version "+version+"]\n}"};"""))
    test_scripts.append(("""function test(_localvar) {
var $name = 'foo';
var $$dummy = 2;
return $name + $$dummy;
}""", 0, False, True, """function test(_0){var n='foo';var du=2;return n+du}"""))
    test_scripts.append(("""function _test($localvar) {
var $name = 1;
var _dummy = 2;
var __foo = 3;
return $name + _dummy + $localvar + __foo;
}""", 0, False, True, """function _1(l){var n=1;var _0=2;var __foo=3;return n+_0+l+__foo}"""))
    test_scripts.append(("""function _test($localvar) {
var $name = 1;
var _dummy = 2;
var __foo = 3;
return $name + _dummy + $localvar + __foo;
}
function _bar(_ocalvar) {
var $name = 1;
var _dummy = 2;
var __foo = 3;
return $name + _dummy + $localvar + __foo;
}""", 0, False, True, """function _3(l){var n=1;var _0=2;var __foo=3;return n+_0+l+__foo}function _2(_1){var n=1;var _0=2;var __foo=3;return n+_0+l+__foo}"""))
    # file-based fixtures: expected output is read from the named file
    test_scripts.append(("cssQuery1.js", 0, False, False, "cssQuery1-p1.js"))
    test_scripts.append(("cssQuery.js", 0, False, False, "cssQuery-p1.js"))
    test_scripts.append(("pack.js", 0, False, False, "pack-p1.js"))
    test_scripts.append(("cssQuery.js", 0, False, True, "cssQuery-p2.js"))
    # the following ones are different, because javascript might use an
    # unstable sort algorithm while python uses an stable sort algorithm
    test_scripts.append(("pack.js", 0, False, True, "pack-p2.js"))
    test_scripts.append(("test.js", 0, False, True, """function _4(l){var n=1;var _0=2;var __foo=3;return n+_0+l+__foo}function _3(_1){var n=1;var _2=2;var __foo=3;return n+_2+l+__foo}"""))
    test_scripts.append(("test.js", 10, False, False, """eval(function(p,a,c,k,e,d){while(c--){if(k[c]){p=p.replace(new RegExp("\\b"+e(c)+"\\b","g"),k[c])}}return p}('8 13($6){0 $4=1;0 7=2;0 5=3;9 $4+7+$6+5}8 11(12){0 $4=1;0 10=2;0 5=3;9 $4+10+$6+5}',10,14,'var||||name|__foo|localvar|_dummy|function|return|_2|_bar|_ocalvar|_test'.split('|')))
"""))
    test_scripts.append(("test.js", 62, False, False, """eval(function(p,a,c,k,e,d){while(c--){if(k[c]){p=p.replace(new RegExp("\\b"+e(c)+"\\b","g"),k[c])}}return p}('8 d($6){0 $4=1;0 7=2;0 5=3;9 $4+7+$6+5}8 b(c){0 $4=1;0 a=2;0 5=3;9 $4+a+$6+5}',14,14,'var||||name|__foo|localvar|_dummy|function|return|_2|_bar|_ocalvar|_test'.split('|')))
"""))
    test_scripts.append(("test.js", 95, False, False, "test-p4.js"))
    test_scripts.append(("cssQuery.js", 0, False, True, "cssQuery-p3.js"))
    test_scripts.append(("cssQuery.js", 62, False, True, "cssQuery-p4.js"))
    import difflib
    p = JavaScriptPacker()
    for script, encoding, fastDecode, specialChars, expected in test_scripts:
        # fixture entries may name files on disk or carry inline source
        if os.path.exists(script):
            _script = open(script).read()
        else:
            _script = script
        if os.path.exists(expected):
            _expected = open(expected).read()
        else:
            _expected = expected
        print script[:20], encoding, fastDecode, specialChars, expected[:20]
        print "="*40
        result = p.pack(_script, encoding, fastDecode, specialChars)
        print len(result), len(_script)
        if (result != _expected):
            print "ERROR!!!!!!!!!!!!!!!!"
            print _expected
            print result
            #print list(difflib.unified_diff(result, _expected))
if __name__=='__main__':
    # CLI entry point: pack every script named on the command line into output.js
    pack(sys.argv[1:], 'output.js')
| |
import astropy.time
import numpy as np
import pytest
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose
from numpy.testing import assert_allclose
from poliastro.bodies import Earth
from poliastro.core.elements import coe2mee, coe2rv, mee2coe, rv2coe
from poliastro.twobody import angles
# Data from Schlesinger & Udick, 1912
# Each row pairs an eccentricity with a mean anomaly M and the corresponding
# true anomaly nu (both in degrees); used by the elliptic conversion tests.
ELLIPTIC_ANGLES_DATA = [
    # ecc, M (deg), nu (deg)
    (0.0, 0.0, 0.0),
    (0.05, 10.0, 11.06),
    (0.06, 30.0, 33.67),
    (0.04, 120.0, 123.87),
    (0.14, 65.0, 80.50),
    (0.19, 21.0, 30.94),
    (0.35, 65.0, 105.71),
    (0.48, 180.0, 180.0),
    (0.75, 125.0, 167.57),
]
@pytest.fixture()
def classical():
    """Set of classical orbital elements (p, ecc, inc, raan, argp, nu)."""
    return (
        11067.790,             # p [km]
        0.83285,               # ecc [one]
        np.deg2rad(87.87),     # inc [rad]
        np.deg2rad(227.89),    # raan [rad]
        np.deg2rad(53.38),     # argp [rad]
        np.deg2rad(92.335),    # nu [rad]
    )
@pytest.fixture()
def circular():
    """Gravitational parameter plus elements of a circular orbit."""
    k = 3.9860047e14
    elements = (24464560.0, 0.0, 0.122138, 1.00681, 0.0, 0.048363)
    return k, elements
@pytest.fixture()
def hyperbolic():
    """Gravitational parameter plus elements of a hyperbolic orbit."""
    k = 3.9860047e14
    elements = (
        4.884856334147761e7,
        1.7311,
        0.122138,
        1.00681,
        3.10686,
        0.12741601769795755,
    )
    return k, elements
@pytest.fixture()
def equatorial():
    """Gravitational parameter plus elements of an equatorial orbit."""
    k = 3.9860047e14
    elements = (
        1.13880762905224e7,
        0.7311,
        0.0,
        0.0,
        3.10686,
        0.44369564302687126,
    )
    return k, elements
@pytest.fixture()
def circular_equatorial():
    """Gravitational parameter plus elements of a circular equatorial orbit."""
    k = 3.9860047e14
    elements = (
        1.13880762905224e7,
        0.0,
        0.0,
        0.0,
        0.0,
        0.44369564302687126,
    )
    return k, elements
def test_true_to_eccentric():
    """nu -> E conversion matches the NASA-TR-R-158 reference table."""
    # Data from NASA-TR-R-158
    data = [
        # ecc,E (deg), nu(deg)
        (0.0, 0.0, 0.0),
        (0.05, 10.52321, 11.05994),
        (0.10, 54.67466, 59.49810),
        (0.35, 142.27123, 153.32411),
        (0.61, 161.87359, 171.02189),
    ]
    for row in data:
        ecc, expected_E, nu = row
        ecc = ecc * u.one
        expected_E = expected_E * u.deg
        nu = nu * u.deg
        E = angles.nu_to_E(nu, ecc)
        assert_quantity_allclose(E, expected_E, rtol=1e-6)
def test_true_to_eccentric_hyperbolic():
    """nu -> F conversion, checked against Curtis (2013), example 3.5."""
    # Data from Curtis, H. (2013). "Orbital mechanics for engineering students".
    # Example 3.5
    F = angles.nu_to_F(100 * u.deg, 2.7696 * u.one)
    assert_quantity_allclose(F, 2.2927 * u.rad, rtol=1e-4)
def test_mean_to_true():
    """M -> nu conversion against the Schlesinger & Udick table."""
    for ecc, M, expected_nu in ELLIPTIC_ANGLES_DATA:
        eccentricity = ecc * u.one
        nu = angles.E_to_nu(angles.M_to_E(M * u.deg, eccentricity), eccentricity)
        assert_quantity_allclose(nu, expected_nu * u.deg, rtol=1e-4)
def test_true_to_mean():
    """nu -> M conversion against the Schlesinger & Udick table."""
    for ecc, expected_M, nu in ELLIPTIC_ANGLES_DATA:
        eccentricity = ecc * u.one
        M = angles.E_to_M(angles.nu_to_E(nu * u.deg, eccentricity), eccentricity)
        assert_quantity_allclose(M, expected_M * u.deg, rtol=1e-4)
def test_true_to_mean_hyperbolic():
    """nu -> M for a hyperbolic orbit, Curtis (2013) example 3.5."""
    # Data from Curtis, H. (2013). "Orbital mechanics for engineering students".
    # Example 3.5
    ecc = 2.7696 * u.one
    M = angles.F_to_M(angles.nu_to_F(100 * u.deg, ecc), ecc)
    assert_quantity_allclose(M, 11.279 * u.rad, rtol=1e-4)
def test_mean_to_true_hyperbolic():
    """M -> nu for a hyperbolic orbit, Curtis (2013) example 3.5."""
    # Data from Curtis, H. (2013). "Orbital mechanics for engineering students".
    # Example 3.5
    ecc = 2.7696 * u.one
    nu = angles.F_to_nu(angles.M_to_F(11.279 * u.rad, ecc), ecc)
    assert_quantity_allclose(nu, 100 * u.deg, rtol=1e-4)
def test_flight_path_angle():
    """Flight path angle matches Curtis, example 2.5."""
    # Data from Curtis, example 2.5
    nu = 109.5 * u.deg
    ecc = 0.6 * u.one
    expected_gamma = 35.26 * u.deg
    # NOTE(review): np.deg2rad is applied to a Quantity already carrying
    # degree units — presumably astropy handles the conversion; confirm
    # the units fp_angle expects.
    gamma = angles.fp_angle(np.deg2rad(nu), ecc)
    assert_quantity_allclose(gamma, expected_gamma, rtol=1e-3)
@pytest.mark.parametrize(
    "expected_nu", np.linspace(-1 / 3.0, 1 / 3.0, num=100) * np.pi * u.rad
)
@pytest.mark.parametrize("ecc", [3200 * u.one, 1.5 * u.one])
def test_mean_to_true_hyperbolic_highecc(expected_nu, ecc):
    """nu -> M -> nu round-trip stays accurate even for very high eccentricity."""
    M = angles.F_to_M(angles.nu_to_F(expected_nu, ecc), ecc)
    nu = angles.F_to_nu(angles.M_to_F(M, ecc), ecc)
    assert_quantity_allclose(nu, expected_nu, rtol=1e-4)
@pytest.mark.parametrize("E", np.linspace(-1, 1, num=10) * np.pi * u.rad)
@pytest.mark.parametrize("ecc", np.linspace(0.1, 0.9, num=10) * u.one)
def test_eccentric_to_true_range(E, ecc):
    """E -> nu -> E round-trips across the full (-pi, pi) range."""
    nu = angles.E_to_nu(E, ecc)
    E_result = angles.nu_to_E(nu, ecc)
    assert_quantity_allclose(E_result, E, rtol=1e-8)
def test_convert_between_coe_and_rv_is_transitive(classical):
    """coe -> rv -> coe round-trips the classical elements."""
    k = Earth.k.to(u.km ** 3 / u.s ** 2).value  # u.km**3 / u.s**2
    roundtrip = rv2coe(k, *coe2rv(k, *classical))
    assert_allclose(roundtrip, classical)
def test_convert_between_coe_and_mee_is_transitive(classical):
    """coe -> mee -> coe round-trips the classical elements."""
    roundtrip = mee2coe(*coe2mee(*classical))
    assert_allclose(roundtrip, classical)
def test_convert_coe_and_rv_circular(circular):
    """rv2coe(coe2rv(x)) recovers x for a circular orbit."""
    k, expected = circular
    roundtrip = rv2coe(k, *coe2rv(k, *expected))
    assert_allclose(roundtrip, expected, atol=1e-8)
def test_convert_coe_and_rv_hyperbolic(hyperbolic):
    """rv2coe(coe2rv(x)) recovers x for a hyperbolic orbit."""
    k, expected = hyperbolic
    roundtrip = rv2coe(k, *coe2rv(k, *expected))
    assert_allclose(roundtrip, expected, atol=1e-8)
def test_convert_coe_and_rv_equatorial(equatorial):
    """rv2coe(coe2rv(x)) recovers x for an equatorial orbit."""
    k, expected = equatorial
    roundtrip = rv2coe(k, *coe2rv(k, *expected))
    assert_allclose(roundtrip, expected, atol=1e-8)
def test_convert_coe_and_rv_circular_equatorial(circular_equatorial):
    """rv2coe(coe2rv(x)) recovers x for a circular equatorial orbit."""
    k, expected = circular_equatorial
    roundtrip = rv2coe(k, *coe2rv(k, *expected))
    assert_allclose(roundtrip, expected, atol=1e-8)
def test_raan_from_ltan_metopb():
    """raan_from_ltan reproduces the published TLE RAAN for MetOp-B (~0.3 deg)."""
    # MetOp-B LTAN: 21:31:45
    # LTAN from https://www.ospo.noaa.gov/Operations/METOP/status.html
    # METOP-B
    # 1 38771U 12049A 20049.95408566 -.00000014 00000-0 13607-4 0 9997
    # 2 38771 98.7092 110.9899 0001263 48.5458 295.8781 14.21485930385043
    ltan = (21 + ((31 + 45 / 60) / 60)) * u.hourangle
    # Epoch rebuilt from the TLE day-of-year field (49.95408566);
    # "- 1" because day-of-year is 1-based relative to Jan 1.
    epoch = astropy.time.Time(
        astropy.time.Time("2020-01-01 00:00").to_value("mjd") + 49.95408566 - 1,
        format="mjd",
    )
    expected_raan = 110.9899 * u.deg
    raan = angles.raan_from_ltan(epoch, ltan)
    assert_allclose(raan.wrap_at(360 * u.deg).to(u.deg), expected_raan, atol=0.3)
def test_raan_from_ltan_sentinel5p():
    """raan_from_ltan reproduces the published TLE RAAN for Sentinel-5P (~0.3 deg)."""
    # SENTINEL-5P LTAN: 13:30
    # LTAN from https://sentinels.copernicus.eu/web/sentinel/missions/sentinel-5p/geographical-coverage
    # 1 42969U 17064A 20049.78099017 -.00000032 00000-0 54746-5 0 9991
    # 2 42969 98.7249 350.5997 0001077 82.0109 278.1189 14.19549365121775
    ltan = (13 + (30 / 60)) * u.hourangle
    # Epoch rebuilt from the TLE day-of-year field (49.78099017);
    # "- 1" because day-of-year is 1-based relative to Jan 1.
    epoch = astropy.time.Time(
        astropy.time.Time("2020-01-01 00:00").to_value("mjd") + 49.78099017 - 1,
        format="mjd",
    )
    expected_raan = 350.5997 * u.deg
    raan = angles.raan_from_ltan(epoch, ltan)
    assert_allclose(raan.wrap_at(360 * u.deg).to(u.deg), expected_raan, atol=0.3)
| |
"""
dj-stripe Plan Model Tests.
"""
from copy import deepcopy
from unittest.mock import patch
import pytest
import stripe
from django.test import TestCase
from djstripe.enums import PriceUsageType
from djstripe.models import Plan, Product, Subscription
from djstripe.settings import djstripe_settings
from . import (
FAKE_PLAN,
FAKE_PLAN_II,
FAKE_PLAN_METERED,
FAKE_PRODUCT,
FAKE_TIER_PLAN,
AssertStripeFksMixin,
)
# Run every test in this module with access to the Django test database.
pytestmark = pytest.mark.django_db
class PlanCreateTest(AssertStripeFksMixin, TestCase):
    """Tests for Plan.create() with the various accepted `product` forms
    (id string, raw Stripe dict, dj-stripe Product instance)."""

    def setUp(self):
        # Fetch a fake Stripe product dict once; stripe.Product.retrieve is
        # patched so no network call is made.
        with patch(
            "stripe.Product.retrieve",
            return_value=deepcopy(FAKE_PRODUCT),
            autospec=True,
        ):
            self.stripe_product = Product(id=FAKE_PRODUCT["id"]).api_retrieve()

    @patch(
        "stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
    )
    @patch("stripe.Plan.create", return_value=deepcopy(FAKE_PLAN), autospec=True)
    def test_create_from_product_id(self, plan_create_mock, product_retrieve_mock):
        """Plan.create accepts a plain product-id string."""
        fake_plan = deepcopy(FAKE_PLAN)
        # Plan.create takes a decimal amount; FAKE_PLAN presumably stores
        # cents (see amount_in_cents assertions elsewhere in this module).
        fake_plan["amount"] = fake_plan["amount"] / 100
        self.assertIsInstance(fake_plan["product"], str)
        plan = Plan.create(**fake_plan)
        expected_create_kwargs = deepcopy(FAKE_PLAN)
        expected_create_kwargs["api_key"] = djstripe_settings.STRIPE_SECRET_KEY
        plan_create_mock.assert_called_once_with(**expected_create_kwargs)
        self.assert_fks(plan, expected_blank_fks={"djstripe.Customer.coupon"})

    @patch(
        "stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
    )
    @patch("stripe.Plan.create", return_value=deepcopy(FAKE_PLAN), autospec=True)
    def test_create_from_stripe_product(self, plan_create_mock, product_retrieve_mock):
        """Plan.create accepts a raw Stripe product dict."""
        fake_plan = deepcopy(FAKE_PLAN)
        fake_plan["product"] = self.stripe_product
        fake_plan["amount"] = fake_plan["amount"] / 100
        self.assertIsInstance(fake_plan["product"], dict)
        plan = Plan.create(**fake_plan)
        expected_create_kwargs = deepcopy(FAKE_PLAN)
        expected_create_kwargs["product"] = self.stripe_product
        plan_create_mock.assert_called_once_with(
            api_key=djstripe_settings.STRIPE_SECRET_KEY, **expected_create_kwargs
        )
        self.assert_fks(plan, expected_blank_fks={"djstripe.Customer.coupon"})

    @patch(
        "stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
    )
    @patch("stripe.Plan.create", return_value=deepcopy(FAKE_PLAN), autospec=True)
    def test_create_from_djstripe_product(
        self, plan_create_mock, product_retrieve_mock
    ):
        """Plan.create accepts a dj-stripe Product model instance."""
        fake_plan = deepcopy(FAKE_PLAN)
        fake_plan["product"] = Product.sync_from_stripe_data(self.stripe_product)
        fake_plan["amount"] = fake_plan["amount"] / 100
        self.assertIsInstance(fake_plan["product"], Product)
        plan = Plan.create(**fake_plan)
        plan_create_mock.assert_called_once_with(
            api_key=djstripe_settings.STRIPE_SECRET_KEY, **FAKE_PLAN
        )
        self.assert_fks(plan, expected_blank_fks={"djstripe.Customer.coupon"})

    @patch(
        "stripe.Product.retrieve", return_value=deepcopy(FAKE_PRODUCT), autospec=True
    )
    @patch("stripe.Plan.create", return_value=deepcopy(FAKE_PLAN), autospec=True)
    def test_create_with_metadata(self, plan_create_mock, product_retrieve_mock):
        """Extra metadata is forwarded to the Stripe API call unchanged."""
        metadata = {"other_data": "more_data"}
        fake_plan = deepcopy(FAKE_PLAN)
        fake_plan["amount"] = fake_plan["amount"] / 100
        fake_plan["metadata"] = metadata
        self.assertIsInstance(fake_plan["product"], str)
        plan = Plan.create(**fake_plan)
        expected_create_kwargs = deepcopy(FAKE_PLAN)
        expected_create_kwargs["metadata"] = metadata
        plan_create_mock.assert_called_once_with(
            api_key=djstripe_settings.STRIPE_SECRET_KEY, **expected_create_kwargs
        )
        self.assert_fks(plan, expected_blank_fks={"djstripe.Customer.coupon"})
class PlanTest(AssertStripeFksMixin, TestCase):
    """Tests for syncing Plan objects from Stripe data and retrieving them."""

    def setUp(self):
        self.plan_data = deepcopy(FAKE_PLAN)
        # Product.retrieve is patched so sync does not hit the network.
        with patch(
            "stripe.Product.retrieve",
            return_value=deepcopy(FAKE_PRODUCT),
            autospec=True,
        ):
            self.plan = Plan.sync_from_stripe_data(self.plan_data)

    def test___str__(self):
        """str(plan) combines price, product name and subscription count."""
        subscriptions = Subscription.objects.filter(plan__id=self.plan.id).count()
        self.assertEqual(
            f"{self.plan.human_readable_price} for {FAKE_PRODUCT['name']} ({subscriptions} subscriptions)",
            str(self.plan),
        )

    @patch("stripe.Plan.retrieve", return_value=FAKE_PLAN, autospec=True)
    def test_stripe_plan(self, plan_retrieve_mock):
        """api_retrieve expands tiers; amount_in_cents mirrors amount*100."""
        stripe_plan = self.plan.api_retrieve()
        plan_retrieve_mock.assert_called_once_with(
            id=self.plan_data["id"],
            api_key=djstripe_settings.STRIPE_SECRET_KEY,
            expand=["tiers"],
            stripe_account=None,
        )
        plan = Plan.sync_from_stripe_data(stripe_plan)
        assert plan.amount_in_cents == plan.amount * 100
        assert isinstance(plan.amount_in_cents, int)
        self.assert_fks(plan, expected_blank_fks={"djstripe.Customer.coupon"})

    @patch("stripe.Product.retrieve", autospec=True)
    def test_stripe_plan_null_product(self, product_retrieve_mock):
        """
        assert that plan.Product can be null for backwards compatibility
        though note that it is a Stripe required field
        """
        plan_data = deepcopy(FAKE_PLAN_II)
        del plan_data["product"]
        plan = Plan.sync_from_stripe_data(plan_data)
        self.assert_fks(
            plan,
            expected_blank_fks={"djstripe.Customer.coupon", "djstripe.Plan.product"},
        )

    @patch("stripe.Plan.retrieve", autospec=True)
    def test_stripe_tier_plan(self, plan_retrieve_mock):
        """Tiered plans sync with amount=None and populated tiers."""
        tier_plan_data = deepcopy(FAKE_TIER_PLAN)
        plan = Plan.sync_from_stripe_data(tier_plan_data)
        self.assertEqual(plan.id, tier_plan_data["id"])
        self.assertIsNone(plan.amount)
        self.assertIsNotNone(plan.tiers)
        self.assert_fks(plan, expected_blank_fks={"djstripe.Customer.coupon"})

    @patch("stripe.Plan.retrieve", autospec=True)
    def test_stripe_metered_plan(self, plan_retrieve_mock):
        """Metered plans sync with usage_type 'metered' and a non-null amount."""
        plan_data = deepcopy(FAKE_PLAN_METERED)
        plan = Plan.sync_from_stripe_data(plan_data)
        self.assertEqual(plan.id, plan_data["id"])
        self.assertEqual(plan.usage_type, PriceUsageType.metered)
        self.assertIsNotNone(plan.amount)
        self.assert_fks(plan, expected_blank_fks={"djstripe.Customer.coupon"})
class TestHumanReadablePlan:
    """Tests for Plan.human_readable_price across plan/tier shapes."""
    #
    # Helpers
    #
    # NOTE: these helpers deliberately take no `self` — they are called at
    # class-body evaluation time inside the parametrize list below, not on
    # an instance.
    def get_fake_price_NONE_flat_amount():
        # tiered plan whose first tier has no flat amount at all
        FAKE_PRICE_TIER_NONE_FLAT_AMOUNT = deepcopy(FAKE_TIER_PLAN)
        FAKE_PRICE_TIER_NONE_FLAT_AMOUNT["tiers"][0]["flat_amount"] = None
        FAKE_PRICE_TIER_NONE_FLAT_AMOUNT["tiers"][0]["flat_amount_decimal"] = None
        return FAKE_PRICE_TIER_NONE_FLAT_AMOUNT

    def get_fake_price_0_flat_amount():
        # tiered plan whose first tier has a zero flat amount
        FAKE_PRICE_TIER_0_FLAT_AMOUNT = deepcopy(FAKE_TIER_PLAN)
        FAKE_PRICE_TIER_0_FLAT_AMOUNT["tiers"][0]["flat_amount"] = 0
        FAKE_PRICE_TIER_0_FLAT_AMOUNT["tiers"][0]["flat_amount_decimal"] = 0
        return FAKE_PRICE_TIER_0_FLAT_AMOUNT

    def get_fake_price_0_amount():
        # non-tiered plan with a zero amount
        FAKE_PRICE_TIER_0_AMOUNT = deepcopy(FAKE_PLAN)
        FAKE_PRICE_TIER_0_AMOUNT["amount"] = 0
        FAKE_PRICE_TIER_0_AMOUNT["amount_decimal"] = 0
        return FAKE_PRICE_TIER_0_AMOUNT

    @pytest.mark.parametrize(
        "fake_plan_data, expected_str",
        [
            (deepcopy(FAKE_PLAN), "$20.00 USD/month"),
            (get_fake_price_0_amount(), "$0.00 USD/month"),
            (
                deepcopy(FAKE_TIER_PLAN),
                "Starts at $10.00 USD per unit + $49.00 USD/month",
            ),
            (
                get_fake_price_0_flat_amount(),
                "Starts at $10.00 USD per unit + $0.00 USD/month",
            ),
            (
                get_fake_price_NONE_flat_amount(),
                "Starts at $10.00 USD per unit/month",
            ),
            (deepcopy(FAKE_PLAN_METERED), "$2.00 USD/month"),
        ],
    )
    def test_human_readable(self, fake_plan_data, expected_str, monkeypatch):
        """Each plan shape renders the expected human-readable price string."""
        def mock_product_get(*args, **kwargs):
            return deepcopy(FAKE_PRODUCT)

        def mock_price_get(*args, **kwargs):
            return fake_plan_data

        # monkeypatch stripe.Product.retrieve and stripe.Plan.retrieve calls to return
        # the desired json response.
        monkeypatch.setattr(stripe.Product, "retrieve", mock_product_get)
        monkeypatch.setattr(stripe.Plan, "retrieve", mock_price_get)
        plan = Plan.sync_from_stripe_data(fake_plan_data)
        assert plan.human_readable_price == expected_str
| |
# Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convenience wrapper for invoking APIs/factories w/ a project."""
import os
from google.cloud._helpers import _LocalStack
from google.cloud._helpers import (
_determine_default_project as _base_default_project)
from google.cloud.client import _ClientProjectMixin
from google.cloud.client import Client as _BaseClient
from google.cloud.datastore._http import Connection
from google.cloud.datastore import helpers
from google.cloud.datastore.batch import Batch
from google.cloud.datastore.entity import Entity
from google.cloud.datastore.key import Key
from google.cloud.datastore.query import Query
from google.cloud.datastore.transaction import Transaction
from google.cloud.environment_vars import GCD_DATASET
# Upper bound on lookup retries in _extended_lookup when the backend keeps
# returning "deferred" keys; prevents an unbounded retry loop.
_MAX_LOOPS = 128
"""Maximum number of iterations to wait for deferred keys."""
def _get_gcd_project():
    """Return the GCD application ID from the environment, or None."""
    return os.environ.get(GCD_DATASET)
def _determine_default_project(project=None):
    """Resolve the project, explicitly or implicitly as fall-back.

    Implicit detection supports four environments, in order of precedence:

    * DATASTORE_DATASET environment variable (for ``gcd`` / emulator testing)
    * GOOGLE_CLOUD_PROJECT environment variable
    * Google App Engine application ID
    * Google Compute Engine project ID (from metadata server)

    :type project: str
    :param project: Optional. The project to use as default.

    :rtype: str or ``NoneType``
    :returns: Default project if it can be determined.
    """
    if project is not None:
        return project
    # Emulator / gcd testing takes precedence over the generic detection.
    gcd_project = _get_gcd_project()
    if gcd_project is not None:
        return gcd_project
    return _base_default_project(project=None)
def _extended_lookup(connection, project, key_pbs,
                     missing=None, deferred=None,
                     eventual=False, transaction_id=None):
    """Repeat lookup until all keys found (unless stop requested).

    Helper function for :meth:`Client.get_multi`.

    :type connection: :class:`google.cloud.datastore._http.Connection`
    :param connection: The connection used to connect to datastore.

    :type project: str
    :param project: The project to make the request for.

    :type key_pbs: list of :class:`._generated.entity_pb2.Key`
    :param key_pbs: The keys to retrieve from the datastore.

    :type missing: list
    :param missing: (Optional) If a list is passed, the key-only entity
                    protobufs returned by the backend as "missing" will be
                    copied into it.

    :type deferred: list
    :param deferred: (Optional) If a list is passed, the key protobufs
                     returned by the backend as "deferred" will be copied
                     into it.

    :type eventual: bool
    :param eventual: If False (the default), request ``STRONG`` read
                     consistency. If True, request ``EVENTUAL`` read
                     consistency.

    :type transaction_id: str
    :param transaction_id: If passed, make the request in the scope of the
                           given transaction. Incompatible with
                           ``eventual==True``.

    :rtype: list of :class:`._generated.entity_pb2.Entity`
    :returns: The requested entities.
    :raises: :class:`ValueError` if missing / deferred are not null or
             empty list.
    """
    if missing is not None and missing != []:
        raise ValueError('missing must be None or an empty list')
    if deferred is not None and deferred != []:
        raise ValueError('deferred must be None or an empty list')

    entity_pbs = []
    # Retry while the backend keeps deferring keys, up to _MAX_LOOPS times.
    for _ in range(_MAX_LOOPS):
        found_pbs, missing_pbs, deferred_pbs = connection.lookup(
            project=project,
            key_pbs=key_pbs,
            eventual=eventual,
            transaction_id=transaction_id,
        )
        entity_pbs.extend(found_pbs)

        if missing is not None:
            missing.extend(missing_pbs)

        if deferred is not None:
            # The caller asked to be told about deferred keys: hand them
            # over and stop instead of retrying.
            deferred.extend(deferred_pbs)
            break

        if not deferred_pbs:
            break

        # The user didn't ask to know about deferred keys, so retry with
        # only the deferred ones.
        key_pbs = deferred_pbs

    return entity_pbs
class Client(_BaseClient, _ClientProjectMixin):
    """Convenience wrapper for invoking APIs/factories w/ a project.
    .. doctest::
       >>> from google.cloud import datastore
       >>> client = datastore.Client()
    :type project: str
    :param project: (optional) The project to pass to proxied API methods.
    :type namespace: str
    :param namespace: (optional) namespace to pass to proxied API methods.
    :type credentials: :class:`oauth2client.client.OAuth2Credentials` or
                       :class:`NoneType`
    :param credentials: The OAuth2 Credentials to use for the connection
                        owned by this client. If not passed (and if no ``http``
                        object is passed), falls back to the default inferred
                        from the environment.
    :type http: :class:`httplib2.Http` or class that defines ``request()``.
    :param http: An optional HTTP object to make requests. If not passed, an
                 ``http`` object is created that is bound to the
                 ``credentials`` for the current object.
    """
    # Connection class used by the _BaseClient machinery to build
    # self._connection from the credentials/http arguments.
    _connection_class = Connection
    def __init__(self, project=None, namespace=None,
                 credentials=None, http=None):
        _ClientProjectMixin.__init__(self, project=project)
        self.namespace = namespace
        # Stack of currently-active batches/transactions (thread-local).
        self._batch_stack = _LocalStack()
        super(Client, self).__init__(credentials, http)
    @staticmethod
    def _determine_default(project):
        """Helper: override default project detection."""
        return _determine_default_project(project)
    def _push_batch(self, batch):
        """Push a batch/transaction onto our stack.
        "Protected", intended for use by batch / transaction context mgrs.
        :type batch: :class:`google.cloud.datastore.batch.Batch`, or an object
                     implementing its API.
        :param batch: newly-active batch/transaction.
        """
        self._batch_stack.push(batch)
    def _pop_batch(self):
        """Pop a batch/transaction from our stack.
        "Protected", intended for use by batch / transaction context mgrs.
        :raises: IndexError if the stack is empty.
        :rtype: :class:`google.cloud.datastore.batch.Batch`, or an object
                implementing its API.
        :returns: the top-most batch/transaction, after removing it.
        """
        return self._batch_stack.pop()
    @property
    def current_batch(self):
        """Currently-active batch.
        :rtype: :class:`google.cloud.datastore.batch.Batch`, or an object
                implementing its API, or ``NoneType`` (if no batch is active).
        :returns: The batch/transaction at the top of the batch stack.
        """
        return self._batch_stack.top
    @property
    def current_transaction(self):
        """Currently-active transaction.
        :rtype: :class:`google.cloud.datastore.transaction.Transaction`, or an
                object implementing its API, or ``NoneType`` (if no transaction
                is active).
        :returns: The transaction at the top of the batch stack.
        """
        transaction = self.current_batch
        if isinstance(transaction, Transaction):
            return transaction
        # Falls through to an implicit None when the top of the stack is a
        # plain Batch (or the stack is empty).
    def get(self, key, missing=None, deferred=None, transaction=None):
        """Retrieve an entity from a single key (if it exists).
        .. note::
           This is just a thin wrapper over :meth:`get_multi`.
           The backend API does not make a distinction between a single key or
           multiple keys in a lookup request.
        :type key: :class:`google.cloud.datastore.key.Key`
        :param key: The key to be retrieved from the datastore.
        :type missing: list
        :param missing: (Optional) If a list is passed, the key-only entities
                        returned by the backend as "missing" will be copied
                        into it.
        :type deferred: list
        :param deferred: (Optional) If a list is passed, the keys returned
                         by the backend as "deferred" will be copied into it.
        :type transaction: :class:`~.transaction.Transaction`
        :param transaction: (Optional) Transaction to use for read consistency.
                            If not passed, uses current transaction, if set.
        :rtype: :class:`google.cloud.datastore.entity.Entity` or ``NoneType``
        :returns: The requested entity if it exists.
        """
        entities = self.get_multi(keys=[key], missing=missing,
                                  deferred=deferred, transaction=transaction)
        if entities:
            return entities[0]
    def get_multi(self, keys, missing=None, deferred=None, transaction=None):
        """Retrieve entities, along with their attributes.
        :type keys: list of :class:`google.cloud.datastore.key.Key`
        :param keys: The keys to be retrieved from the datastore.
        :type missing: list
        :param missing: (Optional) If a list is passed, the key-only entities
                        returned by the backend as "missing" will be copied
                        into it. If the list is not empty, an error will occur.
        :type deferred: list
        :param deferred: (Optional) If a list is passed, the keys returned
                         by the backend as "deferred" will be copied into it.
                         If the list is not empty, an error will occur.
        :type transaction: :class:`~.transaction.Transaction`
        :param transaction: (Optional) Transaction to use for read consistency.
                            If not passed, uses current transaction, if set.
        :rtype: list of :class:`google.cloud.datastore.entity.Entity`
        :returns: The requested entities.
        :raises: :class:`ValueError` if one or more of ``keys`` has a project
                 which does not match our project.
        """
        if not keys:
            return []
        # All keys must belong to this client's project.
        ids = set(key.project for key in keys)
        for current_id in ids:
            if current_id != self.project:
                raise ValueError('Keys do not match project')
        if transaction is None:
            transaction = self.current_transaction
        entity_pbs = _extended_lookup(
            connection=self._connection,
            project=self.project,
            key_pbs=[k.to_protobuf() for k in keys],
            missing=missing,
            deferred=deferred,
            # `transaction and transaction.id` is None when no transaction.
            transaction_id=transaction and transaction.id,
        )
        # _extended_lookup filled `missing`/`deferred` with raw protobufs;
        # convert them in place so the caller sees Entity / Key objects.
        if missing is not None:
            missing[:] = [
                helpers.entity_from_protobuf(missed_pb)
                for missed_pb in missing]
        if deferred is not None:
            deferred[:] = [
                helpers.key_from_protobuf(deferred_pb)
                for deferred_pb in deferred]
        return [helpers.entity_from_protobuf(entity_pb)
                for entity_pb in entity_pbs]
    def put(self, entity):
        """Save an entity in the Cloud Datastore.
        .. note::
           This is just a thin wrapper over :meth:`put_multi`.
           The backend API does not make a distinction between a single
           entity or multiple entities in a commit request.
        :type entity: :class:`google.cloud.datastore.entity.Entity`
        :param entity: The entity to be saved to the datastore.
        """
        self.put_multi(entities=[entity])
    def put_multi(self, entities):
        """Save entities in the Cloud Datastore.
        :type entities: list of :class:`google.cloud.datastore.entity.Entity`
        :param entities: The entities to be saved to the datastore.
        :raises: :class:`ValueError` if ``entities`` is a single entity.
        """
        if isinstance(entities, Entity):
            raise ValueError("Pass a sequence of entities")
        if not entities:
            return
        current = self.current_batch
        in_batch = current is not None
        # No active batch: create a throwaway one so the puts commit now.
        if not in_batch:
            current = self.batch()
            current.begin()
        for entity in entities:
            current.put(entity)
        if not in_batch:
            current.commit()
    def delete(self, key):
        """Delete the key in the Cloud Datastore.
        .. note::
           This is just a thin wrapper over :meth:`delete_multi`.
           The backend API does not make a distinction between a single key or
           multiple keys in a commit request.
        :type key: :class:`google.cloud.datastore.key.Key`
        :param key: The key to be deleted from the datastore.
        """
        self.delete_multi(keys=[key])
    def delete_multi(self, keys):
        """Delete keys from the Cloud Datastore.
        :type keys: list of :class:`google.cloud.datastore.key.Key`
        :param keys: The keys to be deleted from the Datastore.
        """
        if not keys:
            return
        # We allow partial keys to attempt a delete, the backend will fail.
        current = self.current_batch
        in_batch = current is not None
        # No active batch: create a throwaway one so the deletes commit now.
        if not in_batch:
            current = self.batch()
            current.begin()
        for key in keys:
            current.delete(key)
        if not in_batch:
            current.commit()
    def allocate_ids(self, incomplete_key, num_ids):
        """Allocate a list of IDs from a partial key.
        :type incomplete_key: :class:`google.cloud.datastore.key.Key`
        :param incomplete_key: Partial key to use as base for allocated IDs.
        :type num_ids: int
        :param num_ids: The number of IDs to allocate.
        :rtype: list of :class:`google.cloud.datastore.key.Key`
        :returns: The (complete) keys allocated with ``incomplete_key`` as
                  root.
        :raises: :class:`ValueError` if ``incomplete_key`` is not a
                 partial key.
        """
        if not incomplete_key.is_partial:
            raise ValueError(('Key is not partial.', incomplete_key))
        incomplete_key_pb = incomplete_key.to_protobuf()
        # The backend allocates one ID per copy of the partial key sent.
        incomplete_key_pbs = [incomplete_key_pb] * num_ids
        conn = self._connection
        allocated_key_pbs = conn.allocate_ids(incomplete_key.project,
                                              incomplete_key_pbs)
        # The allocated ID is the trailing path element of each returned key.
        allocated_ids = [allocated_key_pb.path[-1].id
                         for allocated_key_pb in allocated_key_pbs]
        return [incomplete_key.completed_key(allocated_id)
                for allocated_id in allocated_ids]
    def key(self, *path_args, **kwargs):
        """Proxy to :class:`google.cloud.datastore.key.Key`.
        Passes our ``project``.
        """
        if 'project' in kwargs:
            raise TypeError('Cannot pass project')
        kwargs['project'] = self.project
        if 'namespace' not in kwargs:
            kwargs['namespace'] = self.namespace
        return Key(*path_args, **kwargs)
    def batch(self):
        """Proxy to :class:`google.cloud.datastore.batch.Batch`."""
        return Batch(self)
    def transaction(self):
        """Proxy to :class:`google.cloud.datastore.transaction.Transaction`."""
        return Transaction(self)
    def query(self, **kwargs):
        """Proxy to :class:`google.cloud.datastore.query.Query`.
        Passes our ``project``.
        Using query to search a datastore:
        .. testsetup:: query
           from google.cloud import datastore
           client = datastore.Client()
           query = client.query(kind='_Doctest')
           def do_something(entity):
               pass
        .. doctest:: query
           >>> query = client.query(kind='MyKind')
           >>> query.add_filter('property', '=', 'val')
        Using the query iterator
        .. doctest:: query
           >>> query_iter = query.fetch()
           >>> for entity in query_iter:
           ...     do_something(entity)
        or manually page through results
        .. testsetup:: query-page
           from google.cloud import datastore
           from datastore import Config  # system tests
           client = datastore.Client()
           key = client.key('_Doctest')
           entity1 = datastore.Entity(key=key)
           entity1['foo'] = 1337
           entity2 = datastore.Entity(key=key)
           entity2['foo'] = 42
           Config.TO_DELETE.extend([entity1, entity2])
           client.put_multi([entity1, entity2])
           query = client.query(kind='_Doctest')
           cursor = None
        .. doctest:: query-page
           >>> query_iter = query.fetch(start_cursor=cursor)
           >>> pages = query_iter.pages
           >>>
           >>> first_page = next(pages)
           >>> first_page_entities = list(first_page)
           >>> query_iter.next_page_token
           '...'
        :type kwargs: dict
        :param kwargs: Parameters for initializing and instance of
                       :class:`~google.cloud.datastore.query.Query`.
        :rtype: :class:`~google.cloud.datastore.query.Query`
        :returns: A query object.
        """
        if 'client' in kwargs:
            raise TypeError('Cannot pass client')
        if 'project' in kwargs:
            raise TypeError('Cannot pass project')
        kwargs['project'] = self.project
        if 'namespace' not in kwargs:
            kwargs['namespace'] = self.namespace
        return Query(self, **kwargs)
| |
#!/usr/bin/env python
import ast
import re
import sys
from xml.etree import ElementTree as ET
import docutils.core
import docutils.nodes
from itertools import izip
import inspect
import matplotlib
matplotlib.use('Qt4Agg')
import matplotlib.docstring
# Neutralize matplotlib's docstring Substitution decorator: replacing its
# __call__ with this identity function leaves docstring templates
# unsubstituted, so this scraper sees the raw text (substitutions are done
# manually later, e.g. for 'annotate' and 'barbs' in parse_plots).
def new_call(self, func):
    # Identity: return the decorated function unchanged.
    return func
matplotlib.docstring.Substitution.__call__ = new_call
import matplotlib.pyplot
from matplotlib.artist import Artist, ArtistInspector
import matplotlib.cbook
# want to get lowercase accepts too: override ArtistInspector's valid-values
# regex with a case-insensitive version so "accepts:" lines are also matched.
ArtistInspector._get_valid_values_regex = re.compile(
    r"\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))", re.IGNORECASE)
from specs import SpecList, ModuleSpec, InputPortSpec, OutputPortSpec, \
AlternatePortSpec
# sys.path.append('/vistrails/src/git')
from vistrails.core.modules.utils import expand_port_spec_string
##############################################################################
# docutils parsing code
##############################################################################
def parse_docutils_thead(elt):
    """Return the single header row of a docutils table head as a list of
    stripped cell strings."""
    cells = []
    for row_node in elt.children:
        if row_node.__class__ != docutils.nodes.row:
            continue
        assert len(cells) == 0, "More than one row in header"
        for entry_node in row_node.children:
            if entry_node.__class__ == docutils.nodes.entry:
                cells.append(parse_docutils_elt(entry_node)[0].strip())
    return cells
def parse_docutils_tbody(elt):
    """Return the body rows of a docutils table body, each row a list of
    stripped cell strings."""
    parsed_rows = []
    for row_node in elt.children:
        if row_node.__class__ != docutils.nodes.row:
            continue
        cells = [parse_docutils_elt(entry_node)[0].strip()
                 for entry_node in row_node.children
                 if entry_node.__class__ == docutils.nodes.entry]
        parsed_rows.append(cells)
    return parsed_rows
def parse_docutils_table(elt):
header = []
rows = []
for child in elt.children:
if child.__class__ == docutils.nodes.tgroup:
for subchild in child.children:
if subchild.__class__ == docutils.nodes.thead:
header = parse_docutils_thead(subchild)
elif subchild.__class__ == docutils.nodes.tbody:
rows = parse_docutils_tbody(subchild)
print "== TABLE =="
print "HEADER:", header
print "ROWS:", '\n'.join(str(r) for r in rows)
return (header, rows)
def parse_docutils_term(elt):
    """Split a definition-list term node into (names, accepts): the
    emphasized parameter names and the textual "accepts" description.

    Order matters: once any accepts text has been collected, further
    emphasized words are folded into the accepts string rather than
    treated as additional names.
    """
    names = []
    accepts_text = ""
    for child in elt.children:
        if child.__class__ == docutils.nodes.emphasis:
            word = parse_docutils_elt(child)[0].strip()
            if word in ('True', 'False') or accepts_text != "":
                accepts_text += word
            elif word != "None":
                names.append(word)
        elif child.__class__ == docutils.nodes.Text:
            # Skip bare separator characters between names.
            if str(child).strip() not in [',', '/']:
                accepts_text += str(child)
        else:
            accepts_text += parse_docutils_elt(child)[0]
    accepts_text = accepts_text.strip()
    if accepts_text.startswith(':'):
        accepts_text = accepts_text[1:].strip()
    return names, accepts_text
def parse_docutils_deflist(elt):
    # Parse a docutils definition_list node into a flat list of
    # (term, accepts, definition) triples — one triple per term name.
    print "GOT DEFLIST!"
    args = []
    term = None
    definition = None
    for child in elt.children:
        assert child.__class__ == docutils.nodes.definition_list_item, "NO DEF LIST ITEM!"
        for subchild in child.children:
            if subchild.__class__ == docutils.nodes.term:
                terms, accepts = parse_docutils_term(subchild)
                print "TERMS:", terms
                if accepts:
                    print "ACCEPTS:", accepts
            elif subchild.__class__ == docutils.nodes.definition:
                definition = parse_docutils_elt(subchild)[0].rstrip()
                print "DEFINITION:", definition
                # NOTE(review): ``terms``/``accepts`` are only bound once a
                # term subchild has been seen; a definition appearing first
                # would raise NameError (or reuse the previous item's terms).
                # Presumably docutils always emits term before definition —
                # verify if this ever trips.
                for term in terms:
                    args.append((term, accepts, definition))
    return args
def parse_docutils_elt(elt, last_text=""):
def get_last_block(cur_text):
num_newlines = 1
end_idx = len(cur_text)
while cur_text.endswith("\n\n" * num_newlines):
num_newlines += 1
end_idx -= 2
idx = cur_text.rfind("\n\n",0,end_idx)
if idx < 0:
idx = 0
else:
idx += 2
return cur_text[idx:].strip()
text = ""
args = []
tables = []
call_signatures = []
for child in elt.children:
if child.__class__ == docutils.nodes.Text:
ntext = ' '.join(s for s in str(child).split('\n'))
text += ntext
elif child.__class__ == docutils.nodes.system_message:
pass
elif child.__class__ == docutils.nodes.definition_list:
args.append((get_last_block(last_text + text),
parse_docutils_deflist(child)))
elif child.__class__ == docutils.nodes.table:
tables.append((get_last_block(last_text + text),) + \
parse_docutils_table(child))
elif isinstance(child, docutils.nodes.Inline):
(ntext, nargs, ntables, ncall_sigs) = \
parse_docutils_elt(child, last_text + text)
text += ntext
args += nargs
tables += ntables
call_signatures += ncall_sigs
else:
(ntext, nargs, ntables, ncall_sigs) = \
parse_docutils_elt(child, last_text + text)
if child.__class__ == docutils.nodes.literal_block:
check_str = (last_text + text).lower().strip()
if check_str.endswith("\ncall signature:") or \
check_str.endswith("\ncall signatures:"):
call_signatures.append(ntext)
text += ntext.strip() + "\n\n"
args += nargs
tables += ntables
call_signatures += ncall_sigs
return (text.rstrip(), args, tables, call_signatures)
def parse_docutils_str(docstring, should_print=False):
root = docutils.core.publish_doctree(docstring)
if should_print:
print root
return parse_docutils_elt(root)
##############################################################################
# util methods
##############################################################################
def capfirst(s):
    """Return *s* with its first character upper-cased.

    Unlike ``str.capitalize`` the rest of the string is left untouched.
    Uses a slice so the empty string is returned unchanged instead of
    raising IndexError as ``s[0]`` would.
    """
    return s[:1].upper() + s[1:]
def pretty_name(s):
    """Convert a snake_case identifier to CamelCase.

    Each underscore (except one in the leading position, which is kept)
    is dropped and the following character is upper-cased; the first
    character is always upper-cased.
    """
    pieces = []
    capitalize_next = True
    for idx, ch in enumerate(s):
        if capitalize_next:
            ch = ch.upper()
            capitalize_next = False
        if ch != '_' or idx == 0:
            pieces.append(ch)
        else:
            capitalize_next = True
    return ''.join(pieces)
def get_value_and_type(s):
    """Evaluate *s* and return a ``(value, port_type)`` pair.

    Strings that fail to evaluate are kept verbatim as the value; values
    that evaluate to a type object yield ``(None, None)``.

    NOTE(review): ``eval`` on docstring text — acceptable for this offline
    scraping tool, but never feed it untrusted input.
    """
    try:
        value = eval(s)
    except Exception:
        value = s
    else:
        if isinstance(value, type):
            return (None, None)
    return (value, get_type_from_val(value))
def get_type_from_val(val):
    """Map a Python value to the matching VisTrails "basic:*" port type,
    or None for unrecognized types.

    The check order matters: bool is a subclass of int, so it is tested
    before the integer check.
    """
    type_checks = [
        (float, "basic:Float"),
        (bool, "basic:Boolean"),
        ((int, long), "basic:Integer"),
        (basestring, "basic:String"),
        (list, "basic:List"),
    ]
    for py_types, port_type in type_checks:
        if isinstance(val, py_types):
            return port_type
    return None
def resolve_port_type(port_types, port_spec):
    # Choose a single port type for ``port_spec`` from the candidate list
    # ``port_types`` (plus any type already set on the spec), mutating the
    # spec in place. Falls back to name-based heuristics when the candidate
    # set is ambiguous.
    port_types_set = set(p for p in port_types if p is not None)
    was_set = False
    if port_spec.port_type is not None:
        port_types_set.add(port_spec.port_type)
    if len(port_types_set) == 1:
        # Unambiguous: use the single candidate.
        port_spec.port_type = next(iter(port_types_set))
        was_set = True
    elif len(port_types_set) == 2:
        if 'basic:Float' in port_types_set and \
           'basic:Integer' in port_types_set:
            # Integer values are acceptable where floats are; widen.
            port_spec.port_type = 'basic:Float'
            was_set = True
        elif 'basic:List' in port_types_set:
            # Scalar-or-sequence argument: the main port becomes the
            # sequence form and a "<name>Scalar" alternate port carries
            # the scalar form.
            port_spec.port_type = 'basic:List'
            base_name = port_spec.name
            port_spec.name = base_name + "Sequence"
            port_types_set.discard('basic:List')
            alternate_spec = \
                AlternatePortSpec(name=base_name + "Scalar",
                                  port_type=next(iter(port_types_set)))
            port_spec.alternate_specs.append(alternate_spec)
            was_set = True
    if not was_set:
        # Heuristics by port name when the types were inconclusive.
        if "color" in port_spec.name:
            port_spec.port_type = "basic:Color"
            port_spec.translations = "translate_color"
        elif port_spec.name == "x":
            port_spec.port_type = "basic:List"
        elif port_spec.name == "y":
            port_spec.port_type = "basic:List"
        else:
            port_spec.port_type = None
    # # FIXME
    # # what to do with scalar/sequence-type args
    # elif len(port_types_set) == 2 and 'basic:List' in port_types_set:
    #     port_type = 'basic:List'
    # else:
    #     port_type = None
    # return port_type
def assign_port_values(port_spec, values, default_val):
    # Attach enum ``values`` and a default to ``port_spec`` (or to one of
    # its alternate specs, chosen by matching the value types), mutating
    # the chosen spec in place.
    assign_port_spec = None
    # Temporarily lift any existing defaults off the spec; they are
    # re-assigned below to whichever spec is chosen.
    if port_spec.defaults is not None and len(port_spec.defaults) > 0:
        current_default = port_spec.defaults
        port_spec.defaults = None
    else:
        current_default = []
    if len(port_spec.alternate_specs) == 0:
        # No alternates: everything goes on the main spec.
        assign_port_spec = port_spec
    else:
        # Infer the value type and pick the spec whose port_type matches.
        port_types = set()
        for value in values + current_default + \
                ([default_val] if default_val is not None else []):
            port_type = get_type_from_val(value)
            if port_type is not None:
                port_types.add(port_type)
        if len(port_types) == 1:
            for ps in [port_spec] + port_spec.alternate_specs:
                if ps.port_type == next(iter(port_types)):
                    assign_port_spec = ps
        elif len(port_types) > 1:
            raise Exception("Multiple value types found!")
    if assign_port_spec is not None:
        if len(values) > 0:
            assign_port_spec.entry_types = ['enum']
            assign_port_spec.values = [values]
        # Pre-existing defaults win over the newly parsed default value.
        if len(current_default) > 0:
            assign_port_spec.defaults = current_default
        elif default_val is not None:
            assign_port_spec.defaults = [default_val]
def parse_description(desc):
    """Extract typing information from an "accepts"-style description.

    Parses strings such as ``[ None | 'solid' | 'dashed' ]`` or
    ``a float value`` taken from matplotlib docstrings.

    :returns: tuple ``(port_types, option_strs, default_val, allows_none)``
        where ``port_types`` lists the "basic:*" type names seen,
        ``option_strs`` collects enumerated option values, ``default_val``
        is the detected default value (if any), and ``allows_none`` tells
        whether ``None`` is an accepted value.
    """
    key_to_type = {'string': 'basic:String',
                   'integer': 'basic:Integer',
                   'sequence': 'basic:List',
                   'float': 'basic:Float',
                   'boolean': 'basic:Boolean',
                   'scalar': 'basic:Float',
                   'vector': 'basic:List',
                   'list': 'basic:List'}
    port_types = []
    option_strs = []
    default_val = None
    allows_none = False
    # Matches "<value> (default)" or "(default: <value>)" style markers.
    default_paren_re = re.compile(r"((\S*)\s+)?\(default:?(\s+(\S*))?\)",
                                  re.IGNORECASE)
    default_is_re = re.compile(r"default\s+is\s+(\S*)", re.IGNORECASE)
    if '|' in desc:
        # Alternatives list, usually bracketed: "[ a | b | c ]".
        m = re.search(r"\[([\s\S]*?)\]", desc)
        if m:
            opt_str = m.group(1)
        else:
            opt_str = desc
        opts = opt_str.split('|')
        for opt in opts:
            opt = opt.strip()
            m = default_paren_re.search(opt)
            if m:
                (_, before_res, _, after_res) = m.groups()
                if after_res:
                    assert default_val is None, ('Multiple defaults: '
                        '"%s" "%s"' % (default_val, after_res))
                    default_val = after_res
                    opt = after_res
                elif before_res:
                    # (fixed: this message previously reported after_res)
                    assert default_val is None, ('Multiple defaults: '
                        '"%s" "%s"' % (default_val, before_res))
                    default_val = before_res
                    opt = before_res
            found_type = False
            opt_lower = opt.lower()
            if opt_lower == "none":
                found_type = True
                allows_none = True
            elif opt_lower == "true" or opt_lower == "false":
                found_type = True
                port_types.append("basic:Boolean")
            else:
                # Type keywords first; fall back to evaluating the option
                # text as a literal value.
                for key in key_to_type:
                    if key in opt_lower:
                        found_type = True
                        port_types.append(key_to_type[key])
                if not found_type:
                    (val, port_type) = get_value_and_type(opt)
                    option_strs.append(val)
                    if port_type is not None:
                        port_types.append(port_type)
                        found_type = True
    if default_val is None:
        # No default among the options; scan the whole description.
        m = default_paren_re.search(desc)
        if m:
            (_, before_res, _, after_res) = m.groups()
            if after_res:
                default_val = after_res
            elif before_res:
                default_val = before_res
        else:
            m = default_is_re.search(desc)
            if m:
                (default_val,) = m.groups()
                if default_val.endswith('.') or default_val.endswith(','):
                    default_val = default_val[:-1]
    if default_val:
        (default_val, port_type) = get_value_and_type(default_val)
        if port_type is not None:
            port_types.append(port_type)
    if len(port_types) == 0:
        # Last resort: infer types from keywords anywhere in the text.
        # (.items() instead of py2-only .iteritems(); valid on both.)
        for key, port_type in key_to_type.items():
            if key in desc:
                port_types.append(port_type)
    return (port_types, option_strs, default_val, allows_none)
def parse_translation(rows, should_reverse=True):
    """Build a value-translation dict from two-column table rows.

    Each row maps ``row[0] <-> row[1]``. With ``should_reverse`` (the
    default) the returned dict maps the second column back to the first,
    matching how the tables in the matplotlib docs are oriented.

    :returns: tuple ``(translation_dict, port_types, values)`` where
        ``port_types`` and ``values`` describe the dict's key column.
    """
    translation = {}
    port_types = []
    values = []
    for row in rows:
        (val1, port_type1) = get_value_and_type(row[0])
        (val2, port_type2) = get_value_and_type(row[1])
        if should_reverse:
            # `is not None` instead of `!= None` (identity, not equality).
            if val2 is not None:
                port_types.append(port_type2)
                values.append(val2)
                translation[val2] = val1
        else:
            if val1 is not None:
                port_types.append(port_type1)
                values.append(val1)
                translation[val1] = val2
    return (translation, port_types, values)
def do_translation_override(port_specs, names, rows, opts):
    # Apply a hand-written table override: turn the table's rows into an
    # enum value list (and, unless opts['values_only'], a translation dict)
    # on each named input port. ``opts['name']`` overrides ``names``.
    if 'name' in opts:
        names = opts['name']
    if names is None:
        raise ValueError("Must specify name of port to use translation for")
    # Normalize a single name (or any non-iterable) to a one-element list.
    if isinstance(names, basestring) or not matplotlib.cbook.iterable(names):
        names = [names]
    should_reverse = opts.get('reverse', True)
    values_only = opts.get('values_only', False)
    (t, port_type, values) = \
        parse_translation(rows, should_reverse)
    for name in names:
        print "TRANSLATING", name
        if name not in port_specs:
            port_specs[name] = InputPortSpec(name)
        port_specs[name].entry_types = ['enum']
        port_specs[name].values = [values]
        if not values_only:
            port_specs[name].translations = t
def get_names(obj, default_module_base, default_super_base,
              prefix="Mpl", suffix=""):
    """Resolve ``(obj, module_name, super_name)`` for a plot entry.

    *obj* may be a bare object or a 2-/3-tuple
    ``(obj, module_name[, super_name])``; names left unspecified are
    derived as ``prefix + pretty_name(base(obj)) + suffix``.
    """
    module_name = None
    super_name = None
    if isinstance(obj, tuple):
        if len(obj) < 2:
            raise ValueError("Need to specify 2- or 3-tuple")
        if len(obj) > 2:
            super_name = obj[2]
        (obj, module_name) = obj[:2]
    if module_name is None:
        module_name = "%s%s%s" % (prefix,
                                  pretty_name(default_module_base(obj)),
                                  suffix)
    if super_name is None:
        super_name = "%s%s%s" % (prefix,
                                 pretty_name(default_super_base(obj)),
                                 suffix)
    return (obj, module_name, super_name)
##############################################################################
# main methods
##############################################################################
def parse_argspec(obj_or_str):
    # Build a list of InputPortSpec objects from either a callable or a
    # textual call signature (e.g. "plot(x, y, fmt=None)"). For strings,
    # the signature is wrapped into a dummy "def ...: pass" and parsed
    # with the ast module; for callables, inspect.getargspec is used.
    if isinstance(obj_or_str, basestring):
        obj_or_str = obj_or_str.strip()
        # Normalize the fragment into valid "def name(args):" source.
        if not obj_or_str.endswith(":"):
            obj_or_str += ":"
        if not obj_or_str.startswith("def "):
            obj_or_str = "def " + obj_or_str
        try:
            tree = ast.parse(obj_or_str + "\n pass")
        except SyntaxError:
            # cannot parse the argspec
            print "*** CANNOT PARSE", obj_or_str
            return []
        argspec_name = tree.body[0].name
        # NOTE(review): `.id` on argument nodes is Python-2 ast (args are
        # Name nodes there); Python 3 would need `.arg`.
        argspec_args = [a.id for a in tree.body[0].args.args]
        print tree.body[0].args.defaults
        argspec_defaults = []
        for i, d in enumerate(tree.body[0].args.defaults):
            try:
                d_val = ast.literal_eval(d)
            except ValueError:
                # Non-literal default (e.g. an expression): treat as None.
                d_val = None
            argspec_defaults.append(d_val)
    else:
        argspec = inspect.getargspec(obj_or_str)
        argspec_args = argspec.args
        argspec_defaults = argspec.defaults
    # Index of the first argument that has a default value; pushed past
    # the end when there are no defaults at all.
    if not argspec_defaults:
        start_defaults = len(argspec_args) + 1
    else:
        start_defaults = len(argspec_args) - len(argspec_defaults)
    port_specs_list = []
    has_self = False
    for i, arg in enumerate(argspec_args):
        # Skip a leading "self" and shift later positions down by one.
        if i == 0 and arg == "self":
            has_self = True
            continue
        port_spec = InputPortSpec(arg)
        port_spec.arg_pos = (i-1) if has_self else i
        if i >= start_defaults:
            # Defaulted argument: optional port, typed from its default.
            port_spec.required = False
            default_val = argspec_defaults[i-start_defaults]
            if default_val is not None:
                port_spec.defaults = [default_val]
                port_type = get_type_from_val(default_val)
                if port_type is not None:
                    port_spec.port_type = port_type
        else:
            port_spec.required = True
        port_specs_list.append(port_spec)
    return port_specs_list
def process_docstring(docstring, port_specs, parent, table_overrides):
    # Parse a matplotlib docstring into port specs: call signatures feed
    # argument/default reconciliation, definition lists and tables feed
    # input ports ("argument"/"kwarg" intros) or output ports ("return
    # value" intros). ``port_specs`` is mutated in place; returns the
    # cleaned docstring text and the list of output port specs.
    (cleaned_docstring, args, tables, call_sigs) = \
        parse_docutils_str(docstring)
    if len(call_sigs) > 0:
        for call_sig in call_sigs:
            port_specs_list = parse_argspec(call_sig)
            for port_spec in port_specs_list:
                if port_spec.arg in port_specs:
                    # have to reconcile the two
                    old_port_spec = port_specs[port_spec.arg]
                    resolve_port_type([port_spec.port_type], old_port_spec)
                    if old_port_spec.defaults is None:
                        if port_spec.defaults is not None:
                            assign_port_values(old_port_spec, [],
                                               port_spec.defaults[0])
                        # old_port_spec.defaults = port_spec.defaults
                    elif old_port_spec.defaults != port_spec.defaults:
                        # keep it as the old spec is
                        print "*** Different defaults!" + \
                            str(old_port_spec.defaults) + \
                            " : " + str(port_spec.defaults)
                        assign_port_values(old_port_spec, [],
                                           old_port_spec.defaults[0])
                else:
                    port_specs[port_spec.arg] = port_spec
    output_port_specs = []
    # --- Definition lists: classified by their introductory paragraph ---
    for (deflist_intro, deflist) in args:
        print "PROCESSING DEFLIST", deflist_intro
        if re.search("return value", deflist_intro, re.IGNORECASE):
            print " -> RETURN VALUE"
            for (name, accepts, port_doc) in deflist:
                # Combine type info from the accepts string and the body.
                (port_types, option_strs, default_val, allows_none) = \
                    parse_description(accepts)
                (pt2, _, dv2, _) = parse_description(port_doc)
                port_types.extend(pt2)
                if default_val is None:
                    default_val = dv2
                oport = OutputPortSpec(name, docstring=port_doc)
                resolve_port_type(port_types, oport)
                output_port_specs.append(oport)
        elif (re.search("argument", deflist_intro, re.IGNORECASE) or
              re.search("kwarg", deflist_intro, re.IGNORECASE)):
            print " -> ARGUMENTS"
            for (name, accepts, port_doc) in deflist:
                if name not in port_specs:
                    port_specs[name] = InputPortSpec(name, docstring=port_doc)
                else:
                    port_specs[name].docstring = port_doc
                (port_types, option_strs, default_val, allows_none) = \
                    parse_description(accepts)
                (pt2, _, dv2, _) = parse_description(port_doc)
                port_types.extend(pt2)
                if default_val is None:
                    default_val = dv2
                resolve_port_type(port_types, port_specs[name])
                assign_port_values(port_specs[name], option_strs, default_val)
    # --- Tables: may be redirected/skipped by hand-written overrides ---
    for (table_intro, header, rows) in tables:
        print "GOT TABLE", table_intro, rows[0]
        table_key = parent + (table_intro,)
        if table_key in table_overrides:
            (override_type, opts) = table_overrides[table_key]
            if override_type == "translation":
                do_translation_override(port_specs, None, rows, opts)
                continue
            elif override_type == "ports":
                # Force the table to be treated as argument ports below.
                table_intro = "kwarg"
            elif override_type == "skip":
                continue
        if re.search("return value", table_intro, re.IGNORECASE):
            print " -> RETURN"
            if len(rows[0]) != 2:
                raise ValueError("row that has more/less than 2 columns!")
            for (name, port_doc) in rows:
                (port_types, option_strs, default_val, allows_none) = \
                    parse_description(port_doc)
                oport = OutputPortSpec(name, docstring=port_doc)
                resolve_port_type(port_types, oport)
                output_port_specs.append(oport)
        elif (re.search("argument", table_intro, re.IGNORECASE) or
              re.search("kwarg", table_intro, re.IGNORECASE)):
            print " -> ARGUMENT"
            if len(rows[0]) != 2:
                raise ValueError("row that has more/less than 2 columns!")
            for (name, port_doc) in rows:
                if name not in port_specs:
                    port_specs[name] = InputPortSpec(name, docstring=port_doc)
                else:
                    port_specs[name].docstring = port_doc
                (port_types, option_strs, default_val, allows_none) = \
                    parse_description(port_doc)
                resolve_port_type(port_types, port_specs[name])
                assign_port_values(port_specs[name], option_strs, default_val)
        else:
            raise ValueError("Unknown table: %s\n %s %s" % (
                parent, table_intro, header))
    return cleaned_docstring, output_port_specs
def parse_plots(plot_types, table_overrides):
    """Build a SpecList of ModuleSpecs for matplotlib.pyplot plot commands.

    plot_types: list of pyplot function names (or tuples understood by
        get_names that also carry an explicit module name).
    table_overrides: dict keyed by ('pyplot', <plot>, <table intro text>)
        mapping to (override_type, opts); forwarded to process_docstring to
        special-case individual docstring tables.
    Returns a SpecList of the generated ModuleSpecs.
    """
    # Plot modules keep their own name; all of them derive from the
    # "plot" superclass spec.
    def get_module_base(n):
        return n
    def get_super_base(n):
        return "plot"
    module_specs = []
    for plot in plot_types:
        port_specs = {}
        print "========================================"
        print plot
        print "========================================"
        (plot, module_name, super_name) = \
            get_names(plot, get_module_base, get_super_base, "Mpl", "")
        try:
            plot_obj = getattr(matplotlib.pyplot, plot)
        except AttributeError:
            # Skip plots this matplotlib version does not provide.
            print '*** CANNOT ADD PLOT "%s";' \
                'IT DOES NOT EXIST IN THIS MPL VERSION ***' % plot
            continue
        # Seed port specs from the function signature; the parsed docstring
        # refines them below.
        port_specs_list = parse_argspec(plot_obj)
        for port_spec in port_specs_list:
            port_specs[port_spec.arg] = port_spec
        docstring = plot_obj.__doc__
        if plot == 'contour':
            # want to change the double newline to single newline...
            print "&*&* FINDING:", \
                docstring.find("*extent*: [ *None* | (x0,x1,y0,y1) ]\n\n")
            docstring = docstring.replace("*extent*: [ *None* | (x0,x1,y0,y1) ]\n\n",
                                          "*extent*: [ *None* | (x0,x1,y0,y1) ]\n")
        if plot == 'annotate':
            # These docstrings contain %(...)s interpolation fields; supply
            # only the interpd entry each docstring actually references.
            docstring = docstring % dict((k,v) for k, v in matplotlib.docstring.interpd.params.iteritems() if k == 'Annotation')
        elif plot == 'barbs':
            docstring = docstring % dict((k,v) for k,v in matplotlib.docstring.interpd.params.iteritems() if k == 'barbs_doc')
        cleaned_docstring, output_port_specs = \
            process_docstring(docstring, port_specs, ('pyplot', plot),
                              table_overrides)
        # for port_spec in port_specs.itervalues():
        #     if port_spec.defaults is not None:
        #         port_spec.defaults = [str(v) for v in port_spec.defaults]
        #     if port_spec.values is not None:
        #         port_spec.values = [[str(v) for v in port_spec.values[0]]]
        #     for alt_ps in port_spec.alternate_specs:
        #         if alt_ps.defaults is not None:
        #             alt_ps.defaults = [str(v) for v in alt_ps.defaults]
        #         if alt_ps.values is not None:
        #             alt_ps.values = [[str(v) for v in alt_ps.values[0]]]
        module_specs.append(ModuleSpec(module_name, super_name,
                                       "matplotlib.pyplot.%s" % plot,
                                       cleaned_docstring, port_specs.values(),
                                       output_port_specs))
    my_specs = SpecList(module_specs)
    return my_specs
# Splits a matplotlib setter docstring into (text before, ACCEPTS clause,
# text after). Group 2 non-greedily captures the ACCEPTS value description
# up to a blank line or the end of the string; groups 1 and 3 are rejoined
# by the caller to strip the clause out of the docstring.
_get_accepts_regex = re.compile(
    r"([\s\S]*)\n\s*ACCEPTS:\s*((?:.|\n)*?)(?:$|(?:\n\n))([\s\S]*)",
    re.IGNORECASE)
def parse_artists(artist_types, table_overrides={}):
def get_module_name(obj):
return obj.__name__
def get_super_name(obj):
for base in obj.__bases__:
if issubclass(base, Artist):
return base.__name__
return ""
module_specs = []
for klass in artist_types:
(klass, module_name, super_name) = \
get_names(klass, get_module_name, get_super_name, "Mpl",
"Properties")
port_specs = {}
insp = ArtistInspector(klass)
klass_name = klass.__name__
klass_qualname = klass.__module__ + "." + klass_name
for (s, t) in insp._get_setters_and_targets():
print "** %s **" % s
if t.rsplit('.',1)[0] != klass_qualname:
# let inheritance work
continue
if s in port_specs:
raise ValueError('duplicate port "%s"' % s)
port_spec = InputPortSpec(s)
port_specs[s] = port_spec
accepts_raw = insp.get_valid_values(s)
(accepts, deflists, tables, call_sigs) = \
parse_docutils_str(accepts_raw)
if len(deflists) + len(tables) > 0:
raise ValueError("accepts has deflists and/or tables")
(port_types, option_strs, default_val, allows_none) = \
parse_description(accepts)
if default_val is not None:
port_spec.default_val = default_val
if len(option_strs) > 0:
port_spec.entry_types = ['enum']
port_spec.values = [option_strs]
port_spec.hide = False
docstring = getattr(insp.o, 'set_' + s).__doc__
if docstring is None:
docstring = ""
else:
docstring = docstring % matplotlib.docstring.interpd.params
match = _get_accepts_regex.search(docstring)
if match is not None:
print "STARTING DOCSTRING:", docstring
groups = match.groups()
if len(groups) > 2 and groups[2]:
docstring = groups[0] + groups[2]
else:
docstring = groups[0]
print "FIXED DOCSTRING:", docstring
(cleaned_docstring, args, tables, call_sigs) = \
parse_docutils_str(docstring)
port_spec.docstring = cleaned_docstring
translations = None
for (table_intro, header, rows) in tables:
print "TABLE:", table_intro
if (klass.__name__, s, table_intro) in table_overrides:
(override_type, opts) = \
table_overrides[(klass.__name__, s, table_intro)]
if override_type == "translation":
do_translation_override(port_specs, s, rows, opts)
continue
elif override_type == "ports":
table_intro = "kwarg"
elif override_type == "skip":
continue
if len(header) != 2:
raise ValueError("Table not two columns!")
if translations is not None:
raise ValueError("Two translations in one attr")
(translations, pt2, values) = parse_translation(rows)
port_spec.translations = translations
port_spec.values = [values]
port_types.extend(pt2)
resolve_port_type(port_types, port_spec)
constructor_port_specs = {}
port_specs_list = parse_argspec(klass.__init__)
for port_spec in port_specs_list:
constructor_port_specs[port_spec.arg] = port_spec
constructor_docstring = klass.__init__.__doc__
if constructor_docstring is not None:
_, output_port_specs = process_docstring(constructor_docstring,
constructor_port_specs,
(klass.__name__,
'__init__'),
table_overrides)
for arg, ps in constructor_port_specs.iteritems():
if arg not in port_specs:
ps.constructor_arg = True
ps.required = False
port_specs[arg] = ps
module_spec = ModuleSpec(module_name, super_name, klass_qualname,
klass.__doc__, port_specs.values())
module_specs.append(module_spec)
my_specs = SpecList(module_specs)
return my_specs
def run_artists():
    """Generate mpl_artists_raw.xml: raw module specs for matplotlib Artists.

    Collects every Artist subclass from a fixed set of matplotlib modules,
    sorts them so base classes precede subclasses, and runs parse_artists()
    with per-(class, setter, table-intro) overrides.
    """
    import matplotlib.axes
    import matplotlib.axis
    import matplotlib.collections
    import matplotlib.figure
    import matplotlib.image
    import matplotlib.lines
    import matplotlib.patches
    import matplotlib.text
    artist_py_modules = [matplotlib.axes,
                         matplotlib.axis,
                         matplotlib.collections,
                         matplotlib.figure,
                         matplotlib.image,
                         matplotlib.lines,
                         matplotlib.patches,
                         matplotlib.text,
                         ]
    exclude = set([])
    artist_types = set() # (Artist, None, "MplProperties")]
    for py_module in artist_py_modules:
        for cls_name, cls in inspect.getmembers(py_module, inspect.isclass):
            if cls_name in exclude:
                continue
            if issubclass(cls, Artist) and cls != Artist:
                artist_types.add(cls)
    print "ARTIST TYPES:", artist_types
    # Sorting by the reversed MRO puts base classes before their subclasses,
    # so a superclass spec is generated before any spec that derives from it.
    artist_types = [(Artist, None, "MplProperties")] + \
        list(sorted(artist_types, key=lambda x: list(reversed(x.mro()))))
    print "SORTED ARTIST TYPES:", artist_types
    # FIXME want this to be indexed by artist name, too...
    artist_overrides = {('Axes', 'aspect', 'aspect'):
                        ('translation', {'reverse': False,
                                         'values_only': True}),
                        # FIXME may want documentation from adjustable?
                        ('Axes', 'aspect', 'adjustable'):
                        ('skip', {}),
                        # FIXME may want documentation from anchor?
                        ('Axes', 'aspect', 'anchor'):
                        ('skip', {}),
                        ('ConnectionPatch', '__init__', "Valid keys are"):
                        ('ports', {}),
                        ('ConnectionPatch', '__init__', "coordsA and coordsB are strings that indicate the coordinates of xyA and xyB."):
                        ('translation', {'name': ['coordsA', 'coordsB'],
                                         'reverse': False,
                                         'values_only': True}),
                        ('Annotation', '__init__', "If the dictionary has a key arrowstyle, a FancyArrowPatch instance is created with the given dictionary and is drawn. Otherwise, a YAArow patch instance is created and drawn. Valid keys for YAArow are"):
                        ('skip', {}),
                        ('Annotation', '__init__', "Valid keys for FancyArrowPatch are"):
                        ('skip', {}),
                        ('Annotation', '__init__', "xycoords and textcoords are strings that indicate the coordinates of xy and xytext."):
                        ('translation', {'name': ['xycoords', 'textcoords'],
                                         'reverse': False,
                                         'values_only': True}),
                        }
    specs = parse_artists(artist_types, artist_overrides)
    specs.write_to_xml("mpl_artists_raw.xml")
def run_plots():
    """Generate mpl_plots_raw.xml: raw module specs for pyplot commands.

    The list of plot commands mirrors matplotlib's boilerplate.py; a few
    docstring tables are special-cased via table_overrides.
    """
    # from matplotlib's boilerplate.py
    plot_types = ['acorr',
                  'arrow',
                  'axhline',
                  'axhspan',
                  'axvline',
                  'axvspan',
                  'bar',
                  'barh',
                  'broken_barh',
                  'boxplot',
                  'cohere',
                  'clabel',
                  'contour',
                  'contourf',
                  'csd',
                  'errorbar',
                  'fill',
                  'fill_between',
                  'fill_betweenx',
                  'hexbin',
                  'hist',
                  'hist2d',
                  'hlines',
                  'imshow',
                  'loglog',
                  'pcolor',
                  'pcolormesh',
                  'pie',
                  # add plot later
                  # 'plot',
                  'plot_date',
                  'psd',
                  'quiver',
                  'quiverkey',
                  'scatter',
                  'semilogx',
                  'semilogy',
                  'specgram',
                  'stackplot',
                  'stem',
                  'step',
                  'streamplot',
                  'tricontour',
                  'tricontourf',
                  'tripcolor',
                  'triplot',
                  'vlines',
                  'xcorr',
                  'barbs',
                  ]
    plot_types += ['spy',
                   'polar',
                   ]
    # FIXME added to keep existing code happy for now
    plot_types += ['legend',
                   'annotate',
                   ('plot', 'MplLinePlot')]
    # Keys are ('pyplot', <plot name>, <exact table intro text>).
    table_overrides = {('pyplot', 'plot', 'The following format string characters are accepted to control the line style or marker:'):
                       ('translation', {'name': 'marker'}),
                       ('pyplot', 'plot', 'The following color abbreviations are supported:'):
                       ('skip', {}),
                       ('pyplot', 'legend', 'The location codes are'):
                       ('translation', {'name': 'loc',
                                        'reverse': False}),
                       ('pyplot', 'legend', 'Padding and spacing between various elements use following keywords parameters. These values are measure in font-size units. E.g., a fontsize of 10 points and a handlelength=5 implies a handlelength of 50 points. Values from rcParams will be used if None.'):
                       ('ports', {}),
                       ('pyplot', 'annotate', "If the dictionary has a key arrowstyle, a FancyArrowPatch instance is created with the given dictionary and is drawn. Otherwise, a YAArow patch instance is created and drawn. Valid keys for YAArow are"):
                       ('skip', {}),
                       ('pyplot', 'annotate', "Valid keys for FancyArrowPatch are"):
                       ('skip', {}),
                       ('pyplot', 'annotate', "xycoords and textcoords are strings that indicate the coordinates of xy and xytext."):
                       ('translation', {'name': ['xycoords', 'textcoords'],
                                        'reverse': False,
                                        'values_only': True}),
                       }
    specs = parse_plots(plot_types, table_overrides)
    specs.write_to_xml("mpl_plots_raw.xml")
def run(which="all"):
    """Run the requested generation pass(es): "all", "artists" or "plots"."""
    if which in ("all", "artists"):
        run_artists()
    if which in ("all", "plots"):
        run_plots()
def get_docutils(plot):
import matplotlib.pyplot
plot_obj = getattr(matplotlib.pyplot, plot)
(_, _, _, call_sigs) = parse_docutils_str(plot_obj.__doc__, True)
print call_sigs
if __name__ == '__main__':
    # Optional single argument selects which generation pass to run.
    argc = len(sys.argv)
    if argc <= 1:
        run()
    elif argc == 2:
        run(sys.argv[1])
    else:
        raise TypeError("usage: python parse.py [all|artists|plots]")
| |
# -*- coding: utf-8 -*-
"""Parser for Microsoft Internet Explorer (MSIE) Cache Files (CF)."""
import pymsiecf
from dfdatetime import fat_date_time as dfdatetime_fat_date_time
from dfdatetime import filetime as dfdatetime_filetime
from dfdatetime import semantic_time as dfdatetime_semantic_time
from plaso.containers import events
from plaso.containers import time_events
from plaso.lib import definitions
from plaso.lib import specification
from plaso.parsers import interface
from plaso.parsers import manager
class MSIECFLeakEventData(events.EventData):
  """Event data of a MSIECF leak item.

  Attributes:
    cache_directory_index (int): index of the cache directory.
    cache_directory_name (str): name of the cache directory.
    cached_file_size (int): size of the cached file.
    cached_filename (str): name of the cached file.
    offset (int): offset of the MSIECF item relative to the start of the file,
        from which the event data was extracted.
    recovered (bool): True if the item was recovered.
  """

  DATA_TYPE = 'msiecf:leak'

  def __init__(self):
    """Initializes MSIECF leak event data."""
    super(MSIECFLeakEventData, self).__init__(data_type=self.DATA_TYPE)
    self.cache_directory_index = None
    self.cache_directory_name = None
    self.cached_file_size = None
    self.cached_filename = None
    self.offset = None
    self.recovered = None
class MSIECFRedirectedEventData(events.EventData):
  """Event data of a MSIECF redirected item.

  Attributes:
    offset (int): offset of the MSIECF item relative to the start of the file,
        from which the event data was extracted.
    recovered (bool): True if the item was recovered.
    url (str): location URL.
  """

  DATA_TYPE = 'msiecf:redirected'

  def __init__(self):
    """Initializes MSIECF redirected event data."""
    super(MSIECFRedirectedEventData, self).__init__(
        data_type=self.DATA_TYPE)
    self.offset = None
    self.recovered = None
    self.url = None
class MSIECFURLEventData(events.EventData):
  """Event data of a MSIECF URL item.

  Attributes:
    cache_directory_index (int): index of the cache directory.
    cache_directory_name (str): name of the cache directory.
    cached_file_size (int): size of the cached file.
    cached_filename (str): name of the cached file.
    http_headers (str): HTTP headers.
    number_of_hits (int): number of hits.
    offset (int): offset of the MSIECF item relative to the start of the file,
        from which the event data was extracted.
    recovered (bool): True if the item was recovered.
    url (str): location URL.
  """

  DATA_TYPE = 'msiecf:url'

  def __init__(self):
    """Initializes MSIECF URL event data."""
    super(MSIECFURLEventData, self).__init__(data_type=self.DATA_TYPE)
    self.cache_directory_index = None
    self.cache_directory_name = None
    self.cached_file_size = None
    self.cached_filename = None
    self.http_headers = None
    self.number_of_hits = None
    self.offset = None
    self.recovered = None
    self.url = None
class MSIECFParser(interface.FileObjectParser):
  """Parses MSIE Cache Files (MSIECF)."""

  NAME = 'msiecf'
  DATA_FORMAT = (
      'Microsoft Internet Explorer (MSIE) 4 - 9 cache (index.dat) file')

  def _ParseLeak(
      self, parser_mediator, cache_directories, msiecf_item, recovered=False):
    """Extract data from a MSIE Cache Files (MSIECF) leak item.

    Every item is stored as an event object, one for each timestamp.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      cache_directories (list[str]): cache directory names.
      msiecf_item (pymsiecf.leak): MSIECF leak item.
      recovered (Optional[bool]): True if the item was recovered.
    """
    # TODO: add support for possible last cache synchronization date and time.
    date_time = dfdatetime_semantic_time.NotSet()

    event_data = MSIECFLeakEventData()
    event_data.cached_filename = msiecf_item.filename
    event_data.cached_file_size = msiecf_item.cached_file_size
    event_data.cache_directory_index = msiecf_item.cache_directory_index
    event_data.offset = msiecf_item.offset
    event_data.recovered = recovered

    # Only resolve the directory name when the index is within bounds; an
    # item can reference a directory index not present in the file header.
    if 0 <= event_data.cache_directory_index < len(cache_directories):
      event_data.cache_directory_name = (
          cache_directories[event_data.cache_directory_index])

    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  def _ParseItems(self, parser_mediator, msiecf_file):
    """Parses a MSIE Cache File (MSIECF) items.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      msiecf_file (pymsiecf.file): MSIECF file.
    """
    format_version = msiecf_file.format_version

    # Cache directory names are byte strings; decode them up-front and emit
    # a single warning if any contain non-ASCII bytes.
    decode_error = False
    cache_directories = []
    for cache_directory_name in msiecf_file.cache_directories:
      try:
        cache_directory_name = cache_directory_name.decode('ascii')
      except UnicodeDecodeError:
        decode_error = True
        cache_directory_name = cache_directory_name.decode(
            'ascii', errors='replace')
      cache_directories.append(cache_directory_name)

    if decode_error:
      parser_mediator.ProduceExtractionWarning((
          'unable to decode cache directory names. Characters that cannot '
          'be decoded will be replaced with "?" or "\\ufffd".'))

    for item_index in range(0, msiecf_file.number_of_items):
      try:
        msiecf_item = msiecf_file.get_item(item_index)
        if isinstance(msiecf_item, pymsiecf.leak):
          self._ParseLeak(parser_mediator, cache_directories, msiecf_item)
        elif isinstance(msiecf_item, pymsiecf.redirected):
          self._ParseRedirected(parser_mediator, msiecf_item)
        elif isinstance(msiecf_item, pymsiecf.url):
          self._ParseUrl(
              parser_mediator, format_version, cache_directories, msiecf_item)
      except IOError as exception:
        # A single unreadable item should not abort parsing the file.
        parser_mediator.ProduceExtractionWarning(
            'Unable to parse item: {0:d} with error: {1!s}'.format(
                item_index, exception))

    for item_index in range(0, msiecf_file.number_of_recovered_items):
      try:
        msiecf_item = msiecf_file.get_recovered_item(item_index)
        if isinstance(msiecf_item, pymsiecf.leak):
          self._ParseLeak(
              parser_mediator, cache_directories, msiecf_item, recovered=True)
        elif isinstance(msiecf_item, pymsiecf.redirected):
          self._ParseRedirected(parser_mediator, msiecf_item, recovered=True)
        elif isinstance(msiecf_item, pymsiecf.url):
          self._ParseUrl(
              parser_mediator, format_version, cache_directories, msiecf_item,
              recovered=True)
      except IOError as exception:
        parser_mediator.ProduceRecoveryWarning(
            'Unable to parse recovered item: {0:d} with error: {1!s}'.format(
                item_index, exception))

  def _ParseRedirected(
      self, parser_mediator, msiecf_item, recovered=False):
    """Extract data from a MSIE Cache Files (MSIECF) redirected item.

    Every item is stored as an event object, one for each timestamp.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      msiecf_item (pymsiecf.redirected): MSIECF redirected item.
      recovered (Optional[bool]): True if the item was recovered.
    """
    # Redirected items carry no usable timestamp.
    date_time = dfdatetime_semantic_time.NotSet()

    event_data = MSIECFRedirectedEventData()
    event_data.offset = msiecf_item.offset
    event_data.recovered = recovered
    event_data.url = msiecf_item.location

    event = time_events.DateTimeValuesEvent(
        date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME)
    parser_mediator.ProduceEventWithEventData(event, event_data)

  def _ParseUrl(
      self, parser_mediator, format_version, cache_directories, msiecf_item,
      recovered=False):
    """Extract data from a MSIE Cache Files (MSIECF) URL item.

    Every item is stored as an event object, one for each timestamp.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      format_version (str): MSIECF format version.
      cache_directories (list[str]): cache directory names.
      msiecf_item (pymsiecf.url): MSIECF URL item.
      recovered (Optional[bool]): True if the item was recovered.
    """
    # The secondary time can be stored in either UTC or local time
    # this is dependent on what the index.dat file is used for.
    # Either the file path or location string can be used to distinguish
    # between the different type of files.
    timestamp = msiecf_item.get_primary_time_as_integer()
    if not timestamp:
      primary_date_time = dfdatetime_semantic_time.NotSet()
    else:
      primary_date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
    primary_date_time_description = 'Primary Time'

    timestamp = msiecf_item.get_secondary_time_as_integer()
    secondary_date_time = dfdatetime_filetime.Filetime(timestamp=timestamp)
    secondary_date_time_description = 'Secondary Time'

    # Refine the generic time descriptions based on the item type.
    if msiecf_item.type:
      if msiecf_item.type == 'cache':
        primary_date_time_description = definitions.TIME_DESCRIPTION_LAST_ACCESS
        secondary_date_time_description = (
            definitions.TIME_DESCRIPTION_MODIFICATION)
      elif msiecf_item.type == 'cookie':
        primary_date_time_description = definitions.TIME_DESCRIPTION_LAST_ACCESS
        secondary_date_time_description = (
            definitions.TIME_DESCRIPTION_MODIFICATION)
      elif msiecf_item.type == 'history':
        primary_date_time_description = (
            definitions.TIME_DESCRIPTION_LAST_VISITED)
        secondary_date_time_description = (
            definitions.TIME_DESCRIPTION_LAST_VISITED)
      elif msiecf_item.type == 'history-daily':
        primary_date_time_description = (
            definitions.TIME_DESCRIPTION_LAST_VISITED)
        secondary_date_time_description = (
            definitions.TIME_DESCRIPTION_LAST_VISITED)
        # The secondary_date_time is in localtime normalize it to be in UTC.
        secondary_date_time.is_local_time = True
      elif msiecf_item.type == 'history-weekly':
        primary_date_time_description = definitions.TIME_DESCRIPTION_CREATION
        secondary_date_time_description = (
            definitions.TIME_DESCRIPTION_LAST_VISITED)
        # The secondary_date_time is in localtime normalize it to be in UTC.
        secondary_date_time.is_local_time = True

    http_headers = ''
    if msiecf_item.type and msiecf_item.data:
      if msiecf_item.type == 'cache':
        if msiecf_item.data[:4] == b'HTTP':
          # Make sure the HTTP headers are ASCII encoded.
          # TODO: determine correct encoding currently indications that
          # this could be the system narrow string codepage.
          try:
            http_headers = msiecf_item.data[:-1].decode('ascii')
          except UnicodeDecodeError:
            warning_message = (
                'unable to decode HTTP headers of URL record at offset: '
                '0x{0:08x}. Characters that cannot be decoded will be '
                'replaced with "?" or "\\ufffd".').format(msiecf_item.offset)
            if recovered:
              parser_mediator.ProduceRecoveryWarning(warning_message)
            else:
              parser_mediator.ProduceExtractionWarning(warning_message)
            http_headers = msiecf_item.data[:-1].decode(
                'ascii', errors='replace')
      # TODO: parse data of other URL item type like history which requires
      # OLE VT parsing.

    event_data = MSIECFURLEventData()
    event_data.cached_filename = msiecf_item.filename
    event_data.cached_file_size = msiecf_item.cached_file_size
    event_data.cache_directory_index = msiecf_item.cache_directory_index
    event_data.http_headers = http_headers
    event_data.number_of_hits = msiecf_item.number_of_hits
    event_data.offset = msiecf_item.offset
    event_data.recovered = recovered
    event_data.url = msiecf_item.location

    # Only resolve the directory name when the index is within bounds; an
    # item can reference a directory index not present in the file header.
    if 0 <= event_data.cache_directory_index < len(cache_directories):
      event_data.cache_directory_name = (
          cache_directories[event_data.cache_directory_index])

    event = time_events.DateTimeValuesEvent(
        primary_date_time, primary_date_time_description)
    parser_mediator.ProduceEventWithEventData(event, event_data)

    if secondary_date_time.timestamp != 0:
      event = time_events.DateTimeValuesEvent(
          secondary_date_time, secondary_date_time_description,
          time_zone=parser_mediator.timezone)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    expiration_timestamp = msiecf_item.get_expiration_time_as_integer()
    if expiration_timestamp != 0:
      # The expiration time in MSIECF version 4.7 is stored as a FILETIME value
      # in version 5.2 it is stored as a FAT date time value.
      # Since the as_integer function returns the raw integer value we need to
      # apply the right conversion here.
      if format_version == '4.7':
        if expiration_timestamp == 0x7fffffffffffffff:
          expiration_date_time = dfdatetime_semantic_time.Never()
        else:
          expiration_date_time = dfdatetime_filetime.Filetime(
              timestamp=expiration_timestamp)
      else:
        if expiration_timestamp == 0xffffffff:
          expiration_date_time = dfdatetime_semantic_time.Never()
        else:
          expiration_date_time = dfdatetime_fat_date_time.FATDateTime(
              fat_date_time=expiration_timestamp)

      event = time_events.DateTimeValuesEvent(
          expiration_date_time, definitions.TIME_DESCRIPTION_EXPIRATION)
      parser_mediator.ProduceEventWithEventData(event, event_data)

    last_checked_timestamp = msiecf_item.get_last_checked_time_as_integer()
    if last_checked_timestamp != 0:
      last_checked_date_time = dfdatetime_fat_date_time.FATDateTime(
          fat_date_time=last_checked_timestamp)

      event = time_events.DateTimeValuesEvent(
          last_checked_date_time, definitions.TIME_DESCRIPTION_LAST_CHECKED)
      parser_mediator.ProduceEventWithEventData(event, event_data)

  @classmethod
  def GetFormatSpecification(cls):
    """Retrieves the format specification.

    Returns:
      FormatSpecification: format specification.
    """
    format_specification = specification.FormatSpecification(cls.NAME)
    format_specification.AddNewSignature(
        b'Client\x20UrlCache\x20MMF\x20Ver\x20', offset=0)
    return format_specification

  def ParseFileObject(self, parser_mediator, file_object):
    """Parses a MSIE Cache File (MSIECF) file-like object.

    Args:
      parser_mediator (ParserMediator): mediates interactions between parsers
          and other components, such as storage and dfvfs.
      file_object (dfvfs.FileIO): file-like object.
    """
    msiecf_file = pymsiecf.file()
    msiecf_file.set_ascii_codepage(parser_mediator.codepage)

    try:
      msiecf_file.open_file_object(file_object)
    except IOError as exception:
      parser_mediator.ProduceExtractionWarning(
          'unable to open file with error: {0!s}'.format(exception))
      return

    try:
      self._ParseItems(parser_mediator, msiecf_file)
    finally:
      # Always release the libmsiecf handle, even when parsing fails.
      msiecf_file.close()
# Register the parser with the plaso parser manager under its NAME ('msiecf').
manager.ParsersManager.RegisterParser(MSIECFParser)
| |
#!/usr/bin/python2.7
# -*- coding: utf-8 -*-
# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module defines the ScriptManager class. This is used to manage
running shell scripts on a remote POSIX device, while keeping the scripts
in your local storage.
"""
import os
import expect
class ScriptSetEntry(object):
    """
    One script managed by a ScriptManager. Do not instantiate this class
    directly; use the ScriptManager add_script() method instead.
    """
    def __init__(self, name, script, rvset=None):
        self.name = name        # remote file name (under /tmp)
        self.script = script    # script body; cleared after it is pushed
        self.rvset = rvset      # optional list of output-matching regexes
        self.pushed = 0         # non-zero once stuffed to the device

    def clear_script(self):
        """Drop the script body (it is no longer needed after a push)."""
        self.script = None

    def __str__(self):
        fields = (self.name, self.script, self.rvset, self.pushed)
        return "ScriptSetEntry:\n name=%s\n script=%s\n rvset=%s\n pushed=%d\n" % fields
class ScriptManager(expect.Expect):
"""
An instance of this class manages "stuffing", running, and checking
results of shell scripts on remote POSIX shells.
Usage:
ss = ScriptManager(procobject, [prompt], [timeout])
Where console is an Console.Console instance or a Telnet instance, which
should already be connected.
Methods:
add_script(pathname, script, matchset)
Adds a script to a set of scripts that this instance manages.
The pathname is the name of the script on the remote system, and also
servers as the index for the other methods.
the script parameter is the actual script, as a string.
the matchset should be a list of regular expressions that match
possible outputs of the script.
push_script(name)
Stuffs the named script through the connection to the remote shell.
This is run automatically by the run_script method if it has not
already been pushed.
run_script(name)
runs the named script on the romote system. Return an tuple of (index,
re_matchobject, text) as matched by the matchset. See the
documentation for telnetlib.Telnet.expect() for details.
"""
def __init__(self, fo=None, prompt="$", timeout=30.0):
expect.Expect.__init__(self, fo, prompt, timeout)
self.scriptset = {}
def add_script(self, name, script, rvset=None):
"""
add_script(name, script, rvset)
Adds a script to the set to be managed. You supply a name that will be
used to reference this script (and is used for the file name on the remote
host), the script itself as a string, and an optional "rvset". The rvset
is a python list of regular expressions matching strings that the main
script could possibly emit. These will be matched when the script is run
allowing you to take different actions in you controlling python script
depending on the returned value.
"""
self.scriptset[name] = ScriptSetEntry(name, script, rvset)
def load_script(self, pathname, rvset=None):
"""load_script(filename, [rvset])
loads the file given by the filename parameter if not already loaded."""
name = os.path.basename(pathname)
if self.scriptset.has_key(name):
return
body = open(pathname).read()
self.add_script(name, body, rvset)
def has_script(self, name):
return self.scriptset.has_key(name)
def clear_script(self, name):
if self.scriptset.has_key(name):
del self.scriptset[name]
def push_script(self, name):
"""
push_script(name)
Pushes the named script down to the device, and makes it runnable (+x mode).
"""
sse = self.scriptset[name]
self.send("\r")
self.wait_for_prompt()
self.send("cat - > /tmp/%s\r" % sse.name)
# delay between lines to try and avoid serial port overruns.
for line in sse.script.split("\n"):
self.send(line+"\n")
self.delay(1)
self.send(chr(4)) # ^D
self.send("\r")
self.wait_for_prompt()
self.send("chmod +x /tmp/%s\r" % sse.name)
rv = self.wait_for_prompt()
sse.pushed = 1
sse.clear_script() # save space
return rv
def write_file(self, path, body):
self.send("\r")
self.wait_for_prompt()
self.send("cat - > %s\r" % path)
self.delay(0.2)
self.send(body)
self.send("\n")
self.send(chr(4)) # ^D
self.send("\r")
self.wait_for_prompt()
def run_script(self, name, async=0):
"""
run_script(name, [asyncflag])
Runs the named script on the remote device. If it has not been pushed
it will be pushed first.
If an rvset is included, this will return a match object of the pattern from
the set that matched (and will have its listindex attribute set to the position
in that list). if the asyncflag is given (evaluates to true), the script is
run in the background on the remote machine.
"""
sse = self.scriptset[name]
if not sse.pushed:
self.push_script(name)
if not sse.pushed: # not connected?
raise expect.ExpectError, "ScriptManager: tried to run script that is not pushed."
if async:
self.send("/tmp/%s &\r" % name) # invoke the script
self.wait_for_prompt()
return None
#else
self.send("/tmp/%s\r" % name) # invoke the script
if sse.rvset:
mo = self.expect(sse.rvset)
return mo
else: # no return value checks
self.wait_for_prompt()
return None
def pass_or_fail(self, name):
"""
Run a script that has simple pass-fail patterns. Return true, or false.
The rvset should include two patterns. The first pattern in the rvset
(return-value-set) should match a true, or positive, condition. The second
pattern should match a false, or negative, condition. Raises an exception
if script could not be run at all.
"""
if not self.scriptset[name].pushed:
self.push_script(name)
if not self.scriptset[name].pushed: # not connected?
raise RuntimeError, "could not push script to device"
self.send("%s\r" % name)
mo = self.expect(self.scriptset[name].rvset, 10.0)
self.discard()
return not mo.listindex # force boolean
def exit(self):
rv = None
self.send("\rexit\r")
try: # fd might have gone away by now.
rv = self.read() # this won't block now due to eof condition.
except:
pass
return rv
| |
import builtins
import imp
from importlib.test.import_ import test_relative_imports
from importlib.test.import_ import util as importlib_util
import marshal
import os
import platform
import py_compile
import random
import stat
import sys
import unittest
import textwrap
import errno
from test.support import (
EnvironmentVarGuard, TESTFN, check_warnings, forget, is_jython,
make_legacy_pyc, rmtree, run_unittest, swap_attr, swap_item, temp_umask,
unlink, unload)
from test import script_helper
def remove_files(name):
    """Remove all source/compiled artifacts for *name* plus __pycache__."""
    for suffix in (".py", ".pyc", ".pyo", ".pyw", "$py.class"):
        unlink(name + suffix)
    rmtree('__pycache__')
class ImportTests(unittest.TestCase):
    def setUp(self):
        # Remove any stale TESTFN artifacts before each test.
        remove_files(TESTFN)
    def tearDown(self):
        # Drop TESTFN from sys.modules so later tests re-import fresh.
        unload(TESTFN)
    # NOTE(review): this rebinds setUp to tearDown, so the remove_files()
    # version above is never used as setUp -- both hooks run unload(TESTFN).
    # Mirrors the historical CPython test_import layout; confirm intent.
    setUp = tearDown
def test_case_sensitivity(self):
# Brief digression to test that import is case-sensitive: if we got
# this far, we know for sure that "random" exists.
with self.assertRaises(ImportError):
import RAnDoM
    def test_double_const(self):
        # Another brief digression to test the accuracy of manifest float
        # constants. Importing the module executes its assertions, so a
        # mis-compiled constant raises at import time.
        from test import double_const # don't blink -- that *was* the test
    def test_import(self):
        # Write a tiny module with random contents, import it, and check the
        # imported values round-trip; repeated for several file extensions.
        def test_with_extension(ext):
            # The extension is normally ".py", perhaps ".pyw".
            source = TESTFN + ext
            pyo = TESTFN + ".pyo"
            if is_jython:
                pyc = TESTFN + "$py.class"
            else:
                pyc = TESTFN + ".pyc"
            with open(source, "w") as f:
                print("# This tests Python's ability to import a",
                      ext, "file.", file=f)
                # Random values guard against importing a stale module.
                a = random.randrange(1000)
                b = random.randrange(1000)
                print("a =", a, file=f)
                print("b =", b, file=f)
            if TESTFN in sys.modules:
                del sys.modules[TESTFN]
            try:
                try:
                    mod = __import__(TESTFN)
                except ImportError as err:
                    self.fail("import from %s failed: %s" % (ext, err))
                self.assertEqual(mod.a, a,
                                 "module loaded (%s) but contents invalid" % mod)
                self.assertEqual(mod.b, b,
                                 "module loaded (%s) but contents invalid" % mod)
            finally:
                # Always clean source and compiled files, pass or fail.
                forget(TESTFN)
                unlink(source)
                unlink(pyc)
                unlink(pyo)
        # Make the cwd importable for the duration of the test only.
        sys.path.insert(0, os.curdir)
        try:
            test_with_extension(".py")
            if sys.platform.startswith("win"):
                # Windows is case-preserving; exercise miscased extensions.
                for ext in [".PY", ".Py", ".pY", ".pyw", ".PYW", ".pYw"]:
                    test_with_extension(ext)
        finally:
            del sys.path[0]
    @unittest.skipUnless(os.name == 'posix',
                         "test meaningful only on posix systems")
    def test_execute_bit_not_copied(self):
        # Issue 6070: under posix .pyc files got their execute bit set if
        # the .py file had the execute bit set, but they aren't executable.
        with temp_umask(0o022):
            sys.path.insert(0, os.curdir)
            try:
                fname = TESTFN + os.extsep + "py"
                # Create an empty module whose source has the execute bits set.
                open(fname, 'w').close()
                os.chmod(fname, (stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH |
                                 stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH))
                fn = imp.cache_from_source(fname)
                unlink(fn)
                __import__(TESTFN)
                if not os.path.exists(fn):
                    self.fail("__import__ did not result in creation of "
                              "either a .pyc or .pyo file")
                s = os.stat(fn)
                # The cached file must be readable only -- no execute bits.
                self.assertEqual(stat.S_IMODE(s.st_mode),
                                 stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
            finally:
                del sys.path[0]
                remove_files(TESTFN)
                unload(TESTFN)
def test_imp_module(self):
# Verify that the imp module can correctly load and find .py files
# XXX (ncoghlan): It would be nice to use support.CleanImport
# here, but that breaks because the os module registers some
# handlers in copy_reg on import. Since CleanImport doesn't
# revert that registration, the module is left in a broken
# state after reversion. Reinitialising the module contents
# and just reverting os.environ to its previous state is an OK
# workaround
orig_path = os.path
orig_getenv = os.getenv
with EnvironmentVarGuard():
x = imp.find_module("os")
self.addCleanup(x[0].close)
new_os = imp.load_module("os", *x)
self.assertIs(os, new_os)
self.assertIs(orig_path, new_os.path)
self.assertIsNot(orig_getenv, new_os.getenv)
def test_bug7732(self):
source = TESTFN + '.py'
os.mkdir(source)
try:
self.assertRaisesRegex(ImportError, '^No module',
imp.find_module, TESTFN, ["."])
finally:
os.rmdir(source)
def test_module_with_large_stack(self, module='longlist'):
# Regression test for http://bugs.python.org/issue561858.
filename = module + '.py'
# Create a file with a list of 65000 elements.
with open(filename, 'w') as f:
f.write('d = [\n')
for i in range(65000):
f.write('"",\n')
f.write(']')
try:
# Compile & remove .py file; we only need .pyc (or .pyo).
# Bytecode must be relocated from the PEP 3147 bytecode-only location.
py_compile.compile(filename)
finally:
unlink(filename)
# Need to be able to load from current dir.
sys.path.append('')
try:
make_legacy_pyc(filename)
# This used to crash.
exec('import ' + module)
finally:
# Cleanup.
del sys.path[-1]
unlink(filename + 'c')
unlink(filename + 'o')
def test_failing_import_sticks(self):
source = TESTFN + ".py"
with open(source, "w") as f:
print("a = 1/0", file=f)
# New in 2.4, we shouldn't be able to import that no matter how often
# we try.
sys.path.insert(0, os.curdir)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
try:
for i in [1, 2, 3]:
self.assertRaises(ZeroDivisionError, __import__, TESTFN)
self.assertNotIn(TESTFN, sys.modules,
"damaged module in sys.modules on %i try" % i)
finally:
del sys.path[0]
remove_files(TESTFN)
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace
import test as x
import test.support
self.assertTrue(x is test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w
import test.support as y
self.assertTrue(y is test.support, y.__name__)
def test_failing_reload(self):
# A failing reload should leave the module object in sys.modules.
source = TESTFN + os.extsep + "py"
with open(source, "w") as f:
f.write("a = 1\nb=2\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertIn(TESTFN, sys.modules)
self.assertEqual(mod.a, 1, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
# On WinXP, just replacing the .py file wasn't enough to
# convince reload() to reparse it. Maybe the timestamp didn't
# move enough. We force it to get reparsed by removing the
# compiled file too.
remove_files(TESTFN)
# Now damage the module.
with open(source, "w") as f:
f.write("a = 10\nb=20//0\n")
self.assertRaises(ZeroDivisionError, imp.reload, mod)
# But we still expect the module to be in sys.modules.
mod = sys.modules.get(TESTFN)
self.assertIsNot(mod, None, "expected module to be in sys.modules")
# We should have replaced a w/ 10, but the old b value should
# stick.
self.assertEqual(mod.a, 10, "module has wrong attribute values")
self.assertEqual(mod.b, 2, "module has wrong attribute values")
finally:
del sys.path[0]
remove_files(TESTFN)
unload(TESTFN)
def test_file_to_source(self):
# check if __file__ points to the source file where available
source = TESTFN + ".py"
with open(source, "w") as f:
f.write("test = None\n")
sys.path.insert(0, os.curdir)
try:
mod = __import__(TESTFN)
self.assertTrue(mod.__file__.endswith('.py'))
os.remove(source)
del sys.modules[TESTFN]
make_legacy_pyc(source)
mod = __import__(TESTFN)
base, ext = os.path.splitext(mod.__file__)
self.assertIn(ext, ('.pyc', '.pyo'))
finally:
del sys.path[0]
remove_files(TESTFN)
if TESTFN in sys.modules:
del sys.modules[TESTFN]
def test_import_name_binding(self):
# import x.y.z binds x in the current namespace.
import test as x
import test.support
self.assertIs(x, test, x.__name__)
self.assertTrue(hasattr(test.support, "__file__"))
# import x.y.z as w binds z as w.
import test.support as y
self.assertIs(y, test.support, y.__name__)
def test_import_initless_directory_warning(self):
with check_warnings(('', ImportWarning)):
# Just a random non-package directory we always expect to be
# somewhere in sys.path...
self.assertRaises(ImportError, __import__, "site-packages")
def test_import_by_filename(self):
path = os.path.abspath(TESTFN)
encoding = sys.getfilesystemencoding()
try:
path.encode(encoding)
except UnicodeEncodeError:
self.skipTest('path is not encodable to {}'.format(encoding))
with self.assertRaises(ImportError) as c:
__import__(path)
self.assertEqual("Import by filename is not supported.",
c.exception.args[0])
def test_import_in_del_does_not_crash(self):
# Issue 4236
testfn = script_helper.make_script('', TESTFN, textwrap.dedent("""\
import sys
class C:
def __del__(self):
import imp
sys.argv.insert(0, C())
"""))
script_helper.assert_python_ok(testfn)
def test_timestamp_overflow(self):
# A modification timestamp larger than 2**32 should not be a problem
# when importing a module (issue #11235).
sys.path.insert(0, os.curdir)
try:
source = TESTFN + ".py"
compiled = imp.cache_from_source(source)
with open(source, 'w') as f:
pass
try:
os.utime(source, (2 ** 33 - 5, 2 ** 33 - 5))
except OverflowError:
self.skipTest("cannot set modification time to large integer")
except OSError as e:
if e.errno != getattr(errno, 'EOVERFLOW', None):
raise
self.skipTest("cannot set modification time to large integer ({})".format(e))
__import__(TESTFN)
# The pyc file was created.
os.stat(compiled)
finally:
del sys.path[0]
remove_files(TESTFN)
class PycRewritingTests(unittest.TestCase):
    # Test that the `co_filename` attribute on code objects always points
    # to the right file, even when various things happen (e.g. both the .py
    # and the .pyc file are renamed).

    module_name = "unlikely_module_name"
    # Module body written to disk by setUp; it records the file names seen
    # from three vantage points: the executing frame, __file__, and a
    # function's code object.  `constant = 1` is a marker replaced by
    # test_foreign_code.
    module_source = """
import sys
code_filename = sys._getframe().f_code.co_filename
module_filename = __file__
constant = 1
def func():
    pass
func_filename = func.__code__.co_filename
"""
    dir_name = os.path.abspath(TESTFN)
    file_name = os.path.join(dir_name, module_name) + os.extsep + "py"
    compiled_name = imp.cache_from_source(file_name)

    def setUp(self):
        """Write the module source into a fresh directory on sys.path."""
        self.sys_path = sys.path[:]
        self.orig_module = sys.modules.pop(self.module_name, None)
        os.mkdir(self.dir_name)
        with open(self.file_name, "w") as f:
            f.write(self.module_source)
        sys.path.insert(0, self.dir_name)

    def tearDown(self):
        """Restore sys.path/sys.modules and remove all created files."""
        sys.path[:] = self.sys_path
        if self.orig_module is not None:
            sys.modules[self.module_name] = self.orig_module
        else:
            unload(self.module_name)
        unlink(self.file_name)
        unlink(self.compiled_name)
        rmtree(self.dir_name)

    def import_module(self):
        """Import the test module and return the module object."""
        ns = globals()
        __import__(self.module_name, ns, ns)
        return sys.modules[self.module_name]

    def test_basics(self):
        # First import compiles the source; the second (after dropping the
        # module from sys.modules) loads from the cached bytecode.  The
        # reported file names must be identical either way.
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)
        del sys.modules[self.module_name]
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)

    def test_incorrect_code_name(self):
        # Compile with a bogus dfile; the import machinery must rewrite
        # co_filename back to the real source location.
        py_compile.compile(self.file_name, dfile="another_module.py")
        mod = self.import_module()
        self.assertEqual(mod.module_filename, self.file_name)
        self.assertEqual(mod.code_filename, self.file_name)
        self.assertEqual(mod.func_filename, self.file_name)

    def test_module_without_source(self):
        # With no .py alongside, the embedded (bogus) co_filename from the
        # pyc survives, while __file__ points at the pyc actually loaded.
        target = "another_module.py"
        py_compile.compile(self.file_name, dfile=target)
        os.remove(self.file_name)
        pyc_file = make_legacy_pyc(self.file_name)
        mod = self.import_module()
        self.assertEqual(mod.module_filename, pyc_file)
        self.assertEqual(mod.code_filename, target)
        self.assertEqual(mod.func_filename, target)

    def test_foreign_code(self):
        # Splice a code object from another file into the compiled module
        # and check its co_filename is preserved on import.
        py_compile.compile(self.file_name)
        with open(self.compiled_name, "rb") as f:
            header = f.read(8)  # magic number + timestamp
            code = marshal.load(f)
        constants = list(code.co_consts)
        foreign_code = test_main.__code__
        pos = constants.index(1)  # the `constant = 1` marker
        constants[pos] = foreign_code
        code = type(code)(code.co_argcount, code.co_kwonlyargcount,
                          code.co_nlocals, code.co_stacksize,
                          code.co_flags, code.co_code, tuple(constants),
                          code.co_names, code.co_varnames, code.co_filename,
                          code.co_name, code.co_firstlineno, code.co_lnotab,
                          code.co_freevars, code.co_cellvars)
        with open(self.compiled_name, "wb") as f:
            f.write(header)
            marshal.dump(code, f)
        mod = self.import_module()
        self.assertEqual(mod.constant.co_filename, foreign_code.co_filename)
class PathsTests(unittest.TestCase):
    """Import tests exercising unusual sys.path entries."""

    SAMPLES = ('test', 'test\u00e4\u00f6\u00fc\u00df', 'test\u00e9\u00e8',
               'test\u00b0\u00b3\u00b2')
    path = TESTFN

    def setUp(self):
        os.mkdir(self.path)
        self.syspath = sys.path[:]

    def tearDown(self):
        sys.path[:] = self.syspath
        rmtree(self.path)

    def test_trailing_slash(self):
        # Regression test for http://bugs.python.org/issue1293.
        module_file = os.path.join(self.path, 'test_trailing_slash.py')
        with open(module_file, 'w') as f:
            f.write("testdata = 'test_trailing_slash'")
        sys.path.append(self.path + '/')
        module = __import__("test_trailing_slash")
        self.assertEqual(module.testdata, 'test_trailing_slash')
        unload("test_trailing_slash")

    def _test_UNC_path(self):
        # Regression test for http://bugs.python.org/issue3677.
        with open(os.path.join(self.path, 'test_trailing_slash.py'), 'w') as f:
            f.write("testdata = 'test_trailing_slash'")
        # Create the UNC path, like \\myhost\c$\foo\bar.
        abspath = os.path.abspath(self.path)
        import socket
        hostname = socket.gethostname()
        drive = abspath[0]
        unc = "\\\\%s\\%s$" % (hostname, drive)
        unc += abspath[2:]
        sys.path.append(abspath)
        module = __import__("test_trailing_slash")
        self.assertEqual(module.testdata, 'test_trailing_slash')
        unload("test_trailing_slash")

    # UNC paths only exist on Windows.
    if sys.platform == "win32":
        test_UNC_path = _test_UNC_path
class RelativeImportTests(unittest.TestCase):
    """Tests for explicit relative import syntax (PEP 328)."""

    def tearDown(self):
        unload("test.relimport")
    setUp = tearDown

    def test_relimport_star(self):
        # A sibling module imported relatively must be fully initialized.
        from . import relimport
        self.assertTrue(hasattr(relimport, "RelativeImportTests"))

    def test_issue3221(self):
        # Note for mergers: the 'absolute' tests from the 2.x branch
        # are missing in Py3k because implicit relative imports are
        # a thing of the past
        #
        # Regression test for http://bugs.python.org/issue3221.
        def check_relative(ns):
            exec("from . import relimport", ns)

        # Relative import OK with __package__ and __name__ correct.
        check_relative(dict(__package__='test',
                            __name__='test.notarealmodule'))
        # Relative import OK with only __name__ wrong.
        check_relative(dict(__package__='test',
                            __name__='notarealpkg.notarealmodule'))
        # Relative import fails with only __package__ wrong.
        self.assertRaises(SystemError, check_relative,
                          dict(__package__='foo',
                               __name__='test.notarealmodule'))
        # Relative import fails with __package__ and __name__ wrong.
        self.assertRaises(SystemError, check_relative,
                          dict(__package__='foo',
                               __name__='notarealpkg.notarealmodule'))
        # Relative import fails with package set to a non-string.
        self.assertRaises(ValueError, check_relative,
                          dict(__package__=object()))

    def test_absolute_import_without_future(self):
        # If explicit relative import syntax is used, then do not try
        # to perform an absolute import in the face of failure.
        # Issue #7902.
        with self.assertRaises(ImportError):
            from .os import sep
            self.fail("explicit relative import triggered an "
                      "implicit absolute import")
class OverridingImportBuiltinTests(unittest.TestCase):
    """Interactions between builtins.__import__ overrides and sys.modules."""

    def test_override_builtin(self):
        # Test that overriding builtins.__import__ can bypass sys.modules.
        import os

        def import_os():
            import os
            return os

        self.assertEqual(import_os(), os)  # Quick sanity check.
        with swap_attr(builtins, "__import__", lambda *x: 5):
            self.assertEqual(import_os(), 5)
        # Test what happens when we shadow __import__ in globals(); this
        # currently does not impact the import process, but if this changes,
        # other code will need to change, so keep this test as a tripwire.
        with swap_item(globals(), "__import__", lambda *x: 5):
            self.assertEqual(import_os(), os)
class PycacheTests(unittest.TestCase):
    # Test the various PEP 3147 related behaviors.

    # Implementation/version tag used in __pycache__ file names.
    tag = imp.get_tag()

    def _clean(self):
        """Forget the test module and remove its source and cache files."""
        forget(TESTFN)
        rmtree('__pycache__')
        unlink(self.source)

    def setUp(self):
        # Write a trivial source module into the current directory and make
        # the current directory importable.
        self.source = TESTFN + '.py'
        self._clean()
        with open(self.source, 'w') as fp:
            print('# This is a test file written by test_import.py', file=fp)
        sys.path.insert(0, os.curdir)

    def tearDown(self):
        assert sys.path[0] == os.curdir, 'Unexpected sys.path[0]'
        del sys.path[0]
        self._clean()

    def test_import_pyc_path(self):
        # Importing a source module creates a tagged pyc/pyo inside
        # __pycache__ (suffix depends on whether -O is in effect).
        self.assertFalse(os.path.exists('__pycache__'))
        __import__(TESTFN)
        self.assertTrue(os.path.exists('__pycache__'))
        self.assertTrue(os.path.exists(os.path.join(
            '__pycache__', '{}.{}.py{}'.format(
            TESTFN, self.tag, __debug__ and 'c' or 'o'))))

    @unittest.skipUnless(os.name == 'posix',
                         "test meaningful only on posix systems")
    @unittest.skipIf(hasattr(os, 'geteuid') and os.geteuid() == 0,
            "due to varying filesystem permission semantics (issue #11956)")
    def test_unwritable_directory(self):
        # When the umask causes the new __pycache__ directory to be
        # unwritable, the import still succeeds but no .pyc file is written.
        with temp_umask(0o222):
            __import__(TESTFN)
        self.assertTrue(os.path.exists('__pycache__'))
        self.assertFalse(os.path.exists(os.path.join(
            '__pycache__', '{}.{}.pyc'.format(TESTFN, self.tag))))

    def test_missing_source(self):
        # With PEP 3147 cache layout, removing the source but leaving the pyc
        # file does not satisfy the import.
        __import__(TESTFN)
        pyc_file = imp.cache_from_source(self.source)
        self.assertTrue(os.path.exists(pyc_file))
        os.remove(self.source)
        forget(TESTFN)
        self.assertRaises(ImportError, __import__, TESTFN)

    def test_missing_source_legacy(self):
        # Like test_missing_source() except that for backward compatibility,
        # when the pyc file lives where the py file would have been (and named
        # without the tag), it is importable.  The __file__ of the imported
        # module is the pyc location.
        __import__(TESTFN)
        # pyc_file gets removed in _clean() via tearDown().
        pyc_file = make_legacy_pyc(self.source)
        os.remove(self.source)
        unload(TESTFN)
        m = __import__(TESTFN)
        self.assertEqual(m.__file__,
                         os.path.join(os.curdir, os.path.relpath(pyc_file)))

    def test___cached__(self):
        # Modules now also have an __cached__ that points to the pyc file.
        m = __import__(TESTFN)
        pyc_file = imp.cache_from_source(TESTFN + '.py')
        self.assertEqual(m.__cached__, os.path.join(os.curdir, pyc_file))

    def test___cached___legacy_pyc(self):
        # Like test___cached__() except that for backward compatibility,
        # when the pyc file lives where the py file would have been (and named
        # without the tag), it is importable.  The __cached__ of the imported
        # module is the pyc location.
        __import__(TESTFN)
        # pyc_file gets removed in _clean() via tearDown().
        pyc_file = make_legacy_pyc(self.source)
        os.remove(self.source)
        unload(TESTFN)
        m = __import__(TESTFN)
        self.assertEqual(m.__cached__,
                         os.path.join(os.curdir, os.path.relpath(pyc_file)))

    def test_package___cached__(self):
        # Like test___cached__ but for packages.
        def cleanup():
            rmtree('pep3147')
        os.mkdir('pep3147')
        self.addCleanup(cleanup)
        # Touch the __init__.py
        with open(os.path.join('pep3147', '__init__.py'), 'w'):
            pass
        with open(os.path.join('pep3147', 'foo.py'), 'w'):
            pass
        unload('pep3147.foo')
        unload('pep3147')
        m = __import__('pep3147.foo')
        init_pyc = imp.cache_from_source(
            os.path.join('pep3147', '__init__.py'))
        self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
        foo_pyc = imp.cache_from_source(os.path.join('pep3147', 'foo.py'))
        self.assertEqual(sys.modules['pep3147.foo'].__cached__,
                         os.path.join(os.curdir, foo_pyc))

    def test_package___cached___from_pyc(self):
        # Like test___cached__ but ensuring __cached__ when imported from a
        # PEP 3147 pyc file.
        def cleanup():
            rmtree('pep3147')
        os.mkdir('pep3147')
        self.addCleanup(cleanup)
        unload('pep3147.foo')
        unload('pep3147')
        # Touch the __init__.py
        with open(os.path.join('pep3147', '__init__.py'), 'w'):
            pass
        with open(os.path.join('pep3147', 'foo.py'), 'w'):
            pass
        # Import twice: the second import loads from the cached bytecode.
        m = __import__('pep3147.foo')
        unload('pep3147.foo')
        unload('pep3147')
        m = __import__('pep3147.foo')
        init_pyc = imp.cache_from_source(
            os.path.join('pep3147', '__init__.py'))
        self.assertEqual(m.__cached__, os.path.join(os.curdir, init_pyc))
        foo_pyc = imp.cache_from_source(os.path.join('pep3147', 'foo.py'))
        self.assertEqual(sys.modules['pep3147.foo'].__cached__,
                         os.path.join(os.curdir, foo_pyc))
class RelativeImportFromImportlibTests(test_relative_imports.RelativeImports):
    """Run importlib's relative-import tests against builtin __import__."""

    def setUp(self):
        # Remember the flag so tearDown can restore it exactly.
        self._saved_flag = importlib_util.using___import__
        importlib_util.using___import__ = True

    def tearDown(self):
        importlib_util.using___import__ = self._saved_flag
def test_main(verbose=None):
    """Run every test case defined in this module."""
    suites = (ImportTests, PycacheTests, PycRewritingTests, PathsTests,
              RelativeImportTests, OverridingImportBuiltinTests,
              RelativeImportFromImportlibTests)
    run_unittest(*suites)
if __name__ == '__main__':
    # Test needs to be a package, so we can do relative imports.
    # Re-import test_main from the canonical package location so that
    # __package__/__name__ are set up for the relative-import tests.
    from test.test_import import test_main
    test_main()
| |
# Copyright (c) 2014, Salesforce.com, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of Salesforce.com nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
# TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
A conjugate model on normally-distributed univariate data in which the
prior on the mean is normally distributed, and the prior on the variance
is Inverse-Chi-Square distributed.
The equations used here are from \cite{murphy2007conjugate}.
Murphy, K. "Conjugate Bayesian analysis of the Gaussian distribution" (2007)
Equation numbers referenced below are from this paper.
"""
from distributions.dbg.special import sqrt, log, pi, gammaln
from distributions.dbg.random import sample_chi2, sample_normal
from distributions.mixins import SharedMixin, GroupIoMixin, SharedIoMixin
# scalar score_student_t, see distributions.dbg.random.score_student_t
# for the multivariate generalization
def score_student_t(x, nu, mu, sigmasq):
    """
    Log density of a location-scale Student-t at x with nu degrees of
    freedom, location mu and scale-squared sigmasq.

    \cite{murphy2007conjugate}, Eq. 304
    """
    resid = x - mu
    z = resid * resid / sigmasq
    half_nu_plus_one = .5 * (nu + 1.)
    log_norm = gammaln(half_nu_plus_one) - gammaln(.5 * nu)
    log_norm -= .5 * log(nu * pi * sigmasq)
    return log_norm - half_nu_plus_one * log(1. + z / nu)
# Canonical model name used by the distributions serialization/test harness.
NAME = 'NormalInverseChiSq'
# Example hyperparameters and observations used by the shared test suite.
EXAMPLES = [
    {
        'shared': {'mu': 0., 'kappa': 1., 'sigmasq': 1., 'nu': 1.},
        'values': [-4.0, -2.0, -1.0, -0.5, 0.0, 0.5, 1.0, 2.0, 4.0],
    },
]
# The observation type for this model.
Value = float
class Shared(SharedMixin, SharedIoMixin):
    """Hyperparameters of the Normal-Inverse-Chi-Square prior."""

    def __init__(self):
        self.mu = None
        self.kappa = None
        self.sigmasq = None
        self.nu = None

    def plus_group(self, group):
        """
        Return posterior hyperparameters given a group's statistics.

        \cite{murphy2007conjugate}, Eqs. 141-144
        """
        kappa_post = self.kappa + group.count
        nu_post = self.nu + group.count
        mean_shift = self.mu - group.mean
        weighted_total = self.kappa * self.mu + group.mean * group.count
        sigmasq_post = 1. / nu_post * (
            self.nu * self.sigmasq
            + group.count_times_variance
            + (group.count * self.kappa * mean_shift * mean_shift)
            / kappa_post)
        post = self.__class__()
        post.mu = weighted_total / kappa_post
        post.kappa = kappa_post
        post.nu = nu_post
        post.sigmasq = sigmasq_post
        return post

    def load(self, raw):
        """Read hyperparameters from a dict of raw values."""
        self.mu = float(raw['mu'])
        self.kappa = float(raw['kappa'])
        self.sigmasq = float(raw['sigmasq'])
        self.nu = float(raw['nu'])

    def dump(self):
        """Return the hyperparameters as a plain dict."""
        return {
            'mu': self.mu,
            'kappa': self.kappa,
            'sigmasq': self.sigmasq,
            'nu': self.nu,
        }

    def protobuf_load(self, message):
        """Read hyperparameters from a protobuf message."""
        self.mu = float(message.mu)
        self.kappa = float(message.kappa)
        self.sigmasq = float(message.sigmasq)
        self.nu = float(message.nu)

    def protobuf_dump(self, message):
        """Write hyperparameters into a protobuf message."""
        message.Clear()
        message.mu = self.mu
        message.kappa = self.kappa
        message.sigmasq = self.sigmasq
        message.nu = self.nu
class Group(GroupIoMixin):
    """Sufficient statistics (count, mean, scaled variance) of a group."""

    def __init__(self):
        self.count = None
        self.mean = None
        self.count_times_variance = None  # = count * variance

    def init(self, shared):
        self.count = 0
        self.mean = 0.
        self.count_times_variance = 0.

    def add_value(self, shared, value):
        # Welford's online mean/variance update.
        self.count += 1
        delta = value - self.mean
        self.mean += delta / self.count
        self.count_times_variance += delta * (value - self.mean)

    def add_repeated_value(self, shared, value, count):
        """
        Add `value` to the group `count` times in O(1).

        Bug fix: the previous implementation used
        ``delta = count * value - self.mean``, which is only correct when
        the group is empty or count == 1.  This version merges a virtual
        group of `count` identical observations (zero variance), so it is
        exactly equivalent to calling add_value `count` times and reduces
        to add_value when count == 1.
        """
        self.count += count
        delta = value - self.mean
        self.mean += count * delta / self.count
        self.count_times_variance += count * delta * (value - self.mean)

    def remove_value(self, shared, value):
        # Inverse of add_value; reset stats at the count <= 1 boundaries
        # to avoid accumulating float error.
        total = self.mean * self.count
        delta = value - self.mean
        self.count -= 1
        if self.count == 0:
            self.mean = 0.
        else:
            self.mean = (total - value) / self.count
        if self.count <= 1:
            self.count_times_variance = 0.
        else:
            self.count_times_variance -= delta * (value - self.mean)

    def merge(self, shared, source):
        # Chan et al. pooled update for combining two groups' statistics.
        count = self.count + source.count
        delta = source.mean - self.mean
        source_part = float(source.count) / count
        cross_part = self.count * source_part
        self.count = count
        self.mean += source_part * delta
        self.count_times_variance += \
            source.count_times_variance + cross_part * delta * delta

    def score_value(self, shared, value):
        """
        Posterior predictive log density of `value`.

        \cite{murphy2007conjugate}, Eq. 176
        """
        post = shared.plus_group(self)
        return score_student_t(
            value,
            post.nu,
            post.mu,
            ((1 + post.kappa) * post.sigmasq) / post.kappa)

    def score_data(self, shared):
        """
        Marginal log likelihood of the group's data.

        \cite{murphy2007conjugate}, Eq. 171
        """
        # 1.1447298858493991 ~= log(pi), from the pi**(-n/2) factor.
        post = shared.plus_group(self)
        return gammaln(post.nu / 2.) - gammaln(shared.nu / 2.) \
            + 0.5 * log(shared.kappa / post.kappa) \
            + (0.5 * shared.nu) * log(shared.nu * shared.sigmasq) \
            - (0.5 * post.nu) * log(post.nu * post.sigmasq) \
            - self.count / 2. * 1.1447298858493991

    def sample_value(self, shared):
        """Draw one observation from the posterior predictive."""
        sampler = Sampler()
        sampler.init(shared, self)
        return sampler.eval(shared)

    def load(self, raw):
        self.count = int(raw['count'])
        self.mean = float(raw['mean'])
        self.count_times_variance = float(raw['count_times_variance'])

    def dump(self):
        return {
            'count': self.count,
            'mean': self.mean,
            'count_times_variance': self.count_times_variance,
        }

    def protobuf_load(self, message):
        self.count = int(message.count)
        self.mean = float(message.mean)
        self.count_times_variance = float(message.count_times_variance)

    def protobuf_dump(self, message):
        message.count = self.count
        message.mean = self.mean
        message.count_times_variance = self.count_times_variance
class Sampler(object):
    """Draws (mu, sigma) from the posterior, then observations from them."""

    def init(self, shared, group=None):
        """
        Draw samples from the marginal posteriors of mu and sigmasq.

        \cite{murphy2007conjugate}, Eqs. 156 & 167
        """
        if group is None:
            post = shared
        else:
            post = shared.plus_group(group)
        # Sample from the inverse-chi^2 using the transform from the chi^2.
        scaled_variance = post.nu * post.sigmasq / sample_chi2(post.nu)
        self.sigma = sqrt(scaled_variance)
        self.mu = sample_normal(post.mu, sqrt(scaled_variance / post.kappa))

    def eval(self, shared):
        """Draw one observation given the sampled (mu, sigma)."""
        return sample_normal(self.mu, self.sigma)
def sample_group(shared, size):
    """Draw `size` observations from a fresh group's posterior sampler."""
    group = Group()
    group.init(shared)
    sampler = Sampler()
    sampler.init(shared, group)
    samples = []
    for _ in xrange(size):
        samples.append(sampler.eval(shared))
    return samples
| |
""" High-level classes for reading HDF5 files. """
from collections import deque
from collections.abc import Mapping, Sequence
import os
import posixpath
import numpy as np
from .core import Reference
from .dataobjects import DataObjects
from .misc_low_level import SuperBlock
class Group(Mapping):
    """
    An HDF5 Group which may hold attributes, datasets, or other groups.

    Attributes
    ----------
    attrs : dict
        Attributes for this group.
    name : str
        Full path to this group.
    file : File
        File instance where this group resides.
    parent : Group
        Group instance containing this group.
    """

    def __init__(self, name, dataobjects, parent):
        """ Initialize. """
        self.parent = parent
        self.file = parent.file
        self.name = name
        self._links = dataobjects.get_links()
        self._dataobjects = dataobjects
        self._attrs = None  # cached property

    def __repr__(self):
        return '<HDF5 group "%s" (%d members)>' % (self.name, len(self))

    def __len__(self):
        """ Number of links in the group. """
        return len(self._links)

    def _dereference(self, ref):
        """ Dereference a Reference object, returning the referenced
        Group or Dataset. """
        if not ref:
            raise ValueError('cannot deference null reference')
        obj = self.file._get_object_by_address(ref.address_of_reference)
        if obj is None:
            raise ValueError('reference not found in file')
        return obj

    def __getitem__(self, y):
        """ x.__getitem__(y) <==> x[y] """
        if isinstance(y, Reference):
            return self._dereference(y)

        path = posixpath.normpath(y)
        if path == '.':
            return self
        if path.startswith('/'):
            # Absolute path: restart the lookup from the file root.
            return self.file[path[1:]]

        if posixpath.dirname(path) != '':
            # Multi-component path: resolve the first component here and
            # recurse into it with the remainder.
            next_obj, additional_obj = path.split('/', 1)
        else:
            next_obj = path
            additional_obj = '.'

        if next_obj not in self._links:
            raise KeyError('%s not found in group' % (next_obj))

        obj_name = posixpath.join(self.name, next_obj)
        link_target = self._links[next_obj]

        if isinstance(link_target, str):
            # Soft link: target is a path stored as a string.
            # NOTE(review): a dangling soft link returns None rather than
            # raising KeyError -- confirm this is the intended contract.
            try:
                return self.__getitem__(link_target)
            except KeyError:
                return None

        dataobjs = DataObjects(self.file._fh, link_target)
        if dataobjs.is_dataset:
            if additional_obj != '.':
                raise KeyError('%s is a dataset, not a group' % (obj_name))
            return Dataset(obj_name, dataobjs, self)
        return Group(obj_name, dataobjs, self)[additional_obj]

    def __iter__(self):
        # Iteration yields the names of this group's links.
        for k in self._links.keys():
            yield k

    def visit(self, func):
        """
        Recursively visit all names in the group and subgroups.

        func should be a callable with the signature:

            func(name) -> None or return value

        Returning None continues iteration, return anything else stops and
        return that value from the visit method.
        """
        return self.visititems(lambda name, obj: func(name))

    def visititems(self, func):
        """
        Recursively visit all objects in this group and subgroups.

        func should be a callable with the signature:

            func(name, object) -> None or return value

        Returning None continues iteration, return anything else stops and
        return that value from the visit method.
        """
        root_name_length = len(self.name)
        if not self.name.endswith('/'):
            root_name_length += 1
        # Breadth-first traversal using an explicit queue.
        queue = deque(self.values())
        while queue:
            obj = queue.popleft()
            name = obj.name[root_name_length:]
            ret = func(name, obj)
            if ret is not None:
                return ret
            if isinstance(obj, Group):
                queue.extend(obj.values())
        return None

    @property
    def attrs(self):
        """ attrs attribute. """
        if self._attrs is None:
            self._attrs = self._dataobjects.get_attributes()
        return self._attrs
class File(Group):
    """
    Open a HDF5 file.

    Note in addition to having file specific methods the File object also
    inherits the full interface of **Group**.

    File is also a context manager and therefore supports the with statement.
    Files opened by the class will be closed after the with block; file-like
    objects are not closed.

    Parameters
    ----------
    filename : str or file-like
        Name of file (string or unicode) or file like object which has read
        and seek methods which behave like a Python file object.

    Attributes
    ----------
    filename : str
        Name of the file on disk, None if not available.
    mode : str
        String indicating that the file is open readonly ("r").
    userblock_size : int
        Size of the user block in bytes (currently always 0).
    """

    def __init__(self, filename):
        """ Initialize. """
        # Set _close first so close()/__del__ are safe even if any of the
        # parsing below raises.
        self._close = False
        if hasattr(filename, 'read'):
            if not hasattr(filename, 'seek'):
                raise ValueError(
                    'File like object must have a seek method')
            self._fh = filename
            self.filename = getattr(filename, 'name', None)
        else:
            self._fh = open(filename, 'rb')
            self._close = True
            self.filename = filename
        self._superblock = SuperBlock(self._fh, 0)
        offset = self._superblock.offset_to_dataobjects
        dataobjects = DataObjects(self._fh, offset)
        self.file = self
        self.mode = 'r'
        self.userblock_size = 0
        super(File, self).__init__('/', dataobjects, self)

    def __repr__(self):
        # filename is None for anonymous file-like objects; the old code
        # crashed in that case because basename(None) raises.
        if self.filename is None:
            return '<HDF5 file (mode r)>'
        return '<HDF5 file "%s" (mode r)>' % (os.path.basename(self.filename))

    def _get_object_by_address(self, obj_addr):
        """ Return the object pointed to by a given address. """
        if self._dataobjects.offset == obj_addr:
            return self
        return self.visititems(
            lambda x, y: y if y._dataobjects.offset == obj_addr else None)

    def close(self):
        """ Close the file.  Safe to call more than once. """
        # getattr guard: __del__ may run on a partially constructed
        # instance if __init__ raised before _close/_fh were assigned.
        if getattr(self, '_close', False):
            self._close = False
            self._fh.close()
    __del__ = close

    def __enter__(self):
        return self

    def __exit__(self, exc_type, value, traceback):
        self.close()
class Dataset(object):
    """
    A HDF5 Dataset containing an n-dimensional array and meta-data attributes.

    Attributes
    ----------
    shape : tuple
        Dataset dimensions.
    dtype : dtype
        Dataset's type.
    size : int
        Total number of elements in the dataset.
    chunks : tuple or None
        Chunk shape, or None if chunked storage is not used.
    compression : str or None
        Compression filter used on dataset. None if compression is not enabled
        for this dataset.
    compression_opts : dict or None
        Options for the compression filter.
    scaleoffset : dict or None
        Setting for the HDF5 scale-offset filter, or None if scale-offset
        compression is not used for this dataset.
    shuffle : bool
        Whether the shuffle filter is applied for this dataset.
    fletcher32 : bool
        Whether the Fletcher32 checksumming is enabled for this dataset.
    fillvalue : float or None
        Value indicating uninitialized portions of the dataset, or None if no
        fill value has been defined.
    dim : int
        Number of dimensions.
    dims : None
        Dimension scales.
    attrs : dict
        Attributes for this dataset.
    name : str
        Full path to this dataset.
    file : File
        File instance where this dataset resides.
    parent : Group
        Group instance containing this dataset.
    """

    def __init__(self, name, dataobjects, parent):
        """ Initialize (normally done by Group indexing, not by users). """
        self.parent = parent
        self.file = parent.file
        self.name = name
        self._dataobjects = dataobjects
        self._attrs = None    # lazily populated by the attrs property
        self._astype = None   # temporarily set by AstypeContext

    def __repr__(self):
        info = (os.path.basename(self.name), self.shape, self.dtype)
        return '<HDF5 dataset "%s": shape %s, type "%s">' % info

    def __getitem__(self, args):
        data = self._dataobjects.get_data()[args]
        if self._astype is None:
            return data
        # Inside an astype() context: convert on the way out.
        return data.astype(self._astype)

    def read_direct(self, array, source_sel=None, dest_sel=None):
        """
        Read from a HDF5 dataset directly into a NumPy array.

        This is equivalent to array[dest_sel] = dset[source_sel].
        Creation of intermediates is not avoided. This method is provided for
        compatibility with h5py; it is not efficient.
        """
        array[dest_sel] = self[source_sel]

    def astype(self, dtype):
        """
        Return a context manager which returns data as a particular type.
        Conversion is handled by NumPy after the data has been read.
        """
        return AstypeContext(self, dtype)

    def len(self):
        """ Return the size of the first axis. """
        return self.shape[0]

    @property
    def shape(self):
        """ shape attribute. """
        return self._dataobjects.shape

    @property
    def ndim(self):
        """ number of dimensions. """
        return len(self.shape)

    @property
    def dtype(self):
        """ dtype attribute. """
        return self._dataobjects.dtype

    @property
    def value(self):
        """ alias for dataset[()], deprecated (matches h5py). """
        # Bug fix: the original merely constructed a DeprecationWarning
        # instance and discarded it, so no warning was ever emitted.
        import warnings
        warnings.warn(
            "dataset.value has been deprecated. Use dataset[()] instead.",
            DeprecationWarning, stacklevel=2)
        return self[()]

    @property
    def size(self):
        """ size attribute: total number of elements. """
        return np.prod(self.shape)

    @property
    def chunks(self):
        """ chunks attribute. """
        return self._dataobjects.chunks

    @property
    def compression(self):
        """ compression attribute. """
        return self._dataobjects.compression

    @property
    def compression_opts(self):
        """ compression_opts attribute. """
        return self._dataobjects.compression_opts

    @property
    def scaleoffset(self):
        """ scaleoffset attribute. """
        return None  # TODO support scale-offset filter

    @property
    def shuffle(self):
        """ shuffle attribute. """
        return self._dataobjects.shuffle

    @property
    def fletcher32(self):
        """ fletcher32 attribute. """
        return self._dataobjects.fletcher32

    @property
    def fillvalue(self):
        """ fillvalue attribute. """
        return self._dataobjects.fillvalue

    @property
    def dims(self):
        """ dims attribute. """
        return DimensionManager(self)

    @property
    def attrs(self):
        """ attrs attribute, read once from the file then cached. """
        if self._attrs is None:
            self._attrs = self._dataobjects.get_attributes()
        return self._attrs
class DimensionManager(Sequence):
    """ Sequence of DimensionProxy objects, one per dataset dimension. """

    def __init__(self, dset):
        rank = len(dset.shape)
        attrs = dset.attrs
        # Reference lists default to empty per dimension.
        refs_per_dim = [[]] * rank
        if 'DIMENSION_LIST' in attrs:
            refs_per_dim = attrs['DIMENSION_LIST']
        # Labels default to empty byte strings per dimension.
        labels = [b''] * rank
        if 'DIMENSION_LABELS' in attrs:
            labels = attrs['DIMENSION_LABELS']
        proxies = []
        for lbl, refs in zip(labels, refs_per_dim):
            proxies.append(DimensionProxy(dset.file, lbl, refs))
        self._dims = proxies

    def __len__(self):
        return len(self._dims)

    def __getitem__(self, index):
        return self._dims[index]
class DimensionProxy(Sequence):
    """ A single HDF5 "dimension": a label plus references resolvable
    through the owning file. """

    def __init__(self, dset_file, label, refs):
        self._file = dset_file
        self._refs = refs
        # Labels are stored as bytes in the file; expose them as str.
        self.label = label.decode('utf-8')

    def __len__(self):
        return len(self._refs)

    def __getitem__(self, index):
        ref = self._refs[index]
        return self._file[ref]
class AstypeContext(object):
    """
    Context manager that temporarily coerces reads from a dataset to a
    given NumPy dtype (see Dataset.astype).
    """

    def __init__(self, dset, dtype):
        self._dtype = np.dtype(dtype)
        self._dset = dset

    def __enter__(self):
        # Reads performed inside the block are converted to this dtype.
        self._dset._astype = self._dtype

    def __exit__(self, *args):
        # Restore unconverted reads.
        self._dset._astype = None
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: creates the sentry_lostpasswordhash table
    backing the LostPasswordHash model (one hash per user plus a creation
    timestamp)."""

    def forwards(self, orm):
        # Adding model 'LostPasswordHash'
        db.create_table('sentry_lostpasswordhash', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sentry.User'], unique=True)),
            ('hash', self.gf('django.db.models.fields.CharField')(max_length=32)),
            ('date_added', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
        ))
        db.send_create_signal('sentry', ['LostPasswordHash'])

    def backwards(self, orm):
        # Deleting model 'LostPasswordHash'
        db.delete_table('sentry_lostpasswordhash')

    # Frozen ORM snapshot used by South to build the `orm` object passed to
    # forwards()/backwards(). Auto-generated; do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'sentry.user': {
            'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'sentry.event': {
            'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'"},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'site': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'db_index': 'True'}),
            'time_spent': ('django.db.models.fields.FloatField', [], {'null': 'True'})
        },
        'sentry.filterkey': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'FilterKey'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"})
        },
        'sentry.filtervalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'FilterValue'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.group': {
            'Meta': {'unique_together': "(('project', 'logger', 'culprit', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
            'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
            'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
        },
        'sentry.groupbookmark': {
            'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
        },
        'sentry.groupmeta': {
            'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'value': ('django.db.models.fields.TextField', [], {})
        },
        'sentry.lostpasswordhash': {
            'Meta': {'object_name': 'LostPasswordHash'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
        },
        'sentry.messagecountbyminute': {
            'Meta': {'unique_together': "(('project', 'group', 'date'),)", 'object_name': 'MessageCountByMinute'},
            'date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'sentry.messagefiltervalue': {
            'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'MessageFilterValue'},
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'sentry.messageindex': {
            'Meta': {'unique_together': "(('column', 'value', 'object_id'),)", 'object_name': 'MessageIndex'},
            'column': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'sentry.option': {
            'Meta': {'object_name': 'Option'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
            'value': ('picklefield.fields.PickledObjectField', [], {})
        },
        'sentry.pendingteammember': {
            'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'sentry.project': {
            'Meta': {'object_name': 'Project'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
            'public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
        },
        'sentry.projectcountbyminute': {
            'Meta': {'unique_together': "(('project', 'date'),)", 'object_name': 'ProjectCountByMinute'},
            'date': ('django.db.models.fields.DateTimeField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'time_spent_total': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
        },
        'sentry.projectkey': {
            'Meta': {'object_name': 'ProjectKey'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
            'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
            'user_added': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
        },
        'sentry.projectoption': {
            'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
            'value': ('picklefield.fields.PickledObjectField', [], {})
        },
        'sentry.searchdocument': {
            'Meta': {'unique_together': "(('project', 'group'),)", 'object_name': 'SearchDocument'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'date_changed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Group']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']"}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'total_events': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'})
        },
        'sentry.searchtoken': {
            'Meta': {'unique_together': "(('document', 'field', 'token'),)", 'object_name': 'SearchToken'},
            'document': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'token_set'", 'to': "orm['sentry.SearchDocument']"}),
            'field': ('django.db.models.fields.CharField', [], {'default': "'text'", 'max_length': '64'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'token': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'sentry.team': {
            'Meta': {'object_name': 'Team'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']"}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'})
        },
        'sentry.teammember': {
            'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
            'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
            'type': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
        },
        'sentry.useroption': {
            'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sentry.User']"}),
            'value': ('picklefield.fields.PickledObjectField', [], {})
        }
    }

    complete_apps = ['sentry']
| |
from django.template import (
Context, Engine, TemplateDoesNotExist, TemplateSyntaxError, loader,
)
from django.test import SimpleTestCase
from ..utils import setup
from .test_basic import basic_templates
# Template fixtures expected to fail when loaded/rendered:
# 'include-fail1' loads a working library whose tag raises at render time;
# 'include-fail2' loads a library that is broken at import time (presumably
# raising TemplateSyntaxError at parse — see the tests below).
include_fail_templates = {
    'include-fail1': '{% load bad_tag %}{% badtag %}',
    'include-fail2': '{% load broken_tag %}',
}
class IncludeTagTests(SimpleTestCase):
    """Tests for the {% include %} template tag: static and dynamic template
    names, the ``with``/``only`` argument forms, autoescape interaction, and
    the tag's syntax-error handling."""
    libraries = {'bad_tag': 'template_tests.templatetags.bad_tag'}

    @setup({'include01': '{% include "basic-syntax01" %}'}, basic_templates)
    def test_include01(self):
        output = self.engine.render_to_string('include01')
        self.assertEqual(output, 'something cool')

    @setup({'include02': '{% include "basic-syntax02" %}'}, basic_templates)
    def test_include02(self):
        output = self.engine.render_to_string('include02', {'headline': 'Included'})
        self.assertEqual(output, 'Included')

    @setup({'include03': '{% include template_name %}'}, basic_templates)
    def test_include03(self):
        # Template name supplied as a context variable, not a literal.
        output = self.engine.render_to_string(
            'include03',
            {'template_name': 'basic-syntax02', 'headline': 'Included'},
        )
        self.assertEqual(output, 'Included')

    @setup({'include04': 'a{% include "nonexistent" %}b'})
    def test_include04(self):
        template = self.engine.get_template('include04')
        with self.assertRaises(TemplateDoesNotExist):
            template.render(Context({}))

    @setup({
        'include 05': 'template with a space',
        'include06': '{% include "include 05"%}',
    })
    def test_include06(self):
        output = self.engine.render_to_string('include06')
        self.assertEqual(output, "template with a space")

    @setup({'include07': '{% include "basic-syntax02" with headline="Inline" %}'}, basic_templates)
    def test_include07(self):
        # The `with` argument overrides the outer context variable.
        output = self.engine.render_to_string('include07', {'headline': 'Included'})
        self.assertEqual(output, 'Inline')

    @setup({'include08': '{% include headline with headline="Dynamic" %}'}, basic_templates)
    def test_include08(self):
        output = self.engine.render_to_string('include08', {'headline': 'basic-syntax02'})
        self.assertEqual(output, 'Dynamic')

    @setup(
        {'include09': '{{ first }}--'
                      '{% include "basic-syntax03" with first=second|lower|upper second=first|upper %}'
                      '--{{ second }}'},
        basic_templates,
    )
    def test_include09(self):
        output = self.engine.render_to_string('include09', {'first': 'Ul', 'second': 'lU'})
        self.assertEqual(output, 'Ul--LU --- UL--lU')

    @setup({'include10': '{% include "basic-syntax03" only %}'}, basic_templates)
    def test_include10(self):
        # `only` isolates the included template from the outer context.
        output = self.engine.render_to_string('include10', {'first': '1'})
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID --- INVALID')
        else:
            self.assertEqual(output, ' --- ')

    @setup({'include11': '{% include "basic-syntax03" only with second=2 %}'}, basic_templates)
    def test_include11(self):
        output = self.engine.render_to_string('include11', {'first': '1'})
        if self.engine.string_if_invalid:
            self.assertEqual(output, 'INVALID --- 2')
        else:
            self.assertEqual(output, ' --- 2')

    @setup({'include12': '{% include "basic-syntax03" with first=1 only %}'}, basic_templates)
    def test_include12(self):
        output = self.engine.render_to_string('include12', {'second': '2'})
        if self.engine.string_if_invalid:
            self.assertEqual(output, '1 --- INVALID')
        else:
            self.assertEqual(output, '1 --- ')

    @setup(
        {'include13': '{% autoescape off %}{% include "basic-syntax03" %}{% endautoescape %}'},
        basic_templates,
    )
    def test_include13(self):
        # Autoescaping state carries into the included template.
        output = self.engine.render_to_string('include13', {'first': '&'})
        if self.engine.string_if_invalid:
            self.assertEqual(output, '& --- INVALID')
        else:
            self.assertEqual(output, '& --- ')

    @setup(
        {'include14': '{% autoescape off %}'
                      '{% include "basic-syntax03" with first=var1 only %}'
                      '{% endautoescape %}'},
        basic_templates,
    )
    def test_include14(self):
        output = self.engine.render_to_string('include14', {'var1': '&'})
        if self.engine.string_if_invalid:
            self.assertEqual(output, '& --- INVALID')
        else:
            self.assertEqual(output, '& --- ')

    # Include syntax errors
    @setup({'include-error01': '{% include "basic-syntax01" with %}'})
    def test_include_error01(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('include-error01')

    @setup({'include-error02': '{% include "basic-syntax01" with "no key" %}'})
    def test_include_error02(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('include-error02')

    @setup({'include-error03': '{% include "basic-syntax01" with dotted.arg="error" %}'})
    def test_include_error03(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('include-error03')

    @setup({'include-error04': '{% include "basic-syntax01" something_random %}'})
    def test_include_error04(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('include-error04')

    @setup({'include-error05': '{% include "basic-syntax01" foo="duplicate" foo="key" %}'})
    def test_include_error05(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('include-error05')

    @setup({'include-error06': '{% include "basic-syntax01" only only %}'})
    def test_include_error06(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('include-error06')

    # Failures raised inside an included template propagate to the caller.
    @setup(include_fail_templates)
    def test_include_fail1(self):
        with self.assertRaises(RuntimeError):
            self.engine.get_template('include-fail1')

    @setup(include_fail_templates)
    def test_include_fail2(self):
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('include-fail2')

    @setup({'include-error07': '{% include "include-fail1" %}'}, include_fail_templates)
    def test_include_error07(self):
        template = self.engine.get_template('include-error07')
        with self.assertRaises(RuntimeError):
            template.render(Context())

    @setup({'include-error08': '{% include "include-fail2" %}'}, include_fail_templates)
    def test_include_error08(self):
        template = self.engine.get_template('include-error08')
        with self.assertRaises(TemplateSyntaxError):
            template.render(Context())

    @setup({'include-error09': '{% include failed_include %}'}, include_fail_templates)
    def test_include_error09(self):
        context = Context({'failed_include': 'include-fail1'})
        template = self.engine.get_template('include-error09')
        with self.assertRaises(RuntimeError):
            template.render(context)

    @setup({'include-error10': '{% include failed_include %}'}, include_fail_templates)
    def test_include_error10(self):
        context = Context({'failed_include': 'include-fail2'})
        template = self.engine.get_template('include-error10')
        with self.assertRaises(TemplateSyntaxError):
            template.render(context)
class IncludeTests(SimpleTestCase):
    """Tests for {% include %} that build their own Engine rather than using
    the @setup decorator: missing-template error reporting, template objects
    passed as the include argument, recursion, and per-render caching."""

    def test_include_missing_template(self):
        """
        The correct template is identified as not existing
        when {% include %} specifies a template that does not exist.
        """
        engine = Engine(app_dirs=True, debug=True)
        template = engine.get_template('test_include_error.html')
        with self.assertRaises(TemplateDoesNotExist) as e:
            template.render(Context())
        self.assertEqual(e.exception.args[0], 'missing.html')

    def test_extends_include_missing_baseloader(self):
        """
        #12787 -- The correct template is identified as not existing
        when {% extends %} specifies a template that does exist, but that
        template has an {% include %} of something that does not exist.
        """
        engine = Engine(app_dirs=True, debug=True)
        template = engine.get_template('test_extends_error.html')
        with self.assertRaises(TemplateDoesNotExist) as e:
            template.render(Context())
        self.assertEqual(e.exception.args[0], 'missing.html')

    def test_extends_include_missing_cachedloader(self):
        # Same scenario as above, but through the cached loader.
        engine = Engine(debug=True, loaders=[
            ('django.template.loaders.cached.Loader', [
                'django.template.loaders.app_directories.Loader',
            ]),
        ])
        template = engine.get_template('test_extends_error.html')
        with self.assertRaises(TemplateDoesNotExist) as e:
            template.render(Context())
        self.assertEqual(e.exception.args[0], 'missing.html')
        # Repeat to ensure it still works when loading from the cache
        template = engine.get_template('test_extends_error.html')
        with self.assertRaises(TemplateDoesNotExist) as e:
            template.render(Context())
        self.assertEqual(e.exception.args[0], 'missing.html')

    def test_include_template_argument(self):
        """
        Support any render() supporting object
        """
        engine = Engine()
        ctx = Context({
            'tmpl': engine.from_string('This worked!'),
        })
        outer_tmpl = engine.from_string('{% include tmpl %}')
        output = outer_tmpl.render(ctx)
        self.assertEqual(output, 'This worked!')

    def test_include_from_loader_get_template(self):
        tmpl = loader.get_template('include_tpl.html')  # {% include tmpl %}
        output = tmpl.render({'tmpl': loader.get_template('index.html')})
        self.assertEqual(output, 'index\n\n')

    def test_include_immediate_missing(self):
        """
        #16417 -- Include tags pointing to missing templates should not raise
        an error at parsing time.
        """
        Engine(debug=True).from_string('{% include "this_does_not_exist.html" %}')

    def test_include_recursive(self):
        comments = [
            {
                'comment': 'A1',
                'children': [
                    {'comment': 'B1', 'children': []},
                    {'comment': 'B2', 'children': []},
                    {'comment': 'B3', 'children': [
                        {'comment': 'C1', 'children': []}
                    ]},
                ]
            }
        ]
        engine = Engine(app_dirs=True)
        t = engine.get_template('recursive_include.html')
        self.assertEqual(
            "Recursion! A1 Recursion! B1 B2 B3 Recursion! C1",
            t.render(Context({'comments': comments})).replace(' ', '').replace('\n', ' ').strip(),
        )

    def test_include_cache(self):
        """
        {% include %} keeps resolved templates constant (#27974). The
        CounterNode object in the {% counter %} template tag is created once
        if caching works properly. Each iteration increases the counter instead
        of restarting it.
        This works as a regression test only if the cached loader
        isn't used, so the @setup decorator isn't used.
        """
        engine = Engine(loaders=[
            ('django.template.loaders.locmem.Loader', {
                'template': '{% for x in vars %}{% include "include" %}{% endfor %}',
                'include': '{% include "next" %}',
                'next': '{% load custom %}{% counter %}'
            }),
        ], libraries={'custom': 'template_tests.templatetags.custom'})
        output = engine.render_to_string('template', {'vars': range(9)})
        self.assertEqual(output, '012345678')
| |
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 1 15:03:35 2015
@author: fergal
$Id$
$URL$
"""
__version__ = "$Id$"
__URL__ = "$URL$"
import matplotlib as mp
import numpy as np
import matplotlib.pyplot as mp
import numpy as np
import dave.fileio.mastio as mastio
import dave.fileio.tpf as tpf
import dave.fileio.kplrfits as kplrfits
import dave.diffimg.prf as prf
import dave.diffimg.diffimg as diffimg
import dave.diffimg.arclen as arclen
import dave.misc.plotTpf
import dave.misc.covar as covar
import scipy.optimize as sopt
def exampleDiffImgCentroiding():
    """Worked example: difference-image centroiding for one K2 target.

    Fetches the long-cadence TPF for EPIC 206103150 (campaign 3) from the
    MAST archive, computes the spacecraft roll phase from the moment
    centroids, then measures per-transit difference-image centroid offsets
    and plots them.

    Returns the `out` array from measureDiffOffset().

    NOTE(review): depends on a hard-coded local PRF path and on
    getBoundingBoxForImage()/measureDiffOffset() defined elsewhere in this
    module — confirm they are importable before running.
    """
    k2id = 206103150
    campaign = 3
    ar = mastio.K2Archive()
    fits, hdr = ar.getLongTpf(k2id, campaign, header=True)
    hdr0 = ar.getLongTpf(k2id, campaign, ext=0)
    cube = tpf.getTargetPixelArrayFromFits(fits, hdr)
    idx = np.isfinite(cube)
    cube[~idx] = 0  #Remove Nans
    flags = fits['QUALITY']
    ccdMod = hdr0['module']
    ccdOut = hdr0['output']
    #Compute roll phase
    llc = ar.getLongCadence(k2id, campaign)
    time= llc['TIME']
    cent1 = llc['MOM_CENTR1']
    cent2 = llc['MOM_CENTR2']
    centColRow = np.vstack((cent1, cent2)).transpose()
    rot = arclen.computeArcLength(centColRow, flags>0)
    rollPhase = rot[:,0]
    rollPhase[flags>0] = -9999  #A bad value
    prfObj = prf.KeplerPrf("/home/fergal/data/keplerprf")
    bbox = getBoundingBoxForImage(cube[0], hdr)
    # Ephemeris of the transit to difference against (days / BKJD).
    period = 4.1591409
    epoch = fits['time'][491]
    dur = 3.0
    out, log = measureDiffOffset(period, epoch, dur, time, prfObj, \
        ccdMod, ccdOut, cube, bbox, rollPhase, flags)
    # NOTE(review): `idx` below is computed but never used — confirm intent.
    idx = out[:,1] > 0
    mp.clf()
    mp.plot(out[:,3]-out[:,1], out[:,4]- out[:,2], 'ro')
    return out
def exampleFitting():
    """Worked example: fit a PRF centroid to one Kepler TPF cadence and
    show the diagnostic plots."""
    kepid = 8554498
    quarter = 16

    archive = mastio.KeplerArchive()
    fits, hdr = archive.getLongTpf(kepid, quarter, header=True)
    hdr0 = archive.getLongTpf(kepid, quarter, ext=0)
    cube = tpf.getTargetPixelArrayFromFits(fits, hdr)
    module = hdr0['MODULE']
    output = hdr0['OUTPUT']

    img = cube[100]
    img[~np.isfinite(img)] = 0  # zero out NaNs before fitting

    prfObj = prf.KeplerPrf("/home/fergal/data/keplerprf")
    bbox = getBoundingBoxForImage(img, hdr)
    res = fitPrfCentroidForImage(img, module, output, bbox, prfObj)
    plotCentroidFitDiagnostic(img, hdr, module, output, res, prfObj)
    return res
def measureOffsetProbabilityInTimeseries(offsets, minNumPoints=2):
    """
    Vet the centroid time series produced by measureDiffOffset()

    Measure the probability that the observed difference image centroid-
    offsets indicate that the transit happened off target.

    Inputs:
    --------
    offsets
        (2d np array) The first return value from measureDiffOffset()

    Optional Inputs:
    ------------------
    minNumPoints
        (int) Minimum number of points needed before computing result.
        Currently set to the minimum needed by covar (==2)

    Returns:
    -----------
    A tuple with 2 values

    prob
        Probability the transit happened off target
    chi2
        The chi-squared corresponding to that probability

    Notes:
    ---------
    For highly significant offsets, the probability flatlines at zero for
    values below ~1e-40. The chi-squared value then can be used to indicate
    the significance of the offset.
    """
    # Column layout of measureDiffOffset() output (col 0 is the cadence
    # number). Bug fix: the array was previously indexed with string
    # labels ('intr_col' etc.), which raises on a plain ndarray.
    INTR_COL, INTR_ROW, DIFF_COL, DIFF_ROW = 1, 2, 3, 4

    # Failed cadences are left at -1, so a positive in-transit column
    # marks a successful fit.
    idx = offsets[:, INTR_COL] > 0
    diffC = offsets[idx, DIFF_COL] - offsets[idx, INTR_COL]
    diffR = offsets[idx, DIFF_ROW] - offsets[idx, INTR_ROW]

    if len(diffC) < minNumPoints:
        return 0, 0

    prob, chi2 = covar.computeProbabilityOfObservedOffset(diffC, diffR)
    return prob, chi2
def measureDiffOffset(period_days, epoch_bkjd, duration_hrs, \
    time, prfObj, ccdMod, ccdOut, cube, bbox, rollPhase, flags, qFlags):
    """Measure Centroid shift between intransit and difference image
    for every in-transit cadence

    Inputs:
    -----------
    period_days, epoch_bkjd, duration_hrs
        (floats) Properties of transit

    time
        Array of times per cadence for the given campaign

    prfObj
        An object of the class prf.KeplerPrf()

    ccdMod, ccdOut
        (int) CCD module and output of image. Needed to
        create the correct PRF model

    cube
        (3d np array) A data cube created from a TPF file.
        See fileio.tpf.getTargetPixelArrayFromFits()

    bbox
        [c1, c2, r1, r2]. Define the range of columns (c1..c2)
        and rows (r1..r2) defined by the image.
        An exception raised if the following equality not true
        img.shape = (c2-c1), (r2-r1)

    rollPhase
        (1d np array) An array of roll phases for each row
        of cube. len(rollPhase) == len(cube). Units of this
        array don't matter, so long as cadences with similar
        roll angles have similar values of rollPhase. Roll phases
        for bad cadences should be set to a bad value

    flags
        (1d array) flag values indicating bad cadences.
        Currently a non-zero value of flags indicates a bad
        cadence.

    qFlags
        (1d array) SAP Quality flags from lightcurve files

    Returns:
    -------------
    A tuple of three values

    out
        (2d np array) One row per in-transit cadence, 5 columns:
        0: Relative cadence number
        1: In transit centroid column
        2: In transit centroid row
        3: Diff img centroid column
        4: Diff img centroid row
        Cadences whose fit failed keep the initial value of -1
        in columns 1..4.
    diagnostics
        (list) Per-cadence diagnostic dicts from
        measureInTransitAndDiffCentroidForOneImg() (None on failure)
    log
        (list of str) One message per cadence whose fit raised ValueError

    If there is a statistically significant difference between the intransit
    and difference image centroids then the transit is most likely not
    on the target.
    """
    duration_days = duration_hrs / 24.
    log = []

    idx = kplrfits.markTransitCadences(time, period_days, epoch_bkjd, \
        duration_days, flags=flags)
    wh = np.where(idx)[0]
    out = -1 * np.ones((len(wh), 5))

    # A preallocated slot per cadence. (Previously range(), which is not
    # item-assignable on Python 3.)
    diagnostics = [None] * len(wh)

    for i, w in enumerate(wh):
        out[i, 0] = w
        try:
            out[i, 1:], dDict = measureInTransitAndDiffCentroidForOneImg( \
                prfObj, ccdMod, ccdOut, cube, w, bbox, rollPhase, qFlags, \
                hdr=None, plot=False)
            diagnostics[i] = dDict
        except ValueError as e:  # "except X, e" is Python 2 only
            log.append("Img %i: %s" %(w, e))

    return out, diagnostics, log
def measureInTransitAndDiffCentroidForOneImg(prfObj, ccdMod, ccdOut, cube, rin, bbox, rollPhase, flags, hdr=None, plot=False):
    """Measure the image centroids of one in-transit image and its
    difference image.

    Inputs:
    -----------
    prfObj
        An object of the class prf.KeplerPrf()
    ccdMod, ccdOut
        (int) CCD module and output of image, used to pick the PRF model
    cube
        (3d np array) A TPF data cube as returned by
        dave.fileio.getTargetPixelArrayFromFits()
    rin
        (int) Index of the image to process, in the range 0..len(cube)
    bbox
        [c1, c2, r1, r2] column and row range covered by the image
    rollPhase
        (1d np array) Roll phase per cadence; len(rollPhase) == len(cube)
    flags
        (1d array) Non-zero values mark bad cadences

    Optional Inputs:
    ---------------
    hdr
        Fits header object for TPF file. Useful if you want to plot
    plot
        (bool) Request plots.

    Returns:
    -------------
    A two element tuple:
    * a 4 element numpy array [intr_col, intr_row, diff_col, diff_row]
      (all -1 when no difference image could be constructed)
    * a dictionary of diagnostics describing the cadences used to build
      the difference image
    """
    diff, oot, diagnostics = diffimg.constructK2DifferenceImage(cube, rin, \
        rollPhase, flags)

    # An all-zero out-of-transit image means no difference image was possible
    if np.max(np.fabs(oot)) == 0:
        return np.array([-1, -1, -1, -1]), diagnostics

    ootRes = fitPrfCentroidForImage(oot, ccdMod, ccdOut, bbox, prfObj)
    diffRes = fitPrfCentroidForImage(diff, ccdMod, ccdOut, bbox, prfObj)

    centroids = np.array([ootRes.x[0], ootRes.x[1], diffRes.x[0], diffRes.x[1]])
    return centroids, diagnostics
def fitPrfCentroidForImage(img, ccdMod, ccdOut, bbox, prfObj):
    """Fit a PRF model to a TPF image and measure the star centroid.

    The best-fit column and row of the PRF model can be treated as the
    photometric centroid of the light distribution of a single isolated
    star.

    Inputs:
    ---------
    img
        (np 2d array) Image of the star, indexed as img[row, col].
        Must not contain NaNs.
    ccdMod, ccdOut
        (int) CCD module and output of image, used to pick the PRF model
    bbox
        [c1, c2, r1, r2] column and row range covered by the image.
        A ValueError is raised unless img.shape == (r2-r1, c2-c1)
    prfObj
        An object of the class prf.KeplerPrf()

    Returns:
    ------------
    A scipy.optimize.OptimizeResult; the best-fit [col, row, scale] is
    in its .x attribute.

    Notes:
    ----------
    The returned object frequently has success=False, but a failed fit
    does not appear to give worse results than a successful one; the fit
    tends to fall over very close to the minimum. The fitter also often
    stops at a subpixel position adjacent (~.02 pixels) to the true best
    fit, so explore the neighbourhood if you need high accuracy (e.g.
    for PSF photometry).

    See example() in this module for an example of use.
    """
    if not np.all(np.isfinite(img)):
        raise ValueError("Input img contains Nans. Set them to zero?")

    expectedShape = ((bbox[3] - bbox[2]), (bbox[1] - bbox[0]))
    if img.shape != expectedShape:
        raise ValueError("Shape of bbox doesn't match shape of image")

    # Initial guess: the centre of the brightest pixel, in CCD coordinates
    brightRow, brightCol = np.unravel_index(np.argmax(img), img.shape)
    guessCol = brightCol + bbox[0] + .5
    guessRow = brightRow + bbox[2] + .5
    guessScale = np.max(img)

    initGuess = [guessCol, guessRow, guessScale]
    args = (ccdMod, ccdOut, bbox, img, prfObj)
    options = {'disp': False, 'eps': .02, 'maxiter': 80}
    # Keep the fit inside the bounding box; scale must stay positive
    bounds = [(bbox[0], bbox[1]), (bbox[2], bbox[3]), (1, None)]

    return sopt.minimize(costFunc, initGuess, args, method="L-BFGS-B",
                         bounds=bounds, options=options)
def getBoundingBoxForImage(img, hdr):
    """Return the bounding box of a TPF-file image.

    i.e. the corners of the rectangle that circumscribes the image.

    Inputs:
    ---------
    img
        (2d np array) Image to construct the bounding box for
    hdr
        (FITS header object) Header of the first extension of the TPF
        file. For an image that did not come from a fits file, use
        img.shape to build the bounding box instead.

    Returns:
    ----------
    A 4 element list [column0, column1, row0, row1]
    """
    numRows, numCols = img.shape
    col0 = float(hdr['1CRV4P'])
    row0 = float(hdr['2CRV4P'])
    return [col0, col0 + numCols, row0, row0 + numRows]
def plotCentroidFitDiagnostic(img, hdr, ccdMod, ccdOut, res, prfObj):
    """Some diagnostic plots showing the performance of fitPrfCentroid()

    Inputs:
    -------------
    img
        (np 2d array) Image of star to be fit. Image is in the
        format img[row, col]. img should not contain Nans

    hdr
        (Fits header object) header associated with the TPF file the
        image was drawn from

    ccdMod, ccdOut
        (int) CCD module and output of image. Needed to
        create the correct PRF model

    res
        (scipy.optimize.OptimizeResult) Fit result from
        fitPrfCentroidForImage(); res.x is [col, row, scale]

    prfObj
        An object of the class prf.KeplerPrf()

    Returns:
    -------------
    **None**

    Output:
    ----------
    A three panel subplot is created
    """
    mp.figure(1)
    mp.clf()

    mp.subplot(131)
    plotTpf.plotCadence(img, hdr)
    mp.colorbar()
    mp.title("Input Image")

    mp.subplot(132)
    c, r = res.x[0], res.x[1]
    bbox = getBoundingBoxForImage(img, hdr)
    model = prfObj.getPrfForBbox(ccdMod, ccdOut, c, r, bbox)
    model *= res.x[2]
    plotTpf.plotCadence(model, hdr)
    mp.colorbar()
    mp.title("Best fit model")

    mp.subplot(133)
    diff = img - model
    plotTpf.plotCadence(diff, hdr)
    mp.colorbar()
    mp.title("Residuals")

    # print-as-function works on Python 2 and 3 (was a Py2 print statement)
    print("Performance %.3f" % (np.max(np.abs(diff)) / np.max(img)))
def costFunc(x, module, output, bbox, img, prfObj):
    """Goodness of fit (sum of squared residuals) for a PRF model at
    col=x[0], row=x[1] with brightness scale x[2]."""
    model = x[2] * prfObj.getPrfForBbox(module, output, x[0], x[1], bbox)
    residual = img - model
    return np.sum(residual**2)
def costFunc1(x, module, output, col, row, bbox, img, prfObj):
    """Debugging variant of costFunc.

    col and row are held fixed; only the PRF brightness x[0] varies.
    """
    model = x[0] * prfObj.getPrfForBbox(module, output, col, row, bbox)
    residual = img - model
    return np.sum(residual**2)
| |
#!/usr/bin/env python
# encoding: utf-8
import os
from efl.evas import EVAS_HINT_EXPAND, EVAS_HINT_FILL, FilledImage
from efl import elementary
from efl.elementary.window import StandardWindow
from efl.elementary.button import Button
from efl.elementary.list import List, ELM_LIST_LIMIT
from efl.elementary.icon import Icon
from efl.elementary.popup import Popup, ELM_WRAP_CHAR
EXPAND_BOTH = EVAS_HINT_EXPAND, EVAS_HINT_EXPAND
FILL_BOTH = EVAS_HINT_FILL, EVAS_HINT_FILL
script_path = os.path.dirname(os.path.abspath(__file__))
img_path = os.path.join(script_path, "images")
def cb_bnt_close(btn, popup):
    """Close-button callback: delete the restack image (if one was stored
    on the popup) and then the popup itself."""
    data = popup.data
    if "im" in data:
        data["im"].delete()
    popup.delete()
def cb_btn_restack(btn, popup):
    """Restack-button callback: place an image on the canvas, remember it
    on the popup, and raise the popup above it."""
    image = FilledImage(popup.evas)
    image.file = os.path.join(img_path, "mystrale_2.jpg")
    image.resize(500, 320)
    image.move(40, 40)
    image.show()
    popup.data["im"] = image
    popup.raise_()
def cb_popup_center_text(li, item, win):
    """List callback: text-only popup that auto-dismisses after 3 seconds."""
    popup = Popup(win, size_hint_weight=EXPAND_BOTH, timeout=3.0)
    popup.text = ("This Popup has content area and timeout value is "
                  "3 seconds")
    popup.show()
# NOTE(review): this definition is immediately shadowed by a second
# definition of the same name below, so it can never be called; the
# "popup-center-text + 1 button" list entry runs the title variant instead.
# Consider renaming this one (e.g. cb_popup_center_text_1button) and
# updating popup_clicked() accordingly.
def cb_popup_center_title_text_1button(li, item, win):
    # Text-only popup with a single Close action button
    popup = Popup(win, size_hint_weight=EXPAND_BOTH)
    popup.text = "This Popup has content area and " \
        "action area set, action area has one button Close"
    bt = Button(win, text="Close")
    bt.callback_clicked_add(cb_bnt_close, popup)
    popup.part_content_set("button1", bt)
    popup.show()
def cb_popup_center_title_text_1button(li, item, win):
    """List callback: popup with title, text, and one Close action button."""
    popup = Popup(win, size_hint_weight=EXPAND_BOTH)
    popup.part_text_set("title,text", "Title")
    popup.text = ("This Popup has title area, content area and "
                  "action area set, action area has one button Close")
    close = Button(win, text="Close")
    close.callback_clicked_add(cb_bnt_close, popup)
    popup.part_content_set("button1", close)
    popup.show()
def cb_popup_center_title_text_block_clicked_event(li, item, win):
    """List callback: titled popup that deletes itself when the blocked
    region around it is clicked."""
    popup = Popup(win, size_hint_weight=EXPAND_BOTH)
    popup.part_text_set("title,text", "Title")
    popup.text = ("This Popup has title area and content area. "
                  "When clicked on blocked event region, popup gets deleted")
    popup.callback_block_clicked_add(cb_bnt_close, popup)
    popup.show()
def cb_popup_bottom_title_text_3button(li, item, win):
    """List callback: char-wrapped popup with a title icon and
    OK/Cancel/Close action buttons."""
    popup = Popup(win, size_hint_weight=EXPAND_BOTH,
                  content_text_wrap_type=ELM_WRAP_CHAR)
    popup.text = ("This Popup has title area, content area and "
                  "action area set with content being character wrapped. "
                  "action area has three buttons OK, Cancel and Close")
    popup.part_text_set("title,text", "Title")
    icon = Icon(win, file=os.path.join(img_path, "logo_small.png"))
    popup.part_content_set("title,icon", icon)
    # Only the Close button gets the delete callback
    for slot, label in (("button1", "OK"), ("button2", "Cancel"),
                        ("button3", "Close")):
        btn = Button(win, text=label)
        if label == "Close":
            btn.callback_clicked_add(cb_bnt_close, popup)
        popup.part_content_set(slot, btn)
    popup.show()
def cb_popup_center_title_content_3button(li, item, win):
    """List callback: popup whose content is a button with an icon, plus
    OK/Cancel/Close action buttons."""
    icon = Icon(win, file=os.path.join(img_path, "logo_small.png"))
    content = Button(win, text="Content", content=icon)
    popup = Popup(win, size_hint_weight=EXPAND_BOTH, content=content)
    popup.part_text_set("title,text", "Title")
    # Only the Close button gets the delete callback
    for slot, label in (("button1", "OK"), ("button2", "Cancel"),
                        ("button3", "Close")):
        btn = Button(win, text=label)
        if label == "Close":
            btn.callback_clicked_add(cb_bnt_close, popup)
        popup.part_content_set(slot, btn)
    popup.show()
def cb_popup_center_title_item_3button(li, item, win):
    """List callback: popup with ten list items (a few carrying icons) and
    OK/Cancel/Close action buttons."""
    popup = Popup(win, size_hint_weight=EXPAND_BOTH)
    popup.part_text_set("title,text", "Title")
    for i in range(1, 11):
        # items 3, 5 and 6 get the small logo icon, the rest get none
        icon = None
        if i in (3, 5, 6):
            icon = Icon(win, file=os.path.join(img_path, "logo_small.png"))
        popup.item_append("item" + str(i), icon)
    # Only the Close button gets the delete callback
    for slot, label in (("button1", "OK"), ("button2", "Cancel"),
                        ("button3", "Close")):
        btn = Button(win, text=label)
        if label == "Close":
            btn.callback_clicked_add(cb_bnt_close, popup)
        popup.part_content_set(slot, btn)
    popup.show()
def cb_popup_center_title_text_2button_restack(li, item, win):
    """List callback: popup demonstrating restacking; pressing Restack
    places an image underneath the popup."""
    popup = Popup(win, size_hint_weight=EXPAND_BOTH)
    popup.part_text_set("title,text", "Title")
    popup.text = ("When you click the 'Restack' button, "
                  "an image will be located under this popup")
    restack = Button(win, text="Restack")
    restack.callback_clicked_add(cb_btn_restack, popup)
    popup.part_content_set("button1", restack)
    close = Button(win, text="Close")
    close.callback_clicked_add(cb_bnt_close, popup)
    # NOTE: the original demo places the second button in the "button3" slot
    popup.part_content_set("button3", close)
    popup.show()
# Shared state for the hide/show demo below
times = 0  # how many times the hide/show list item has been activated
g_popup = None  # the single popup instance reused across activations
def cb_popup_center_text_1button_hide_show(li, item, win):
    """List callback: build the popup once, then re-show it with an
    updated activation count on later activations."""
    global times
    global g_popup
    times += 1
    if g_popup is not None:
        # Already built: refresh the message and show it again
        g_popup.text = "You have checked this popup %d times." % times
        g_popup.show()
        return
    g_popup = Popup(win, size_hint_weight=EXPAND_BOTH)
    g_popup.text = ("Hide this popup by using the button."
                    "When you click list item again, you will see this popup again.")
    hide = Button(win, text="Hide")
    hide.callback_clicked_add(lambda b: g_popup.hide())
    g_popup.part_content_set("button1", hide)
    g_popup.show()
def popup_clicked(obj):
    """Build the popup-demo window and populate its list of test cases."""
    win = StandardWindow("popup", "Popup test", autodel=True, size=(480, 800))
    if obj is None:
        win.callback_delete_request_add(lambda o: elementary.exit())

    li = List(win, mode=ELM_LIST_LIMIT, size_hint_weight=EXPAND_BOTH)
    li.callback_selected_add(lambda li, it: it.selected_set(False))
    win.resize_object_add(li)
    li.show()

    # (label, callback) pairs, one list entry per demo
    demos = (
        ("popup-center-text", cb_popup_center_text),
        ("popup-center-text + 1 button", cb_popup_center_title_text_1button),
        ("popup-center-title + text + 1 button",
         cb_popup_center_title_text_1button),
        ("popup-center-title + text (block,clicked handling)",
         cb_popup_center_title_text_block_clicked_event),
        ("popup-bottom-title + text + 3 buttons",
         cb_popup_bottom_title_text_3button),
        ("popup-center-title + content + 3 buttons",
         cb_popup_center_title_content_3button),
        ("popup-center-title + items + 3 buttons",
         cb_popup_center_title_item_3button),
        ("popup-center-title + text + 2 buttons (check restacking)",
         cb_popup_center_title_text_2button_restack),
        ("popup-center-text + 1 button (check hide, show)",
         cb_popup_center_text_1button_hide_show),
    )
    for label, callback in demos:
        li.item_append(label, None, None, callback, win)

    li.go()
    win.show()
if __name__ == "__main__":
    # Standalone run: init elementary, build the demo window, enter the
    # main loop, and clean up on exit.
    elementary.init()
    popup_clicked(None)
    elementary.run()
    elementary.shutdown()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# This file is modified from the original file:
# https://raw.githubusercontent.com/tensorflow/tpu/master/models/official/resnet/resnet_model.py
"""Contains definitions for the post-activation form of Residual Networks.
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
BATCH_NORM_DECAY = 0.9
BATCH_NORM_EPSILON = 1e-5
def batch_norm_relu(inputs, is_training, relu=True, init_zero=False,
                    data_format='channels_first', name=''):
  """Apply batch normalization, optionally followed by a ReLU.

  Args:
    inputs: `Tensor` of shape `[batch, channels, ...]`.
    is_training: `bool` for whether the model is training.
    relu: `bool`; if False, the ReLU is omitted.
    init_zero: `bool`; if True, the batch-norm scale (gamma) is initialized
      to 0 instead of the default 1.
    data_format: `str`, either "channels_first" for `[batch, channels,
      height, width]` or "channels_last" for `[batch, height, width,
      channels]`.
    name: `str` name for the batch-normalization layer.

  Returns:
    A normalized (and possibly rectified) `Tensor` with the same
    `data_format`.
  """
  gamma_init = tf.zeros_initializer() if init_zero else tf.ones_initializer()
  channel_axis = 1 if data_format == 'channels_first' else 3

  outputs = tf.layers.batch_normalization(
      inputs=inputs,
      axis=channel_axis,
      momentum=BATCH_NORM_DECAY,
      epsilon=BATCH_NORM_EPSILON,
      center=True,
      scale=True,
      training=is_training,
      fused=True,
      name=name,
      gamma_initializer=gamma_init)

  return tf.nn.relu(outputs) if relu else outputs
def fixed_padding(inputs, kernel_size, data_format='channels_first'):
  """Pad spatial dims by an amount that depends only on `kernel_size`.

  Args:
    inputs: `Tensor` of size `[batch, channels, height, width]` or
      `[batch, height, width, channels]` depending on `data_format`.
    kernel_size: positive `int` kernel size of the following `conv2d` or
      `max_pool2d` op.
    data_format: `str`, "channels_first" or "channels_last".

  Returns:
    A padded `Tensor` in the same `data_format`; unchanged when
    `kernel_size == 1`.
  """
  total = kernel_size - 1
  before = total // 2
  after = total - before
  spatial = [before, after]

  if data_format == 'channels_first':
    paddings = [[0, 0], [0, 0], spatial, spatial]
  else:
    paddings = [[0, 0], spatial, spatial, [0, 0]]
  return tf.pad(inputs, paddings)
def conv2d_fixed_padding(inputs, filters, kernel_size, strides,
                         data_format='channels_first', name=''):
  """Strided 2-D convolution whose padding is independent of input size.

  The padding depends only on `kernel_size` (not on the dimensions of
  `inputs`, unlike `tf.layers.conv2d` alone).

  Args:
    inputs: `Tensor` of size `[batch, channels, height_in, width_in]`.
    filters: `int` number of convolution filters.
    kernel_size: `int` convolution kernel size.
    strides: `int` convolution stride.
    data_format: `str`, "channels_first" or "channels_last".
    name: `str` name for the convolution layer.

  Returns:
    A `Tensor` of shape `[batch, filters, height_out, width_out]`.
  """
  if strides > 1:
    inputs = fixed_padding(inputs, kernel_size, data_format=data_format)
  pad_mode = 'SAME' if strides == 1 else 'VALID'

  return tf.layers.conv2d(
      inputs=inputs, filters=filters, kernel_size=kernel_size, strides=strides,
      padding=pad_mode, use_bias=False,
      kernel_initializer=tf.variance_scaling_initializer(),
      data_format=data_format, name=name)
def residual_block(inputs, filters, is_training, strides,
                   use_projection=False, data_format='channels_first', name=''):
  """Standard two-convolution residual block with BN after each conv.

  Args:
    inputs: `Tensor` of size `[batch, channels, height, width]`.
    filters: `int` number of filters used by both convolutions.
    is_training: `bool` for whether the model is in training.
    strides: `int` block stride; values greater than 1 downsample the input.
    use_projection: `bool`; use a 1x1 projection shortcut instead of the
      identity shortcut. Usually `True` for the first block of a group,
      which may change the filter count and resolution.
    data_format: `str`, "channels_first" or "channels_last".
    name: `str` prefix for the layers in this block.

  Returns:
    The output `Tensor` of the block.
  """
  shortcut = inputs
  if use_projection:
    # Project the shortcut so it matches the new filter count and stride.
    shortcut = conv2d_fixed_padding(
        inputs=inputs, filters=filters, kernel_size=1, strides=strides,
        data_format=data_format, name=name+'_proj_conv')
    shortcut = batch_norm_relu(shortcut, is_training, relu=False,
                               data_format=data_format, name=name+'_proj_bn')

  x = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=3, strides=strides,
      data_format=data_format, name=name+'_conv0')
  x = batch_norm_relu(x, is_training, data_format=data_format,
                      name=name+'_bn0')
  x = conv2d_fixed_padding(
      inputs=x, filters=filters, kernel_size=3, strides=1,
      data_format=data_format, name=name+"_conv1")
  # Final BN starts with gamma=0 so the block initially acts as identity.
  x = batch_norm_relu(x, is_training, relu=False, init_zero=True,
                      data_format=data_format, name=name+'_bn1')

  return tf.nn.relu(x + shortcut)
def bottleneck_block(inputs, filters, is_training, strides,
                     use_projection=False, data_format='channels_first', name=''):
  """Bottleneck residual block (1x1 -> 3x3 -> 1x1) with BN after each conv.

  Args:
    inputs: `Tensor` of size `[batch, channels, height, width]`.
    filters: `int` number of filters for the first two convolutions; the
      third and final convolution uses 4 times as many.
    is_training: `bool` for whether the model is in training.
    strides: `int` block stride; values greater than 1 downsample the input.
    use_projection: `bool`; use a 1x1 projection shortcut instead of the
      identity shortcut. Usually `True` for the first block of a group,
      which may change the filter count and resolution.
    data_format: `str`, "channels_first" or "channels_last".
    name: `str` prefix for the layers in this block.

  Returns:
    The output `Tensor` of the block.
  """
  shortcut = inputs
  if use_projection:
    # Project the shortcut to the block's output width (4x filters) and
    # stride; only the first block within a group needs this.
    filters_out = 4 * filters
    shortcut = conv2d_fixed_padding(
        inputs=inputs, filters=filters_out, kernel_size=1, strides=strides,
        data_format=data_format, name=name+'_proj_conv')
    shortcut = batch_norm_relu(shortcut, is_training, relu=False,
                               data_format=data_format, name=name+'_proj_bn')

  x = conv2d_fixed_padding(
      inputs=inputs, filters=filters, kernel_size=1, strides=1,
      data_format=data_format, name=name+'_conv0')
  x = batch_norm_relu(x, is_training, data_format=data_format,
                      name=name+'_bn0')
  x = conv2d_fixed_padding(
      inputs=x, filters=filters, kernel_size=3, strides=strides,
      data_format=data_format, name=name+'_conv1')
  x = batch_norm_relu(x, is_training, data_format=data_format,
                      name=name+'_bn1')
  x = conv2d_fixed_padding(
      inputs=x, filters=4 * filters, kernel_size=1, strides=1,
      data_format=data_format, name=name+'_conv2')
  # Final BN starts with gamma=0 so the block initially acts as identity.
  x = batch_norm_relu(x, is_training, relu=False, init_zero=True,
                      data_format=data_format, name=name+'_bn2')

  return tf.nn.relu(x + shortcut)
def block_group(inputs, filters, block_fn, blocks, strides, is_training, name,
                data_format='channels_first'):
  """Create one group of residual blocks for the ResNet model.

  Args:
    inputs: `Tensor` of size `[batch, channels, height, width]`.
    filters: `int` number of filters for the first convolution of the layer.
    block_fn: `function` building one block (`residual_block` or
      `bottleneck_block`).
    blocks: `int` number of blocks in the group.
    strides: `int` stride of the first block; values greater than 1
      downsample the input.
    is_training: `bool` for whether the model is training.
    name: `str` name for the Tensor output of the group.
    data_format: `str`, "channels_first" or "channels_last".

  Returns:
    The output `Tensor` of the block group.
  """
  # Only the first block uses the projection shortcut and the given stride.
  outputs = block_fn(inputs, filters, is_training, strides,
                     use_projection=True, data_format=data_format,
                     name=name+'_block0')
  # The remaining blocks keep the resolution and use identity shortcuts.
  for i in range(1, blocks):
    outputs = block_fn(outputs, filters, is_training, 1,
                       data_format=data_format, name=name+'_block' + str(i))
  return tf.identity(outputs, name)
# Modified from the original file.
def resnet_v1_generator(block_fn, layers, num_classes,
                        data_format='channels_first', filter_size=64):
  """Generator for ResNet v1 models.

  Args:
    block_fn: `function` building one block; either `residual_block` or
      `bottleneck_block`.
    layers: list of 4 `int`s, the number of blocks in each of the 4 block
      groups.
    num_classes: `int` number of classes for image classification.
    data_format: `str`, "channels_first" or "channels_last".
    filter_size: `int` filter count of the initial convolution; doubled in
      each subsequent block group.

  Returns:
    Model `function` taking `inputs` and `is_training` and returning the
    output `Tensor` of the ResNet model.
  """
  def model(inputs, is_training):
    """Creation of the model graph."""
    net = conv2d_fixed_padding(
        inputs=inputs, filters=filter_size, kernel_size=7, strides=2,
        data_format=data_format, name='initial_conv')
    net = tf.identity(net, 'initial_conv_identity')
    net = batch_norm_relu(net, is_training, data_format=data_format,
                          name='initial_bn')

    net = tf.layers.max_pooling2d(
        inputs=net, pool_size=3, strides=2, padding='SAME',
        data_format=data_format, name='initial_max_pool')
    net = tf.identity(net, 'initial_max_pool_identity')

    # Four block groups: filter multiplier and first-block stride per group.
    group_specs = ((1, 1), (2, 2), (4, 2), (8, 2))
    for i, (multiplier, strides) in enumerate(group_specs):
      net = block_group(
          inputs=net, filters=filter_size * multiplier, block_fn=block_fn,
          blocks=layers[i], strides=strides, is_training=is_training,
          name='block_group%d' % (i + 1), data_format=data_format)

    # The activation is 7x7, so this is a global average pool.
    net = tf.layers.average_pooling2d(
        inputs=net, pool_size=7, strides=1, padding='VALID',
        data_format=data_format, name='final_avg_pool')
    net = tf.identity(net, 'final_avg_pool_identity')

    shape = net.get_shape().as_list()
    net = tf.reshape(net, [shape[0], shape[1] * shape[2] * shape[3]])
    net = tf.layers.dense(
        inputs=net,
        units=num_classes,
        kernel_initializer=tf.random_normal_initializer(stddev=.01),
        name='final_dense')
    return tf.identity(net, 'final_dense_identity')

  return model
def resnet_v1(layers, block_fn, num_classes, data_format='channels_first', filters=64):
  """Returns the ResNet model for a given size and number of output classes.

  Args:
    layers: list of 4 `int`s, number of blocks per block group.
    block_fn: 'residual', 'bottleneck', or a block-building callable.
    num_classes: `int` number of output classes.
    data_format: `str`, "channels_first" or "channels_last".
    filters: `int` filter count of the initial convolution.

  Returns:
    Model `function` taking `inputs` and `is_training`.

  Raises:
    ValueError: if `block_fn` is neither a recognised string nor callable.
  """
  # Modified from the original file.
  if block_fn == 'residual':
    block_fn = residual_block
  elif block_fn == 'bottleneck':
    block_fn = bottleneck_block
  elif not callable(block_fn):
    # Previously an unknown value fell through silently and failed later
    # with a confusing error inside the generator; fail fast instead.
    raise ValueError('Unknown block_fn: %r' % (block_fn,))
  return resnet_v1_generator(
      block_fn, layers, num_classes, data_format, filters)
| |
#1
#v 0.001
from ERROR import *
"""
TODO's:
- SetLight():
###
I have to learn about these a little more :P
"""
#global public usage variables:
# Draw-mode bit flags (1, 2, 4, ... 512), one bit per primitive type.
# NOTE(review): the module-level 'global' statement is a no-op; the names
# are module globals simply by being assigned at module level.
global UGE_POINTS,UGE_LINES,UGE_LINESTRIP,UGE_LINELOOP,UGE_TRIANGLES,UGE_TRIANGLESTRIP,UGE_TRIANGLEFAN,UGE_QUADS,UGE_QUADSTRIP,UGE_POLYGON
UGE_POINTS,UGE_LINES,UGE_LINESTRIP,UGE_LINELOOP,UGE_TRIANGLES,UGE_TRIANGLESTRIP,UGE_TRIANGLEFAN,UGE_QUADS,UGE_QUADSTRIP,UGE_POLYGON = [1<<s for s in range(10)]
class __FormatHandler:
	"""In-memory model database: scenes, objects, and their transforms.

	A single instance (``__Format``) is created at module level; the
	``ugeSet*`` user functions below delegate to its managers.
	NOTE: Python 2 code (print statements, indexable dict views).
	"""
	class SceneManager:
		"""Tracks named scenes and the object IDs linked into each scene."""
		def __init__(self):
			self.RenameDefault=1	# 1 until AddScene() has renamed the default scene
			self.IDs={"Default_Scene":0} #indexing
			self.current="Default_Scene"	# name of the currently active scene
		# NOTE(review): class attribute shared by all instances -- only safe
		# because exactly one SceneManager is ever created (via __Format).
		_Scenes={"Default_Scene":[]} #{ SceneName : [ObjectIDs] }
		def NewName(self): return "Scene_%i"%(len(self.IDs)-self.RenameDefault)	# auto-generated scene name
		def HasScene(self,Name):
			"""Return True and make *Name* (str name or int index) the current scene."""
			#'return False' uses the currently active scene
			if type(Name)==str: #Scene Name
				if Name in self.IDs: #does the name exist?
					self.current=Name
					return True
			elif type(Name)==int: #Scene Index
				if -1<Name<len(self.IDs): #are we within range?
					# Python 2 only: dict.keys()/values() are lists here; this
					# reverse-maps an index back to its scene name.
					self.current=self.IDs.keys()[self.IDs.values().index(Name)]
					#^not exactly the best method... (trying to avoid using another iterator)
					return True
			return False
		def AddScene(self,Name):
			"""Create scene *Name* and make it current; the first call renames the default scene."""
			if type(Name)==str:
				if self.RenameDefault: #is this the first scene?
					# Rebinding here makes _Scenes/IDs instance attributes from now on.
					self._Scenes={Name:self._Scenes["Default_Scene"]}
					self.IDs={Name:0}
					self.current=Name
					self.RenameDefault=0
				else:
					self._Scenes.update({Name:[]})
					self.current=Name
					self.IDs.update({Name:len(self.IDs)})
			else:
				pass #can't add scene with this name (or type)
		def CID(self): return self.IDs[self.current]	# integer ID of the current scene
		#objecs always create a link to the current scene when created/called
		#(multiple scenes can reference the same object)
		def LinkObject(self,ID): #for objects only (so far) called from ObjectManager
			if ID not in self._Scenes[self.current]: self._Scenes[self.current]+=[ID]
	class ObjectManager:
		"""Tracks objects, the active object, and each object's transform data."""
		class _Rig:
			# Placeholder rig container; bones keyed by name (unused so far).
			_Bones={}
		class _Object:
			"""Transform + data holder for a single object.

			NOTE(review): these are *class* attributes; mutating ``Data`` in
			place would be shared by every object -- confirm that is intended.
			"""
			LocX,LocY,LocZ=0.0,0.0,0.0
			RotX,RotY,RotZ=0.0,0.0,0.0
			ScaX,ScaY,ScaZ=1.0,1.0,1.0
			ParentID=''
			#the data contained by the object: ('_Rig','_Mesh' ,'_Curve','_Surface','_NURBS','_DMesh')
			Data=[]
			DataName=None #this is set with the object name upon object creation
			DataType=''
		def __init__(self):
			self.IDs={} #for indexing objects
			self.current=None	# name of the currently active object
		# NOTE(review): class attribute shared by all instances -- only safe
		# because exactly one ObjectManager is ever created (via __Format).
		_Objects={}
		def NewName(self): return "Object_%i"%len(self.IDs)	# auto-generated object name
		def HasObject(self,Name):
			"""Return True and make *Name* (str name or int index) the current object."""
			#'return False' uses the currently active object
			if type(Name)==str: #Object Name
				if Name in self.IDs: #does the name exist?
					self.current=Name
					return True
			elif type(Name)==int: #Object Index
				if -1<Name<len(self.IDs): #are we within range?
					# Python 2 only: reverse-map an index back to its object name.
					self.current=self.IDs.keys()[self.IDs.values().index(Name)]
					#^not exactly the best method... (trying to avoid using another iterator)
					return True
			return False
		def AddObject(self,Name):
			"""Create object *Name*, make it current, and seed its DataName."""
			if type(Name)==str:
				self._Objects.update({Name:self._Object()})
				self._Objects[Name].DataName=Name
				self.current=Name
				self.IDs.update({Name:len(self.IDs)})
				#Scenes.LinkObject(self.IDs[self.current])
			else:
				pass #can't add object with this name (or type)
		def CID(self): return self.IDs[self.current]	# integer ID of the current object
		def RenameObjectData(self,Name): self._Objects[self.current].DataName=Name	# rename data block only
		def ParentObject(self,Name): pass	# TODO: parenting not implemented yet
		# Per-component transform setters for the currently active object:
		def SetLocX(self,X): self._Objects[self.current].LocX=X
		def SetLocY(self,Y): self._Objects[self.current].LocY=Y
		def SetLocZ(self,Z): self._Objects[self.current].LocZ=Z
		def SetRotX(self,X): self._Objects[self.current].RotX=X
		def SetRotY(self,Y): self._Objects[self.current].RotY=Y
		def SetRotZ(self,Z): self._Objects[self.current].RotZ=Z
		def SetScaX(self,X): self._Objects[self.current].ScaX=X
		def SetScaY(self,Y): self._Objects[self.current].ScaY=Y
		def SetScaZ(self,Z): self._Objects[self.current].ScaZ=Z
	# One manager of each kind, shared through the __Format singleton.
	Scenes=SceneManager()
	Objects=ObjectManager()
	def Contents(self):
		# Debug dump of the raw scene/object tables (Python 2 print statements).
		print 'Scenes:',self.Scenes._Scenes
		print 'Objects:',self.Objects._Objects
	def GL_Draw(self): #Called by VIEWER to draw specific objects to the scene
		pass
	#def ToFormat(self): #returns SESv1 format
	#def FromFormat(self,fmt=[]): #sets data from SESv1 evaluated input data
	#def Clear(self):
__Format=__FormatHandler()	#module-level singleton backing every ugeSet* function below
#-------------------------------------------------------
# user functions: (the public API format scripts call)
#-------------------------------------------------------
#adds a new scene (renames the default scene), or activates an existing scene
def ugeSetScene(Name=__Format.Scenes.NewName()):
	"""Activate scene *Name*, creating it first if unknown; return its integer ID."""
	# HasScene() also switches the current scene when it finds a match.
	if not __Format.Scenes.HasScene(Name): __Format.Scenes.AddScene(Name)
	# Python 2 trick: rebind the default argument so the *next* call
	# auto-generates a fresh scene name.
	ugeSetScene.func_defaults=(__Format.Scenes.NewName(),)
	return __Format.Scenes.CID() #return the Current ID
#adds a new object, or activates an existing object
def ugeSetObject(Name=__Format.Objects.NewName()):
	"""Activate object *Name*, creating it if unknown; return its integer ID.

	The object is also linked into the currently active scene.
	"""
	if not __Format.Objects.HasObject(Name): __Format.Objects.AddObject(Name)
	# Link the (new or existing) object into the active scene.
	CID = __Format.Objects.CID(); __Format.Scenes.LinkObject(CID)
	# Python 2 trick: refresh the default so the next call auto-names itself.
	ugeSetObject.func_defaults=(__Format.Objects.NewName(),)
	return CID #return the Current ID
def ugeSetObjectDataName(Name): __Format.Objects.RenameObjectData(Name)
#sets the current object's parent
def ugeSetObjectParent(Name):
	"""Parent the active object to *Name* when such an object exists.

	NOTE(review): HasObject() also switches the active object to *Name*
	before ParentObject() runs, and ParentObject() is currently a stub.
	"""
	if __Format.Objects.HasObject(Name):
		__Format.Objects.ParentObject(Name)
#resets the current object's Location
def ugeSetObjectLoc(X=0.0,Y=0.0,Z=0.0):
	"""Set the active object's location.

	Accepts either three numbers, or a single 3-element list/tuple in ``X``.
	"""
	if type(X) in (list,tuple) and len(X)==3:
		X,Y,Z = X
	# BUG FIX: a list/tuple argument used to be unpacked but never stored,
	# because the old elif branch was skipped after the unpack; the
	# components are now always written through.
	__Format.Objects.SetLocX(X);__Format.Objects.SetLocY(Y);__Format.Objects.SetLocZ(Z)
#individual per-axis setters for formats that stream one component at a time
def ugeSetObjectLocX(X):
	"""Set only the X component of the active object's location."""
	__Format.Objects.SetLocX(X)
def ugeSetObjectLocY(Y):
	"""Set only the Y component of the active object's location."""
	__Format.Objects.SetLocY(Y)
def ugeSetObjectLocZ(Z):
	"""Set only the Z component of the active object's location."""
	__Format.Objects.SetLocZ(Z)
#resets the current object's Rotation
def ugeSetObjectRot(X=0.0,Y=0.0,Z=0.0):
	"""Set the active object's rotation.

	Accepts either three numbers, or a single 3-element list/tuple in ``X``.
	"""
	if type(X) in (list,tuple) and len(X)==3:
		X,Y,Z = X
	# BUG FIX: sequence input used to be unpacked but never stored (the old
	# elif was skipped); components are now always written through.
	__Format.Objects.SetRotX(X);__Format.Objects.SetRotY(Y);__Format.Objects.SetRotZ(Z)
def ugeSetObjectRotX(X):
	"""Set only the X component of the active object's rotation."""
	__Format.Objects.SetRotX(X)
def ugeSetObjectRotY(Y):
	"""Set only the Y component of the active object's rotation."""
	__Format.Objects.SetRotY(Y)
def ugeSetObjectRotZ(Z):
	"""Set only the Z component of the active object's rotation."""
	__Format.Objects.SetRotZ(Z)
#resets the current object's Scale
def ugeSetObjectSca(X=1.0,Y=1.0,Z=1.0):
	"""Set the active object's scale.

	Accepts either three numbers, or a single 3-element list/tuple in ``X``.
	"""
	if type(X) in (list,tuple) and len(X)==3:
		X,Y,Z = X
	# BUG FIX: sequence input used to be unpacked but never stored (the old
	# elif was skipped); components are now always written through.
	__Format.Objects.SetScaX(X);__Format.Objects.SetScaY(Y);__Format.Objects.SetScaZ(Z)
def ugeSetObjectScaX(X):
	"""Set only the X component of the active object's scale."""
	__Format.Objects.SetScaX(X)
def ugeSetObjectScaY(Y):
	"""Set only the Y component of the active object's scale."""
	__Format.Objects.SetScaY(Y)
def ugeSetObjectScaZ(Z):
	"""Set only the Z component of the active object's scale."""
	__Format.Objects.SetScaZ(Z)
"""
class old_format():
#global defaults (bypassing re-definition from functions (speedup))
DLRS=[0.0,0.0,0.0, 0.0,0.0,0.0, 1.0,1.0,1.0]
DM=[[1.0,0.0,0.0,0.0], [0.0,1.0,0.0,0.0], [0.0,0.0,1.0,0.0], [0.0,0.0,0.0,1.0]]
#___________________________________________________________________________________________
import VIEWER
#VIEWER.Libs[MatNodes, Images, Textures, Materials, Scenes, Objects]
#Active Data:
ActiveScene = 0
ActiveObject = None
ActiveMaterial = None
ActivePrimitive = 0 #in active object
def __GetOID(N): #check for specified object name/ID
ID=''
if type(N)==int: ID=('' if N>len(VIEWER.Libs[5]) else N) #N=1
else: #N="Object1"
###need a faster indexing method here
###VIEWER.Libs[5].index(N) won't work as VIEWER.Libs[5] values are lists with random internal datas
for I,O in enumerate(VIEWER.Libs[5]): #TODO: use a while loop (stop the loop if found)
if O[0]==N: ID=I
return ID #return int() if found
#___________________________________________________________________________________________
SceneCount = 0
#create a new scene, or activate the specified scene
def SetScene( Name="Scene0" ):
global SceneCount,ActiveScene
if SceneCount==0: #change the default scene name
VIEWER.Libs[4][0][0]=(Name if type(Name)==str else "Scene"+str(SceneCount))
SceneCount+=1
else: #user defined scenes already exist
SceneIndex = None
#TODO: usa a while loop
for Index,Scene in enumerate(VIEWER.Libs[4]): #check for specified scene name/index
if Scene[0]==Name or Index==Name: SceneIndex=Index
if SceneIndex == None: #create a new scene
VIEWER.Libs[4]+=[Name if type(Name)==str else "Scene"+str(SceneCount)]
ActiveScene=len(VIEWER.Libs[4]) #set the active scene index to the newly added scene
SceneCount+=1
else: ActiveScene=SceneIndex #set the active scene index to the specified scene
SetScene.func_defaults=( "Scene"+str(SceneCount), )
#TODO:
#- active scene rename: SetScene( [('Name' or Index), "NewName"] )
#^this will rename the scene while setting it to active
#___________________________________________________________________________________________
ObjectSceneID = [] #the indexed object's scene index
#create a new object in the active scene, or activate and change the data in a specified object
#(if a specified object exists in another scene, that object's scene will be set as active)
def SetObject( Name="Object0", Viewport=0, LocRotSca=[], Sub_Name='', ParentName='' ):
global ActiveScene,ActiveObject,ObjectSceneID,DLRS
ObjectLib=VIEWER.Libs[5]
#Verify Data: (use Defaults if neccesary)
N = (ObjectLib[Name][0] if (type(Name)==int and Name>-1 and Name<(len(ObjectLib)+1) #get the name of the specified object
) else (Name if type(Name)==str else "Object"+str(len(ObjectLib)))) #N must be a string
VP = (Viewport if (Viewport>0 and Viewport<25) else 1) #must be 1 to 24
LRS= (DLRS if len(LocRotSca)!=9 else LocRotSca) #TODO: advanced LRS verification
SD = ["",(N if (Sub_Name=='' or type(Sub_Name)!=str) else Sub_Name),[],[]]
P = (__GetOID(ParentName) if ParentName!=('__REMOVE__' or '') else ParentName)
OID=__GetOID(N) if len(VIEWER.Libs[5])>0 else '' #try to get an active object index
if OID=='': #if this is a new object:
VIEWER.Libs[5].append([N,VP,LRS,SD,(P if len(VIEWER.Libs[5])>0 else '')]) #ignore parent index if this is the first object
VIEWER.Libs[4][ActiveScene][1]+=[len(VIEWER.Libs[5])-1]
ObjectSceneID+=[ActiveScene]
ActiveObject=len(VIEWER.Libs[5])-1
else: #set the active object to the specicified object and change it's data
ActiveObject,ActiveScene = OID,ObjectSceneID[OID]; AO=ObjectLib[OID]
VIEWER.Libs[5][OID]=[ AO[0], #reset the object's data:
((VP if Viewport!=0 else AO[1]) if AO[1]!=VP else AO[1]),
((LRS if LRS!=DLRS else AO[2]) if AO[2]!=LRS else AO[2]),
[AO[3][0],(AO[3][1] if Sub_Name=='' else SD[1]),AO[3][2],AO[3][3]], #reset sub data name (not data)
((P if ObjectLib[OID][4]!=P else ObjectLib[OID][4]) if P!='__REMOVE__' else '')]
SetObject.func_defaults=( "Object"+str(len(VIEWER.Libs[5])), 0, [], '', '' )
#TODO:
#- verify the object doesn't have multiple parents (important)
#- active object rename: SetObject( [('Name' or Index), "NewName"], ... )
#^this will rename the specified object while setting it active and editing it's data
#___________________________________________________________________________________________
#set the active object's type to Rig and create a new bone within it, or change the data of an existing bone
#(you will recieve an error if used on another Object type)
#(you will also recieve an error if no object is defined)
def SetBone( Name="Bone0", Viewport=0, LocRotSca=[], BindMtx=[], ParentName='', PreviousName='' ):
global ActiveObject,BoneLib,N,VP,LRS,BM,PA,PR,DLRS
BoneLib=VIEWER.Libs[5][ActiveObject][3][3]
def GetID(S): #check for specified bone name/ID
global BoneLib
ID=''
if type(S)==int: ID=('' if S>len(BoneLib) else S) #S=1
else: #S="Bone1"
###need a faster indexing method here
###BoneLib.index(N) won't work as BoneLib values are lists with random internal datas
for I,B in enumerate(BoneLib):
if B[0]==S: ID=I
return ID
#Verify Data: (use Defaults if neccesary)
N = (Name if type(Name)==str else "Bone"+str(len(BoneLib)))
VP = (Viewport if (Viewport>0 and Viewport<25) else 1)
LRS= (DLRS if len(LocRotSca)!=9 else LocRotSca) #TODO: advanced LRS verification
BM = (DM if len(BindMtx)!=4 else BindMtx) #TODO: advanced matrix verification
PA = (GetID(ParentName) if ParentName!=('__REMOVE__' or '') else '')
PR = (GetID(PreviousName) if PreviousName!='' else '')
def Set():
global ActiveObject,N,VP,LRS,BM,PA,PR,BoneLib
#manage the bone data:
BID= GetID(N) if len(BoneLib)>0 else '' #try to get an active object index
if BID=='': VIEWER.Libs[5][ActiveObject][3][3]+=[[N,VP,LRS,BM,PA,PR]] #add a new bone
else: VIEWER.Libs[5][ActiveObject][3][3][BID]=[BoneLib[BID][0], #edit the specified bone
((VP if Viewport!=0 else BoneLib[BID][1]) if BoneLib[BID][1]!=VP else BoneLib[BID][1]),
((LRS if LRS!=DLRS else BoneLib[BID][2]) if BoneLib[BID][2]!=LRS else BoneLib[BID][2]),
((BM if BM!=DM44 else BoneLib[BID][3]) if BoneLib[BID][3]!=BM else BoneLib[BID][3]),
((PA if ParentName!='' else BoneLib[BID][4]) if BoneLib[BID][4]!=PA else BoneLib[BID][4]),
((PR if ParentName!='' else BoneLib[BID][5]) if BoneLib[BID][5]!=PR else BoneLib[BID][5])]
#^- need to check for previous bone looping (in case of user error)
#validate the active object
if len(VIEWER.Libs[5])>0:
if VIEWER.Libs[5][ActiveObject][3][0]=="": VIEWER.Libs[5][ActiveObject][3][0]="_Rig";Set() #set to "_Rig" and append a bone
elif VIEWER.Libs[5][ActiveObject][3][0]=="_Rig": Set() #append a bone
else: print 'Unable to append Bone to Object of type: "'+VIEWER.Libs[5][OID][3][0].split('_')[1]+'"\nignoring current data'
else: print 'please define an object'
SetBone.func_defaults=( "Bone"+str(len(VIEWER.Libs[5][ActiveObject][3][3])), 0, [], [], '', '' )
#TODO:
#- instead of ignoring the invalid bone's data, create a new rig object to append it to
#^you will then be able to parent the "ignored" bones to their proper object using a 3D editor
#NOTE: only 1 object will be created to be the place-holder for the ignored bones (instead of 1 object for each ignored bone)
#- rename bone: SetBone( [('Name' or Index), "NewName"], ... )
#^this will rename the specified bone while also editing it's data
#___________________________________________________________________________________________
#set the active object's type to Mesh and append a primitive in it's data
#(you will recieve an error if used on another Object type)
#(you will also recieve an error if no object is defined)
def SetPrimitive( Name=UMC_TRIANGLES ):
#TODO: figure out how to get the var itself to display (not it's value)
if len(VIEWER.Libs[5])>0: #validate the active object
if VIEWER.Libs[5][ActiveObject][3][0]=="":
VIEWER.Libs[5][ActiveObject][3][0]="_Mesh"
VIEWER.Libs[5][ActiveObject][3][3]=[[],[],[[],[]],[[],[],[],[],[],[],[],[]],[],[]]
VIEWER.Libs[5][ActiveObject][3][3][5]+=[[Name,[]]] #set to "_Mesh" and append a primitive
elif VIEWER.Libs[5][ActiveObject][3][0]=="_Mesh":
VIEWER.Libs[5][ActiveObject][3][3][5]+=[[Name,[]]] #append a primitive
else: #return error
print 'Unable to append Primitive to Object of type: "'+VIEWER.Libs[5][OID][3][0].split('_')[1]+'"\nignoring current data'
else: print 'please define an object'
SetPrimitive.func_defaults=( Name, )
#TODO:
#- index the proper primitive to add facepoints to
#^(I personally havn't seen a format you'd need this option for, but the possibility of it still lies about)
#___________________________________________________________________________________________
#set the active object's type to Mesh and append a valid Vector List to it's data
#(you will recieve an error if used on another Object type)
#(you will also recieve an error if no object is defined)
def SetVerts( List=[] ):
global ActiveObject
if len(VIEWER.Libs[5])>0:
if VIEWER.Libs[5][ActiveObject][3][0]=="":
VIEWER.Libs[5][ActiveObject][3][0]="_Mesh"
VIEWER.Libs[5][ActiveObject][3][3]=[List,[],[[],[]],[[],[],[],[],[],[],[],[]],[],[]]
elif VIEWER.Libs[5][ActiveObject][3][0]=="_Mesh": VIEWER.Libs[5][ActiveObject][3][3][0]=List
else: print 'Unable to append Vert List to Object of type: "'+VIEWER.Libs[5][OID][3][0].split('_')[1]+'"\nignoring current data'
else: print 'please define an object'
def SetNormals( List=[] ):
global ActiveObject
if len(VIEWER.Libs[5])>0:
if VIEWER.Libs[5][ActiveObject][3][0]=="":
VIEWER.Libs[5][ActiveObject][3][0]="_Mesh"
VIEWER.Libs[5][ActiveObject][3][3]=[[],List,[[],[]],[[],[],[],[],[],[],[],[]],[],[]]
elif VIEWER.Libs[5][ActiveObject][3][0]=="_Mesh": VIEWER.Libs[5][ActiveObject][3][3][1]=List
else: print 'Unable to append Normal List to Object of type: "'+VIEWER.Libs[5][OID][3][0].split('_')[1]+'"\nignoring current data'
else: print 'please define an object'
def SetColors( List0=[], List1=[] ):
global ActiveObject
if len(VIEWER.Libs[5])>0:
if VIEWER.Libs[5][ActiveObject][3][0]=="":
VIEWER.Libs[5][ActiveObject][3][0]="_Mesh"
VIEWER.Libs[5][ActiveObject][3][3]=[[],[],[List0,List1],[[],[],[],[],[],[],[],[]],[],[]]
elif VIEWER.Libs[5][ActiveObject][3][0]=="_Mesh": VIEWER.Libs[5][ActiveObject][3][3][2]=[List0,List1]
else: print 'Unable to append Color Lists to Object of type: "'+VIEWER.Libs[5][OID][3][0].split('_')[1]+'"\nignoring current data'
else: print 'please define an object'
def SetUVs( List0=[], List1=[], List2=[], List3=[], List4=[], List5=[], List6=[], List7=[] ):
global ActiveObject
if len(VIEWER.Libs[5])>0:
if VIEWER.Libs[5][ActiveObject][3][0]=="":
VIEWER.Libs[5][ActiveObject][3][0]="_Mesh"
VIEWER.Libs[5][ActiveObject][3][3]=[[],[],[[],[]],[List0,List1,List2,List3,List4,List5,List6,List7],[],[]]
elif VIEWER.Libs[5][ActiveObject][3][0]=="_Mesh": VIEWER.Libs[5][ActiveObject][3][3][0]=[List0,List1,List2,List3,List4,List5,List6,List7]
else: print 'Unable to append UV Lists to Object of type: "'+VIEWER.Libs[5][OID][3][0].split('_')[1]+'"\nignoring current data'
else: print 'please define an object'
#TODO:
#- validate vector lists
#- Validate replacements (don't replace a data with a default unless specified)
#___________________________________________________________________________________________
#Vectors: [ X, Y(, Z) ]
#Colors: [R,G,B,A] int( 0 : 255 ) OR float( 0.0 : 1.0 )
#^be careful not to specify an int when your type is float (for colors)
#^2D Verts and Normals are allowd.
#append a facepoint to the active primitive with the specified vectors
#(colors and uv's in list format are assumed to be single channel, and are read as such)
def SetFacepoint( Vert='', Normal='', Color='', UV='' ):
global ActiveObject
#verify we havn't switched objects to an invalid type before trying to add facepoints:
if VIEWER.Libs[5][ActiveObject][3][0]=="_Mesh": #we can only set the facepoints of an active mesh object
if len(VIEWER.Libs[5][ActiveObject][3][3])>0: #we can't append facepoints to an object with no primitives.
Colors,UVs = VIEWER.Libs[5][ActiveObject][3][3][2],VIEWER.Libs[5][ActiveObject][3][3][3]
def Index(value,List): #returns either a valid index or ''
if type(value)==list: #[X,Y(,Z)] or [I/R(,A/G(,B(,A)))]
try: return List.index(value)
except: List+=[value]; return List.index(value) #vector or color
elif type(value)==int: return value #index (doesn't validate against len(list))
elif type(value)==str: return '' #no vector (validate any string to '')
CIDs = ( (Index(Color[0],Colors[0])
,(Index(Color[1],Colors[1]) if len(Color)==2 else '')
) if type(Color)==tuple else (Index(Color,Colors[0]),'') )
UVIDs = ( (Index(UV[0],UVs[0])
,(Index(UV[1],UVs[1]) if len(UV)>=2 else '')
,(Index(UV[2],UVs[2]) if len(UV)>=3 else '')
,(Index(UV[3],UVs[3]) if len(UV)>=4 else '')
,(Index(UV[4],UVs[4]) if len(UV)>=5 else '')
,(Index(UV[5],UVs[5]) if len(UV)>=6 else '')
,(Index(UV[6],UVs[6]) if len(UV)>=7 else '')
,(Index(UV[7],UVs[7]) if len(UV)==8 else '')
) if type(UV)==tuple else (Index(UV,UVs[0]),'','','','','','','')
)
VIEWER.Libs[5][ActiveObject][3][3][5][-1][1]+=[
[Index(Vert,VIEWER.Libs[5][ActiveObject][3][3][0]),Index(Normal,VIEWER.Libs[5][ActiveObject][3][3][1]),CIDs,UVIDs]
]
else: print 'unable to append to a non-existant primitive'
else:
print 'Unable to append Facepoint to Object of type: "'+VIEWER.Libs[5][OID][3][0].split('_')[1]+'"'
print 'Make sure the active object is a Mesh-type Object before trying to append Facepoints'
#TODO:
#- strict-er inputs (no errors allowed)
#___________________________________________________________________________________________
#this function is used to give a bone weight to the current (existing) vert
def SetWeight( BoneName=0, Weight=1.0, VertID='' ): #VertID is a TODO (should accept both list and int)
global ActiveObject
#verify we havn't switched objects to an invalid type:
if ActiveObject != None:
SD = VIEWER.Libs[5][ActiveObject][3]
if VIEWER.Libs[5][ActiveObject][4] != '':
ParentObject = VIEWER.Libs[5][VIEWER.Libs[5][ActiveObject][4]]
if ParentObject[3][0] == "_Rig": #parent object must be a _Rig object
if type(BoneName) == int: #check for the bone name in the parent _Rig oblect
if BoneName < len(ParentObject[3][3]): #is the index w/in the bone count?
BoneName = ParentObject[3][3][BoneName][0] #must be a string
else: BoneName = 'Bone'+str(BoneName) #must be a string
else: BoneName = 'Bone'+str(BoneName)
else: BoneName = 'Bone'+str(BoneName)
if SD[0]=="_Mesh":
if len(SD[3][5]): #Has Primitives
if len(SD[3][5][-1][1]): #Has facepoints
if len(SD[3][0]): #Has Verts
#WGrps,found,WGid = SD[3][4],0,0
WGrps,found = SD[3][4],0
Vid = SD[3][5][-1][1][-1][0] #vert index from current primitive's current facepoint
if len(WGrps)>0:
'''
while WGid < len(WGrps)-1 or not found: #faster (stops if found or at end)
WGN,WGFs,WGVs = WGrps[WGid]
if WGN == BoneName: #append Vid to an existing weight group
WFid = len(WGFs) #assume the weight is a new weight
try: WFid = WGFs.index(Weight) #try to get a valid weight index
except: VIEWER.Libs[5][ActiveObject][3][3][4][WGid][1]+=[Weight] #append new weight
VIEWER.Libs[5][ActiveObject][3][3][4][WGid][2]+=[[Vid,WFid]]
found = 1
WGid += 1
''' #^???throws an indexing error...???
for WGid,WG in enumerate(WGrps):
WGN,WGFs,WGVs = WG
if WGN == BoneName: #append Vid to an existing weight group
WFid = len(WGFs) #assume the weight is a new weight
try: WFid = WGFs.index(Weight) #try to get a valid weight index
except: VIEWER.Libs[5][ActiveObject][3][3][4][WGid][1].append(Weight) #append new weight
VIEWER.Libs[5][ActiveObject][3][3][4][WGid][2].append([Vid,WFid])
found = 1
#'''
if not found: #append Vid to a new weight group
VIEWER.Libs[5][ActiveObject][3][3][4]+=[[BoneName,[Weight],[[Vid,0]]]]
#check get the vert index and append it to the specified weight
#VIEWER.Libs[5][ActiveObject][3][3][-1][-1][4].append([Weight,Bones])
#TODO:
#- use VID to index a specific vert. (some model formats may force you to use this)
#(currently indexing the last used vert (OpenGL-style))
#___________________________________________________________________________________________
#return the mesh-objects from either the specified scene, or from the object library
def GetMeshObjects(Scene=''):
def Sort(List):
L=[]
for ID,Object in enumerate(List):
if type(Object)==int:
if VIEWER.Libs[5][Object][3][0]=="_Mesh": L+=[Object]
else:
if Object[3][0]=="_Mesh": L+=[ID]
return L
if type(Scene)==str:
if Scene=='': return Sort(VIEWER.Libs[5])
else: return Sort(VIEWER.Libs[4][VIEWER.Libs[4].index(Scene)][1])
elif type(Scene)==int: return Sort(VIEWER.Libs[4][Scene][1])
#TODO: better error handling on SceneLib.index(Scene) and SceneLib[Scene]
#___________________________________________________________________________________________
def GetObjectName(Object=0):
if type(Object)==int: return VIEWER.Libs[5][Object][0]
#___________________________________________________________________________________________
def GetVerts(Object=''):
if type(Object)==int: return VIEWER.Libs[5][Object][3][3][0]
elif type(Object)==str: VIEWER.Libs[5][__GetOID(Object)][3][3][0]
#___________________________________________________________________________________________
def GetNormals(Object=''):
if type(Object)==int: return VIEWER.Libs[5][Object][3][3][1]
elif type(Object)==str: VIEWER.Libs[5][__GetOID(Object)][3][3][1]
#___________________________________________________________________________________________
def GetColors(Object='',Channel=0):
if type(Object)==int: return VIEWER.Libs[5][Object][3][3][2][Channel]
elif type(Object)==str: VIEWER.Libs[5][__GetOID(Object)][3][3][2][Channel]
#___________________________________________________________________________________________
def GetUVs(Object='',Channel=0):
if type(Object)==int: return VIEWER.Libs[5][Object][3][3][3][Channel]
elif type(Object)==str: VIEWER.Libs[5][__GetOID(Object)][3][3][3][Channel]
#___________________________________________________________________________________________
def GetPrimitives(Object=''):
if type(Object)==int: return VIEWER.Libs[5][Object][3][3][5]
elif type(Object)==str: VIEWER.Libs[5][__GetOID(Object)][3][3][5]
#___________________________________________________________________________________________
def AsTriangles( PrimitivesList, Option=0 ):
global UMC_POINTS,UMC_LINES,UMC_LINESTRIP,UMC_LINELOOP,UMC_TRIANGLES,UMC_TRIANGLESTRIP,UMC_TRIANGLEFAN,UMC_QUADS,UMC_QUADSTRIP,UMC_POLYGON
Triangles,Quads = [],[] #NOTE: "Quads" is only for single primitive conversion
for PID,PFPs in PrimitivesList:
index = 0;Tris = [3,[]]
if PID==UMC_POINTS:
if Option==(1 or 3): pass #primitive is not Tri/Quad
else: Triangles+=[[PID,PFPs]]
if PID==UMC_LINES:
if Option==(1 or 3): pass #primitive is not Tri/Quad
else: Triangles+=[[PID,PFPs]]
if PID==UMC_LINESTRIP:
if Option==(1 or 3): pass #primitive is not Tri/Quad
else: Triangles+=[[PID,PFPs]]
if PID==UMC_LINELOOP:
if Option==(1 or 3): pass #primitive is not Tri/Quad
else: Triangles+=[[PID,PFPs]]
if PID==UMC_TRIANGLES:
if Option==(1 or 3): Triangles+=PFPs #single primitive
else: Triangles+=[[PID,PFPs]]
if PID==UMC_TRIANGLESTRIP:
while index != len(PFPs)-2:
T=PFPs[index:index+3]
if T[0] != T[1] and T[0] != T[2] and T[1] != T[2]: Tris[1]+=(list(reversed(T)) if index%2 else T)
index += 1
if Option==(1 or 3): Triangles+=Tris[1] #single primitive
else: Triangles+=[Tris]
if PID==UMC_TRIANGLEFAN:
P=[PFPs[index]]
while index != len(PFPs)-2:
T=P+[PFPs[index+1],PFPs[index+2]]
if T[0] != T[1] and T[0] != T[2] and T[1] != T[2]: Tris[1]+=(list(reversed(T)) if index%2 else T)
index += 1
if Option==(1 or 3): Triangles+=Tris[1] #single primitive
else: TrianglesList+=[Tris]
if PID==UMC_QUADS:
while index != len(PFPs):
Q=[PFPs[index],PFPs[index+1],PFPs[index+2],PFPs[index+3]]
Tris[1]+=[Q[0],Q[1],Q[2],Q[1],Q[2],Q[3]] #TODO: face flipping
index += 4
if Option==0: Triangles+=[Tris]
if Option==1: Triangles+=Tris[1]
if Option==2: Triangles+=[[PID,PFPs]]
if Option==3: Quads+=PFPs
if PID==UMC_QUADSTRIP: #quad-strips
Qds=[]
pass #unknown handling atm (TODO)
if PID==UMC_POLYGON: #Polygons
pass #unknown handling atm (TODO)
if Option==0: return Triangles#................multiple triangle primitives
if Option==1: return [[3,Triangles]]#..........single triangle primitive
if Option==2: return Triangles#................multiple triangle and quad primitives
if Option==3: return [[3,Triangles],[6,Quads]]#single triangle and quad primitive
'''
def convertFromTriangles( TrianglesList ):
P=ConvertToTriangles(TrianglesList,1) #only works for single tris atm
'''
"""
# ---- file boundary marker (garbled separator in source) ----
import json
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from clients.models import Client, Configuration, ConfigurationFile
from clients.models import ClientException
from cmdb_agent.lib import lib
###########
# Helpers
###########
def error_msg(msg):
    """Wrap *msg* in a JSON ``{"error": ...}`` response."""
    body = json.dumps({'error': msg})
    return HttpResponse(body, content_type='application/json')
def debug_msg(msg):
    """Wrap *msg* in a JSON ``{"debug": ...}`` response."""
    body = json.dumps({'debug': msg})
    return HttpResponse(body, content_type='application/json')
def status_msg(name, date_created, is_disabled, is_blacklisted, api_key=''):
    """JSON status response for a client.

    The ``api_key`` is included only when it looks valid (40 characters).
    """
    body = {
        'name': name,
        'date_created': str(date_created),
        'is_disabled': is_disabled,
        'is_blacklisted': is_blacklisted,
    }
    if len(api_key) == 40:
        body['api_key'] = api_key
    return HttpResponse(json.dumps(body), content_type='application/json')
def info_msg(client):
    """JSON summary of *client*, including how many configurations it tracks."""
    configurations = client.configuration_set.all()
    msg_dict = {
        'name': client.client_name,
        'date_created': str(client.date_created),
        'is_disabled': client.is_disabled,
        'is_blacklisted': client.is_blacklisted,
        'api_key': client.api_key,
        # BUG FIX: the queryset above was fetched and then ignored while an
        # identical query ran again inside len(); reuse it, and let the
        # database count via .count() instead of materializing all rows.
        'configurations_tracking': configurations.count(),
    }
    return HttpResponse(json.dumps(msg_dict), content_type='application/json')
def config_status_msg(config_status):
    """Serialize a per-configuration status mapping as a JSON response."""
    body = json.dumps(config_status)
    return HttpResponse(body, content_type='application/json')
def unregister_msg(name, is_disabled):
    """JSON acknowledgement for a disable/unregister request."""
    body = json.dumps({'fqdn': name, 'is_disabled': is_disabled})
    return HttpResponse(body, content_type='application/json')
def configuration_added_msg(details):
    """Echo the accepted configuration *details* back as JSON."""
    body = json.dumps(details)
    return HttpResponse(body, content_type='application/json')
def poll_msg(details):
    """JSON response carrying the client's managed-configuration listing."""
    body = json.dumps(details)
    return HttpResponse(body, content_type='application/json')
def fetch_msg(details):
    """JSON response carrying a fetched configuration file."""
    body = json.dumps(details)
    return HttpResponse(body, content_type='application/json')
#########
# Views
#########
@csrf_exempt
def register(request):
    """Register a new client (POST JSON body: ``{"fqdn": ...}``).

    Responds with the new client's status (including its api_key), or a
    JSON error when the FQDN is missing, invalid, or already registered.
    """
    if request.method != 'POST':
        return error_msg('Invalid method.')
    obj = json.loads(request.body.decode('UTF-8'))
    try:
        fqdn = lib.require_key(obj, 'fqdn')
    except Exception as e:
        return error_msg(str(e))
    try:
        client = Client.new_client(fqdn)
    except ClientException as e:
        return error_msg(str(e))
    if not client:
        return error_msg('Client `{}` is already registered.'.format(fqdn))
    return status_msg(fqdn, client.date_created, client.is_disabled, client.is_blacklisted, client.api_key)
@csrf_exempt
def unregister(request):
    """Disable the client identified by ``api_key`` (POST JSON body)."""
    if request.method != 'POST':
        return error_msg('Invalid method.')
    obj = json.loads(request.body.decode('UTF-8'))
    try:
        api_key = lib.require_key(obj, 'api_key')
    except Exception as e:
        return error_msg(str(e))
    try:
        client = Client.disable_client(api_key)
        return unregister_msg(client.client_name, client.is_disabled)
    except Exception as e:
        return error_msg(str(e))
def info(request):
    """Return full client info for a valid 40-char ``api_key`` (GET)."""
    if request.method != 'GET':
        # BUG FIX: non-GET requests previously fell through and returned
        # None, which Django rejects with a 500; answer like the other views.
        return error_msg('Invalid method.')
    # BUG FIX: use .get() like status()/poll() do -- lib.require_key raised
    # an uncaught exception here when `api_key` was absent.
    api_key = request.GET.get('api_key', '')
    if len(api_key) != 40:
        return error_msg('info: Invalid `api_key`.')
    try:
        client = Client.objects.get(api_key=api_key)
    except Exception:  # was a bare except; Exception keeps Ctrl-C working
        return error_msg('info: Client for `api_key` {} doesn\'t exist.'.format(api_key))
    # Outside the try so serialization errors are not masked as "not found".
    return info_msg(client)
def status(request):
    """Return the configuration status map for a client (GET, ``api_key``)."""
    if request.method != 'GET':
        # BUG FIX: non-GET requests previously returned None (a 500 in Django).
        return error_msg('Invalid method.')
    api_key = request.GET.get('api_key', '')
    if len(api_key) != 40:
        return error_msg('status: Invalid `api_key`.')
    try:
        config_status = Client.get_config_status(api_key)
    except Exception:  # was a bare except; Exception keeps Ctrl-C working
        return error_msg('status: Client for `api_key` doesn\'t exist.')
    # Outside the try so serialization errors are not masked as "not found".
    return config_status_msg(config_status)
@csrf_exempt
def add(request):
    """POST endpoint: attach a configuration record to a client.

    Expected JSON body::

        {"api_key": <40-char key>, "type": "configuration",
         "configuration": {"file_path": str, "mtime": int|str,
                           "payload": str, "case_sensitive": bool}}

    On success echoes the parsed details; every validation failure returns
    a JSON ``{"error": ...}`` response instead.
    """
    if request.method == 'POST':
        obj = json.loads(request.body.decode('UTF-8'))
        if 'api_key' not in obj:
            return error_msg('Could not find `api_key` key in object.')
        try:
            client = Client.get_by_api_key(obj.get('api_key'))
        except:
            return error_msg('Invalid API Key.')
        if client.is_disabled:
            return error_msg('Client for `api_key` is currently disabled.')
        if 'type' not in obj:
            return error_msg('Could not find `type` key in object.')
        add_type = obj.get('type', '')
        if add_type == 'configuration':
            # `add_type` doubles as the sub-object key: requires obj['configuration'].
            if add_type not in obj:
                return error_msg('Could not find `{}` key in object.'.format(add_type))
            # Get `file_path`
            if 'file_path' not in obj.get('configuration'):
                return error_msg('Could not find `file_path` key in object.configuration.')
            else:
                try:
                    file_path = str(obj.get('configuration').get('file_path'))
                except:
                    return error_msg('`file_path` must be a string.')
            # Get `mtime`: int(str(...)) normalizes both ints and numeric strings.
            if 'mtime' not in obj.get('configuration'):
                return error_msg('Could not find `mtime` key in object.configuration.')
            else:
                try:
                    mtime = int(str(obj.get('configuration').get('mtime')))
                except:
                    return error_msg('`mtime` must be a numerical string or an integer.')
            # Get `payload`
            # NOTE(review): a plain .get() cannot raise, so this except branch
            # looks unreachable -- confirm before relying on its message.
            if 'payload' not in obj.get('configuration'):
                return error_msg('Could not find `payload` key in object.configuration.')
            else:
                try:
                    payload = obj.get('configuration').get('payload')
                except:
                    return error_msg('`payload` must be a string.')
            # Get `case_sensitive` (bool() never raises, so the except is dead too)
            if 'case_sensitive' not in obj.get('configuration'):
                return error_msg('Could not find `case_sensitive` key in object.configuration.')
            else:
                try:
                    case_sensitive = bool(obj.get('configuration').get('case_sensitive'))
                except:
                    return error_msg('`case_sensitive` must be boolean.')
            details = {
                'file_path': file_path,
                'mtime': mtime,
                'case_sensitive': case_sensitive,
                'payload': payload,
            }
            try:
                Configuration.add(client, **details)
            except Exception as e:
                return error_msg(str(e))
            return configuration_added_msg(details)
        else:
            return error_msg('Could not understand the type: `{}`.'.format(obj.get('type')))
    else:
        return error_msg('Invalid method.')
@csrf_exempt
def remove(request):
    """POST endpoint: detach a configuration record from a client.

    Accepts the same JSON body shape as ``add`` (api_key, type,
    configuration{file_path, mtime, payload, case_sensitive}) and mirrors
    its validation ladder; the only difference is Configuration.remove().
    """
    if request.method == 'POST':
        obj = json.loads(request.body.decode('UTF-8'))
        if 'api_key' not in obj:
            return error_msg('Could not find `api_key` key in object.')
        try:
            client = Client.get_by_api_key(obj.get('api_key'))
        except:
            return error_msg('Invalid API Key.')
        if client.is_disabled:
            return error_msg('Client for `api_key` is currently disabled.')
        if 'type' not in obj:
            return error_msg('Could not find `type` key in object.')
        add_type = obj.get('type', '')
        if add_type == 'configuration':
            # `add_type` doubles as the sub-object key: requires obj['configuration'].
            if add_type not in obj:
                return error_msg('Could not find `{}` key in object.'.format(add_type))
            # Get `file_path`
            if 'file_path' not in obj.get('configuration'):
                return error_msg('Could not find `file_path` key in object.configuration.')
            else:
                try:
                    file_path = str(obj.get('configuration').get('file_path'))
                except:
                    return error_msg('`file_path` must be a string.')
            # Get `mtime`: int(str(...)) normalizes both ints and numeric strings.
            if 'mtime' not in obj.get('configuration'):
                return error_msg('Could not find `mtime` key in object.configuration.')
            else:
                try:
                    mtime = int(str(obj.get('configuration').get('mtime')))
                except:
                    return error_msg('`mtime` must be a numerical string or an integer.')
            # Get `payload`
            # NOTE(review): a plain .get() cannot raise, so this except branch
            # looks unreachable -- confirm before relying on its message.
            if 'payload' not in obj.get('configuration'):
                return error_msg('Could not find `payload` key in object.configuration.')
            else:
                try:
                    payload = obj.get('configuration').get('payload')
                except:
                    return error_msg('`payload` must be a string.')
            # Get `case_sensitive` (bool() never raises, so the except is dead too)
            if 'case_sensitive' not in obj.get('configuration'):
                return error_msg('Could not find `case_sensitive` key in object.configuration.')
            else:
                try:
                    case_sensitive = bool(obj.get('configuration').get('case_sensitive'))
                except:
                    return error_msg('`case_sensitive` must be boolean.')
            details = {
                'file_path': file_path,
                'mtime': mtime,
                'case_sensitive': case_sensitive,
                'payload': payload,
            }
            try:
                Configuration.remove(client, **details)
            except Exception as e:
                return error_msg(str(e))
            # NOTE(review): success response reuses configuration_added_msg;
            # the payload shape is identical, only the name is misleading.
            return configuration_added_msg(details)
        else:
            return error_msg('Could not understand the type: `{}`.'.format(obj.get('type')))
    else:
        return error_msg('Invalid method.')
def poll(request):
    """Return the managed configuration list for the client with `api_key`.

    Expects a GET request with a 40-character `api_key` query parameter.
    All outcomes are returned via the module's message helpers.
    """
    if request.method == 'GET':
        api_key = request.GET.get('api_key', '')
        if len(api_key) != 40:
            return error_msg('Invalid `api_key`.')
        try:
            client = Client.get_by_api_key(api_key)
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed.
        except Exception:
            return error_msg('Client for `api_key` doesn\'t exist.')
        return poll_msg(client.get_managed_configuration())
    # Previously fell through and returned None (an error in Django views);
    # match the explicit invalid-method response used by the other handlers.
    return error_msg('Invalid method.')
def fetch(request):
    """Return the requested configuration file for the client with `api_key`.

    Expects a GET request with `api_key` (40 characters) and `file_path`
    query parameters; delegates retrieval to `client.fetch_configuration`.
    """
    if request.method == 'GET':
        api_key = request.GET.get('api_key', '')
        file_path = request.GET.get('file_path', '')
        if not file_path:
            return error_msg('Invalid `file_path`.')
        if len(api_key) != 40:
            return error_msg('Invalid `api_key`.')
        try:
            client = Client.get_by_api_key(api_key)
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed.
        except Exception:
            return error_msg('Client for `api_key` doesn\'t exist.')
        try:
            return fetch_msg(client.fetch_configuration(file_path))
        except Exception as e:
            return error_msg('Could not fetch `{}`: {}'.format(file_path, str(e)))
    # Previously fell through and returned None (an error in Django views);
    # match the explicit invalid-method response used by the other handlers.
    return error_msg('Invalid method.')
@csrf_exempt
def push(request):
    """Store a configuration file pushed by a client.

    Expects a POST with a JSON body containing `api_key` (40 characters),
    `file_path` (non-empty string), `mtime` (int), `sha1_checksum`,
    `content` and `is_case_sensitive` (bool). The whole validated object is
    forwarded to `client.push_configuration`.
    """
    if request.method == 'POST':
        obj = json.loads(request.body.decode('UTF-8'))
        api_key = obj.get('api_key', '')
        if len(api_key) != 40:
            return error_msg('Invalid `api_key`.')
        try:
            client = Client.get_by_api_key(api_key)
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are not swallowed.
        except Exception:
            return error_msg('Client for `api_key` doesn\'t exist.')
        # file_path: required, non-empty string.
        file_path = obj.get('file_path', None)
        if file_path is None:
            return error_msg('Invalid `file_path`.')
        if len(file_path) == 0:
            return error_msg('`file_path` cannot be an empty string.')
        # mtime: required integer.
        mtime = obj.get('mtime', None)
        if mtime is None:
            return error_msg('Invalid `mtime`.')
        if not isinstance(mtime, int):
            # NOTE(review): bool is a subclass of int, so True/False pass
            # this check — confirm whether that is acceptable.
            return error_msg('`mtime` must be an integer.')
        # sha1_checksum: required (length/format not validated here).
        sha1_checksum = obj.get('sha1_checksum', None)
        if sha1_checksum is None:
            return error_msg('Invalid `sha1_checksum`.')
        # content: required.
        content = obj.get('content', None)
        if content is None:
            return error_msg('Invalid `content`.')
        # is_case_sensitive: required boolean.
        is_case_sensitive = obj.get('is_case_sensitive', None)
        if is_case_sensitive is None:
            return error_msg('Invalid `is_case_sensitive`.')
        if not isinstance(is_case_sensitive, bool):
            # Fixed: this message previously referenced `case_sensitive`,
            # which is not the key this branch validates.
            return error_msg('`is_case_sensitive` must be boolean.')
        try:
            # NOTE(review): the entire payload (including `api_key`) is
            # forwarded as keyword args — confirm push_configuration
            # tolerates the extra keys.
            return fetch_msg(client.push_configuration(**obj))
        except Exception as e:
            return error_msg('Could not push `{}`: {}'.format(file_path, str(e)))
    # Previously fell through and returned None (an error in Django views);
    # match the explicit invalid-method response used by the other handlers.
    return error_msg('Invalid method.')
| |
#!/usr/bin/env python
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates BigQuery schema from API discovery documents."""
import re
from asset_inventory import bigquery_schema
import requests
class APISchema(object):
    """Convert a CAI asset type to a BigQuery table schema.
    When import_pipeline uses a group_by of ASSET_TYPE or ASSET_TYPE_VERSION
    use a BigQuery schema generated from API discovery documents. This
    gives us all the type, names and description of an asset type property.
    Will union all API versions into a single schema.
    """

    # Process-wide caches: discovery documents by url (failed fetches are
    # cached as None) and translated schemas by cache key.
    _discovery_document_cache = {}
    _schema_cache = {}

    @classmethod
    def _get_discovery_document(cls, dd_url):
        """Retrieve and cache a discovery document.

        Args:
            dd_url: url of the discovery document.
        Returns:
            The parsed json document, or None when the url is not an http(s)
            url, the request fails, or the body is not valid json.
        """
        if dd_url in cls._discovery_document_cache:
            return cls._discovery_document_cache[dd_url]
        discovery_document = None
        # Ignore discovery document urls that aren't urls.
        if dd_url and dd_url.startswith('http'):
            response = requests.get(dd_url)
            if response.status_code == 200:
                try:
                    discovery_document = response.json()
                except ValueError:
                    pass
        # Cache failures (None) as well so bad urls are not refetched.
        cls._discovery_document_cache[dd_url] = discovery_document
        return discovery_document

    @classmethod
    def _get_api_name_for_discovery_document_url(cls, dd_url):
        """Get API name from discovery document url.

        Args:
            dd_url: Discovery document url.
        Returns:
            API name if it can be found, None otherwise.
        """
        # Apiary style: https://<host>/discovery/v1/apis/<api-name>/...
        apiary_match = re.match(r'https://(?:[^/]+)/discovery/v1/apis/([^/]+)',
                                dd_url)
        if apiary_match:
            return apiary_match.group(1)
        # One Platform style: https://<api-name>.googleapis.com/$discovery/rest
        one_match = re.match(r'https://([^.]+).googleapis.com/\$discovery/rest',
                             dd_url)
        if one_match:
            return one_match.group(1)
        return None

    @classmethod
    def _get_discovery_document_versions(cls, dd_url):
        """Return all versions of the API's discovery documents.

        Args:
            dd_url: the url of the asset's discovery document.
        Returns:
            list of discovery document json objects.
        """
        # Start with the document for the supplied url.
        discovery_documents = []
        api_name = cls._get_api_name_for_discovery_document_url(dd_url)
        dd = cls._get_discovery_document(dd_url)
        discovery_documents += [dd] if dd else []
        # Union in documents for other versions of the same API found via the
        # public discovery directory.
        all_discovery_docs = cls._get_discovery_document(
            'https://content.googleapis.com/discovery/v1/apis')
        # The directory fetch can fail and return None; degrade to just the
        # document we already have rather than raising TypeError.
        for discovery_doc in (all_discovery_docs or {}).get('items', []):
            dru = discovery_doc['discoveryRestUrl']
            if api_name == discovery_doc['name'] and dru != dd_url:
                dd = cls._get_discovery_document(dru)
                discovery_documents += [dd] if dd else []
        return discovery_documents

    @classmethod
    def _get_schema_for_resource(cls, discovery_documents, resource_name):
        """Translate API discovery documents to one merged BigQuery schema."""
        schemas = [
            cls._translate_resource_to_schema(resource_name, document)
            for document in discovery_documents
        ]
        merged_schema = bigquery_schema.merge_schemas(schemas)
        return merged_schema

    @classmethod
    def _get_bigquery_type_for_property(cls, property_value, resources):
        """Map an API property type to a BigQuery column type.

        Args:
            property_value: the property's schema from the discovery document.
            resources: dict of resources the property may reference via $ref.
        Returns:
            A BigQuery type name: 'STRING' (default), 'RECORD', 'NUMERIC'
            or 'BOOL'.
        """
        # default type
        bigquery_type = 'STRING'
        property_type = property_value.get('type', None)
        # nested record.
        if property_type == 'object' or 'properties' in property_value:
            bigquery_type = 'RECORD'
        # repeated, recurse into element type.
        elif property_type == 'array':
            return cls._get_bigquery_type_for_property(
                property_value['items'],
                resources)
        if property_type in ('number', 'integer'):
            bigquery_type = 'NUMERIC'
        elif property_type == 'boolean':
            bigquery_type = 'BOOL'
        # type reference; resolve and recurse into the referenced resource.
        elif '$ref' in property_value:
            property_resource_name = cls._ref_resource_name(property_value)
            if property_resource_name:
                return cls._get_bigquery_type_for_property(
                    resources[property_resource_name],
                    resources)
        return bigquery_type

    @classmethod
    def _ref_resource_name(cls, property_value):
        """Return the resource name a `$ref` points at, or None."""
        ref_name = property_value.get('$ref', None)
        # strip the '#/definitions/' prefix if present.
        if ref_name and ref_name.startswith('#/definitions/'):
            return ref_name[len('#/definitions/'):]
        return ref_name

    @classmethod
    def _get_properties_map_field_list(cls, property_name, property_value,
                                       resources, seen_resources):
        """Return the fields of the `RECORD` property.

        Args:
            property_name: name of API property.
            property_value: value of the API property.
            resources: dict of all other resources that might be referenced by
                the API schema through reference types ($ref values).
            seen_resources: dict of types we have processed to prevent endless
                cycles.
        Returns:
            BigQuery fields dict list or None if the field should be skipped.
        """
        # found a record type, this is a recursive exit condition.
        if 'properties' in property_value:
            return cls._properties_map_to_field_list(
                property_value['properties'], resources, seen_resources)
        property_resource_name = cls._ref_resource_name(property_value)
        # get fields of the reference type.
        if property_resource_name:
            # not handling recursive fields.
            if property_resource_name in seen_resources:
                return None
            # track prior types to not recurse forever.
            seen_resources[property_resource_name] = True
            return_value = cls._get_properties_map_field_list(
                property_resource_name, resources[property_resource_name],
                resources, seen_resources)
            del seen_resources[property_resource_name]
            return return_value
        # get fields of item type.
        if 'items' in property_value:
            return cls._get_properties_map_field_list(
                property_name, property_value['items'],
                resources, seen_resources)
        # convert additionalProperties fields to a dict
        # of name value pairs for a more regular schema.
        if 'additionalProperties' in property_value:
            fields = [{'name': 'name',
                       'field_type': 'STRING',
                       'description': 'additionalProperties name',
                       'mode': 'NULLABLE'}]
            fields.append(
                cls._property_to_field(
                    'value',
                    property_value['additionalProperties'],
                    resources, seen_resources))
            return fields
        # unknown property type.
        return None

    @classmethod
    def _property_to_field(cls, property_name, property_value,
                           resources, seen_resources):
        """Convert an API property to a BigQuery field.

        Args:
            property_name: name of API property.
            property_value: value of the API property.
            resources: dict of all other resources that might be referenced by
                the API schema through reference types ($ref values).
            seen_resources: dict of types we have processed to prevent endless
                cycles.
        Returns:
            BigQuery field dict or None if the field should be skipped.
        """
        field = {'name': property_name}
        property_type = property_value.get('type', None)
        bigquery_type = cls._get_bigquery_type_for_property(
            property_value, resources)
        field['field_type'] = bigquery_type
        # Descriptions are truncated to 1024 characters (BigQuery's limit
        # for column descriptions).
        if 'description' in property_value:
            field['description'] = property_value['description'][:1024]
        # array fields are BigQuery repeated fields, and convert
        # additionalProperties to repeated lists of key value pairs.
        if (property_type == 'array' or
                'additionalProperties' in property_value):
            field['mode'] = 'REPEATED'
        else:
            field['mode'] = 'NULLABLE'
        if bigquery_type == 'RECORD':
            fields_list = cls._get_properties_map_field_list(
                property_name, property_value, resources, seen_resources)
            # Skip records whose fields couldn't be resolved (e.g. recursive
            # or unknown reference types).
            if not fields_list:
                return None
            field['fields'] = fields_list
        return field

    @classmethod
    def _properties_map_to_field_list(cls, properties_map, resources,
                                      seen_resources):
        """Convert API resource properties to a BigQuery field list.

        Args:
            properties_map: dict of properties from the API schema document we
                are converting into a BigQuery field list.
            resources: dict of all other resources that might be referenced by
                the API schema through reference types ($ref values).
            seen_resources: dict of types we have processed to prevent endless
                cycles.
        Returns:
            BigQuery fields dict list.
        """
        fields = []
        for property_name, property_value in properties_map.items():
            field = cls._property_to_field(property_name, property_value,
                                           resources, seen_resources)
            if field is not None:
                fields.append(field)
        return fields

    @classmethod
    def _get_cache_key(cls, resource_name, document):
        """Return a schema-cache key for the resource within the document."""
        # Prefer the document's `id`; fall back to info.title/info.version,
        # then to the bare resource name.
        if 'id' in document:
            return '{}.{}'.format(document['id'], resource_name)
        if 'info' in document:
            info = document['info']
            return '{}.{}.{}'.format(info['title'],
                                     info['version'],
                                     resource_name)
        return resource_name

    @classmethod
    def _get_document_resources(cls, document):
        """Return the document's type definitions (`schemas` or `definitions`)."""
        if document.get('schemas'):
            return document['schemas']
        # Default to an empty mapping (was an empty list) so the return type
        # is consistently dict-like for `in` checks and indexing.
        return document.get('definitions', {})

    @classmethod
    def _translate_resource_to_schema(cls, resource_name, document):
        """Expand the $ref properties of a resource definition into fields."""
        cache_key = cls._get_cache_key(resource_name, document)
        if cache_key in cls._schema_cache:
            return cls._schema_cache[cache_key]
        resources = cls._get_document_resources(document)
        field_list = []
        if resource_name in resources:
            resource = resources[resource_name]
            properties_map = resource['properties']
            # Seed seen_resources with this resource to stop self-recursion.
            field_list = cls._properties_map_to_field_list(
                properties_map, resources, {resource_name: True})
        cls._schema_cache[cache_key] = field_list
        return field_list

    @classmethod
    def _add_asset_export_fields(cls,
                                 schema,
                                 include_resource=True,
                                 include_iam_policy=True):
        """Add the fields that the asset export adds to each resource.

        Args:
            schema: list of `google.cloud.bigquery.SchemaField` like dict
                objects.
            include_resource: to include resource schema.
            include_iam_policy: to include iam policy schema.
        Returns:
            list of `google.cloud.bigquery.SchemaField` like dict objects.
        """
        # Fields common to every exported asset row.
        asset_schema = [{
            'name': 'name',
            'field_type': 'STRING',
            'description': 'URL of the asset.',
            'mode': 'REQUIRED'
        }, {
            'name': 'asset_type',
            'field_type': 'STRING',
            'description': 'Asset name.',
            'mode': 'REQUIRED'
        }, {
            'name': 'timestamp',
            'field_type': 'TIMESTAMP',
            'description': 'Load time.',
            'mode': 'NULLABLE'
        }]
        if include_resource:
            resource_schema = list(schema)
            # Ensure a lastModifiedTime column exists even when the API's
            # schema doesn't define one.
            _, last_modified = bigquery_schema.get_field_by_name(
                resource_schema,
                'lastModifiedTime')
            if not last_modified:
                resource_schema.append({
                    'name': 'lastModifiedTime',
                    'field_type': 'STRING',
                    'mode': 'NULLABLE',
                    'description': 'Last time resource was changed.'
                })
            asset_schema.append({
                'name': 'resource',
                'field_type': 'RECORD',
                'description': 'Resource properties.',
                'mode': 'NULLABLE',
                'fields': [{
                    'name': 'version',
                    'field_type': 'STRING',
                    'description': 'Api version of resource.',
                    'mode': 'REQUIRED'
                }, {
                    'name': 'discovery_document_uri',
                    'field_type': 'STRING',
                    'description': 'Discovery document uri.',
                    'mode': 'REQUIRED'
                }, {
                    'name': 'parent',
                    'field_type': 'STRING',
                    'description': 'Parent resource.',
                    'mode': 'NULLABLE'
                }, {
                    'name': 'discovery_name',
                    'field_type': 'STRING',
                    'description': 'Name in discovery document.',
                    'mode': 'REQUIRED'
                }, {
                    'name': 'data',
                    'field_type': 'RECORD',
                    'description': 'Resource properties.',
                    'mode': 'NULLABLE',
                    'fields': resource_schema
                }]
            })
        if include_iam_policy:
            asset_schema.append({
                'name': 'iam_policy',
                'field_type': 'RECORD',
                'description': 'IAM Policy',
                'mode': 'NULLABLE',
                'fields': [{
                    'name': 'etag',
                    'field_type': 'STRING',
                    'description': 'Etag.',
                    'mode': 'NULLABLE'
                }, {
                    'name': 'audit_configs',
                    'field_type': 'RECORD',
                    'description': 'Logging of each type of permission.',
                    'mode': 'REPEATED',
                    'fields': [{
                        'name': 'service',
                        'field_type': 'STRING',
                        'description':
                            'Service that will be enabled for audit logging.',
                        'mode': 'NULLABLE'
                    }, {
                        'name': 'audit_log_configs',
                        'field_type': 'RECORD',
                        'description': 'Logging of each type of permission.',
                        'mode': 'REPEATED',
                        'fields': [{
                            'name': 'log_type',
                            'field_type': 'NUMERIC',
                            'mode': 'NULLABLE',
                            'description':
                                ('1: Admin reads. Example: CloudIAM getIamPolicy'
                                 '2: Data writes. Example: CloudSQL Users create'
                                 '3: Data reads. Example: CloudSQL Users list')
                        }]
                    }]
                }, {
                    'name': 'bindings',
                    'field_type': 'RECORD',
                    'mode': 'REPEATED',
                    'description': 'Bindings',
                    'fields': [{
                        'name': 'role',
                        'field_type': 'STRING',
                        'mode': 'NULLABLE',
                        'description': 'Assigned role.'
                    }, {
                        'name': 'members',
                        'field_type': 'STRING',
                        'mode': 'REPEATED',
                        'description': 'Principles assigned the role.'
                    }]
                }]
            })
        return asset_schema

    @classmethod
    def bigquery_schema_for_resource(cls, asset_type,
                                     resource_name,
                                     discovery_doc_url,
                                     include_resource,
                                     include_iam_policy):
        """Returns the BigQuery schema for the asset type.

        Args:
            asset_type: CAI asset type.
            resource_name: name of the resource.
            discovery_doc_url: URL of discovery document.
            include_resource: if resource schema should be included.
            include_iam_policy: if IAM policy schema should be included.
        Returns:
            BigQuery schema.
        """
        cache_key = '{}.{}.{}'.format(asset_type, include_resource,
                                      include_iam_policy)
        if cache_key in cls._schema_cache:
            return cls._schema_cache[cache_key]
        # get the resource schema if we are including the resource
        # in the export.
        resource_schema = None
        if include_resource:
            discovery_documents = cls._get_discovery_document_versions(
                discovery_doc_url)
            resource_schema = cls._get_schema_for_resource(
                discovery_documents,
                resource_name)
        schema = cls._add_asset_export_fields(
            resource_schema, include_resource, include_iam_policy)
        cls._schema_cache[cache_key] = schema
        return schema
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')  # generic type of the deserialized response payload
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]  # custom response hook passed via the `cls` kwarg
class DscCompilationJobOperations:
    """DscCompilationJobOperations async operations.
    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.
    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.automation.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    models = _models
    # NOTE: this class is AutoRest-generated (see the file header); manual
    # edits will be lost when the code is regenerated.
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    # Initial PUT of the long-running create operation; begin_create wraps
    # this call with an LRO poller.
    async def _create_initial(
        self,
        resource_group_name: str,
        automation_account_name: str,
        compilation_job_name: str,
        parameters: "_models.DscCompilationJobCreateParameters",
        **kwargs
    ) -> "_models.DscCompilationJob":
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DscCompilationJob"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
            'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'),
            'compilationJobName': self._serialize.url("compilation_job_name", compilation_job_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'DscCompilationJobCreateParameters')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 201 Created is a successful initial response for this LRO.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DscCompilationJob', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/compilationjobs/{compilationJobName}'}  # type: ignore
    async def begin_create(
        self,
        resource_group_name: str,
        automation_account_name: str,
        compilation_job_name: str,
        parameters: "_models.DscCompilationJobCreateParameters",
        **kwargs
    ) -> AsyncLROPoller["_models.DscCompilationJob"]:
        """Creates the Dsc compilation job of the configuration.
        :param resource_group_name: Name of an Azure Resource group.
        :type resource_group_name: str
        :param automation_account_name: The name of the automation account.
        :type automation_account_name: str
        :param compilation_job_name: The DSC configuration Id.
        :type compilation_job_name: str
        :param parameters: The parameters supplied to the create compilation job operation.
        :type parameters: ~azure.mgmt.automation.models.DscCompilationJobCreateParameters
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either DscCompilationJob or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.automation.models.DscCompilationJob]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DscCompilationJob"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial request when not resuming from a saved
        # continuation token.
        if cont_token is None:
            raw_result = await self._create_initial(
                resource_group_name=resource_group_name,
                automation_account_name=automation_account_name,
                compilation_job_name=compilation_job_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('DscCompilationJob', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
            'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'),
            'compilationJobName': self._serialize.url("compilation_job_name", compilation_job_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Select polling strategy: ARM polling by default, no polling, or a
        # caller-provided AsyncPollingMethod.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/compilationjobs/{compilationJobName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        automation_account_name: str,
        compilation_job_name: str,
        **kwargs
    ) -> "_models.DscCompilationJob":
        """Retrieve the Dsc configuration compilation job identified by job id.
        :param resource_group_name: Name of an Azure Resource group.
        :type resource_group_name: str
        :param automation_account_name: The name of the automation account.
        :type automation_account_name: str
        :param compilation_job_name: The DSC configuration Id.
        :type compilation_job_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: DscCompilationJob, or the result of cls(response)
        :rtype: ~azure.mgmt.automation.models.DscCompilationJob
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DscCompilationJob"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
            'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'),
            'compilationJobName': self._serialize.url("compilation_job_name", compilation_job_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('DscCompilationJob', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/compilationjobs/{compilationJobName}'}  # type: ignore
    def list_by_automation_account(
        self,
        resource_group_name: str,
        automation_account_name: str,
        filter: Optional[str] = None,
        **kwargs
    ) -> AsyncIterable["_models.DscCompilationJobListResult"]:
        """Retrieve a list of dsc compilation jobs.
        :param resource_group_name: Name of an Azure Resource group.
        :type resource_group_name: str
        :param automation_account_name: The name of the automation account.
        :type automation_account_name: str
        :param filter: The filter to apply on the operation.
        :type filter: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either DscCompilationJobListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.automation.models.DscCompilationJobListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.DscCompilationJobListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        accept = "application/json"
        # Build the first-page request from the metadata url, or a follow-up
        # request from the server-provided next_link.
        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list_by_automation_account.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
                    'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                if filter is not None:
                    query_parameters['$filter'] = self._serialize.query("filter", filter, 'str')
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        # Deserialize one page into (next_link, items).
        async def extract_data(pipeline_response):
            deserialized = self._deserialize('DscCompilationJobListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                # NOTE(review): here the error body is deserialized *before*
                # map_error (which may raise), the reverse of the order used in
                # the other methods of this class; generated code — confirm
                # the ordering is intentional before relying on `error`.
                error = self._deserialize(_models.ErrorResponse, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_automation_account.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/compilationjobs'}  # type: ignore
    async def get_stream(
        self,
        resource_group_name: str,
        automation_account_name: str,
        job_id: str,
        job_stream_id: str,
        **kwargs
    ) -> "_models.JobStream":
        """Retrieve the job stream identified by job stream id.
        :param resource_group_name: Name of an Azure Resource group.
        :type resource_group_name: str
        :param automation_account_name: The name of the automation account.
        :type automation_account_name: str
        :param job_id: The job id.
        :type job_id: str
        :param job_stream_id: The job stream id.
        :type job_stream_id: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: JobStream, or the result of cls(response)
        :rtype: ~azure.mgmt.automation.models.JobStream
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.JobStream"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-06-01"
        accept = "application/json"
        # Construct URL
        url = self.get_stream.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str', max_length=90, min_length=1, pattern=r'^[-\w\._]+$'),
            'automationAccountName': self._serialize.url("automation_account_name", automation_account_name, 'str'),
            'jobId': self._serialize.url("job_id", job_id, 'str'),
            'jobStreamId': self._serialize.url("job_stream_id", job_stream_id, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize(_models.ErrorResponse, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('JobStream', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get_stream.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Automation/automationAccounts/{automationAccountName}/compilationjobs/{jobId}/streams/{jobStreamId}'}  # type: ignore
| |
from __future__ import unicode_literals
import re
from django.core.exceptions import FieldDoesNotExist
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import Col, Expression
from django.db.models.lookups import Lookup
from django.utils import six
# Registry mapping each lookup_name to its GISLookup subclass; populated
# below as every lookup class is defined.
gis_lookups = {}
class GISLookup(Lookup):
    """Base class for the GIS lookups registered in ``gis_lookups``.

    Subclasses set ``lookup_name``; the SQL is produced by the backend's
    ``gis_operators`` entry for that name (see ``as_sql``).
    """
    # Optional SQL template a subclass may use instead of the operator default.
    sql_template = None
    # Optional transform hook (unused here; backends may consult it).
    transform_func = None
    # True on lookups that compare a computed distance against a value.
    distance = False
    @classmethod
    def _check_geo_field(cls, opts, lookup):
        """
        Utility for checking the given lookup with the given model options.
        The lookup is a string either specifying the geographic field, e.g.
        'point', 'the_geom', or a related lookup on a geographic field like
        'address__point'.
        If a GeometryField exists according to the given lookup on the model
        options, it will be returned. Otherwise returns False.
        """
        from django.contrib.gis.db.models.fields import GeometryField
        # This takes into account the situation where the lookup is a
        # lookup to a related geographic field, e.g., 'address__point'.
        field_list = lookup.split(LOOKUP_SEP)
        # Reversing so list operates like a queue of related lookups,
        # and popping the top lookup.
        field_list.reverse()
        fld_name = field_list.pop()
        try:
            geo_fld = opts.get_field(fld_name)
            # If the field list is still around, then it means that the
            # lookup was for a geometry field across a relationship --
            # thus we keep on getting the related model options and the
            # model field associated with the next field in the list
            # until there's no more left.
            while len(field_list):
                opts = geo_fld.rel.to._meta
                geo_fld = opts.get_field(field_list.pop())
        except (FieldDoesNotExist, AttributeError):
            return False
        # Finally, make sure we got a Geographic field and return.
        if isinstance(geo_fld, GeometryField):
            return geo_fld
        else:
            return False
    def get_db_prep_lookup(self, value, connection):
        """Adapt the geometry value(s) for the database backend."""
        # get_db_prep_lookup is called by process_rhs from super class
        if isinstance(value, (tuple, list)):
            # First param is assumed to be the geometric object
            params = [connection.ops.Adapter(value[0])] + list(value)[1:]
        else:
            params = [connection.ops.Adapter(value)]
        return ('%s', params)
    def process_rhs(self, compiler, connection):
        """Build the right-hand SQL, substituting a geometry placeholder."""
        rhs, rhs_params = super(GISLookup, self).process_rhs(compiler, connection)
        if hasattr(self.rhs, '_as_sql'):
            # If rhs is some QuerySet, don't touch it
            return rhs, rhs_params
        geom = self.rhs
        if isinstance(self.rhs, Col):
            # Make sure the F Expression destination field exists, and
            # set an `srid` attribute with the same as that of the
            # destination.
            geo_fld = self.rhs.output_field
            if not hasattr(geo_fld, 'srid'):
                raise ValueError('No geographic field found in expression.')
            self.rhs.srid = geo_fld.srid
        elif isinstance(self.rhs, Expression):
            raise ValueError('Complex expressions not supported for GeometryField')
        elif isinstance(self.rhs, (list, tuple)):
            # Extra elements (e.g. a distance) are handled by subclasses.
            geom = self.rhs[0]
        rhs = connection.ops.get_geom_placeholder(self.lhs.output_field, geom, compiler)
        return rhs, rhs_params
    def as_sql(self, compiler, connection):
        """Render the lookup through the backend's spatial operator."""
        lhs_sql, sql_params = self.process_lhs(compiler, connection)
        rhs_sql, rhs_params = self.process_rhs(compiler, connection)
        sql_params.extend(rhs_params)
        template_params = {'lhs': lhs_sql, 'rhs': rhs_sql}
        backend_op = connection.ops.gis_operators[self.lookup_name]
        return backend_op.as_sql(connection, self, template_params, sql_params)
# ------------------
# Geometry operators
# ------------------
# Each class below only declares a lookup_name; the SQL comes from the
# backend's gis_operators entry of the same name (see GISLookup.as_sql).
class OverlapsLeftLookup(GISLookup):
    """
    The 'overlaps_left' operator returns true if A's bounding box overlaps or is to the
    left of B's bounding box.
    """
    lookup_name = 'overlaps_left'
gis_lookups['overlaps_left'] = OverlapsLeftLookup
class OverlapsRightLookup(GISLookup):
    """
    The 'overlaps_right' operator returns true if A's bounding box overlaps or is to the
    right of B's bounding box.
    """
    lookup_name = 'overlaps_right'
gis_lookups['overlaps_right'] = OverlapsRightLookup
class OverlapsBelowLookup(GISLookup):
    """
    The 'overlaps_below' operator returns true if A's bounding box overlaps or is below
    B's bounding box.
    """
    lookup_name = 'overlaps_below'
gis_lookups['overlaps_below'] = OverlapsBelowLookup
class OverlapsAboveLookup(GISLookup):
    """
    The 'overlaps_above' operator returns true if A's bounding box overlaps or is above
    B's bounding box.
    """
    lookup_name = 'overlaps_above'
gis_lookups['overlaps_above'] = OverlapsAboveLookup
class LeftLookup(GISLookup):
    """
    The 'left' operator returns true if A's bounding box is strictly to the left
    of B's bounding box.
    """
    lookup_name = 'left'
gis_lookups['left'] = LeftLookup
class RightLookup(GISLookup):
    """
    The 'right' operator returns true if A's bounding box is strictly to the right
    of B's bounding box.
    """
    lookup_name = 'right'
gis_lookups['right'] = RightLookup
class StrictlyBelowLookup(GISLookup):
    """
    The 'strictly_below' operator returns true if A's bounding box is strictly below B's
    bounding box.
    """
    lookup_name = 'strictly_below'
gis_lookups['strictly_below'] = StrictlyBelowLookup
class StrictlyAboveLookup(GISLookup):
    """
    The 'strictly_above' operator returns true if A's bounding box is strictly above B's
    bounding box.
    """
    lookup_name = 'strictly_above'
gis_lookups['strictly_above'] = StrictlyAboveLookup
class SameAsLookup(GISLookup):
    """
    The "~=" operator is the "same as" operator. It tests actual geometric
    equality of two features. So if A and B are the same feature,
    vertex-by-vertex, the operator returns true.
    """
    lookup_name = 'same_as'
gis_lookups['same_as'] = SameAsLookup
class ExactLookup(SameAsLookup):
    """Alias of 'same_as' registered under the default 'exact' lookup name."""
    # Alias of same_as
    lookup_name = 'exact'
gis_lookups['exact'] = ExactLookup
class BBContainsLookup(GISLookup):
    """
    The 'bbcontains' operator returns true if A's bounding box completely contains
    B's bounding box.
    """
    lookup_name = 'bbcontains'
gis_lookups['bbcontains'] = BBContainsLookup
class BBOverlapsLookup(GISLookup):
    """
    The 'bboverlaps' operator returns true if A's bounding box overlaps B's bounding box.
    """
    lookup_name = 'bboverlaps'
gis_lookups['bboverlaps'] = BBOverlapsLookup
class ContainedLookup(GISLookup):
    """
    The 'contained' operator returns true if A's bounding box is completely contained
    by B's bounding box.
    """
    lookup_name = 'contained'
gis_lookups['contained'] = ContainedLookup
# ------------------
# Geometry functions
# ------------------
# These lookups delegate entirely to the backend's spatial function or
# operator registered under the same name; no extra SQL handling needed.
class ContainsLookup(GISLookup):
    lookup_name = 'contains'
gis_lookups['contains'] = ContainsLookup
class ContainsProperlyLookup(GISLookup):
    lookup_name = 'contains_properly'
gis_lookups['contains_properly'] = ContainsProperlyLookup
class CoveredByLookup(GISLookup):
    lookup_name = 'coveredby'
gis_lookups['coveredby'] = CoveredByLookup
class CoversLookup(GISLookup):
    lookup_name = 'covers'
gis_lookups['covers'] = CoversLookup
class CrossesLookup(GISLookup):
    lookup_name = 'crosses'
gis_lookups['crosses'] = CrossesLookup
class DisjointLookup(GISLookup):
    lookup_name = 'disjoint'
gis_lookups['disjoint'] = DisjointLookup
class EqualsLookup(GISLookup):
    lookup_name = 'equals'
gis_lookups['equals'] = EqualsLookup
class IntersectsLookup(GISLookup):
    lookup_name = 'intersects'
gis_lookups['intersects'] = IntersectsLookup
class OverlapsLookup(GISLookup):
    lookup_name = 'overlaps'
gis_lookups['overlaps'] = OverlapsLookup
class RelateLookup(GISLookup):
    """The 'relate' lookup: takes a (geometry, pattern) two-tuple and
    validates the pattern before delegating to the backend."""
    lookup_name = 'relate'
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
    # Exactly nine characters drawn from {0, 1, 2, T, F, *}.
    pattern_regex = re.compile(r'^[012TF\*]{9}$')
    def get_db_prep_lookup(self, value, connection):
        """Validate the intersection-matrix pattern, then defer to GISLookup."""
        if len(value) != 2:
            raise ValueError('relate must be passed a two-tuple')
        # Check the pattern argument
        backend_op = connection.ops.gis_operators[self.lookup_name]
        if hasattr(backend_op, 'check_relate_argument'):
            # Backend supplies its own validation hook; let it decide.
            backend_op.check_relate_argument(value[1])
        else:
            pattern = value[1]
            if not isinstance(pattern, six.string_types) or not self.pattern_regex.match(pattern):
                raise ValueError('Invalid intersection matrix pattern "%s".' % pattern)
        return super(RelateLookup, self).get_db_prep_lookup(value, connection)
gis_lookups['relate'] = RelateLookup
class TouchesLookup(GISLookup):
    """Delegates to the backend's 'touches' spatial operator."""
    lookup_name = 'touches'
gis_lookups['touches'] = TouchesLookup
class WithinLookup(GISLookup):
    """Delegates to the backend's 'within' spatial operator."""
    lookup_name = 'within'
gis_lookups['within'] = WithinLookup
class DistanceLookupBase(GISLookup):
    """Base class for lookups comparing a computed distance with a value."""
    distance = True
    # The comparison operator %(op)s comes from the backend operator entry.
    sql_template = '%(func)s(%(lhs)s, %(rhs)s) %(op)s %%s'
    def get_db_prep_lookup(self, value, connection):
        """Adapt a (geometry, distance[, option]) tuple for the backend."""
        if isinstance(value, (tuple, list)):
            if not 2 <= len(value) <= 3:
                raise ValueError("2 or 3-element tuple required for '%s' lookup." % self.lookup_name)
            # First element is the geometry; wrap it in the backend adapter.
            params = [connection.ops.Adapter(value[0])]
            # Getting the distance parameter in the units of the field.
            params += connection.ops.get_distance(self.lhs.output_field, value[1:], self.lookup_name)
            return ('%s', params)
        else:
            return super(DistanceLookupBase, self).get_db_prep_lookup(value, connection)
class DWithinLookup(DistanceLookupBase):
    """Distance lookup rendered as a three-argument SQL function call."""
    lookup_name = 'dwithin'
    sql_template = '%(func)s(%(lhs)s, %(rhs)s, %%s)'
gis_lookups['dwithin'] = DWithinLookup
# Distance comparison lookups; the comparison operator itself is supplied
# by the backend's gis_operators entry for each lookup_name.
class DistanceGTLookup(DistanceLookupBase):
    lookup_name = 'distance_gt'
gis_lookups['distance_gt'] = DistanceGTLookup
class DistanceGTELookup(DistanceLookupBase):
    lookup_name = 'distance_gte'
gis_lookups['distance_gte'] = DistanceGTELookup
class DistanceLTLookup(DistanceLookupBase):
    lookup_name = 'distance_lt'
gis_lookups['distance_lt'] = DistanceLTLookup
class DistanceLTELookup(DistanceLookupBase):
    lookup_name = 'distance_lte'
gis_lookups['distance_lte'] = DistanceLTELookup
| |
#!/usr/bin/python
#
# Copyright 2011 Software Freedom Conservancy.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.remote.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from abstract_event_listener import AbstractEventListener
def _wrap_elements(result, ef_driver):
    """Recursively wrap any WebElement(s) in EventFiringWebElement wrappers."""
    if isinstance(result, WebElement):
        return EventFiringWebElement(result, ef_driver)
    if isinstance(result, list):
        return [_wrap_elements(item, ef_driver) for item in result]
    return result
class EventFiringWebDriver(object):
    """
    A wrapper around an arbitrary WebDriver instance which supports firing events
    """

    def __init__(self, driver, event_listener):
        """
        Creates a new instance of the EventFiringWebDriver

        :Args:
         - driver : A WebDriver instance
         - event_listener : Instance of a class that subclasses AbstractEventListener and implements it fully or partially

        Example:

            from selenium.webdriver import Firefox
            from selenium.webdriver.support.events import EventFiringWebDriver, AbstractEventListener

            class MyListener(AbstractEventListener):
                def before_navigate_to(self, url, driver):
                    print "Before navigate to %s" % url
                def after_navigate_to(self, url, driver):
                    print "After navigate to %s" % url

            driver = Firefox()
            ef_driver = EventFiringWebDriver(driver, MyListener())
            ef_driver.get("http://www.google.co.in/")
        """
        if not isinstance(driver, WebDriver):
            raise WebDriverException("A WebDriver instance must be supplied")
        if not isinstance(event_listener, AbstractEventListener):
            raise WebDriverException("Event listener must be a subclass of AbstractEventListener")
        self._driver = driver
        self._listener = event_listener

    @property
    def wrapped_driver(self):
        """Returns the WebDriver instance wrapped by this EventsFiringWebDriver"""
        return self._driver

    def get(self, url):
        self._dispatch("navigate_to", (url, self._driver), "get", (url, ))

    def back(self):
        self._dispatch("navigate_back", (self._driver,), "back", ())

    def forward(self):
        self._dispatch("navigate_forward", (self._driver,), "forward", ())

    def execute_script(self, script, *args):
        unwrapped_args = (script,) + self._unwrap_element_args(args)
        return self._dispatch("execute_script", (script, self._driver), "execute_script", unwrapped_args)

    def execute_async_script(self, script, *args):
        # NOTE: both the sync and async variants fire the "execute_script"
        # listener hooks.
        unwrapped_args = (script,) + self._unwrap_element_args(args)
        return self._dispatch("execute_script", (script, self._driver), "execute_async_script", unwrapped_args)

    def close(self):
        self._dispatch("close", (self._driver,), "close", ())

    def quit(self):
        self._dispatch("quit", (self._driver,), "quit", ())

    def find_element(self, by=By.ID, value=None):
        return self._dispatch("find", (by, value, self._driver), "find_element", (by, value))

    def find_elements(self, by=By.ID, value=None):
        return self._dispatch("find", (by, value, self._driver), "find_elements", (by, value))

    # Convenience finders mirroring the WebDriver API; they all funnel
    # through find_element(s) so the "find" listener hooks always fire.
    def find_element_by_id(self, id_):
        return self.find_element(by=By.ID, value=id_)

    def find_elements_by_id(self, id_):
        return self.find_elements(by=By.ID, value=id_)

    def find_element_by_xpath(self, xpath):
        return self.find_element(by=By.XPATH, value=xpath)

    def find_elements_by_xpath(self, xpath):
        return self.find_elements(by=By.XPATH, value=xpath)

    def find_element_by_link_text(self, link_text):
        return self.find_element(by=By.LINK_TEXT, value=link_text)

    def find_elements_by_link_text(self, text):
        return self.find_elements(by=By.LINK_TEXT, value=text)

    def find_element_by_partial_link_text(self, link_text):
        return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)

    def find_elements_by_partial_link_text(self, link_text):
        return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)

    def find_element_by_name(self, name):
        return self.find_element(by=By.NAME, value=name)

    def find_elements_by_name(self, name):
        return self.find_elements(by=By.NAME, value=name)

    def find_element_by_tag_name(self, name):
        return self.find_element(by=By.TAG_NAME, value=name)

    def find_elements_by_tag_name(self, name):
        return self.find_elements(by=By.TAG_NAME, value=name)

    def find_element_by_class_name(self, name):
        return self.find_element(by=By.CLASS_NAME, value=name)

    def find_elements_by_class_name(self, name):
        return self.find_elements(by=By.CLASS_NAME, value=name)

    def find_element_by_css_selector(self, css_selector):
        return self.find_element(by=By.CSS_SELECTOR, value=css_selector)

    def find_elements_by_css_selector(self, css_selector):
        return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)

    def _dispatch(self, l_call, l_args, d_call, d_args):
        """Run d_call on the wrapped driver between the before_/after_
        listener hooks named by l_call; wrap any returned elements."""
        getattr(self._listener, "before_%s" % l_call)(*l_args)
        try:
            result = getattr(self._driver, d_call)(*d_args)
        except Exception as e:
            self._listener.on_exception(e, self._driver)
            # Bare raise preserves the original traceback ("raise e" reset it)
            raise
        getattr(self._listener, "after_%s" % l_call)(*l_args)
        return _wrap_elements(result, self)

    def _unwrap_element_args(self, args):
        """Recursively replace EventFiringWebElement args with raw elements."""
        if isinstance(args, EventFiringWebElement):
            return args.wrapped_element
        elif isinstance(args, tuple):
            return tuple([self._unwrap_element_args(item) for item in args])
        elif isinstance(args, list):
            return [self._unwrap_element_args(item) for item in args]
        else:
            return args

    def __setattr__(self, item, value):
        # Private attributes, and names the wrapped driver does not have,
        # live on the wrapper; everything else is forwarded to the driver.
        if item.startswith("_") or not hasattr(self._driver, item):
            object.__setattr__(self, item, value)
        else:
            try:
                object.__setattr__(self._driver, item, value)
            except Exception as e:
                self._listener.on_exception(e, self._driver)
                raise

    def __getattr__(self, name):
        """Forward unknown attribute access to the wrapped driver; callables
        are wrapped so exceptions reach the listener and results get wrapped."""
        def _wrap(*args):
            try:
                result = attrib(*args)
                return _wrap_elements(result, self)
            except Exception as e:
                self._listener.on_exception(e, self._driver)
                raise
        if hasattr(self._driver, name):
            try:
                attrib = getattr(self._driver, name)
                if not callable(attrib):
                    return attrib
            except Exception as e:
                self._listener.on_exception(e, self._driver)
                raise
            return _wrap
        raise AttributeError(name)
class EventFiringWebElement(object):
    """
    A wrapper around WebElement instance which supports firing events
    """

    def __init__(self, webelement, ef_driver):
        """
        Creates a new instance of the EventFiringWebElement

        :Args:
         - webelement : the WebElement being wrapped
         - ef_driver : the owning EventFiringWebDriver
        """
        self._webelement = webelement
        self._ef_driver = ef_driver
        self._driver = ef_driver.wrapped_driver
        self._listener = ef_driver._listener

    @property
    def wrapped_element(self):
        """Returns the WebElement wrapped by this EventFiringWebElement instance"""
        return self._webelement

    def click(self):
        self._dispatch("click", (self._webelement, self._driver), "click", ())

    def clear(self):
        self._dispatch("change_value_of", (self._webelement, self._driver), "clear", ())

    def send_keys(self, *value):
        self._dispatch("change_value_of", (self._webelement, self._driver), "send_keys", value)

    def find_element(self, by=By.ID, value=None):
        return self._dispatch("find", (by, value, self._driver), "find_element", (by, value))

    def find_elements(self, by=By.ID, value=None):
        return self._dispatch("find", (by, value, self._driver), "find_elements", (by, value))

    # Convenience finders mirroring the WebElement API; they all funnel
    # through find_element(s) so the "find" listener hooks always fire.
    def find_element_by_id(self, id_):
        return self.find_element(by=By.ID, value=id_)

    def find_elements_by_id(self, id_):
        return self.find_elements(by=By.ID, value=id_)

    def find_element_by_name(self, name):
        return self.find_element(by=By.NAME, value=name)

    def find_elements_by_name(self, name):
        return self.find_elements(by=By.NAME, value=name)

    def find_element_by_link_text(self, link_text):
        return self.find_element(by=By.LINK_TEXT, value=link_text)

    def find_elements_by_link_text(self, link_text):
        return self.find_elements(by=By.LINK_TEXT, value=link_text)

    def find_element_by_partial_link_text(self, link_text):
        return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)

    def find_elements_by_partial_link_text(self, link_text):
        return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)

    def find_element_by_tag_name(self, name):
        return self.find_element(by=By.TAG_NAME, value=name)

    def find_elements_by_tag_name(self, name):
        return self.find_elements(by=By.TAG_NAME, value=name)

    def find_element_by_xpath(self, xpath):
        return self.find_element(by=By.XPATH, value=xpath)

    def find_elements_by_xpath(self, xpath):
        return self.find_elements(by=By.XPATH, value=xpath)

    def find_element_by_class_name(self, name):
        return self.find_element(by=By.CLASS_NAME, value=name)

    def find_elements_by_class_name(self, name):
        return self.find_elements(by=By.CLASS_NAME, value=name)

    def find_element_by_css_selector(self, css_selector):
        return self.find_element(by=By.CSS_SELECTOR, value=css_selector)

    def find_elements_by_css_selector(self, css_selector):
        return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)

    def _dispatch(self, l_call, l_args, d_call, d_args):
        """Run d_call on the wrapped element between the before_/after_
        listener hooks named by l_call; wrap any returned elements."""
        getattr(self._listener, "before_%s" % l_call)(*l_args)
        try:
            result = getattr(self._webelement, d_call)(*d_args)
        except Exception as e:
            self._listener.on_exception(e, self._driver)
            # Bare raise preserves the original traceback ("raise e" reset it)
            raise
        getattr(self._listener, "after_%s" % l_call)(*l_args)
        return _wrap_elements(result, self._ef_driver)

    def __setattr__(self, item, value):
        # Private attributes, and names the wrapped element does not have,
        # live on the wrapper; everything else is forwarded to the element.
        if item.startswith("_") or not hasattr(self._webelement, item):
            object.__setattr__(self, item, value)
        else:
            try:
                object.__setattr__(self._webelement, item, value)
            except Exception as e:
                self._listener.on_exception(e, self._driver)
                raise

    def __getattr__(self, name):
        """Forward unknown attribute access to the wrapped element; callables
        are wrapped so exceptions reach the listener and results get wrapped."""
        def _wrap(*args):
            try:
                result = attrib(*args)
                return _wrap_elements(result, self._ef_driver)
            except Exception as e:
                self._listener.on_exception(e, self._driver)
                raise
        if hasattr(self._webelement, name):
            try:
                attrib = getattr(self._webelement, name)
                if not callable(attrib):
                    return attrib
            except Exception as e:
                self._listener.on_exception(e, self._driver)
                raise
            return _wrap
        raise AttributeError(name)
| |
#!/usr/bin/env python3
#
# The gdb module is only importable inside a GDB-hosted Python interpreter,
# so fail early with a clearer message when run standalone.
try:
    import gdb
except ImportError as e:
    # NOTE(review): the two arguments form an exception arg tuple; string
    # concatenation ("... " + str(e)) was probably intended here.
    raise ImportError("This script must be run in GDB: ", str(e))
import sys
import os
import common_helpers
# NOTE(review): the cwd is appended to sys.path only AFTER common_helpers
# has been imported -- confirm common_helpers resolves without this entry.
sys.path.append(os.getcwd())
def find_type(orig, name):
    """Resolve the nested type ``orig::name``.

    Tries the type itself first, then walks up the chain of first base
    classes until gdb resolves the qualified name; raises ValueError when
    no base class remains to search.
    """
    current = orig.strip_typedefs()
    while True:
        # Strip cv qualifiers
        qualified = '%s::%s' % (current.unqualified(), name)
        try:
            return gdb.lookup_type(qualified)
        except RuntimeError:
            pass
        # Not found here: continue the search in the first base class.
        base = current.fields()[0]
        if not base.is_base_class:
            raise ValueError("Cannot find type %s::%s" % (str(orig), name))
        current = base.type
def get_value_from_aligned_membuf(buf, valtype):
    """Returns the value held in a __gnu_cxx::__aligned_membuf."""
    storage_addr = buf['_M_storage'].address
    return storage_addr.cast(valtype.pointer()).dereference()
def get_value_from_node(node):
    """Return the value stored in a tree node's aligned buffer."""
    stored_type = node.type.template_argument(0)
    return get_value_from_aligned_membuf(node['_M_storage'], stored_type)
class VectorObj:
    """Memory-usage accounting helper for a std::vector gdb value."""

    def __init__(self, gobj):
        self.obj = gobj

    @classmethod
    def is_this_type(cls, obj_type):
        """True when obj_type's name denotes a std::vector specialization."""
        return str(obj_type).startswith(("std::vector<", "std::__cxx11::vector<"))

    def element_type(self):
        """The vector's element type (first template argument)."""
        return self.obj.type.template_argument(0)

    def size(self):
        """Number of stored elements (finish - start)."""
        impl = self.obj['_M_impl']
        return int(impl['_M_finish'] - impl['_M_start'])

    def get_used_size(self):
        """Bytes used by the vector object plus all of its elements."""
        if not common_helpers.is_special_type(self.element_type()):
            # Plain elements: header plus count * element size.
            return self.obj.type.sizeof + self.size() * self.element_type().sizeof
        total = self.obj.type.sizeof
        cursor = self.obj['_M_impl']['_M_start']
        end = self.obj['_M_impl']['_M_finish']
        while cursor != end:
            special = common_helpers.get_special_type_obj(cursor.dereference())
            total += special.get_used_size()
            cursor = cursor + 1
        return total
class ListObj:
    """Memory-usage accounting helper for a std::list gdb value."""

    def __init__(self, gobj):
        self.obj = gobj

    @classmethod
    def is_this_type(cls, obj_type):
        """True when obj_type's name denotes a std::list specialization."""
        return str(obj_type).startswith(("std::list<", "std::__cxx11::list<"))

    def element_type(self):
        """The list's element type (first template argument)."""
        return self.obj.type.template_argument(0)

    def get_used_size(self):
        """Bytes used by the list header plus every node's payload."""
        special = common_helpers.is_special_type(self.element_type())
        head = self.obj['_M_impl']['_M_node']
        # Pointer type used to cast raw links before dereferencing a node.
        link_type = head.type.strip_typedefs().pointer()
        total = self.obj.type.sizeof
        node = head['_M_next']
        # The node list is circular: stop once we wrap back to the head.
        while node != head.address:
            if special:
                payload = node.cast(link_type).dereference()
                total += common_helpers.get_instance_size(payload)
            else:
                total += self.element_type().sizeof
            node = node['_M_next']
        return total
class PairObj:
    """Memory-usage accounting helper for a std::pair gdb value."""

    def __init__(self, gobj):
        self.obj = gobj

    @classmethod
    def is_this_type(cls, obj_type):
        """True when obj_type's name denotes a std::pair specialization."""
        return str(obj_type).startswith(("std::pair<", "std::__cxx11::pair<"))

    def key_type(self):
        """Type of the pair's first member."""
        return self.obj.type.template_argument(0)

    def value_type(self):
        """Type of the pair's second member."""
        return self.obj.type.template_argument(1)

    def get_used_size(self):
        """Bytes used by both members, recursing into container members."""
        key_special = common_helpers.is_special_type(self.key_type())
        val_special = common_helpers.is_special_type(self.value_type())
        if not key_special and not val_special:
            # Plain members: just their static sizes.
            return self.key_type().sizeof + self.value_type().sizeof
        total = 0
        if key_special:
            total += common_helpers.get_special_type_obj(self.obj['first']).get_used_size()
        else:
            total += self.key_type().sizeof
        if val_special:
            total += common_helpers.get_special_type_obj(self.obj['second']).get_used_size()
        else:
            total += self.value_type().sizeof
        return total
class MapObj:
    """Memory-usage accounting helper for a std::map gdb value."""
    def __init__ (self, gobj):
        self.obj = gobj
        self.obj_type = gobj.type
        # Resolve the concrete tree-node pointer type so raw header links
        # can be cast and dereferenced during traversal.
        rep_type = find_type(self.obj_type, "_Rep_type")
        self.node_type = find_type(rep_type, "_Link_type")
        self.node_type = self.node_type.strip_typedefs()
    @classmethod
    def is_this_type(cls, obj_type):
        """True when obj_type's name denotes a std::map specialization."""
        type_name = str(obj_type)
        if type_name.find("std::map<") == 0:
            return True
        if type_name.find("std::__cxx11::map<") == 0:
            return True
        return False
    def key_type(self):
        """The map's key type (first template argument)."""
        return self.obj_type.template_argument(0).strip_typedefs()
    def value_type(self):
        """The map's mapped type (second template argument)."""
        return self.obj_type.template_argument(1).strip_typedefs()
    def size(self):
        """Number of stored key/value pairs."""
        res = int(self.obj['_M_t']['_M_impl']['_M_node_count'])
        return res
    def get_used_size(self):
        """Bytes used by the map object plus all key/value pairs."""
        # Fast path: fixed-size keys and values need no per-node walk.
        if not common_helpers.is_special_type(self.key_type()) and not common_helpers.is_special_type(self.value_type()):
            return self.obj_type.sizeof + self.size() * (self.key_type().sizeof + self.value_type().sizeof)
        if self.size() == 0:
            return self.obj_type.sizeof
        size = self.obj_type.sizeof
        # _M_header['_M_left'] is the leftmost (first in-order) node.
        row_node = self.obj['_M_t']['_M_impl']['_M_header']['_M_left']
        for i in range(self.size()):
            node_val = row_node.cast(self.node_type).dereference()
            pair = get_value_from_node(node_val)
            obj = common_helpers.get_special_type_obj(pair)
            size += obj.get_used_size()
            # Advance to the in-order successor of row_node.
            node = row_node
            if node.dereference()['_M_right']:
                # Successor is the leftmost node of the right subtree.
                node = node.dereference()['_M_right']
                while node.dereference()['_M_left']:
                    node = node.dereference()['_M_left']
            else:
                # Climb until we arrive from a left child.
                parent = node.dereference()['_M_parent']
                while node == parent.dereference()['_M_right']:
                    node = parent
                    parent = parent.dereference()['_M_parent']
                # Guard against stepping past the header node when the walk
                # wraps around (mirrors the libstdc++ tree increment).
                if node.dereference()['_M_right'] != parent:
                    node = parent
            row_node = node
        return size
class SetObj:
    """Memory-usage accounting helper for a std::set gdb value."""
    def __init__ (self, gobj):
        self.obj = gobj
        self.obj_type = gobj.type
        # Resolve the concrete tree-node pointer type so raw header links
        # can be cast and dereferenced during traversal.
        rep_type = find_type(self.obj_type, "_Rep_type")
        self.node_type = find_type(rep_type, "_Link_type")
        self.node_type = self.node_type.strip_typedefs()
    @classmethod
    def is_this_type(cls, obj_type):
        """True when obj_type's name denotes a std::set specialization."""
        type_name = str(obj_type)
        if type_name.find("std::set<") == 0:
            return True
        if type_name.find("std::__cxx11::set<") == 0:
            return True
        return False
    def element_type(self):
        """The set's element type (first template argument)."""
        return self.obj_type.template_argument(0)
    def size(self):
        """Number of stored elements."""
        res = int(self.obj['_M_t']['_M_impl']['_M_node_count'])
        return res
    def get_used_size(self):
        """Bytes used by the set object plus all of its elements."""
        # Fast path: fixed-size elements need no per-node walk.
        if not common_helpers.is_special_type(self.element_type()):
            return self.obj_type.sizeof + self.size() * self.element_type().sizeof
        if self.size() == 0:
            return self.obj_type.sizeof
        size = self.obj_type.sizeof
        # _M_header['_M_left'] is the leftmost (first in-order) node.
        row_node = self.obj['_M_t']['_M_impl']['_M_header']['_M_left']
        for i in range(self.size()):
            node_val = row_node.cast(self.node_type).dereference()
            val = get_value_from_node(node_val)
            obj = common_helpers.get_special_type_obj(val)
            size += obj.get_used_size()
            # Advance to the in-order successor of row_node.
            node = row_node
            if node.dereference()['_M_right']:
                # Successor is the leftmost node of the right subtree.
                node = node.dereference()['_M_right']
                while node.dereference()['_M_left']:
                    node = node.dereference()['_M_left']
            else:
                # Climb until we arrive from a left child.
                parent = node.dereference()['_M_parent']
                while node == parent.dereference()['_M_right']:
                    node = parent
                    parent = parent.dereference()['_M_parent']
                # Guard against stepping past the header node when the walk
                # wraps around (mirrors the libstdc++ tree increment).
                if node.dereference()['_M_right'] != parent:
                    node = parent
            row_node = node
        return size
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cherrypy
import htpc
import requests
from json import dumps
import logging
from cherrypy.lib.auth2 import require, member_of
from htpc.helpers import fix_basepath, striphttp
class Transmission(object):
# Transmission Session ID
sessionId = ''
reqz = requests.Session()
def __init__(self):
self.logger = logging.getLogger('modules.transmission')
htpc.MODULES.append({
'name': 'Transmission',
'id': 'transmission',
'test': htpc.WEBDIR + 'transmission/ping',
'fields': [
{'type': 'bool', 'label': 'Enable', 'name': 'transmission_enable'},
{'type': 'text', 'label': 'Menu name', 'name': 'transmission_name'},
{'type': 'text', 'label': 'IP / Host', 'placeholder': 'localhost', 'name': 'transmission_host'},
{'type': 'text', 'label': 'Port', 'placeholder': '9091', 'name': 'transmission_port'},
{'type': 'text', 'label': 'Reverse Proxy', 'placeholder': '', 'name': 'transmission_reverse_proxy_link'},
{'type': 'text', 'label': 'Rpc url', 'placeholder': '', 'name': 'transmission_rpcbasepath'},
{'type': 'text', 'label': 'Username', 'name': 'transmission_username'},
{'type': 'password', 'label': 'Password', 'name': 'transmission_password'}
]
})
@cherrypy.expose()
@require()
def index(self):
return htpc.LOOKUP.get_template('transmission.html').render(scriptname='transmission',
webinterface=Transmission.webinterface())
@staticmethod
def webinterface():
if htpc.settings.get('transmission_reverse_proxy_link'):
url = htpc.settings.get('transmission_reverse_proxy_link')
else:
host = striphttp(htpc.settings.get('transmission_host', ''))
port = str(htpc.settings.get('transmission_port', ''))
basepath = htpc.settings.get('transmission_rpcbasepath')
username = htpc.settings.get('transmission_username')
password = htpc.settings.get('transmission_password')
auth = None
# Default basepath is transmission
if not basepath:
basepath = '/transmission/'
basepath = fix_basepath(basepath)
url = 'http://%s:%s%srpc/' % (host, str(port), basepath)
return url
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def queue(self):
fields = ['id', 'name', 'status', 'comment', 'downloadDir', 'downloadDir', 'percentDone', 'isFinished', 'eta', 'rateDownload', 'rateUpload', 'uploadRatio']
return self.fetch('torrent-get', {'fields': fields})
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def stats(self):
return self.fetch('session-stats')
@cherrypy.expose()
@require(member_of(htpc.role_admin))
@cherrypy.tools.json_out()
def ping(self, **kwargs):
''' Test connection to Transmission '''
host = kwargs['transmission_host']
port = kwargs['transmission_port']
username = kwargs['transmission_username']
password = kwargs['transmission_password']
basepath = kwargs['transmission_rpcbasepath']
auth = None
if not basepath:
basepath = fix_basepath('/transmission/')
url = 'http://%s:%s%srpc/' % (striphttp(host), port, basepath)
# format post data
data = {'method': 'session-get'}
data = dumps(data)
# Set Header
header = {
'X-Transmission-Session-Id': self.sessionId,
'Content-Type': 'json; charset=UTF-8'
}
# Add authentication
if username and password:
auth = (username, password)
try:
r = self.reqz.post(url, data=data, timeout=10, headers=header, auth=auth)
if r.ok:
return r.json()
else:
if r.status_code == 409 and r.headers['x-transmission-session-id']:
self.logger.debug('Retry Transmission api with new session id.')
res = self.renewsession(url, data, header, auth, r)
return res
except Exception as e:
self.logger.error('Unable to fetch information from: %s %s' % (url, e))
return
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def session(self):
return self.fetch('session-get')
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def set_downspeed(self, speed):
if int(speed) == 0:
self.fetch('session-set', {'speed-limit-down': False})
return self.fetch('session-set', {'speed-limit-down': int(speed), 'speed-limit-down-enabled': True})
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def set_upspeed(self, speed):
if int(speed) == 0:
self.fetch('session-set', {'speed-limit-up': 'false'})
else:
return self.fetch('session-set', {'speed-limit-up': int(speed), 'speed-limit-up-enabled': 'true'})
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def start(self, torrentId=False):
if torrentId is False:
return self.fetch('torrent-start-now')
try:
torrentId = int(torrentId)
except ValueError:
return False
return self.fetch('torrent-start-now', {'ids': torrentId})
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def stop(self, torrentId=False):
if torrentId is False:
return self.fetch('torrent-stop')
try:
torrentId = int(torrentId)
except ValueError:
return False
return self.fetch('torrent-stop', {'ids': torrentId})
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def Add(self, filename=None, metainfo=None):
if metainfo:
return self.fetch('torrent-add', {'metainfo': metainfo})
return self.fetch('torrent-add', {'filename': filename})
@cherrypy.expose()
@require(member_of(htpc.role_user))
@cherrypy.tools.json_out()
def remove(self, torrentId):
try:
torrentId = int(torrentId)
except ValueError:
return False
return self.fetch('torrent-remove', {'ids': torrentId})
#For torrent search
@cherrypy.expose()
@require()
@cherrypy.tools.json_out()
def to_client(self, link, torrentname, **kwargs):
try:
self.logger.info('Added %s to uTorrent' % torrentname)
return self.fetch('torrent-add', {'filename': link})
except Exception as e:
self.logger.debug('Failed to add %s to Transmission %s %s'(torrentname, link, e))
# Wrapper to access the Transmission Api
# If the first call fails, there probably is no valid Session ID so we try it again
def fetch(self, method, arguments=''):
    """Do a request to the Transmission RPC api.

    :param method: Transmission RPC method name (e.g. 'torrent-add')
    :param arguments: optional dict of arguments for the method
    :returns: decoded JSON response, or None on failure
    """
    self.logger.debug('Request transmission method: ' + method)
    host = striphttp(htpc.settings.get('transmission_host', ''))
    port = str(htpc.settings.get('transmission_port', ''))
    basepath = htpc.settings.get('transmission_rpcbasepath')
    username = htpc.settings.get('transmission_username')
    password = htpc.settings.get('transmission_password')
    auth = None
    # Default basepath is transmission
    if not basepath:
        basepath = '/transmission/'
    basepath = fix_basepath(basepath)
    url = 'http://%s:%s%srpc/' % (host, port, basepath)
    # format post data
    data = {'method': method}
    if arguments:
        data['arguments'] = arguments
    data = dumps(data)
    # Set Header
    header = {
        'X-Transmission-Session-Id': self.sessionId,
        'Content-Type': 'json; charset=UTF-8'
    }
    if username and password:
        auth = (username, password)
    try:
        r = self.reqz.post(url, data=data, timeout=10, auth=auth, headers=header)
        if r.ok:
            return r.json()
        # 409 means our cached session id is stale; Transmission supplies a
        # fresh one in the response headers, so renew and retry once.
        if r.status_code == 409 and r.headers['x-transmission-session-id']:
            # Bug fix: the retried response was previously discarded, so the
            # renew path always returned None to the caller.
            return self.renewsession(url, data, header, auth, r)
    except Exception as e:
        self.logger.error('Unable to fetch information from: %s %s %s' % (url, data, e))
    return
def renewsession(self, url, data, header, auth, r):
    """Retry a Transmission RPC call after refreshing the session id.

    :param r: the 409 response carrying the new session id header
    :returns: decoded JSON response on success, otherwise None
    """
    self.logger.debug('Retry Transmission api with new session id.')
    # Remember the fresh session id for subsequent calls too.
    fresh_id = r.headers['x-transmission-session-id']
    self.sessionId = fresh_id
    header['X-Transmission-Session-Id'] = fresh_id
    try:
        retry = self.reqz.post(url, data=data, timeout=10, headers=header, auth=auth)
        if retry.ok:
            return retry.json()
    except Exception as e:
        self.logger.error('Unable access Transmission api with new session id. %s' % e)
| |
# file eulexistdb/db.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Connect to an eXist XML database and query it.
This module provides :class:`ExistDB` and related classes for connecting to
an eXist-db_ database and executing XQuery_ queries against it.
.. _XQuery: http://www.w3.org/TR/xquery/
.. _eXist-db: http://exist.sourceforge.net/
When used with Django, :class:`~eulexistdb.db.ExistDB` can pull
configuration settings directly from Django settings. If you create
an instance of :class:`~eulexistdb.db.ExistDB` without specifying a
server url, it will attempt to configure an eXist database based on
Django settings, using the configuration names documented below.
Projects that use this module should include the following settings in their
``settings.py``::
#Exist DB Settings
EXISTDB_SERVER_USER = 'user'
EXISTDB_SERVER_PASSWORD = 'password'
EXISTDB_SERVER_URL = "http://megaserver.example.com:8042/exist"
EXISTDB_ROOT_COLLECTION = "/sample_collection"
.. note::
User and password settings are optional.
To configure a timeout for most eXist connections, specify the desired
time in seconds as ``EXISTDB_TIMEOUT``; if none is specified, the
global default socket timeout will be used.
.. note::
Any configured ``EXISTDB_TIMEOUT`` will be ignored by the
**existdb** management command, since reindexing a large collection
could take significantly longer than a normal timeout would allow
for.
If you are using an eXist index configuration file, you can add another setting
to specify your configuration file::
EXISTDB_INDEX_CONFIGFILE = "/path/to/my/exist_index.xconf"
This will allow you to use the ``existdb`` management command to
manage your index configuration file in eXist.
If you wish to specify options for fulltext queries, you can set a dictionary
of options like this::
EXISTDB_FULLTEXT_OPTIONS = {'default-operator': 'and'}
.. note::
Full-text query options are only available in very recent versions of eXist.
If you are writing unit tests against code that uses
:mod:`eulexistdb`, you may want to take advantage of
:class:`eulexistdb.testutil.TestCase` for loading fixture data to a
test eXist-db collection, and
:class:`eulexistdb.testutil.ExistDBTestSuiteRunner`, which has logic
to set up and switch configurations between a development and test
collections in eXist.
----
"""
from functools import wraps
import httplib
import logging
import socket
from urllib import unquote_plus, splittype
import urlparse
import warnings
import xmlrpclib
from eulxml import xmlmap
from eulexistdb.exceptions import ExistDBException, ExistDBTimeout
# Public API of this module.
__all__ = ['ExistDB', 'QueryResult', 'ExistDBException', 'EXISTDB_NAMESPACE']
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# XML namespace used by eXist in query-result documents.
EXISTDB_NAMESPACE = 'http://exist.sourceforge.net/NS/exist'
def _wrap_xmlrpc_fault(f):
    """Decorator translating low-level socket/xmlrpc errors raised by *f*
    into :class:`ExistDBTimeout` / :class:`ExistDBException`."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except socket.timeout as err:
            # timeout must be caught before socket.error (it is a subclass)
            raise ExistDBTimeout(err)
        except (socket.error, xmlrpclib.Fault,
                xmlrpclib.ProtocolError, xmlrpclib.ResponseError) as err:
            raise ExistDBException(err)
        # FIXME: could we catch IOerror (connection reset) and try again ?
        # occasionally getting this error (so far exclusively in unit tests)
        # error: [Errno 104] Connection reset by peer
    return wrapper
class ExistDB:
    """Connect to an eXist database, and manipulate and query it.
    Construction doesn't initiate server communication, only store
    information about where the server is, to be used in later
    communications.
    :param server_url: The XML-RPC endpoint of the server, typically
        ``/xmlrpc`` within the server root.
    :param resultType: The class to use for returning :meth:`query` results;
        defaults to :class:`QueryResult`
    :param encoding: The encoding used to communicate with the server;
        defaults to "UTF-8"
    :param verbose: When True, print XML-RPC debugging messages to stdout
    :param timeout: Specify a timeout for xmlrpc connection
        requests. If not specified, the global default socket timeout
        value will be used.
    """
    def __init__(self, server_url=None, resultType=None, encoding='UTF-8', verbose=False,
                 **kwargs):
        # FIXME: Will encoding ever be anything but UTF-8? Does this really
        # need to be part of our public interface?
        self.resultType = resultType or QueryResult
        # xmlrpclib option: return datetime objects instead of xmlrpc DateTime
        datetime_opt = {'use_datetime': True}
        # distinguish between timeout not set and no timeout, to allow
        # easily setting a timeout of None and have it override any
        # configured EXISTDB_TIMEOUT
        timeout = None
        if 'timeout' in kwargs:
            timeout = kwargs['timeout']
        # if server url or timeout are not set, attempt to get from django settings
        if server_url is None or 'timeout' not in kwargs:
            try:
                from django.conf import settings
                if server_url is None:
                    server_url = self._serverurl_from_djangoconf()
                if 'timeout' not in kwargs:
                    timeout = getattr(settings, 'EXISTDB_TIMEOUT', None)
            except ImportError:
                # django is not installed; rely on explicit arguments only
                pass
        # if server url is still not set, we have a problem
        if server_url is None:
            raise Exception('Cannot initialize an eXist-db connection without specifying ' +
                            'eXist server url directly or in Django settings as EXISTDB_SERVER_URL')
        # determine if we need http or https transport
        # (duplicates some logic in xmlrpclib)
        type, uri = splittype(server_url)
        if type not in ("http", "https"):
            # NOTE: Python 2 raise syntax; this module targets Python 2 throughout
            raise IOError, "unsupported XML-RPC protocol"
        if type == 'https':
            transport = TimeoutSafeTransport(timeout=timeout, **datetime_opt)
        else:
            transport = TimeoutTransport(timeout=timeout, **datetime_opt)
        # proxy object whose method calls are forwarded to eXist's xmlrpc endpoint
        self.server = xmlrpclib.ServerProxy(
            uri="%s/xmlrpc" % server_url.rstrip('/'),
            transport=transport,
            encoding=encoding,
            verbose=verbose,
            allow_none=True,
            **datetime_opt
        )
    def _serverurl_from_djangoconf(self):
        """Build the eXist server url from Django settings, inserting any
        configured credentials into the url; returns None if django is
        not installed."""
        # determine what exist url to use based on django settings, if available
        try:
            from django.conf import settings
            # don't worry about errors on this one - if it isn't set, this should fail
            exist_url = settings.EXISTDB_SERVER_URL
            # former syntax had credentials in the server url; warn about the change
            if '@' in exist_url:
                warnings.warn('EXISTDB_SERVER_URL should not include eXist user or ' +
                              'password information.  You should update your django ' +
                              'settings to use EXISTDB_SERVER_USER and EXISTDB_SERVER_PASSWORD.')
            # look for username & password
            username = getattr(settings, 'EXISTDB_SERVER_USER', None)
            password = getattr(settings, 'EXISTDB_SERVER_PASSWORD', None)
            # if username or password are configured, add them to the url
            if username or password:
                # split the url into its component parts
                urlparts = urlparse.urlsplit(exist_url)
                # could have both username and password or just a username
                if username and password:
                    prefix = '%s:%s' % (username, password)
                else:
                    prefix = username
                # prefix the network location with credentials
                netloc = '%s@%s' % (prefix, urlparts.netloc)
                # un-split the url with all the previous parts and modified location
                exist_url = urlparse.urlunsplit((urlparts.scheme, netloc, urlparts.path,
                                                 urlparts.query, urlparts.fragment))
            return exist_url
        except ImportError:
            # django not available: implicitly returns None
            pass
    def getDocument(self, name, **kwargs):
        """Retrieve a document from the database.
        :param name: database document path to retrieve
        :rtype: string contents of the document
        """
        logger.debug('getDocumentAsString %s options=%s' % (name, kwargs))
        return self.server.getDocumentAsString(name, kwargs)
    def getDoc(self, name, **kwargs):
        "Alias for :meth:`getDocument`."
        return self.getDocument(name, **kwargs)
    def createCollection(self, collection_name, overwrite=False):
        """Create a new collection in the database.
        :param collection_name: string name of collection
        :param overwrite: overwrite existing document?
        :rtype: boolean indicating success
        """
        # NOTE(review): unlike most other xmlrpc calls in this class, this is
        # not decorated with @_wrap_xmlrpc_fault -- confirm whether raw
        # xmlrpclib faults are intentionally allowed to propagate here.
        if not overwrite and self.hasCollection(collection_name):
            raise ExistDBException(collection_name + " exists")
        logger.debug('createCollection %s' % collection_name)
        return self.server.createCollection(collection_name)
    @_wrap_xmlrpc_fault
    def removeCollection(self, collection_name):
        """Remove the named collection from the database.
        :param collection_name: string name of collection
        :rtype: boolean indicating success
        """
        if (not self.hasCollection(collection_name)):
            raise ExistDBException(collection_name + " does not exist")
        logger.debug('removeCollection %s' % collection_name)
        return self.server.removeCollection(collection_name)
    def hasCollection(self, collection_name):
        """Check if a collection exists.
        :param collection_name: string name of collection
        :rtype: boolean
        """
        try:
            logger.debug('describeCollection %s' % collection_name)
            self.server.describeCollection(collection_name)
            return True
        except xmlrpclib.Fault, e:
            # eXist reports "not found" as a fault; translate that specific
            # fault into False, and wrap anything else as an ExistDBException
            s = "collection " + collection_name + " not found"
            if (e.faultCode == 0 and s in e.faultString):
                return False
            else:
                raise ExistDBException(e)
    def reindexCollection(self, collection_name):
        """Reindex a collection.
        Reindex will fail if the eXist user does not have the correct permissions
        within eXist (must be a member of the DBA group).
        :param collection_name: string name of collection
        :rtype: boolean success
        """
        if (not self.hasCollection(collection_name)):
            raise ExistDBException(collection_name + " does not exist")
        # xquery reindex function requires that collection name begin with /db/
        if collection_name[0:3] != '/db':
            collection_name = '/db/' + collection_name.strip('/')
        result = self.query("xmldb:reindex('%s')" % collection_name)
        # the xquery returns the literal string 'true' on success
        return result.values[0] == 'true'
    @_wrap_xmlrpc_fault
    def hasDocument(self, document_path):
        """Check if a document is present in eXist.
        :param document_path: string full path to document in eXist
        :rtype: boolean
        """
        # describeDocument returns an empty dict when the document is missing
        if self.describeDocument(document_path) == {}:
            return False
        else:
            return True
    @_wrap_xmlrpc_fault
    def describeDocument(self, document_path):
        """Return information about a document in eXist.
        Includes name, owner, group, created date, permissions, mime-type,
        type, content-length.
        Returns an empty dictionary if document is not found.
        :param document_path: string full path to document in eXist
        :rtype: dictionary
        """
        logger.debug('describeResource %s' % document_path)
        return self.server.describeResource(document_path)
    @_wrap_xmlrpc_fault
    def getCollectionDescription(self, collection_name):
        """Retrieve information about a collection.
        :param collection_name: string name of collection
        :rtype: dictionary of collection information (includes 'collections'
            and 'documents' lists; see :meth:`removeCollectionIndex`)
        """
        logger.debug('getCollectionDesc %s' % collection_name)
        return self.server.getCollectionDesc(collection_name)
    @_wrap_xmlrpc_fault
    def load(self, xml, path, overwrite=False):
        """Insert or overwrite a document in the database.
        :param xml: string or file object with the document contents
        :param path: destination location in the database
        :param overwrite: True to allow overwriting an existing document
        :rtype: boolean indicating success
        """
        # accept either a string or an open file-like object
        if hasattr(xml, 'read'):
            xml = xml.read()
        logger.debug('parse %s overwrite=%s' % (path, overwrite))
        return self.server.parse(xml, path, int(overwrite))
    @_wrap_xmlrpc_fault
    def removeDocument(self, name):
        """Remove a document from the database.
        :param name: full eXist path to the database document to be removed
        :rtype: boolean indicating success
        """
        logger.debug('remove %s' % name)
        return self.server.remove(name)
    @_wrap_xmlrpc_fault
    def moveDocument(self, from_collection, to_collection, document):
        """Move a document in eXist from one collection to another.
        :param from_collection: collection where the document currently exists
        :param to_collection: collection where the document should be moved
        :param document: name of the document in eXist
        :rtype: boolean
        """
        self.query("xmldb:move('%s', '%s', '%s')" % \
                   (from_collection, to_collection, document))
        # query result does not return any meaningful content,
        # but any failure (missing collection, document, etc) should result in
        # an exception, so return true if the query completed successfully
        return True
    @_wrap_xmlrpc_fault
    def query(self, xquery, start=1, how_many=10, **kwargs):
        """Execute an XQuery query, returning the results directly.
        :param xquery: a string XQuery query
        :param start: first index to return (1-based)
        :param how_many: maximum number of items to return
        :rtype: the resultType specified at the creation of this ExistDB;
            defaults to :class:`QueryResult`.
        """
        logger.debug('query how_many=%d start=%d args=%s\n%s' % (how_many, start, kwargs, xquery))
        xml_s = self.server.query(xquery, how_many, start, kwargs)
        # xmlrpclib tries to guess whether the result is a string or
        # unicode, returning whichever it deems most appropriate.
        # Unfortunately, :meth:`~eulxml.xmlmap.load_xmlobject_from_string`
        # requires a byte string. This means that if xmlrpclib gave us a
        # unicode, we need to encode it:
        if isinstance(xml_s, unicode):
            xml_s = xml_s.encode("UTF-8")
        return xmlmap.load_xmlobject_from_string(xml_s, self.resultType)
    @_wrap_xmlrpc_fault
    def executeQuery(self, xquery):
        """Execute an XQuery query, returning a server-provided result
        handle.
        :param xquery: a string XQuery query
        :rtype: an integer handle identifying the query result for future calls
        """
        # NOTE: eXist's xmlrpc interface requires a dictionary parameter.
        # This parameter is not documented in the eXist docs at
        # http://demo.exist-db.org/exist/devguide_xmlrpc.xml
        # so it's not clear what we can pass there.
        logger.debug('executeQuery\n%s' % xquery)
        result_id = self.server.executeQuery(xquery, {})
        logger.debug('result id is %s' % result_id)
        return result_id
    @_wrap_xmlrpc_fault
    def querySummary(self, result_id):
        """Retrieve results summary from a past query.
        :param result_id: an integer handle returned by :meth:`executeQuery`
        :rtype: a dict describing the results
        The returned dict has four fields:
        * *queryTime*: processing time in milliseconds
        * *hits*: number of hits in the result set
        * *documents*: a list of lists. Each identifies a document and
            takes the form [`doc_id`, `doc_name`, `hits`], where:
            * *doc_id*: an internal integer identifier for the document
            * *doc_name*: the name of the document as a string
            * *hits*: the number of hits within that document
        * *doctype*: a list of lists. Each contains a doctype public
            identifier and the number of hits found for this
            doctype.
        """
        # FIXME: This just exposes the existdb xmlrpc querySummary function.
        # Frankly, this return is just plain ugly. We should come up with
        # something more meaningful.
        summary = self.server.querySummary(result_id)
        logger.debug('querySummary result id %d : ' % result_id + \
                     '%(hits)s hits, query took %(queryTime)s ms' % summary)
        return summary
    @_wrap_xmlrpc_fault
    def getHits(self, result_id):
        """Get the number of hits in a query result.
        :param result_id: an integer handle returned by :meth:`executeQuery`
        :rtype: integer representing the number of hits
        """
        hits = self.server.getHits(result_id)
        logger.debug('getHits result id %d : %s' % (result_id, hits))
        return hits
    @_wrap_xmlrpc_fault
    def retrieve(self, result_id, position, highlight=False, **options):
        """Retrieve a single result fragment.
        :param result_id: an integer handle returned by :meth:`executeQuery`
        :param position: the result index to return
        :param highlight: enable search term highlighting in result; optional,
            defaults to False
        :rtype: the query result item as a string
        """
        if highlight:
            # eXist highlight modes: attributes, elements, or both
            # using elements because it seems most reasonable default
            options['highlight-matches'] = 'elements'
            # pretty-printing with eXist matches can introduce unwanted whitespace
            if 'indent' not in options:
                options['indent'] = 'no'
        logger.debug('retrieve result id %d position=%d options=%s' % (result_id, position, options))
        return self.server.retrieve(result_id, position, options)
    @_wrap_xmlrpc_fault
    def releaseQueryResult(self, result_id):
        """Release a result set handle in the server.
        :param result_id: an integer handle returned by :meth:`executeQuery`
        """
        logger.debug('releaseQueryResult result id %d' % result_id)
        self.server.releaseQueryResult(result_id)
    @_wrap_xmlrpc_fault
    def setPermissions(self, resource, permissions):
        """Set permissions on a resource in eXist.
        :param resource: full path to a collection or document in eXist
        :param permissions: int or string permissions statement
        """
        # TODO: support setting owner, group ?
        logger.debug('setPermissions %s %s' % (resource, permissions))
        self.server.setPermissions(resource, permissions)
    @_wrap_xmlrpc_fault
    def getPermissions(self, resource):
        """Retrieve permissions for a resource in eXist.
        :param resource: full path to a collection or document in eXist
        :rtype: ExistPermissions
        """
        return ExistPermissions(self.server.getPermissions(resource))
    def loadCollectionIndex(self, collection_name, index, overwrite=True):
        """Load an index configuration for the specified collection.
        Creates the eXist system config collection if it is not already there,
        and loads the specified index config file, as per eXist collection and
        index naming conventions.
        :param collection_name: name of the collection to be indexed
        :param index: string or file object with the document contents (as used by :meth:`load`)
        :param overwrite: set to False to disallow overwriting current index (overwrite allowed by default)
        :rtype: boolean indicating success
        """
        index_collection = self._configCollectionName(collection_name)
        # FIXME: what error handling should be done at this level?
        # create config collection if it does not exist
        if not self.hasCollection(index_collection):
            self.createCollection(index_collection)
        # load index content as the collection index configuration file
        return self.load(index, self._collectionIndexPath(collection_name), overwrite)
    def removeCollectionIndex(self, collection_name):
        """Remove index configuration for the specified collection.
        If index collection has no documents or subcollections after the index
        file is removed, the configuration collection will also be removed.
        :param collection: name of the collection with an index to be removed
        :rtype: boolean indicating success
        """
        # collection indexes information must be stored under system/config/db/collection_name
        index_collection = self._configCollectionName(collection_name)
        # remove collection.xconf in the configuration collection
        self.removeDocument(self._collectionIndexPath(collection_name))
        desc = self.getCollectionDescription(index_collection)
        # no documents and no sub-collections - safe to remove index collection
        if desc['collections'] == [] and desc['documents'] == []:
            self.removeCollection(index_collection)
        return True
    def hasCollectionIndex(self, collection_name):
        """Check if the specified collection has an index configuration in eXist.
        Note: according to eXist documentation, index config file does not *have*
        to be named *collection.xconf* for reasons of backward compatibility.
        This function assumes that the recommended naming conventions are followed.
        :param collection: name of the collection with an index to be removed
        :rtype: boolean indicating collection index is present
        """
        return self.hasCollection(self._configCollectionName(collection_name)) \
            and self.hasDocument(self._collectionIndexPath(collection_name))
    def _configCollectionName(self, collection_name):
        """Generate eXist db path to the configuration collection for a specified collection
        according to eXist collection naming conventions.
        """
        # collection indexes information must be stored under system/config/db/collection_name
        return "/db/system/config/db/" + collection_name.strip('/')
    def _collectionIndexPath(self, collection_name):
        """Generate full eXist db path to the index configuration file for a specified
        collection according to eXist collection naming conventions.
        """
        # collection indexes information must be stored under system/config/db/collection_name
        return self._configCollectionName(collection_name) + "/collection.xconf"
class ExistPermissions:
    "Permissions for an eXist resource - owner, group, and active permissions."
    def __init__(self, data):
        # Unpack only the fields of interest from the xmlrpc response dict;
        # a missing key raises KeyError, just as direct access would.
        for field in ('owner', 'group', 'permissions'):
            setattr(self, field, data[field])
    def __str__(self):
        details = (self.owner, self.group, self.permissions)
        return "owner: %s; group: %s; permissions: %s" % details
    def __repr__(self):
        # e.g. <ExistPermissions owner: ...; group: ...; permissions: ...>
        return '<%s %s>' % (self.__class__.__name__, str(self))
class QueryResult(xmlmap.XmlObject):
    """The results of an eXist XQuery query"""
    # The xmlmap descriptors below are mapped from eXist's query-result XML.
    start = xmlmap.IntegerField("@start")
    """The index of the first result returned"""
    values = xmlmap.StringListField("exist:value")
    "Generic value (*exist:value*) returned from an exist xquery"
    _raw_count = xmlmap.IntegerField("@count")
    @property
    def count(self):
        """The number of results returned in this chunk"""
        # a missing @count attribute is treated as zero
        return self._raw_count or 0
    _raw_hits = xmlmap.IntegerField("@hits")
    @property
    def hits(self):
        """The total number of hits found by the search"""
        # a missing @hits attribute is treated as zero
        return self._raw_hits or 0
    @property
    def results(self):
        """The result documents themselves as nodes, starting at
        :attr:`start` and containing :attr:`count` members"""
        return self.node.xpath('*')
    # FIXME: Why do we have two properties here with the same value?
    # start == show_from. We should pick one and deprecate the other.
    @property
    def show_from(self):
        """The index of first object in this result chunk.
        Equivalent to :attr:`start`."""
        return self.start
    # FIXME: Not sure how we're using this, but it feels wonky. If we're
    # using it for chunking or paging then we should probably follow the
    # slice convention of returning the index past the last one. If we're
    # using it for pretty-printing results ranges then the rVal < 0 branch
    # sounds like an exception condition that should be handled at a higher
    # level. Regardless, shouldn't some system invariant handle the rVal >
    # self.hits branch for us? This whole method just *feels* weird. It
    # warrants some examination.
    @property
    def show_to(self):
        """The index of last object in this result chunk"""
        # inclusive end index: start is 1-based, so subtract one before adding count
        rVal = (self.start - 1) + self.count
        if rVal > self.hits:
            #show_to can not exceed total hits
            return self.hits
        elif rVal < 0:
            return 0
        else:
            return rVal
    # FIXME: This, too, feels like it checks a number of things that should
    # probably be system invariants. We should coordinate what this does
    # with how it's actually used.
    def hasMore(self):
        """Are there more matches after this one?"""
        # any of hits/start/count being zero or None means no further results
        if not self.hits or not self.start or not self.count:
            return False
        return self.hits > (self.start + self.count)
# Custom xmlrpclib Transport classes for configurable timeout
# Initially adapted from code found here:
# http://stackoverflow.com/questions/372365/set-timeout-for-xmlrpclib-serverproxy
# NOTE: TimeoutHTTP and TimeoutHTTPS are needed for compatibility with
# Python 2.6 and earlier (see UGLY HACK ALERT below). They are not used in
# Python 2.7 and newer.
class TimeoutHTTP(httplib.HTTP):
    """:class:`httplib.HTTP` whose underlying connection accepts a timeout
    (Python 2.6-and-earlier compatibility only)."""
    def __init__(self, host='', port=None, strict=None, timeout=None):
        # httplib convention: port 0 means "use the default port"
        if port == 0:
            port = None
        self._setup(self._connection_class(host, port, strict, timeout))
class TimeoutHTTPS(httplib.HTTPS):
    """:class:`httplib.HTTPS` whose underlying connection accepts a timeout
    (Python 2.6-and-earlier compatibility only)."""
    def __init__(self, host='', port=None, strict=None, timeout=None):
        # httplib convention: port 0 means "use the default port"
        if port == 0:
            port = None
        self._setup(self._connection_class(host, port, strict, timeout))
class TimeoutTransport(xmlrpclib.Transport):
    '''Extend the default :class:`xmlrpclib.Transport` to expose a
    connection timeout parameter.'''
    # UGLY HACK ALERT. Python 2.6 wants make_connection to return something
    # that looks like a httplib.HTTP. Python 2.7 wants a
    # httplib.HTTPConnection. We use an ugly hack (commented below) to
    # figure out which environment we're running in. _http_connection is
    # used for 2.7-style connections; _http_connection_compat is used for
    # 2.6-style.
    _http_connection = httplib.HTTPConnection
    _http_connection_compat = TimeoutHTTP
    def __init__(self, timeout=None, *args, **kwargs):
        # timeout=None means "use the interpreter-wide default socket timeout"
        if timeout is None:
            timeout = socket._GLOBAL_DEFAULT_TIMEOUT
        xmlrpclib.Transport.__init__(self, *args, **kwargs)
        self.timeout = timeout
        # UGLY HACK ALERT. If we're running on Python 2.6 or earlier,
        # self.make_connection() needs to return an HTTP; newer versions
        # expect an HTTPConnection. Our strategy is to guess which is
        # running, and override self.make_connection for older versions.
        # That check and override happens here.
        if self._connection_requires_compat():
            self.make_connection = self._make_connection_compat
    def _connection_requires_compat(self):
        """Return True when running under Python 2.6 or earlier."""
        # UGLY HACK ALERT. Python 2.7 xmlrpclib caches connection objects in
        # self._connection (and sets self._connection in __init__). Python
        # 2.6 and earlier has no such cache. Thus, if self._connection
        # exists, we're running the newer-style, and if it doesn't then
        # we're running older-style and thus need compatibility mode.
        try:
            self._connection
            return False
        except AttributeError:
            return True
    def make_connection(self, host):
        """Return a (possibly cached) timeout-enabled connection to *host*."""
        # This is the make_connection that runs under Python 2.7 and newer.
        # The code is pulled straight from 2.7 xmlrpclib, except replacing
        # HTTPConnection with self._http_connection
        if self._connection and host == self._connection[0]:
            return self._connection[1]
        chost, self._extra_headers, x509 = self.get_host_info(host)
        self._connection = host, self._http_connection(chost, timeout=self.timeout)
        return self._connection[1]
    def _make_connection_compat(self, host):
        """Python 2.6-style make_connection; returns an HTTP-like object."""
        # This method runs as make_connection under Python 2.6 and older.
        # __init__ detects which version we need and pastes this method
        # directly into self.make_connection if necessary.
        host, extra_headers, x509 = self.get_host_info(host)
        return self._http_connection_compat(host, timeout=self.timeout)
class TimeoutSafeTransport(TimeoutTransport):
    '''Extend class:`TimeoutTransport` but use HTTPS connections;
    timeout-enabled equivalent to :class:`xmlrpclib.SafeTransport`.'''
    # HTTPS flavors of the connection classes used by TimeoutTransport
    _http_connection = httplib.HTTPSConnection
    _http_connection_compat = TimeoutHTTPS
| |
import contextlib
import json
import os
import shutil
import tempfile
from unittest import mock
from urllib.parse import quote as url_quote
import requests
from pylxd import exceptions, models
from pylxd.tests import testing
class TestInstance(testing.PyLXDTestCase):
"""Tests for pylxd.models.Instance."""
def test_all(self):
"""A list of all instances are returned."""
instances = models.Instance.all(self.client)
self.assertEqual(1, len(instances))
def test_get(self):
"""Return a instance."""
name = "an-instance"
an_instance = models.Instance.get(self.client, name)
self.assertEqual(name, an_instance.name)
def test_get_not_found(self):
"""LXDAPIException is raised when the instance doesn't exist."""
def not_found(request, context):
context.status_code = 404
return json.dumps(
{"type": "error", "error": "Not found", "error_code": 404}
)
self.add_rule(
{
"text": not_found,
"method": "GET",
"url": r"^http://pylxd.test/1.0/instances/an-missing-instance$",
}
)
name = "an-missing-instance"
self.assertRaises(
exceptions.LXDAPIException, models.Instance.get, self.client, name
)
def test_get_error(self):
"""LXDAPIException is raised when the LXD API errors."""
def not_found(request, context):
context.status_code = 500
return json.dumps(
{"type": "error", "error": "Not found", "error_code": 500}
)
self.add_rule(
{
"text": not_found,
"method": "GET",
"url": r"^http://pylxd.test/1.0/instances/an-missing-instance$",
}
)
name = "an-missing-instance"
self.assertRaises(
exceptions.LXDAPIException, models.Instance.get, self.client, name
)
def test_create(self):
"""A new instance is created."""
config = {"name": "an-new-instance"}
an_new_instance = models.Instance.create(self.client, config, wait=True)
self.assertEqual(config["name"], an_new_instance.name)
def test_create_remote_location(self):
"""A new instance is created at target."""
config = {"name": "an-new-remote-instance"}
# the server must be in a cluster for the location to be set
self.client.host_info["environment"]["server_clustered"] = True
an_new_remote_instance = models.Instance.create(
self.client, config, wait=True, target="an-remote"
)
self.assertEqual(config["name"], an_new_remote_instance.name)
self.assertEqual("an-remote", an_new_remote_instance.location)
def test_create_location_none(self):
config = {"name": "an-new-remote-instance"}
instance = models.Instance.create(self.client, config, wait=True)
self.assertIsNone(instance.location)
def test_exists(self):
"""A instance exists."""
name = "an-instance"
self.assertTrue(models.Instance.exists(self.client, name))
def test_not_exists(self):
"""A instance exists."""
def not_found(request, context):
context.status_code = 404
return json.dumps(
{"type": "error", "error": "Not found", "error_code": 404}
)
self.add_rule(
{
"text": not_found,
"method": "GET",
"url": r"^http://pylxd.test/1.0/instances/an-missing-instance$",
}
)
name = "an-missing-instance"
self.assertFalse(models.Instance.exists(self.client, name))
def test_fetch(self):
"""A sync updates the properties of a instance."""
an_instance = models.Instance(self.client, name="an-instance")
an_instance.sync()
self.assertTrue(an_instance.ephemeral)
def test_fetch_not_found(self):
"""LXDAPIException is raised on a 404 for updating instance."""
def not_found(request, context):
context.status_code = 404
return json.dumps(
{"type": "error", "error": "Not found", "error_code": 404}
)
self.add_rule(
{
"text": not_found,
"method": "GET",
"url": r"^http://pylxd.test/1.0/instances/an-missing-instance$",
}
)
an_instance = models.Instance(self.client, name="an-missing-instance")
self.assertRaises(exceptions.LXDAPIException, an_instance.sync)
def test_fetch_error(self):
"""LXDAPIException is raised on error."""
def not_found(request, context):
context.status_code = 500
return json.dumps(
{"type": "error", "error": "An bad error", "error_code": 500}
)
self.add_rule(
{
"text": not_found,
"method": "GET",
"url": r"^http://pylxd.test/1.0/instances/an-missing-instance$",
}
)
an_instance = models.Instance(self.client, name="an-missing-instance")
self.assertRaises(exceptions.LXDAPIException, an_instance.sync)
def test_update(self):
"""A instance is updated."""
an_instance = models.Instance(self.client, name="an-instance")
an_instance.architecture = 1
an_instance.config = {}
an_instance.created_at = 1
an_instance.devices = {}
an_instance.ephemeral = 1
an_instance.expanded_config = {}
an_instance.expanded_devices = {}
an_instance.profiles = 1
an_instance.status = 1
an_instance.save(wait=True)
self.assertTrue(an_instance.ephemeral)
def test_rename(self):
    """rename() updates the server and the local name attribute."""
    an_instance = models.Instance(self.client, name="an-instance")
    an_instance.rename("an-renamed-instance", wait=True)
    self.assertEqual("an-renamed-instance", an_instance.name)
def test_delete(self):
    """An instance is deleted."""
    # XXX: rockstar (21 May 2016) - This just executes
    # a code path. There should be an assertion here, but
    # it's not clear how to assert that, just yet.
    an_instance = models.Instance(self.client, name="an-instance")
    an_instance.delete(wait=True)
@mock.patch("pylxd.models.instance._StdinWebsocket")
@mock.patch("pylxd.models.instance._CommandWebsocketClient")
def test_execute(self, _CommandWebsocketClient, _StdinWebsocket):
    """A command is executed on an instance."""
    # Both websocket helpers are replaced by one mock whose buffered
    # data plays the role of the command's captured output.
    fake_websocket = mock.Mock()
    fake_websocket.data = "test\n"
    _StdinWebsocket.return_value = fake_websocket
    _CommandWebsocketClient.return_value = fake_websocket
    an_instance = models.Instance(self.client, name="an-instance")
    result = an_instance.execute(["echo", "test"])
    self.assertEqual(0, result.exit_code)
    self.assertEqual("test\n", result.stdout)
@mock.patch("pylxd.models.instance._StdinWebsocket")
@mock.patch("pylxd.models.instance._CommandWebsocketClient")
def test_execute_with_env(self, _CommandWebsocketClient, _StdinWebsocket):
    """A command is executed on an instance with custom env variables."""
    fake_websocket = mock.Mock()
    fake_websocket.data = "test\n"
    _StdinWebsocket.return_value = fake_websocket
    _CommandWebsocketClient.return_value = fake_websocket
    an_instance = models.Instance(self.client, name="an-instance")
    # The extra environment mapping must be accepted without error.
    result = an_instance.execute(["echo", "test"], environment={"DISPLAY": ":1"})
    self.assertEqual(0, result.exit_code)
    self.assertEqual("test\n", result.stdout)
def test_execute_string(self):
    """A command passed as string raises a TypeError."""
    an_instance = models.Instance(self.client, name="an-instance")
    self.assertRaises(TypeError, an_instance.execute, "apt-get update")
def test_raw_interactive_execute(self):
    """raw_interactive_execute returns the operation websocket paths."""
    an_instance = models.Instance(self.client, name="an-instance")
    result = an_instance.raw_interactive_execute(["/bin/bash"])
    self.assertEqual(
        result["ws"], "/1.0/operations/operation-abc/websocket?secret=abc"
    )
    self.assertEqual(
        result["control"], "/1.0/operations/operation-abc/websocket?secret=jkl"
    )
def test_raw_interactive_execute_env(self):
    """An environment mapping is accepted alongside the command."""
    an_instance = models.Instance(self.client, name="an-instance")
    result = an_instance.raw_interactive_execute(["/bin/bash"], {"PATH": "/"})
    self.assertEqual(
        result["ws"], "/1.0/operations/operation-abc/websocket?secret=abc"
    )
    self.assertEqual(
        result["control"], "/1.0/operations/operation-abc/websocket?secret=jkl"
    )
def test_raw_interactive_execute_string(self):
    """A command passed as string raises a TypeError."""
    an_instance = models.Instance(self.client, name="an-instance")
    self.assertRaises(
        TypeError, an_instance.raw_interactive_execute, "apt-get update"
    )
def test_raw_interactive_execute_options(self):
    """It's possible to pass user, group and cwd arguments to an execute command."""
    an_instance = models.Instance(self.client, name="an-instance")
    result = an_instance.raw_interactive_execute(
        ["/bin/bash"], user="user", group="group", cwd="/some/path"
    )
    self.assertEqual(
        result["ws"], "/1.0/operations/operation-abc/websocket?secret=abc"
    )
    self.assertEqual(
        result["control"], "/1.0/operations/operation-abc/websocket?secret=jkl"
    )
def test_migrate(self):
    """An instance is migrated to a second client."""
    from pylxd.client import Client

    client2 = Client(endpoint="http://pylxd2.test")
    an_instance = models.Instance(self.client, name="an-instance")
    an_migrated_instance = an_instance.migrate(client2)
    # The migrated copy keeps its name but is bound to the new client.
    self.assertEqual("an-instance", an_migrated_instance.name)
    self.assertEqual(client2, an_migrated_instance.client)
@mock.patch("pylxd.models.instance.Instance.generate_migration_data")
def test_migrate_exception_error(self, generate_migration_data):
    """LXDAPIException is raised in case of migration failure"""
    from pylxd.client import Client
    from pylxd.exceptions import LXDAPIException

    def generate_exception(*args, **kwargs):
        # Simulate a hard API failure while preparing migration data.
        response = mock.Mock()
        response.status_code = 400
        raise LXDAPIException(response)

    generate_migration_data.side_effect = generate_exception
    an_instance = models.Instance(self.client, name="an-instance")
    client2 = Client(endpoint="http://pylxd2.test")
    self.assertRaises(LXDAPIException, an_instance.migrate, client2)
@mock.patch("pylxd.models.instance.Instance.generate_migration_data")
def test_migrate_exception_running(self, generate_migration_data):
    """Migrated instance already running on destination"""
    from pylxd.client import Client
    from pylxd.exceptions import LXDAPIException

    client2 = Client(endpoint="http://pylxd2.test")
    an_instance = models.Instance(self.client, name="an-instance")
    an_instance.status_code = 103

    def generate_exception(*args, **kwargs):
        # A 103 ("Running") response must be tolerated, not re-raised.
        response = mock.Mock()
        response.status_code = 103
        raise LXDAPIException(response)

    generate_migration_data.side_effect = generate_exception
    an_migrated_instance = an_instance.migrate(client2, live=True)
    self.assertEqual("an-instance", an_migrated_instance.name)
    self.assertEqual(client2, an_migrated_instance.client)
    # Live migration must request migration data exactly once.
    generate_migration_data.assert_called_once_with(True)
def test_migrate_started(self):
    """A running instance (status 103) is migrated."""
    from pylxd.client import Client

    client2 = Client(endpoint="http://pylxd2.test")
    an_instance = models.Instance.get(self.client, name="an-instance")
    an_instance.status_code = 103
    an_migrated_instance = an_instance.migrate(client2)
    self.assertEqual("an-instance", an_migrated_instance.name)
    self.assertEqual(client2, an_migrated_instance.client)
def test_migrate_stopped(self):
    """A stopped instance (status 102) is migrated."""
    from pylxd.client import Client

    client2 = Client(endpoint="http://pylxd2.test")
    an_instance = models.Instance.get(self.client, name="an-instance")
    an_instance.status_code = 102
    an_migrated_instance = an_instance.migrate(client2)
    self.assertEqual("an-instance", an_migrated_instance.name)
    self.assertEqual(client2, an_migrated_instance.client)
@mock.patch("pylxd.client._APINode.get")
def test_migrate_local_client(self, get):
    """Migration from local clients is not supported."""
    # Mock out the _APINode for the local instance.
    response = mock.Mock()
    response.json.return_value = {"metadata": {"fake": "response"}}
    response.status_code = 200
    get.return_value = response

    from pylxd.client import Client

    # http+unix marks a local (unix-socket) client, which cannot migrate.
    client2 = Client(endpoint="http+unix://pylxd2.test")
    an_instance = models.Instance(client2, name="an-instance")
    self.assertRaises(ValueError, an_instance.migrate, self.client)
def test_publish(self):
    """Instances can be published."""
    # Stub the operation-status endpoint so publish(wait=True) sees a
    # finished operation carrying the new image fingerprint.
    self.add_rule(
        {
            "text": json.dumps(
                {
                    "type": "sync",
                    "metadata": {
                        "id": "operation-abc",
                        "metadata": {
                            "fingerprint": (
                                "e3b0c44298fc1c149afbf4c8996fb92427"
                                "ae41e4649b934ca495991b7852b855"
                            )
                        },
                    },
                }
            ),
            "method": "GET",
            "url": r"^http://pylxd.test/1.0/operations/operation-abc$",
        }
    )
    an_instance = models.Instance(self.client, name="an-instance")
    # Hack to get around mocked data
    an_instance.type = "container"
    image = an_instance.publish(wait=True)
    self.assertEqual(
        "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
        image.fingerprint,
    )
def test_restore_snapshot(self):
    """Snapshots can be restored"""
    an_instance = models.Instance(self.client, name="an-instance")
    an_instance.restore_snapshot("thing")
class TestInstanceState(testing.PyLXDTestCase):
    """Tests for pylxd.models.InstanceState."""

    def test_get(self):
        """state() reflects the mocked running instance."""
        name = "an-instance"
        an_instance = models.Instance.get(self.client, name)
        state = an_instance.state()
        self.assertEqual("Running", state.status)
        # Mocked API pairs the "Running" status with code 103.
        self.assertEqual(103, state.status_code)

    def test_start(self):
        """An instance is started."""
        an_instance = models.Instance.get(self.client, "an-instance")
        an_instance.start(wait=True)

    def test_stop(self):
        """An instance is stopped."""
        an_instance = models.Instance.get(self.client, "an-instance")
        an_instance.stop()

    def test_restart(self):
        """An instance is restarted."""
        an_instance = models.Instance.get(self.client, "an-instance")
        an_instance.restart()

    def test_freeze(self):
        """An instance is suspended."""
        an_instance = models.Instance.get(self.client, "an-instance")
        an_instance.freeze()

    def test_unfreeze(self):
        """An instance is resumed."""
        an_instance = models.Instance.get(self.client, "an-instance")
        an_instance.unfreeze()
class TestInstanceSnapshots(testing.PyLXDTestCase):
    """Tests for pylxd.models.Instance.snapshots."""

    def setUp(self):
        super().setUp()
        # All tests use the snapshot manager of the same mocked instance.
        self.instance = models.Instance.get(self.client, "an-instance")

    def test_get(self):
        """Return a specific snapshot."""
        snapshot = self.instance.snapshots.get("an-snapshot")
        self.assertEqual("an-snapshot", snapshot.name)

    def test_all(self):
        """Return all snapshots."""
        snapshots = self.instance.snapshots.all()
        self.assertEqual(1, len(snapshots))
        self.assertEqual("an-snapshot", snapshots[0].name)
        # Returned snapshots stay bound to the client and parent instance.
        self.assertEqual(self.client, snapshots[0].client)
        self.assertEqual(self.instance, snapshots[0].instance)

    def test_create(self):
        """Create a snapshot."""
        snapshot = self.instance.snapshots.create(
            "an-snapshot", stateful=True, wait=True
        )
        self.assertEqual("an-snapshot", snapshot.name)
class TestSnapshot(testing.PyLXDTestCase):
    """Tests for pylxd.models.Snapshot."""

    def setUp(self):
        super().setUp()
        # Every snapshot belongs to this mocked parent instance.
        self.instance = models.Instance.get(self.client, "an-instance")

    def test_rename(self):
        """A snapshot is renamed."""
        snapshot = models.Snapshot(
            self.client, instance=self.instance, name="an-snapshot"
        )
        snapshot.rename("an-renamed-snapshot", wait=True)
        self.assertEqual("an-renamed-snapshot", snapshot.name)

    def test_delete(self):
        """A snapshot is deleted."""
        snapshot = models.Snapshot(
            self.client, instance=self.instance, name="an-snapshot"
        )
        snapshot.delete(wait=True)
        # TODO: add an assertion here

    def test_delete_failure(self):
        """If the response indicates delete failure, raise an exception."""
        def not_found(request, context):
            context.status_code = 404
            return json.dumps(
                {"type": "error", "error": "Not found", "error_code": 404}
            )
        self.add_rule(
            {
                "text": not_found,
                "method": "DELETE",
                "url": (
                    r"^http://pylxd.test/1.0/instances/"
                    "an-instance/snapshots/an-snapshot$"
                ),
            }
        )
        snapshot = models.Snapshot(
            self.client, instance=self.instance, name="an-snapshot"
        )
        self.assertRaises(exceptions.LXDAPIException, snapshot.delete)

    def test_publish(self):
        """Snapshots can be published."""
        # Stub the operation endpoint so publish(wait=True) receives the
        # fingerprint of the image created from the snapshot.
        self.add_rule(
            {
                "text": json.dumps(
                    {
                        "type": "sync",
                        "metadata": {
                            "id": "operation-abc",
                            "metadata": {
                                "fingerprint": (
                                    "e3b0c44298fc1c149afbf4c8996fb92427"
                                    "ae41e4649b934ca495991b7852b855"
                                )
                            },
                        },
                    }
                ),
                "method": "GET",
                "url": r"^http://pylxd.test/1.0/operations/operation-abc$",
            }
        )
        snapshot = models.Snapshot(
            self.client, instance=self.instance, name="an-snapshot"
        )
        image = snapshot.publish(wait=True)
        self.assertEqual(
            "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
            image.fingerprint,
        )

    def test_restore_snapshot(self):
        """Snapshots can be restored from the snapshot object"""
        snapshot = models.Snapshot(
            self.client, instance=self.instance, name="an-snapshot"
        )
        snapshot.restore(wait=True)
class TestFiles(testing.PyLXDTestCase):
    """Tests for pylxd.models.Instance.files."""

    def setUp(self):
        super().setUp()
        self.instance = models.Instance.get(self.client, "an-instance")

    def test_put_delete(self):
        """A file is put on the instance and then deleted"""
        # we are mocked, so delete should initially not be available
        self.assertEqual(False, self.instance.files.delete_available())
        self.assertRaises(
            exceptions.LXDAPIExtensionNotAvailable,
            self.instance.files.delete,
            "/some/file",
        )
        # Now insert delete: advertise the "file_delete" API extension.
        self.add_rule(
            {
                "text": json.dumps(
                    {
                        "type": "sync",
                        "metadata": {
                            "auth": "trusted",
                            "environment": {
                                "certificate": "an-pem-cert",
                            },
                            "api_extensions": ["file_delete"],
                        },
                    }
                ),
                "method": "GET",
                "url": r"^http://pylxd.test/1.0$",
            }
        )
        # Update hostinfo
        self.client.host_info = self.client.api.get().json()["metadata"]
        self.assertEqual(True, self.instance.files.delete_available())
        # mock out the delete rule:
        self.add_rule(
            {
                "method": "DELETE",
                "url": (
                    r"^http://pylxd.test/1.0/instances/an-instance/files"
                    r"\?path=%2Fsome%2Ffile$"
                ),
            }
        )
        self.instance.files.delete("/some/file")

        # now check that an error (non 200) causes an exception
        def responder(request, context):
            context.status_code = 404

        self.add_rule(
            {
                "text": responder,
                "method": "DELETE",
                "url": (
                    r"^http://pylxd.test/1.0/instances/an-instance/files"
                    r"\?path=%2Fsome%2Ffile%2Fnot%2Ffound$"
                ),
            }
        )
        with self.assertRaises(exceptions.LXDAPIException):
            self.instance.files.delete("/some/file/not/found")

    def test_put_mode_uid_gid(self):
        """Should be able to set the mode, uid and gid of a file"""
        # fix up the default POST rule to allow us to see the posted vars
        _capture = {}

        def capture(request, context):
            # Record the request headers so assertions can inspect them.
            _capture["headers"] = getattr(request._request, "headers")
            context.status_code = 200

        self.add_rule(
            {
                "text": capture,
                "method": "POST",
                "url": (
                    r"^http://pylxd.test/1.0/instances/an-instance/files"
                    r"\?path=%2Ftmp%2Fputted$"
                ),
            }
        )
        data = "The quick brown fox"
        # start with an octal mode
        self.instance.files.put("/tmp/putted", data, mode=0o123, uid=1, gid=2)
        headers = _capture["headers"]
        self.assertEqual(headers["X-LXD-mode"], "0123")
        self.assertEqual(headers["X-LXD-uid"], "1")
        self.assertEqual(headers["X-LXD-gid"], "2")
        # use a str mode this type
        self.instance.files.put("/tmp/putted", data, mode="456")
        headers = _capture["headers"]
        self.assertEqual(headers["X-LXD-mode"], "0456")
        # check that mode='0644' also works (i.e. already has 0 prefix)
        self.instance.files.put("/tmp/putted", data, mode="0644")
        headers = _capture["headers"]
        self.assertEqual(headers["X-LXD-mode"], "0644")
        # check that assertion is raised
        with self.assertRaises(ValueError):
            self.instance.files.put("/tmp/putted", data, mode=object)

    def test_mk_dir(self):
        """Tests pushing an empty directory"""
        _capture = {}

        def capture(request, context):
            _capture["headers"] = getattr(request._request, "headers")
            context.status_code = 200

        self.add_rule(
            {
                "text": capture,
                "method": "POST",
                "url": (
                    r"^http://pylxd.test/1.0/instances/an-instance/files"
                    r"\?path=%2Ftmp%2Fputted$"
                ),
            }
        )
        self.instance.files.mk_dir("/tmp/putted", mode=0o123, uid=1, gid=2)
        headers = _capture["headers"]
        self.assertEqual(headers["X-LXD-type"], "directory")
        self.assertEqual(headers["X-LXD-mode"], "0123")
        self.assertEqual(headers["X-LXD-uid"], "1")
        self.assertEqual(headers["X-LXD-gid"], "2")
        # check that assertion is raised
        with self.assertRaises(ValueError):
            self.instance.files.mk_dir("/tmp/putted", mode=object)
        # A non-200 POST must surface as LXDAPIException.
        response = mock.Mock()
        response.status_code = 404
        with mock.patch("pylxd.client._APINode.post", response):
            with self.assertRaises(exceptions.LXDAPIException):
                self.instance.files.mk_dir("/tmp/putted")

    def test_recursive_put(self):
        """recursive_put uploads a directory tree, directories first."""

        @contextlib.contextmanager
        def tempdir(prefix="tmp"):
            # Self-cleaning temporary directory for the local tree.
            tmpdir = tempfile.mkdtemp(prefix=prefix)
            try:
                yield tmpdir
            finally:
                shutil.rmtree(tmpdir)

        def create_file(_dir, name, content):
            path = os.path.join(_dir, name)
            actual_dir = os.path.dirname(path)
            if not os.path.exists(actual_dir):
                os.makedirs(actual_dir)
            with open(path, "w") as f:
                f.write(content)

        _captures = []

        def capture(request, context):
            # Record each POST (headers + body) in upload order.
            _captures.append(
                {
                    "headers": getattr(request._request, "headers"),
                    "body": request._request.body,
                }
            )
            context.status_code = 200

        with tempdir() as _dir:
            base = r"^http://pylxd.test/1.0/instances/" r"an-instance/files\?path="
            rules = [
                {
                    "text": capture,
                    "method": "POST",
                    "url": base + url_quote("target", safe="") + "$",
                },
                {
                    "text": capture,
                    "method": "POST",
                    "url": base + url_quote("target/dir", safe="") + "$",
                },
                {
                    "text": capture,
                    "method": "POST",
                    "url": base + url_quote("target/file1", safe="") + "$",
                },
                {
                    "text": capture,
                    "method": "POST",
                    "url": base + url_quote("target/dir/file2", safe="") + "$",
                },
            ]
            self.add_rules(rules)

            create_file(_dir, "file1", "This is file1")
            create_file(_dir, "dir/file2", "This is file2")

            self.instance.files.recursive_put(_dir, "./target/")

            # Expected order: dir POSTs interleaved with their file POSTs.
            self.assertEqual(_captures[0]["headers"]["X-LXD-type"], "directory")
            self.assertEqual(_captures[1]["body"], b"This is file1")
            self.assertEqual(_captures[2]["headers"]["X-LXD-type"], "directory")
            self.assertEqual(_captures[3]["body"], b"This is file2")

    def test_get(self):
        """A file is retrieved from the instance."""
        data = self.instance.files.get("/tmp/getted")
        self.assertEqual(b"This is a getted file", data)

    def test_recursive_get(self):
        """A folder is retrieved recursively from the instance"""

        @contextlib.contextmanager
        def tempdir(prefix="tmp"):
            tmpdir = tempfile.mkdtemp(prefix=prefix)
            try:
                yield tmpdir
            finally:
                shutil.rmtree(tmpdir)

        def create_file(_dir, name, content):
            path = os.path.join(_dir, name)
            actual_dir = os.path.dirname(path)
            if not os.path.exists(actual_dir):
                os.makedirs(actual_dir)
            with open(path, "w") as f:
                f.write(content)

        _captures = []

        def capture(request, context):
            _captures.append(
                {
                    "headers": getattr(request._request, "headers"),
                    "body": request._request.body,
                }
            )
            context.status_code = 200

        # One directory listing followed by two file downloads.
        response = requests.models.Response()
        response.status_code = 200
        response.headers["X-LXD-type"] = "directory"
        response._content = json.dumps({"metadata": ["file1", "file2"]})
        response1 = requests.models.Response()
        response1.status_code = 200
        response1.headers["X-LXD-type"] = "file"
        response1._content = "This is file1"
        response2 = requests.models.Response()
        response2.status_code = 200
        response2.headers["X-LXD-type"] = "file"
        response2._content = "This is file2"
        return_values = [response, response1, response2]
        with mock.patch("pylxd.client._APINode.get") as get_mocked:
            get_mocked.side_effect = return_values
            with mock.patch("os.mkdir") as mkdir_mocked:
                # distinction needed for the code to work with python2.7 and 3
                # (patching "__builtin__.open" raises ImportError on Python 3,
                # where the module is named "builtins").
                try:
                    with mock.patch("__builtin__.open") as open_mocked:
                        self.instance.files.recursive_get("/tmp/getted", "/tmp")
                        assert mkdir_mocked.call_count == 1
                        assert open_mocked.call_count == 2
                except ImportError:
                    try:
                        with mock.patch("builtins.open") as open_mocked:
                            self.instance.files.recursive_get("/tmp/getted", "/tmp")
                            assert mkdir_mocked.call_count == 1
                            assert open_mocked.call_count == 2
                    except ImportError as e:
                        raise e

    def test_get_not_found(self):
        """LXDAPIException is raised on bogus filenames."""

        def not_found(request, context):
            context.status_code = 500

        rule = {
            "text": not_found,
            "method": "GET",
            "url": (
                r"^http://pylxd.test/1.0/instances/an-instance/files"
                r"\?path=%2Ftmp%2Fgetted$"
            ),
        }
        self.add_rule(rule)
        self.assertRaises(
            exceptions.LXDAPIException, self.instance.files.get, "/tmp/getted"
        )

    def test_get_error(self):
        """LXDAPIException is raised on error."""

        def not_found(request, context):
            context.status_code = 503

        rule = {
            "text": not_found,
            "method": "GET",
            "url": (
                r"^http://pylxd.test/1.0/instances/an-instance/files"
                r"\?path=%2Ftmp%2Fgetted$"
            ),
        }
        self.add_rule(rule)
        self.assertRaises(
            exceptions.LXDAPIException, self.instance.files.get, "/tmp/getted"
        )

    # for bug/281 -- getting an empty json file is interpreted as an API
    # get rather than a raw get.
    def test_get_json_file(self):
        data = self.instance.files.get("/tmp/json-get")
        self.assertEqual(b'{"some": "value"}', data)
| |
#!/usr/bin/env python
# coding: utf-8
import os
import re
import time
import subprocess
import glob
import tarfile
import shutil
import getpass
DelExe = '../testMain'
# OutDir = '/store/user/benwu/Stop16/Trigger/'
# OutDir = '/store/user/benwu/Stop16/TTZ'
OutDir = '/store/user/benwu/Stop16/Tagger'
# OutDir = '/store/user/benwu/Stop16/TFCheck'
# tempdir = ''
tempdir = '/uscmst1b_scratch/lpc1/lpctrig/benwu/CondorTemp'
UserEMAIL = 'benwu@fnal.gov'
ProjectName = 'Aggreate_v11'
# ProjectName = 'TaggerLepClean_v0'
# ProjectName = 'Signal_v0'
# ProjectName = 'Batool_v9'
# ProjectName = 'StopPre_v2'
# ProjectName = 'DataMC_v11'
# ProjectName = 'BugTest_v0'
# ProjectName = 'FullSimFastSim_v1'
Process = {
# # #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SM ~~~~~
# "WJetsToLNu_HT_70to100" : ['', 10],
# "WJetsToLNu_HT_100to200" : ['', 50],
# "WJetsToLNu_HT_200to400" : ['', 30],
# "WJetsToLNu_HT_400to600" : ['', 50],
# "WJetsToLNu_HT_600to800" : ['', 20],
# "WJetsToLNu_HT_800to1200" : ['', 7],
# "WJetsToLNu_HT_1200to2500" : ['', 6],
# "WJetsToLNu_HT_2500toInf" : ['', 4],
# # "WJetsToLNu_Inc" : ['', 20],
# "DYJetsToLL_HT_70to100" : ['', 10],
# "DYJetsToLL_HT_100to200" : ['', 20],
# "DYJetsToLL_HT_200to400" : ['', 20],
# "DYJetsToLL_HT_400to600" : ['', 20],
# "DYJetsToLL_HT_600to800" : ['', 20],
# "DYJetsToLL_HT_800to1200" : ['', 8],
# "DYJetsToLL_HT_1200to2500" : ['', 10],
# "DYJetsToLL_HT_2500toInf" : ['', 8],
# # "DYJetsToLL_Inc" : ['', 50],
# "ZJetsToNuNu_HT_100to200" : ['', 13],
# "ZJetsToNuNu_HT_200to400" : ['', 15],
# "ZJetsToNuNu_HT_400to600" : ['', 7],
# "ZJetsToNuNu_HT_600to800" : ['', 7],
# "ZJetsToNuNu_HT_800to1200" : ['', 10],
# "ZJetsToNuNu_HT_1200to2500" : ['', 5],
# "ZJetsToNuNu_HT_2500toInf" : ['', 6],
# "TTbarDiLep" : ['', 20],
# "TTbarSingleLepTbar" : ['', 80],
# "TTbarSingleLepT" : ['', 80],
# # "TTbar_fastsim_wt_genJets_wt_genMET" : ['', 15],
# # "TTbar_fullsim_wt_genJets_wt_genMET" : ['', 15],
# # "TTbar_HT-600to800" : ['', 15],
# # "TTbar_HT-800to1200" : ['', 15],
# # "TTbar_HT-1200to2500" : ['', 6],
# # "TTbar_HT-2500toInf" : ['', 3],
# "QCD_HT100to200" : ['', 40],
# "QCD_HT200to300" : ['', 25],
# "QCD_HT300to500" : ['', 32],
# "QCD_HT500to700" : ['', 50],
# "QCD_HT700to1000" : ['', 50],
# "QCD_HT1000to1500" : ['', 13],
# "QCD_HT1500to2000" : ['', 11],
# "QCD_HT2000toInf" : ['', 10],
# #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ SingleTop ~~~~~
# "ST_s" : ['', 4],
# "ST_t_antitop" : ['', 40],
# "ST_t_top" : ['', 80],
# "tW_antitop_incl" : ['', 40],
# # "tW_antitop_NoHad" : ['', 10],
# "tW_top_incl" : ['', 40],
# # "tW_top_NoHad" : ['', 10],
# # # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Top Associated Production ~~~~~
# "ST_tWll" : ['', 2],
# "ST_tWnunu" : ['', 2],
# "TTGJets" : ['', 16],
# "ttHTobb" : ['', 12],
# "ttHToNonbb" : ['', 10],
# "TTTT" : ['', 2],
# "TTWJetsToLNu" : ['', 16],
# "TTWJetsToQQ" : ['', 2],
# "TTZToLLNuNu" : ['', 4],
# "TTZToQQ" : ['', 2],
# "tZq_ll" : ['', 30],
# #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Diboson ~~~~~
# "ZZTo2L2Nu" : ['', 22],
# "ZZTo2Q2Nu" : ['', 50],
# "ZZTo4L" : ['', 10],
# "ZZTo4Q" : ['', 50],
# # "WZ" : ['', 8],
# "WZTo1L1Nu2Q" : ['', 48],
# "WZTo1L3Nu" : ['', 2],
# "WZTo3LNu" : ['', 20],
# "WWTo2L2Nu" : ['', 2],
# "WWTo4Q" : ['', 2],
# "WWToLNuQQ" : ['', 12],
# #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ TriBoson ~~~~~
# "WWG" : ['', 2],
# "WWW" : ['', 2],
# "WWZ" : ['', 2],
# "WZG" : ['', 2],
# "WZZ" : ['', 2],
# "ZZZ" : ['', 2],
# #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Higgs ~~~~~
# "VHToNonbb" : ['', 2],
# "GluGluHToZZTo4L" : ['', 2],
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Data ~~~~~
# "Data_MET_Run2016G" : ['', 200],
# # "Data_SingleElectron_Run2016G" : ['', 200],
# "Data_SingleElectron_2016" : ['', 1400],
"Data_MET_2016" : ['', 600],
# "Data_SingleMuon_2016" : ['', 1400],
# "Data_HTMHT_2016" : ['', 1000],
# "Data_SingleMuon_Run2016G" : ['', 200],
# #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Signal ~~~~~
# "Signal_fastsim_T1tttt_1200_800" : ['',1],
# "Signal_fastsim_T1tttt_1500_100" : ['',1],
# "Signal_fastsim_T1tttt_2000_100" : ['',1],
# "Signal_fastsim_T2tt_425_325" : ['',1],
# "Signal_fastsim_T2tt_500_325" : ['',1],
# "Signal_fastsim_T2tt_650_350" : ['',1],
# "Signal_fastsim_T2tt_850_100" : ['',1],
# "Signal_fullsim_T1tttt_2000_100" : ['',1],
# "Signal_fullsim_T2tt_425_325" : ['',1],
# "Signal_fullsim_T2tt_650_350" : ['',1],
# "Signal_T1tttt_mGluino1200_mLSP800" : ['',1],
# "Signal_T1tttt_mGluino1500_mLSP100" : ['',1],
# "Signal_T2tt_mStop500_mLSP325" : ['',1],
# "Signal_T2tt_mStop850_mLSP100" : ['',3],
}
Mergeblock = """#!/usr/bin/env python
# File : merge.py
# Author : Ben Wu
# Contact : benwu@fnal.gov
# Date : 2015 Jul 20
#
# Description : Code to merge output hists
import re
import glob
import os
import subprocess
import multiprocessing
def MergeFile(prod):
print "Processing %s" % prod
g = glob.glob("%s*.root" % prod)
logfile = open("%s.log" % prod, 'w')
sub = re.compile(r'^%s_\d+\.root$' % prod)
allfile = set()
goodfile = set()
for f in g:
if sub.match(f) is not None:
allfile.add(f)
if os.path.getsize(f) != 0:
goodfile.add(f)
run = "hadd -f merged/%s.root " % prod
run += " ".join(goodfile)
process = subprocess.Popen(run, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate()
logfile.write(out)
logfile.write(err)
logfile.close()
if __name__ == "__main__":
cmd_exists = lambda x: any(os.access(os.path.join(path, x), os.X_OK)
for path in os.environ["PATH"].split(os.pathsep))
if cmd_exists('hadd'):
if not os.path.isdir("merged"):
os.mkdir("merged")
else:
HEADER = '[95m'
OKBLUE = '[94m'
OKGREEN = '[92m'
WARNING = '[93m'
FAIL = '[91m'
ENDC = '[0m'
BOLD = '[1m'
UNDERLINE = '[4m'
print(FAIL + "Warning: no hadd available! Please setup ROOT!!" + ENDC)
exit()
pattern = re.compile(r'^(.*)_\d+\.root$')
g = glob.glob("*.root")
## Get all the process
process = set()
for files in g:
match = pattern.match(files)
if match is not None:
process.add(match.group(1))
else:
print files
cmd = "cp %s merged/" % files
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = proc.communicate()
print process
## Run with multiprocessing Pool
pool = multiprocessing.Pool(processes = multiprocessing.cpu_count()/3)
pool.map(MergeFile, process)
"""
def Condor_Sub(condor_file):
    """Submit one condor description file from inside its own directory.

    condor_submit is run in the file's directory so relative paths in the
    description resolve; the previous working directory is restored after.
    """
    curdir = os.path.abspath(os.path.curdir)
    os.chdir(os.path.dirname(condor_file))
    print "To submit condor with " + condor_file
    os.system("condor_submit " + condor_file)
    os.chdir(curdir)
def SplitPro(key, file, fraction):
    """Split the input file list for *key* into at most *fraction* chunks.

    The chunks are written to <tempdir>/FileList/<key>.<i>.list and their
    absolute paths returned. With fraction == 1 the list is copied whole.
    If the list has fewer lines than *fraction*, one chunk per line is made.

    Fixes over the previous version: input/output file handles are closed
    (they were leaked), and floor division is written explicitly with //
    so the chunk size stays an int under Python 3 as well.
    """
    splitedfiles = []
    filelistdir = tempdir + '/' + "FileList"
    try:
        os.makedirs(filelistdir)
    except OSError:
        # Directory already exists -- reuse it.
        pass
    filename = os.path.abspath(file)

    if fraction == 1:
        # No splitting: stage the whole list as-is.
        splitedfiles.append(os.path.abspath(filename))
        shutil.copy2(os.path.abspath(filename), "%s/%s" % (filelistdir, os.path.basename(filename)))
        return splitedfiles

    # fix: use a context manager so the input list is always closed
    with open(filename, 'r') as f:
        lines = f.readlines()

    if len(lines) <= fraction:
        lineperfile = 1
        fraction = len(lines)
    else:
        # fix: explicit floor division (true division under Python 3
        # would produce a float and break the slicing below)
        lineperfile = len(lines) // fraction
        if len(lines) % fraction > 0:
            # Round up so no trailing lines are dropped.
            lineperfile += 1

    for i in range(0, fraction):
        if i == fraction - 1:
            # Last chunk takes whatever remains.
            wlines = lines[lineperfile * i:]
        else:
            wlines = lines[lineperfile * i: lineperfile * (i + 1)]
        if len(wlines) > 0:
            outpath = "%s/%s.%d.list" % (filelistdir, key, i)
            # fix: close each chunk file before recording its path
            with open(outpath, 'w') as outf:
                outf.writelines(wlines)
            splitedfiles.append(os.path.abspath(outpath))

    return splitedfiles
def my_process():
    """Stage inputs, build the job tarball, and submit one condor cluster
    per entry in ``Process``.

    Uses the module configuration globals (OutDir, tempdir, ProjectName,
    DelExe, Process, Mergeblock) and the CMSSW environment variables
    SCRAM_ARCH and CMSSW_VERSION.

    Fixes over the previous version: processes whose file list is missing
    are skipped consistently in the submission loop (they previously
    caused a KeyError on NewNpro), merge.py is written through a context
    manager instead of a leaked handle, and GetNeededFileList no longer
    depends on the leftover loop variable `key` (NameError when Process
    is empty; the argument is unused anyway).
    """
    ## temp dir for submit
    global tempdir
    global Mergeblock
    global ProjectName
    # Date-prefix the project so repeated submissions get fresh dirs.
    ProjectName = time.strftime('%b%d') + ProjectName
    tempdir = tempdir + os.getlogin() + "/" + ProjectName + "/"
    try:
        os.makedirs(tempdir)
    except OSError:
        pass

    ## Create the output directory on EOS
    outdir = OutDir + "/" + ProjectName + "/"
    try:
        os.makedirs("/eos/uscms/%s" % outdir)
    except OSError:
        pass

    ## Instantiate RunExe.csh with the local CMSSW setup and output dir
    RunHTFile = tempdir + "/" + "RunExe.csh"
    with open(RunHTFile, "wt") as outfile:
        for line in open("RunExe.csh", "r"):
            line = line.replace("DELSCR", os.environ['SCRAM_ARCH'])
            line = line.replace("DELDIR", os.environ['CMSSW_VERSION'])
            line = line.replace("DELEXE", DelExe.split('/')[-1])
            line = line.replace("OUTDIR", outdir)
            outfile.write(line)

    ## Script for merging output histograms
    mergepath = "%s/merge.py" % tempdir
    # fix: context manager closes the handle (was left open before)
    with open(mergepath, 'wt') as mergeout:
        mergeout.writelines(Mergeblock)
    shutil.copy2(mergepath, "/eos/uscms/%s/merge.py" % outdir)

    ### Keeping track of running script
    shutil.copy2("../src/testMain.cc", "/eos/uscms/%s/testMain.cc" % outdir)

    ### Create tarball with everything a worker node needs
    NewNpro = {}
    Tarfiles = []
    for key, value in Process.items():
        if value[0] == "":
            # Default file list location derived from the process name.
            value[0] = "../FileList/" + key + ".list"
        if not os.path.isfile(value[0]):
            # Missing list: the process is skipped entirely.
            continue
        npro = GetProcess(key, value)
        Tarfiles += npro
        NewNpro[key] = len(npro)
    Tarfiles.append(os.path.abspath(DelExe))
    # fix: GetNeededFileList ignores its argument; don't rely on the
    # leftover loop variable (NameError when Process is empty).
    Tarfiles += GetNeededFileList(None)
    tarballname = "%s/%s.tar.gz" % (tempdir, ProjectName)
    with tarfile.open(tarballname, "w:gz", dereference=True) as tar:
        for fname in Tarfiles:
            tar.add(fname, arcname=fname.split('/')[-1])

    ### Write and submit one condor description per surviving process
    for key, value in Process.items():
        if key not in NewNpro:
            # fix: skipped above (missing file list); previously this
            # raised KeyError here.
            continue
        if NewNpro[key] > 1:
            arg = "\nArguments = %s.$(Process).list %s_$(Process).root \nQueue %d \n" % (key, key, NewNpro[key])
        else:
            arg = "\nArguments = %s.list %s.root \n Queue\n" % (key, key)

        ## Prepare the condor file
        condorfile = tempdir + "/" + "condor_" + ProjectName + "_" + key
        with open(condorfile, "wt") as outfile:
            for line in open("condor_template", "r"):
                line = line.replace("EXECUTABLE", os.path.abspath(RunHTFile))
                line = line.replace("TARFILES", tarballname)
                line = line.replace("TEMPDIR", tempdir)
                line = line.replace("PROJECTNAME", ProjectName)
                line = line.replace("ARGUMENTS", arg)
                outfile.write(line)
        Condor_Sub(condorfile)
def GetProcess(key, value):
    """Split the file list of one process entry into per-job lists.

    ``value`` is ``[filelist]`` or ``[filelist, njobs]``; when the job
    count is absent the list is staged whole (a single job).
    """
    njobs = value[1] if len(value) > 1 else 1
    return SplitPro(key, value[0], njobs)
def GetNeededFileList(key):
    """Collect the auxiliary inputs each job needs: file-list tarballs and
    any ROOT/csv/cfg/model files one level up, plus the user-built shared
    libraries the executable links against.

    Note: *key* is currently unused.
    """
    relist = []
    g = glob.glob("../FileList/*.tar.gz")
    relist += [os.path.abspath(h) for h in g]
    g = glob.glob("../*root")
    relist += [os.path.abspath(h) for h in g]
    g = glob.glob("../*csv")
    relist += [os.path.abspath(h) for h in g]
    g = glob.glob("../*cfg")
    relist += [os.path.abspath(h) for h in g]
    g = glob.glob("../*model")
    relist += [os.path.abspath(h) for h in g]
    # Ship shared libraries from the user's own area (ldd lines that
    # mention $USER). NOTE(review): assumes text-mode stdout (Python 2);
    # under Python 3 these lines would be bytes -- confirm before porting.
    process = subprocess.Popen( "ldd %s " % os.path.abspath(DelExe) , shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    for l in process.stdout:
        if os.getenv('USER') in l:
            # "libfoo.so => /path/libfoo.so (0x...)" -> take the path field.
            relist.append(l.strip().split(' ')[2])
    return relist
if __name__ == "__main__":
    # Entry point: stage everything and submit all configured condor jobs.
    my_process()
| |
# Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys
sys.path.append( "../glapi_parser" )
import apiutil

# Emit the Chromium copyright banner for the generated C file.
apiutil.CopyrightC()

# Preamble of the generated unpacker translation unit (Python 2 print
# statement; the triple-quoted block is emitted verbatim to stdout).
print """/* DO NOT EDIT! THIS CODE IS AUTOGENERATED BY unpack.py */

#include "unpacker.h"
#include "cr_opcodes.h"
#include "cr_error.h"
#include "cr_mem.h"
#include "cr_spu.h"
#include "unpack_extend.h"
#include <stdio.h>
#include <memory.h>

const unsigned char *cr_unpackData = NULL;
SPUDispatchTable cr_unpackDispatch;

static void crUnpackExtend(void);
"""
#
# Useful functions
#
def ReadData(offset, arg_type):
    """Emit a READ_DOUBLE or READ_DATA call for pulling a GL function
    argument out of the buffer's operand area."""
    # 8-byte floating types need the alignment-safe double reader.
    if arg_type in ("GLdouble", "GLclampd"):
        return "READ_DOUBLE( %d )" % offset
    return "READ_DATA( %d, %s )" % (offset, arg_type)
def FindReturnPointer( return_type, params ):
    """For GL functions that return values (either as the return value or
    through a pointer parameter) emit a SET_RETURN_PTR call."""
    arg_len = apiutil.PacketLength( params )
    if (return_type != 'void'):
        print '\tSET_RETURN_PTR( %d );' % (arg_len + 8) # extended opcode plus packet length
    else:
        # Void functions return through their final void* parameter, so
        # the return pointer sits one packed void* before the end.
        paramList = [ ('foo', 'void *', 0) ]
        print '\tSET_RETURN_PTR( %d );' % (arg_len + 8 - apiutil.PacketLength(paramList))
def FindWritebackPointer( return_type, params ):
    """Emit a SET_WRITEBACK_PTR call."""
    arg_len = apiutil.PacketLength( params )
    if return_type != 'void':
        # Non-void functions carry an extra packed void* (the return
        # pointer) before the writeback pointer.
        paramList = [ ('foo', 'void *', 0) ]
        arg_len += apiutil.PacketLength( paramList )
    print '\tSET_WRITEBACK_PTR( %d );' % (arg_len + 8) # extended opcode plus packet length
def MakeNormalCall( return_type, func_name, params, counter_init = 0 ):
    """Emit C code that reads each argument of *func_name* from the packet
    buffer (starting at offset *counter_init*) and then calls the function
    through cr_unpackDispatch. Pointer arguments become NULL in the call;
    "get" functions additionally receive return/writeback pointers."""
    counter = counter_init
    copy_of_params = params[:]

    for i in range( 0, len(params) ):
        (name, type, vecSize) = params[i]
        if apiutil.IsPointer(copy_of_params[i][1]):
            # Pointers can't be unpacked generically: pass NULL and treat
            # the slot as a packed void for offset bookkeeping.
            params[i] = ('NULL', type, vecSize)
            copy_of_params[i] = (copy_of_params[i][0], 'void', 0)
            if not "get" in apiutil.Properties(func_name):
                print '\tcrError( "%s needs to be special cased!" );' % func_name
        else:
            # Declare a local read from the operand area at the running offset.
            print "\t%s %s = %s;" % ( copy_of_params[i][1], name, ReadData( counter, copy_of_params[i][1] ) )
        counter += apiutil.sizeof(copy_of_params[i][1])

    if ("get" in apiutil.Properties(func_name)):
        FindReturnPointer( return_type, params )
        FindWritebackPointer( return_type, params )

    if return_type != "void":
        # Discard the return value explicitly.
        print "\t(void)",
    else:
        print "\t",
    print "cr_unpackDispatch.%s( %s );" % (func_name, apiutil.MakeCallString(params))
def MakeVectorCall( return_type, func_name, arg_type ):
    """Convert a call like glVertex3f to glVertex3fv.

    For 8-byte types (GLdouble/GLclampd) the operand buffer may be
    mis-aligned, so unless CR_UNALIGNED_ACCESS_OKAY is defined each
    element is copied out with READ_DOUBLE and the scalar entry point is
    called instead of the vector one.
    """
    vec_func = apiutil.VectorFunction(func_name)
    params = apiutil.Parameters(vec_func)
    assert len(params) == 1
    (arg_name, vecType, vecSize) = params[0]
    if arg_type == "GLdouble" or arg_type == "GLclampd":
        # Aligned-access platforms may point straight into the buffer.
        print "#ifdef CR_UNALIGNED_ACCESS_OKAY"
        print "\tcr_unpackDispatch.%s((%s) cr_unpackData);" % (vec_func, vecType)
        print "#else"
        # Unaligned path: copy each 8-byte element into a local.
        for index in range(0, vecSize):
            print "\tGLdouble v" + `index` + " = READ_DOUBLE(", `index * 8`, ");"
        if return_type != "void":
            print "\t(void) cr_unpackDispatch.%s(" % func_name,
        else:
            print "\tcr_unpackDispatch.%s(" % func_name,
        # Emit the comma-separated argument list v0, v1, ...
        for index in range(0, vecSize):
            print "v" + `index`,
            if index != vecSize - 1:
                print ",",
        print ");"
        print "#endif"
    else:
        # No alignment problem: call the vector variant on the buffer.
        print "\tcr_unpackDispatch.%s((%s) cr_unpackData);" % (vec_func, vecType)
# Parse the API spec once; `keys` drives all code generation below.
keys = apiutil.GetDispatchedFunctions("../glapi_parser/APIspec.txt")

#
# Generate unpack functions for all the simple functions.
#
for func_name in keys:
    # Skip functions that aren't packed, and functions with hand-written
    # unpackers (listed in the 'unpacker' special file).
    if (not "pack" in apiutil.ChromiumProps(func_name) or
        apiutil.FindSpecial( "unpacker", func_name )):
        continue
    params = apiutil.Parameters(func_name)
    return_type = apiutil.ReturnType(func_name)
    print "static void crUnpack%s(void)" % func_name
    print "{"
    vector_func = apiutil.VectorFunction(func_name)
    if (vector_func and len(apiutil.Parameters(vector_func)) == 1):
        # A single-pointer vector variant exists (e.g. glVertex3fv):
        # unpack by pointing into the buffer where possible.
        MakeVectorCall( return_type, func_name, params[0][1] )
    else:
        MakeNormalCall( return_type, func_name, params )
    packet_length = apiutil.PacketLength( params )
    if packet_length == 0:
        print "\tINCR_DATA_PTR_NO_ARGS( );"
    else:
        print "\tINCR_DATA_PTR( %d );" % packet_length
    print "}\n"

#
# Emit some code
#
print """
typedef struct __dispatchNode {
	const unsigned char *unpackData;
	struct __dispatchNode *next;
} DispatchNode;

static DispatchNode *unpackStack = NULL;

static SPUDispatchTable *cr_lastDispatch = NULL;

void crUnpackPush(void)
{
	DispatchNode *node = (DispatchNode*)crAlloc( sizeof( *node ) );
	node->next = unpackStack;
	unpackStack = node;
	node->unpackData = cr_unpackData;
}

void crUnpackPop(void)
{
	DispatchNode *node = unpackStack;

	if (!node)
	{
		crError( "crUnpackPop called with an empty stack!" );
	}

	unpackStack = node->next;
	cr_unpackData = node->unpackData;
	crFree( node );
}

void crUnpack( const void *data, const void *opcodes,
			   unsigned int num_opcodes, SPUDispatchTable *table )
{
	unsigned int i;
	const unsigned char *unpack_opcodes;
	if (table != cr_lastDispatch)
	{
		crSPUCopyDispatchTable( &cr_unpackDispatch, table );
		cr_lastDispatch = table;
	}

	unpack_opcodes = (const unsigned char *)opcodes;
	cr_unpackData = (const unsigned char *)data;

	for (i = 0 ; i < num_opcodes ; i++)
	{
		/*crDebug(\"Unpacking opcode \%d\", *unpack_opcodes);*/
		switch( *unpack_opcodes )
		{"""

#
# Emit switch cases for all unextended opcodes
#
for func_name in keys:
    if "pack" in apiutil.ChromiumProps(func_name):
        # Trailing comma keeps the case label and the call on one line.
        print '\t\t\tcase %s:' % apiutil.OpcodeName( func_name ),
        print 'crUnpack%s(); break;' % func_name

# NOTE: opcodes are consumed back-to-front (unpack_opcodes--) while the
# operand data pointer advances forward.
print """
			case CR_EXTEND_OPCODE: crUnpackExtend(); break;
			default:
				crError( "Unknown opcode: %d", *unpack_opcodes );
				break;
		}
		unpack_opcodes--;
	}
}"""

#
# Emit unpack functions for extended opcodes, non-special functions only.
#
for func_name in keys:
    if ("extpack" in apiutil.ChromiumProps(func_name)
        and not apiutil.FindSpecial("unpacker", func_name)):
        return_type = apiutil.ReturnType(func_name)
        params = apiutil.Parameters(func_name)
        print 'static void crUnpackExtend%s(void)' % func_name
        print '{'
        # Operands start at byte 8: packet length (4) + extend opcode (4).
        MakeNormalCall( return_type, func_name, params, 8 )
        print '}\n'

print 'static void crUnpackExtend(void)'
print '{'
print '\tGLenum extend_opcode = %s;' % ReadData( 4, 'GLenum' );
print ''
print '\t/*crDebug(\"Unpacking extended opcode \%d", extend_opcode);*/'
print '\tswitch( extend_opcode )'
print '\t{'

#
# Emit switch statement for extended opcodes
#
for func_name in keys:
    if "extpack" in apiutil.ChromiumProps(func_name):
        print '\t\tcase %s:' % apiutil.ExtendedOpcodeName( func_name )
        print '\t\t\tcrUnpackExtend%s( );' % func_name
        print '\t\t\tbreak;'

print """		default:
			crError( "Unknown extended opcode: %d", (int) extend_opcode );
			break;
	}
	INCR_VAR_PTR();
}"""
| |
#!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: rax_cbs
short_description: Manipulate Rackspace Cloud Block Storage Volumes
description:
- Manipulate Rackspace Cloud Block Storage Volumes
version_added: 1.6
options:
description:
description:
- Description to give the volume being created
default: null
image:
description:
- image to use for bootable volumes. Can be an C(id), C(human_id) or
C(name). This option requires C(pyrax>=1.9.3)
default: null
version_added: 1.9
meta:
description:
- A hash of metadata to associate with the volume
default: null
name:
description:
- Name to give the volume being created
default: null
required: true
size:
description:
- Size of the volume to create in Gigabytes
default: 100
required: true
snapshot_id:
description:
- The id of the snapshot to create the volume from
default: null
state:
description:
- Indicate desired state of the resource
choices:
- present
- absent
default: present
required: true
volume_type:
description:
- Type of the volume being created
choices:
- SATA
- SSD
default: SATA
required: true
wait:
description:
- wait for the volume to be in state 'available' before returning
default: "no"
choices:
- "yes"
- "no"
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
author:
- "Christopher H. Laco (@claco)"
- "Matt Martz (@sivel)"
extends_documentation_fragment: rackspace.openstack
'''
EXAMPLES = '''
- name: Build a Block Storage Volume
gather_facts: False
hosts: local
connection: local
tasks:
- name: Storage volume create request
local_action:
module: rax_cbs
credentials: ~/.raxpub
name: my-volume
description: My Volume
volume_type: SSD
size: 150
region: DFW
wait: yes
state: present
meta:
app: my-cool-app
register: my_volume
'''
from distutils.version import LooseVersion
try:
import pyrax
HAS_PYRAX = True
except ImportError:
HAS_PYRAX = False
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.rax import (VOLUME_STATUS, rax_argument_spec, rax_find_image, rax_find_volume,
rax_required_together, rax_to_dict, setup_rax_module)
def cloud_block_storage(module, state, name, description, meta, size,
                        snapshot_id, volume_type, wait, wait_timeout,
                        image):
    """Create or delete a Rackspace Cloud Block Storage volume.

    Terminates the module run via module.exit_json/fail_json; it never
    returns a value to the caller.
    """
    changed = False
    volume = None
    instance = {}

    cbs = pyrax.cloud_blockstorage

    if cbs is None:
        module.fail_json(msg='Failed to instantiate client. This '
                             'typically indicates an invalid region or an '
                             'incorrectly capitalized region name.')

    if image:
        # pyrax<1.9.3 did not have support for specifying an image when
        # creating a volume which is required for bootable volumes
        if LooseVersion(pyrax.version.version) < LooseVersion('1.9.3'):
            module.fail_json(msg='Creating a bootable volume requires '
                                 'pyrax>=1.9.3')
        image = rax_find_image(module, pyrax, image)

    volume = rax_find_volume(module, pyrax, name)

    if state == 'present':
        if not volume:
            kwargs = dict()
            if image:
                kwargs['image'] = image
            try:
                volume = cbs.create(name, size=size, volume_type=volume_type,
                                    description=description,
                                    metadata=meta,
                                    snapshot_id=snapshot_id, **kwargs)
                changed = True
            except Exception as e:
                # BUGFIX: Exception.message was removed in Python 3;
                # format the exception object itself instead.
                module.fail_json(msg='%s' % e)
        else:
            if wait:
                attempts = wait_timeout // 5
                pyrax.utils.wait_for_build(volume, interval=5,
                                           attempts=attempts)

        volume.get()
        instance = rax_to_dict(volume)

        result = dict(changed=changed, volume=instance)

        if volume.status == 'error':
            result['msg'] = '%s failed to build' % volume.id
        elif wait and volume.status not in VOLUME_STATUS:
            result['msg'] = 'Timeout waiting on %s' % volume.id

        if 'msg' in result:
            module.fail_json(**result)
        else:
            module.exit_json(**result)

    elif state == 'absent':
        if volume:
            instance = rax_to_dict(volume)
            try:
                volume.delete()
                changed = True
            except Exception as e:
                # BUGFIX: same Python 3 .message fix as above.
                module.fail_json(msg='%s' % e)

    module.exit_json(changed=changed, volume=instance)
def main():
    """Ansible entry point: build the argument spec, validate input, and
    hand off to cloud_block_storage()."""
    argument_spec = rax_argument_spec()
    argument_spec.update(
        dict(
            description=dict(type='str'),
            image=dict(type='str'),
            meta=dict(type='dict', default={}),
            name=dict(required=True),
            size=dict(type='int', default=100),
            snapshot_id=dict(),
            state=dict(default='present', choices=['present', 'absent']),
            volume_type=dict(choices=['SSD', 'SATA'], default='SATA'),
            wait=dict(type='bool', default=False),
            wait_timeout=dict(type='int', default=300)
        )
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=rax_required_together()
    )

    if not HAS_PYRAX:
        module.fail_json(msg='pyrax is required for this module')

    params = module.params

    setup_rax_module(module, pyrax)

    cloud_block_storage(module,
                        params.get('state'),
                        params.get('name'),
                        params.get('description'),
                        params.get('meta'),
                        params.get('size'),
                        params.get('snapshot_id'),
                        params.get('volume_type'),
                        params.get('wait'),
                        params.get('wait_timeout'),
                        params.get('image'))


if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Invoke tasks. To run a task, run ``$ invoke <COMMAND>``. To see a list of
commands, run ``$ invoke --list``.
"""
import os
import sys
import code
import json
import platform
import subprocess
import logging
from time import sleep
import invoke
from invoke import Collection
from website import settings
from utils import pip_install, bin_prefix
# Silence invoke's own logging; the tasks echo their commands themselves.
logging.getLogger('invoke').setLevel(logging.CRITICAL)

# gets the root path for all the scripts that rely on it
HERE = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '..'))
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
CONSTRAINTS_PATH = os.path.join(HERE, 'requirements', 'constraints.txt')

# Use the rednose nose plugin (colored output) when it is installed.
try:
    __import__('rednose')
except ImportError:
    TEST_CMD = 'nosetests'
else:
    TEST_CMD = 'nosetests --rednose'

# Root invoke namespace; tasks register themselves via task() below.
ns = Collection()

# The admin tasks are optional: skip them when the admin package (or its
# dependencies) cannot be imported.
try:
    from admin import tasks as admin_tasks
    ns.add_collection(Collection.from_module(admin_tasks), name='admin')
except ImportError:
    pass
def task(*args, **kwargs):
    """Drop-in replacement for invoke.task that also registers the task
    on the module-level root namespace ``ns``.

    Supports both the bare form (``@task``) and the parameterized form
    (``@task(aliases=[...])``).
    """
    # Bare form: @task applied directly to a callable.
    if len(args) == 1 and callable(args[0]):
        registered = invoke.task(args[0])
        ns.add_task(registered)
        return registered

    # Parameterized form: return a decorator that registers the task.
    def wrap(func):
        registered = invoke.task(func, *args, **kwargs)
        ns.add_task(registered)
        return registered
    return wrap
def _monkey_patch_werkzeug_reloader_for_docker():
    """Patch werkzeug's auto-reloader for Docker bind mounts: watch only
    the project root (and paths beneath it), and schedule non-recursive
    watchdog observers — recursive inotify watches over docker-compose
    volumes are pathologically slow."""
    from werkzeug import _reloader
    from werkzeug._reloader import _find_observable_paths

    def _find_common_roots(paths):
        """Out of some paths it finds the common roots that need monitoring."""
        # rv = orig_docker_find_common_roots(paths)
        rv = set()
        root = os.getcwd()
        rv.add(root)
        # Keep only paths inside the working directory.
        for path in paths:
            if path.startswith(root):
                rv.add(path)
        return rv
    _reloader._find_common_roots = _find_common_roots

    def run(self):
        # Re-implementation of werkzeug's WatchdogReloaderLoop.run, with
        # recursive=False as the only functional change (see FIX below).
        watches = {}
        observer = self.observer_class()
        observer.start()

        while not self.should_reload:
            to_delete = set(watches)
            paths = _find_observable_paths(self.extra_files)
            for path in paths:
                if path not in watches:
                    try:
                        watches[path] = observer.schedule(
                            self.event_handler, path, recursive=False)  # FIX: docker-compose performance issue
                    except OSError:
                        # "Path is not a directory". We could filter out
                        # those paths beforehand, but that would cause
                        # additional stat calls.
                        watches[path] = None
                to_delete.discard(path)
            # Drop watches for paths that disappeared this round.
            for path in to_delete:
                watch = watches.pop(path, None)
                if watch is not None:
                    observer.unschedule(watch)
            self.observable_paths = paths
            self._sleep(self.interval)

        # Exit code 3 tells the reloader parent process to restart us.
        sys.exit(3)
    _reloader.WatchdogReloaderLoop.run = run
@task
def server(ctx, host=None, port=5000, debug=True, gitlogs=False):
    """Run the app server."""
    # Under the werkzeug reloader this module runs twice; only the child
    # process (WERKZEUG_RUN_MAIN == 'true') — or a non-debug run — should
    # fully initialize the OSF app.
    if os.environ.get('WERKZEUG_RUN_MAIN') == 'true' or not debug:
        if os.environ.get('WEB_REMOTE_DEBUG', None):
            import pydevd
            # Remote debugging inside docker needs the patched reloader.
            _monkey_patch_werkzeug_reloader_for_docker()
            # e.g. '127.0.0.1:5678'
            remote_parts = os.environ.get('WEB_REMOTE_DEBUG').split(':')
            pydevd.settrace(remote_parts[0], port=int(remote_parts[1]), suspend=False, stdoutToServer=True, stderrToServer=True)

        if gitlogs:
            git_logs(ctx)
        from website.app import init_app
        os.environ['DJANGO_SETTINGS_MODULE'] = 'api.base.settings'
        app = init_app(set_backends=True, routes=True)
        settings.API_SERVER_PORT = port
    else:
        # Reloader parent process: a bare flask app object is enough.
        from framework.flask import app

    context = None
    if settings.SECURE_MODE:
        context = (settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
    app.run(host=host, port=port, debug=debug, threaded=debug, extra_files=[settings.ASSET_HASH_PATH], ssl_context=context)
@task
def git_logs(ctx, branch=None):
    """Gather git log metadata (delegates to scripts.meta.gatherer)."""
    from scripts.meta import gatherer
    gatherer.main(branch=branch)
@task
def apiserver(ctx, port=8000, wait=True, autoreload=True, host='127.0.0.1', pty=True):
    """Run the API server.

    With wait=True the task blocks in the foreground; with wait=False it
    returns a subprocess.Popen handle so callers (e.g. test_varnish) can
    manage the server's lifetime themselves.
    """
    env = os.environ.copy()
    parts = [
        'DJANGO_SETTINGS_MODULE=api.base.settings',
        '{}'.format(sys.executable),
        'manage.py runserver',
        '{}:{}'.format(host, port),
        '--nothreading',
    ]
    cmd = ' '.join(parts)
    if not autoreload:
        cmd += ' --noreload'
    if settings.SECURE_MODE:
        # runsslserver (django-sslserver) needs the certificate pair.
        cmd = cmd.replace('runserver', 'runsslserver')
        cmd += ' --certificate {} --key {}'.format(settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)

    if not wait:
        from subprocess import Popen
        return Popen(cmd, shell=True, env=env)
    return ctx.run(cmd, echo=True, pty=pty)
@task
def adminserver(ctx, port=8001, host='127.0.0.1', pty=True):
    """Run the Admin server on host:port (SSL when SECURE_MODE is set)."""
    settings_env = 'DJANGO_SETTINGS_MODULE="admin.base.settings"'
    command = '{} python manage.py runserver {}:{} --nothreading'.format(
        settings_env, host, port)
    if settings.SECURE_MODE:
        command = command.replace('runserver', 'runsslserver')
        command += ' --certificate {} --key {}'.format(
            settings.OSF_SERVER_CERT, settings.OSF_SERVER_KEY)
    ctx.run(command, echo=True, pty=pty)
SHELL_BANNER = """
{version}
+--------------------------------------------------+
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
|ccccccccccccccccccccccOOOOOOOccccccccccccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccOOOOOOOOOOOOccccccccccccccccccc|
|cccccccccOOOOOOOcccOOOOOOOOOOOOcccOOOOOOOccccccccc|
|cccccccOOOOOOOOOOccOOOOOsssOOOOcOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOccOOssssssOOccOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOsOcOssssssOOOOOOOOOOOOOOOccccccc|
|cccccccOOOOOOOOOOOssccOOOOOOcOssOOOOOOOOOOcccccccc|
|cccccccccOOOOOOOsssOccccccccccOssOOOOOOOcccccccccc|
|cccccOOOccccOOssssOccccccccccccOssssOccccOOOcccccc|
|ccOOOOOOOOOOOOOccccccccccccccccccccOOOOOOOOOOOOccc|
|cOOOOOOOOssssssOcccccccccccccccccOOssssssOOOOOOOOc|
|cOOOOOOOssssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOsssssssOccccccccccccccccOsssssssOOOOOOOOc|
|cOOOOOOOOOssssOOccccccccccccccccccOsssssOOOOOOOOcc|
|cccOOOOOOOOOOOOOOOccccccccccccccOOOOOOOOOOOOOOOccc|
|ccccccccccccOOssssOOccccccccccOssssOOOcccccccccccc|
|ccccccccOOOOOOOOOssOccccOOcccOsssOOOOOOOOccccccccc|
|cccccccOOOOOOOOOOOsOcOOssssOcOssOOOOOOOOOOOccccccc|
|ccccccOOOOOOOOOOOOOOOsssssssOcOOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOOcOssssssssOcOOOOOOOOOOOOOcccccc|
|ccccccOOOOOOOOOOOOcccOssssssOcccOOOOOOOOOOOccccccc|
|ccccccccOOOOOOOOOcccOOOOOOOOOOcccOOOOOOOOOcccccccc|
|ccccccccccOOOOcccccOOOOOOOOOOOcccccOOOOccccccccccc|
|ccccccccccccccccccccOOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccOOOOOOOOOcccccccccccccccccccc|
|cccccccccccccccccccccccOOOOccccccccccccccccccccccc|
|cccccccccccccccccccccccccccccccccccccccccccccccccc|
+--------------------------------------------------+
Welcome to the OSF Python Shell. Happy hacking!
{transaction}
Available variables:
{context}
"""
TRANSACTION_WARNING = """
*** TRANSACTION AUTOMATICALLY STARTED ***
To persist changes run 'commit()'.
Keep in mind that changing documents will lock them.
This feature can be disabled with the '--no-transaction' flag.
"""
def make_shell_context(auto_transact=True):
    """Build the namespace exposed inside the interactive OSF shell.

    When auto_transact is True a transaction is opened before returning,
    and the commit()/rollback() helpers open a fresh one after finishing
    the current transaction.
    """
    from modularodm import Q
    from framework.auth import User, Auth
    from framework.mongo import database
    from website.app import init_app
    from website.project.model import Node
    from website import models  # all models
    from website import settings
    import requests
    from framework.transactions import commands
    from framework.transactions import context as tcontext
    app = init_app()

    def commit():
        # Commit the open transaction, then start a new one so the shell
        # always has one active.
        commands.commit()
        print('Transaction committed.')
        if auto_transact:
            commands.begin()
            print('New transaction opened.')

    def rollback():
        # Discard the open transaction, then start a new one.
        commands.rollback()
        print('Transaction rolled back.')
        if auto_transact:
            commands.begin()
            print('New transaction opened.')

    context = {
        'transaction': tcontext.TokuTransaction,
        'start_transaction': commands.begin,
        'commit': commit,
        'rollback': rollback,
        'app': app,
        'db': database,
        'User': User,
        'Auth': Auth,
        'Node': Node,
        'Q': Q,
        'models': models,
        'run_tests': test,
        'rget': requests.get,
        'rpost': requests.post,
        'rdelete': requests.delete,
        'rput': requests.put,
        'settings': settings,
    }
    try:  # Add a fake factory for generating fake names, emails, etc.
        from faker import Factory
        fake = Factory.create()
        context['fake'] = fake
    except ImportError:
        pass
    if auto_transact:
        commands.begin()
    return context
def format_context(context):
    """Render a shell-context dict as newline-separated 'name: repr(value)'
    lines for the shell banner."""
    return '\n'.join(
        '{0}: {1!r}'.format(key, value) for key, value in context.items()
    )
# Shell command adapted from Flask-Script. See NOTICE for license info.
@task
def shell(ctx, transaction=True):
    """Open an interactive OSF shell — IPython when installed (handling
    both the pre-0.11 and modern embed APIs), otherwise a plain
    code.interact() shell — seeded with make_shell_context()."""
    context = make_shell_context(auto_transact=transaction)
    banner = SHELL_BANNER.format(version=sys.version,
                                 context=format_context(context),
                                 transaction=TRANSACTION_WARNING if transaction else ''
                                 )
    try:
        try:
            # 0.10.x
            from IPython.Shell import IPShellEmbed
            ipshell = IPShellEmbed(banner=banner)
            ipshell(global_ns={}, local_ns=context)
        except ImportError:
            # 0.12+
            from IPython import embed
            embed(banner1=banner, user_ns=context)
        return
    except ImportError:
        pass
    # fallback to basic python shell
    code.interact(banner, local=context)
    return
@task(aliases=['mongo'])
def mongoserver(ctx, daemon=False, config=None):
    """Run the mongod process.

    When no config file is given, fall back to the platform's default
    tokumx config location, if one is known for this OS.
    """
    if not config:
        platform_configs = {
            'darwin': '/usr/local/etc/tokumx.conf',  # default for homebrew install
            'linux': '/etc/tokumx.conf',
        }
        # FIX: renamed from 'platform' so this local no longer shadows the
        # stdlib `platform` module imported at the top of the file.
        # NOTE(review): on Python 2 sys.platform is 'linux2', which misses
        # the 'linux' key — preexisting behavior, intentionally unchanged.
        platform_key = str(sys.platform).lower()
        config = platform_configs.get(platform_key)
    port = settings.DB_PORT
    cmd = 'mongod --port {0}'.format(port)
    if config:
        cmd += ' --config {0}'.format(config)
    if daemon:
        cmd += ' --fork'
    ctx.run(cmd, echo=True)
@task(aliases=['mongoshell'])
def mongoclient(ctx):
    """Open an interactive mongo shell connected to the OSF database."""
    command = 'mongo {db} --port {port}'.format(
        db=settings.DB_NAME, port=settings.DB_PORT)
    ctx.run(command, pty=True)
@task
def mongodump(ctx, path):
    """Back up the contents of the running OSF database into ``path``."""
    db = settings.DB_NAME
    port = settings.DB_PORT

    # BUGFIX: a stray pty=True used to be passed to str.format(), where it
    # was silently ignored — it is not part of the command template.
    cmd = 'mongodump --db {db} --port {port} --out {path}'.format(
        db=db,
        port=port,
        path=path)

    if settings.DB_USER:
        cmd += ' --username {0}'.format(settings.DB_USER)
    if settings.DB_PASS:
        cmd += ' --password {0}'.format(settings.DB_PASS)

    ctx.run(cmd, echo=True)

    print()
    print('To restore from the dumped database, run `invoke mongorestore {0}`'.format(
        os.path.join(path, settings.DB_NAME)))
@task
def mongorestore(ctx, path, drop=False):
    """Restores the running OSF database with the contents of the database at
    the location given its argument.

    By default, the contents of the specified database are added to
    the existing database. The `--drop` option will cause the existing database
    to be dropped.

    A caveat: if you `invoke mongodump {path}`, you must restore with
    `invoke mongorestore {path}/{settings.DB_NAME}, as that's where the
    database dump will be stored.
    """
    db = settings.DB_NAME
    port = settings.DB_PORT

    # BUGFIX: a stray pty=True used to be passed to str.format(), where it
    # was silently ignored — it is not part of the command template.
    cmd = 'mongorestore --db {db} --port {port}'.format(
        db=db,
        port=port)

    if settings.DB_USER:
        cmd += ' --username {0}'.format(settings.DB_USER)
    if settings.DB_PASS:
        cmd += ' --password {0}'.format(settings.DB_PASS)

    if drop:
        cmd += ' --drop'

    cmd += ' ' + path
    ctx.run(cmd, echo=True)
@task
def sharejs(ctx, host=None, port=None, db_url=None, cors_allow_origin=None):
    """Start a local ShareJS server, forwarding any provided options to
    shareServer.js through its environment."""
    env_options = [
        ('SHAREJS_SERVER_HOST', host),
        ('SHAREJS_SERVER_PORT', port),
        ('SHAREJS_DB_URL', db_url),
        ('SHAREJS_CORS_ALLOW_ORIGIN', cors_allow_origin),
        ('SHAREJS_SENTRY_DSN', settings.SENTRY_DSN),
    ]
    for var, value in env_options:
        if value:
            os.environ[var] = value

    share_server = os.path.join(settings.ADDON_PATH, 'wiki', 'shareServer.js')
    ctx.run('node {0}'.format(share_server))
@task(aliases=['celery'])
def celery_worker(ctx, level='debug', hostname=None, beat=False):
    """Run a Celery worker, optionally with an embedded beat scheduler."""
    command = 'celery worker -A framework.celery_tasks -l {0}'.format(level)
    if hostname:
        command += ' --hostname={}'.format(hostname)
    # beat sets up a cron like scheduler, refer to website/settings
    if beat:
        command += ' --beat'
    ctx.run(bin_prefix(command), pty=True)
@task(aliases=['beat'])
def celery_beat(ctx, level='debug', schedule=None):
    """Run the Celery beat scheduler (cron-like; see website/settings)."""
    # --pidfile= (empty) disables the pid file.
    command = 'celery beat -A framework.celery_tasks -l {0} --pidfile='.format(level)
    if schedule:
        command += ' --schedule={}'.format(schedule)
    ctx.run(bin_prefix(command), pty=True)
@task
def rabbitmq(ctx):
    """Start a local rabbitmq server.

    NOTE: this is for development only. The production environment should start
    the server as a daemon.
    """
    ctx.run('rabbitmq-server', pty=True)
@task(aliases=['elastic'])
def elasticsearch(ctx):
    """Start a local elasticsearch server

    NOTE: Requires that elasticsearch is installed. See README for instructions
    """
    # Cleanup: removed the redundant function-local `import platform`;
    # the module is already imported at the top of this file.
    # NOTE(review): platform.linux_distribution() was removed in
    # Python 3.8 — this task only works on older interpreters on Linux.
    if platform.linux_distribution()[0] == 'Ubuntu':
        ctx.run('sudo service elasticsearch start')
    elif platform.system() == 'Darwin':  # Mac OSX
        ctx.run('elasticsearch')
    else:
        print('Your system is not recognized, you will have to start elasticsearch manually')
@task
def migrate_search(ctx, delete=False, index=settings.ELASTIC_INDEX):
    """Migrate the search-enabled models."""
    # NOTE: the default for `index` is evaluated once at import time.
    from website.search_migration.migrate import migrate
    migrate(delete, index=index)
@task
def rebuild_search(ctx):
    """Delete and recreate the index for elasticsearch"""
    endpoints = dict(uri=settings.ELASTIC_URI, index=settings.ELASTIC_INDEX)
    # Drop every index matching the prefix, recreate the main one, then
    # repopulate it from the search-enabled models.
    ctx.run('curl -s -XDELETE {uri}/{index}*'.format(**endpoints))
    ctx.run('curl -s -XPUT {uri}/{index}'.format(**endpoints))
    migrate_search(ctx)
@task
def mailserver(ctx, port=1025):
    """Run a debugging SMTP server that prints messages to stdout."""
    command = 'python -m smtpd -n -c DebuggingServer localhost:{port}'.format(port=port)
    ctx.run(bin_prefix(command), pty=True)
@task
def jshint(ctx):
    """Run the JSHint syntax check over the website's static JS."""
    static_js = os.path.join(HERE, 'website', 'static', 'js')
    ctx.run('jshint {}'.format(static_js), echo=True)
@task(aliases=['flake8'])
def flake(ctx):
    """Run the flake8 linter over the whole repository."""
    ctx.run('flake8 .', echo=True)
@task(aliases=['req'])
def requirements(ctx, base=False, addons=False, release=False, dev=False, metrics=False, quick=False):
    """Install python dependencies.

    Examples:
        inv requirements
        inv requirements --quick

    Quick requirements are, in order, addons, dev and the base requirements. You should be able to use --quick for
    day to day development.

    By default, base requirements will run. However, if any set of addons, release, dev, or metrics are chosen, base
    will have to be mentioned explicitly in order to run. This is to remain compatible with previous usages. Release
    requirements will prevent dev, metrics, and base from running.
    """
    if quick:
        base = True
        addons = True
        dev = True
    # No specific set requested: install the base requirements.
    if not(addons or dev or metrics):
        base = True
    if release or addons:
        addon_requirements(ctx)
    # "release" takes precedence
    if release:
        req_file = os.path.join(HERE, 'requirements', 'release.txt')
        ctx.run(
            pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
            echo=True
        )
    else:
        if dev:  # then dev requirements
            req_file = os.path.join(HERE, 'requirements', 'dev.txt')
            ctx.run(
                pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
                echo=True
            )
        if metrics:  # then metrics requirements
            req_file = os.path.join(HERE, 'requirements', 'metrics.txt')
            ctx.run(
                pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
                echo=True
            )
        if base:  # then base requirements
            req_file = os.path.join(HERE, 'requirements.txt')
            ctx.run(
                pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
                echo=True
            )
    # fix URITemplate name conflict h/t @github
    ctx.run('pip uninstall uritemplate.py --yes || true')
    ctx.run('pip install --no-cache-dir uritemplate.py==0.3.0')
@task
def test_module(ctx, module=None, verbosity=2):
    """Run nosetests against a single module or a list of modules."""
    # Allow selecting specific submodule
    targets = ' '.join(module) if isinstance(module, list) else module
    flags = ' --verbosity={0} -s {1}'.format(verbosity, targets)
    # Use pty so the process buffers "correctly"
    ctx.run(bin_prefix(TEST_CMD) + flags, pty=True)
@task
def test_osf(ctx):
    """Run the OSF test suite."""
    # nosetests over the top-level tests/ package.
    test_module(ctx, module='tests/')
@task
def test_api(ctx):
    """Run the API test suite."""
    # nosetests over the api_tests/ package.
    test_module(ctx, module='api_tests/')
@task
def test_admin(ctx):
    """Run the Admin test suite (via Django's manage.py test, not
    nosetests)."""
    # test_module(ctx, module="admin_tests/")
    # BUGFIX: admin_tasks is only bound when the optional `admin` package
    # imported successfully at the top of this file; fail with a clear
    # message instead of a NameError.
    if 'admin_tasks' not in globals():
        raise RuntimeError('The admin package could not be imported; '
                           'cannot run the Admin test suite.')
    module = 'admin_tests/'
    module_fmt = ' '.join(module) if isinstance(module, list) else module
    admin_tasks.manage(ctx, 'test {}'.format(module_fmt))
@task
def test_varnish(ctx):
    """Run the Varnish test suite."""
    # Start the API server in the background (wait=False returns a Popen),
    # give it a moment to boot, and always kill it afterwards.
    proc = apiserver(ctx, wait=False, autoreload=False)
    try:
        sleep(5)
        test_module(ctx, module='api/caching/tests/test_caching.py')
    finally:
        proc.kill()
@task
def test_addons(ctx):
    """Run the tests of every addon listed in settings.ADDONS_REQUESTED."""
    addon_dirs = [
        os.path.join(settings.BASE_PATH, 'addons', addon)
        for addon in settings.ADDONS_REQUESTED
    ]
    test_module(ctx, module=addon_dirs)
@task
def test(ctx, all=False, syntax=False):
    """
    Run unit tests: OSF (always), plus addons and syntax checks (optional)
    """
    if syntax:
        flake(ctx)
        jshint(ctx)

    test_osf(ctx)
    test_api(ctx)
    test_admin(ctx)

    if all:
        test_addons(ctx)
        # JS tests run headless under PhantomJS.
        karma(ctx, single=True, browsers='PhantomJS')
@task
def test_js(ctx):
    """Lint the JS and run the Karma test suite once under PhantomJS."""
    jshint(ctx)
    karma(ctx, single=True, browsers='PhantomJS')
@task
def test_travis_osf(ctx):
    """
    Run half of the tests to help travis go faster. Lints and Flakes happen everywhere to keep from wasting test time.
    """
    flake(ctx)
    jshint(ctx)
    test_osf(ctx)
    test_addons(ctx)
@task
def test_travis_else(ctx):
    """
    Run other half of the tests to help travis go faster. Lints and Flakes happen everywhere to keep from
    wasting test time.
    """
    flake(ctx)
    jshint(ctx)
    test_api(ctx)
    test_admin(ctx)
@task
def test_travis_varnish(ctx):
    """
    Run the fast and quirky JS tests and varnish tests in isolation
    """
    test_js(ctx)
    test_varnish(ctx)
@task
def karma(ctx, single=False, sauce=False, browsers=None):
    """Run JS tests with Karma. Requires PhantomJS to be installed."""
    karma_bin = os.path.join(
        HERE, 'node_modules', 'karma', 'bin', 'karma'
    )
    pieces = ['{} start'.format(karma_bin)]
    if sauce:
        pieces.append('karma.saucelabs.conf.js')
    if single:
        pieces.append('--single-run')
    # Use browsers if specified on the command-line, otherwise default
    # what's specified in karma.conf.js
    if browsers:
        pieces.append('--browsers {}'.format(browsers))
    ctx.run(' '.join(pieces), echo=True)
@task
def wheelhouse(ctx, addons=False, release=False, dev=False, metrics=False, pty=True):
    """Build wheels for python dependencies.

    Examples:
        inv wheelhouse --dev
        inv wheelhouse --addons
        inv wheelhouse --release
        inv wheelhouse --metrics
    """
    # Addon wheels first (release bundles include them too).
    if release or addons:
        for directory in os.listdir(settings.ADDON_PATH):
            path = os.path.join(settings.ADDON_PATH, directory)
            if os.path.isdir(path):
                req_file = os.path.join(path, 'requirements.txt')
                if os.path.exists(req_file):
                    cmd = 'pip wheel --find-links={} -r {} --wheel-dir={} -c {}'.format(
                        WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH, CONSTRAINTS_PATH,
                    )
                    ctx.run(cmd, pty=pty)
    # Exactly one top-level requirements file is built: release wins,
    # then dev, then metrics, then the base requirements.
    if release:
        req_file = os.path.join(HERE, 'requirements', 'release.txt')
    elif dev:
        req_file = os.path.join(HERE, 'requirements', 'dev.txt')
    elif metrics:
        req_file = os.path.join(HERE, 'requirements', 'metrics.txt')
    else:
        req_file = os.path.join(HERE, 'requirements.txt')
    cmd = 'pip wheel --find-links={} -r {} --wheel-dir={} -c {}'.format(
        WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH, CONSTRAINTS_PATH,
    )
    ctx.run(cmd, pty=pty)
@task
def addon_requirements(ctx):
    """Install the requirements.txt of every addon that has one."""
    for entry in os.listdir(settings.ADDON_PATH):
        addon_dir = os.path.join(settings.ADDON_PATH, entry)
        req_file = os.path.join(addon_dir, 'requirements.txt')
        if not (os.path.isdir(addon_dir) and os.path.isfile(req_file)):
            continue
        print('Installing requirements for {0}'.format(entry))
        ctx.run(
            pip_install(req_file, constraints_file=CONSTRAINTS_PATH),
            echo=True
        )
    print('Finished installing addon requirements')
@task
def travis_addon_settings(ctx):
    """Copy each addon's local-travis.py settings into place as local.py."""
    for directory in os.listdir(settings.ADDON_PATH):
        path = os.path.join(settings.ADDON_PATH, directory, 'settings')
        if os.path.isdir(path):
            try:
                # BUGFIX: the existence probe used a bare open() and leaked
                # the file handle; use a `with` block so it is closed.
                with open(os.path.join(path, 'local-travis.py')):
                    pass
                ctx.run('cp {path}/local-travis.py {path}/local.py'.format(path=path))
            except IOError:
                # No local-travis.py for this addon — skip it.
                pass
@task
def copy_addon_settings(ctx):
    """For each addon without a local.py, copy local-dist.py into place."""
    for directory in os.listdir(settings.ADDON_PATH):
        path = os.path.join(settings.ADDON_PATH, directory, 'settings')
        if os.path.isdir(path) and not os.path.isfile(os.path.join(path, 'local.py')):
            try:
                # BUGFIX: the existence probe used a bare open() and leaked
                # the file handle; use a `with` block so it is closed.
                with open(os.path.join(path, 'local-dist.py')):
                    pass
                ctx.run('cp {path}/local-dist.py {path}/local.py'.format(path=path))
            except IOError:
                # No local-dist.py for this addon — skip it.
                pass
@task
def copy_settings(ctx, addons=False):
    """Create website/settings/local.py from the dist template when it is
    missing; optionally do the same for every addon."""
    # Website settings
    local_settings = 'website/settings/local.py'
    if not os.path.isfile(local_settings):
        print('Creating local.py file')
        ctx.run('cp website/settings/local-dist.py ' + local_settings)
    # Addon settings
    if addons:
        copy_addon_settings(ctx)
@task
def packages(ctx):
    """Install OS-level dependencies (via Homebrew on macOS; Linux is a
    TODO)."""
    brew_commands = [
        'update',
        'upgrade',
        'install libxml2',
        'install libxslt',
        'install elasticsearch',
        'install rabbitmq',
        'install node',
        'tap tokutek/tokumx',
        'install tokumx-bin',
    ]
    if platform.system() == 'Darwin':
        print('Running brew commands')
        for item in brew_commands:
            command = 'brew {cmd}'.format(cmd=item)
            ctx.run(command)
    elif platform.system() == 'Linux':
        # TODO: Write a script similar to brew bundle for Ubuntu
        # e.g., run('sudo apt-get install [list of packages]')
        pass
@task(aliases=['bower'])
def bower_install(ctx):
    """Prune stale and install current bower-managed front-end packages."""
    print('Installing bower-managed packages')
    bower_bin = os.path.join(HERE, 'node_modules', 'bower', 'bin', 'bower')
    for subcommand in ('prune', 'install'):
        ctx.run('{} {} --allow-root'.format(bower_bin, subcommand), echo=True)
@task
def setup(ctx):
    """Creates local settings, and installs requirements"""
    copy_settings(ctx, addons=True)
    packages(ctx)
    requirements(ctx, addons=True, dev=True)
    # Build nodeCategories.json before building assets
    build_js_config_files(ctx)  # presumably defined later in this file — TODO confirm
    assets(ctx, dev=True, watch=False)
@task
def clear_sessions(ctx, months=1, dry_run=False):
    """Clear old sessions (delegates to
    scripts.clear_sessions.clear_sessions_relative with a cutoff of
    `months` months; dry_run only reports)."""
    from website.app import init_app
    init_app(routes=False, set_backends=True)
    from scripts import clear_sessions
    clear_sessions.clear_sessions_relative(months=months, dry_run=dry_run)
# Release tasks

@task
def hotfix(ctx, name, finish=False, push=False):
    """Rename hotfix branch to hotfix/<next-patch-version> and optionally
    finish hotfix.

    The next patch version is computed from the latest git tag on master.
    """
    # BUGFIX: typo 'curent' -> 'current' in the status message.
    print('Checking out master to calculate current version')
    ctx.run('git checkout master')
    latest_version = latest_tag_info()['current_version']
    print('Current version is: {}'.format(latest_version))
    major, minor, patch = latest_version.split('.')
    next_patch_version = '.'.join([major, minor, str(int(patch) + 1)])
    print('Bumping to next patch version: {}'.format(next_patch_version))
    print('Renaming branch...')
    new_branch_name = 'hotfix/{}'.format(next_patch_version)
    ctx.run('git checkout {}'.format(name), echo=True)
    ctx.run('git branch -m {}'.format(new_branch_name), echo=True)
    if finish:
        ctx.run('git flow hotfix finish {}'.format(next_patch_version), echo=True, pty=True)
    if push:
        ctx.run('git push origin master', echo=True)
        ctx.run('git push --tags', echo=True)
        ctx.run('git push origin develop', echo=True)
@task
def feature(ctx, name, finish=False, push=False):
    """Rename the current branch to feature/<name>; optionally finish/push."""
    print('Renaming branch...')
    branch = 'feature/{}'.format(name)
    ctx.run('git branch -m {}'.format(branch), echo=True)
    if finish:
        ctx.run('git flow feature finish {}'.format(name), echo=True)
    if push:
        ctx.run('git push origin develop', echo=True)
# Adapted from bumpversion
def latest_tag_info():
    """Return information about the most recent git tag.

    Parses ``git describe`` output into a dict with keys ``commit_sha``,
    ``distance_to_latest_tag``, ``current_version`` and, when the working
    tree is dirty, ``dirty``. Re-raises ``subprocess.CalledProcessError``
    if ``git describe`` fails.
    """
    try:
        # git-describe doesn't update the git-index, so we do that
        # subprocess.check_output(["git", "update-index", "--refresh"])
        # get info about the latest tag in git
        describe_out = subprocess.check_output([
            'git',
            'describe',
            '--dirty',
            '--tags',
            '--long',
            '--abbrev=40'
        ], stderr=subprocess.STDOUT
        ).decode().split('-')
    except subprocess.CalledProcessError:
        # Bare `raise` preserves the original traceback. The previous code
        # used `raise err` and left an unreachable `return {}` behind it;
        # that dead code has been removed.
        raise
    info = {}
    if describe_out[-1].strip() == 'dirty':
        info['dirty'] = True
        describe_out.pop()
    info['commit_sha'] = describe_out.pop().lstrip('g')
    info['distance_to_latest_tag'] = int(describe_out.pop())
    info['current_version'] = describe_out.pop().lstrip('v')
    # All components of the describe output should now be consumed.
    assert 0 == len(describe_out)
    return info
# Tasks for generating and bundling SSL certificates
# See http://cosdev.readthedocs.org/en/latest/osf/ops.html for details
@task
def generate_key(ctx, domain, bits=2048):
    """Generate a DES3-encrypted RSA private key for ``domain``."""
    ctx.run('openssl genrsa -des3 -out {0}.key {1}'.format(domain, bits))
@task
def generate_key_nopass(ctx, domain):
    """Write a passphrase-free copy of ``<domain>.key``."""
    ctx.run(
        'openssl rsa -in {domain}.key -out {domain}.key.nopass'.format(domain=domain)
    )
@task
def generate_csr(ctx, domain):
    """Create a certificate signing request from the passphrase-free key."""
    ctx.run(
        'openssl req -new -key {domain}.key.nopass -out {domain}.csr'.format(domain=domain)
    )
@task
def request_ssl_cert(ctx, domain):
    """Generate a key, a key with password removed, and a signing request for
    the specified domain.
    Usage:
    > invoke request_ssl_cert pizza.osf.io
    """
    # The three steps must run in this order: key -> nopass key -> CSR.
    for step in (generate_key, generate_key_nopass, generate_csr):
        step(ctx, domain)
@task
def bundle_certs(ctx, domain, cert_path):
    """Concatenate NameCheap certificates in the correct chain order.

    All certificate files must live in ``cert_path``.
    """
    # Chain order matters: leaf first, then intermediates, then root.
    chain = [
        '{0}.crt'.format(domain),
        'COMODORSADomainValidationSecureServerCA.crt',
        'COMODORSAAddTrustCA.crt',
        'AddTrustExternalCARoot.crt',
    ]
    cert_args = ' '.join(os.path.join(cert_path, filename) for filename in chain)
    ctx.run('cat {certs} > {domain}.bundle.crt'.format(certs=cert_args, domain=domain))
@task
def clean_assets(ctx):
    """Remove built JS files."""
    js_path = os.path.join(HERE, 'website', 'static', 'public', 'js')
    ctx.run('rm -rf {0}'.format(js_path), echo=True)
@task(aliases=['pack'])
def webpack(ctx, clean=False, watch=False, dev=False, colors=False):
    """Build static assets with webpack."""
    if clean:
        clean_assets(ctx)
    webpack_bin = os.path.join(HERE, 'node_modules', 'webpack', 'bin', 'webpack.js')
    config_file = 'webpack.dev.config.js' if dev else 'webpack.prod.config.js'
    # Assemble the command; flag order is progress, watch, colors, config.
    args = [webpack_bin, '--progress']
    if watch:
        args.append('--watch')
    if colors:
        args.append('--colors')
    args.append('--config {0}'.format(config_file))
    ctx.run(' '.join(args), echo=True)
@task()
def build_js_config_files(ctx):
    """Write built/nodeCategories.json (consumed by the JS build) from settings."""
    from website import settings
    print('Building JS config files...')
    # json.dump emits text; opening in binary mode ('wb') raises TypeError
    # on Python 3, so use text mode (works on Python 2 as well).
    with open(os.path.join(settings.STATIC_FOLDER, 'built', 'nodeCategories.json'), 'w') as fp:
        json.dump(settings.NODE_CATEGORY_MAP, fp)
    print('...Done.')
@task()
def assets(ctx, dev=False, watch=False, colors=False):
    """Install and build static assets."""
    npm_cmd = 'npm install' if dev else 'npm install --production'
    ctx.run(npm_cmd, echo=True)
    bower_install(ctx)
    build_js_config_files(ctx)
    # Always set clean=False to prevent possible mistakes on prod.
    webpack(ctx, clean=False, watch=watch, dev=dev, colors=colors)
@task
def generate_self_signed(ctx, domain):
    """Generate self-signed SSL key and certificate."""
    ctx.run(
        'openssl req -x509 -nodes -days 365 -newkey rsa:2048'
        ' -keyout {0}.key -out {0}.crt'.format(domain)
    )
@task
def update_citation_styles(ctx):
    """Re-parse citation style files and report how many were processed."""
    from scripts import parse_citation_styles
    print('Parsed {} styles'.format(parse_citation_styles.main()))
@task
def clean(ctx, verbose=False):
    """Delete stray compiled bytecode (*.pyc) files."""
    # NOTE(review): `verbose` is accepted but unused; kept for CLI compatibility.
    ctx.run('find . -name "*.pyc" -delete', echo=True)
@task(default=True)
def usage(ctx):
    """Default task: print the list of available invoke tasks."""
    ctx.run('invoke --list')
### Maintenance Tasks ###
@task
def set_maintenance(ctx, start=None, end=None):
    """Set the time period for the maintenance notice to be displayed.
    If no start or end values are displayed, default to starting now
    and ending 24 hours from now. If no timezone info is passed along,
    everything will be converted to UTC.
    If a given end time results in a start that is after the end, start
    will be changed to be 24 hours before the end time.
    Examples:
        invoke set_maintenance
        invoke set_maintenance --start 2016-03-16T15:41:00-04:00
        invoke set_maintenance --end 2016-03-16T15:41:00-04:00
    """
    # The docstring previously sat *after* this import, so it was a discarded
    # string expression rather than the task's docstring; it is now first.
    # Examples also referenced a nonexistent `set_maintenance_state` task.
    from website.maintenance import set_maintenance, get_maintenance
    set_maintenance(start, end)
    state = get_maintenance()
    print('Maintenance notice up for {} to {}.'.format(state['start'], state['end']))
@task
def unset_maintenance(ctx):
    """Take down the maintenance notice."""
    from website.maintenance import unset_maintenance
    print('Taking down maintenance notice...')
    unset_maintenance()
    print('...Done.')
| |
# Authors: Nicolas Tresegnie <nicolas.tresegnie@gmail.com>
# Sergey Feldman <sergeyfeldman@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numbers
import numpy as np
import numpy.ma as ma
from scipy import sparse
from scipy import stats
from ..base import BaseEstimator, TransformerMixin
from ..utils.sparsefuncs import _get_median
from ..utils.validation import check_is_fitted
from ..utils.validation import FLOAT_DTYPES
from ..utils.fixes import _object_dtype_isnan
from ..utils import is_scalar_nan
from ..utils import check_array
def _check_inputs_dtype(X, missing_values):
if (X.dtype.kind in ("f", "i", "u") and
not isinstance(missing_values, numbers.Real)):
raise ValueError("'X' and 'missing_values' types are expected to be"
" both numerical. Got X.dtype={} and "
" type(missing_values)={}."
.format(X.dtype, type(missing_values)))
def _get_mask(X, value_to_mask):
    """Compute the boolean mask X == missing_values."""
    if not is_scalar_nan(value_to_mask):
        # X == value_to_mask with object dtypes does not always perform
        # element-wise for old versions of numpy
        return np.equal(X, value_to_mask)
    # Masking against NaN: dispatch on the array's dtype kind.
    kind = X.dtype.kind
    if kind == "f":
        return np.isnan(X)
    if kind in ("i", "u"):
        # can't have NaNs in integer array.
        return np.zeros(X.shape, dtype=bool)
    # np.isnan does not work on object dtypes.
    return _object_dtype_isnan(X)
def _most_frequent(array, extra_value, n_repeat):
"""Compute the most frequent value in a 1d array extended with
[extra_value] * n_repeat, where extra_value is assumed to be not part
of the array."""
# Compute the most frequent value in array only
if array.size > 0:
with warnings.catch_warnings():
# stats.mode raises a warning when input array contains objects due
# to incapacity to detect NaNs. Irrelevant here since input array
# has already been NaN-masked.
warnings.simplefilter("ignore", RuntimeWarning)
mode = stats.mode(array)
most_frequent_value = mode[0][0]
most_frequent_count = mode[1][0]
else:
most_frequent_value = 0
most_frequent_count = 0
# Compare to array + [extra_value] * n_repeat
if most_frequent_count == 0 and n_repeat == 0:
return np.nan
elif most_frequent_count < n_repeat:
return extra_value
elif most_frequent_count > n_repeat:
return most_frequent_value
elif most_frequent_count == n_repeat:
# Ties the breaks. Copy the behaviour of scipy.stats.mode
if most_frequent_value < extra_value:
return most_frequent_value
else:
return extra_value
class SimpleImputer(BaseEstimator, TransformerMixin):
    """Imputation transformer for completing missing values.
    Read more in the :ref:`User Guide <impute>`.
    Parameters
    ----------
    missing_values : number, string, np.nan (default) or None
        The placeholder for the missing values. All occurrences of
        `missing_values` will be imputed.
    strategy : string, optional (default="mean")
        The imputation strategy.
        - If "mean", then replace missing values using the mean along
          each column. Can only be used with numeric data.
        - If "median", then replace missing values using the median along
          each column. Can only be used with numeric data.
        - If "most_frequent", then replace missing using the most frequent
          value along each column. Can be used with strings or numeric data.
        - If "constant", then replace missing values with fill_value. Can be
          used with strings or numeric data.
        .. versionadded:: 0.20
           strategy="constant" for fixed value imputation.
    fill_value : string or numerical value, optional (default=None)
        When strategy == "constant", fill_value is used to replace all
        occurrences of missing_values.
        If left to the default, fill_value will be 0 when imputing numerical
        data and "missing_value" for strings or object data types.
    verbose : integer, optional (default=0)
        Controls the verbosity of the imputer.
    copy : boolean, optional (default=True)
        If True, a copy of X will be created. If False, imputation will
        be done in-place whenever possible. Note that, in the following cases,
        a new copy will always be made, even if `copy=False`:
        - If X is not an array of floating values;
        - If X is encoded as a CSR matrix;
        - If add_indicator=True.
    add_indicator : boolean, optional (default=False)
        If True, a `MissingIndicator` transform will stack onto output
        of the imputer's transform. This allows a predictive estimator
        to account for missingness despite imputation. If a feature has no
        missing values at fit/train time, the feature won't appear on
        the missing indicator even if there are missing values at
        transform/test time.
    Attributes
    ----------
    statistics_ : array of shape (n_features,)
        The imputation fill value for each feature.
        Computing statistics can result in `np.nan` values.
        During `transform`, features corresponding to `np.nan`
        statistics will be discarded.
    indicator_ : :class:`sklearn.impute.MissingIndicator`
        Indicator used to add binary indicators for missing values.
        ``None`` if add_indicator is False.
    See also
    --------
    IterativeImputer : Multivariate imputation of missing values.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.impute import SimpleImputer
    >>> imp_mean = SimpleImputer(missing_values=np.nan, strategy='mean')
    >>> imp_mean.fit([[7, 2, 3], [4, np.nan, 6], [10, 5, 9]])
    SimpleImputer()
    >>> X = [[np.nan, 2, 3], [4, np.nan, 6], [10, np.nan, 9]]
    >>> print(imp_mean.transform(X))
    [[ 7.   2.   3. ]
     [ 4.   3.5  6. ]
     [10.   3.5  9. ]]
    Notes
    -----
    Columns which only contained missing values at `fit` are discarded upon
    `transform` if strategy is not "constant".
    """
    def __init__(self, missing_values=np.nan, strategy="mean",
                 fill_value=None, verbose=0, copy=True, add_indicator=False):
        self.missing_values = missing_values
        self.strategy = strategy
        self.fill_value = fill_value
        self.verbose = verbose
        self.copy = copy
        self.add_indicator = add_indicator
    def _validate_input(self, X):
        """Validate strategy and coerce X to an acceptable array/sparse form."""
        allowed_strategies = ["mean", "median", "most_frequent", "constant"]
        if self.strategy not in allowed_strategies:
            raise ValueError("Can only use these strategies: {0} "
                             " got strategy={1}".format(allowed_strategies,
                                                        self.strategy))
        if self.strategy in ("most_frequent", "constant"):
            dtype = None
        else:
            dtype = FLOAT_DTYPES
        if not is_scalar_nan(self.missing_values):
            force_all_finite = True
        else:
            force_all_finite = "allow-nan"
        try:
            X = check_array(X, accept_sparse='csc', dtype=dtype,
                            force_all_finite=force_all_finite, copy=self.copy)
        except ValueError as ve:
            if "could not convert" in str(ve):
                raise ValueError("Cannot use {0} strategy with non-numeric "
                                 "data. Received datatype :{1}."
                                 "".format(self.strategy, X.dtype.kind))
            else:
                raise ve
        _check_inputs_dtype(X, self.missing_values)
        if X.dtype.kind not in ("i", "u", "f", "O"):
            raise ValueError("SimpleImputer does not support data with dtype "
                             "{0}. Please provide either a numeric array (with"
                             " a floating point or integer dtype) or "
                             "categorical data represented either as an array "
                             "with integer dtype or an array of string values "
                             "with an object dtype.".format(X.dtype))
        return X
    def fit(self, X, y=None):
        """Fit the imputer on X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data, where ``n_samples`` is the number of samples and
            ``n_features`` is the number of features.
        Returns
        -------
        self : SimpleImputer
        """
        X = self._validate_input(X)
        # default fill_value is 0 for numerical input and "missing_value"
        # otherwise
        if self.fill_value is None:
            if X.dtype.kind in ("i", "u", "f"):
                fill_value = 0
            else:
                fill_value = "missing_value"
        else:
            fill_value = self.fill_value
        # fill_value should be numerical in case of numerical input
        if (self.strategy == "constant" and
                X.dtype.kind in ("i", "u", "f") and
                not isinstance(fill_value, numbers.Real)):
            raise ValueError("'fill_value'={0} is invalid. Expected a "
                             "numerical value when imputing numerical "
                             "data".format(fill_value))
        if sparse.issparse(X):
            # missing_values = 0 not allowed with sparse data as it would
            # force densification
            if self.missing_values == 0:
                raise ValueError("Imputation not possible when missing_values "
                                 "== 0 and input is sparse. Provide a dense "
                                 "array instead.")
            else:
                self.statistics_ = self._sparse_fit(X,
                                                    self.strategy,
                                                    self.missing_values,
                                                    fill_value)
        else:
            self.statistics_ = self._dense_fit(X,
                                               self.strategy,
                                               self.missing_values,
                                               fill_value)
        if self.add_indicator:
            self.indicator_ = MissingIndicator(
                missing_values=self.missing_values, error_on_new=False)
            self.indicator_.fit(X)
        else:
            self.indicator_ = None
        return self
    def _sparse_fit(self, X, strategy, missing_values, fill_value):
        """Fit the transformer on sparse data."""
        mask_data = _get_mask(X.data, missing_values)
        n_implicit_zeros = X.shape[0] - np.diff(X.indptr)
        statistics = np.empty(X.shape[1])
        if strategy == "constant":
            # for constant strategy, self.statistics_ is used to store
            # fill_value in each column
            statistics.fill(fill_value)
        else:
            for i in range(X.shape[1]):
                column = X.data[X.indptr[i]:X.indptr[i + 1]]
                mask_column = mask_data[X.indptr[i]:X.indptr[i + 1]]
                column = column[~mask_column]
                # combine explicit and implicit zeros
                mask_zeros = _get_mask(column, 0)
                column = column[~mask_zeros]
                n_explicit_zeros = mask_zeros.sum()
                n_zeros = n_implicit_zeros[i] + n_explicit_zeros
                if strategy == "mean":
                    s = column.size + n_zeros
                    statistics[i] = np.nan if s == 0 else column.sum() / s
                elif strategy == "median":
                    statistics[i] = _get_median(column,
                                                n_zeros)
                elif strategy == "most_frequent":
                    statistics[i] = _most_frequent(column,
                                                   0,
                                                   n_zeros)
        return statistics
    def _dense_fit(self, X, strategy, missing_values, fill_value):
        """Fit the transformer on dense data."""
        mask = _get_mask(X, missing_values)
        masked_X = ma.masked_array(X, mask=mask)
        # Mean
        if strategy == "mean":
            mean_masked = np.ma.mean(masked_X, axis=0)
            # Avoid the warning "Warning: converting a masked element to nan."
            mean = np.ma.getdata(mean_masked)
            mean[np.ma.getmask(mean_masked)] = np.nan
            return mean
        # Median
        elif strategy == "median":
            median_masked = np.ma.median(masked_X, axis=0)
            # Avoid the warning "Warning: converting a masked element to nan."
            median = np.ma.getdata(median_masked)
            median[np.ma.getmaskarray(median_masked)] = np.nan
            return median
        # Most frequent
        elif strategy == "most_frequent":
            # scipy.stats.mstats.mode cannot be used because it will not work
            # properly if the first element is masked and if its frequency
            # is equal to the frequency of the most frequent valid element
            # See https://github.com/scipy/scipy/issues/2636
            # To be able to access the elements by columns
            X = X.transpose()
            mask = mask.transpose()
            if X.dtype.kind == "O":
                most_frequent = np.empty(X.shape[0], dtype=object)
            else:
                most_frequent = np.empty(X.shape[0])
            for i, (row, row_mask) in enumerate(zip(X[:], mask[:])):
                # Use the builtin `bool`: the `np.bool` alias is deprecated
                # and removed in numpy >= 1.24.
                row_mask = np.logical_not(row_mask).astype(bool)
                row = row[row_mask]
                most_frequent[i] = _most_frequent(row, np.nan, 0)
            return most_frequent
        # Constant
        elif strategy == "constant":
            # for constant strategy, self.statistics_ is used to store
            # fill_value in each column
            return np.full(X.shape[1], fill_value, dtype=X.dtype)
    def transform(self, X):
        """Impute all missing values in X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input data to complete.
        """
        check_is_fitted(self, 'statistics_')
        X = self._validate_input(X)
        statistics = self.statistics_
        if X.shape[1] != statistics.shape[0]:
            raise ValueError("X has %d features per sample, expected %d"
                             % (X.shape[1], self.statistics_.shape[0]))
        if self.add_indicator:
            X_trans_indicator = self.indicator_.transform(X)
        # Delete the invalid columns if strategy is not constant
        if self.strategy == "constant":
            valid_statistics = statistics
        else:
            # same as np.isnan but also works for object dtypes
            invalid_mask = _get_mask(statistics, np.nan)
            valid_mask = np.logical_not(invalid_mask)
            valid_statistics = statistics[valid_mask]
            valid_statistics_indexes = np.flatnonzero(valid_mask)
            if invalid_mask.any():
                missing = np.arange(X.shape[1])[invalid_mask]
                if self.verbose:
                    warnings.warn("Deleting features without "
                                  "observed values: %s" % missing)
                X = X[:, valid_statistics_indexes]
        # Do actual imputation
        if sparse.issparse(X):
            if self.missing_values == 0:
                raise ValueError("Imputation not possible when missing_values "
                                 "== 0 and input is sparse. Provide a dense "
                                 "array instead.")
            else:
                mask = _get_mask(X.data, self.missing_values)
                # Use the builtin `int`: the `np.int` alias is deprecated
                # and removed in numpy >= 1.24.
                indexes = np.repeat(np.arange(len(X.indptr) - 1, dtype=int),
                                    np.diff(X.indptr))[mask]
                X.data[mask] = valid_statistics[indexes].astype(X.dtype,
                                                               copy=False)
        else:
            mask = _get_mask(X, self.missing_values)
            n_missing = np.sum(mask, axis=0)
            values = np.repeat(valid_statistics, n_missing)
            coordinates = np.where(mask.transpose())[::-1]
            X[coordinates] = values
        if self.add_indicator:
            hstack = sparse.hstack if sparse.issparse(X) else np.hstack
            X = hstack((X, X_trans_indicator))
        return X
    def _more_tags(self):
        return {'allow_nan': True}
class MissingIndicator(BaseEstimator, TransformerMixin):
    """Binary indicators for missing values.
    Note that this component typically should not be used in a vanilla
    :class:`Pipeline` consisting of transformers and a classifier, but rather
    could be added using a :class:`FeatureUnion` or :class:`ColumnTransformer`.
    Read more in the :ref:`User Guide <impute>`.
    Parameters
    ----------
    missing_values : number, string, np.nan (default) or None
        The placeholder for the missing values. All occurrences of
        `missing_values` will be indicated (True in the output array), the
        other values will be marked as False.
    features : str, optional
        Whether the imputer mask should represent all or a subset of
        features.
        - If "missing-only" (default), the imputer mask will only represent
          features containing missing values during fit time.
        - If "all", the imputer mask will represent all features.
    sparse : boolean or "auto", optional
        Whether the imputer mask format should be sparse or dense.
        - If "auto" (default), the imputer mask will be of same type as
          input.
        - If True, the imputer mask will be a sparse matrix.
        - If False, the imputer mask will be a numpy array.
    error_on_new : boolean, optional
        If True (default), transform will raise an error when there are
        features with missing values in transform that have no missing values
        in fit. This is applicable only when ``features="missing-only"``.
    Attributes
    ----------
    features_ : ndarray, shape (n_missing_features,) or (n_features,)
        The features indices which will be returned when calling ``transform``.
        They are computed during ``fit``. For ``features='all'``, it is equal
        to ``range(n_features)``.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.impute import MissingIndicator
    >>> X1 = np.array([[np.nan, 1, 3],
    ...                [4, 0, np.nan],
    ...                [8, 1, 0]])
    >>> X2 = np.array([[5, 1, np.nan],
    ...                [np.nan, 2, 3],
    ...                [2, 4, 0]])
    >>> indicator = MissingIndicator()
    >>> indicator.fit(X1)
    MissingIndicator()
    >>> X2_tr = indicator.transform(X2)
    >>> X2_tr
    array([[False,  True],
           [ True, False],
           [False, False]])
    """
    def __init__(self, missing_values=np.nan, features="missing-only",
                 sparse="auto", error_on_new=True):
        self.missing_values = missing_values
        self.features = features
        self.sparse = sparse
        self.error_on_new = error_on_new
    def _get_missing_features_info(self, X):
        """Compute the imputer mask and the indices of the features
        containing missing values.
        Parameters
        ----------
        X : {ndarray or sparse matrix}, shape (n_samples, n_features)
            The input data with missing values. Note that ``X`` has been
            checked in ``fit`` and ``transform`` before to call this function.
        Returns
        -------
        imputer_mask : {ndarray or sparse matrix}, shape \
(n_samples, n_features) or (n_samples, n_features_with_missing)
            The imputer mask of the original data.
        features_with_missing : ndarray, shape (n_features_with_missing)
            The features containing missing values.
        """
        if sparse.issparse(X):
            mask = _get_mask(X.data, self.missing_values)
            # The imputer mask will be constructed with the same sparse format
            # as X.
            sparse_constructor = (sparse.csr_matrix if X.format == 'csr'
                                  else sparse.csc_matrix)
            # Reuse X's sparsity structure; entries are True where missing.
            imputer_mask = sparse_constructor(
                (mask, X.indices.copy(), X.indptr.copy()),
                shape=X.shape, dtype=bool)
            imputer_mask.eliminate_zeros()
            if self.features == 'missing-only':
                n_missing = imputer_mask.getnnz(axis=0)
            if self.sparse is False:
                imputer_mask = imputer_mask.toarray()
            elif imputer_mask.format == 'csr':
                imputer_mask = imputer_mask.tocsc()
        else:
            # Dense path: element-wise missing mask.
            imputer_mask = _get_mask(X, self.missing_values)
            if self.features == 'missing-only':
                n_missing = imputer_mask.sum(axis=0)
            if self.sparse is True:
                imputer_mask = sparse.csc_matrix(imputer_mask)
        if self.features == 'all':
            features_indices = np.arange(X.shape[1])
        else:
            # Only features with at least one missing value.
            features_indices = np.flatnonzero(n_missing)
        return imputer_mask, features_indices
    def _validate_input(self, X):
        """Validate X and reject configurations that cannot be supported."""
        if not is_scalar_nan(self.missing_values):
            force_all_finite = True
        else:
            force_all_finite = "allow-nan"
        X = check_array(X, accept_sparse=('csc', 'csr'), dtype=None,
                        force_all_finite=force_all_finite)
        _check_inputs_dtype(X, self.missing_values)
        if X.dtype.kind not in ("i", "u", "f", "O"):
            raise ValueError("MissingIndicator does not support data with "
                             "dtype {0}. Please provide either a numeric array"
                             " (with a floating point or integer dtype) or "
                             "categorical data represented either as an array "
                             "with integer dtype or an array of string values "
                             "with an object dtype.".format(X.dtype))
        if sparse.issparse(X) and self.missing_values == 0:
            # missing_values = 0 not allowed with sparse data as it would
            # force densification
            raise ValueError("Sparse input with missing_values=0 is "
                             "not supported. Provide a dense "
                             "array instead.")
        return X
    def fit(self, X, y=None):
        """Fit the transformer on X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            Input data, where ``n_samples`` is the number of samples and
            ``n_features`` is the number of features.
        Returns
        -------
        self : object
            Returns self.
        """
        X = self._validate_input(X)
        self._n_features = X.shape[1]
        if self.features not in ('missing-only', 'all'):
            raise ValueError("'features' has to be either 'missing-only' or "
                             "'all'. Got {} instead.".format(self.features))
        if not ((isinstance(self.sparse, str) and
                self.sparse == "auto") or isinstance(self.sparse, bool)):
            raise ValueError("'sparse' has to be a boolean or 'auto'. "
                             "Got {!r} instead.".format(self.sparse))
        # Remember which features had missing values at fit time.
        self.features_ = self._get_missing_features_info(X)[1]
        return self
    def transform(self, X):
        """Generate missing values indicator for X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input data to complete.
        Returns
        -------
        Xt : {ndarray or sparse matrix}, shape (n_samples, n_features)
            The missing indicator for input data. The data type of ``Xt``
            will be boolean.
        """
        check_is_fitted(self, "features_")
        X = self._validate_input(X)
        if X.shape[1] != self._n_features:
            raise ValueError("X has a different number of features "
                             "than during fitting.")
        imputer_mask, features = self._get_missing_features_info(X)
        if self.features == "missing-only":
            # Features missing now but not at fit time are an error (if
            # error_on_new), since they were not represented in features_.
            features_diff_fit_trans = np.setdiff1d(features, self.features_)
            if (self.error_on_new and features_diff_fit_trans.size > 0):
                raise ValueError("The features {} have missing values "
                                 "in transform but have no missing values "
                                 "in fit.".format(features_diff_fit_trans))
            if self.features_.size < self._n_features:
                imputer_mask = imputer_mask[:, self.features_]
        return imputer_mask
    def fit_transform(self, X, y=None):
        """Generate missing values indicator for X.
        Parameters
        ----------
        X : {array-like, sparse matrix}, shape (n_samples, n_features)
            The input data to complete.
        Returns
        -------
        Xt : {ndarray or sparse matrix}, shape (n_samples, n_features)
            The missing indicator for input data. The data type of ``Xt``
            will be boolean.
        """
        return self.fit(X, y).transform(X)
    def _more_tags(self):
        return {'allow_nan': True,
                'X_types': ['2darray', 'string']}
| |
#
#
# Copyright (C) 2006, 2007, 2010, 2011 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utility functions with algorithms.
"""
import re
import time
import itertools
from ganeti import compat
from ganeti.utils import text
# Number of alternating digit/non-digit groups captured for NiceSort keys.
_SORTER_GROUPS = 8
# Matches up to _SORTER_GROUPS optional (\D+|\d+) runs plus the remainder.
_SORTER_RE = re.compile("^%s(.*)$" % (_SORTER_GROUPS * r"(\D+|\d+)?"))
def UniqueSequence(seq):
  """Returns a list with unique elements.
  Element order is preserved.
  @type seq: sequence
  @param seq: the sequence with the source elements
  @rtype: list
  @return: list of unique elements from seq
  """
  seen = set()
  unique = []
  for element in seq:
    if element not in seen:
      seen.add(element)
      unique.append(element)
  return unique
def JoinDisjointDicts(dict_a, dict_b):
  """Joins dictionaries with no conflicting keys.
  The key sets of the two dictionaries must be disjoint; the merged
  result is returned as a new dictionary.
  @type dict_a: dict
  @param dict_a: the first dictionary
  @type dict_b: dict
  @param dict_b: the second dictionary
  @rtype: dict
  @return: a new dictionary containing all the key/value pairs contained in the
      two dictionaries.
  """
  assert not (set(dict_a) & set(dict_b)), ("Duplicate keys found while joining"
                                           " %s and %s" % (dict_a, dict_b))
  joined = {}
  joined.update(dict_a)
  joined.update(dict_b)
  return joined
def FindDuplicates(seq):
  """Identifies duplicates in a list.
  Does not preserve element order.
  @type seq: sequence
  @param seq: Sequence with source elements
  @rtype: list
  @return: List of duplicate elements from seq
  """
  seen = set()
  duplicates = set()
  for element in seq:
    # Second (or later) sighting goes into the duplicate set.
    target = duplicates if element in seen else seen
    target.add(element)
  return list(duplicates)
def GetRepeatedKeys(*dicts):
  """Return the set of keys defined multiple times in the given dicts.
  >>> GetRepeatedKeys({"foo": 1, "bar": 2},
  ...                 {"foo": 5, "baz": 7}
  ... )
  set("foo")
  @type dicts: dict
  @param dicts: The dictionaries to check for duplicate keys.
  @rtype: set
  @return: Keys used more than once across all dicts
  """
  if len(dicts) < 2:
    # A single dict cannot repeat keys across dicts.
    return set()
  all_keys = list(itertools.chain.from_iterable(dicts))
  return set(FindDuplicates(all_keys))
def _NiceSortTryInt(val):
"""Attempts to convert a string to an integer.
"""
if val and val.isdigit():
return int(val)
else:
return val
def NiceSortKey(value):
  """Extract key for sorting.
  Splits the stringified value into digit/non-digit groups and converts
  the digit groups to integers.
  """
  match = _SORTER_RE.match(str(value))
  return [_NiceSortTryInt(group) for group in match.groups()]
def NiceSort(values, key=None):
  """Sort a list of strings based on digit and non-digit groupings.
  Given a list of names C{['a1', 'a10', 'a11', 'a2']} this function
  will sort the list in the logical order C{['a1', 'a2', 'a10',
  'a11']}.
  The sort algorithm breaks each name in groups of either only-digits
  or no-digits. Only the first eight such groups are considered, and
  after that we just use what's left of the string.
  @type values: list
  @param values: the names to be sorted
  @type key: callable or None
  @param key: function of one argument to extract a comparison key from each
      list element, must return string
  @rtype: list
  @return: a copy of the name list sorted with our algorithm
  """
  if key is None:
    keyfunc = NiceSortKey
  else:
    # Apply the caller's extractor before building the sort key.
    def keyfunc(value):
      return NiceSortKey(key(value))
  return sorted(values, key=keyfunc)
def InvertDict(dict_in):
  """Inverts the key/value mapping of a dict.
  @param dict_in: The dict to invert
  @return: the inverted dict
  """
  return dict((value, key) for (key, value) in dict_in.items())
def InsertAtPos(src, pos, other):
  """Inserts C{other} at given C{pos} into C{src}.
  @note: This function does not modify C{src} in place but returns a new copy
  @type src: list
  @param src: The source list in which we want insert elements
  @type pos: int
  @param pos: The position where we want to start insert C{other}
  @type other: list
  @param other: The other list to insert into C{src}
  @return: A copy of C{src} with C{other} inserted at C{pos}
  """
  # list(other) lets `other` be any iterable, as the original extend() did.
  return src[:pos] + list(other) + src[pos:]
def SequenceToDict(seq, key=compat.fst):
  """Converts a sequence to a dictionary with duplicate detection.
  @type seq: sequence
  @param seq: Input sequence
  @type key: callable
  @param key: Function for retrieving dictionary key from sequence element
  @rtype: dict
  @raise ValueError: If duplicate keys are found
  """
  # Materialize as a list: under Python 3, map() returns a one-shot
  # iterator, which would break len() and be exhausted before zip().
  keys = [key(elem) for elem in seq]
  duplicates = FindDuplicates(keys)
  if duplicates:
    raise ValueError("Duplicate keys found: %s" % text.CommaJoin(duplicates))
  assert len(keys) == len(seq)
  return dict(zip(keys, seq))
def _MakeFlatToDict(data):
  """Helper function for C{FlatToDict}.
  This function is recursively called
  @param data: The input data as described in C{FlatToDict}, already splitted
  @returns: The so far converted dict
  """
  # An empty first key component marks the bottom of the recursion: the
  # single remaining entry carries the value itself.
  if not compat.fst(compat.fst(data)):
    assert len(data) == 1, \
      "not bottom most element, found %d elements, expected 1" % len(data)
    return compat.snd(compat.fst(data))
  # Group entries by their first key component (pop(0) consumes it),
  # then recurse on each group with the remaining components.
  keyfn = lambda e: compat.fst(e).pop(0)
  return dict([(k, _MakeFlatToDict(list(g)))
               for (k, g) in itertools.groupby(sorted(data), keyfn)])
def FlatToDict(data, field_sep="/"):
  """Converts a flat structure to a fully fledged dict.
  It accept a list of tuples in the form::
    [
      ("foo/bar", {"key1": "data1", "key2": "data2"}),
      ("foo/baz", {"key3" :"data3" }),
    ]
  where the first element is the key separated by C{field_sep}.
  This would then return::
    {
      "foo": {
        "bar": {"key1": "data1", "key2": "data2"},
        "baz": {"key3" :"data3" },
        },
      }
  @type data: list of tuple
  @param data: Input list to convert
  @type field_sep: str
  @param field_sep: The separator for the first field of the tuple
  @returns: A dict based on the input list
  """
  # Split each key into its path components and delegate the actual
  # tree building to the recursive helper.
  return _MakeFlatToDict([(keys.split(field_sep), value)
                          for (keys, value) in data])
class RunningTimeout(object):
  """Tracks how much of an overall timeout remains across several operations.
  """
  __slots__ = [
    "_allow_negative",
    "_start_time",
    "_time_fn",
    "_timeout",
    ]
  def __init__(self, timeout, allow_negative, _time_fn=time.time):
    """Initializes this class.
    @type timeout: float
    @param timeout: Timeout duration
    @type allow_negative: bool
    @param allow_negative: Whether to return values below zero
    @param _time_fn: Time function for unittests
    """
    object.__init__(self)
    if timeout is not None and timeout < 0.0:
      raise ValueError("Timeout must not be negative")
    self._timeout = timeout
    self._allow_negative = allow_negative
    self._time_fn = _time_fn
    self._start_time = None
  def Remaining(self):
    """Returns the remaining timeout.
    """
    if self._timeout is None:
      return None
    if self._start_time is None:
      # First call anchors the start of the timeout window.
      self._start_time = self._time_fn()
    left = (self._start_time + self._timeout) - self._time_fn()
    if self._allow_negative:
      return left
    # Clamp to zero when negative values are not allowed.
    return max(0.0, left)
| |
import ray._raylet as raylet
import ray.core.generated.ray_client_pb2 as ray_client_pb2
import ray.core.generated.ray_client_pb2_grpc as ray_client_pb2_grpc
from ray.util.client import ray
from ray.util.client.options import validate_options
from ray._private.signature import get_signature, extract_signature
from ray._private.utils import check_oversized_function
from concurrent.futures import Future
from dataclasses import dataclass
import grpc
import os
import uuid
import inspect
import pickle
from ray.util.inspect import (
is_cython,
is_class_method,
is_function_or_method,
is_static_method,
)
import logging
import threading
from collections import OrderedDict
from typing import Any
from typing import List
from typing import Dict
from typing import Optional
from typing import Tuple
from typing import Union
from typing import Callable
logger = logging.getLogger(__name__)

# The maximum field value for int32 id's -- which is also the maximum
# number of simultaneous in-flight requests.
INT32_MAX = (2 ** 31) - 1

# gRPC status codes that the client shouldn't attempt to recover from
# Resource exhausted: Server is low on resources, or has hit the max number
#   of client connections
# Invalid argument: Reserved for application errors
# Not found: Set if the client is attempting to reconnect to a session that
#   does not exist
# Failed precondition: Reserved for application errors
# Aborted: Set when an error is serialized into the details of the context,
#   signals that error should be deserialized on the client side
GRPC_UNRECOVERABLE_ERRORS = (
    grpc.StatusCode.RESOURCE_EXHAUSTED,
    grpc.StatusCode.INVALID_ARGUMENT,
    grpc.StatusCode.NOT_FOUND,
    grpc.StatusCode.FAILED_PRECONDITION,
    grpc.StatusCode.ABORTED,
)

# TODO: Instead of just making the max message size large, the right thing to
# do is to split up the bytes representation of serialized data into multiple
# messages and reconstruct them on either end. That said, since clients are
# drivers and really just feed initial things in and final results out, (when
# not going to S3 or similar) then a large limit will suffice for many use
# cases.
#
# Currently, this is 2GiB, the max for a signed int.
GRPC_MAX_MESSAGE_SIZE = (2 * 1024 * 1024 * 1024) - 1

# 30 seconds because ELB timeout is 60 seconds
GRPC_KEEPALIVE_TIME_MS = 1000 * 30

# Long timeout because we do not want gRPC ending a connection.
GRPC_KEEPALIVE_TIMEOUT_MS = 1000 * 600

# Channel options shared by client/server gRPC channels.
GRPC_OPTIONS = [
    ("grpc.max_send_message_length", GRPC_MAX_MESSAGE_SIZE),
    ("grpc.max_receive_message_length", GRPC_MAX_MESSAGE_SIZE),
    ("grpc.keepalive_time_ms", GRPC_KEEPALIVE_TIME_MS),
    ("grpc.keepalive_timeout_ms", GRPC_KEEPALIVE_TIMEOUT_MS),
    ("grpc.keepalive_permit_without_calls", 1),
    # Send an infinite number of pings
    ("grpc.http2.max_pings_without_data", 0),
    ("grpc.http2.min_ping_interval_without_data_ms", GRPC_KEEPALIVE_TIME_MS - 50),
    # Allow many strikes
    ("grpc.http2.max_ping_strikes", 0),
]

# Cap on the server-side thread pool; overridable via environment variable.
CLIENT_SERVER_MAX_THREADS = float(os.getenv("RAY_CLIENT_SERVER_MAX_THREADS", 100))

# Large objects are chunked into 64 MiB messages
OBJECT_TRANSFER_CHUNK_SIZE = 64 * 2 ** 20

# Warn the user if the object being transferred is larger than 2 GiB
OBJECT_TRANSFER_WARNING_SIZE = 2 * 2 ** 30
class ClientObjectRef(raylet.ObjectRef):
    """Client-side reference to an object stored on the Ray cluster.

    The underlying object ID may not be known at construction time: when
    built from a Future, resolving the ID is deferred until first use
    (see ``_wait_for_id``).
    """

    def __init__(self, id: Union[bytes, Future]):
        # Guards the lazy resolution of the ID from the Future.
        self._mutex = threading.Lock()
        self._worker = ray.get_context().client_worker
        self._id_future = None
        if isinstance(id, bytes):
            self._set_id(id)
        elif isinstance(id, Future):
            # ID not yet assigned by the server; resolved in _wait_for_id().
            self._id_future = id
        else:
            raise TypeError("Unexpected type for id {}".format(id))

    def __del__(self):
        # Best-effort release of the server-side reference; failures during
        # teardown/disconnect are logged instead of raised.
        if self._worker is not None and self._worker.is_connected():
            try:
                if not self.is_nil():
                    self._worker.call_release(self.id)
            except Exception:
                logger.info(
                    "Exception in ObjectRef is ignored in destructor. "
                    "To receive this exception in application code, call "
                    "a method on the actor reference before its destructor "
                    "is run."
                )

    def binary(self):
        self._wait_for_id()
        return super().binary()

    def hex(self):
        self._wait_for_id()
        return super().hex()

    def is_nil(self):
        self._wait_for_id()
        return super().is_nil()

    def __hash__(self):
        self._wait_for_id()
        return hash(self.id)

    def task_id(self):
        self._wait_for_id()
        return super().task_id()

    @property
    def id(self):
        # Raw binary form of the object ID.
        return self.binary()

    def future(self) -> Future:
        """Wraps this reference's eventual result in a concurrent Future."""
        fut = Future()

        def set_future(data: Any) -> None:
            """Schedules a callback to set the exception or result
            in the Future."""
            if isinstance(data, Exception):
                fut.set_exception(data)
            else:
                fut.set_result(data)

        self._on_completed(set_future)
        # Prevent this object ref from being released.
        fut.object_ref = self
        return fut

    def _on_completed(self, py_callback: Callable[[Any], None]) -> None:
        """Register a callback that will be called after Object is ready.
        If the ObjectRef is already ready, the callback will be called soon.
        The callback should take the result as the only argument. The result
        can be an exception object in case of task error.
        """

        def deserialize_obj(
            resp: Union[ray_client_pb2.DataResponse, Exception]
        ) -> None:
            # Local import; presumably avoids an import cycle with the
            # pickler module -- confirm before moving to module level.
            from ray.util.client.client_pickler import loads_from_server

            if isinstance(resp, Exception):
                data = resp
            else:
                obj = resp.get
                data = None
                if not obj.valid:
                    # Server signalled an error; deserialize the error blob.
                    data = loads_from_server(resp.get.error)
                else:
                    data = loads_from_server(resp.get.data)

            py_callback(data)

        self._worker.register_callback(self, deserialize_obj)

    def _set_id(self, id):
        super()._set_id(id)
        # Tell the server to keep this object alive for the client.
        self._worker.call_retain(id)

    def _wait_for_id(self, timeout=None):
        # Resolve the pending Future exactly once (double-checked locking).
        if self._id_future:
            with self._mutex:
                if self._id_future:
                    self._set_id(self._id_future.result(timeout=timeout))
                    self._id_future = None
class ClientActorRef(raylet.ActorID):
    """Client-side reference to an actor created on the Ray cluster.

    Mirrors ClientObjectRef: the underlying actor ID may be supplied
    directly as bytes, or as a Future resolved lazily on first use.
    """

    def __init__(self, id: Union[bytes, Future]):
        # Guards the lazy resolution of the ID from the Future.
        self._mutex = threading.Lock()
        self._worker = ray.get_context().client_worker
        # Initialize unconditionally (as ClientObjectRef does): otherwise an
        # unexpected `id` type would raise below with _id_future unset, and
        # __del__/_wait_for_id would then hit an AttributeError.
        self._id_future = None
        if isinstance(id, bytes):
            self._set_id(id)
        elif isinstance(id, Future):
            # ID not yet assigned by the server; resolved in _wait_for_id().
            self._id_future = id
        else:
            raise TypeError("Unexpected type for id {}".format(id))

    def __del__(self):
        # Best-effort release of the server-side reference; failures during
        # teardown/disconnect are logged instead of raised.
        if self._worker is not None and self._worker.is_connected():
            try:
                if not self.is_nil():
                    self._worker.call_release(self.id)
            except Exception:
                logger.info(
                    "Exception from actor creation is ignored in destructor. "
                    "To receive this exception in application code, call "
                    "a method on the actor reference before its destructor "
                    "is run."
                )

    def binary(self):
        self._wait_for_id()
        return super().binary()

    def hex(self):
        self._wait_for_id()
        return super().hex()

    def is_nil(self):
        self._wait_for_id()
        return super().is_nil()

    def __hash__(self):
        self._wait_for_id()
        return hash(self.id)

    @property
    def id(self):
        # Raw binary form of the actor ID.
        return self.binary()

    def _set_id(self, id):
        super()._set_id(id)
        # Tell the server to keep the actor handle alive for the client.
        self._worker.call_retain(id)

    def _wait_for_id(self, timeout=None):
        # Resolve the pending Future exactly once (double-checked locking).
        if self._id_future:
            with self._mutex:
                if self._id_future:
                    self._set_id(self._id_future.result(timeout=timeout))
                    self._id_future = None
class ClientStub:
    """Marker base class for client-side stand-ins of remote entities."""
    pass
class ClientRemoteFunc(ClientStub):
    """A stub created on the Ray Client to represent a remote
    function that can be executed on the cluster.

    This class is allowed to be passed around between remote functions.

    Args:
        _func: The actual function to execute remotely
        _name: The original name of the function
        _ref: The ClientObjectRef of the pickled code of the function, _func
    """

    def __init__(self, f, options=None):
        # Protects the lazy upload of the pickled function (_ensure_ref).
        self._lock = threading.Lock()
        self._func = f
        self._name = f.__name__
        # Kept for client-side argument validation in remote().
        self._signature = get_signature(f)
        self._ref = None
        # Client-generated ID used before the server assigns a real ref.
        self._client_side_ref = ClientSideRefID.generate_id()
        self._options = validate_options(options)

    def __call__(self, *args, **kwargs):
        raise TypeError(
            "Remote function cannot be called directly. "
            f"Use {self._name}.remote method instead"
        )

    def remote(self, *args, **kwargs):
        """Schedules the function for execution and returns its ref(s)."""
        # Check if supplied parameters match the function signature. Same case
        # at the other callsites.
        self._signature.bind(*args, **kwargs)
        return return_refs(ray.call_remote(self, *args, **kwargs))

    def options(self, **kwargs):
        # Returns a wrapper whose remote() applies the given option overrides.
        return OptionWrapper(self, kwargs)

    def _remote(self, args=None, kwargs=None, **option_args):
        # Core-worker-style entry point taking args/kwargs as sequences.
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        return self.options(**option_args).remote(*args, **kwargs)

    def __repr__(self):
        return "ClientRemoteFunc(%s, %s)" % (self._name, self._ref)

    def _ensure_ref(self):
        """Uploads the pickled function once and caches its object ref."""
        with self._lock:
            if self._ref is None:
                # While calling ray.put() on our function, if
                # our function is recursive, it will attempt to
                # encode the ClientRemoteFunc -- itself -- and
                # infinitely recurse on _ensure_ref.
                #
                # So we set the state of the reference to be an
                # in-progress self reference value, which
                # the encoding can detect and handle correctly.
                self._ref = InProgressSentinel()
                data = ray.worker._dumps_from_client(self._func)
                # Check pickled size before sending it to server, which is more
                # efficient and can be done synchronously inside remote() call.
                check_oversized_function(data, self._name, "remote function", None)
                self._ref = ray.worker._put_pickled(
                    data, client_ref_id=self._client_side_ref.id
                )

    def _prepare_client_task(self) -> ray_client_pb2.ClientTask:
        """Builds the protobuf task describing one invocation of this function."""
        self._ensure_ref()
        task = ray_client_pb2.ClientTask()
        task.type = ray_client_pb2.ClientTask.FUNCTION
        task.name = self._name
        task.payload_id = self._ref.id
        set_task_options(task, self._options, "baseline_options")
        return task

    def _num_returns(self) -> int:
        # None means "use the server-side default number of return values".
        if not self._options:
            return None
        return self._options.get("num_returns")
class ClientActorClass(ClientStub):
    """A stub created on the Ray Client to represent an actor class.

    It is wrapped by ray.remote and can be executed on the cluster.

    Args:
        actor_cls: The actual class to execute remotely
        _name: The original name of the class
        _ref: The ClientObjectRef of the pickled `actor_cls`
    """

    def __init__(self, actor_cls, options=None):
        self.actor_cls = actor_cls
        # Protects the lazy upload of the pickled class (_ensure_ref).
        self._lock = threading.Lock()
        self._name = actor_cls.__name__
        # Constructor signature (without `self`) for client-side validation.
        self._init_signature = inspect.Signature(
            parameters=extract_signature(actor_cls.__init__, ignore_first=True)
        )
        self._ref = None
        # Client-generated ID used before the server assigns a real ref.
        self._client_side_ref = ClientSideRefID.generate_id()
        self._options = validate_options(options)

    def __call__(self, *args, **kwargs):
        raise TypeError(
            "Remote actor cannot be instantiated directly. "
            f"Use {self._name}.remote() instead"
        )

    def _ensure_ref(self):
        """Uploads the pickled actor class once and caches its object ref."""
        with self._lock:
            if self._ref is None:
                # As before, set the state of the reference to be an
                # in-progress self reference value, which
                # the encoding can detect and handle correctly.
                self._ref = InProgressSentinel()
                data = ray.worker._dumps_from_client(self.actor_cls)
                # Check pickled size before sending it to server, which is more
                # efficient and can be done synchronously inside remote() call.
                check_oversized_function(data, self._name, "actor", None)
                self._ref = ray.worker._put_pickled(
                    data, client_ref_id=self._client_side_ref.id
                )

    def remote(self, *args, **kwargs) -> "ClientActorHandle":
        """Instantiates the actor on the cluster and returns a handle."""
        self._init_signature.bind(*args, **kwargs)
        # Actually instantiate the actor
        futures = ray.call_remote(self, *args, **kwargs)
        assert len(futures) == 1
        return ClientActorHandle(ClientActorRef(futures[0]), actor_class=self)

    def options(self, **kwargs):
        # Returns a wrapper whose remote() applies the given option overrides.
        return ActorOptionWrapper(self, kwargs)

    def _remote(self, args=None, kwargs=None, **option_args):
        # Core-worker-style entry point taking args/kwargs as sequences.
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        return self.options(**option_args).remote(*args, **kwargs)

    def __repr__(self):
        return "ClientActorClass(%s, %s)" % (self._name, self._ref)

    def __getattr__(self, key):
        # Only reached for attributes not found through normal lookup;
        # accessing actual class attributes (e.g. static methods) is
        # explicitly unsupported for now.
        if key not in self.__dict__:
            raise AttributeError("Not a class attribute")
        raise NotImplementedError("static methods")

    def _prepare_client_task(self) -> ray_client_pb2.ClientTask:
        """Builds the protobuf task that creates an instance of this actor."""
        self._ensure_ref()
        task = ray_client_pb2.ClientTask()
        task.type = ray_client_pb2.ClientTask.ACTOR
        task.name = self._name
        task.payload_id = self._ref.id
        set_task_options(task, self._options, "baseline_options")
        return task

    @staticmethod
    def _num_returns() -> int:
        # Actor creation returns exactly one value: the actor handle.
        return 1
class ClientActorHandle(ClientStub):
    """Client-side stub for instantiated actor.

    A stub created on the Ray Client to represent a remote actor that
    has been started on the cluster.  This class is allowed to be passed
    around between remote functions.

    Args:
        actor_ref: A reference to the running actor given to the client. This
            is a serialized version of the actual handle as an opaque token.
        actor_class: Optional local actor class; when given, method metadata
            is extracted locally instead of being fetched from the cluster.
    """

    def __init__(
        self, actor_ref: ClientActorRef, actor_class: Optional[ClientActorClass] = None
    ):
        self.actor_ref = actor_ref
        self._dir: Optional[List[str]] = None
        if actor_class is not None:
            # Extract per-method metadata locally, avoiding a round trip to
            # the cluster (_init_class_info) on first attribute access.
            self._method_num_returns = {}
            self._method_signatures = {}
            for method_name, method_obj in inspect.getmembers(
                actor_class.actor_cls, is_function_or_method
            ):
                self._method_num_returns[method_name] = getattr(
                    method_obj, "__ray_num_returns__", None
                )
                self._method_signatures[method_name] = inspect.Signature(
                    parameters=extract_signature(
                        method_obj,
                        # Drop `self` except for class/static methods, which
                        # take no implicit first argument.
                        ignore_first=(
                            not (
                                is_class_method(method_obj)
                                or is_static_method(actor_class.actor_cls, method_name)
                            )
                        ),
                    )
                )
        else:
            # Unknown actor class; metadata is fetched lazily from the
            # cluster in _init_class_info().
            self._method_num_returns = None
            self._method_signatures = None

    def __dir__(self) -> List[str]:
        # NOTE(review): returns dict keys (a view), although the annotation
        # says List[str] -- confirm whether callers rely on list semantics.
        if self._method_num_returns is not None:
            return self._method_num_returns.keys()
        if ray.is_connected():
            self._init_class_info()
            return self._method_num_returns.keys()
        return super().__dir__()

    # For compatibility with core worker ActorHandle._actor_id which returns
    # ActorID
    @property
    def _actor_id(self) -> ClientActorRef:
        return self.actor_ref

    def __getattr__(self, key):
        # Any attribute not found by normal lookup is treated as a remote
        # method name on the actor.
        if self._method_num_returns is None:
            self._init_class_info()
        return ClientRemoteMethod(
            self,
            key,
            self._method_num_returns.get(key),
            self._method_signatures.get(key),
        )

    def __repr__(self):
        return "ClientActorHandle(%s)" % (self.actor_ref.id.hex())

    def _init_class_info(self):
        """Fetches method metadata from the cluster via a helper task."""
        # TODO: fetch Ray method decorators

        @ray.remote(num_cpus=0)
        def get_class_info(x):
            return x._ray_method_num_returns, x._ray_method_signatures

        self._method_num_returns, method_parameters = ray.get(
            get_class_info.remote(self)
        )

        self._method_signatures = {}
        for method, parameters in method_parameters.items():
            self._method_signatures[method] = inspect.Signature(parameters=parameters)
class ClientRemoteMethod(ClientStub):
    """A stub for a method on a remote actor.

    Can be annotated with execution options.

    Args:
        actor_handle: A reference to the ClientActorHandle that generated
            this method and will have this method called upon it.
        method_name: The name of this method
        num_returns: Number of return values for one invocation; None means
            the server-side default.
        signature: The method's signature for client-side validation.
    """

    def __init__(
        self,
        actor_handle: ClientActorHandle,
        method_name: str,
        num_returns: int,
        signature: inspect.Signature,
    ):
        self._actor_handle = actor_handle
        self._method_name = method_name
        self._method_num_returns = num_returns
        self._signature = signature

    def __call__(self, *args, **kwargs):
        raise TypeError(
            "Actor methods cannot be called directly. Instead "
            f"of running 'object.{self._method_name}()', try "
            f"'object.{self._method_name}.remote()'."
        )

    def remote(self, *args, **kwargs):
        """Schedules the method call on the actor and returns its ref(s)."""
        self._signature.bind(*args, **kwargs)
        return return_refs(ray.call_remote(self, *args, **kwargs))

    def __repr__(self):
        return "ClientRemoteMethod(%s, %s, %s)" % (
            self._method_name,
            self._actor_handle,
            self._method_num_returns,
        )

    def options(self, **kwargs):
        # Returns a wrapper whose remote() applies the given option overrides.
        return OptionWrapper(self, kwargs)

    def _remote(self, args=None, kwargs=None, **option_args):
        # Core-worker-style entry point taking args/kwargs as sequences.
        if args is None:
            args = []
        if kwargs is None:
            kwargs = {}
        return self.options(**option_args).remote(*args, **kwargs)

    def _prepare_client_task(self) -> ray_client_pb2.ClientTask:
        """Builds the protobuf task describing one method invocation."""
        task = ray_client_pb2.ClientTask()
        task.type = ray_client_pb2.ClientTask.METHOD
        task.name = self._method_name
        task.payload_id = self._actor_handle.actor_ref.id
        return task

    def _num_returns(self) -> int:
        return self._method_num_returns
class OptionWrapper:
    """Wraps a remote stub so a task can be submitted with option overrides."""

    def __init__(self, stub: ClientStub, options: Optional[Dict[str, Any]]):
        self._remote_stub = stub
        self._options = validate_options(options)

    def remote(self, *args, **kwargs):
        # Validate the call against the wrapped function's signature.
        self._remote_stub._signature.bind(*args, **kwargs)
        return return_refs(ray.call_remote(self, *args, **kwargs))

    def __getattr__(self, key):
        # Fall through to the wrapped stub for anything not defined here.
        return getattr(self._remote_stub, key)

    def _prepare_client_task(self):
        # Build the stub's task, then layer this wrapper's options on top.
        task = self._remote_stub._prepare_client_task()
        set_task_options(task, self._options)
        return task

    def _num_returns(self) -> int:
        # Options set on the wrapper take precedence over the stub's own.
        if self._options:
            num = self._options.get("num_returns")
            if num is not None:
                return num
        return self._remote_stub._num_returns()
class ActorOptionWrapper(OptionWrapper):
    """OptionWrapper variant whose remote() creates an actor handle."""

    def remote(self, *args, **kwargs):
        # Validate constructor arguments against the actor's __init__.
        self._remote_stub._init_signature.bind(*args, **kwargs)
        futures = ray.call_remote(self, *args, **kwargs)
        assert len(futures) == 1
        actor_class = None
        if isinstance(self._remote_stub, ClientActorClass):
            actor_class = self._remote_stub
        return ClientActorHandle(ClientActorRef(futures[0]), actor_class=actor_class)
def set_task_options(
    task: ray_client_pb2.ClientTask,
    options: Optional[Dict[str, Any]],
    field: str = "options",
) -> None:
    """Serializes `options` into the given field of a ClientTask.

    Clears the field entirely when no options are supplied, so the server
    can distinguish "no options" from "empty options".
    """
    if options is not None:
        getattr(task, field).pickled_options = pickle.dumps(options)
    else:
        task.ClearField(field)
def return_refs(
    futures: List[Future],
) -> Union[None, ClientObjectRef, List[ClientObjectRef]]:
    """Converts a list of Futures into client object references.

    Returns None for an empty list, a single ref when there is exactly one
    future, and a list of refs otherwise.
    """
    if not futures:
        return None
    refs = [ClientObjectRef(fut) for fut in futures]
    if len(refs) == 1:
        return refs[0]
    return refs
class InProgressSentinel:
    """Marker value indicating a reference that is still being resolved."""

    def __repr__(self) -> str:
        return type(self).__name__
class ClientSideRefID:
    """An ID generated by the client for objects not yet given an ObjectRef"""

    def __init__(self, id: bytes):
        # Raise instead of assert: asserts are stripped under `python -O`,
        # and an empty ID is a caller error that must always be reported.
        if not id:
            raise ValueError("ClientSideRefID requires a non-empty id")
        self.id = id

    @staticmethod
    def generate_id() -> "ClientSideRefID":
        """Returns a fresh 17-byte ID: a 0xCC tag byte followed by a UUID4."""
        tid = uuid.uuid4()
        return ClientSideRefID(b"\xcc" + tid.bytes)
def remote_decorator(options: Optional[Dict[str, Any]]):
    """Builds the decorator used by ray.remote on the client side.

    The returned decorator wraps a plain function into a ClientRemoteFunc
    and a class into a ClientActorClass, carrying the given options along.
    """

    def decorator(function_or_class) -> ClientStub:
        if inspect.isclass(function_or_class):
            return ClientActorClass(function_or_class, options=options)
        if inspect.isfunction(function_or_class) or is_cython(function_or_class):
            return ClientRemoteFunc(function_or_class, options=options)
        raise TypeError(
            "The @ray.remote decorator must be applied to "
            "either a function or to a class."
        )

    return decorator
@dataclass
class ClientServerHandle:
    """Holds the handles to the registered gRPC servicers and their server."""

    task_servicer: ray_client_pb2_grpc.RayletDriverServicer
    data_servicer: ray_client_pb2_grpc.RayletDataStreamerServicer
    logs_servicer: ray_client_pb2_grpc.RayletLogStreamerServicer
    grpc_server: grpc.Server

    def stop(self, grace: int) -> None:
        """Stops the gRPC server and wakes the data servicer so it can exit."""
        # The data servicer might be sleeping while waiting for clients to
        # reconnect. Signal that they no longer have to sleep and can exit
        # immediately, since the RPC server is stopped.
        self.grpc_server.stop(grace)
        self.data_servicer.stopped.set()

    # Add a hook for all the cases that previously
    # expected simply a gRPC server
    def __getattr__(self, attr):
        return getattr(self.grpc_server, attr)
def _get_client_id_from_context(context: Any) -> str:
    """
    Get `client_id` from gRPC metadata. If the `client_id` is not present,
    this function logs an error and sets the status_code.
    """
    metadata = dict(context.invocation_metadata())
    client_id = metadata.get("client_id") or ""
    if not client_id:
        logger.error("Client connecting with no client_id")
        context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
    return client_id
def _propagate_error_in_context(e: Exception, context: Any) -> bool:
    """
    Encode an error into the context of an RPC response. Returns True
    if the error can be recovered from, false otherwise
    """
    try:
        if isinstance(e, grpc.RpcError):
            # RPC error, propagate directly by copying details into context
            context.set_code(e.code())
            context.set_details(e.details())
            return e.code() not in GRPC_UNRECOVERABLE_ERRORS
    except Exception:
        # Extra precaution -- if encoding the RPC directly fails fallback
        # to treating it as a regular error
        pass
    # Non-RPC errors (or RPC errors that failed to encode above) are
    # reported as FAILED_PRECONDITION and treated as unrecoverable.
    context.set_code(grpc.StatusCode.FAILED_PRECONDITION)
    context.set_details(str(e))
    return False
def _id_is_newer(id1: int, id2: int) -> bool:
    """
    We should only replace cache entries with the responses for newer IDs.

    Most of the time newer IDs will be the ones with higher value, except when
    the req_id counter rolls over. We check for this case by checking the
    distance between the two IDs. If the distance is significant, then it's
    likely that the req_id counter rolled over, and the smaller id should
    still be used to replace the one in cache.
    """
    if abs(id2 - id1) > (INT32_MAX // 2):
        # A gap this large almost certainly means the counter wrapped
        # around, so the numerically smaller ID is actually the newer one.
        return id1 < id2
    return id1 > id2
class ResponseCache:
    """
    Cache for blocking method calls. Needed to prevent retried requests from
    being applied multiple times on the server, for example when the client
    disconnects. This is used to cache requests/responses sent through
    unary-unary RPCs to the RayletServicer.

    Note that no clean up logic is used, the last response for each thread
    will always be remembered, so at most the cache will hold N entries,
    where N is the number of threads on the client side. This relies on the
    assumption that a thread will not make a new blocking request until it has
    received a response for a previous one, at which point it's safe to
    overwrite the old response.

    The high level logic is:

    1. Before making a call, check the cache for the current thread.
    2. If present in the cache, check the request id of the cached
       response.
       a. If it matches the current request_id, then the request has been
          received before and we shouldn't re-attempt the logic. Wait for
          the response to become available in the cache, and then return it
       b. If it doesn't match, then this is a new request and we can
          proceed with calling the real stub. While the response is still
          being generated, temporarily keep (req_id, None) in the cache.
          Once the call is finished, update the cache entry with the
          new (req_id, response) pair. Notify other threads that may
          have been waiting for the response to be prepared.
    """

    def __init__(self):
        # Protects `cache` and lets duplicate requests block until the
        # in-flight call finishes.
        self.cv = threading.Condition()
        # thread_id -> (request_id, response); response is None while the
        # call for request_id is still in flight.
        self.cache: Dict[int, Tuple[int, Any]] = {}

    def check_cache(self, thread_id: int, request_id: int) -> Optional[Any]:
        """
        Check the cache for a given thread, and see if the entry in the cache
        matches the current request_id. Returns None if the request_id has
        not been seen yet, otherwise returns the cached result.

        Throws an error if the placeholder in the cache doesn't match the
        request_id -- this means that a new request evicted the old value in
        the cache, and that the RPC for `request_id` is redundant and the
        result can be discarded, i.e.:

        1. Request A is sent (A1)
        2. Channel disconnects
        3. Request A is resent (A2)
        4. A1 is received
        5. A2 is received, waits for A1 to finish
        6. A1 finishes and is sent back to client
        7. Request B is sent
        8. Request B overwrites cache entry
        9. A2 wakes up extremely late, but cache is now invalid

        In practice this is VERY unlikely to happen, but the error can at
        least serve as a sanity check or catch invalid request id's.
        """
        with self.cv:
            if thread_id in self.cache:
                cached_request_id, cached_resp = self.cache[thread_id]
                if cached_request_id == request_id:
                    while cached_resp is None:
                        # The call was started, but the response hasn't yet
                        # been added to the cache. Let go of the lock and
                        # wait until the response is ready.
                        self.cv.wait()
                        cached_request_id, cached_resp = self.cache[thread_id]
                        if cached_request_id != request_id:
                            raise RuntimeError(
                                "Cached response doesn't match the id of the "
                                "original request. This might happen if this "
                                "request was received out of order. The "
                                "result of the caller is no longer needed. "
                                f"({request_id} != {cached_request_id})"
                            )
                    return cached_resp
                if not _id_is_newer(request_id, cached_request_id):
                    # BUGFIX: message previously lacked the closing paren.
                    raise RuntimeError(
                        "Attempting to replace newer cache entry with older "
                        "one. This might happen if this request was received "
                        "out of order. The result of the caller is no "
                        f"longer needed. ({request_id} != {cached_request_id})"
                    )
            # New request: leave a placeholder so duplicates wait on it.
            self.cache[thread_id] = (request_id, None)
            return None

    def update_cache(self, thread_id: int, request_id: int, response: Any) -> None:
        """
        Inserts `response` into the cache for `request_id`.
        """
        with self.cv:
            cached_request_id, cached_resp = self.cache[thread_id]
            if cached_request_id != request_id or cached_resp is not None:
                # The cache was overwritten by a newer requester between
                # our call to check_cache and our call to update it.
                # This can't happen if the assumption that the cached requests
                # are all blocking on the client side, so if you encounter
                # this, check if any async requests are being cached.
                raise RuntimeError(
                    "Attempting to update the cache, but placeholders "
                    "do not match the current request_id. This might happen "
                    "if this request was received out of order. The result "
                    f"of the caller is no longer needed. ({request_id} != "
                    f"{cached_request_id})"
                )
            self.cache[thread_id] = (request_id, response)
            self.cv.notify_all()
class OrderedResponseCache:
    """
    Cache for streaming RPCs, i.e. the DataServicer. Relies on explicit
    ack's from the client to determine when it can clean up cache entries.
    """

    def __init__(self):
        # Highest req_id the client has acknowledged receiving.
        self.last_received = 0
        # Protects `cache` and lets duplicate requests block until the
        # in-flight call finishes.
        self.cv = threading.Condition()
        # req_id -> response; None marks a call still in flight. Insertion
        # order (ascending req_id) is relied upon by cleanup().
        self.cache: Dict[int, Any] = OrderedDict()

    def check_cache(self, req_id: int) -> Optional[Any]:
        """
        Check the cache for a given thread, and see if the entry in the cache
        matches the current request_id. Returns None if the request_id has
        not been seen yet, otherwise returns the cached result.
        """
        with self.cv:
            if _id_is_newer(self.last_received, req_id) or self.last_received == req_id:
                # Request is for an id that has already been cleared from
                # cache/acknowledged.
                # BUGFIX: fixed typos in this message ("accesss", missing
                # "been").
                raise RuntimeError(
                    "Attempting to access a cache entry that has already "
                    "been cleaned up. The client has already acknowledged "
                    f"receiving this response. ({req_id}, "
                    f"{self.last_received})"
                )
            if req_id in self.cache:
                cached_resp = self.cache[req_id]
                while cached_resp is None:
                    # The call was started, but the response hasn't yet been
                    # added to the cache. Let go of the lock and wait until
                    # the response is ready
                    self.cv.wait()
                    if req_id not in self.cache:
                        raise RuntimeError(
                            "Cache entry was removed. This likely means that "
                            "the result of this call is no longer needed."
                        )
                    cached_resp = self.cache[req_id]
                return cached_resp
            # First sighting of this id: leave a placeholder so concurrent
            # duplicates wait instead of re-running the call.
            self.cache[req_id] = None
            return None

    def update_cache(self, req_id: int, resp: Any) -> None:
        """
        Inserts `resp` into the cache for `req_id`.
        """
        with self.cv:
            self.cv.notify_all()
            if req_id not in self.cache:
                raise RuntimeError(
                    "Attempting to update the cache, but placeholder is "
                    "missing. This might happen on a redundant call to "
                    f"update_cache. ({req_id})"
                )
            self.cache[req_id] = resp

    def invalidate(self, e: Exception) -> bool:
        """
        Invalidate any partially populated cache entries, replacing their
        placeholders with the passed in exception. Useful to prevent a thread
        from waiting indefinitely on a failed call.

        Returns True if the cache contains an error, False otherwise
        """
        with self.cv:
            invalid = False
            for req_id in self.cache:
                if self.cache[req_id] is None:
                    # Fill pending placeholders so waiters wake with an error.
                    self.cache[req_id] = e
                if isinstance(self.cache[req_id], Exception):
                    invalid = True
            self.cv.notify_all()
        return invalid

    def cleanup(self, last_received: int) -> None:
        """
        Cleanup all of the cached requests up to last_received. Assumes that
        the cache entries were inserted in ascending order.
        """
        with self.cv:
            if _id_is_newer(last_received, self.last_received):
                self.last_received = last_received
            to_remove = []
            for req_id in self.cache:
                if _id_is_newer(last_received, req_id) or last_received == req_id:
                    to_remove.append(req_id)
                else:
                    # Entries are in ascending order, so everything past this
                    # point is newer than the ack and must be kept.
                    break
            for req_id in to_remove:
                del self.cache[req_id]
            self.cv.notify_all()
| |
import pytest
import random
import numpy.random
from numpy.testing import assert_equal
from thinc.api import fix_random_seed
from spacy import util
from spacy.lang.en import English
from spacy.language import Language
from spacy.pipeline import TextCategorizer
from spacy.tokens import Doc
from spacy.pipeline.tok2vec import DEFAULT_TOK2VEC_MODEL
from spacy.scorer import Scorer
from spacy.training import Example
from ..util import make_tempdir
# Single-label data: exactly one category is active per example.
TRAIN_DATA_SINGLE_LABEL = [
    ("I'm so happy.", {"cats": {"POSITIVE": 1.0, "NEGATIVE": 0.0}}),
    ("I'm so angry", {"cats": {"POSITIVE": 0.0, "NEGATIVE": 1.0}}),
]

# Multi-label data: several categories may be active for one example.
TRAIN_DATA_MULTI_LABEL = [
    ("I'm angry and confused", {"cats": {"ANGRY": 1.0, "CONFUSED": 1.0, "HAPPY": 0.0}}),
    ("I'm confused but happy", {"cats": {"ANGRY": 0.0, "CONFUSED": 1.0, "HAPPY": 1.0}}),
]
def make_get_examples_single_label(nlp):
    """Builds a zero-argument callable returning single-label Examples."""
    examples = [
        Example.from_dict(nlp.make_doc(text), annots)
        for text, annots in TRAIN_DATA_SINGLE_LABEL
    ]

    def get_examples():
        return examples

    return get_examples
def make_get_examples_multi_label(nlp):
    """Builds a zero-argument callable returning multi-label Examples."""
    examples = [
        Example.from_dict(nlp.make_doc(text), annots)
        for text, annots in TRAIN_DATA_MULTI_LABEL
    ]

    def get_examples():
        return examples

    return get_examples
@pytest.mark.skip(reason="Test is flakey when run with others")
def test_simple_train():
    """Sanity check: textcat learns that 'a'-texts are positive examples."""
    nlp = Language()
    textcat = nlp.add_pipe("textcat")
    textcat.add_label("answer")
    nlp.initialize()
    for i in range(5):
        for text, answer in [
            ("aaaa", 1.0),
            ("bbbb", 0),
            ("aa", 1.0),
            ("bbbbbbbbb", 0.0),
            ("aaaaaa", 1),
        ]:
            # NOTE(review): passes a raw (text, annotations) tuple to
            # nlp.update -- confirm this is accepted by the Language API.
            nlp.update((text, {"cats": {"answer": answer}}))
    doc = nlp("aaa")
    assert "answer" in doc.cats
    assert doc.cats["answer"] >= 0.5
@pytest.mark.skip(reason="Test is flakey when run with others")
def test_textcat_learns_multilabel():
    """Trains a small textcat on synthetic docs and checks label separation.

    Each doc's gold categories are determined by its middle tokens; after
    training, scores should fall on the correct side of 0.5.
    """
    random.seed(5)
    numpy.random.seed(5)
    docs = []
    nlp = Language()
    letters = ["a", "b", "c"]
    for w1 in letters:
        for w2 in letters:
            cats = {letter: float(w2 == letter) for letter in letters}
            docs.append((Doc(nlp.vocab, words=["d"] * 3 + [w1, w2] + ["d"] * 3), cats))
    random.shuffle(docs)
    textcat = TextCategorizer(nlp.vocab, width=8)
    for letter in letters:
        textcat.add_label(letter)
    optimizer = textcat.initialize(lambda: [])
    for i in range(30):
        losses = {}
        # BUGFIX: use each doc's own gold cats. Previously the loop unpacked
        # into an unused `cat` variable while `{"cats": cats}` reused the
        # variable leaked from the doc-building loop above, so every Example
        # got the LAST doc's annotations.
        examples = [Example.from_dict(doc, {"cats": cats}) for doc, cats in docs]
        textcat.update(examples, sgd=optimizer, losses=losses)
        random.shuffle(docs)
    for w1 in letters:
        for w2 in letters:
            doc = Doc(nlp.vocab, words=["d"] * 3 + [w1, w2] + ["d"] * 3)
            truth = {letter: w2 == letter for letter in letters}
            textcat(doc)
            for cat, score in doc.cats.items():
                if not truth[cat]:
                    assert score < 0.5
                else:
                    assert score > 0.5
@pytest.mark.parametrize("name", ["textcat", "textcat_multilabel"])
def test_label_types(name):
    """Labels must be strings; a non-string label raises ValueError."""
    nlp = Language()
    textcat = nlp.add_pipe(name)
    textcat.add_label("answer")
    with pytest.raises(ValueError):
        textcat.add_label(9)
@pytest.mark.parametrize("name", ["textcat", "textcat_multilabel"])
def test_no_label(name):
    """Initialization fails when no labels were added and none can be inferred."""
    nlp = Language()
    nlp.add_pipe(name)
    with pytest.raises(ValueError):
        nlp.initialize()
@pytest.mark.parametrize(
    "name,get_examples",
    [
        ("textcat", make_get_examples_single_label),
        ("textcat_multilabel", make_get_examples_multi_label),
    ],
)
def test_implicit_label(name, get_examples):
    """Labels can be inferred from the training examples at initialization."""
    nlp = Language()
    nlp.add_pipe(name)
    nlp.initialize(get_examples=get_examples(nlp))
@pytest.mark.parametrize("name", ["textcat", "textcat_multilabel"])
def test_no_resize(name):
    """The default textcat models can't grow new labels once initialized."""
    nlp = Language()
    textcat = nlp.add_pipe(name)
    textcat.add_label("POSITIVE")
    textcat.add_label("NEGATIVE")
    nlp.initialize()
    assert textcat.model.get_dim("nO") >= 2
    # this throws an error because the textcat can't be resized after initialization
    with pytest.raises(ValueError):
        textcat.add_label("NEUTRAL")
def test_error_with_multi_labels():
    """The single-label 'textcat' component rejects multi-label training data."""
    nlp = Language()
    nlp.add_pipe("textcat")
    train_examples = [
        Example.from_dict(nlp.make_doc(text), annotations)
        for text, annotations in TRAIN_DATA_MULTI_LABEL
    ]
    with pytest.raises(ValueError):
        nlp.initialize(get_examples=lambda: train_examples)
@pytest.mark.parametrize(
    "name,get_examples, train_data",
    [
        ("textcat", make_get_examples_single_label, TRAIN_DATA_SINGLE_LABEL),
        ("textcat_multilabel", make_get_examples_multi_label, TRAIN_DATA_MULTI_LABEL),
    ],
)
def test_initialize_examples(name, get_examples, train_data):
    """initialize() accepts a valid get_examples callback and rejects bad ones."""
    nlp = Language()
    textcat = nlp.add_pipe(name)
    for _, annotations in train_data:
        # Only the label names matter here; the per-label scores are unused.
        for label in annotations.get("cats"):
            textcat.add_label(label)
    # you shouldn't really call this more than once, but for testing it should be fine
    nlp.initialize()
    nlp.initialize(get_examples=get_examples(nlp))
    with pytest.raises(TypeError):
        # get_examples must return an iterable of Example objects, not None.
        nlp.initialize(get_examples=lambda: None)
    with pytest.raises(TypeError):
        # get_examples must be a callable; calling the factory without an nlp
        # argument fails with TypeError.
        nlp.initialize(get_examples=get_examples())
def test_overfitting_IO():
    """Quickly overfit the single-label textcat and verify the model survives
    a serialization round-trip with identical predictions."""
    fix_random_seed(0)
    nlp = English()
    textcat = nlp.add_pipe("textcat")
    train_examples = [
        Example.from_dict(nlp.make_doc(text), annotations)
        for text, annotations in TRAIN_DATA_SINGLE_LABEL
    ]
    optimizer = nlp.initialize(get_examples=lambda: train_examples)
    assert textcat.model.get_dim("nO") == 2
    for _ in range(50):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)
    assert losses["textcat"] < 0.01
    # Run the trained pipeline on a held-out sentence.
    test_text = "I am happy."
    cats = nlp(test_text).cats
    assert cats["POSITIVE"] > 0.9
    assert cats["POSITIVE"] + cats["NEGATIVE"] == pytest.approx(1.0, 0.001)
    # Also test the results are still the same after IO
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        reloaded = util.load_model_from_path(tmp_dir)
        cats2 = reloaded(test_text).cats
        assert cats2["POSITIVE"] > 0.9
        assert cats2["POSITIVE"] + cats2["NEGATIVE"] == pytest.approx(1.0, 0.001)
    # Scoring the training data should be perfect after overfitting.
    scores = nlp.evaluate(train_examples)
    for score_key in ("cats_micro_f", "cats_macro_f", "cats_macro_auc", "cats_score"):
        assert scores[score_key] == 1.0
    assert "cats_score_desc" in scores
    # Batched, repeated and unbatched processing must all agree.
    texts = ["Just a sentence.", "I like green eggs.", "I am happy.", "I eat ham."]
    batch_cats_1 = [doc.cats for doc in nlp.pipe(texts)]
    batch_cats_2 = [doc.cats for doc in nlp.pipe(texts)]
    no_batch_cats = [nlp(text).cats for text in texts]
    assert_equal(batch_cats_1, batch_cats_2)
    assert_equal(batch_cats_1, no_batch_cats)
def test_overfitting_IO_multi():
    """Quickly overfit the multi-label textcat and verify the model survives
    a serialization round-trip with identical predictions."""
    fix_random_seed(0)
    nlp = English()
    textcat = nlp.add_pipe("textcat_multilabel")
    train_examples = [
        Example.from_dict(nlp.make_doc(text), annotations)
        for text, annotations in TRAIN_DATA_MULTI_LABEL
    ]
    optimizer = nlp.initialize(get_examples=lambda: train_examples)
    assert textcat.model.get_dim("nO") == 3
    for _ in range(100):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)
    assert losses["textcat_multilabel"] < 0.01
    # Run the trained pipeline on a held-out sentence.
    test_text = "I am confused but happy."
    cats = nlp(test_text).cats
    assert cats["HAPPY"] > 0.9
    assert cats["CONFUSED"] > 0.9
    # Also test the results are still the same after IO
    with make_tempdir() as tmp_dir:
        nlp.to_disk(tmp_dir)
        reloaded = util.load_model_from_path(tmp_dir)
        cats2 = reloaded(test_text).cats
        assert cats2["HAPPY"] > 0.9
        assert cats2["CONFUSED"] > 0.9
    # Scoring the training data should be perfect after overfitting.
    scores = nlp.evaluate(train_examples)
    assert scores["cats_micro_f"] == 1.0
    assert scores["cats_macro_f"] == 1.0
    assert "cats_score_desc" in scores
    # Batched, repeated and unbatched processing must all agree.
    texts = ["Just a sentence.", "I like green eggs.", "I am happy.", "I eat ham."]
    batch_cats_1 = [doc.cats for doc in nlp.pipe(texts)]
    batch_cats_2 = [doc.cats for doc in nlp.pipe(texts)]
    no_batch_cats = [nlp(text).cats for text in texts]
    assert_equal(batch_cats_1, batch_cats_2)
    assert_equal(batch_cats_1, no_batch_cats)
# fmt: off
@pytest.mark.parametrize(
    "name,train_data,textcat_config",
    [
        ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": False, "ngram_size": 1, "no_output_layer": False}),
        ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": True, "ngram_size": 4, "no_output_layer": False}),
        ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": False, "ngram_size": 3, "no_output_layer": True}),
        ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": True, "ngram_size": 2, "no_output_layer": True}),
        ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": False, "ngram_size": 1, "no_output_layer": False}}),
        ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatEnsemble.v2", "tok2vec": DEFAULT_TOK2VEC_MODEL, "linear_model": {"@architectures": "spacy.TextCatBOW.v1", "exclusive_classes": True, "ngram_size": 5, "no_output_layer": False}}),
        ("textcat", TRAIN_DATA_SINGLE_LABEL, {"@architectures": "spacy.TextCatCNN.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": True}),
        ("textcat_multilabel", TRAIN_DATA_MULTI_LABEL, {"@architectures": "spacy.TextCatCNN.v1", "tok2vec": DEFAULT_TOK2VEC_MODEL, "exclusive_classes": False}),
    ],
)
# fmt: on
def test_textcat_configs(name, train_data, textcat_config):
    """Every supported model architecture must initialize and train cleanly."""
    pipe_config = {"model": textcat_config}
    nlp = English()
    textcat = nlp.add_pipe(name, config=pipe_config)
    train_examples = []
    for text, annotations in train_data:
        train_examples.append(Example.from_dict(nlp.make_doc(text), annotations))
        # Only the label names matter here; the per-label scores are unused.
        for label in annotations.get("cats"):
            textcat.add_label(label)
    optimizer = nlp.initialize()
    # A handful of updates is enough to prove the config trains without errors.
    for i in range(5):
        losses = {}
        nlp.update(train_examples, sgd=optimizer, losses=losses)
def test_positive_class():
    """positive_label is only valid for the binary single-label textcat."""
    nlp = English()
    textcat = nlp.add_pipe("textcat")
    get_examples = make_get_examples_single_label(nlp)
    textcat.initialize(get_examples, labels=["POS", "NEG"], positive_label="POS")
    assert textcat.labels == ("POS", "NEG")
    assert textcat.cfg["positive_label"] == "POS"
    multilabel = nlp.add_pipe("textcat_multilabel")
    get_examples = make_get_examples_multi_label(nlp)
    # The multilabel component does not accept a positive_label at all.
    with pytest.raises(TypeError):
        multilabel.initialize(
            get_examples, labels=["POS", "NEG"], positive_label="POS"
        )
    multilabel.initialize(get_examples, labels=["FICTION", "DRAMA"])
    assert multilabel.labels == ("FICTION", "DRAMA")
    assert "positive_label" not in multilabel.cfg
def test_positive_class_not_present():
    """positive_label must be one of the provided labels."""
    nlp = English()
    textcat = nlp.add_pipe("textcat")
    get_examples = make_get_examples_single_label(nlp)
    labels = ["SOME", "THING"]
    with pytest.raises(ValueError):
        textcat.initialize(get_examples, labels=labels, positive_label="POS")
def test_positive_class_not_binary():
    """positive_label requires exactly two labels."""
    nlp = English()
    textcat = nlp.add_pipe("textcat")
    get_examples = make_get_examples_multi_label(nlp)
    labels = ["SOME", "THING", "POS"]
    with pytest.raises(ValueError):
        textcat.initialize(get_examples, labels=labels, positive_label="POS")
def test_textcat_evaluation():
    """Check per-label and micro-averaged PRF scores from Scorer.score_cats."""
    nlp = English()
    ref1 = nlp("one")
    ref1.cats = {"winter": 1.0, "summer": 1.0, "spring": 1.0, "autumn": 1.0}
    pred1 = nlp("one")
    pred1.cats = {"winter": 1.0, "summer": 0.0, "spring": 1.0, "autumn": 1.0}
    ref2 = nlp("two")
    ref2.cats = {"winter": 0.0, "summer": 0.0, "spring": 1.0, "autumn": 1.0}
    pred2 = nlp("two")
    pred2.cats = {"winter": 1.0, "summer": 0.0, "spring": 0.0, "autumn": 1.0}
    examples = [Example(pred1, ref1), Example(pred2, ref2)]
    scores = Scorer().score_cats(
        examples, "cats", labels=["winter", "summer", "spring", "autumn"]
    )
    per_type = scores["cats_f_per_type"]
    # Hand-computed precision/recall from the two examples above.
    assert per_type["winter"]["p"] == 1 / 2
    assert per_type["winter"]["r"] == 1 / 1
    assert per_type["summer"]["p"] == 0
    assert per_type["summer"]["r"] == 0 / 1
    assert per_type["spring"]["p"] == 1 / 1
    assert per_type["spring"]["r"] == 1 / 2
    assert per_type["autumn"]["p"] == 2 / 2
    assert per_type["autumn"]["r"] == 2 / 2
    assert scores["cats_micro_p"] == 4 / 5
    assert scores["cats_micro_r"] == 4 / 6
| |
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Base metrics support is extended for a concrete monitoring system."""
import datetime
import logging
import re
import sys
import threading
import time
class Metric(object):
    """A metric with unique combination of name and bindings.

    Instances are created by a MetricFamily; they share the family's name
    and are distinguished from one another by their label bindings.
    """

    @property
    def family(self):
        """The metric family this instance belongs to.

        Members of a family share the same name but different label bindings.
        """
        return self.__family

    @property
    def name(self):
        """The metric name (delegated to the owning family)."""
        return self.__family.name

    @property
    def labels(self):
        """The label bindings distinguishing this instance within its family."""
        return self.__labels

    @property
    def last_modified(self):
        """In real seconds."""
        return self.__last_modified

    @property
    def mutex(self):
        """Lock guarding updates to this metric's value."""
        return self.__mutex

    def __init__(self, family, labels):
        """Constructs a metric bound to |family| with the given |labels|.

        Args:
          family: [MetricFamily] Provides the name and owning registry.
          labels: [dict] The label bindings for this instance.
        """
        self.__mutex = threading.Lock()
        # NOTE: the previous redundant copy of family.name was removed;
        # the name property already delegates to the family.
        self.__last_modified = None
        self.__family = family
        self.__labels = labels

    def touch(self, utc=None):
        """Update last modified time and queue this metric for pushing.

        Args:
          utc: [datetime] Explicit modification time; defaults to utcnow.
        """
        self.__last_modified = utc or datetime.datetime.utcnow()
        self.__family.registry.queue_update(self)
class Counter(Metric):
    """A metric whose value only ever increases."""

    def __init__(self, family, labels):
        """Constructs the counter with an initial value of zero."""
        super(Counter, self).__init__(family, labels)
        self.__value = 0

    @property
    def count(self):
        """Returns the current [local] counter value."""
        return self.__value

    def inc(self, amount=1, utc=None):
        """Adds |amount| to the counter and marks it modified.

        Args:
          amount: [number] How much to add (defaults to 1).
          utc: [datetime] Optional explicit modification time.
        """
        with self.mutex:
            self.__value += amount
            self.touch(utc=utc)
class Gauge(Metric):
    """A metric that can move up and down, or be computed on demand."""

    def __init__(self, family, labels, compute=None):
        """Constructs the gauge.

        Args:
          compute: [callable] Optional function producing the gauge value;
              when omitted, the gauge reports its internally stored value.
        """
        super(Gauge, self).__init__(family, labels)
        read_stored = lambda: self.__level
        self.__level = 0
        self.__compute = compute or read_stored

    @property
    def value(self):
        """The current gauge reading."""
        return self.__compute()

    def track(self, func, *pos_args, **kwargs):
        """Add to gauge while function call is in progress."""
        try:
            self.inc()
            return func(*pos_args, **kwargs)
        finally:
            self.dec()

    def set(self, value, utc=None):
        """Set the gauge to an absolute value."""
        with self.mutex:
            self.__level = value
            self.touch(utc=utc)

    def inc(self, amount=1, utc=None):
        """Increment the gauge by an amount."""
        with self.mutex:
            self.__level += amount
            self.touch(utc=utc)

    def dec(self, amount=1, utc=None):
        """Decrement the gauge by an amount."""
        with self.mutex:
            self.__level -= amount
            self.touch(utc=utc)
class Timer(Metric):
    """Observes how long functions take to execute."""

    def __init__(self, family, labels):
        """Constructs a timer with no observations recorded."""
        super(Timer, self).__init__(family, labels)
        self.__num_observations = 0
        self.__seconds_sum = 0

    @property
    def count(self):
        """The number of timings captured."""
        return self.__num_observations

    @property
    def total_seconds(self):
        """The total time across all the captured timings."""
        return self.__seconds_sum

    def observe(self, seconds, utc=None):
        """Capture a timing observation.

        Args:
          seconds: [float] Duration of the observed call.
          utc: [datetime] Optional explicit modification time.
        """
        with self.mutex:
            self.__num_observations += 1
            self.__seconds_sum += seconds
            self.touch(utc=utc)
class MetricFamily(object):
    """A Factory for a counter or Gauge metric with specifically bound labels."""

    # The kinds of metric a family can contain.
    GAUGE = 'GAUGE'
    COUNTER = 'COUNTER'
    TIMER = 'TIMER'

    @property
    def start_time(self):
        """The start time values are relative to."""
        return self.__registry.start_time

    @property
    def name(self):
        """The name for this family will be the name of its Metric instances."""
        return self.__name

    @property
    def registry(self):
        """The MetricsRegistry containing this family."""
        return self.__registry

    @property
    def family_type(self):
        """Returns the type of metrics in this family (GAUGE, COUNTER, TIMER)."""
        return self.__family_type

    @property
    def mutex(self):
        """Returns lock for this family."""
        return self.__mutex

    @property
    def instance_list(self):
        """Return all the label binding metric variations within this family."""
        return self.__instances.values()

    def __init__(self, registry, name, factory, family_type):
        """Constructs a family.

        Args:
          registry: [BaseMetricsRegistry] The registry owning this family.
          name: [string] The metric name shared by all instances.
          factory: [callable] Called as factory(family, labels) to create
              new metric instances.
          family_type: One of GAUGE, COUNTER or TIMER.
        """
        self.__mutex = threading.Lock()
        self.__name = name
        self.__factory = factory
        self.__instances = {}
        self.__registry = registry
        self.__family_type = family_type

    def get(self, labels):
        """Returns a metric instance with bound labels.

        Equal label bindings always map to the same instance, regardless of
        the dict's iteration order.
        """
        # Sort the bindings so logically-equal label dicts produce the same
        # cache key; joining in raw iteration order minted duplicate
        # instances when the same labels arrived in a different order.
        key = ''.join('{0}={1}'.format(label, value)
                      for label, value in sorted(labels.items()))
        with self.__mutex:
            got = self.__instances.get(key)
            if got is None:
                got = self.__factory(self, labels)
                self.__instances[key] = got
            return got
class BaseMetricsRegistry(object):
    """Provides base class interface for metrics management.

    Specific metric stores would subclass this to specialize to push
    into their own systems.

    While having this registry be abstract is overkill, it is for what feels
    like practical reasons where there is no easy to use system for our use
    case of short lived batch jobs so there's going to be a lot of maintenance
    here and trials of different systems making this investment more appealing.
    """
    # pylint: disable=too-many-public-methods

    @staticmethod
    def default_determine_outcome_labels(result, base_labels):
        """Return the outcome labels for a set of tracking labels.

        Success is derived from whether an exception is currently being
        handled, so this is intended to be called from an except/finally.
        """
        ex_type, _, _ = sys.exc_info()
        labels = dict(base_labels)
        labels.update({
            'success': ex_type is None,
            'exception_type': '' if ex_type is None else ex_type.__name__
        })
        return labels

    @staticmethod
    def determine_outcome_labels_from_error_result(result, base_labels):
        """Determine outcome labels for calls that return an error object.

        Args:
          result: [Exception or None] The error returned by the call, if any.
          base_labels: [dict] The labels to augment with outcome info.
        """
        if result is None:
            # Call itself threw an exception before it could return the error
            _, result, _ = sys.exc_info()
        labels = dict(base_labels)
        labels.update({
            'success': result is None,
            'exception_type': '' if result is None else result.__class__.__name__
        })
        return labels

    @property
    def options(self):
        """Configured options."""
        return self.__options

    @property
    def start_time(self):
        """When the registry started -- values are relative to this utc time."""
        return self.__start_time

    @property
    def metric_family_list(self):
        """Return all the metric families."""
        return self.__metric_families.values()

    @staticmethod
    def __make_context_labels(options):
        """Parse "name=value,..." bindings out of the configured options.

        Raises:
          ValueError: if a binding does not match the name=value form.
        """
        if not hasattr(options, 'monitoring_context_labels'):
            return {}

        labels = {}
        matcher = re.compile(r'(\w+)=(.*)')
        for binding in (options.monitoring_context_labels or '').split(','):
            if not binding:
                continue
            try:
                # A non-matching binding makes match None; the resulting
                # AttributeError is reported as a ValueError below.
                match = matcher.match(binding)
                labels[match.group(1)] = match.group(2)
            except Exception as ex:
                raise ValueError(
                    'Invalid monitoring_context_labels binding "%s": %s' % (
                        binding, ex))
        return labels

    def __init__(self, options):
        """Constructs registry with options from init_argument_parser."""
        self.__start_time = datetime.datetime.utcnow()
        self.__options = options
        self.__pusher_thread = None
        self.__pusher_thread_event = threading.Event()
        self.__metric_families = {}
        self.__family_mutex = threading.Lock()
        self.__updated_metrics = set([])
        self.__update_mutex = threading.Lock()
        self.__inject_labels = self.__make_context_labels(options)
        if self.__inject_labels:
            logging.debug('Injecting additional metric labels %s',
                          self.__inject_labels)

    def _do_make_family(self, family_type, name, label_names):
        """Creates new metric-system specific gauge family.

        Args:
          family_type: MetricFamily.COUNTER, GAUGE, or TIMER
          name: [string] Metric name.
          label_names: [list of string] The labels used to distinguish instances.

        Returns:
          specialized MetricFamily for the given type and registry implementation.
        """
        raise NotImplementedError()

    def queue_update(self, metric):
        """Add metric to list of metrics to push out."""
        with self.__update_mutex:
            self.__updated_metrics.add(metric)

    def inc_counter(self, name, labels, **kwargs):
        """Increment the counter with the given name and labels.

        Extra keyword arguments are forwarded to Counter.inc (amount, utc).
        """
        counter = self.get_metric(MetricFamily.COUNTER, name, labels)
        counter.inc(**kwargs)
        return counter

    def count_call(self, name, labels, func, *pos_args, **kwargs):
        """Track number of completed calls to the given function."""
        labels = dict(labels)
        success = False
        try:
            result = func(*pos_args, **kwargs)
            success = True
            return result
        finally:
            labels['success'] = success
            # The call's kwargs belong to |func|, not Counter.inc --
            # forwarding them here raised TypeError for any keyword argument.
            self.inc_counter(name, labels)

    def set(self, name, labels, value):
        """Sets the implied gauge with the specified value."""
        gauge = self.get_metric(MetricFamily.GAUGE, name, labels)
        gauge.set(value)
        return gauge

    def track_call(self, name, labels, func, *pos_args, **kwargs):
        """Track number of active calls to the given function."""
        gauge = self.get_metric(MetricFamily.GAUGE, name, labels)
        return gauge.track(func, *pos_args, **kwargs)

    def observe_timer(self, name, labels, seconds):
        """Add an observation to the specified timer."""
        timer = self.get_metric(MetricFamily.TIMER, name, labels)
        timer.observe(seconds)
        return timer

    def time_call(self, name, labels, label_func,
                  time_func, *pos_args, **kwargs):
        """Time a call to |time_func| and record it under outcome labels."""
        # Bind start_time before the try so the finally clause can always
        # compute a duration.
        start_time = time.time()
        try:
            result = time_func(*pos_args, **kwargs)
            outcome_labels = label_func(result, labels)
            return result
        except:
            try:
                outcome_labels = label_func(None, labels)
            except Exception as ex:
                # Fall back to the base labels so the finally clause can
                # still record the timing instead of raising NameError.
                # (Exception objects have no .message in Python 3.)
                outcome_labels = labels
                logging.exception('label_func failed with %s', ex)
            # Re-raise the original exception from time_func.
            raise
        finally:
            timer = self.get_metric(MetricFamily.TIMER, name, outcome_labels)
            timer.observe(time.time() - start_time)

    def lookup_family_or_none(self, name):
        """Return the registered family with the given name, if any."""
        return self.__metric_families.get(name)

    def __normalize_labels(self, labels):
        """Overlay the given labels onto the injected context labels."""
        result = dict(self.__inject_labels)
        result.update(labels)
        return result

    def get_metric(self, family_type, name, labels):
        """Return instance in family with given name and labels.

        Returns the existing instance if present, otherwise makes a new one.
        """
        labels = self.__normalize_labels(labels)
        family = self.__metric_families.get(name)
        if family:
            if family.family_type != family_type:
                raise TypeError('{have} is not a {want}'.format(
                    have=family, want=family_type))
            return family.get(labels)

        candidate = self._do_make_family(family_type, name, labels.keys())
        with self.__family_mutex:
            # If another thread registered a family first, use theirs so the
            # new instance lives on the family that actually gets flushed.
            family = self.__metric_families.setdefault(name, candidate)
        return family.get(labels)

    def track_and_time_call(
            self, name, labels, outcome_labels_func,
            result_func, *pos_args, **kwargs):
        """Call the function with the given arguments while instrumenting it.

        This will instrument both tracking of call counts in progress
        as well as the final outcomes in terms of performance and outcome.
        """
        tracking_name = name + '_InProgress'
        outcome_name = name + '_Outcome'
        return self.track_call(
            tracking_name, labels,
            self.time_call,
            outcome_name, labels, outcome_labels_func,
            result_func, *pos_args, **kwargs)

    def start_pusher_thread(self):
        """Starts thread for pushing metrics."""
        def delay_func():
            """Helper function for push thread"""
            # pylint: disable=broad-except
            try:
                if self.__pusher_thread:
                    self.__pusher_thread_event.wait(
                        self.options.monitoring_flush_frequency)
                # A cleared thread reference signals the loop to stop.
                return self.__pusher_thread is not None
            except Exception as ex:
                logging.error('Pusher thread delay func caught %s', ex)
                return False

        self.__pusher_thread = threading.Thread(
            name='MetricsManager', target=self.flush_every_loop,
            args=[delay_func])
        self.__pusher_thread.start()
        return True

    def stop_pusher_thread(self):
        """Stop thread for pushing metrics."""
        logging.debug('Signaling pusher thread %s', self.__pusher_thread)
        pusher_thread = self.__pusher_thread
        self.__pusher_thread = None
        self.__pusher_thread_event.set()
        # Give a chance for the thread to self-terminate before we continue.
        # It's ok if this times out, but logging is cleaner to give it a chance.
        if pusher_thread is not None:
            pusher_thread.join(2)

    def flush_every_loop(self, ready_func):
        """Start a loop that pushes while the ready_func is true."""
        logging.debug('Starting loop to push metrics...')
        while ready_func():
            self.flush_updated_metrics()
        logging.debug('Ending loop to push metrics...')

    def _do_flush_updated_metrics(self, updated_metrics):
        """Writes metrics to the server."""
        raise NotImplementedError()

    def _do_flush_final_metrics(self):
        """Notifies that we're done updating and it is safe to push final metrics.

        This is only informative for implementations that are not incremental.
        """
        pass

    def flush_final_metrics(self):
        """Push the final metrics to the metrics server."""
        if not self.options.monitoring_enabled:
            logging.warning('Monitoring disabled -- dont push final metrics.')
            return
        self._do_flush_final_metrics()

    def flush_updated_metrics(self):
        """Push incremental metrics to the metrics server."""
        if not self.options.monitoring_enabled:
            logging.warning(
                'Monitoring disabled -- dont push incremental metrics.')
            return
        with self.__update_mutex:
            updated_metrics = self.__updated_metrics
            # Swap in a fresh set so new updates accumulate while we flush.
            self.__updated_metrics = set([])
        self._do_flush_updated_metrics(updated_metrics)
| |
"""A contour component. This component wraps around the
tvtk.ContourFilter and provides convenient options to either
automatically generate a specified number of contours between a given
minimum and maximum value or explicitly specify the contours. This
component may be used for any input data. The component also provides
a convenient option to create "filled contours".
"""
# Author: Prabhu Ramachandran <prabhu_r@users.sf.net>
# Copyright (c) 2005, Enthought, Inc.
# License: BSD Style.
# Standard library imports.
import numpy
# Enthought library imports.
from traits.api import Instance, List, Tuple, Bool, Range, \
Float, Property
from tvtk.api import tvtk
# Local imports.
from mayavi.core.component import Component
from mayavi.core.common import error
from mayavi.components.common \
import get_module_source, convert_to_poly_data
######################################################################
# `Contour` class.
######################################################################
class Contour(Component):
    """Wraps tvtk.ContourFilter (or BandedPolyDataContourFilter when
    `filled_contours` is on) and exposes contours that are either listed
    explicitly or generated automatically over the data range.
    """

    # The version of this class.  Used for persistence.
    __version__ = 0

    # The contour filter being currently used.
    contour_filter = Property

    # Specify if filled contours are generated.
    filled_contours = Bool(False, desc='if filled contours are '\
                           'to be generated')

    # Specify if contours are generated explicitly or automatically.
    auto_contours = Bool(False, desc='if contours are '\
                         'given explicitly or automatically computed')

    # Number of contours, used when `auto_contours` are chosen.
    number_of_contours = Range(1, 100000, enter_set=True, auto_set=False,
                               desc='number of contours to generate')

    # Minimum contour, this is the starting value when `auto_contours`
    # is turned on.
    minimum_contour = Range(value=0.0,
                            low='_data_min',
                            high='_data_max',
                            enter_set=True,
                            auto_set=False,
                            desc='the starting contour value')

    # Maximum contour, this is the last contour when `auto_contours`
    # is turned on.
    maximum_contour = Range(value=0.0,
                            low='_data_min',
                            high='_data_max',
                            enter_set=True,
                            auto_set=False,
                            desc='the ending contour value')

    # The explicit contours to create.  These specify the contours
    # explicitly and are used when `auto_contours` is turned off.  The
    # traits of the items in the list are dynamically generated based
    # on input data.
    contours = List(Range(value='_default_contour',
                          low='_data_min',
                          high='_data_max',
                          enter_set=True,
                          auto_set=False,
                          ),
                    rows=3,
                    desc='explicitly the contours to be generated')

    # Specify if the filled contour option should be shown in the view
    # or not.  This is useful in situations like the iso_surface
    # module where it does not make sense to use filled contours at
    # all.
    show_filled_contours = Bool(True)

    # Specify if the lower and upper bound for the data is to be
    # automatically reset or not.
    auto_update_range = Bool(True,
                             desc='if the contour range is updated automatically')

    ########################################
    # The component's view
    #view = View(Group(Item(name='filled_contours',
    #                       defined_when='show_filled_contours'),
    #                  Item(name='auto_contours'), '_',
    #                  Item(name='contours',
    #                       style='custom',
    #                       visible_when='not auto_contours'),
    #                  Item(name='number_of_contours',
    #                       visible_when='auto_contours'),
    #                  Item(name='minimum_contour',
    #                       visible_when='auto_contours'),
    #                  Item(name='maximum_contour',
    #                       visible_when='auto_contours'),
    #                  Item(name='auto_update_range'),
    #                  Item(name='_data_min',
    #                       label='Data minimum',
    #                       visible_when='not auto_update_range'),
    #                  Item(name='_data_max',
    #                       label='Data maximum',
    #                       visible_when='not auto_update_range'),
    #                  )
    #            )

    ########################################
    # Private traits.
    _current_range = Tuple

    # The minimum value of the input data.  Set to a very large negative value
    # to avoid errors prior to the object being added to the mayavi
    # tree.
    _data_min = Float(-1e20, enter_set=True, auto_set=False)

    # The maximum value of the input data.  Set to a very large value
    # to avoid errors prior to the object being added to the mayavi
    # tree.
    _data_max = Float(1e20, enter_set=True, auto_set=False)

    # The default value of the contour to add, this property is computed
    # from the _data_min and _data_max traits and used when the user
    # adds a contour manually from the UI when auto_contours are turned
    # off.
    _default_contour = Property(Float)

    # The contour filter.
    _cont_filt = Instance(tvtk.ContourFilter, args=())

    # The filled contour filter.  This filter generates the filled contours.
    _fill_cont_filt = Instance(tvtk.BandedPolyDataContourFilter, args=(),
                               kw={'clipping': 1, 'scalar_mode': 'value'})

    ######################################################################
    # `object` interface
    ######################################################################
    def __get_pure_state__(self):
        """Strip dynamically-created traits from the pickled state."""
        d = super(Contour, self).__get_pure_state__()
        # These traits are dynamically created.
        for name in ('_data_min', '_data_max', '_default_contour'):
            d.pop(name, None)
        return d

    ######################################################################
    # `Component` interface
    ######################################################################
    def update_pipeline(self):
        """Override this method so that it *updates* the tvtk pipeline
        when data upstream is known to have changed.

        This method is invoked (automatically) when the input fires a
        `pipeline_changed` event.
        """
        if not self._has_input():
            return
        cf = self._set_contour_input()
        first = False
        # An empty _current_range means the component has never seen data.
        if len(self._current_range) == 0:
            first = True
        self._update_ranges()
        # If this is the first time, create a default contour
        if first:
            cr = self._current_range
            # Seed a single contour at the mid-point of the data range.
            self.contours = [(cr[0] + cr[1])/2]
            self.minimum_contour = cr[0]
            self.maximum_contour = cr[1]
        self.outputs = [cf.output]

    def update_data(self):
        """Override this method to do what is necessary when upstream
        data changes.

        This method is invoked (automatically) when any of the inputs
        sends a `data_changed` event.
        """
        self._update_ranges()
        # Propagate the data changed event.
        self.data_changed = True

    def has_output_port(self):
        """ The contour filter has an output port."""
        return True

    def get_output_object(self):
        """ Returns the output port."""
        return self.contour_filter.output_port

    ######################################################################
    # Non-public methods.
    ######################################################################
    def _contours_items_changed(self, list_event):
        # Handles in-place edits of the `contours` list.
        if self.auto_contours or not self._has_input():
            return
        cf = self.contour_filter
        added, removed, index = (list_event.added, list_event.removed,
                                 list_event.index)
        if len(added) == len(removed):
            # A single value was replaced; update just that contour.
            cf.set_value(index, added[0])
            cf.update()
            self.data_changed = True
        else:
            # The list length changed; rebuild all contour values.
            self._contours_changed(self.contours)

    def _contours_changed(self, values):
        # Handles wholesale replacement of the `contours` list.
        if self.auto_contours or not self._has_input():
            return
        cf = self.contour_filter
        cf.number_of_contours = len(values)
        for i, x in enumerate(values):
            cf.set_value(i, x)
        cf.update()
        self.data_changed = True

    def _update_ranges(self):
        # Here we get the module's source since the input of this
        # component may not in general represent the entire object.
        if not self.auto_update_range:
            return
        src = get_module_source(self.inputs[0])
        sc = src.outputs[0].point_data.scalars
        if sc is not None:
            sc_array = sc.to_array()
            has_nan = numpy.isnan(sc_array).any()
            if has_nan:
                # sc.range would include NaNs; compute a NaN-safe range.
                rng = (float(numpy.nanmin(sc_array)),
                       float(numpy.nanmax(sc_array)))
            else:
                rng = sc.range
        else:
            error('Cannot contour: No scalars in input data!')
            rng = (0.0, 1.0)
        if rng != self._current_range:
            # Update the bounds quietly, then clip existing contours into
            # the new range.
            self.set(_data_min=rng[0], _data_max=rng[1],
                     trait_change_notify=False)
            self._clip_contours(rng)
            self._current_range = rng

    def _minimum_contour_changed(self, value):
        self._do_auto_contours()

    def _maximum_contour_changed(self, value):
        self._do_auto_contours()

    def _number_of_contours_changed(self, value):
        self._do_auto_contours()

    def _auto_contours_changed(self, value):
        if value:
            self._do_auto_contours()
        else:
            self._contours_changed(self.contours)

    def _auto_update_range_changed(self, value):
        if value:
            rng = self._data_min, self._data_max
            self._current_range = rng
            self._update_ranges()
            # Re-fire property-changed events so dependent Range traits
            # pick up the possibly new bounds.
            self.trait_property_changed('_data_min', rng[0],
                                        self._data_min)
            self.trait_property_changed('_data_max', rng[1],
                                        self._data_max)

    def _do_auto_contours(self):
        # Regenerates evenly spaced contours between min and max.
        if not self._has_input():
            return
        if self.auto_contours:
            minc, maxc = self.minimum_contour, self.maximum_contour
            self.contour_filter.generate_values(self.number_of_contours,
                                                min(minc, maxc),
                                                max(minc, maxc))
            self.data_changed = True

    def _filled_contours_changed(self, val):
        # Switches between the plain and the filled contour filter.
        if not self._has_input():
            return
        cf = self._set_contour_input()
        # This will trigger a change.
        self._auto_contours_changed(self.auto_contours)
        self.outputs = [cf.output]

    def _get_contour_filter(self):
        # Property getter for `contour_filter`.
        if self.filled_contours:
            return self._fill_cont_filt
        else:
            return self._cont_filt

    def _set_contour_input(self):
        """Sets the input to the appropriate contour filter and
        returns the currently used contour filter.
        """
        inp = self.inputs[0].outputs[0]
        cf = self.contour_filter
        if self.filled_contours:
            # The banded filter requires polydata input.
            inp = convert_to_poly_data(inp)
            self.configure_input_data(cf, inp)
        else:
            self.configure_connection(cf, self.inputs[0])
        cf.update()
        return cf

    def _has_input(self):
        """Returns if this component has a valid input."""
        if (len(self.inputs) > 0) and \
               (len(self.inputs[0].outputs) > 0):
            return True
        else:
            return False

    def _clip_contours(self, rng):
        """Clips the contour related values when the data range has
        changed.  The new range is given as the argument.
        """
        ctr = []
        dmin, dmax = rng
        # Clamp every explicit contour into the new data range.
        ctr = [min(max(x, dmin), dmax) for x in self.contours]
        if self.auto_contours or ctr != self.contours:
            self.contours = ctr
        self.set(minimum_contour=self._data_min,
                 maximum_contour=self._data_max,
                 trait_change_notify=False)
        self._do_auto_contours()

    def _get__default_contour(self):
        # Property getter: mid-point of the current data range.
        return (self._data_min + self._data_max)*0.5
| |
# -*- coding: utf-8 -*-
from functools import wraps
import json
import logging
import traceback
import time
from base64 import b64encode as encode_url
from flask import Flask, Response, session, abort, jsonify, g, request
from flask_restful import Resource, Api
from werkzeug.local import LocalProxy
import requests
from flask_cache import Cache
from flask_cors import CORS
from db_handler_High import High as Database
from Initdb import Test_editable
import jwt
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from custom_errors import CustomError
# Load the static operator configuration shipped alongside the service.
with open("DO/GenericConfigFile.json", "r") as conf:
    config = json.load(conf)
def get_config():
    """Fetch the operator configuration from the local config service.

    Returns the parsed configuration dict.
    """
    conf = requests.get("http://127.0.0.1/config").text
    # BUG FIX: ``conf`` is a str (the response body), so it must be parsed
    # with json.loads; json.load expects a file-like object and raised
    # AttributeError here.
    config = json.loads(conf)
    return config
# config = get_config() # Uncomment to fetch config from localhost.
# Private key
# RSA private key used to sign tokens (loaded without a passphrase).
with open("operator_private_key.pem", "rb") as key_file:
    operator_rsa_private = serialization.load_pem_private_key(key_file.read(), None, default_backend())
    # operator_rsa_private = load_pem_x509_certificate(key_file.read(), default_backend())
# Public key
# Matching RSA public key used to verify RPT signatures (see verify_rpt).
with open("operator_public_key.pem", "rb") as key_file:
    operator_rsa_public = serialization.load_pem_public_key(key_file.read(), default_backend())
    # operator_rsa_public = load_pem_x509_certificate(key_file.read(), default_backend())
# JSON error bodies handed to flask_restful's Api(errors=...) so the
# common HTTP errors come back as structured JSON instead of HTML.
error_responses = {
    'MethodNotAllowed': {
        'msg': "You probably POSTed something to GET end point or GET something from POST endpoint.",
        'status': 405,
    },
    "NotFound": {
        'msg': "API endpoint not found.",
        'status': 404,
    },
    "BadRequest": {
        'msg': "Missing Content-Type, check your parameters.",
        'status': 400,
    },
    "NotImplemented": {
        'msg': "The method, endpoint or service you tried to use is not implemented yet.",
        'status': 501,
    },
}
app = Flask(__name__)
# Allow cross-origin requests from any origin (the UI is served elsewhere).
CORS(app, resources={r"/*": {"origins": "*"}}, allow_headers='*')
# Simple in-process response cache (used by @cache.cached on UserInformation).
cache = Cache(app, config={'CACHE_TYPE': 'simple'})
debug_log_format = (
    '-' * 80 + '\n' +
    '%(levelname)s in %(module)s [%(pathname)s:%(lineno)d]:\n' +
    '%(message)s\n' +
    '-' * 80
)
logConsoleHandle = logging.StreamHandler()
# fileHandler = logging.FileHandler("DO-log_"+str(time.time())+".log")
# fileHandler.setFormatter(logging.Formatter(debug_log_format))
logConsoleHandle.setFormatter(logging.Formatter(debug_log_format))
app.logger.addHandler(logConsoleHandle)
# app.logger.addHandler(fileHandler)
# app.logger.setLevel(logging.DEBUG)
# NOTE(review): hard-coded session secret — load from config/env in production.
app.secret_key = 'Nants ingonyama bagithi baba, Sithi uhhmm ingonyama, Ingonyama'
api = Api(app, errors=error_responses, catch_all_404s=True)
# db = db_handler.High()
# tests = db_handler.Test_editable(db)
# Short aliases for the app logger, used throughout this module.
logger = app.logger
logger.setLevel(logging.DEBUG)
info = logger.info
error = logger.error
debug = logger.debug
warning = logger.warning
# tests = db_handler.Test_editable(db)
# api_path = "/api/v1.0/"
api_path = "/"
api_path_ui = api_path + "ui/"
def get_db():
    """Return the per-application-context Database handle, creating it on
    first access and caching it on ``flask.g``."""
    handle = getattr(g, '_database', None)
    if handle is None:
        handle = g._database = Database(logger)
    return handle
# Module-level proxy that always resolves to the current context's Database.
db = LocalProxy(get_db)
@app.before_request
def db_opener():
    """Log the start of every request.

    The original body also bound a local ``db = LocalProxy(get_db)``;
    that assignment was dead (it shadowed the module-level proxy and was
    never read), so it has been removed.
    """
    info("START OF REQUEST{}".format("#" * 90))
@app.teardown_request
def db_closer(exception):
    """Commit and close the per-request DB session after every request.

    NOTE(review): ``exception`` is ignored, so the session is committed
    even when the request failed — confirm a rollback isn't wanted there.
    """
    info("END OF REQUEST{}".format("#" * 90))
    db.session.commit()
    db.session.close()
def error_handler(e, classname):
    """Log exception *e* raised inside *classname* and return a
    human-readable description of it (embedded in API error responses)."""
    log = logging.getLogger("DB_Handler")
    log.exception(e)
    tb = traceback.format_exc()
    msg = "class calling error_handler {} got {} ---- {}".format(classname, tb, repr(e))
    log.error(msg)
    return msg
def response(Object):
    '''
    Serialize *Object* (dict or JSON string) into a Flask JSON response.

    Anything else is handed to jsonify() as a best-effort fallback.
    Raises CustomError(500) when serialization fails.
    '''
    t = time.time()
    debug("We got object:{}\nContaining: {}".format(type(Object), Object))
    try:
        if isinstance(Object, dict):
            js = jsonify(Object)
            debug("Creating response JSON took: {}".format(time.time() - t))
            return js
        if isinstance(Object, str):
            js = jsonify(json.loads(Object))
            debug("Creating response JSON took: {}".format(time.time() - t))
            return js
        # NOTE(review): ``unicode`` only exists on Python 2 (this module
        # targets it); on Python 3 this isinstance raises NameError, which
        # is caught below and re-raised as CustomError.
        if isinstance(Object, unicode):
            js = jsonify(json.loads(Object.decode()))
            debug("Creating response JSON took: {}".format(time.time() - t))
            return js
        else:
            debug("We are being misused, this is a lucky and desperate guess.")
            debug("Creating response JSON took: {}".format(time.time() - t))
            return jsonify(Object)
    except Exception as e:
        raise CustomError(description="Crash has occurred in response method of app.py with following error: {}".format(
            error_handler(e, "app.py response(Object)")), code=500)
def check_auth(username, password):
    """Check whether a username/password combination is valid.

    On success the username is stored into the Flask session as a side
    effect (downstream endpoints read session["username"]). Returns a
    truthy value on success, False otherwise.
    """
    # js = json.dumps({"username": "admin", "password": "Hello"})
    # db.register_userAccount(js)
    # db.add_user(js)
    # db.modify_userAccount(json.dumps({"id":1, "password": "Hello"}))
    info("Checking username {}".format(username))
    user = db.get_userAccount_by_username(username)
    if user is None:
        app.logger.debug("Username not found.")
        return False
    correct = db.verify_pw(username, password)
    app.logger.debug("The username comparison of {} and {} resulted: {}"
                     .format(username, user.username, username == user.username))
    verified = username == user.username and correct
    if verified:
        session["username"] = username
        info("Correct credentials. Session username is now {}.".format(session["username"]))
        return verified
    warning("Invalid credentials.")
    return False
def authenticate():
    """Build the 401 response that asks the client for HTTP Basic auth."""
    body = ('Could not verify your access level for that URL.\n'
            'You have to login with proper credentials')
    headers = {'WWW-Authenticate': 'Basic realm="Login Required"'}
    return Response(body, 401, headers)
# @cache.cached(timeout=800)
def requires_auth(f):
    """Decorator that enforces HTTP Basic authentication on a view.

    BUG FIX: the timing debug line previously ran once at decoration time
    (measuring nothing useful); it now measures the per-request auth check.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        t = time.time()
        auth = request.authorization
        authorized = bool(auth) and check_auth(auth.username, auth.password)
        debug("Authentication took: {}s".format(time.time() - t))
        if not authorized:
            return authenticate()
        return f(*args, **kwargs)
    return decorated
def requires_admin(f):
    """Decorator intended to restrict a view to administrators.

    NOTE(review): currently identical to requires_auth — it performs no
    admin/role check at all. Confirm whether this is a deliberate stub.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.authorization
        if not auth or not check_auth(auth.username, auth.password):
            return authenticate()
        return f(*args, **kwargs)
    return decorated
def get_public_key():
    """Return the operator RSA public key used to verify RPT signatures."""
    return operator_rsa_public
def verify_rpt(encoded_rpt):
    """Verify a Requesting Party Token.

    *encoded_rpt* is a dict with key "rpt" holding a JWT signed with the
    operator's RS256 key. Returns {"status": True} when the signature
    verifies, {"status": False} otherwise.
    """
    encoded_rpt = encoded_rpt["rpt"]
    # testing = db.generate_rpt("http://test.fi/data/efghri123")
    # debug(testing)
    # encoded_rpt = testing
    info("Verifying RPT:" + encoded_rpt)
    try:
        decoded = jwt.decode(encoded_rpt, get_public_key(), algorithms=['RS256'])
        info("RPT decoded successfully.")
        try:
            # Best-effort lookup: a missing resource set is only logged.
            check = db.get_resourceset_by_rs_id(encode_url(decoded["rs_id"]))
            info("RS_ID exists in our DB")
            debug("ResourceSet: \n {}".format(check))
        except Exception as e:
            info("RS_ID doesn't exist in our DB")
            pass
        debug("RPT: {}".format(decoded))
        return {"status": True} # TODO: document
    except Exception as e:
        debug("Verifying of RPT failed with: {}".format(error_handler(e, "app.py verify_rpt(encoded_rpt)")))
        return {"status": False}
class RunningTest(Resource):
    """GET / — liveness probe; also touches the db proxy for logging."""
    def get(self):
        info("Test endpoint running fine.")
        info("This logger is {}".format(db))
        return "And we are online!"
class UserInformation(Resource):
    """GET ui/userInformation — profile data of the logged-in user.

    The query string ``select=a,b,c`` picks the returned fields and is
    required; without it a 400 is raised.

    NOTE(review): @cache.cached keys on the URL only, so a cached response
    could be served across different authenticated users — confirm intended.
    """
    @requires_auth
    @cache.cached(timeout=800)
    def get(self):
        t = time.time()
        try:
            info("Fetching userInformation.")
            filtering = request.args.get("select")
            if filtering is not None:
                filtering = filtering.split(",")
                username = session["username"]
                resp = response(db.userInformation(username, filtering))
                debug("UserInformation took: {}s".format(time.time() - t))
                return resp
            raise CustomError(
                "An Error occurred in UserInformation."
                " Please check you typed the fields correctly and provided 'select?' query string.", code=400)
        except CustomError as e:
            # BUG FIX: the 400 raised above was previously swallowed by the
            # generic handler below and re-raised as a 500; pass it through
            # unchanged, consistent with the sibling endpoints.
            raise e
        except Exception as e:
            raise CustomError("An Error occurred in UserInformation."
                              " Error: {}".format(error_handler(e, self.__class__.__name__)), code=500)
class MyServices(Resource):
    """GET ui/myServices — paginated list of the user's services.

    Required query string: ``current`` (offset), ``count`` (page size)
    and ``categories`` (comma-separated filter).
    """
    @requires_auth
    def get(self):
        t = time.time()
        info("Fetching MyServices.")
        try:
            current = int(request.args.get("current"))
            # Downstream expects an absolute end index, not a page size.
            count = current + int(request.args.get("count"))
            categories = request.args.get("categories").split(",")
            debug("Filering based on: {}, {}, {}".format(current, count, categories))
            resp = response(db.myServices(current, count, categories, session["username"]))
            debug("MyServices took: {}s".format(time.time() - t))
            return resp
        except Exception as e:
            error(error_handler(e, self.__class__.__name__))
            raise CustomError("An Error occurred, please check you provided required query fields."
                              " Error: {}".format(error_handler(e, self.__class__.__name__)), code=400)
class MyServices_Number(Resource):
    """GET ui/myServices/numberOfServices — count of the user's services.

    Required query string: ``status`` (All, Disabled or Active,
    case-insensitive).
    """
    @requires_auth
    def get(self):
        try:
            status = request.args.get("status")
            if status.title() in ["All", "Disabled", "Active"]:
                return response(db.myServices_numberOfServices(status.title(), session["username"]))
            # BUG FIX: an unrecognized status previously fell through and the
            # view implicitly returned None; report a client error instead.
            raise CustomError(
                "Invalid 'status' parameter; expected one of All, Disabled or Active.", code=400)
        except CustomError as e:
            raise e
        except Exception as e:
            raise CustomError(
                "An Error occurred, please check you typed the fields correctly and all required parameters."
                " Error: {}".format(error_handler(e, self.__class__.__name__)), code=400)
class Services(Resource):
    """GET ui/services — paginated, filtered catalog of all services.

    Required query string: ``current``, ``count``, ``categories`` and
    ``labels`` (the latter two comma-separated).
    """
    @requires_auth
    def get(self):
        t = time.time()
        info("Fetching Services.")
        try:
            current = int(request.args.get("current"))
            # Downstream expects an absolute end index, not a page size.
            count = current + int(request.args.get("count"))
            categories = request.args.get("categories").split(",")
            labels = request.args.get("labels").split(",")
            debug("Filering based on: {}, {}, {}, {}".format(current, count, categories, labels))
            username = session["username"]
            resp = response(db.ui_Services(categories, labels, current, count, username))
            debug("Services took: {}s".format(time.time() - t))
            return resp
        except Exception as e:
            error(error_handler(e, self.__class__.__name__))
            raise CustomError(
                description="Services endpoint in app.py encountered an error."
                " Make sure you supplied current, count, categories and labels parameters in query string."
                " Error: {}".format(error_handler(e, self.__class__.__name__)), code=500)
class Service(Resource):
    """GET ui/services/<ide> — detail view of a single service."""
    @requires_auth
    def get(self, ide):
        t = time.time()
        info("Fetching a Service.")
        try:
            resp = response(db.ui_Services_id(ide, session["username"]))
            debug("Service took: {}s".format(time.time() - t))
            return resp
        except CustomError as e:
            # Domain errors pass through untouched.
            raise e
        except Exception as e:
            error("Error occured: {}".format(error_handler(e, self.__class__.__name__)))
            raise CustomError(
                "An Error occurred, please check you typed the fields correctly and all required parameters. Error: {}".format(
                    error_handler(e, self.__class__.__name__)), code=400)
class Location_and_Nationality(Resource):
    """GET ui/location|language/<table_name> — list a lookup table (unauthenticated)."""
    def get(self, table_name):
        try:
            info("Fetching " + table_name)
            rows = db.ui_Locations_and_Nationalities(table_name)
            return response(rows)
        except CustomError as e:
            raise e
        except Exception as e:
            description = "Error occurred: {}".format(error_handler(e, self.__class__.__name__))
            raise CustomError(description=description)
class Location_and_Nationality_id(Resource):
    """GET ui/location|language/<table_name>/<ide> — one lookup-table row (unauthenticated)."""
    def get(self, table_name, ide):
        try:
            info("Fetching {} with id {}".format(table_name, ide))
            row = db.ui_Locations_and_Nationalities_id(table_name, ide)
            return response(row)
        except CustomError as e:
            raise e
        except Exception as e:
            description = "Error occurred: {}".format(error_handler(e, self.__class__.__name__))
            raise CustomError(description=description)
class Login(Resource): # TODO: Move most of the logic to db_handler
    """POST ui/foreign_login — log the user into an external service.

    Expected JSON body: the service's credentials plus ``service_id``.
    Authenticates against the service, stores the returned ext_id, then
    fetches and returns the service's SCT (service contract).
    """
    @requires_auth
    def post(self):
        try:
            info("Entered Login class post() function.")
            info("Will now try to authenticate with the service provided.")
            debug("We got data: {}".format(request.data))
            credentials = json.loads(request.data)
            service_id_none_check = credentials.get("service_id", None)
            if service_id_none_check is not None: # Ensuring service_id is given and then turn it to int
                # Future notes, if and when we want to recognise services with uuid
                # Its easier to make up method to fetch id based on uuid and use it
                # here. Having int id's internally allows easy ordering of results.
                try:
                    service_id = int(service_id_none_check)
                except Exception as e:
                    # BUG FIX: the original message called .format() on a string
                    # with no placeholder, silently discarding the error detail.
                    raise CustomError(description="service_id not int: {}"
                                      .format(error_handler(e, self.__class__.__name__)), code=400)
                debug("service_id = {}".format(service_id))
            else:
                error("No service_id provided, aborting.")
                raise CustomError(description="No service_id provided, aborting.", code=400)
            def get_service_address(service_id):
                # Resolve the service record to its API base URL.
                service = db.get_service(service_id)
                return "http://{}:{}/".format(service.ip_address, service.port_api)
            service_url = get_service_address(service_id)
            debug("Got service_url: {}".format(service_url))
            credentials.pop("service_id", None)  # Don't forward our internal id.
            try:
                req = requests.post(service_url + "api/v0.1/auth", json=credentials, timeout=15).text
            except Exception as e:
                raise CustomError(description="Connecting to service failed: {}"
                                  .format(error_handler(e, self.__class__.__name__)),
                                  code=503)
            debug("We got reply: {}".format(req))
            ext_id_json = json.loads(req) # fetch ext_id
            ext_id = ext_id_json.get("ext_id", None)
            if ext_id is None:
                error("No ext_id provided, aborting.")
                raise CustomError(description="No ext_id provided, probably incorrect credentials. Aborting.", code=403)
            # Remember the mapping user account <-> external service id.
            userAccount = db.get_userAccount_by_username(session["username"])
            table = {"services_id": service_id,
                     "userAccounts_id": userAccount.id,
                     "ext_id": ext_id}
            db.add_ext_id(json.dumps(table))
            info("Fetching SCT from service {}".format(service_id))
            contract = requests.get(service_url + "api/v0.1/contract",
                                    headers={"Authorization": "bearer " + ext_id})
            debug("Got Following SCT:\n{}".format(contract.text))
            if db.SCT_correctly_formatted(contract.text) is False:
                error("SCT was not correctly formatted.")
                raise CustomError(description="SCT was not correctly formatted.", code=400)
            # ``.ok`` is the documented spelling of the truthiness the
            # original tested via contract.__bool__() (status < 400).
            if contract.ok:
                info("Got contract successfully!")
                return response(contract.text)
            error("We didn't receive OK from service.")
            raise CustomError(description="The service responded something besides"
                                          " OK when requesting SCT, Output in logs "
                                          "and included here: {}".format(contract.text), code=520)
        except CustomError as e:
            raise e
        except Exception as e:
            raise CustomError(description="Error occurred: {}".format(error_handler(e, self.__class__.__name__)))
class VerifyRPT(Resource):
    """POST /verify_rpt — verify an RPT; body is {"rpt": <jwt>}.

    Returns {"status": bool} from verify_rpt(). Unauthenticated.
    """
    def post(self):
        try:
            debug("Entered VerifyRPT function with value:\n " + request.data)
            return verify_rpt(json.loads(request.data))
        except CustomError as e:
            raise e
        except Exception as e:
            raise CustomError(description="Error occurred: {}".format(error_handler(e, self.__class__.__name__)))
class UiSCT(Resource):
    """POST ui/accept_contract — validate, sign and store a service
    contract (SCT) for the logged-in user."""
    @requires_auth
    def post(self):
        try:
            info(request.data)
            contract = json.loads(request.data)
            debug(type(contract))
            debug(contract["intendet_use"])
            # The UI may send intendet_use as a list; flatten it to CSV.
            if isinstance(contract["intendet_use"], list):
                contract["intendet_use"] = ",".join(contract["intendet_use"])
            # BUG FIX: these two CustomErrors were constructed but never
            # raised, so badly formatted or unsignable contracts were
            # silently stored anyway.
            if db.ui_SCT_correctly_formatted(contract) is False:
                raise CustomError(description="Validating of SCT format failed. Check SCT format.", code=400)
            if db.signing_function(contract) is False:
                raise CustomError(description="Signing of SCT failed.", code=400)
            db.make_contract(session["username"], contract)
            return {"status": "success"}
        except CustomError as e:
            raise e
        except Exception as e:
            raise CustomError(description="Error occurred: {}".format(error_handler(e, self.__class__.__name__)))
class ResourceSets(Resource):
    """protection/resourceSets — resource-set collection endpoint."""
    @requires_auth
    def get(self):
        # Listing is not implemented yet; abort(501) makes the return below
        # unreachable (kept as a reminder of the intended implementation).
        abort(501)
        return response(db.src_ResourceSets())
    @requires_auth
    def post(self):
        '''
        Create a resource set for the logged-in user. Expected JSON body:

        {
            "service_id": id,
            "categories": ["Food", "Fitness"]
        }
        '''
        try:
            js = request.json
            info("Adding new Resource set.")
            debug(request.json)
            # ext_id = request.headers.get("Authorization")
            # if ext_id is None:
            # abort(400)
            # ext_id = ext_id.split(" ")[-1]
            js["username"] = session["username"]
            return response(db.CreateResourceSet(js))
        except CustomError as e:
            raise e
        except Exception as e:
            raise CustomError(description="Error occurred: {}".format(error_handler(e, self.__class__.__name__)))
class ResourceSet2(Resource):
    """Exact duplicate of ResourceSets.

    NOTE(review): this class is byte-for-byte identical to ResourceSets and
    is never registered with api.add_resource in this module — it appears to
    be dead code; confirm before removing.
    """
    @requires_auth
    def get(self):
        # Listing is not implemented yet; the return below is unreachable.
        abort(501)
        return response(db.src_ResourceSets())
    @requires_auth
    def post(self):
        '''
        Create a resource set for the logged-in user. Expected JSON body:

        {
            "service_id": id,
            "categories": ["Food", "Fitness"]
        }
        '''
        try:
            js = request.json
            info("Adding new Resource set.")
            debug(request.json)
            # ext_id = request.headers.get("Authorization")
            # if ext_id is None:
            # abort(400)
            # ext_id = ext_id.split(" ")[-1]
            js["username"] = session["username"]
            return response(db.CreateResourceSet(js))
        except CustomError as e:
            raise e
        except Exception as e:
            raise CustomError(description="Error occurred: {}".format(error_handler(e, self.__class__.__name__)))
class ConsentView(Resource):
    """POST ui/ConsentView — consent details for a sink/source pair.

    Expected JSON body keys: sink_id, source_id and (optional) consent_id.
    """
    @requires_auth
    def post(self):
        t = time.time()
        '''
        {
            "sink_id":"",
            "source_id":"",
            "consent_id":""
        }
        '''
        try:
            data = request.json
            sink_id = data["sink_id"]
            source_id = data["source_id"]
            # consent_id is optional; .get returns None when absent.
            consent_id = data.get("consent_id")
            resp = response(db.ui_ConsentView(session["username"], sink_id, source_id, consent_id))
            debug("ConsentView took: {}s".format(time.time() - t))
            return resp
        except CustomError as e:
            raise e
        except Exception as e:
            raise CustomError(description="Error occurred: {}".format(error_handler(e, self.__class__.__name__)))
class ResourceSet(Resource):
    """GET protection/resourceSets/<ide> — one resource set (unauthenticated)."""
    def get(self, ide):
        resource_set = db.src_ResourceSet(ide)
        return response(resource_set)
# class Register_ResourceSet(Resource):
# @requires_auth
# def get(self):
# categories = request.args.get("categories")
# service_id = request.args.get("service_id")
# username = session["username"]
# js = {"categories": categories,
# "service_id": service_id,
# "username": username}
# db.CreateResourceSet(js)
class GiveConsent(Resource):
    """POST ui/give_consent — create a consent receipt for the logged-in user."""
    @requires_auth
    def post(self):
        try:
            js = request.json
            js["rs_id"] = js["rs_id"]["rs_id"] # TODO: This is a fix for UI problem
            db.create_consent_receipt(js, session["username"])
            result = {"status": "200", "success": True}
            return response(result)
        except CustomError as e:
            raise e
        except Exception as e:
            description = "Error occurred: {}".format(error_handler(e, self.__class__.__name__))
            raise CustomError(description=description)
class Licenses(Resource):
    """GET db/licenses — list of all known license types."""
    @requires_auth
    def get(self):
        try:
            rows = [row.tojson for row in db.get_licensetypes()]
            return response(db.parse_dict_list_to_json(rows))
        except CustomError as e:
            raise e
        except Exception as e:
            raise CustomError(description="Something went wrong in fetching licenses list with error: {}"
                              .format(error_handler(e, self.__class__.__name__)))
class ServDB(Resource):
    """GET db/services — list of all registered services."""
    @requires_auth
    def get(self):
        try:
            return response(db.parse_dict_list_to_json([x.tojson for x in db.get_services()]))
        except CustomError as e:
            raise e
        except Exception as e:
            # BUG FIX: the error message was copy-pasted from Licenses and
            # wrongly said "licenses list".
            raise CustomError(description="Something went wrong in fetching services list with error: {}"
                              .format(error_handler(e, self.__class__.__name__)))
class Config(Resource):
    """GET /config — the operator configuration loaded at startup."""
    @requires_auth
    def get(self):
        try:
            return response(config)
        except Exception as e:
            # BUG FIX: the error message was copy-pasted from Licenses and
            # wrongly said "licenses list".
            raise CustomError(description="Something went wrong in fetching the configuration with error: {}".format(
                error_handler(e, self.__class__.__name__)))
class Active_Licenses(Resource):
    """GET ui/active_licenses/<id> — licenses currently active for *id*."""
    @requires_auth
    def get(self, id):
        try:
            active = db.get_active_licenses(id)
            return response({"active_licenses": active})
        except CustomError as e:
            raise e
        except Exception as e:
            description = "Error occurred: {}".format(error_handler(e, self.__class__.__name__))
            raise CustomError(description=description)
class Active_Categories(Resource):
    """GET ui/active_categories/<id> — categories currently active for *id*."""
    @requires_auth
    def get(self, id):
        try:
            active = db.get_active_categories(id)
            return response({"active_categories": active})
        except CustomError as e:
            raise e
        except Exception as e:
            description = "Error occurred: {}".format(error_handler(e, self.__class__.__name__))
            raise CustomError(description=description)
class Disable_Consent(Resource):
    """GET ui/disable_consent/<id> — disable a consent by deleting its
    receipts link.

    NOTE(review): this mutates state on a GET request; DELETE would be the
    proper verb. Route kept unchanged for backward compatibility.
    """
    @requires_auth
    def get(self, id):
        try:
            current_link = json.loads(db.get_consentreceiptslink(id).tojson)
            debug("[{}]Deleting ConsentReceiptLink:\n {}".format(time.time(), current_link))
            # The delete's return value was previously bound to an unused
            # local (``resp``); the call alone is what matters.
            db.delete_consentreceiptslink(id)
            return "Removed."
        except CustomError as e:
            raise e
        except Exception as e:
            raise CustomError(description="Error occurred: {}".format(error_handler(e, self.__class__.__name__)))
class ResetDB(Resource):
    """GET /RESET — drop and re-initialize the database, then reload config.

    NOTE(review): the @requires_auth is commented out, so this destructive
    endpoint is unauthenticated — confirm that is intended.
    """
    # @requires_auth
    def get(self):
        # BUG FIX: ``global db`` / ``global config`` were declared *after*
        # the names had already been used in this function body, which is a
        # SyntaxError on Python 3; the declarations now come first.
        global db, config
        try:
            db.reset_database()
        except Exception as e:
            # Best effort: a missing or already-empty database is fine here.
            pass
        setattr(g, '_database', None)
        db = LocalProxy(get_db)
        tests = Test_editable(db)
        with open("DO/GenericConfigFile.json", "r") as conf:
            config = json.load(conf)
        # config = get_config()
        return "DataBase has been RESETED and INITIALIZED!"
class TestConsentStatus(Resource):
    """GET /CTest — development helper that toggles consent #1 between
    paused and active and reports each step.

    NOTE(review): no @requires_auth, yet it reads session["username"] and
    mutates consent state — confirm this is test-only before deploying.
    """
    def get(self):
        js = {"Result": "First get current status which is '{}' then set status to paused. Operation was a '{}'."
                        " Checking that now its '{}' and we change it back to active after this."
                        " Operation was '{}' and status is now '{}'.".format(db.get_consent_status(1, session["username"]),
                                                                             db.set_consent_status(1, "paused",
                                                                                                   session["username"]),
                                                                             db.get_consent_status(1, session["username"]),
                                                                             db.set_consent_status(1, "active",
                                                                                                   session["username"]),
                                                                             db.get_consent_status(1, session["username"]))
              }
        return response(js)
class SetConsentStatus(Resource):
    """PUT db/ConsentStatus — update a consent's status."""
    @requires_auth
    def put(self):
        """Expected JSON body::

            {
                "consent_id": consentID,
                "status": "active"
            }
        """
        payload = json.loads(request.data)
        new_status = db.set_consent_status(payload["consent_id"], payload["status"], session["username"])
        return response({"status": new_status})
# ---- Route registration: map each Resource class to its URL path. ----
api.add_resource(RunningTest, '/')
api.add_resource(UserInformation, api_path_ui + 'userInformation') # Documented in swagger.yml
api.add_resource(MyServices, api_path_ui + 'myServices') # Documented in swagger.yml
api.add_resource(MyServices_Number, api_path_ui + 'myServices/numberOfServices') # Documented in swagger.yml
api.add_resource(Services, api_path_ui + 'services') # Documented in swagger.yml
api.add_resource(Service, api_path_ui + 'services/<ide>') # Documented in swagger.yml
api.add_resource(Location_and_Nationality, api_path_ui + 'location/<table_name>', # UNUSED
                 api_path_ui + 'language/<table_name>')
api.add_resource(Location_and_Nationality_id, api_path_ui + 'location/<table_name>/<ide>', # UNUSED
                 api_path_ui + 'language/<table_name>/<ide>')
api.add_resource(Login, api_path_ui + "foreign_login") # Documented in swagger.yml
api.add_resource(VerifyRPT, api_path + "verify_rpt") # Documented in swagger.yml
api.add_resource(UiSCT, api_path_ui + "accept_contract") # Documented in swagger.yml
api.add_resource(GiveConsent, api_path_ui + "give_consent") # Documented in swagger.yml
api.add_resource(ResourceSets, api_path + "protection/resourceSets") # Documented in swagger.yml
api.add_resource(ResourceSet, api_path + "protection/resourceSets/<ide>") # UNUSED
api.add_resource(Licenses, api_path + "db/licenses") # UNUSED
api.add_resource(ServDB, api_path + "db/services") # Documented in swagger.yml
api.add_resource(ConsentView, api_path + "ui/ConsentView") # Documented in swagger.yml
api.add_resource(Active_Licenses, api_path + "ui/active_licenses/<id>") # TODO: Document # UNUSED
api.add_resource(Active_Categories, api_path + "ui/active_categories/<id>") # TODO: Document # UNUSED
api.add_resource(Disable_Consent, api_path + "ui/disable_consent/<id>") # Documented in swagger.yml
api.add_resource(Config, api_path + "config") # Documented in swagger.yml
api.add_resource(ResetDB, api_path + "RESET") # Documented in swagger.yml
api.add_resource(TestConsentStatus, api_path + "CTest") # TODO: Document, Testing
api.add_resource(SetConsentStatus, api_path + "db/ConsentStatus") # TODO: Document # UNUSED
# Development entry point; in production run behind a proper WSGI server.
if __name__ == '__main__':
    app.run(debug=False, port=8080, host="0.0.0.0", threaded=False)
'''
TODO:
Service contract starting from service
Make rpt invalid if consent is revoked/paused. (We check for invalid/missing rpt but fail silently incase or problems)
'''
| |
__author__ = 'Frank Sehnke, sehnke@in.tum.de'
from pybrain.rl.environments import EpisodicTask
from pybrain.rl.environments.ode.sensors import SpecificBodyPositionSensor
from scipy import tanh, zeros, array, random, sqrt, asarray
#Basic class for all ccrl tasks
class CCRLTask(EpisodicTask):
    """Base class for all CCRL (robot arm grasping) tasks.

    Sets up the ODE environment with task-specific position sensors,
    sensor/actor normalization limits and a PID-style action filter that
    the subclasses share.
    """
    def __init__(self, env):
        EpisodicTask.__init__(self, env)
        #Overall maximal tourque - is multiplied with relative max tourque for individual joint.
        self.maxPower = 100.0
        self.reward_history = []
        self.count = 0 #timestep counter
        self.epiLen = 1500 #suggestet episodic length for normal Johnnie tasks
        self.incLearn = 0 #counts the task resets for incrementall learning
        self.env.FricMu = 20.0 #We need higher friction for CCRL
        self.env.dt = 0.002 #We also need more timly resolution
        # normalize standard sensors to (-1, 1)
        self.sensor_limits = []
        #Angle sensors
        for i in range(self.env.actLen):
            self.sensor_limits.append((self.env.cLowList[i], self.env.cHighList[i]))
        # Joint velocity sensors
        for i in range(self.env.actLen):
            self.sensor_limits.append((-20, 20))
        #Norm all actor dimensions to (-1, 1)
        self.actor_limits = [(-1, 1)] * env.actLen
        self.oldAction = zeros(env.actLen, float)
        # 9 derived distance/orientation features filled in getObservation().
        self.dist = zeros(9, float)
        self.dif = array([0.0, 0.0, 0.0])
        self.target = array([-6.5, 1.75, -10.5])
        self.grepRew = 0.0
        self.tableFlag = 0.0
        self.env.addSensor(SpecificBodyPositionSensor(['objectP00'], "glasPos"))
        self.env.addSensor(SpecificBodyPositionSensor(['palmLeft'], "palmPos"))
        self.env.addSensor(SpecificBodyPositionSensor(['fingerLeft1'], "finger1Pos"))
        self.env.addSensor(SpecificBodyPositionSensor(['fingerLeft2'], "finger2Pos"))
        #we changed sensors so we need to update environments sensorLength variable
        self.env.obsLen = len(self.env.getSensors())
        #normalization for the task spezific sensors
        for i in range(self.env.obsLen - 2 * self.env.actLen):
            self.sensor_limits.append((-4, 4))
        # NOTE(review): this overrides the [(-1, 1)] * actLen set above,
        # disabling actor normalization — confirm that is intended.
        self.actor_limits = None
    def getObservation(self):
        """ a filtered mapping to getSample of the underlying environment. """
        sensors = self.env.getSensors()
        #Sensor hand to target object
        for i in range(3):
            self.dist[i] = ((sensors[self.env.obsLen - 9 + i] + sensors[self.env.obsLen - 6 + i] + sensors[self.env.obsLen - 3 + i]) / 3.0 - (sensors[self.env.obsLen - 12 + i] + self.dif[i])) * 4.0 #sensors[self.env.obsLen-12+i]
        #Sensor hand angle to horizontal plane X-Axis
        for i in range(3):
            self.dist[i + 3] = (sensors[self.env.obsLen - 3 + i] - sensors[self.env.obsLen - 6 + i]) * 5.0
        #Sensor hand angle to horizontal plane Y-Axis
        for i in range(3):
            self.dist[i + 6] = ((sensors[self.env.obsLen - 3 + i] + sensors[self.env.obsLen - 6 + i]) / 2.0 - sensors[self.env.obsLen - 9 + i]) * 10.0
        if self.sensor_limits:
            sensors = self.normalize(sensors)
        # Observation = normalized raw sensors (minus the 12 position values)
        # + the 9 derived features + the previous action.
        sens = []
        for i in range(self.env.obsLen - 12):
            sens.append(sensors[i])
        for i in range(9):
            sens.append(self.dist[i])
        for i in self.oldAction:
            sens.append(i)
        return sens
    def performAction(self, action):
        #Filtered mapping towards performAction of the underlying environment
        #The standard CCRL task uses a PID controller to controll directly angles instead of forces
        #This makes most tasks much simpler to learn
        # NOTE(review): mutates the caller's ``action`` array in place
        # (the gripper reflex below writes action[15]).
        self.oldAction = action
        #Grasping as reflex depending on the distance to target - comment in for more easy grasping
        if abs(abs(self.dist[:3]).sum())<2.0: action[15]=1.0 #self.grepRew=action[15]*.01
        else: action[15]=-1.0 #self.grepRew=action[15]*-.03
        isJoints=array(self.env.getSensorByName('JointSensor')) #The joint angles
        isSpeeds=array(self.env.getSensorByName('JointVelocitySensor')) #The joint angular velocitys
        act=(action+1.0)/2.0*(self.env.cHighList-self.env.cLowList)+self.env.cLowList #norm output to action intervall
        action=tanh((act-isJoints-0.9*isSpeeds*self.env.tourqueList)*16.0)*self.maxPower*self.env.tourqueList #simple PID
        EpisodicTask.performAction(self, action)
        #self.env.performAction(action)
    def isFinished(self):
        #returns true if episode timesteps has reached episode length and resets the task
        if self.count > self.epiLen:
            self.res()
            return True
        else:
            self.count += 1
            return False
    def res(self):
        #sets counter and history back, increases incremental counter
        self.count = 0
        self.incLearn += 1
        self.reward_history.append(self.getTotalReward())
        self.tableFlag = 0.0
    def getReward(self):
        #rewarded for approaching the object
        dis = sqrt((self.dist[0:3] ** 2).sum())
        return (25.0 - dis) / float(self.epiLen) - float(self.env.tableSum) * 0.1
#Learn to grasp a glas at a fixed location
class CCRLGlasTask(CCRLTask):
def __init__(self, env):
CCRLTask.__init__(self, env)
self.dif = array([0.0, 0.0, 0.0])
self.epiLen = 1000 #suggestet episodic length for normal Johnnie tasks
def isFinished(self):
#returns true if episode timesteps has reached episode length and resets the task
if self.count > self.epiLen:
self.res()
return True
else:
self.count += 1
return False
def getReward(self):
if self.env.glasSum >= 2: grip = 1000.0
else: grip = 0.0
if self.env.tableSum > 0: self.tableFlag = -1.0
else: tableFlag = 0.0
self.dist[3] = 0.0
self.dist[8] = 0.0
dis = 100.0/((self.dist[:3] ** 2).sum()+0.1)
nig = 10.0/((self.dist[3:] ** 2).sum()+0.1)
if self.env.stepCounter == self.epiLen: print "Grip:", grip, "Dis:", dis, "Nig:", nig, "Table:", self.tableFlag
return (10 + grip + nig + dis + self.tableFlag) / float(self.epiLen) #-dis
#else:
# return (25.0 - dis) / float(self.epiLen) + (grip / nig - float(self.env.tableSum)) * 0.1 #+self.grepRew (10.0-dis)/float(self.epiLen)+
#Learn to grasp a plate at a fixed location
class CCRLPlateTask(CCRLTask):
    """Grasp a plate at a fixed location."""
    def __init__(self, env):
        CCRLTask.__init__(self, env)
        self.dif = array([0.0, 0.2, 0.8])
        self.epiLen = 1000 #suggestet episodic length for normal Johnnie tasks
    def isFinished(self):
        """True once the episode length is exceeded (also resets the task);
        otherwise place the plate on the first step and advance the counter."""
        if self.count > self.epiLen:
            self.res()
            return True
        if self.count == 1:
            self.pertGlasPos(0)
        self.count += 1
        return False
    def pertGlasPos(self, num):
        """Place the object at perturbation position *num*."""
        if num == 0:
            self.env.pert = asarray([0.0, 0.0, 0.5])
    def getReward(self):
        """Reward approaching/gripping the plate; table contact latches a penalty."""
        grip = 1.0 if self.env.glasSum >= 2 else 0.0
        if self.env.tableSum > 0:
            self.tableFlag = 10.0
        dis = sqrt((self.dist[0:3] ** 2).sum())
        if self.count == self.epiLen:
            return 25.0 + grip - dis - self.tableFlag
        return (25.0 - dis) / float(self.epiLen) + (grip - float(self.env.tableSum)) * 0.1
#Learn to grasp a glas at 5 different locations
class CCRLGlasVarTask(CCRLGlasTask):
    """Grasp a glass at 5 predefined locations; the episode is split into
    five sub-episodes, one per position."""
    def __init__(self, env):
        CCRLGlasTask.__init__(self, env)
        self.epiLen = 5000 #suggestet episodic length for normal Johnnie tasks
    def isFinished(self):
        #returns true if episode timesteps has reached episode length and resets the task
        # NOTE(review): ``self.epiLen / 5`` relies on Python 2 integer
        # division; epiLen is divisible by 5, so the equality tests also
        # hold under true division.
        if self.count > self.epiLen:
            self.res()
            return True
        else:
            if self.count == 1:
                self.pertGlasPos(0)
            if self.count == self.epiLen / 5 + 1:
                self.env.reset()
                self.pertGlasPos(1)
            if self.count == 2 * self.epiLen / 5 + 1:
                self.env.reset()
                self.pertGlasPos(2)
            if self.count == 3 * self.epiLen / 5 + 1:
                self.env.reset()
                self.pertGlasPos(3)
            if self.count == 4 * self.epiLen / 5 + 1:
                self.env.reset()
                self.pertGlasPos(4)
            self.count += 1
            return False
    def pertGlasPos(self, num):
        """Move the glass to predefined position *num* (0-4)."""
        if num == 0: self.env.pert = asarray([1.0, 0.0, 0.5])
        if num == 1: self.env.pert = asarray([-1.0, 0.0, 0.5])
        if num == 2: self.env.pert = asarray([1.0, 0.0, 0.0])
        if num == 3: self.env.pert = asarray([-1.0, 0.0, 0.0])
        if num == 4: self.env.pert = asarray([0.0, 0.0, 0.25])
    def getReward(self):
        #big terminal reward at the end of each of the 5 sub-episodes,
        #shaping reward otherwise
        if self.env.glasSum >= 2: grip = 1.0
        else: grip = 0.0
        if self.env.tableSum > 0: self.tableFlag = 10.0
        self.dist[3] = 0.0
        self.dist[8] = 0.0
        dis = sqrt((self.dist ** 2).sum())
        nig = (abs(self.dist[4]) + 1.0)
        if self.count == self.epiLen or self.count == self.epiLen / 5 or self.count == 2 * self.epiLen / 5 or self.count == 3 * self.epiLen / 5 or self.count == 4 * self.epiLen / 5:
            return 25.0 + grip / nig - dis - self.tableFlag #/nig
        else:
            return (25.0 - dis) / float(self.epiLen) + (grip / nig - float(self.env.tableSum)) * 0.1 #/nig
#Learn to grasp a glas at random locations
class CCRLGlasVarRandTask(CCRLGlasVarTask):
    """Grasp a glass at uniformly random locations."""
    def pertGlasPos(self, num):
        """Ignore *num*; draw x in [-1, 1) and z in [0.5, 1.0) uniformly."""
        x = random.random() * 2.0 - 1.0
        z = random.random() * 0.5 + 0.5
        self.env.pert = asarray([x, 0.0, z])
#Some experimental stuff
class CCRLPointTask(CCRLGlasVarTask):
    """Experimental reaching task: move the hand to a target point.

    Replaces the inherited observation with a hand-crafted vector built
    from joint state, the previous action, derived hand pose and the
    target pose.
    """
    def __init__(self, env):
        CCRLGlasVarTask.__init__(self, env)
        self.epiLen = 1000 #suggestet episodic length for normal Johnnie tasks
    def isFinished(self):
        #returns true if episode timesteps has reached episode length and resets the task
        if self.count > self.epiLen:
            self.res()
            return True
        else:
            if self.count == 1:
                self.pertGlasPos(0)
            self.count += 1
            return False
    def getObservation(self):
        """ a filtered mapping to getSample of the underlying environment. """
        sensors = self.env.getSensors()
        sensSort = []
        #Angle and angleVelocity
        for i in range(32):
            sensSort.append(sensors[i])
        #Angles wanted (old action)
        for i in self.oldAction:
            sensSort.append(i)
        #Hand position
        for i in range(3):
            sensSort.append((sensors[38 + i] + sensors[41 + i]) / 2)
        #Hand orientation (Hack - make correkt!!!!)
        sensSort.append((sensors[38] - sensors[41]) / 2 - sensors[35]) #pitch
        sensSort.append((sensors[38 + 1] - sensors[41 + 1]) / 2 - sensors[35 + 1]) #yaw
        sensSort.append((sensors[38 + 1] - sensors[41 + 1])) #roll
        #Target position
        for i in range(3):
            sensSort.append(self.target[i])
        #Target orientation
        for i in range(3):
            sensSort.append(0.0)
        #Object type (start with random)
        sensSort.append(float(random.randint(-1, 1))) #roll
        #normalisation
        if self.sensor_limits:
            sensors = self.normalize(sensors)
        # First 32 normalized raw sensors, then the 29 derived entries.
        sens = []
        for i in range(32):
            sens.append(sensors[i])
        for i in range(29):
            sens.append(sensSort[i + 32])
        #calc dist to target
        self.dist = array([(sens[54] - sens[48]), (sens[55] - sens[49]), (sens[56] - sens[50]), sens[51], sens[52], sens[53], sens[15]])
        return sens
    def pertGlasPos(self, num):
        """Set the object perturbation and derive the world-frame target."""
        if num == 0: self.target = asarray([0.0, 0.0, 1.0])
        self.env.pert = self.target.copy()
        self.target = self.target.copy() + array([-6.5, 1.75, -10.5])
    def getReward(self):
        #rewarded for approaching the target point; table contact penalized
        dis = sqrt((self.dist ** 2).sum())
        return (25.0 - dis) / float(self.epiLen) - float(self.env.tableSum) * 0.1
class CCRLPointVarTask(CCRLPointTask):
    # Like CCRLPointTask, but with a longer episode during which the target
    # is repositioned halfway through.

    def __init__(self, env):
        """Create the task on *env* with a 2000-step episode."""
        CCRLPointTask.__init__(self, env)
        self.epiLen = 2000  # suggested episode length for this task

    def isFinished(self):
        """Return True (and reset) once the episode length is exceeded."""
        if self.count > self.epiLen:
            self.res()
            return True
        else:
            if self.count == 1:
                self.pertGlasPos(0)
            if self.count == self.epiLen / 2 + 1:
                # Halfway point: restart the environment with a second target.
                self.env.reset()
                self.pertGlasPos(1)
            self.count += 1
            return False

    def getObservation(self):
        """ a filtered mapping to getSample of the underlying environment. """
        sensors = self.env.getSensors()
        sensSort = []
        # Joint angles and angular velocities (raw sensors 0..31).
        for i in range(32):
            sensSort.append(sensors[i])
        # Desired joint angles from the previous action.
        for i in self.oldAction:
            sensSort.append(i)
        # Hand position: midpoint of the two fingertip position sensors.
        for i in range(3):
            sensSort.append((sensors[38 + i] + sensors[41 + i]) / 2)
        # Hand orientation (acknowledged hack - make correct!)
        sensSort.append((sensors[38] - sensors[41]) / 2 - sensors[35]) #pitch
        sensSort.append((sensors[38 + 1] - sensors[41 + 1]) / 2 - sensors[35 + 1]) #yaw
        sensSort.append((sensors[38 + 1] - sensors[41 + 1])) #roll (same expression as yaw numerator - part of the hack above)
        # Target position.
        for i in range(3):
            sensSort.append(self.target[i])
        # Target orientation (fixed to zero here).
        for i in range(3):
            sensSort.append(0.0)
        # Object type flag (randomised placeholder value in {-1, 0, 1}).
        sensSort.append(float(random.randint(-1, 1))) #roll
        # Normalise only the raw sensor block, if limits are configured.
        if self.sensor_limits:
            sensors = self.normalize(sensors)
        sens = []
        for i in range(32):
            sens.append(sensors[i])
        for i in range(29):
            sens.append(sensSort[i + 32])
        # Positional offsets are scaled by 10 here (unlike CCRLPointTask),
        # and the grip term is shifted by +1.0; consumed by getReward().
        self.dist = array([(sens[54] - sens[48]) * 10.0, (sens[55] - sens[49]) * 10.0, (sens[56] - sens[50]) * 10.0, sens[51], sens[52], sens[53], 1.0 + sens[15]])
        return sens

    def pertGlasPos(self, num):
        """Select one of five predefined targets and set the env perturbation."""
        if num == 0: self.target = asarray([1.0, 0.0, 1.0])
        if num == 1: self.target = asarray([-1.0, 0.0, 1.0])
        if num == 2: self.target = asarray([1.0, 0.0, 0.0])
        if num == 3: self.target = asarray([-1.0, 0.0, 0.0])
        if num == 4: self.target = asarray([0.0, 0.0, 0.5])
        self.env.pert = self.target.copy()
        # Shift the target into the hand/sensor coordinate frame.
        self.target = self.target.copy() + array([-6.5, 1.75, -10.5])

    def getReward(self):
        """Shaping reward with a large bonus at the end of each sub-episode."""
        dis = sqrt((self.dist ** 2).sum())
        subEpi = self.epiLen / 2
        if self.count == self.epiLen or self.count == subEpi:
            # End of a sub-episode: pay out the full closeness bonus.
            return (25.0 - dis) / 2.0
        else:
            return (25.0 - dis) / float(self.epiLen) - float(self.env.tableSum) * 0.1
| |
"""
This module contains SymPy functions matching the special math functions in the
C standard library (since C99, also available in C++11).
The functions defined in this module allow the user to express functions such as ``expm1``
as a SymPy function for symbolic manipulation.
"""
from sympy.core.function import ArgumentIndexError, Function
from sympy.core.numbers import Rational
from sympy.core.power import Pow
from sympy.core.singleton import S
from sympy.functions.elementary.exponential import exp, log
from sympy.functions.elementary.miscellaneous import sqrt
def _expm1(arg):
    """Expanded definition of expm1: ``exp(arg) - 1``."""
    return exp(arg) - S.One
class expm1(Function):
    """
    Represents the exponential function minus one.

    ``expm1(x)`` is preferable to ``exp(x) - 1`` because the latter
    suffers from cancellation under finite precision arithmetic when
    x is close to zero.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cfunctions import expm1
    >>> '%.0e' % expm1(1e-99).evalf()
    '1e-99'
    >>> from math import exp
    >>> exp(1e-99) - 1
    0.0
    >>> expm1(x).diff(x)
    exp(x)

    See Also
    ========

    log1p
    """
    nargs = 1

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex != 1:
            raise ArgumentIndexError(self, argindex)
        return exp(*self.args)

    def _eval_expand_func(self, **hints):
        return _expm1(*self.args)

    def _eval_rewrite_as_exp(self, arg, **kwargs):
        return exp(arg) - S.One

    _eval_rewrite_as_tractable = _eval_rewrite_as_exp

    @classmethod
    def eval(cls, arg):
        # Auto-evaluate exactly when exp() itself would.
        evaluated = exp.eval(arg)
        if evaluated is not None:
            return evaluated - S.One

    def _eval_is_real(self):
        return self.args[0].is_real

    def _eval_is_finite(self):
        return self.args[0].is_finite
def _log1p(arg):
    """Expanded definition of log1p: ``log(arg + 1)``."""
    return log(S.One + arg)
class log1p(Function):
    """
    Represents the natural logarithm of a number plus one.

    ``log1p(x)`` is preferable to ``log(x + 1)`` because the latter
    suffers from cancellation under finite precision arithmetic when
    x is close to zero.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cfunctions import log1p
    >>> from sympy.core.function import expand_log
    >>> '%.0e' % expand_log(log1p(1e-99)).evalf()
    '1e-99'
    >>> from math import log
    >>> log(1 + 1e-99)
    0.0
    >>> log1p(x).diff(x)
    1/(x + 1)

    See Also
    ========

    expm1
    """
    nargs = 1

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex != 1:
            raise ArgumentIndexError(self, argindex)
        return S.One/(self.args[0] + S.One)

    def _eval_expand_func(self, **hints):
        return _log1p(*self.args)

    def _eval_rewrite_as_log(self, arg, **kwargs):
        return _log1p(arg)

    _eval_rewrite_as_tractable = _eval_rewrite_as_log

    @classmethod
    def eval(cls, arg):
        if arg.is_Rational:
            return log(arg + S.One)
        if not arg.is_Float:  # not safe to add 1 to Float
            return log.eval(arg + S.One)
        if arg.is_number:
            # Convert the Float to an exact Rational before shifting by one.
            return log(Rational(arg) + S.One)

    def _eval_is_real(self):
        return (self.args[0] + S.One).is_nonnegative

    def _eval_is_finite(self):
        if (self.args[0] + S.One).is_zero:
            return False
        return self.args[0].is_finite

    def _eval_is_positive(self):
        return self.args[0].is_positive

    def _eval_is_zero(self):
        return self.args[0].is_zero

    def _eval_is_nonnegative(self):
        return self.args[0].is_nonnegative
# Cached SymPy Integer two, shared by exp2/log2/Sqrt/Cbrt/hypot below.
_Two = S(2)


def _exp2(arg):
    """Expanded definition of exp2: ``2**arg``."""
    return Pow(_Two, arg)
class exp2(Function):
    """
    Represents the exponential function with base two.

    ``exp2(x)`` can be more efficient than the equivalent ``2**x``
    under finite precision arithmetic.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cfunctions import exp2
    >>> exp2(2).evalf() == 4
    True
    >>> exp2(x).diff(x)
    log(2)*exp2(x)

    See Also
    ========

    log2
    """
    nargs = 1

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex != 1:
            raise ArgumentIndexError(self, argindex)
        return log(_Two)*self

    def _eval_rewrite_as_Pow(self, arg, **kwargs):
        return _exp2(arg)

    _eval_rewrite_as_tractable = _eval_rewrite_as_Pow

    def _eval_expand_func(self, **hints):
        return _exp2(*self.args)

    @classmethod
    def eval(cls, arg):
        # Concrete numbers collapse directly to the power form.
        if arg.is_number:
            return _exp2(arg)
def _log2(arg):
    """Expanded definition of log2: ``log(arg)/log(2)``."""
    return log(arg) / log(_Two)
class log2(Function):
    """
    Represents the base-two logarithm.

    ``log2(x)`` can be more efficient than the equivalent
    ``log(x)/log(2)`` under finite precision arithmetic.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cfunctions import log2
    >>> log2(4).evalf() == 2
    True
    >>> log2(x).diff(x)
    1/(x*log(2))

    See Also
    ========

    exp2
    log10
    """
    nargs = 1

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex != 1:
            raise ArgumentIndexError(self, argindex)
        return S.One/(log(_Two)*self.args[0])

    @classmethod
    def eval(cls, arg):
        if arg.is_number:
            # Only collapse when log evaluates to an atom (e.g. an Integer);
            # otherwise the log2 form is kept.
            res = log.eval(arg, base=_Two)
            if res.is_Atom:
                return res
        elif arg.is_Pow and arg.base == _Two:
            return arg.exp

    def _eval_expand_func(self, **hints):
        return _log2(*self.args)

    def _eval_rewrite_as_log(self, arg, **kwargs):
        return _log2(arg)

    _eval_rewrite_as_tractable = _eval_rewrite_as_log
def _fma(x, y, z):
return x*y + z
class fma(Function):
    """
    Represents "fused multiply add".

    The benefit of using ``fma(x, y, z)`` over ``x*y + z``
    is that, under finite precision arithmetic, the former is
    supported by special instructions on some CPUs.

    Examples
    ========

    >>> from sympy.abc import x, y, z
    >>> from sympy.codegen.cfunctions import fma
    >>> fma(x, y, z).diff(x)
    y

    """
    nargs = 3

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        # d/dx fma(x, y, z) = y, d/dy fma(x, y, z) = x (the "other"
        # multiplicand), and d/dz fma(x, y, z) = 1.
        if argindex in (1, 2):
            return self.args[2 - argindex]
        elif argindex == 3:
            return S.One
        else:
            raise ArgumentIndexError(self, argindex)

    def _eval_expand_func(self, **hints):
        return _fma(*self.args)

    def _eval_rewrite_as_tractable(self, *args, **kwargs):
        # BUG FIX: rewrite hooks are invoked with all of the function's
        # arguments unpacked (nargs = 3 here); the previous one-parameter
        # signature raised TypeError and also called _fma with a single
        # argument even though it requires three.
        return _fma(*args)
# Cached SymPy Integer ten, shared by log10 below.
_Ten = S(10)


def _log10(arg):
    """Expanded definition of log10: ``log(arg)/log(10)``."""
    return log(arg) / log(_Ten)
class log10(Function):
    """
    Represents the base-ten logarithm.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cfunctions import log10
    >>> log10(100).evalf() == 2
    True
    >>> log10(x).diff(x)
    1/(x*log(10))

    See Also
    ========

    log2
    """
    nargs = 1

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex != 1:
            raise ArgumentIndexError(self, argindex)
        return S.One/(log(_Ten)*self.args[0])

    @classmethod
    def eval(cls, arg):
        if arg.is_number:
            # Only collapse when log evaluates to an atom (e.g. an Integer);
            # otherwise the log10 form is kept.
            res = log.eval(arg, base=_Ten)
            if res.is_Atom:
                return res
        elif arg.is_Pow and arg.base == _Ten:
            return arg.exp

    def _eval_expand_func(self, **hints):
        return _log10(*self.args)

    def _eval_rewrite_as_log(self, arg, **kwargs):
        return _log10(arg)

    _eval_rewrite_as_tractable = _eval_rewrite_as_log
def _Sqrt(arg):
    """Expanded definition of Sqrt: ``arg**(1/2)``."""
    return Pow(arg, S.Half)
class Sqrt(Function):  # 'sqrt' already defined in sympy.functions.elementary.miscellaneous
    """
    Represents the square root function.

    ``Sqrt(x)`` stays an explicit function application, whereas
    ``sqrt(x)`` is internally represented as ``Pow(x, S.Half)``, which
    may not be what one wants when doing code-generation.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cfunctions import Sqrt
    >>> Sqrt(x)
    Sqrt(x)
    >>> Sqrt(x).diff(x)
    1/(2*sqrt(x))

    See Also
    ========

    Cbrt
    """
    nargs = 1

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex != 1:
            raise ArgumentIndexError(self, argindex)
        return Pow(self.args[0], Rational(-1, 2))/_Two

    def _eval_expand_func(self, **hints):
        return _Sqrt(*self.args)

    def _eval_rewrite_as_Pow(self, arg, **kwargs):
        return _Sqrt(arg)

    _eval_rewrite_as_tractable = _eval_rewrite_as_Pow
def _Cbrt(arg):
    """Expanded definition of Cbrt: ``arg**(1/3)``."""
    return Pow(arg, Rational(1, 3))
class Cbrt(Function):  # 'cbrt' already defined in sympy.functions.elementary.miscellaneous
    """
    Represents the cube root function.

    ``Cbrt(x)`` stays an explicit function application, whereas
    ``cbrt(x)`` is internally represented as ``Pow(x, Rational(1, 3))``,
    which may not be what one wants when doing code-generation.

    Examples
    ========

    >>> from sympy.abc import x
    >>> from sympy.codegen.cfunctions import Cbrt
    >>> Cbrt(x)
    Cbrt(x)
    >>> Cbrt(x).diff(x)
    1/(3*x**(2/3))

    See Also
    ========

    Sqrt
    """
    nargs = 1

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        if argindex != 1:
            raise ArgumentIndexError(self, argindex)
        return Pow(self.args[0], Rational(-_Two/3))/3

    def _eval_expand_func(self, **hints):
        return _Cbrt(*self.args)

    def _eval_rewrite_as_Pow(self, arg, **kwargs):
        return _Cbrt(arg)

    _eval_rewrite_as_tractable = _eval_rewrite_as_Pow
def _hypot(x, y):
    """Expanded definition of hypot: ``sqrt(x**2 + y**2)``."""
    return sqrt(Pow(y, 2) + Pow(x, 2))
class hypot(Function):
    """
    Represents the hypotenuse function.

    The hypotenuse function is provided by e.g. the math library
    in the C99 standard, hence one may want to represent the function
    symbolically when doing code-generation.

    Examples
    ========

    >>> from sympy.abc import x, y
    >>> from sympy.codegen.cfunctions import hypot
    >>> hypot(3, 4).evalf() == 5
    True
    >>> hypot(x, y)
    hypot(x, y)
    >>> hypot(x, y).diff(x)
    x/hypot(x, y)
    """
    nargs = 2

    def fdiff(self, argindex=1):
        """
        Returns the first derivative of this function.
        """
        # d/dx hypot(x, y) = x/hypot(x, y) (symmetrically for y); the
        # 2/_Two factors cancel on construction.
        if argindex in (1, 2):
            return 2*self.args[argindex-1]/(_Two*self.func(*self.args))
        else:
            raise ArgumentIndexError(self, argindex)

    def _eval_expand_func(self, **hints):
        return _hypot(*self.args)

    def _eval_rewrite_as_Pow(self, *args, **kwargs):
        # BUG FIX: rewrite hooks are invoked with all of the function's
        # arguments unpacked (nargs = 2 here); the previous one-parameter
        # signature raised TypeError and also called _hypot with a single
        # argument even though it requires two.
        return _hypot(*args)

    _eval_rewrite_as_tractable = _eval_rewrite_as_Pow
| |
from sympy.utilities.pytest import XFAIL, raises
from sympy import (S, Symbol, symbols, oo, I, pi, Float, And, Or, Not, Implies,
Xor)
from sympy.core.relational import ( Relational, Equality, Unequality,
GreaterThan, LessThan, StrictGreaterThan, StrictLessThan, Rel, Eq, Lt, Le,
Gt, Ge, Ne )
from sympy.sets.sets import Interval, FiniteSet
x, y, z, t = symbols('x,y,z,t')
def test_rel_ne():
    """Relational with the '!=' operator string builds an Unequality."""
    assert Ne(x, y) == Relational(x, y, '!=')
def test_rel_subs():
    """subs() on a Relational preserves its class and substitutes the lhs."""
    op_to_cls = [
        ('==', Equality),
        ('>=', GreaterThan),
        ('<=', LessThan),
        ('>', StrictGreaterThan),
        ('<', StrictLessThan),
    ]
    for op, cls in op_to_cls:
        e = Relational(x, y, op).subs(x, z)
        assert isinstance(e, cls)
        assert e.lhs == z
        assert e.rhs == y

    # Substituting a concrete value evaluates the equality outright.
    e = Eq(x, 0)
    assert e.subs(x, 0) is S.true
    assert e.subs(x, 1) is S.false
def test_wrappers():
    """Each wrapper (Rel/Eq/Lt/Le/Gt/Ge/Ne) builds the matching Relational."""
    e = x + x**2
    assert Rel(y, e, '==') == Relational(y, e, '==')
    for wrapper, op in ((Eq, '=='), (Lt, '<'), (Le, '<='),
                        (Gt, '>'), (Ge, '>='), (Ne, '!=')):
        assert wrapper(y, e) == Relational(y, e, op)
def test_Eq():
    """Single-argument Eq defaults the right-hand side to zero."""
    e = x**2
    assert Eq(e) == Eq(e, 0)
    assert Eq(e) != Eq(e, 1)
def test_rel_Infinity():
    """Comparisons against oo/-oo evaluate immediately to S.true/S.false."""
    # NOTE: All of these are actually handled by sympy.core.Number, and do
    # not create Relational objects.
    assert (oo > oo) is S.false
    assert (oo > -oo) is S.true
    assert (oo > 1) is S.true
    assert (oo < oo) is S.false
    assert (oo < -oo) is S.false
    assert (oo < 1) is S.false
    assert (oo >= oo) is S.true
    assert (oo >= -oo) is S.true
    assert (oo >= 1) is S.true
    assert (oo <= oo) is S.true
    assert (oo <= -oo) is S.false
    assert (oo <= 1) is S.false
    assert (-oo > oo) is S.false
    assert (-oo > -oo) is S.false
    assert (-oo > 1) is S.false
    assert (-oo < oo) is S.true
    assert (-oo < -oo) is S.false
    assert (-oo < 1) is S.true
    assert (-oo >= oo) is S.false
    assert (-oo >= -oo) is S.true
    assert (-oo >= 1) is S.false
    assert (-oo <= oo) is S.true
    assert (-oo <= -oo) is S.true
    assert (-oo <= 1) is S.true
def test_bool():
    """Relationals between concrete numbers evaluate to S.true/S.false."""
    assert Eq(0, 0) is S.true
    assert Eq(1, 0) is S.false
    assert Ne(0, 0) is S.false
    assert Ne(1, 0) is S.true
    assert Lt(0, 1) is S.true
    assert Lt(1, 0) is S.false
    assert Le(0, 1) is S.true
    assert Le(1, 0) is S.false
    assert Le(0, 0) is S.true
    assert Gt(1, 0) is S.true
    assert Gt(0, 1) is S.false
    assert Ge(1, 0) is S.true
    assert Ge(0, 1) is S.false
    assert Ge(1, 1) is S.true
    # Complex numbers: only (in)equality evaluates; orderings stay symbolic.
    assert Eq(I, 2) is S.false
    assert Ne(I, 2) is S.true
    assert Gt(I, 2) not in [S.true, S.false]
    assert Ge(I, 2) not in [S.true, S.false]
    assert Lt(I, 2) not in [S.true, S.false]
    assert Le(I, 2) not in [S.true, S.false]
    # Floats that differ below display precision must still compare unequal.
    a = Float('.000000000000000000001', '')
    b = Float('.0000000000000000000001', '')
    assert Eq(pi + a, pi + b) is S.false
def test_rich_cmp():
    """Python's rich-comparison operators map onto the relational wrappers."""
    assert Lt(x, y) == (x < y)
    assert Le(x, y) == (x <= y)
    assert Gt(x, y) == (x > y)
    assert Ge(x, y) == (x >= y)
def test_doit():
    """doit() resolves relationals from symbol assumptions where possible."""
    from sympy import Symbol
    p = Symbol('p', positive=True)
    n = Symbol('n', negative=True)
    np = Symbol('np', nonpositive=True)
    nn = Symbol('nn', nonnegative=True)
    assert Gt(p, 0).doit() is S.true
    assert Gt(p, 1).doit() == Gt(p, 1)  # undecidable: stays symbolic
    assert Ge(p, 0).doit() is S.true
    assert Le(p, 0).doit() is S.false
    assert Lt(n, 0).doit() is S.true
    assert Le(np, 0).doit() is S.true
    assert Gt(nn, 0).doit() == Gt(nn, 0)  # nonnegative could be zero
    assert Lt(nn, 0).doit() is S.false
    assert Eq(x, 0).doit() == Eq(x, 0)  # no assumptions: unchanged
def test_new_relational():
    """All operator spellings (symbolic, mnemonic, None) build equal objects,
    and any unrecognised spelling raises ValueError."""
    x = Symbol('x')
    # Equality: '==', 'eq', or omitted operator.
    assert Eq(x) == Relational(x, 0)  # None ==> Equality
    assert Eq(x) == Relational(x, 0, '==')
    assert Eq(x) == Relational(x, 0, 'eq')
    assert Eq(x) == Equality(x, 0)
    assert Eq(x, -1) == Relational(x, -1)  # None ==> Equality
    assert Eq(x, -1) == Relational(x, -1, '==')
    assert Eq(x, -1) == Relational(x, -1, 'eq')
    assert Eq(x, -1) == Equality(x, -1)
    assert Eq(x) != Relational(x, 1)  # None ==> Equality
    assert Eq(x) != Relational(x, 1, '==')
    assert Eq(x) != Relational(x, 1, 'eq')
    assert Eq(x) != Equality(x, 1)
    assert Eq(x, -1) != Relational(x, 1)  # None ==> Equality
    assert Eq(x, -1) != Relational(x, 1, '==')
    assert Eq(x, -1) != Relational(x, 1, 'eq')
    assert Eq(x, -1) != Equality(x, 1)
    # Unequality: '!=', '<>', 'ne'.
    assert Ne(x, 0) == Relational(x, 0, '!=')
    assert Ne(x, 0) == Relational(x, 0, '<>')
    assert Ne(x, 0) == Relational(x, 0, 'ne')
    assert Ne(x, 0) == Unequality(x, 0)
    assert Ne(x, 0) != Relational(x, 1, '!=')
    assert Ne(x, 0) != Relational(x, 1, '<>')
    assert Ne(x, 0) != Relational(x, 1, 'ne')
    assert Ne(x, 0) != Unequality(x, 1)
    # GreaterThan: '>=', 'ge', and the rich operator.
    assert Ge(x, 0) == Relational(x, 0, '>=')
    assert Ge(x, 0) == Relational(x, 0, 'ge')
    assert Ge(x, 0) == GreaterThan(x, 0)
    assert Ge(x, 1) != Relational(x, 0, '>=')
    assert Ge(x, 1) != Relational(x, 0, 'ge')
    assert Ge(x, 1) != GreaterThan(x, 0)
    assert (x >= 1) == Relational(x, 1, '>=')
    assert (x >= 1) == Relational(x, 1, 'ge')
    assert (x >= 1) == GreaterThan(x, 1)
    assert (x >= 0) != Relational(x, 1, '>=')
    assert (x >= 0) != Relational(x, 1, 'ge')
    assert (x >= 0) != GreaterThan(x, 1)
    # LessThan: '<=', 'le', and the rich operator.
    assert Le(x, 0) == Relational(x, 0, '<=')
    assert Le(x, 0) == Relational(x, 0, 'le')
    assert Le(x, 0) == LessThan(x, 0)
    assert Le(x, 1) != Relational(x, 0, '<=')
    assert Le(x, 1) != Relational(x, 0, 'le')
    assert Le(x, 1) != LessThan(x, 0)
    assert (x <= 1) == Relational(x, 1, '<=')
    assert (x <= 1) == Relational(x, 1, 'le')
    assert (x <= 1) == LessThan(x, 1)
    assert (x <= 0) != Relational(x, 1, '<=')
    assert (x <= 0) != Relational(x, 1, 'le')
    assert (x <= 0) != LessThan(x, 1)
    # StrictGreaterThan: '>', 'gt', and the rich operator.
    assert Gt(x, 0) == Relational(x, 0, '>')
    assert Gt(x, 0) == Relational(x, 0, 'gt')
    assert Gt(x, 0) == StrictGreaterThan(x, 0)
    assert Gt(x, 1) != Relational(x, 0, '>')
    assert Gt(x, 1) != Relational(x, 0, 'gt')
    assert Gt(x, 1) != StrictGreaterThan(x, 0)
    assert (x > 1) == Relational(x, 1, '>')
    assert (x > 1) == Relational(x, 1, 'gt')
    assert (x > 1) == StrictGreaterThan(x, 1)
    assert (x > 0) != Relational(x, 1, '>')
    assert (x > 0) != Relational(x, 1, 'gt')
    assert (x > 0) != StrictGreaterThan(x, 1)
    # StrictLessThan: '<', 'lt', and the rich operator.
    assert Lt(x, 0) == Relational(x, 0, '<')
    assert Lt(x, 0) == Relational(x, 0, 'lt')
    assert Lt(x, 0) == StrictLessThan(x, 0)
    assert Lt(x, 1) != Relational(x, 0, '<')
    assert Lt(x, 1) != Relational(x, 0, 'lt')
    assert Lt(x, 1) != StrictLessThan(x, 0)
    assert (x < 1) == Relational(x, 1, '<')
    assert (x < 1) == Relational(x, 1, 'lt')
    assert (x < 1) == StrictLessThan(x, 1)
    assert (x < 0) != Relational(x, 1, '<')
    assert (x < 0) != Relational(x, 1, 'lt')
    assert (x < 0) != StrictLessThan(x, 1)
    # finally, some fuzz testing: random one/two-character strings that are
    # not valid operator spellings must raise ValueError
    from random import randint
    from sympy.core.compatibility import unichr
    for i in range(100):
        while 1:
            strtype, length = (unichr, 65535) if randint(0, 1) else (chr, 255)
            relation_type = strtype( randint(0, length) )
            if randint(0, 1):
                relation_type += strtype( randint(0, length) )
            if relation_type not in ('==', 'eq', '!=', '<>', 'ne', '>=', 'ge',
                                     '<=', 'le', '>', 'gt', '<', 'lt'):
                break
        raises(ValueError, lambda: Relational(x, 1, relation_type))
def test_relational_bool_output():
    """bool() on a symbolic relational must raise TypeError."""
    # https://github.com/sympy/sympy/issues/5931
    for rel in (x > 3, x >= 3, x < 3, x <= 3, Eq(x, 3), Ne(x, 3)):
        raises(TypeError, lambda: bool(rel))
def test_relational_logic_symbols():
    """Relationals combine with &, |, ~, >>, <<, ^ into boolean expressions."""
    # See issue 6204
    assert (x < y) & (z < t) == And(x < y, z < t)
    assert (x < y) | (z < t) == Or(x < y, z < t)
    assert ~(x < y) == Not(x < y)
    assert (x < y) >> (z < t) == Implies(x < y, z < t)
    assert (x < y) << (z < t) == Implies(z < t, x < y)
    assert (x < y) ^ (z < t) == Xor(x < y, z < t)
    assert isinstance((x < y) & (z < t), And)
    assert isinstance((x < y) | (z < t), Or)
    # Negating a relational simplifies directly to the complementary class.
    assert isinstance(~(x < y), GreaterThan)
    assert isinstance((x < y) >> (z < t), Implies)
    assert isinstance((x < y) << (z < t), Implies)
    assert isinstance((x < y) ^ (z < t), (Or, Xor))
def test_univariate_relational_as_set():
    """Single-variable relationals convert to interval/finite sets."""
    assert (x > 0).as_set() == Interval(0, oo, True, True)
    assert (x >= 0).as_set() == Interval(0, oo)
    assert (x < 0).as_set() == Interval(-oo, 0, True, True)
    assert (x <= 0).as_set() == Interval(-oo, 0)
    assert Eq(x, 0).as_set() == FiniteSet(0)
    assert Ne(x, 0).as_set() == Interval(-oo, 0, True, True) + \
        Interval(0, oo, True, True)
    assert (x**2 >= 4).as_set() == Interval(-oo, -2) + Interval(2, oo)
@XFAIL
def test_multivariate_relational_as_set():
    """Multivariate as_set() is not implemented yet (expected failure)."""
    assert (x*y >= 0).as_set() == Interval(0, oo)*Interval(0, oo) + \
        Interval(-oo, 0)*Interval(-oo, 0)
def test_Not():
    """Not() maps each relational onto its complementary class."""
    complements = [
        (Equality, Unequality),
        (Unequality, Equality),
        (StrictGreaterThan, LessThan),
        (StrictLessThan, GreaterThan),
        (GreaterThan, StrictLessThan),
        (LessThan, StrictGreaterThan),
    ]
    for rel, negated in complements:
        assert Not(rel(x, y)) == negated(x, y)
| |
##########################################################################
#
# Copyright (c) 2014, Esteban Tovagliari. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import subprocess32 as subprocess
import re
import IECore
import IECoreScene
import Gaffer
import GafferTest
import GafferScene
import GafferAppleseed
import GafferAppleseedTest
import GafferOSL
class AppleseedRenderTest( GafferTest.TestCase ) :
def setUp( self ) :
GafferTest.TestCase.setUp( self )
self.__scriptFileName = self.temporaryDirectory() + "/test.gfr"
def testExecute( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["render"] = GafferAppleseed.AppleseedRender()
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["in"].setInput( s["plane"]["out"] )
s["expression"] = Gaffer.Expression()
s["expression"].setExpression( "parent['render']['fileName'] = '" + self.temporaryDirectory() + "/test.%d.appleseed' % int( context['frame'] )" )
s["fileName"].setValue( self.__scriptFileName )
s.save()
subprocess.check_call(
[ "gaffer", "execute", self.__scriptFileName, "-frames", "1-3" ]
)
for i in range( 1, 4 ) :
self.failUnless( os.path.exists( self.temporaryDirectory() + "/test.%d.appleseed" % i ) )
def testWaitForImage( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["options"] = GafferAppleseed.AppleseedOptions()
s["options"]["in"].setInput( s["plane"]["out"] )
s["options"]["options"]["maxAASamples"]["value"].setValue( 1 )
s["options"]["options"]["maxAASamples"]["enabled"].setValue( True )
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/test.exr",
"exr",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["options"]["out"] )
s["render"] = GafferAppleseed.AppleseedRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.appleseed" )
s["fileName"].setValue( self.__scriptFileName )
s.save()
s["render"]["task"].execute()
self.failUnless( os.path.exists( self.temporaryDirectory() + "/test.exr" ) )
def testExecuteWithStringSubstitutions( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["render"] = GafferAppleseed.AppleseedRender()
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["in"].setInput( s["plane"]["out"] )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.####.appleseed" )
s["fileName"].setValue( self.__scriptFileName )
s.save()
subprocess.check_call(
[ "gaffer", "execute", self.__scriptFileName, "-frames", "1-3" ]
)
for i in range( 1, 4 ) :
self.failUnless( os.path.exists( self.temporaryDirectory() + "/test.%04d.appleseed" % i ) )
def testImageOutput( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["options"] = GafferAppleseed.AppleseedOptions()
s["options"]["in"].setInput( s["plane"]["out"] )
s["options"]["options"]["maxAASamples"]["value"].setValue( 1 )
s["options"]["options"]["maxAASamples"]["enabled"].setValue( True )
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
self.temporaryDirectory() + "/test.####.exr",
"exr",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["options"]["out"] )
s["render"] = GafferAppleseed.AppleseedRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.####.appleseed" )
s["fileName"].setValue( self.__scriptFileName )
s.save()
c = Gaffer.Context()
for i in range( 1, 4 ) :
c.setFrame( i )
with c :
s["render"]["task"].execute()
for i in range( 1, 4 ) :
self.failUnless( os.path.exists( self.temporaryDirectory() + "/test.%04d.exr" % i ) )
def testTypeNamePrefixes( self ) :
self.assertTypeNamesArePrefixed( GafferAppleseed )
self.assertTypeNamesArePrefixed( GafferAppleseedTest )
def testDefaultNames( self ) :
self.assertDefaultNamesAreCorrect( GafferAppleseed )
self.assertDefaultNamesAreCorrect( GafferAppleseedTest )
def testNodesConstructWithDefaultValues( self ) :
self.assertNodesConstructWithDefaultValues( GafferAppleseed )
self.assertNodesConstructWithDefaultValues( GafferAppleseedTest )
def testDirectoryCreation( self ) :
s = Gaffer.ScriptNode()
s["variables"].addChild( Gaffer.NameValuePlug( "renderDirectory", self.temporaryDirectory() + "/renderTests", Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
s["variables"].addChild( Gaffer.NameValuePlug( "appleseedDirectory", self.temporaryDirectory() + "/appleseedTests", flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic ) )
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["outputs"].addOutput(
"beauty",
IECoreScene.Output(
"$renderDirectory/test.####.exr",
"exr",
"rgba",
{}
)
)
s["render"] = GafferAppleseed.AppleseedRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
s["render"]["fileName"].setValue( "$appleseedDirectory/test.####.appleseed" )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/appleseedTests" ) )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/appleseedTests/test.0001.appleseed" ) )
self.assertFalse( os.path.exists( self.__scriptFileName ) )
s["fileName"].setValue( self.__scriptFileName )
s.save()
with s.context() :
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/appleseedTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/appleseedTests/test.0001.appleseed" ) )
self.assertTrue( os.path.exists( self.__scriptFileName ) )
# check it can cope with everything already existing
with s.context() :
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/appleseedTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/appleseedTests/test.0001.appleseed" ) )
def testInternalConnectionsNotSerialised( self ) :
s = Gaffer.ScriptNode()
s["render"] = GafferAppleseed.AppleseedRender()
self.assertFalse( "__adaptedIn" in s.serialise() )
def testNoInput( self ) :
render = GafferAppleseed.AppleseedRender()
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( os.path.join( self.temporaryDirectory(), "test.appleseed" ) )
self.assertEqual( render["task"].hash(), IECore.MurmurHash() )
render["task"].execute()
self.assertFalse( os.path.exists( render["fileName"].getValue() ) )
def testInputFromContextVariables( self ) :
plane = GafferScene.Plane()
variables = Gaffer.ContextVariables()
variables.setup( GafferScene.ScenePlug() )
variables["in"].setInput( plane["out"] )
render = GafferAppleseed.AppleseedRender()
render["in"].setInput( variables["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( os.path.join( self.temporaryDirectory(), "test.appleseed" ) )
self.assertNotEqual( render["task"].hash(), IECore.MurmurHash() )
render["task"].execute()
self.assertTrue( os.path.exists( render["fileName"].getValue() ) )
def testShaderSubstitutions( self ) :

    # Scene: a plane with attributes A/B, plus a cube child that
    # overrides attribute B only.
    plane = GafferScene.Plane()
    planeAttrs = GafferScene.CustomAttributes()
    planeAttrs["in"].setInput( plane["out"] )
    planeAttrs["attributes"].addChild( Gaffer.NameValuePlug( "A", Gaffer.StringPlug( "value", defaultValue = 'bar' ) ) )
    planeAttrs["attributes"].addChild( Gaffer.NameValuePlug( "B", Gaffer.StringPlug( "value", defaultValue = 'foo' ) ) )

    cube = GafferScene.Cube()
    cubeAttrs = GafferScene.CustomAttributes()
    cubeAttrs["in"].setInput( cube["out"] )
    cubeAttrs["attributes"].addChild( Gaffer.NameValuePlug( "B", Gaffer.StringPlug( "value", defaultValue = 'override' ) ) )

    parent = GafferScene.Parent()
    parent["in"].setInput( planeAttrs["out"] )
    parent["children"][0].setInput( cubeAttrs["out"] )
    parent["parent"].setValue( "/plane" )

    # Shader whose filename parameter uses <attr:...> substitution tokens.
    shader = GafferOSL.OSLShader()
    shader.loadShader( "as_texture" )
    shader["parameters"]["in_filename"].setValue( "<attr:A>/path/<attr:B>.tx" )

    # Renamed from `f` so it no longer shadows the file handle below.
    planeFilter = GafferScene.PathFilter()
    planeFilter["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )

    shaderAssignment = GafferScene.ShaderAssignment()
    shaderAssignment["in"].setInput( parent["out"] )
    shaderAssignment["filter"].setInput( planeFilter["out"] )
    shaderAssignment["shader"].setInput( shader["out"] )

    render = GafferAppleseed.AppleseedRender()
    render["in"].setInput( shaderAssignment["out"] )
    render["mode"].setValue( render.Mode.SceneDescriptionMode )
    render["fileName"].setValue( os.path.join( self.temporaryDirectory(), "test.appleseed" ) )

    self.assertNotEqual( render["task"].hash(), IECore.MurmurHash() )
    render["task"].execute()
    self.assertTrue( os.path.exists( render["fileName"].getValue() ) )

    # Use a context manager so the file handle is closed; the original
    # opened the file and never closed it.
    with open( render["fileName"].getValue(), "r" ) as sceneFile :
        texturePaths = set( re.findall( '<parameter name="in_filename" value="string (.*)"', sceneFile.read() ) )

    # The cube inherits A from the plane but overrides B.
    self.assertEqual( texturePaths, set( [ 'bar/path/foo.tx', 'bar/path/override.tx' ] ) )
# Allow the module to be run directly as a test suite.
if __name__ == "__main__":
    unittest.main()
| |
#
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2014 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""RPC definitions for communication between master and node daemons.
RPC definition fields:
- Name as string
- L{SINGLE} for single-node calls, L{MULTI} for multi-node
- Name resolver option(s), can be callable receiving all arguments in a tuple
- Timeout (e.g. L{constants.RPC_TMO_NORMAL}), or callback receiving all
arguments in a tuple to calculate timeout
- List of arguments as tuples
- Name as string
- Argument kind used for encoding/decoding
- Description for docstring (can be C{None})
- Custom body encoder (e.g. for preparing per-node bodies)
- Return value wrapper (e.g. for deserializing into L{objects}-based objects)
- Short call description for docstring
"""
from ganeti import constants
from ganeti import utils
from ganeti import objects
# Guidelines for choosing timeouts:
# - call used during watcher: timeout of 1min, constants.RPC_TMO_URGENT
# - trivial (but be sure it is trivial)
#     (e.g. reading a file): 5min, constants.RPC_TMO_FAST
# - other calls: 15 min, constants.RPC_TMO_NORMAL
# - special calls (instance add, etc.):
#     either constants.RPC_TMO_SLOW (1h) or huge timeouts

# Kind of call: dispatched to one node or to a set of nodes
SINGLE = "single-node"
MULTI = "multi-node"

# Sentinel used in the resolver-options slot of a call definition; presumably
# marks calls that may also be sent to offline nodes — TODO confirm against
# the resolver implementation in rpc.node
ACCEPT_OFFLINE_NODE = object()

# Constants for encoding/decoding (argument/return value codecs referenced by
# the call definitions below; numbering starts at 1)
(ED_OBJECT_DICT,
 ED_OBJECT_DICT_LIST,
 ED_INST_DICT,
 ED_INST_DICT_HVP_BEP_DP,
 ED_NODE_TO_DISK_DICT_DP,
 ED_INST_DICT_OSP_DP,
 ED_IMPEXP_IO,
 ED_FILE_DETAILS,
 ED_FINALIZE_EXPORT_DISKS,
 ED_COMPRESS,
 ED_BLOCKDEV_RENAME,
 ED_DISKS_DICT_DP,
 ED_MULTI_DISKS_DICT_DP,
 ED_SINGLE_DISK_DICT_DP,
 ED_NIC_DICT,
 ED_DEVICE_DICT) = range(1, 17)
def _Prepare(calls):
  """Converts list of calls to dictionary.

  @type calls: list of tuples
  @param calls: RPC call definitions as described in the module docstring
  @rtype: dict
  @return: the same definitions as a dictionary; presumably keyed by the
      call name (first tuple element) — delegated to utils.SequenceToDict

  """
  return utils.SequenceToDict(calls)
def _MigrationStatusPostProc(result):
  """Post-processor for L{rpc.node.RpcRunner.call_instance_get_migration_status}

  Deserializes a successful, non-empty payload into a
  L{objects.MigrationStatus} instance, in place.

  """
  usable = not result.fail_msg and result.payload is not None
  if usable:
    result.payload = objects.MigrationStatus.FromDict(result.payload)
  return result
def _BlockdevFindPostProc(result):
  """Post-processor for L{rpc.node.RpcRunner.call_blockdev_find}.

  Deserializes a successful, non-empty payload into a
  L{objects.BlockDevStatus} instance, in place.

  """
  usable = not result.fail_msg and result.payload is not None
  if usable:
    result.payload = objects.BlockDevStatus.FromDict(result.payload)
  return result
def _BlockdevGetMirrorStatusPostProc(result):
  """Post-processor for call_blockdev_getmirrorstatus.

  On success, replaces each payload entry with the corresponding
  L{objects.BlockDevStatus} instance.

  """
  if result.fail_msg:
    return result
  result.payload = map(objects.BlockDevStatus.FromDict, result.payload)
  return result
def _BlockdevGetMirrorStatusMultiPreProc(node, args):
"""Prepares the appropriate node values for blockdev_getmirrorstatus_multi.
"""
# there should be only one argument to this RPC, already holding a
# node->disks dictionary, we just need to extract the value for the
# current node
assert len(args) == 1
return [args[0][node]]
def _BlockdevGetMirrorStatusMultiPostProc(result):
  """Post-processor for call_blockdev_getmirrorstatus_multi.

  Each payload entry is a C{(success, status)} pair; successful entries
  have their status deserialized into L{objects.BlockDevStatus}, in place.

  """
  if result.fail_msg:
    return result
  for idx, (success, status) in enumerate(result.payload):
    if not success:
      continue
    result.payload[idx] = (success, objects.BlockDevStatus.FromDict(status))
  return result
def _NodeInfoPreProc(node, args):
"""Prepare the storage_units argument for node_info calls."""
assert len(args) == 2
# The storage_units argument is either a dictionary with one value for each
# node, or a fixed value to be used for all the nodes
if isinstance(args[0], dict):
return [args[0][node], args[1]]
else:
return args
def _ImpExpStatusPostProc(result):
  """Post-processor for import/export status.

  @rtype: Payload containing list of L{objects.ImportExportStatus} instances
  @return: Returns a list of the state of each named import/export or None if
           a status couldn't be retrieved

  """
  if not result.fail_msg:
    # keep None placeholders for daemons whose status was unavailable
    result.payload = [None if i is None
                      else objects.ImportExportStatus.FromDict(i)
                      for i in result.payload]
  return result
def _TestDelayTimeout((duration, )):
"""Calculate timeout for "test_delay" RPC.
"""
return int(duration + 5)
# RPC definitions for managing file storage directories
_FILE_STORAGE_CALLS = [
  ("file_storage_dir_create", SINGLE, None, constants.RPC_TMO_FAST, [
    ("file_storage_dir", None, "File storage directory"),
    ], None, None, "Create the given file storage directory"),
  ("file_storage_dir_remove", SINGLE, None, constants.RPC_TMO_FAST, [
    ("file_storage_dir", None, "File storage directory"),
    ], None, None, "Remove the given file storage directory"),
  ("file_storage_dir_rename", SINGLE, None, constants.RPC_TMO_FAST, [
    ("old_file_storage_dir", None, "Old name"),
    ("new_file_storage_dir", None, "New name"),
    ], None, None, "Rename file storage directory"),
  ]
# RPC definitions for generic storage unit operations
_STORAGE_CALLS = [
  ("storage_list", MULTI, None, constants.RPC_TMO_NORMAL, [
    ("su_name", None, None),
    ("su_args", None, None),
    ("name", None, None),
    ("fields", None, None),
    ], None, None, "Get list of storage units"),
  ("storage_modify", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("su_name", None, None),
    ("su_args", None, None),
    ("name", None, None),
    ("changes", None, None),
    ], None, None, "Modify a storage unit"),
  ("storage_execute", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("su_name", None, None),
    ("su_args", None, None),
    ("name", None, None),
    ("op", None, None),
    ], None, None, "Executes an operation on a storage unit"),
  ]
# RPC definitions for instance lifecycle operations.
# Fixed here: instance_reboot's short description was a copy-paste of
# instance_list's ("Returns the list of running instances..."), and
# hotplug_device's description misspelled "Hotplug" as "Hoplug".
_INSTANCE_CALLS = [
  ("instance_info", SINGLE, None, constants.RPC_TMO_URGENT, [
    ("instance", None, "Instance name"),
    ("hname", None, "Hypervisor type"),
    ("hvparams", None, "Hypervisor parameters"),
    ], None, None, "Returns information about a single instance"),
  ("all_instances_info", MULTI, None, constants.RPC_TMO_URGENT, [
    ("hypervisor_list", None, "Hypervisors to query for instances"),
    ("all_hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
    ], None, None,
   "Returns information about all instances on the given nodes"),
  ("instance_list", MULTI, None, constants.RPC_TMO_URGENT, [
    ("hypervisor_list", None, "Hypervisors to query for instances"),
    ("hvparams", None, "Hvparams of all hypervisors"),
    ], None, None, "Returns the list of running instances on the given nodes"),
  ("instance_reboot", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("inst", ED_INST_DICT, "Instance object"),
    ("reboot_type", None, None),
    ("shutdown_timeout", None, None),
    ("reason", None, "The reason for the reboot"),
    ], None, None, "Reboots an instance"),
  ("instance_shutdown", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("instance", ED_INST_DICT, "Instance object"),
    ("timeout", None, None),
    ("reason", None, "The reason for the shutdown"),
    ], None, None, "Stops an instance"),
  ("instance_balloon_memory", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("instance", ED_INST_DICT, "Instance object"),
    ("memory", None, None),
    ], None, None, "Modify the amount of an instance's runtime memory"),
  ("instance_run_rename", SINGLE, None, constants.RPC_TMO_SLOW, [
    ("instance", ED_INST_DICT, "Instance object"),
    ("old_name", None, None),
    ("debug", None, None),
    ], None, None, "Run the OS rename script for an instance"),
  ("instance_migratable", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("instance", ED_INST_DICT, "Instance object"),
    ], None, None, "Checks whether the given instance can be migrated"),
  ("migration_info", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("instance", ED_INST_DICT, "Instance object"),
    ], None, None,
   "Gather the information necessary to prepare an instance migration"),
  ("accept_instance", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("instance", ED_INST_DICT, "Instance object"),
    ("info", None, "Result for the call_migration_info call"),
    ("target", None, "Target hostname (usually an IP address)"),
    ], None, None, "Prepare a node to accept an instance"),
  ("instance_finalize_migration_dst", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("instance", ED_INST_DICT, "Instance object"),
    ("info", None, "Result for the call_migration_info call"),
    ("success", None, "Whether the migration was a success or failure"),
    ], None, None, "Finalize any target-node migration specific operation"),
  ("instance_migrate", SINGLE, None, constants.RPC_TMO_SLOW, [
    ("cluster_name", None, "Cluster name"),
    ("instance", ED_INST_DICT, "Instance object"),
    ("target", None, "Target node name"),
    ("live", None, "Whether the migration should be done live or not"),
    ], None, None, "Migrate an instance"),
  ("instance_finalize_migration_src", SINGLE, None, constants.RPC_TMO_SLOW, [
    ("instance", ED_INST_DICT, "Instance object"),
    ("success", None, "Whether the migration succeeded or not"),
    ("live", None, "Whether the user requested a live migration or not"),
    ], None, None, "Finalize the instance migration on the source node"),
  ("instance_get_migration_status", SINGLE, None, constants.RPC_TMO_SLOW, [
    ("instance", ED_INST_DICT, "Instance object"),
    ], None, _MigrationStatusPostProc, "Report migration status"),
  ("instance_start", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("instance_hvp_bep", ED_INST_DICT_HVP_BEP_DP, None),
    ("startup_paused", None, None),
    ("reason", None, "The reason for the startup"),
    ], None, None, "Starts an instance"),
  ("instance_os_add", SINGLE, None, constants.RPC_TMO_1DAY, [
    ("instance_osp", ED_INST_DICT_OSP_DP, "Tuple: (target instance,"
                                          " temporary OS parameters"
                                          " overriding configuration)"),
    ("reinstall", None, "Whether the instance is being reinstalled"),
    ("debug", None, "Debug level for the OS install script to use"),
    ], None, None, "Installs an operative system onto an instance"),
  ("hotplug_device", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("instance", ED_INST_DICT, "Instance object"),
    ("action", None, "Hotplug Action"),
    ("dev_type", None, "Device type"),
    ("device", ED_DEVICE_DICT, "Device dict"),
    ("extra", None, "Extra info for device (dev_path for disk)"),
    ("seq", None, "Device seq"),
    ], None, None, "Hotplug a device to a running instance"),
  ("hotplug_supported", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("instance", ED_INST_DICT, "Instance object"),
    ], None, None, "Check if hotplug is supported"),
  ("instance_metadata_modify", SINGLE, None, constants.RPC_TMO_URGENT, [
    ("instance", None, "Instance object"),
    ], None, None, "Modify instance metadata"),
  ]
# RPC definitions for the inter-cluster import/export daemons
_IMPEXP_CALLS = [
  ("import_start", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("opts", ED_OBJECT_DICT, None),
    ("instance", ED_INST_DICT, None),
    ("component", None, None),
    ("dest", ED_IMPEXP_IO, "Import destination"),
    ], None, None, "Starts an import daemon"),
  ("export_start", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("opts", ED_OBJECT_DICT, None),
    ("host", None, None),
    ("port", None, None),
    ("instance", ED_INST_DICT, None),
    ("component", None, None),
    ("source", ED_IMPEXP_IO, "Export source"),
    ], None, None, "Starts an export daemon"),
  ("impexp_status", SINGLE, None, constants.RPC_TMO_FAST, [
    ("names", None, "Import/export names"),
    ], None, _ImpExpStatusPostProc, "Gets the status of an import or export"),
  ("impexp_abort", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("name", None, "Import/export name"),
    ], None, None, "Aborts an import or export"),
  ("impexp_cleanup", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("name", None, "Import/export name"),
    ], None, None, "Cleans up after an import or export"),
  ("export_info", SINGLE, None, constants.RPC_TMO_FAST, [
    ("path", None, None),
    ], None, None, "Queries the export information in a given path"),
  ("finalize_export", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("instance", ED_INST_DICT, None),
    ("snap_disks", ED_FINALIZE_EXPORT_DISKS, None),
    ], None, None, "Request the completion of an export operation"),
  ("export_list", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
   "Gets the stored exports list"),
  ("export_remove", SINGLE, None, constants.RPC_TMO_FAST, [
    ("export", None, None),
    ], None, None, "Requests removal of a given export"),
  ]
# RPC definitions for X509 certificate management
_X509_CALLS = [
  ("x509_cert_create", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("validity", None, "Validity in seconds"),
    ], None, None, "Creates a new X509 certificate for SSL/TLS"),
  ("x509_cert_remove", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("name", None, "Certificate name"),
    ], None, None, "Removes a X509 certificate"),
  ]
# RPC definitions for block device management (LVM, DRBD, etc.)
_BLOCKDEV_CALLS = [
  ("bdev_sizes", MULTI, None, constants.RPC_TMO_URGENT, [
    ("devices", None, None),
    ], None, None,
   "Gets the sizes of requested block devices present on a node"),
  ("blockdev_create", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("bdev", ED_SINGLE_DISK_DICT_DP, None),
    ("size", None, None),
    ("owner", None, None),
    ("on_primary", None, None),
    ("info", None, None),
    ("exclusive_storage", None, None),
    ], None, None, "Request creation of a given block device"),
  ("blockdev_convert", SINGLE, None, constants.RPC_TMO_SLOW, [
    ("bdev_src", ED_SINGLE_DISK_DICT_DP, None),
    ("bdev_dest", ED_SINGLE_DISK_DICT_DP, None),
    ], None, None,
   "Request the copy of the source block device to the destination one"),
  ("blockdev_image", SINGLE, None, constants.RPC_TMO_SLOW, [
    ("bdev", ED_SINGLE_DISK_DICT_DP, None),
    ("image", None, None),
    ("size", None, None),
    ], None, None,
   "Request to dump an image with given size onto a block device"),
  ("blockdev_wipe", SINGLE, None, constants.RPC_TMO_SLOW, [
    ("bdev", ED_SINGLE_DISK_DICT_DP, None),
    ("offset", None, None),
    ("size", None, None),
    ], None, None,
   "Request wipe at given offset with given size of a block device"),
  ("blockdev_remove", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("bdev", ED_SINGLE_DISK_DICT_DP, None),
    ], None, None, "Request removal of a given block device"),
  ("blockdev_pause_resume_sync", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("disks", ED_DISKS_DICT_DP, None),
    ("pause", None, None),
    ], None, None, "Request a pause/resume of given block device"),
  ("blockdev_assemble", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("disk", ED_SINGLE_DISK_DICT_DP, None),
    ("instance", ED_INST_DICT, None),
    ("on_primary", None, None),
    ("idx", None, None),
    ], None, None, "Request assembling of a given block device"),
  ("blockdev_shutdown", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("disk", ED_SINGLE_DISK_DICT_DP, None),
    ], None, None, "Request shutdown of a given block device"),
  ("blockdev_addchildren", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("bdev", ED_SINGLE_DISK_DICT_DP, None),
    ("ndevs", ED_DISKS_DICT_DP, None),
    ], None, None,
   "Request adding a list of children to a (mirroring) device"),
  ("blockdev_removechildren", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("bdev", ED_SINGLE_DISK_DICT_DP, None),
    ("ndevs", ED_DISKS_DICT_DP, None),
    ], None, None,
   "Request removing a list of children from a (mirroring) device"),
  ("blockdev_close", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("instance_name", None, None),
    ("disks", ED_DISKS_DICT_DP, None),
    ], None, None, "Closes the given block devices"),
  ("blockdev_open", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("instance_name", None, None),
    ("disks", ED_DISKS_DICT_DP, None),
    ("exclusive", None, None),
    ], None, None, "Opens the given block devices in required mode"),
  ("blockdev_getdimensions", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("disks", ED_MULTI_DISKS_DICT_DP, None),
    ], None, None, "Returns size and spindles of the given disks"),
  ("drbd_disconnect_net", MULTI, None, constants.RPC_TMO_NORMAL, [
    ("disks", ED_DISKS_DICT_DP, None),
    ], None, None,
   "Disconnects the network of the given drbd devices"),
  ("drbd_attach_net", MULTI, None, constants.RPC_TMO_NORMAL, [
    ("disks", ED_DISKS_DICT_DP, None),
    ("multimaster", None, None),
    ], None, None, "Connects the given DRBD devices"),
  ("drbd_wait_sync", MULTI, None, constants.RPC_TMO_SLOW, [
    ("disks", ED_DISKS_DICT_DP, None),
    ], None, None,
   "Waits for the synchronization of drbd devices is complete"),
  ("drbd_needs_activation", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("disks", ED_MULTI_DISKS_DICT_DP, None),
    ], None, None,
   "Returns the drbd disks which need activation"),
  ("blockdev_grow", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("cf_bdev", ED_SINGLE_DISK_DICT_DP, None),
    ("amount", None, None),
    ("dryrun", None, None),
    ("backingstore", None, None),
    ("es_flag", None, None),
    ], None, None, "Request growing of the given block device by a"
   " given amount"),
  ("blockdev_snapshot", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("cf_bdev", ED_SINGLE_DISK_DICT_DP, None),
    ("snap_name", None, None),
    ("snap_size", None, None),
    ], None, None, "Export a given disk to another node"),
  ("blockdev_rename", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("devlist", ED_BLOCKDEV_RENAME, None),
    ], None, None, "Request rename of the given block devices"),
  ("blockdev_find", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("disk", ED_SINGLE_DISK_DICT_DP, None),
    ], None, _BlockdevFindPostProc,
   "Request identification of a given block device"),
  ("blockdev_getmirrorstatus", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("disks", ED_DISKS_DICT_DP, None),
    ], None, _BlockdevGetMirrorStatusPostProc,
   "Request status of a (mirroring) device"),
  ("blockdev_getmirrorstatus_multi", MULTI, None, constants.RPC_TMO_NORMAL, [
    ("node_disks", ED_NODE_TO_DISK_DICT_DP, None),
    ], _BlockdevGetMirrorStatusMultiPreProc,
   _BlockdevGetMirrorStatusMultiPostProc,
   "Request status of (mirroring) devices from multiple nodes"),
  ("blockdev_setinfo", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("disk", ED_SINGLE_DISK_DICT_DP, None),
    ("info", None, None),
    ], None, None, "Sets metadata information on a given block device"),
  ]
# RPC definitions for OS definition handling
_OS_CALLS = [
  ("os_diagnose", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
   "Request a diagnose of OS definitions"),
  ("os_validate", MULTI, None, constants.RPC_TMO_FAST, [
    ("required", None, None),
    ("name", None, None),
    ("checks", None, None),
    ("params", None, None),
    ("force_variant", None, None),
    ], None, None, "Run a validation routine for a given OS"),
  ("os_export", SINGLE, None, constants.RPC_TMO_FAST, [
    ("instance", ED_INST_DICT, None),
    ("override_env", None, None),
    ], None, None, "Export an OS for a given instance"),
  ]
# RPC definitions for ExtStorage provider handling
_EXTSTORAGE_CALLS = [
  ("extstorage_diagnose", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
   "Request a diagnose of ExtStorage Providers"),
  ]
# RPC definitions for node-level operations (info, verification, SSH keys)
_NODE_CALLS = [
  ("node_has_ip_address", SINGLE, None, constants.RPC_TMO_FAST, [
    ("address", None, "IP address"),
    ], None, None, "Checks if a node has the given IP address"),
  ("node_info", MULTI, None, constants.RPC_TMO_URGENT, [
    ("storage_units", None,
     "List of tuples '<storage_type>,<key>,[<param>]' to ask for disk space"
     " information; the parameter list varies depending on the storage_type"),
    ("hv_specs", None,
     "List of hypervisor specification (name, hvparams) to ask for node "
     "information"),
    ], _NodeInfoPreProc, None, "Return node information"),
  ("node_verify", MULTI, None, constants.RPC_TMO_NORMAL, [
    ("checkdict", None, "What to verify"),
    ("cluster_name", None, "Cluster name"),
    ("all_hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
    ], None, None, "Request verification of given parameters"),
  ("node_volumes", MULTI, None, constants.RPC_TMO_FAST, [], None, None,
   "Gets all volumes on node(s)"),
  ("node_demote_from_mc", SINGLE, None, constants.RPC_TMO_FAST, [], None, None,
   "Demote a node from the master candidate role"),
  ("node_powercycle", SINGLE, ACCEPT_OFFLINE_NODE, constants.RPC_TMO_NORMAL, [
    ("hypervisor", None, "Hypervisor type"),
    ("hvparams", None, "Hypervisor parameters"),
    ], None, None, "Tries to powercycle a node"),
  ("node_configure_ovs", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("ovs_name", None, "Name of the OpenvSwitch to create"),
    ("ovs_link", None, "Link of the OpenvSwitch to the outside"),
    ], None, None, "This will create and setup the OpenvSwitch"),
  ("node_crypto_tokens", SINGLE, None, constants.RPC_TMO_SLOW, [
    ("token_request", None,
     "List of tuples of requested crypto token types, actions"),
    ], None, None, "Handle crypto tokens of the node."),
  ("node_ensure_daemon", MULTI, None, constants.RPC_TMO_URGENT, [
    ("daemon", None, "Daemon name"),
    ("run", None, "Whether the daemon should be running or stopped"),
    ], None, None, "Ensure daemon is running on the node."),
  ("node_ssh_key_add", MULTI, None, constants.RPC_TMO_FAST, [
    ("node_uuid", None, "UUID of the node whose key is distributed"),
    ("node_name", None, "Name of the node whose key is distributed"),
    ("potential_master_candidates", None, "Potential master candidates"),
    ("to_authorized_keys", None, "Whether the node's key should be added"
     " to all nodes' 'authorized_keys' file"),
    ("to_public_keys", None, "Whether the node's key should be added"
     " to all nodes' public key file"),
    ("get_public_keys", None, "Whether the node should get the other nodes'"
     " public keys"),
    ("debug", None, "Set loglevel of ssh calls to 'debug'."),
    ("verbose", None, "Set loglevel of ssh calls to 'verbose'.")],
   None, None, "Distribute a new node's public SSH key on the cluster."),
  ("node_ssh_key_remove", MULTI, None, constants.RPC_TMO_FAST, [
    ("node_uuid", None, "UUID of the node whose key is removed"),
    ("node_name", None, "Name of the node whose key is removed"),
    ("master_candidate_uuids", None, "List of UUIDs of master candidates."),
    ("potential_master_candidates", None, "Potential master candidates"),
    ("from_authorized_keys", None,
     "If the key should be removed from the 'authorized_keys' file."),
    ("from_public_keys", None,
     "If the key should be removed from the public key file."),
    ("clear_authorized_keys", None,
     "If the 'authorized_keys' file of the node should be cleared."),
    ("clear_public_keys", None,
     "If the 'ganeti_pub_keys' file of the node should be cleared."),
    ("readd", None,
     "Whether this is a readd operation."),
    ("debug", None, "Set loglevel of ssh calls to 'debug'."),
    ("verbose", None, "Set loglevel of ssh calls to 'verbose'.")],
   None, None, "Remove a node's SSH key from the other nodes' key files."),
  ("node_ssh_keys_renew", MULTI, None, constants.RPC_TMO_4HRS, [
    ("node_uuids", None, "UUIDs of the nodes whose key is renewed"),
    ("node_names", None, "Names of the nodes whose key is renewed"),
    ("master_candidate_uuids", None, "List of UUIDs of master candidates."),
    ("potential_master_candidates", None, "Potential master candidates"),
    ("old_key_type", None, "The type of key previously used"),
    ("new_key_type", None, "The type of key to generate"),
    ("new_key_bits", None, "The length of the key to generate"),
    ("debug", None, "Set logging of SSH update tool to 'debug'."),
    ("verbose", None, "Set logging of SSH update tool to 'info'.")],
   None, None, "Renew all SSH key pairs of all nodes nodes."),
  ("node_ssh_key_remove_light", MULTI, None, constants.RPC_TMO_FAST, [
    ("node_name", None, "Name of the node whose key is removed")],
   None, None, "Remove a node's SSH key from the master's public key file."),
  ]
# Miscellaneous RPC definitions (volumes, hooks, iallocator, watcher, ...)
_MISC_CALLS = [
  ("lv_list", MULTI, None, constants.RPC_TMO_URGENT, [
    ("vg_name", None, None),
    ], None, None, "Gets the logical volumes present in a given volume group"),
  ("vg_list", MULTI, None, constants.RPC_TMO_URGENT, [], None, None,
   "Gets the volume group list"),
  ("bridges_exist", SINGLE, None, constants.RPC_TMO_URGENT, [
    ("bridges_list", None, "Bridges which must be present on remote node"),
    ], None, None, "Checks if a node has all the bridges given"),
  ("etc_hosts_modify", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("mode", None,
     "Mode to operate; currently L{constants.ETC_HOSTS_ADD} or"
     " L{constants.ETC_HOSTS_REMOVE}"),
    ("name", None, "Hostname to be modified"),
    ("ip", None, "IP address (L{constants.ETC_HOSTS_ADD} only)"),
    ], None, None, "Modify hosts file with name"),
  ("drbd_helper", MULTI, None, constants.RPC_TMO_URGENT, [],
   None, None, "Gets DRBD helper"),
  ("restricted_command", MULTI, None, constants.RPC_TMO_SLOW, [
    ("cmd", None, "Command name"),
    ], None, None, "Runs restricted command"),
  ("repair_command", SINGLE, None, constants.RPC_TMO_SLOW, [
    ("cmd", None, "Command name"),
    ("inp", None, "Input to be passed as stdin"),
    ], None, None, "Runs repair command"),
  ("run_oob", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("oob_program", None, None),
    ("command", None, None),
    ("remote_node", None, None),
    ("timeout", None, None),
    ], None, None, "Runs out-of-band command"),
  ("hooks_runner", MULTI, None, constants.RPC_TMO_NORMAL, [
    ("hpath", None, None),
    ("phase", None, None),
    ("env", None, None),
    ], None, None, "Call the hooks runner"),
  ("iallocator_runner", SINGLE, None, constants.RPC_TMO_NORMAL, [
    ("name", None, "Iallocator name"),
    ("idata", None, "JSON-encoded input string"),
    ("default_iallocator_params", None, "Additional iallocator parameters"),
    ], None, None, "Call an iallocator on a remote node"),
  # note: timeout is computed from the requested duration, not a constant
  ("test_delay", MULTI, None, _TestDelayTimeout, [
    ("duration", None, None),
    ], None, None, "Sleep for a fixed time on given node(s)"),
  ("hypervisor_validate_params", MULTI, None, constants.RPC_TMO_NORMAL, [
    ("hvname", None, "Hypervisor name"),
    ("hvfull", None, "Parameters to be validated"),
    ], None, None, "Validate hypervisor params"),
  ("get_watcher_pause", SINGLE, None, constants.RPC_TMO_URGENT, [],
   None, None, "Get watcher pause end"),
  ("set_watcher_pause", MULTI, None, constants.RPC_TMO_URGENT, [
    ("until", None, None),
    ], None, None, "Set watcher pause end"),
  ("get_file_info", SINGLE, None, constants.RPC_TMO_FAST, [
    ("file_path", None, None),
    ], None, None, "Checks if a file exists and reports on it"),
  ]
# Mapping of RPC client class name to the set of calls that class exposes
CALLS = {
  "RpcClientDefault":
    _Prepare(_IMPEXP_CALLS + _X509_CALLS + _OS_CALLS + _NODE_CALLS +
             _FILE_STORAGE_CALLS + _MISC_CALLS + _INSTANCE_CALLS +
             _BLOCKDEV_CALLS + _STORAGE_CALLS + _EXTSTORAGE_CALLS),
  "RpcClientJobQueue": _Prepare([
    ("jobqueue_update", MULTI, None, constants.RPC_TMO_URGENT, [
      ("file_name", None, None),
      ("content", ED_COMPRESS, None),
      ], None, None, "Update job queue file"),
    ("jobqueue_purge", SINGLE, None, constants.RPC_TMO_NORMAL, [], None, None,
     "Purge job queue"),
    ("jobqueue_rename", MULTI, None, constants.RPC_TMO_URGENT, [
      ("rename", None, None),
      ], None, None, "Rename job queue file"),
    ("jobqueue_set_drain_flag", MULTI, None, constants.RPC_TMO_URGENT, [
      ("flag", None, None),
      ], None, None, "Set job queue drain flag"),
    ]),
  "RpcClientBootstrap": _Prepare([
    ("node_start_master_daemons", SINGLE, None, constants.RPC_TMO_FAST, [
      ("no_voting", None, None),
      ], None, None, "Starts master daemons on a node"),
    ("node_activate_master_ip", SINGLE, None, constants.RPC_TMO_FAST, [
      ("master_params", ED_OBJECT_DICT, "Network parameters of the master"),
      ("use_external_mip_script", None,
       "Whether to use the user-provided master IP address setup script"),
      ], None, None,
     "Activates master IP on a node"),
    ("node_stop_master", SINGLE, None, constants.RPC_TMO_FAST, [], None, None,
     "Deactivates master IP and stops master daemons on a node"),
    ("node_deactivate_master_ip", SINGLE, None, constants.RPC_TMO_FAST, [
      ("master_params", ED_OBJECT_DICT, "Network parameters of the master"),
      ("use_external_mip_script", None,
       "Whether to use the user-provided master IP address setup script"),
      ], None, None,
     "Deactivates master IP on a node"),
    ("node_change_master_netmask", SINGLE, None, constants.RPC_TMO_FAST, [
      ("old_netmask", None, "The old value of the netmask"),
      ("netmask", None, "The new value of the netmask"),
      ("master_ip", None, "The master IP"),
      ("master_netdev", None, "The master network device"),
      ], None, None, "Change master IP netmask"),
    ("node_leave_cluster", SINGLE, None, constants.RPC_TMO_NORMAL, [
      ("modify_ssh_setup", None, None),
      ], None, None,
     "Requests a node to clean the cluster information it has"),
    ("master_node_name", MULTI, None, constants.RPC_TMO_URGENT, [], None, None,
     "Returns the master node name"),
    ]),
  "RpcClientDnsOnly": _Prepare([
    ("version", MULTI, ACCEPT_OFFLINE_NODE, constants.RPC_TMO_URGENT, [], None,
     None, "Query node version"),
    ("node_verify_light", MULTI, None, constants.RPC_TMO_NORMAL, [
      ("checkdict", None, "What to verify"),
      ("cluster_name", None, "Cluster name"),
      ("hvparams", None, "Dictionary mapping hypervisor names to hvparams"),
      ], None, None, "Request verification of given parameters"),
    ]),
  "RpcClientConfig": _Prepare([
    ("upload_file", MULTI, None, constants.RPC_TMO_NORMAL, [
      ("file_name", ED_FILE_DETAILS, None),
      ], None, None, "Upload files"),
    ("upload_file_single", MULTI, None, constants.RPC_TMO_NORMAL, [
      ("file_name", None, "The name of the file"),
      ("content", ED_COMPRESS, "The data to be uploaded"),
      ("mode", None, "The mode of the file or None"),
      ("uid", None, "The owner of the file"),
      ("gid", None, "The group of the file"),
      ("atime", None, "The file's last access time"),
      ("mtime", None, "The file's last modification time"),
      ], None, None, "Upload files"),
    ("write_ssconf_files", MULTI, None, constants.RPC_TMO_NORMAL, [
      ("values", None, None),
      ], None, None, "Write ssconf files"),
    ]),
  }
| |
import logging
import time
import sys
import numpy as np
from joblib import Parallel, delayed
from sklearn.utils.extmath import safe_sparse_dot
from chain_opt import optimize_chain_fast
from common import latent
from trw_utils import optimize_chain, optimize_kappa
from graph_utils import decompose_graph, decompose_grid_graph
from heterogenous_crf import inference_gco
from pyqpbo import binary_general_graph
from scipy.optimize import fmin_l_bfgs_b
import scipy.sparse as sps
def f(x, node_weights, pairwise, edges):
    """Dual objective and subgradient for the relaxed multi-label MRF.

    For every state ``k`` a binary subproblem is solved with
    ``binary_general_graph`` after shifting that state's unaries by the
    shared dual variables ``x``; the per-state indicator solutions are
    accumulated into the gradient of the node consistency constraint
    (each node must pick exactly one state).

    Returns ``(-dual, -dlambda)`` so the pair can be handed directly to a
    minimizer such as ``scipy.optimize.fmin_l_bfgs_b`` (which minimizes,
    while the dual itself is being maximized).

    :param x: 1-D dual variables, one per node.
    :param node_weights: (n_nodes, n_states) unary potentials.
    :param pairwise: sequence of per-state edge potentials accepted by
        ``binary_general_graph`` -- exact layout defined by pyqpbo.
    :param edges: edge list of the graph.
    """
    n_nodes, n_states = node_weights.shape
    dual = 0
    dlambda = np.zeros(n_nodes)  # subgradient w.r.t. x
    for k in xrange(n_states):
        # Binary problem for state k: column 1 carries the state-k unaries
        # shifted by the duals; column 0 is the "not this state" option.
        new_unaries = np.zeros((n_nodes, 2))
        new_unaries[:,1] = node_weights[:,k] + x
        y_hat, energy = binary_general_graph(edges, new_unaries, pairwise[k])
        # NOTE(review): the 0.5 factor presumably compensates for edge
        # double-counting in the pairwise construction -- confirm against
        # the caller that builds `pairwise`.
        dual += 0.5 * energy
        dlambda += y_hat
    dlambda -= 1  # gradient of the sum_k y_k(p) == 1 constraint
    dual -= np.sum(x)
    #print dual
    return -dual, -dlambda
class OverWeak(object):
    """Dual-decomposition trainer for a pairwise CRF learned from a mix of
    fully labeled and weakly labeled examples.

    Each example's graph is decomposed into chains; slave chain subproblems
    are solved with ``optimize_chain`` and their dual variables (``lambdas``)
    are updated by projected subgradient steps.  Weakly labeled examples
    additionally carry per-node duals ``mu`` linking the CRF to a label-count
    oracle (``optimize_kappa``), handled by one of three branches in ``fit``.

    NOTE(review): the precise objective depends on the project-local helpers
    (optimize_chain, optimize_kappa, inference_gco, latent,
    binary_general_graph) whose contracts are not visible in this file; the
    comments below only describe what this code itself demonstrates.
    """

    def __init__(self, model, n_states, n_features, n_edge_features,
                 C=1, verbose=0, max_iter=200, check_every=1,
                 complete_every=1, alpha=1, update_w_every=50,
                 update_mu=20):
        """Store hyper-parameters.

        :param model: model object forwarded to ``latent`` when completing
            latent variables (semantics defined elsewhere in the project).
        :param n_states: number of labels per node.
        :param n_features: number of unary (node) features.
        :param n_edge_features: number of pairwise (edge) features.
        :param C: regularization trade-off; the logged objective is
            ``C * loss + ||w||^2 / 2``.
        :param verbose: verbosity flag (stored, not read in this file).
        :param max_iter: number of outer subgradient iterations in ``fit``.
        :param check_every: evaluate train/test scorers every N iterations.
        :param complete_every: recompute latent variables every N iterations.
        :param alpha: weight forwarded to ``optimize_kappa``.
        :param update_w_every: after the first 100 iterations, ``w`` is only
            updated every N iterations.
        :param update_mu: max inner lambda-update sweeps for weak examples.
        """
        self.model = model
        self.n_states = n_states
        self.n_features = n_features
        self.n_edge_features = n_edge_features
        self.C = C
        self.verbose = verbose
        self.max_iter = max_iter
        # w layout: unary block (n_states * n_features) followed by the
        # pairwise block (n_states * n_edge_features).
        self.size_w = (self.n_states * self.n_features +
                       self.n_states * self.n_edge_features)
        self.logger = logging.getLogger(__name__)
        self.check_every = check_every
        self.complete_every = complete_every
        self.alpha = alpha
        self.n_jobs = 4  # parallel jobs used for latent-variable completion
        self.update_w_every = update_w_every
        self.update_mu = update_mu

    def _get_edges(self, x):
        # An example x is a tuple-like: (features, edges, edge_features).
        return x[1]

    def _get_features(self, x):
        return x[0]

    def _get_edge_features(self, x):
        return x[2]

    def _get_pairwise_potentials(self, x, w):
        """Return per-edge (n_states, n_states) potentials computed from w.

        Only Potts-like diagonal interactions are modeled: the projected
        pairwise weights of each edge are placed on the matrix diagonal.
        """
        edge_features = self._get_edge_features(x)
        pairwise = np.asarray(w[self.n_states * self.n_features:])
        pairwise = pairwise.reshape(self.n_edge_features, -1)
        pairwise = np.dot(edge_features, pairwise)
        res = np.zeros((edge_features.shape[0], self.n_states, self.n_states))
        for i in range(edge_features.shape[0]):
            res[i, :, :] = np.diag(pairwise[i, :])
        return res

    def _get_unary_potentials(self, x, w):
        """Return (n_nodes, n_states) unaries: ``features . W_unary^T``."""
        features = self._get_features(x)
        unary_params = w[:self.n_states * self.n_features].reshape(self.n_states, self.n_features)
        return safe_sparse_dot(features, unary_params.T, dense_output=True)

    def _loss_augment_unaries(self, unaries, y, weights):
        """Add the weighted Hamming loss to every wrong label's unary."""
        unaries = unaries.copy()
        for label in xrange(self.n_states):
            mask = y != label
            unaries[mask, label] += weights[mask]
        return unaries

    def _joint_features(self, chain, x, y, edge_index, multiplier):
        """Joint feature vector of labeling ``y`` restricted to one chain.

        Node features are scaled by ``multiplier`` (1 / number of chains
        containing the node) so that summing over all chains reproduces the
        full-graph statistics.
        """
        features = self._get_features(x)[chain,:]
        n_nodes = features.shape[0]
        features *= multiplier[chain,:]
        e_ind = []
        edges = []
        for i in xrange(chain.shape[0] - 1):
            edges.append((i, i + 1))
            e_ind.append(edge_index[(chain[i], chain[i + 1])])
        edges = np.array(edges)
        edge_features = self._get_edge_features(x)[e_ind,:]
        # One-hot node marginals for the given labeling.
        unary_marginals = np.zeros((n_nodes, self.n_states), dtype=np.float64)
        unary_marginals[np.ogrid[:n_nodes], y] = 1
        unaries_acc = safe_sparse_dot(unary_marginals.T, features,
                                      dense_output=True)
        pw = np.zeros((self.n_edge_features, self.n_states))
        for label in xrange(self.n_states):
            # Diagonal (Potts) statistics: only same-label edges contribute.
            mask = (y[edges[:, 0]] == label) & (y[edges[:, 1]] == label)
            pw[:, label] = np.sum(edge_features[mask], axis=0)
        return np.hstack([unaries_acc.ravel(), pw.ravel()])

    def _joint_features_full(self, x, y):
        """Joint feature vector of labeling ``y`` on the whole graph."""
        features, edges, edge_features = \
            self._get_features(x), self._get_edges(x), self._get_edge_features(x)
        n_nodes = features.shape[0]
        y = y.reshape(n_nodes)
        unary_marginals = np.zeros((n_nodes, self.n_states), dtype=np.float64)
        unary_marginals[np.ogrid[:n_nodes], y] = 1
        unaries_acc = safe_sparse_dot(unary_marginals.T, features,
                                      dense_output=True)
        pw = np.zeros((self.n_edge_features, self.n_states))
        for label in xrange(self.n_states):
            mask = (y[edges[:, 0]] == label) & (y[edges[:, 1]] == label)
            pw[:, label] = np.sum(edge_features[mask], axis=0)
        return np.hstack([unaries_acc.ravel(), pw.ravel()])

    def loss_augmented_inference(self, x, y, w):
        """Loss-augmented MAP inference via graph cuts for weak labels.

        Labels listed in ``y.weak`` receive a label cost ``c``; every other
        label gets the node loss weights added to its unaries.
        """
        unary_potentials = self._get_unary_potentials(x, w)
        pairwise_potentials = self._get_pairwise_potentials(x, w)
        edges = self._get_edges(x)
        label_costs = np.zeros(self.n_states)
        c = np.sum(y.weights) / float(self.n_states)
        for label in y.weak:
            label_costs[label] = c
        for label in xrange(0, self.n_states):
            if label not in y.weak:
                unary_potentials[:, label] += y.weights
        h = inference_gco(unary_potentials, pairwise_potentials, edges,
                          label_costs, n_iter=5, return_energy=True)
        return h

    def fit(self, X, Y, train_scorer, test_scorer, decompose='general',
            use_latent_first_iter=500, undergenerating_weak=True, smd=False):
        """Run projected subgradient training.

        :param X: list of examples, each (features, edges, edge_features).
        :param Y: list of label objects exposing ``full``, ``full_labeled``,
            ``weights`` and (for weak labels) ``weak``.
        :param train_scorer: callable evaluated on ``w`` for logging.
        :param test_scorer: callable evaluated on ``w`` for logging.
        :param decompose: 'general' or 'grid' graph-to-chain decomposition.
        :param use_latent_first_iter: weakly labeled examples are skipped
            until this outer iteration.
        :param undergenerating_weak: if True, weak examples use the
            gco-oracle branch; otherwise chain decomposition or SMD.
        :param smd: if True (and ``undergenerating_weak`` False), weak
            examples use the QPBO + L-BFGS-B smoothed-dual branch.

        Side effects: sets ``self.w``, ``self.w_history``,
        ``self.timestamps``, ``self.objective_curve``, ``self.train_score``
        and ``self.test_score``.
        """
        self.logger.info('Initialization')
        if decompose == 'general':
            contains_node, chains, edge_index = decompose_graph(X)
        elif decompose == 'grid':
            contains_node, chains, edge_index = decompose_grid_graph(X)
        else:
            raise ValueError
        y_hat = []       # per-example, per-chain current slave labelings
        lambdas = []     # per-example, per-chain dual variables
        multiplier = []  # per-example: 1 / |chains containing node|
        xx = []          # per-example warm-start duals for the SMD branch
        mu = {}          # per-example node duals for weakly labeled examples
        for k in xrange(len(X)):
            x, y = X[k], Y[k]
            n_nodes = x[0].shape[0]
            xx.append(np.zeros(n_nodes))
            _lambdas = []
            _y_hat = []
            _multiplier = []
            for p in xrange(n_nodes):
                _multiplier.append(1.0 / len(contains_node[k][p]))
            for chain in chains[k]:
                _lambdas.append(np.zeros((len(chain), self.n_states)))
                _y_hat.append(np.zeros(len(chain), dtype=np.int32))
            lambdas.append(_lambdas)
            y_hat.append(_y_hat)
            _multiplier = np.array(_multiplier)
            _multiplier.shape = (n_nodes, 1)
            multiplier.append(_multiplier)
            if not y.full_labeled:
                mu[k] = np.zeros((n_nodes, self.n_states))
        w = np.zeros(self.size_w)
        self.w = w.copy()
        self.start_time = time.time()
        self.timestamps = [0]
        self.objective_curve = []
        self.train_score = []
        self.test_score = []
        self.w_history = []
        learning_rate1 = 0.1  # step size for w
        learning_rate2 = 0.1  # step size for the dual variables
        for iteration in xrange(self.max_iter):
            self.logger.info('Iteration %d', iteration)
            self.logger.info('Optimize slave MRF and update w')
            objective = 0
            dw = np.zeros(w.shape)
            for k in xrange(len(X)):
                x, y = X[k], Y[k]
                n_nodes = x[0].shape[0]
                # self.logger.info('object %d', k)
                if y.full_labeled:
                    # Fully labeled example: loss-augmented chain slaves.
                    unaries = self._loss_augment_unaries(self._get_unary_potentials(x, w),
                                                         y.full, y.weights)
                    unaries *= multiplier[k]
                    pairwise = self._get_pairwise_potentials(x, w)
                    jf = self._joint_features_full(x, y.full)
                    objective -= np.dot(w, jf)
                    dw -= jf
                    for i in xrange(len(chains[k])):
                        y_hat[k][i], energy = optimize_chain(chains[k][i],
                                                             lambdas[k][i] + unaries[chains[k][i],:],
                                                             pairwise,
                                                             edge_index[k])
                        dw += self._joint_features(chains[k][i], x, y_hat[k][i], edge_index[k], multiplier[k])
                        objective += energy
                elif iteration > use_latent_first_iter:
                    if undergenerating_weak:
                        # Use gco for full K oracle
                        # y_hat_, energy = self.loss_augmented_inference(x, y, w)
                        # jf_gt = self._joint_features_full(x, y.full)
                        # objective -= np.dot(w, jf_gt)
                        # objective += energy
                        # dw -= jf_gt
                        # dw += self._joint_features_full(x, y_hat_)
                        # use gco for first summand in DD
                        for mm in xrange(10):
                            dmu = np.zeros((n_nodes, self.n_states))
                            unaries = self._get_unary_potentials(x, w) - mu[k]
                            pairwise = self._get_pairwise_potentials(x, w)
                            y_hat_gco, energy = inference_gco(unaries, pairwise, self._get_edges(x),
                                                              n_iter=5, return_energy=True)
                            objective -= energy
                            dmu[np.ogrid[:dmu.shape[0]], y_hat_gco] -= 1
                            dw += self._joint_features_full(x, y_hat_gco)
                            jf = self._joint_features_full(x, y.full)
                            objective -= np.dot(w, jf)
                            dw -= jf
                            y_hat_kappa, energy = optimize_kappa(y, mu[k], self.alpha, n_nodes, self.n_states)
                            objective += energy
                            dmu[np.ogrid[:dmu.shape[0]], y_hat_kappa] += 1
                            mu[k] -= learning_rate2 * dmu
                    elif not smd:
                        # Weak example, chain-decomposition branch.
                        dmu = np.zeros((n_nodes, self.n_states))
                        unaries = (self._get_unary_potentials(x, w) - mu[k]) * multiplier[k]
                        pairwise = self._get_pairwise_potentials(x, w)
                        jf = self._joint_features_full(x, y.full)
                        objective -= np.dot(w, jf)
                        dw -= jf
                        #begin inner (can remove this to restore to previous state)
                        E = 0
                        Eprev = -100
                        for j in xrange(self.update_mu):
                            E = 0
                            for i in xrange(len(chains[k])):
                                y_hat[k][i], energy = optimize_chain(chains[k][i],
                                                                     lambdas[k][i] + unaries[chains[k][i],:],
                                                                     pairwise,
                                                                     edge_index[k])
                                E += energy
                            # Subgradient step on the chain duals lambda.
                            lambda_sum = np.zeros((n_nodes, self.n_states), dtype=np.float64)
                            for p in xrange(n_nodes):
                                for i in contains_node[k][p]:
                                    pos = np.where(chains[k][i] == p)[0][0]
                                    lambda_sum[p, y_hat[k][i][pos]] += multiplier[k][p]
                            for i in xrange(len(chains[k])):
                                N = lambdas[k][i].shape[0]
                                lambdas[k][i][np.ogrid[:N], y_hat[k][i]] -= learning_rate2
                                lambdas[k][i] += learning_rate2 * lambda_sum[chains[k][i],:]
                            if np.abs(E - Eprev) < 0.1:
                                break
                            Eprev = E
                        #end inner
                        #last one
                        for i in xrange(len(chains[k])):
                            y_hat[k][i], energy = optimize_chain(chains[k][i],
                                                                 lambdas[k][i] + unaries[chains[k][i],:],
                                                                 pairwise,
                                                                 edge_index[k])
                            dw += self._joint_features(chains[k][i], x, y_hat[k][i], edge_index[k], multiplier[k])
                            objective += energy
                            dmu[chains[k][i], y_hat[k][i]] -= multiplier[k][chains[k][i]].flatten()
                        #
                        y_hat_kappa, energy = optimize_kappa(y, mu[k], self.alpha, n_nodes, self.n_states)
                        objective += energy
                        dmu[np.ogrid[:dmu.shape[0]], y_hat_kappa] += 1
                        mu[k] -= learning_rate2 * dmu
                    elif smd:
                        # Weak example, smoothed-dual branch (QPBO + L-BFGS-B).
                        if iteration > 1500:
                            mMu = 10
                        else:
                            mMu = 1
                        for mm in xrange(mMu):
                            dmu = np.zeros((n_nodes, self.n_states))
                            jf = self._joint_features_full(x, y.full)
                            objective -= np.dot(w, jf)
                            dw -= jf
                            unaries = -self._get_unary_potentials(x, w) + mu[k]
                            edge_weights = -self._get_pairwise_potentials(x, w)
                            edges = self._get_edges(x)
                            n_edges = edges.shape[0]
                            y_hat2 = []
                            pairwise = []
                            # Build one binary pairwise problem per state,
                            # pushing half of each edge weight onto the
                            # incident nodes' unaries (reparameterization).
                            for j in xrange(self.n_states):
                                y_hat2.append(np.zeros(self.n_states))
                                _pairwise = np.zeros((n_edges, 2, 2))
                                for i in xrange(n_edges):
                                    _pairwise[i,1,0] = _pairwise[i,0,1] = -0.5 * edge_weights[i,j,j]
                                pairwise.append(_pairwise)
                            for i in xrange(n_edges):
                                e1, e2 = edges[i]
                                unaries[e1,:] += 0.5 * np.diag(edge_weights[i,:,:])
                                unaries[e2,:] += 0.5 * np.diag(edge_weights[i,:,:])
                            # Maximize the smoothed dual over xx[k] with the
                            # module-level f() as objective/gradient.
                            xx[k], f_val, d = fmin_l_bfgs_b(f, xx[k],
                                                            args=(unaries, pairwise, edges),
                                                            maxiter=50,
                                                            maxfun=50,
                                                            pgtol=1e-2)
                            E = np.sum(xx[k])
                            for j in xrange(self.n_states):
                                new_unaries = np.zeros((n_nodes, 2))
                                new_unaries[:,1] = unaries[:,j] + xx[k]
                                y_hat2[j], energy = binary_general_graph(edges, new_unaries, pairwise[j])
                                E -= 0.5*energy
                                dmu[:,j] -= y_hat2[j]
                                # NOTE(review): y_hat2[j] * j maps the binary
                                # indicator to label ids -- only valid because
                                # label 0 contributes zero features; confirm.
                                dw += self._joint_features_full(x, y_hat2[j] * j)
                            y_hat_kappa, energy = optimize_kappa(y, mu[k], 1, n_nodes, self.n_states)
                            E += energy
                            dmu[np.ogrid[:dmu.shape[0]], y_hat_kappa] += 1
                            objective += E
                            mu[k] -= learning_rate2 * dmu
            dw += w / self.C  # gradient of the regularizer (objective rescaled below)
            if iteration < 100 or iteration % self.update_w_every == 0:
                w -= learning_rate1 * dw
            objective = self.C * objective + np.sum(w ** 2) / 2
            self.logger.info('Update lambda')
            for k in xrange(len(X)):
                # Weak examples handled in the gco/SMD branches keep their
                # duals; only chain-decomposed examples are updated here.
                if undergenerating_weak and not Y[k].full_labeled:
                    continue
                if smd and not Y[k].full_labeled:
                    continue
                n_nodes = X[k][0].shape[0]
                lambda_sum = np.zeros((n_nodes, self.n_states), dtype=np.float64)
                for p in xrange(n_nodes):
                    for i in contains_node[k][p]:
                        pos = np.where(chains[k][i] == p)[0][0]
                        lambda_sum[p, y_hat[k][i][pos]] += multiplier[k][p]
                for i in xrange(len(chains[k])):
                    N = lambdas[k][i].shape[0]
                    lambdas[k][i][np.ogrid[:N], y_hat[k][i]] -= learning_rate2
                    lambdas[k][i] += learning_rate2 * lambda_sum[chains[k][i],:]
            if iteration % self.complete_every == 0 or iteration in [51, 80, 101, 130]:
                self.logger.info('Complete latent variables')
                Y_new = Parallel(n_jobs=self.n_jobs, verbose=0, max_nbytes=1e8)(
                    delayed(latent)(self.model, x, y, w) for x, y in zip(X, Y))
                changes = np.sum([np.any(y_new.full != y.full) for y_new, y in zip(Y_new, Y)])
                self.logger.info('changes in latent variables: %d', changes)
                Y = Y_new
            if iteration and (iteration % self.check_every == 0):
                self.logger.info('Compute train and test scores')
                self.train_score.append(train_scorer(w))
                self.logger.info('Train SCORE: %f', self.train_score[-1])
                self.test_score.append(test_scorer(w))
                self.logger.info('Test SCORE: %f', self.test_score[-1])
            self.logger.info('diff: %f', np.sum((w-self.w)**2))
            if iteration:
                # Diminishing step sizes (standard subgradient schedule).
                learning_rate1 = 1.0 / iteration
                learning_rate2 = 1.0 / iteration
            self.timestamps.append(time.time() - self.start_time)
            self.objective_curve.append(objective)
            self.logger.info('Objective: %f', objective)
            self.w = w.copy()
            self.w_history.append(self.w)
        self.w = w
        self.timestamps = np.array(self.timestamps)
        self.objective_curve = np.array(self.objective_curve)
        self.train_score = np.array(self.train_score)
        self.test_score = np.array(self.test_score)
        self.w_history = np.vstack(self.w_history)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Chance Scheduler.
"""
import random
import mox
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.conductor import api as conductor_api
from nova import context
from nova import db
from nova import exception
from nova.scheduler import chance
from nova.scheduler import driver
from nova.tests.scheduler import test_scheduler
class ChanceSchedulerTestCase(test_scheduler.SchedulerTestCase):
    """Test case for Chance Scheduler.

    Fix over the previous revision: the deprecated ``assertEquals`` alias
    (removed in later Python versions) is replaced by ``assertEqual``, and
    ``assertEquals(None, x)`` by the idiomatic ``assertIsNone(x)``.  The mox
    record/replay sequences are otherwise unchanged.
    """

    driver_cls = chance.ChanceScheduler

    def test_filter_hosts_avoid(self):
        """Test to make sure _filter_hosts() filters original hosts if
        avoid_original_host is True.
        """
        hosts = ['host1', 'host2', 'host3']
        request_spec = dict(instance_properties=dict(host='host2'))
        filter_properties = {'ignore_hosts': ['host2']}
        filtered = self.driver._filter_hosts(request_spec, hosts,
                filter_properties=filter_properties)
        self.assertEqual(filtered, ['host1', 'host3'])

    def test_filter_hosts_no_avoid(self):
        """Test to make sure _filter_hosts() does not filter original
        hosts if avoid_original_host is False.
        """
        hosts = ['host1', 'host2', 'host3']
        request_spec = dict(instance_properties=dict(host='host2'))
        filter_properties = {'ignore_hosts': []}
        filtered = self.driver._filter_hosts(request_spec, hosts,
                filter_properties=filter_properties)
        self.assertEqual(filtered, hosts)

    def test_basic_schedule_run_instance(self):
        """Two instances are scheduled onto randomly chosen hosts and the
        launch_index is incremented by the instance_update_db side effect.
        """
        ctxt = context.RequestContext('fake', 'fake', False)
        ctxt_elevated = 'fake-context-elevated'
        instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
        instance1 = {'uuid': 'fake-uuid1'}
        instance2 = {'uuid': 'fake-uuid2'}
        request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
                        'instance_properties': instance_opts}

        def inc_launch_index(*args):
            request_spec['instance_properties']['launch_index'] = (
                request_spec['instance_properties']['launch_index'] + 1)

        self.mox.StubOutWithMock(ctxt, 'elevated')
        self.mox.StubOutWithMock(self.driver, 'hosts_up')
        self.mox.StubOutWithMock(random, 'choice')
        self.mox.StubOutWithMock(driver, 'instance_update_db')
        self.mox.StubOutWithMock(compute_rpcapi.ComputeAPI, 'run_instance')

        ctxt.elevated().AndReturn(ctxt_elevated)
        # instance 1
        hosts_full = ['host1', 'host2', 'host3', 'host4']
        self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
        random.choice(hosts_full).AndReturn('host3')
        driver.instance_update_db(ctxt, instance1['uuid']).WithSideEffects(
                inc_launch_index).AndReturn(instance1)
        compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host3',
                instance=instance1, requested_networks=None,
                injected_files=None, admin_password=None, is_first_time=None,
                request_spec=request_spec, filter_properties={})

        # instance 2
        ctxt.elevated().AndReturn(ctxt_elevated)
        self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
        random.choice(hosts_full).AndReturn('host1')
        driver.instance_update_db(ctxt, instance2['uuid']).WithSideEffects(
                inc_launch_index).AndReturn(instance2)
        compute_rpcapi.ComputeAPI.run_instance(ctxt, host='host1',
                instance=instance2, requested_networks=None,
                injected_files=None, admin_password=None, is_first_time=None,
                request_spec=request_spec, filter_properties={})

        self.mox.ReplayAll()
        self.driver.schedule_run_instance(ctxt, request_spec,
                None, None, None, None, {})

    def test_basic_schedule_run_instance_no_hosts(self):
        """With no hosts up, the instance is put into ERROR state and an
        instance fault is recorded instead of raising.
        """
        ctxt = context.RequestContext('fake', 'fake', False)
        ctxt_elevated = 'fake-context-elevated'
        uuid = 'fake-uuid1'
        instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
        request_spec = {'instance_uuids': [uuid],
                        'instance_properties': instance_opts}

        self.mox.StubOutWithMock(ctxt, 'elevated')
        self.mox.StubOutWithMock(self.driver, 'hosts_up')
        self.mox.StubOutWithMock(compute_utils, 'add_instance_fault_from_exc')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')

        # instance 1
        ctxt.elevated().AndReturn(ctxt_elevated)
        self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn([])
        old_ref, new_ref = db.instance_update_and_get_original(ctxt, uuid,
                {'vm_state': vm_states.ERROR,
                 'task_state': None}).AndReturn(({}, {}))
        compute_utils.add_instance_fault_from_exc(ctxt,
                mox.IsA(conductor_api.LocalAPI), new_ref,
                mox.IsA(exception.NoValidHost), mox.IgnoreArg())

        self.mox.ReplayAll()
        self.driver.schedule_run_instance(
                ctxt, request_spec, None, None, None, None, {})

    def test_select_hosts(self):
        """select_hosts() returns one randomly chosen host per instance."""
        ctxt = context.RequestContext('fake', 'fake', False)
        ctxt_elevated = 'fake-context-elevated'
        instance_opts = {'fake_opt1': 'meow', 'launch_index': -1}
        request_spec = {'instance_uuids': ['fake-uuid1', 'fake-uuid2'],
                        'instance_properties': instance_opts}

        self.mox.StubOutWithMock(ctxt, 'elevated')
        self.mox.StubOutWithMock(self.driver, 'hosts_up')
        self.mox.StubOutWithMock(random, 'choice')

        ctxt.elevated().AndReturn(ctxt_elevated)
        # instance 1
        hosts_full = ['host1', 'host2', 'host3', 'host4']
        self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
        random.choice(hosts_full).AndReturn('host3')

        # instance 2
        ctxt.elevated().AndReturn(ctxt_elevated)
        self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
        random.choice(hosts_full).AndReturn('host1')

        self.mox.ReplayAll()
        hosts = self.driver.select_hosts(ctxt, request_spec, {})
        self.assertEqual(['host3', 'host1'], hosts)

    def test_select_hosts_no_valid_host(self):
        """select_hosts() raises NoValidHost when scheduling finds nothing."""

        def _return_no_host(*args, **kwargs):
            return []

        self.stubs.Set(self.driver, '_schedule', _return_no_host)
        self.assertRaises(exception.NoValidHost,
                          self.driver.select_hosts, self.context, {}, {})

    def test_select_destinations(self):
        """select_destinations() returns one host dict per instance, with
        nodename left unset by the chance scheduler.
        """
        ctxt = context.RequestContext('fake', 'fake', False)
        ctxt_elevated = 'fake-context-elevated'
        request_spec = {'num_instances': 2}

        self.mox.StubOutWithMock(ctxt, 'elevated')
        self.mox.StubOutWithMock(self.driver, 'hosts_up')
        self.mox.StubOutWithMock(random, 'choice')

        hosts_full = ['host1', 'host2', 'host3', 'host4']

        ctxt.elevated().AndReturn(ctxt_elevated)
        self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
        random.choice(hosts_full).AndReturn('host3')

        ctxt.elevated().AndReturn(ctxt_elevated)
        self.driver.hosts_up(ctxt_elevated, 'compute').AndReturn(hosts_full)
        random.choice(hosts_full).AndReturn('host2')

        self.mox.ReplayAll()
        dests = self.driver.select_destinations(ctxt, request_spec, {})
        self.assertEqual(2, len(dests))
        (host, node) = (dests[0]['host'], dests[0]['nodename'])
        self.assertEqual('host3', host)
        self.assertIsNone(node)
        (host, node) = (dests[1]['host'], dests[1]['nodename'])
        self.assertEqual('host2', host)
        self.assertIsNone(node)

    def test_select_destinations_no_valid_host(self):
        """select_destinations() raises NoValidHost when filtering empties
        the candidate list.
        """

        def _return_no_host(*args, **kwargs):
            return []

        self.mox.StubOutWithMock(self.driver, 'hosts_up')
        self.driver.hosts_up(mox.IgnoreArg(),
                mox.IgnoreArg()).AndReturn([1, 2])
        self.stubs.Set(self.driver, '_filter_hosts', _return_no_host)
        self.mox.ReplayAll()

        request_spec = {'num_instances': 1}
        self.assertRaises(exception.NoValidHost,
                          self.driver.select_destinations, self.context,
                          request_spec, {})
| |
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import copy
try:
import collections.abc as collections_abc # only works on python 3.3+
except ImportError:
import collections as collections_abc
from elasticsearch.exceptions import TransportError
from elasticsearch.helpers import scan
from six import iteritems, string_types
from .aggs import A, AggBase
from .connections import get_connection
from .exceptions import IllegalOperation
from .query import Bool, Q
from .response import Hit, Response
from .utils import AttrDict, DslBase, recursive_to_dict
class QueryProxy(object):
    """
    Simple proxy around DSL objects (queries) that can be called
    (to add query/post_filter) and also allows attribute access which is
    proxied to the wrapped query.
    """

    def __init__(self, search, attr_name):
        self._search = search        # owning Search instance
        self._proxied = None         # wrapped Q object, created lazily
        self._attr_name = attr_name  # name of this proxy attribute on Search

    def __nonzero__(self):
        # Truthy once a query has been assigned (Python 2 bool protocol).
        return self._proxied is not None

    __bool__ = __nonzero__  # Python 3 name for the same protocol

    def __call__(self, *args, **kwargs):
        """Add a query clause (AND-ed with any existing one) and return a
        clone of the owning search, leaving the original untouched."""
        s = self._search._clone()

        # we cannot use self._proxied since we just cloned self._search and
        # need to access the new self on the clone
        proxied = getattr(s, self._attr_name)
        if proxied._proxied is None:
            proxied._proxied = Q(*args, **kwargs)
        else:
            proxied._proxied &= Q(*args, **kwargs)

        # always return search to be chainable
        return s

    def __getattr__(self, attr_name):
        # Only called for attributes not found on the proxy itself, so all
        # public attribute reads fall through to the wrapped query.
        return getattr(self._proxied, attr_name)

    def __setattr__(self, attr_name, value):
        if not attr_name.startswith("_"):
            # Public writes rebuild the wrapped query from its dict form
            # before mutating it, then the attribute is also stored on the
            # proxy itself by the super() call below.
            self._proxied = Q(self._proxied.to_dict())
            setattr(self._proxied, attr_name, value)
        super(QueryProxy, self).__setattr__(attr_name, value)

    def __getstate__(self):
        # Explicit pickle support: __getattr__ would otherwise interfere
        # with pickling an instance that has no wrapped query yet.
        return self._search, self._proxied, self._attr_name

    def __setstate__(self, state):
        self._search, self._proxied, self._attr_name = state
class ProxyDescriptor(object):
    """
    Descriptor exposing the per-instance ``QueryProxy`` stored under a
    private attribute, which enables direct assignment of queries and
    filters:

        s = Search()
        s.query = Q(...)
    """

    def __init__(self, name):
        # e.g. name="query" -> instance attribute "_query_proxy"
        self._attr_name = "_{0}_proxy".format(name)

    def __get__(self, instance, owner):
        # Reading the descriptor hands back the proxy object itself.
        return getattr(instance, self._attr_name)

    def __set__(self, instance, value):
        # Assignment replaces the proxied query, coercing via Q().
        getattr(instance, self._attr_name)._proxied = Q(value)
class AggsProxy(AggBase, DslBase):
    """Proxy object exposing the aggregation API on a ``Search`` instance."""

    # ``name`` is the serialization key used by the DslBase machinery.
    name = "aggs"

    def __init__(self, search):
        self._base = self
        self._search = search
        self._params = {"aggs": {}}  # top-level aggregation definitions

    def to_dict(self):
        # Strip the outer {"aggs": ...} wrapper that DslBase.to_dict() adds,
        # so the proxy serializes to just the aggregation definitions.
        return super(AggsProxy, self).to_dict().get("aggs", {})
class Request(object):
    """Shared state and behavior for request objects (e.g. ``Search``):
    connection alias/client, target index list, doc types (with an optional
    doc_type -> callback map for result wrapping), query-string params and
    extra body keys.  All public mutators return a clone, never mutate in
    place.
    """

    def __init__(self, using="default", index=None, doc_type=None, extra=None):
        # ``using`` is either a connection alias or an Elasticsearch client.
        self._using = using

        # Normalize index to a list (or None when unrestricted).
        self._index = None
        if isinstance(index, (tuple, list)):
            self._index = list(index)
        elif index:
            self._index = [index]

        self._doc_type = []
        self._doc_type_map = {}
        if isinstance(doc_type, (tuple, list)):
            self._doc_type.extend(doc_type)
        elif isinstance(doc_type, collections_abc.Mapping):
            # Mapping form: keys are doc types, values are hit callbacks.
            self._doc_type.extend(doc_type.keys())
            self._doc_type_map.update(doc_type)
        elif doc_type:
            self._doc_type.append(doc_type)

        self._params = {}
        self._extra = extra or {}

    def __eq__(self, other):
        # Two requests are equal when their params, indices, doc types and
        # serialized bodies all agree.  NOTE(review): no matching __hash__
        # or (py2) __ne__ is defined here -- presumably intentional upstream.
        return (
            isinstance(other, Request)
            and other._params == self._params
            and other._index == self._index
            and other._doc_type == self._doc_type
            and other.to_dict() == self.to_dict()
        )

    def __copy__(self):
        return self._clone()

    def params(self, **kwargs):
        """
        Specify query params to be used when executing the search. All the
        keyword arguments will override the current values. See
        https://elasticsearch-py.readthedocs.io/en/master/api.html#elasticsearch.Elasticsearch.search
        for all available parameters.

        Example::

            s = Search()
            s = s.params(routing='user-1', preference='local')
        """
        s = self._clone()
        s._params.update(kwargs)
        return s

    def index(self, *index):
        """
        Set the index for the search. If called empty it will remove all
        information.

        Example:

            s = Search()
            s = s.index('twitter-2015.01.01', 'twitter-2015.01.02')
            s = s.index(['twitter-2015.01.01', 'twitter-2015.01.02'])
        """
        # .index() resets
        s = self._clone()
        if not index:
            s._index = None
        else:
            indexes = []
            for i in index:
                if isinstance(i, string_types):
                    indexes.append(i)
                elif isinstance(i, list):
                    indexes += i
                elif isinstance(i, tuple):
                    indexes += list(i)
            # Calling index() with arguments APPENDS to any existing list.
            s._index = (self._index or []) + indexes

        return s

    def _resolve_field(self, path):
        # Ask each doc type's index mapping to resolve the field path;
        # first match wins, implicit None when nothing resolves.
        for dt in self._doc_type:
            if not hasattr(dt, "_index"):
                continue
            field = dt._index.resolve_field(path)
            if field is not None:
                return field

    def _resolve_nested(self, hit, parent_class=None):
        """Pick the document class for a nested inner hit by walking the
        hit's ``_nested`` chain into a dotted field path."""
        doc_class = Hit

        nested_path = []
        nesting = hit["_nested"]
        while nesting and "field" in nesting:
            nested_path.append(nesting["field"])
            nesting = nesting.get("_nested")
        nested_path = ".".join(nested_path)

        if hasattr(parent_class, "_index"):
            nested_field = parent_class._index.resolve_field(nested_path)
        else:
            nested_field = self._resolve_field(nested_path)

        if nested_field is not None:
            return nested_field._doc_class

        return doc_class

    def _get_result(self, hit, parent_class=None):
        """Wrap a raw hit dict in the appropriate document class (nested
        resolution, explicit doc_type map, then _matches() probing),
        recursing into any inner_hits."""
        doc_class = Hit
        dt = hit.get("_type")

        if "_nested" in hit:
            doc_class = self._resolve_nested(hit, parent_class)

        elif dt in self._doc_type_map:
            doc_class = self._doc_type_map[dt]

        else:
            for doc_type in self._doc_type:
                if hasattr(doc_type, "_matches") and doc_type._matches(hit):
                    doc_class = doc_type
                    break

        for t in hit.get("inner_hits", ()):
            hit["inner_hits"][t] = Response(
                self, hit["inner_hits"][t], doc_class=doc_class
            )

        # Prefer the class's from_es factory when it defines one.
        callback = getattr(doc_class, "from_es", doc_class)
        return callback(hit)

    def doc_type(self, *doc_type, **kwargs):
        """
        Set the type to search through. You can supply a single value or
        multiple. Values can be strings or subclasses of ``Document``.

        You can also pass in any keyword arguments, mapping a doc_type to a
        callback that should be used instead of the Hit class.

        If no doc_type is supplied any information stored on the instance
        will be erased.

        Example:

            s = Search().doc_type('product', 'store', User, custom=my_callback)
        """
        # .doc_type() resets
        s = self._clone()
        if not doc_type and not kwargs:
            s._doc_type = []
            s._doc_type_map = {}
        else:
            s._doc_type.extend(doc_type)
            s._doc_type.extend(kwargs.keys())
            s._doc_type_map.update(kwargs)
        return s

    def using(self, client):
        """
        Associate the search request with an elasticsearch client. A fresh
        copy will be returned with current instance remaining unchanged.

        :arg client: an instance of ``elasticsearch.Elasticsearch`` to use or
            an alias to look up in ``elasticsearch_dsl.connections``
        """
        s = self._clone()
        s._using = client
        return s

    def extra(self, **kwargs):
        """
        Add extra keys to the request body. Mostly here for backwards
        compatibility.
        """
        s = self._clone()
        if "from_" in kwargs:
            # "from" is a Python keyword, so callers pass "from_".
            kwargs["from"] = kwargs.pop("from_")
        s._extra.update(kwargs)
        return s

    def _clone(self):
        # Shallow copy: the index/doc_type lists are rebuilt by __init__,
        # the dicts are copied one level deep.
        s = self.__class__(
            using=self._using, index=self._index, doc_type=self._doc_type
        )
        s._doc_type_map = self._doc_type_map.copy()
        s._extra = self._extra.copy()
        s._params = self._params.copy()
        return s
class Search(Request):
query = ProxyDescriptor("query")
post_filter = ProxyDescriptor("post_filter")
def __init__(self, **kwargs):
"""
Search request to elasticsearch.
:arg using: `Elasticsearch` instance to use
:arg index: limit the search to index
:arg doc_type: only query this type.
All the parameters supplied (or omitted) at creation type can be later
overridden by methods (`using`, `index` and `doc_type` respectively).
"""
super(Search, self).__init__(**kwargs)
self.aggs = AggsProxy(self)
self._sort = []
self._source = None
self._highlight = {}
self._highlight_opts = {}
self._suggest = {}
self._script_fields = {}
self._response_class = Response
self._query_proxy = QueryProxy(self, "query")
self._post_filter_proxy = QueryProxy(self, "post_filter")
def filter(self, *args, **kwargs):
return self.query(Bool(filter=[Q(*args, **kwargs)]))
def exclude(self, *args, **kwargs):
return self.query(Bool(filter=[~Q(*args, **kwargs)]))
def __iter__(self):
"""
Iterate over the hits.
"""
return iter(self.execute())
def __getitem__(self, n):
"""
Support slicing the `Search` instance for pagination.
Slicing equates to the from/size parameters. E.g.::
s = Search().query(...)[0:25]
is equivalent to::
s = Search().query(...).extra(from_=0, size=25)
"""
s = self._clone()
if isinstance(n, slice):
# If negative slicing, abort.
if n.start and n.start < 0 or n.stop and n.stop < 0:
raise ValueError("Search does not support negative slicing.")
# Elasticsearch won't get all results so we default to size: 10 if
# stop not given.
s._extra["from"] = n.start or 0
s._extra["size"] = max(
0, n.stop - (n.start or 0) if n.stop is not None else 10
)
return s
else: # This is an index lookup, equivalent to slicing by [n:n+1].
# If negative index, abort.
if n < 0:
raise ValueError("Search does not support negative indexing.")
s._extra["from"] = n
s._extra["size"] = 1
return s
@classmethod
def from_dict(cls, d):
"""
Construct a new `Search` instance from a raw dict containing the search
body. Useful when migrating from raw dictionaries.
Example::
s = Search.from_dict({
"query": {
"bool": {
"must": [...]
}
},
"aggs": {...}
})
s = s.filter('term', published=True)
"""
s = cls()
s.update_from_dict(d)
return s
def _clone(self):
"""
Return a clone of the current search request. Performs a shallow copy
of all the underlying objects. Used internally by most state modifying
APIs.
"""
s = super(Search, self)._clone()
s._response_class = self._response_class
s._sort = self._sort[:]
s._source = copy.copy(self._source) if self._source is not None else None
s._highlight = self._highlight.copy()
s._highlight_opts = self._highlight_opts.copy()
s._suggest = self._suggest.copy()
s._script_fields = self._script_fields.copy()
for x in ("query", "post_filter"):
getattr(s, x)._proxied = getattr(self, x)._proxied
# copy top-level bucket definitions
if self.aggs._params.get("aggs"):
s.aggs._params = {"aggs": self.aggs._params["aggs"].copy()}
return s
def response_class(self, cls):
"""
Override the default wrapper used for the response.
"""
s = self._clone()
s._response_class = cls
return s
def update_from_dict(self, d):
"""
Apply options from a serialized body to the current instance. Modifies
the object in-place. Used mostly by ``from_dict``.
"""
d = d.copy()
if "query" in d:
self.query._proxied = Q(d.pop("query"))
if "post_filter" in d:
self.post_filter._proxied = Q(d.pop("post_filter"))
aggs = d.pop("aggs", d.pop("aggregations", {}))
if aggs:
self.aggs._params = {
"aggs": {name: A(value) for (name, value) in iteritems(aggs)}
}
if "sort" in d:
self._sort = d.pop("sort")
if "_source" in d:
self._source = d.pop("_source")
if "highlight" in d:
high = d.pop("highlight").copy()
self._highlight = high.pop("fields")
self._highlight_opts = high
if "suggest" in d:
self._suggest = d.pop("suggest")
if "text" in self._suggest:
text = self._suggest.pop("text")
for s in self._suggest.values():
s.setdefault("text", text)
if "script_fields" in d:
self._script_fields = d.pop("script_fields")
self._extra.update(d)
return self
def script_fields(self, **kwargs):
"""
Define script fields to be calculated on hits. See
https://www.elastic.co/guide/en/elasticsearch/reference/current/search-request-script-fields.html
for more details.
Example::
s = Search()
s = s.script_fields(times_two="doc['field'].value * 2")
s = s.script_fields(
times_three={
'script': {
'lang': 'painless',
'source': "doc['field'].value * params.n",
'params': {'n': 3}
}
}
)
"""
s = self._clone()
for name in kwargs:
if isinstance(kwargs[name], string_types):
kwargs[name] = {"script": kwargs[name]}
s._script_fields.update(kwargs)
return s
def source(self, fields=None, **kwargs):
    """
    Selectively control how the _source field is returned.

    :arg fields: wildcard string, array of wildcards, or dictionary of
        includes and excludes

    If ``fields`` is None, the entire document will be returned for each
    hit. If fields is a dictionary with keys of 'includes' and/or
    'excludes' the fields will be either included or excluded
    appropriately.

    Calling this multiple times with the same named parameter will
    override the previous values with the new ones.

    Example::

        s = Search()
        s = s.source(includes=['obj1.*'], excludes=["*.description"])

        s = Search()
        s = s.source(includes=['obj1.*']).source(excludes=["*.description"])
    """
    clone = self._clone()

    if fields and kwargs:
        raise ValueError("You cannot specify fields and kwargs at the same time.")

    if fields is not None:
        clone._source = fields
        return clone

    if kwargs and not isinstance(clone._source, dict):
        clone._source = {}

    for key, value in kwargs.items():
        if value is None:
            # A None value removes a previously-set include/exclude.
            clone._source.pop(key, None)
        else:
            clone._source[key] = value

    return clone
def sort(self, *keys):
    """
    Add sorting information to the search request. If called without
    arguments it will remove all sort requirements. Otherwise it will
    replace them. Acceptable arguments are::

        'some.field'
        '-some.other.field'
        {'different.field': {'any': 'dict'}}

    so for example::

        s = Search().sort(
            'category',
            '-title',
            {"price" : {"order" : "asc", "mode" : "avg"}}
        )

    will sort by ``category``, ``title`` (in descending order) and
    ``price`` in ascending order using the ``avg`` mode.

    The API returns a copy of the Search object and can thus be chained.
    """
    clone = self._clone()
    clone._sort = []
    for key in keys:
        # "-field" shorthand expands to a descending-order dict.
        if isinstance(key, string_types) and key.startswith("-"):
            if key[1:] == "_score":
                raise IllegalOperation("Sorting by `-_score` is not allowed.")
            key = {key[1:]: {"order": "desc"}}
        clone._sort.append(key)
    return clone
def highlight_options(self, **kwargs):
    """
    Update the global highlighting options used for this request. For
    example::

        s = Search()
        s = s.highlight_options(order='score')
    """
    clone = self._clone()
    clone._highlight_opts.update(kwargs)
    return clone
def highlight(self, *fields, **kwargs):
    """
    Request highlighting of some fields. All keyword arguments passed in
    will be used as parameters for all the fields in the ``fields``
    parameter. Example::

        Search().highlight('title', 'body', fragment_size=50)

    will produce the equivalent of::

        {
            "highlight": {
                "fields": {
                    "body": {"fragment_size": 50},
                    "title": {"fragment_size": 50}
                }
            }
        }

    If you want to have different options for different fields you can
    call ``highlight`` twice::

        Search().highlight('title', fragment_size=50).highlight('body', fragment_size=100)

    which will produce::

        {
            "highlight": {
                "fields": {
                    "body": {"fragment_size": 100},
                    "title": {"fragment_size": 50}
                }
            }
        }
    """
    clone = self._clone()
    # All listed fields share the same options mapping.
    for field in fields:
        clone._highlight[field] = kwargs
    return clone
def suggest(self, name, text, **kwargs):
    """
    Add a suggestions request to the search.

    :arg name: name of the suggestion
    :arg text: text to suggest on

    All keyword arguments will be added to the suggestions body. For
    example::

        s = Search()
        s = s.suggest('suggestion-1', 'Elasticsearch', term={'field': 'body'})
    """
    clone = self._clone()
    suggestion = {"text": text}
    suggestion.update(kwargs)
    clone._suggest[name] = suggestion
    return clone
def to_dict(self, count=False, **kwargs):
    """
    Serialize the search into the dictionary that will be sent over as
    the request's body.

    :arg count: a flag to specify if we are interested in a body for
        count - no aggregations, no pagination bounds etc.

    All additional keyword arguments will be included into the dictionary.
    """
    body = {}
    if self.query:
        body["query"] = self.query.to_dict()

    # A count request doesn't care for sorting and other extras.
    if not count:
        if self.post_filter:
            body["post_filter"] = self.post_filter.to_dict()
        if self.aggs.aggs:
            body.update(self.aggs.to_dict())
        if self._sort:
            body["sort"] = self._sort
        body.update(recursive_to_dict(self._extra))
        if self._source not in (None, {}):
            body["_source"] = self._source
        if self._highlight:
            highlight = {"fields": self._highlight}
            highlight.update(self._highlight_opts)
            body["highlight"] = highlight
        if self._suggest:
            body["suggest"] = self._suggest
        if self._script_fields:
            body["script_fields"] = self._script_fields

    body.update(recursive_to_dict(kwargs))
    return body
def count(self):
    """
    Return the number of hits matching the query and filters. Note that
    only the actual number is returned.
    """
    # Reuse a cached response when it carries an exact total.
    if hasattr(self, "_response"):
        total = self._response.hits.total
        if total.relation == "eq":
            return total.value

    es = get_connection(self._using)
    # TODO: failed shards detection
    return es.count(
        index=self._index, body=self.to_dict(count=True), **self._params
    )["count"]
def execute(self, ignore_cache=False):
    """
    Execute the search and return an instance of ``Response`` wrapping
    all the data.

    :arg ignore_cache: if set to ``True``, consecutive calls will hit
        ES, while cached result will be ignored. Defaults to `False`
    """
    # Serve from cache unless the caller explicitly opts out.
    if not ignore_cache and hasattr(self, "_response"):
        return self._response

    es = get_connection(self._using)
    self._response = self._response_class(
        self, es.search(index=self._index, body=self.to_dict(), **self._params)
    )
    return self._response
def scan(self):
    """
    Turn the search into a scan search and return a generator that will
    iterate over all the documents matching the query.

    Use ``params`` method to specify any additional arguments you wish to
    pass to the underlying ``scan`` helper from ``elasticsearch-py`` -
    https://elasticsearch-py.readthedocs.io/en/master/helpers.html#elasticsearch.helpers.scan
    """
    es = get_connection(self._using)
    hits = scan(es, query=self.to_dict(), index=self._index, **self._params)
    for hit in hits:
        yield self._get_result(hit)
def delete(self):
    """
    delete() executes the query by delegating to delete_by_query()
    """
    es = get_connection(self._using)
    raw = es.delete_by_query(
        index=self._index, body=self.to_dict(), **self._params
    )
    return AttrDict(raw)
class MultiSearch(Request):
    """
    Combine multiple :class:`~elasticsearch_dsl.Search` objects into a single
    request.
    """

    def __init__(self, **kwargs):
        super(MultiSearch, self).__init__(**kwargs)
        self._searches = []

    def __getitem__(self, key):
        return self._searches[key]

    def __iter__(self):
        return iter(self._searches)

    def _clone(self):
        # Copy the list of searches so mutations don't leak across clones.
        clone = super(MultiSearch, self)._clone()
        clone._searches = list(self._searches)
        return clone

    def add(self, search):
        """
        Adds a new :class:`~elasticsearch_dsl.Search` object to the request::

            ms = MultiSearch(index='my-index')
            ms = ms.add(Search(doc_type=Category).filter('term', category='python'))
            ms = ms.add(Search(doc_type=Blog))
        """
        clone = self._clone()
        clone._searches.append(search)
        return clone

    def to_dict(self):
        # msearch bodies alternate a metadata line with a query line.
        out = []
        for search in self._searches:
            meta = {}
            if search._index:
                meta["index"] = search._index
            meta.update(search._params)
            out.extend([meta, search.to_dict()])
        return out

    def execute(self, ignore_cache=False, raise_on_error=True):
        """
        Execute the multi search request and return a list of search results.
        """
        if not ignore_cache and hasattr(self, "_response"):
            return self._response

        es = get_connection(self._using)
        responses = es.msearch(
            index=self._index, body=self.to_dict(), **self._params
        )

        results = []
        for search, resp in zip(self._searches, responses["responses"]):
            if resp.get("error", False):
                if raise_on_error:
                    raise TransportError("N/A", resp["error"]["type"], resp["error"])
                results.append(None)
            else:
                results.append(Response(search, resp))

        self._response = results
        return self._response
| |
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""RADOS Block Device Driver"""
from __future__ import absolute_import
import io
import json
import math
import os
import tempfile
from eventlet import tpool
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import fileutils
from oslo_utils import units
from six.moves import urllib
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.image import image_utils
from cinder import utils
from cinder.volume import driver
try:
import rados
import rbd
except ImportError:
rados = None
rbd = None
# Module-level logger used throughout the driver.
LOG = logging.getLogger(__name__)

# Configuration options understood by the RBD driver. Registered on the
# global CONF object below; RBDDriver.__init__ additionally appends them to
# its per-backend configuration.
rbd_opts = [
    cfg.StrOpt('rbd_cluster_name',
               default='ceph',
               help='The name of ceph cluster'),
    cfg.StrOpt('rbd_pool',
               default='rbd',
               help='The RADOS pool where rbd volumes are stored'),
    cfg.StrOpt('rbd_user',
               help='The RADOS client name for accessing rbd volumes '
                    '- only set when using cephx authentication'),
    cfg.StrOpt('rbd_ceph_conf',
               default='',  # default determined by librados
               help='Path to the ceph configuration file'),
    cfg.BoolOpt('rbd_flatten_volume_from_snapshot',
                default=False,
                help='Flatten volumes created from snapshots to remove '
                     'dependency from volume to snapshot'),
    cfg.StrOpt('rbd_secret_uuid',
               help='The libvirt uuid of the secret for the rbd_user '
                    'volumes'),
    cfg.StrOpt('volume_tmp_dir',
               help='Directory where temporary image files are stored '
                    'when the volume driver does not write them directly '
                    'to the volume. Warning: this option is now deprecated, '
                    'please use image_conversion_dir instead.'),
    cfg.IntOpt('rbd_max_clone_depth',
               default=5,
               help='Maximum number of nested volume clones that are '
                    'taken before a flatten occurs. Set to 0 to disable '
                    'cloning.'),
    cfg.IntOpt('rbd_store_chunk_size', default=4,
               help=_('Volumes will be chunked into objects of this size '
                      '(in megabytes).')),
    cfg.IntOpt('rados_connect_timeout', default=-1,
               help=_('Timeout value (in seconds) used when connecting to '
                      'ceph cluster. If value < 0, no timeout is set and '
                      'default librados value is used.')),
    cfg.IntOpt('rados_connection_retries', default=3,
               help=_('Number of retries if connection to ceph cluster '
                      'failed.')),
    cfg.IntOpt('rados_connection_interval', default=5,
               help=_('Interval value (in seconds) between connection '
                      'retries to ceph cluster.'))
]

CONF = cfg.CONF
CONF.register_opts(rbd_opts)
class RBDImageMetadata(object):
    """RBD image metadata to be used with RBDImageIOWrapper."""

    def __init__(self, image, pool, user, conf):
        self.image = image
        # librbd only accepts None or utf-8 strings, so normalise each
        # string attribute on the way in.
        for name, value in (('pool', pool), ('user', user), ('conf', conf)):
            setattr(self, name, utils.convert_str(value))
class RBDImageIOWrapper(io.RawIOBase):
    """Enables LibRBD.Image objects to be treated as Python IO objects.

    Calling unimplemented interfaces will raise IOError.
    """

    def __init__(self, rbd_meta):
        """:param rbd_meta: RBDImageMetadata describing the open image."""
        super(RBDImageIOWrapper, self).__init__()
        self._rbd_meta = rbd_meta
        self._offset = 0

    def _inc_offset(self, length):
        # Advance the logical file position after a read/write.
        self._offset += length

    @property
    def rbd_image(self):
        return self._rbd_meta.image

    @property
    def rbd_user(self):
        return self._rbd_meta.user

    @property
    def rbd_pool(self):
        return self._rbd_meta.pool

    @property
    def rbd_conf(self):
        return self._rbd_meta.conf

    def read(self, length=None):
        """Read up to *length* bytes from the current offset.

        Reads are clamped to the image size; at or beyond the end of the
        image ``b''`` is returned.
        """
        offset = self._offset
        total = self._rbd_meta.image.size()

        # NOTE(dosaboy): posix files do not barf if you read beyond their
        # length (they just return nothing) but rbd images do so we need to
        # return empty string if we have reached the end of the image.
        if (offset >= total):
            return b''

        if length is None:
            length = total

        if (offset + length) > total:
            length = total - offset

        self._inc_offset(length)
        return self._rbd_meta.image.read(int(offset), int(length))

    def write(self, data):
        """Write *data* at the current offset and return the byte count.

        io.RawIOBase.write() is documented to return the number of bytes
        written; previously this returned None, which confuses buffered
        wrappers layered on top of this object.
        """
        self._rbd_meta.image.write(data, self._offset)
        self._inc_offset(len(data))
        return len(data)

    def seekable(self):
        return True

    def seek(self, offset, whence=0):
        """Move the file position and return the new absolute offset.

        io.IOBase.seek() must return the new position; returning it also
        allows ``fh.seek(0, os.SEEK_END)`` style size probes.
        """
        if whence == 0:
            new_offset = offset
        elif whence == 1:
            new_offset = self._offset + offset
        elif whence == 2:
            new_offset = self._rbd_meta.image.size()
            new_offset += offset
        else:
            raise IOError(_("Invalid argument - whence=%s not supported") %
                          (whence))

        if (new_offset < 0):
            raise IOError(_("Invalid argument"))

        self._offset = new_offset
        return new_offset

    def tell(self):
        return self._offset

    def flush(self):
        # Older librbd versions have no Image.flush(); treat that as a no-op.
        try:
            self._rbd_meta.image.flush()
        except AttributeError:
            LOG.warning(_LW("flush() not supported in "
                            "this version of librbd"))

    def fileno(self):
        """RBD does not have support for fileno() so we raise IOError.

        Raising IOError is recommended way to notify caller that interface is
        not supported - see http://docs.python.org/2/library/io.html#io.IOBase
        """
        raise IOError(_("fileno() not supported by RBD()"))

    # NOTE(dosaboy): if IO object is not closed explicitly, Python auto closes
    # it which, if this is not overridden, calls flush() prior to close which
    # in this case is unwanted since the rbd image may have been closed prior
    # to the autoclean - currently triggering a segfault in librbd.
    def close(self):
        pass
class RBDVolumeProxy(object):
    """Context manager for dealing with an existing rbd volume.

    This handles connecting to rados and opening an ioctx automatically, and
    otherwise acts like a librbd Image object.

    The underlying librados client and ioctx can be accessed as the attributes
    'client' and 'ioctx'.
    """

    def __init__(self, driver, name, pool=None, snapshot=None,
                 read_only=False):
        client, ioctx = driver._connect_to_rados(pool)
        snap = None if snapshot is None else utils.convert_str(snapshot)
        try:
            self.volume = driver.rbd.Image(ioctx,
                                           utils.convert_str(name),
                                           snapshot=snap,
                                           read_only=read_only)
        except driver.rbd.Error:
            # Opening the image failed: release the rados connection
            # before propagating the error.
            LOG.exception(_LE("error opening rbd image %s"), name)
            driver._disconnect_from_rados(client, ioctx)
            raise
        self.driver = driver
        self.client = client
        self.ioctx = ioctx

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        try:
            self.volume.close()
        finally:
            self.driver._disconnect_from_rados(self.client, self.ioctx)

    def __getattr__(self, attrib):
        # Delegate anything we don't define to the wrapped librbd Image.
        return getattr(self.volume, attrib)
class RADOSClient(object):
    """Context manager to simplify error handling for connecting to ceph."""

    def __init__(self, driver, pool=None):
        self.driver = driver
        self.cluster, self.ioctx = driver._connect_to_rados(pool)

    def __enter__(self):
        return self

    def __exit__(self, type_, value, traceback):
        self.driver._disconnect_from_rados(self.cluster, self.ioctx)

    @property
    def features(self):
        """RBD feature bitmask, defaulting to layering when unset/zero."""
        features = self.cluster.conf_get('rbd_default_features')
        if features is None or int(features) == 0:
            return int(self.driver.rbd.RBD_FEATURE_LAYERING)
        return int(features)
class RBDDriver(driver.TransferVD, driver.ExtendVD,
driver.CloneableImageVD, driver.SnapshotVD,
driver.MigrateVD, driver.BaseVD):
"""Implements RADOS block device (RBD) volume commands."""
VERSION = '1.2.0'
def __init__(self, *args, **kwargs):
    """Initialise driver state and normalise string configuration.

    The ``rados`` and ``rbd`` kwargs allow the library modules to be
    substituted for testing.
    """
    super(RBDDriver, self).__init__(*args, **kwargs)
    self.configuration.append_config_values(rbd_opts)
    self._stats = {}
    # allow overrides for testing
    self.rados = kwargs.get('rados', rados)
    self.rbd = kwargs.get('rbd', rbd)

    # All string args used with librbd must be None or utf-8 otherwise
    # librbd will break.
    for attr in ['rbd_cluster_name', 'rbd_user',
                 'rbd_ceph_conf', 'rbd_pool']:
        val = getattr(self.configuration, attr)
        if val is not None:
            setattr(self.configuration, attr, utils.convert_str(val))
def check_for_setup_error(self):
    """Returns an error if prerequisites aren't met."""
    if rados is None:
        raise exception.VolumeBackendAPIException(
            data=_('rados and rbd python libraries not found'))

    # NOTE: Checking connection to ceph
    # RADOSClient __init__ method invokes _connect_to_rados
    # so no need to check for self.rados.Error here.
    with RADOSClient(self):
        pass
def RBDProxy(self):
    # Wrap the blocking librbd RBD API in an eventlet tpool proxy so its
    # calls don't block the hub's green threads.
    return tpool.Proxy(self.rbd.RBD())
def _ceph_args(self):
    """Build the common CLI arguments for invoking ceph tools."""
    args = []
    conf = self.configuration
    # Only pass flags whose values are actually configured (non-empty).
    for flag, value in (('--id', conf.rbd_user),
                        ('--conf', conf.rbd_ceph_conf),
                        ('--cluster', conf.rbd_cluster_name)):
        if value:
            args.extend([flag, value])
    return args
@utils.retry(exception.VolumeBackendAPIException,
             CONF.rados_connection_interval,
             CONF.rados_connection_retries)
def _connect_to_rados(self, pool=None):
    """Open a rados client plus an ioctx for *pool*.

    :param pool: pool name; defaults to the configured ``rbd_pool``.
    :returns: ``(client, ioctx)``; the caller must pass both to
        ``_disconnect_from_rados`` when done.
    :raises exception.VolumeBackendAPIException: when the cluster cannot
        be reached. The decorator retries the call every
        ``rados_connection_interval`` seconds up to
        ``rados_connection_retries`` times.
    """
    LOG.debug("opening connection to ceph cluster (timeout=%s).",
              self.configuration.rados_connect_timeout)

    client = self.rados.Rados(
        rados_id=self.configuration.rbd_user,
        clustername=self.configuration.rbd_cluster_name,
        conffile=self.configuration.rbd_ceph_conf)
    if pool is not None:
        pool = utils.convert_str(pool)
    else:
        pool = self.configuration.rbd_pool

    try:
        # A negative timeout means "use the librados default" (no
        # explicit timeout).
        if self.configuration.rados_connect_timeout >= 0:
            client.connect(timeout=
                           self.configuration.rados_connect_timeout)
        else:
            client.connect()
        ioctx = client.open_ioctx(pool)
        return client, ioctx
    except self.rados.Error:
        msg = _("Error connecting to ceph cluster.")
        LOG.exception(msg)
        # Shut the client down so a failed attempt doesn't leak a
        # half-open connection before we retry.
        client.shutdown()
        raise exception.VolumeBackendAPIException(data=msg)
def _disconnect_from_rados(self, client, ioctx):
    """Close the ioctx and shut down the rados client connection."""
    # closing an ioctx cannot raise an exception
    ioctx.close()
    client.shutdown()
def _get_backup_snaps(self, rbd_image):
    """Get list of any backup snapshots that exist on this volume.

    There should only ever be one but accept all since they need to be
    deleted before the volume can be.
    """
    # NOTE(dosaboy): we do the import here otherwise we get import conflict
    # issues between the rbd driver and the ceph backup driver. These
    # issues only seem to occur when NOT using them together and are
    # triggered when the ceph backup driver imports the rbd volume driver.
    from cinder.backup.drivers import ceph
    return ceph.CephBackupDriver.get_backup_snaps(rbd_image)
def _get_mon_addrs(self):
    """Return the ceph monitor addresses as parallel (hosts, ports) lists.

    Runs ``ceph mon dump --format=json`` and parses the monitor map from
    its output.
    """
    args = ['ceph', 'mon', 'dump', '--format=json']
    args.extend(self._ceph_args())
    # NOTE: don't unpack stderr into `_` - that would shadow the i18n
    # translation function imported at module level.
    out, _err = self._execute(*args)
    lines = out.split('\n')
    if lines[0].startswith('dumped monmap epoch'):
        lines = lines[1:]
    monmap = json.loads('\n'.join(lines))
    addrs = [mon['addr'] for mon in monmap['mons']]
    hosts = []
    ports = []
    for addr in addrs:
        # addr looks like "host:port/nonce"; IPv6 hosts are bracketed.
        host_port = addr[:addr.rindex('/')]
        host, port = host_port.rsplit(':', 1)
        hosts.append(host.strip('[]'))
        ports.append(port)
    return hosts, ports
def _update_volume_stats(self):
    """Refresh ``self._stats`` with pool capacity from a ``df`` command.

    On any rados error the capacities stay 'unknown' and the error is
    only logged.
    """
    stats = {
        'vendor_name': 'Open Source',
        'driver_version': self.VERSION,
        'storage_protocol': 'ceph',
        'total_capacity_gb': 'unknown',
        'free_capacity_gb': 'unknown',
        'reserved_percentage': 0,
        'multiattach': True,
    }
    backend_name = self.configuration.safe_get('volume_backend_name')
    stats['volume_backend_name'] = backend_name or 'RBD'

    try:
        with RADOSClient(self) as client:
            # Ask the monitors for cluster df output in JSON form.
            ret, outbuf, _outs = client.cluster.mon_command(
                '{"prefix":"df", "format":"json"}', '')
            if ret != 0:
                LOG.warning(_LW('Unable to get rados pool stats.'))
            else:
                # NOTE(review): raises IndexError if the configured pool is
                # absent from the df output; that case is not handled here -
                # confirm whether that is intended.
                outbuf = json.loads(outbuf)
                pool_stats = [pool for pool in outbuf['pools'] if
                              pool['name'] ==
                              self.configuration.rbd_pool][0]['stats']
                stats['free_capacity_gb'] = (
                    pool_stats['max_avail'] // units.Gi)
                used_capacity_gb = pool_stats['bytes_used'] // units.Gi
                stats['total_capacity_gb'] = (stats['free_capacity_gb']
                                              + used_capacity_gb)
    except self.rados.Error:
        # just log and return unknown capacities
        LOG.exception(_LE('error refreshing volume stats'))
    self._stats = stats
def get_volume_stats(self, refresh=False):
    """Return the current state of the volume service.

    If 'refresh' is True, run the update first.

    :returns: the (possibly stale) stats dict built by
        ``_update_volume_stats``.
    """
    if refresh:
        self._update_volume_stats()
    return self._stats
def _get_clone_depth(self, client, volume_name, depth=0):
    """Returns the number of ancestral clones of the given volume.

    :param client: an open RADOSClient.
    :param volume_name: image whose ancestry is walked.
    :param depth: accumulator used by the recursion; callers leave it 0.
    """
    parent_volume = self.rbd.Image(client.ioctx, volume_name)
    try:
        _pool, parent, _snap = self._get_clone_info(parent_volume,
                                                    volume_name)
    finally:
        parent_volume.close()

    if not parent:
        return depth

    # If clone depth was reached, flatten should have occurred so if it has
    # been exceeded then something has gone wrong.
    # NOTE(review): this raises a bare Exception rather than a
    # driver-specific type; callers apparently rely on that today.
    if depth > self.configuration.rbd_max_clone_depth:
        raise Exception(_("clone depth exceeds limit of %s") %
                        (self.configuration.rbd_max_clone_depth))

    # Recurse towards the root of the clone chain.
    return self._get_clone_depth(client, parent, depth + 1)
def create_cloned_volume(self, volume, src_vref):
    """Create a cloned volume from another volume.

    Since we are cloning from a volume and not a snapshot, we must first
    create a snapshot of the source volume.

    The user has the option to limit how long a volume's clone chain can be
    by setting rbd_max_clone_depth. If a clone is made of another clone
    and that clone has rbd_max_clone_depth clones behind it, the source
    volume will be flattened.
    """
    src_name = utils.convert_str(src_vref['name'])
    dest_name = utils.convert_str(volume['name'])
    flatten_parent = False

    # Do full copy if requested
    if self.configuration.rbd_max_clone_depth <= 0:
        with RBDVolumeProxy(self, src_name, read_only=True) as vol:
            vol.copy(vol.ioctx, dest_name)
        return

    # Otherwise do COW clone.
    with RADOSClient(self) as client:
        depth = self._get_clone_depth(client, src_name)
        # If source volume is a clone and rbd_max_clone_depth reached,
        # flatten the source before cloning. Zero rbd_max_clone_depth means
        # infinite is allowed.
        if depth == self.configuration.rbd_max_clone_depth:
            LOG.debug("maximum clone depth (%d) has been reached - "
                      "flattening source volume",
                      self.configuration.rbd_max_clone_depth)
            flatten_parent = True

        src_volume = self.rbd.Image(client.ioctx, src_name)
        try:
            # First flatten source volume if required.
            if flatten_parent:
                _pool, parent, snap = self._get_clone_info(src_volume,
                                                           src_name)
                # Flatten source volume
                LOG.debug("flattening source volume %s", src_name)
                src_volume.flatten()
                # Delete parent clone snap
                parent_volume = self.rbd.Image(client.ioctx, parent)
                try:
                    parent_volume.unprotect_snap(snap)
                    parent_volume.remove_snap(snap)
                finally:
                    parent_volume.close()

            # Create new snapshot of source volume
            clone_snap = "%s.clone_snap" % dest_name
            LOG.debug("creating snapshot='%s'", clone_snap)
            src_volume.create_snap(clone_snap)
            src_volume.protect_snap(clone_snap)
        except Exception:
            # Only close if exception since we still need it.
            src_volume.close()
            raise

        # Now clone source volume snapshot
        try:
            LOG.debug("cloning '%(src_vol)s@%(src_snap)s' to "
                      "'%(dest)s'",
                      {'src_vol': src_name, 'src_snap': clone_snap,
                       'dest': dest_name})
            self.RBDProxy().clone(client.ioctx, src_name, clone_snap,
                                  client.ioctx, dest_name,
                                  features=client.features)
        except Exception:
            # Clone failed: undo the snapshot we just created/protected.
            src_volume.unprotect_snap(clone_snap)
            src_volume.remove_snap(clone_snap)
            raise
        finally:
            src_volume.close()

    # Grow the clone if the new volume was requested larger than the
    # source.
    if volume['size'] != src_vref['size']:
        LOG.debug("resize volume '%(dst_vol)s' from %(src_size)d to "
                  "%(dst_size)d",
                  {'dst_vol': volume['name'], 'src_size': src_vref['size'],
                   'dst_size': volume['size']})
        self._resize(volume)

    LOG.debug("clone created successfully")
def create_volume(self, volume):
    """Creates a logical volume."""
    name = utils.convert_str(volume['name'])
    size_bytes = int(volume['size']) * units.Gi
    # rbd stripes images into 2**order byte objects; derive the order
    # from the configured chunk size (in MiB).
    chunk_bytes = self.configuration.rbd_store_chunk_size * units.Mi
    order = int(math.log(chunk_bytes, 2))

    LOG.debug("creating volume '%s'", volume['name'])
    with RADOSClient(self) as client:
        self.RBDProxy().create(client.ioctx,
                               name,
                               size_bytes,
                               order,
                               old_format=False,
                               features=client.features)
def _flatten(self, pool, volume_name):
    """Flatten an image, detaching it from its clone parent."""
    LOG.debug('flattening %(pool)s/%(img)s',
              dict(pool=pool, img=volume_name))
    with RBDVolumeProxy(self, volume_name, pool) as vol:
        vol.flatten()
def _clone(self, volume, src_pool, src_image, src_snap):
    """COW-clone src_pool/src_image@src_snap into the configured pool.

    The destination image is named after ``volume['name']`` and created
    with the source cluster connection's feature bits.
    """
    LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to %(dst)s',
              dict(pool=src_pool, img=src_image, snap=src_snap,
                   dst=volume['name']))
    # Separate connections: source may live in a different pool than the
    # destination (which uses the configured rbd_pool).
    with RADOSClient(self, src_pool) as src_client:
        with RADOSClient(self) as dest_client:
            self.RBDProxy().clone(src_client.ioctx,
                                  utils.convert_str(src_image),
                                  utils.convert_str(src_snap),
                                  dest_client.ioctx,
                                  utils.convert_str(volume['name']),
                                  features=src_client.features)
def _resize(self, volume, **kwargs):
    """Resize the backing image to kwargs['size'] bytes, defaulting to
    the volume's nominal size in GiB."""
    size = kwargs.get('size') or int(volume['size']) * units.Gi
    with RBDVolumeProxy(self, volume['name']) as vol:
        vol.resize(size)
def create_volume_from_snapshot(self, volume, snapshot):
    """Creates a volume from a snapshot."""
    self._clone(volume, self.configuration.rbd_pool,
                snapshot['volume_name'], snapshot['name'])
    # Optionally break the dependency on the snapshot right away.
    if self.configuration.rbd_flatten_volume_from_snapshot:
        self._flatten(self.configuration.rbd_pool, volume['name'])
    # Any non-zero requested size triggers a resize of the new image.
    if int(volume['size']):
        self._resize(volume)
def _delete_backup_snaps(self, rbd_image):
    """Remove any ceph-backup snapshots present on *rbd_image*."""
    backup_snaps = self._get_backup_snaps(rbd_image)
    if not backup_snaps:
        LOG.debug("volume has no backup snaps")
        return
    for snap in backup_snaps:
        rbd_image.remove_snap(snap['name'])
def _get_clone_info(self, volume, volume_name, snap=None):
    """If volume is a clone, return its parent info.

    Returns a tuple of (pool, parent, snap). A snapshot may optionally be
    provided for the case where a cloned volume has been flattened but it's
    snapshot still depends on the parent.

    Returns (None, None, None) when the image is not a clone, or when the
    parent snapshot does not follow our ``<name>.clone_snap`` convention.
    """
    try:
        if snap:
            volume.set_snap(snap)
        pool, parent, parent_snap = tuple(volume.parent_info())
        if snap:
            volume.set_snap(None)

        # Strip the tag off the end of the volume name since it will not be
        # in the snap name.
        if volume_name.endswith('.deleted'):
            volume_name = volume_name[:-len('.deleted')]

        # Now check the snap name matches.
        if parent_snap == "%s.clone_snap" % volume_name:
            return pool, parent, parent_snap
    except self.rbd.ImageNotFound:
        # parent_info() raises ImageNotFound when there is no parent.
        LOG.debug("Volume %s is not a clone.", volume_name)
        volume.set_snap(None)

    return (None, None, None)
def _get_children_info(self, volume, snap):
    """List children for the given snapshot of a volume (image).

    :returns: a list of (pool, image) tuples; empty when *snap* is falsy.
    """
    if not snap:
        return []
    volume.set_snap(snap)
    children = volume.list_children()
    volume.set_snap(None)
    return children
def _delete_clone_parent_refs(self, client, parent_name, parent_snap):
    """Walk back up the clone chain and delete references.

    Deletes references i.e. deleted parent volumes and snapshots.

    :param client: an open RADOSClient.
    :param parent_name: image name of the clone parent.
    :param parent_snap: the clone snapshot on that parent to remove.
    """
    parent_rbd = self.rbd.Image(client.ioctx, parent_name)
    parent_has_snaps = False
    try:
        # Check for grandparent
        _pool, g_parent, g_parent_snap = self._get_clone_info(parent_rbd,
                                                              parent_name,
                                                              parent_snap)

        LOG.debug("deleting parent snapshot %s", parent_snap)
        parent_rbd.unprotect_snap(parent_snap)
        parent_rbd.remove_snap(parent_snap)

        parent_has_snaps = bool(list(parent_rbd.list_snaps()))
    finally:
        parent_rbd.close()

    # If parent has been deleted in Cinder, delete the silent reference and
    # keep walking up the chain if it is itself a clone.
    if (not parent_has_snaps) and parent_name.endswith('.deleted'):
        LOG.debug("deleting parent %s", parent_name)
        self.RBDProxy().remove(client.ioctx, parent_name)

        # Now move up to grandparent if there is one
        if g_parent:
            self._delete_clone_parent_refs(client, g_parent, g_parent_snap)
def delete_volume(self, volume):
    """Deletes a logical volume.

    :raises exception.VolumeIsBusy: when the image has non-clone
        snapshots, or stays busy after the removal retries.
    """
    # NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are
    # utf-8 otherwise librbd will barf.
    volume_name = utils.convert_str(volume['name'])
    with RADOSClient(self) as client:
        try:
            rbd_image = self.rbd.Image(client.ioctx, volume_name)
        except self.rbd.ImageNotFound:
            # Already gone on the backend - treat delete as a success.
            LOG.info(_LI("volume %s no longer exists in backend"),
                     volume_name)
            return

        clone_snap = None
        parent = None

        # Ensure any backup snapshots are deleted
        self._delete_backup_snaps(rbd_image)

        # If the volume has non-clone snapshots this delete is expected to
        # raise VolumeIsBusy so do so straight away.
        try:
            snaps = rbd_image.list_snaps()
            for snap in snaps:
                if snap['name'].endswith('.clone_snap'):
                    LOG.debug("volume has clone snapshot(s)")
                    # We grab one of these and use it when fetching parent
                    # info in case the volume has been flattened.
                    clone_snap = snap['name']
                    break

                raise exception.VolumeIsBusy(volume_name=volume_name)

            # Determine if this volume is itself a clone
            _pool, parent, parent_snap = self._get_clone_info(rbd_image,
                                                              volume_name,
                                                              clone_snap)
        finally:
            rbd_image.close()

        # Retry removal a few times since a crashed client's watch can
        # keep the image transiently busy.
        @utils.retry(self.rbd.ImageBusy, retries=3)
        def _try_remove_volume(client, volume_name):
            self.RBDProxy().remove(client.ioctx, volume_name)

        if clone_snap is None:
            LOG.debug("deleting rbd volume %s", volume_name)
            try:
                _try_remove_volume(client, volume_name)
            except self.rbd.ImageBusy:
                msg = (_("ImageBusy error raised while deleting rbd "
                         "volume. This may have been caused by a "
                         "connection from a client that has crashed and, "
                         "if so, may be resolved by retrying the delete "
                         "after 30 seconds has elapsed."))
                LOG.warning(msg)
                # Now raise this so that volume stays available so that we
                # delete can be retried.
                raise exception.VolumeIsBusy(msg, volume_name=volume_name)
            except self.rbd.ImageNotFound:
                LOG.info(_LI("RBD volume %s not found, allowing delete "
                             "operation to proceed."), volume_name)
                return

            # If it is a clone, walk back up the parent chain deleting
            # references.
            if parent:
                LOG.debug("volume is a clone so cleaning references")
                self._delete_clone_parent_refs(client, parent, parent_snap)
        else:
            # If the volume has copy-on-write clones we will not be able to
            # delete it. Instead we will keep it as a silent volume which
            # will be deleted when it's snapshot and clones are deleted.
            new_name = "%s.deleted" % (volume_name)
            self.RBDProxy().rename(client.ioctx, volume_name, new_name)
def create_snapshot(self, snapshot):
    """Creates an rbd snapshot."""
    snap_name = utils.convert_str(snapshot['name'])
    with RBDVolumeProxy(self, snapshot['volume_name']) as volume:
        volume.create_snap(snap_name)
        # Protect immediately so the snap can serve as a clone parent.
        volume.protect_snap(snap_name)
def delete_snapshot(self, snapshot):
    """Deletes an rbd snapshot.

    :raises exception.SnapshotIsBusy: when clones still depend on the
        snapshot (unprotect raised ImageBusy).
    """
    # NOTE(dosaboy): this was broken by commit cbe1d5f. Ensure names are
    # utf-8 otherwise librbd will barf.
    volume_name = utils.convert_str(snapshot['volume_name'])
    snap_name = utils.convert_str(snapshot['name'])

    with RBDVolumeProxy(self, volume_name) as volume:
        try:
            volume.unprotect_snap(snap_name)
        except self.rbd.ImageNotFound:
            # NOTE(review): remove_snap below still runs in this case and
            # would itself raise ImageNotFound - confirm callers tolerate
            # that.
            LOG.info(_LI("Snapshot %s does not exist in backend."),
                     snap_name)
        except self.rbd.ImageBusy:
            # Log which clone children keep the snapshot busy, then bail.
            children_list = self._get_children_info(volume, snap_name)

            if children_list:
                for (pool, image) in children_list:
                    LOG.info(_LI('Image %(pool)s/%(image)s is dependent '
                                 'on the snapshot %(snap)s.'),
                             {'pool': pool,
                              'image': image,
                              'snap': snap_name})

            raise exception.SnapshotIsBusy(snapshot_name=snap_name)
        volume.remove_snap(snap_name)
def retype(self, context, volume, new_type, diff, host):
    """Retypes a volume, allow Qos and extra_specs change.

    :returns: True - retype is always a backend no-op for this driver.
    """
    # No need to check encryption, extra_specs and Qos here as:
    # encryptions have been checked as same.
    # extra_specs are not used in the driver.
    # Qos settings are not used in the driver.
    LOG.debug('RBD retype called for volume %s. No action '
              'required for RBD volumes.', volume.id)
    return True
def ensure_export(self, context, volume):
    """Synchronously recreates an export for a logical volume.

    rbd volumes need no export management, so this is a no-op.
    """
    pass
def create_export(self, context, volume, connector):
    """Exports the volume.

    rbd volumes need no export management, so this is a no-op.
    """
    pass
def remove_export(self, context, volume):
    """Removes an export for a logical volume.

    rbd volumes need no export management, so this is a no-op.
    """
    pass
def initialize_connection(self, volume, connector):
    """Assemble the rbd connection info consumed by the attach flow."""
    hosts, ports = self._get_mon_addrs()
    name = '%s/%s' % (self.configuration.rbd_pool, volume['name'])
    data = {
        'driver_volume_type': 'rbd',
        'data': {
            'name': name,
            'hosts': hosts,
            'ports': ports,
            'auth_enabled': (self.configuration.rbd_user is not None),
            'auth_username': self.configuration.rbd_user,
            'secret_type': 'ceph',
            'secret_uuid': self.configuration.rbd_secret_uuid,
            'volume_id': volume['id'],
        }
    }
    LOG.debug('connection data: %s', data)
    return data
def terminate_connection(self, volume, connector, **kwargs):
    # Nothing to tear down on the backend for an rbd attachment.
    pass
def _parse_location(self, location):
    """Split an ``rbd://`` image location into its four components.

    :returns: [fsid, pool, image, snapshot]
    :raises exception.ImageUnacceptable: when the URL is not a usable
        rbd snapshot location.
    """
    prefix = 'rbd://'
    if not location.startswith(prefix):
        reason = _('Not stored in rbd')
        raise exception.ImageUnacceptable(image_id=location, reason=reason)
    pieces = [urllib.parse.unquote(loc)
              for loc in location[len(prefix):].split('/')]
    if any(piece == '' for piece in pieces):
        reason = _('Blank components')
        raise exception.ImageUnacceptable(image_id=location, reason=reason)
    if len(pieces) != 4:
        reason = _('Not an rbd snapshot')
        raise exception.ImageUnacceptable(image_id=location, reason=reason)
    return pieces
def _get_fsid(self):
    """Return the fsid (unique identifier) of the connected ceph cluster."""
    with RADOSClient(self) as client:
        return client.cluster.get_fsid()
def _is_cloneable(self, image_location, image_meta):
    """Return True if image_location is an rbd snapshot we can clone.

    Requires: a parseable rbd:// URL, the same ceph cluster (fsid
    match), a 'raw' disk format, and read access to the snapshot.
    """
    try:
        fsid, pool, image, snapshot = self._parse_location(image_location)
    except exception.ImageUnacceptable as e:
        LOG.debug('not cloneable: %s.', e)
        return False
    # Cloning only works within a single ceph cluster.
    if self._get_fsid() != fsid:
        LOG.debug('%s is in a different ceph cluster.', image_location)
        return False
    if image_meta['disk_format'] != 'raw':
        LOG.debug("rbd image clone requires image format to be "
                  "'raw' but image %(image)s is '%(format)s'",
                  {"image": image_location,
                   "format": image_meta['disk_format']})
        return False
    # check that we can read the image
    try:
        with RBDVolumeProxy(self, image,
                            pool=pool,
                            snapshot=snapshot,
                            read_only=True):
            return True
    except self.rbd.Error as e:
        LOG.debug('Unable to open image %(loc)s: %(err)s.',
                  dict(loc=image_location, err=e))
        return False
def clone_image(self, context, volume,
                image_location, image_meta,
                image_service):
    """Clone the volume from a glance image when possible.

    Note: image_location[0] is glance image direct_url.
    image_location[1] contains the list of all locations (including
    direct_url) or None if show_multiple_locations is False in
    glance configuration.

    :returns: (model_update, cloned) per the driver clone_image API
    """
    if not image_location:
        return ({}, False)

    all_locations = image_location[1]
    if all_locations:
        candidate_urls = [entry['url'] for entry in all_locations]
    else:
        candidate_urls = [image_location[0]]

    # iterate all locations to look for a cloneable one.
    for url in candidate_urls:
        if not url or not self._is_cloneable(url, image_meta):
            continue
        _prefix, pool, image, snapshot = self._parse_location(url)
        self._clone(volume, pool, image, snapshot)
        self._resize(volume)
        return {'provider_location': None}, True
    return ({}, False)
def _image_conversion_dir(self):
    """Return the scratch directory for image conversion, creating it
    if necessary.

    Preference order: deprecated volume_tmp_dir, then the global
    image_conversion_dir option, then the system temp dir.
    """
    tmpdir = (self.configuration.volume_tmp_dir or
              CONF.image_conversion_dir or
              tempfile.gettempdir())

    if tmpdir == self.configuration.volume_tmp_dir:
        LOG.warning(_LW('volume_tmp_dir is now deprecated, please use '
                        'image_conversion_dir.'))

    # ensure temporary directory exists
    # NOTE(review): exists-then-makedirs is racy under concurrent
    # requests — presumably tolerated here; confirm before changing.
    if not os.path.exists(tmpdir):
        os.makedirs(tmpdir)

    return tmpdir
def copy_image_to_volume(self, context, volume, image_service, image_id):
    """Fetch a glance image into a raw temp file and import it as the
    volume's rbd image.

    The pre-created (empty) volume is deleted first and recreated by
    'rbd import', then resized to the requested volume size.
    """
    tmp_dir = self._image_conversion_dir()

    with tempfile.NamedTemporaryFile(dir=tmp_dir) as tmp:
        image_utils.fetch_to_raw(context, image_service, image_id,
                                 tmp.name,
                                 self.configuration.volume_dd_blocksize,
                                 size=volume['size'])

        self.delete_volume(volume)

        # rbd object size is 2**order bytes; derive order from the
        # configured chunk size in MiB.
        chunk_size = self.configuration.rbd_store_chunk_size * units.Mi
        order = int(math.log(chunk_size, 2))

        # keep using the command line import instead of librbd since it
        # detects zeroes to preserve sparseness in the image
        args = ['rbd', 'import',
                '--pool', self.configuration.rbd_pool,
                '--order', order,
                tmp.name, volume['name'],
                '--new-format']
        args.extend(self._ceph_args())
        self._try_execute(*args)
    self._resize(volume)
def copy_volume_to_image(self, context, volume, image_service, image_meta):
    """Export the volume to a temp file and upload it to glance.

    The temp file is cleaned up on error by remove_path_on_error and
    unlinked after a successful upload.
    """
    tmp_dir = self._image_conversion_dir()
    tmp_file = os.path.join(tmp_dir,
                            volume['name'] + '-' + image_meta['id'])
    with fileutils.remove_path_on_error(tmp_file):
        args = ['rbd', 'export',
                '--pool', self.configuration.rbd_pool,
                volume['name'], tmp_file]
        args.extend(self._ceph_args())
        self._try_execute(*args)
        image_utils.upload_volume(context, image_service,
                                  image_meta, tmp_file)
    os.unlink(tmp_file)
def backup_volume(self, context, backup, backup_service):
    """Create a new backup from an existing volume.

    Wraps the open rbd image in a file-like object so the backup
    service can stream directly from it.
    """
    volume = self.db.volume_get(context, backup['volume_id'])

    with RBDVolumeProxy(self, volume['name'],
                        self.configuration.rbd_pool) as rbd_image:
        rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool,
                                    self.configuration.rbd_user,
                                    self.configuration.rbd_ceph_conf)
        rbd_fd = RBDImageIOWrapper(rbd_meta)
        backup_service.backup(backup, rbd_fd)

    LOG.debug("volume backup complete.")
def restore_backup(self, context, backup, volume, backup_service):
    """Restore an existing backup to a new or existing volume.

    Mirrors backup_volume: the target rbd image is exposed as a
    file-like object for the backup service to write into.
    """
    with RBDVolumeProxy(self, volume['name'],
                        self.configuration.rbd_pool) as rbd_image:
        rbd_meta = RBDImageMetadata(rbd_image, self.configuration.rbd_pool,
                                    self.configuration.rbd_user,
                                    self.configuration.rbd_ceph_conf)
        rbd_fd = RBDImageIOWrapper(rbd_meta)
        backup_service.restore(backup, volume['id'], rbd_fd)

    LOG.debug("volume restore complete.")
def extend_volume(self, volume, new_size):
    """Extend an existing volume.

    :param volume: volume ref (its 'size' is the current size in GB)
    :param new_size: target size in GB
    :raises VolumeBackendAPIException: if the resize fails
    """
    old_size = volume['size']
    try:
        self._resize(volume, size=int(new_size) * units.Gi)
    except Exception:
        msg = _('Failed to Extend Volume '
                '%(volname)s') % {'volname': volume['name']}
        LOG.error(msg)
        raise exception.VolumeBackendAPIException(data=msg)
    LOG.debug("Extend volume from %(old_size)s GB to %(new_size)s GB.",
              {'old_size': old_size, 'new_size': new_size})
def manage_existing(self, volume, existing_ref):
    """Manages an existing image.

    Renames the image name to match the expected name for the volume.
    Error checking done by manage_existing_get_size is not repeated.

    :param volume: volume ref info to be set
    :param existing_ref: existing_ref is a dictionary of the form:
        {'source-name': <name of rbd image>}
    """
    # Raise an exception if we didn't find a suitable rbd image.
    with RADOSClient(self) as client:
        rbd_name = existing_ref['source-name']
        self.RBDProxy().rename(client.ioctx,
                               utils.convert_str(rbd_name),
                               utils.convert_str(volume['name']))
def manage_existing_get_size(self, volume, existing_ref):
    """Return size of an existing image for manage_existing.

    :param volume: volume ref info to be set
    :param existing_ref: existing_ref is a dictionary of the form:
        {'source-name': <name of rbd image>}
    :returns: image size in GiB, rounded up to the next whole GiB
    :raises ManageExistingInvalidReference: if the reference is
        malformed or the named image does not exist
    :raises VolumeBackendAPIException: if the reported size cannot be
        parsed as a number
    """
    # Check that the reference is valid
    if 'source-name' not in existing_ref:
        reason = _('Reference must contain source-name element.')
        raise exception.ManageExistingInvalidReference(
            existing_ref=existing_ref, reason=reason)

    rbd_name = utils.convert_str(existing_ref['source-name'])
    with RADOSClient(self) as client:
        # Raise an exception if we didn't find a suitable rbd image.
        rbd_image = None
        try:
            rbd_image = self.rbd.Image(client.ioctx, rbd_name)
            image_size = rbd_image.size()
        except self.rbd.ImageNotFound:
            kwargs = {'existing_ref': rbd_name,
                      'reason': 'Specified rbd image does not exist.'}
            raise exception.ManageExistingInvalidReference(**kwargs)
        finally:
            # BUG FIX: previously rbd_image.close() ran unconditionally,
            # raising UnboundLocalError (masking ImageNotFound) when
            # self.rbd.Image() itself failed to open the image.
            if rbd_image is not None:
                rbd_image.close()

        # RBD image size is returned in bytes.  Attempt to parse
        # size as a float and round up to the next integer GiB.
        # BUG FIX: the old expression int(math.ceil(int(size))) / units.Gi
        # floor-divided the byte count, truncating instead of rounding up.
        try:
            convert_size = int(math.ceil(float(image_size) / units.Gi))
            return convert_size
        except ValueError:
            exception_message = (_("Failed to manage existing volume "
                                   "%(name)s, because reported size "
                                   "%(size)s was not a floating-point"
                                   " number.")
                                 % {'name': rbd_name,
                                    'size': image_size})
            raise exception.VolumeBackendAPIException(
                data=exception_message)
def update_migrated_volume(self, ctxt, volume, new_volume,
                           original_volume_status):
    """Return model update from RBD for migrated volume.

    This method should rename the back-end volume name(id) on the
    destination host back to its original name(id) on the source host.

    :param ctxt: The context used to run the method update_migrated_volume
    :param volume: The original volume that was migrated to this backend
    :param new_volume: The migration volume object that was created on
                       this backend as part of the migration process
    :param original_volume_status: The status of the original volume
    :return model_update to update DB with any needed changes
    """
    name_id = None
    provider_location = None
    existing_name = CONF.volume_name_template % new_volume['id']
    wanted_name = CONF.volume_name_template % volume['id']
    with RADOSClient(self) as client:
        try:
            self.RBDProxy().rename(client.ioctx,
                                   utils.convert_str(existing_name),
                                   utils.convert_str(wanted_name))
        except self.rbd.ImageNotFound:
            LOG.error(_LE('Unable to rename the logical volume '
                          'for volume %s.'), volume['id'])
            # If the rename fails, _name_id should be set to the new
            # volume id and provider_location should be set to the
            # one from the new volume as well.
            name_id = new_volume['_name_id'] or new_volume['id']
            provider_location = new_volume['provider_location']
    return {'_name_id': name_id, 'provider_location': provider_location}
def migrate_volume(self, context, volume, host):
    """Driver-assisted migration is not implemented; (False, None)
    signals the caller to fall back to generic migration."""
    return (False, None)
| |
import json
import http
import asyncio
from waterbutler.core import streams
from waterbutler.core import provider
from waterbutler.core import exceptions
from waterbutler.core.path import WaterButlerPath
from waterbutler.providers.dropbox import settings
from waterbutler.providers.dropbox.metadata import DropboxRevision
from waterbutler.providers.dropbox.metadata import DropboxFileMetadata
from waterbutler.providers.dropbox.metadata import DropboxFolderMetadata
class DropboxProvider(provider.BaseProvider):
    """WaterButler provider backed by the Dropbox (v1) API."""

    NAME = 'dropbox'
    BASE_URL = settings.BASE_URL

    def __init__(self, auth, credentials, settings):
        super().__init__(auth, credentials, settings)
        self.token = self.credentials['token']
        self.folder = self.settings['folder']

    @asyncio.coroutine
    def validate_path(self, path, **kwargs):
        """Build a WaterButlerPath rooted at this provider's folder."""
        return WaterButlerPath(path, prepend=self.folder)

    @property
    def default_headers(self):
        return {
            'Authorization': 'Bearer {}'.format(self.token),
        }

    def _construct_metadata(self, data):
        """Turn a raw Dropbox metadata dict into WaterButler metadata.

        Folders get their immediate children attached.  Shared by
        intra_copy and intra_move (previously duplicated; resolves the
        inline TODO to refactor into a function).
        """
        if not data['is_dir']:
            return DropboxFileMetadata(data, self.folder)

        folder = DropboxFolderMetadata(data, self.folder)
        folder.children = []
        for item in data['contents']:
            if item['is_dir']:
                folder.children.append(DropboxFolderMetadata(item, self.folder))
            else:
                folder.children.append(DropboxFileMetadata(item, self.folder))
        return folder

    @asyncio.coroutine
    def intra_copy(self, dest_provider, src_path, dest_path):
        """Copy within Dropbox; cross-account copies use copy_ref.

        On a 403 the destination is deleted and the copy retried once,
        reported with created=False.

        :returns: (metadata, created) tuple
        """
        try:
            if self == dest_provider:
                resp = yield from self.make_request(
                    'POST',
                    self.build_url('fileops', 'copy'),
                    data={
                        'root': 'auto',
                        'from_path': src_path.full_path,
                        'to_path': dest_path.full_path,
                    },
                    expects=(200, 201),
                    throws=exceptions.IntraCopyError,
                )
            else:
                from_ref_resp = yield from self.make_request(
                    'GET',
                    self.build_url('copy_ref', 'auto', src_path.full_path),
                )
                from_ref_data = yield from from_ref_resp.json()
                resp = yield from self.make_request(
                    'POST',
                    self.build_url('fileops', 'copy'),
                    data={
                        'root': 'auto',
                        'from_copy_ref': from_ref_data['copy_ref'],
                        'to_path': dest_path,
                    },
                    headers=dest_provider.default_headers,
                    expects=(200, 201),
                    throws=exceptions.IntraCopyError,
                )
        except exceptions.IntraCopyError as e:
            if e.code != 403:
                raise
            # Destination already exists: remove it and retry once.
            yield from dest_provider.delete(dest_path)
            resp, _ = yield from self.intra_copy(dest_provider, src_path, dest_path)
            return resp, False

        data = yield from resp.json()
        return self._construct_metadata(data), True

    @asyncio.coroutine
    def intra_move(self, dest_provider, src_path, dest_path):
        """Move within a single Dropbox account.

        :raises InvalidPathError: when src and dest differ only by case
        :returns: (metadata, created) tuple
        """
        if dest_path.full_path.lower() == src_path.full_path.lower():
            # Dropbox does not support changing the casing in a file name
            raise exceptions.InvalidPathError('In Dropbox to change case, add or subtract other characters.')

        try:
            resp = yield from self.make_request(
                'POST',
                self.build_url('fileops', 'move'),
                data={
                    'root': 'auto',
                    'to_path': dest_path.full_path,
                    'from_path': src_path.full_path,
                },
                expects=(200, ),
                throws=exceptions.IntraMoveError,
            )
        except exceptions.IntraMoveError as e:
            if e.code != 403:
                raise
            # Destination already exists: remove it and retry once.
            yield from dest_provider.delete(dest_path)
            resp, _ = yield from self.intra_move(dest_provider, src_path, dest_path)
            return resp, False

        data = yield from resp.json()
        return self._construct_metadata(data), True

    @asyncio.coroutine
    def download(self, path, revision=None, range=None, **kwargs):
        """Stream file contents, optionally for a specific revision/range."""
        if revision:
            url = self._build_content_url('files', 'auto', path.full_path, rev=revision)
        else:
            # Dont add unused query parameters
            url = self._build_content_url('files', 'auto', path.full_path)

        resp = yield from self.make_request(
            'GET',
            url,
            range=range,
            expects=(200, 206),
            throws=exceptions.DownloadError,
        )

        # Without a Content-Length header, fall back to the size in the
        # Dropbox metadata header.
        if 'Content-Length' not in resp.headers:
            size = json.loads(resp.headers['X-DROPBOX-METADATA'])['bytes']
        else:
            size = None
        return streams.ResponseStreamReader(resp, size=size)

    @asyncio.coroutine
    def upload(self, stream, path, conflict='replace', **kwargs):
        """Upload *stream* to *path* honoring the conflict policy.

        :returns: (DropboxFileMetadata, created) tuple
        """
        path, exists = yield from self.handle_name_conflict(path, conflict=conflict)

        resp = yield from self.make_request(
            'PUT',
            self._build_content_url('files_put', 'auto', path.full_path),
            headers={'Content-Length': str(stream.size)},
            data=stream,
            expects=(200, ),
            throws=exceptions.UploadError,
        )

        data = yield from resp.json()
        return DropboxFileMetadata(data, self.folder), not exists

    @asyncio.coroutine
    def delete(self, path, **kwargs):
        """Delete the file or folder at *path*."""
        yield from self.make_request(
            'POST',
            self.build_url('fileops', 'delete'),
            data={'root': 'auto', 'path': path.full_path},
            expects=(200, ),
            throws=exceptions.DeleteError,
        )

    @asyncio.coroutine
    def metadata(self, path, **kwargs):
        """Return a list of child metadata for folders, or a single
        DropboxFileMetadata for files.

        :raises MetadataError: (404) if the entry is deleted, or a
            folder matched where a file was requested
        """
        resp = yield from self.make_request(
            'GET',
            self.build_url('metadata', 'auto', path.full_path),
            expects=(200, ),
            throws=exceptions.MetadataError
        )

        data = yield from resp.json()

        # Dropbox will match a file or folder by name within the requested path
        if path.is_file and data['is_dir']:
            raise exceptions.MetadataError(
                "Could not retrieve file '{}'".format(path),
                code=http.client.NOT_FOUND,
            )

        if data.get('is_deleted'):
            raise exceptions.MetadataError(
                "Could not retrieve {kind} '{path}'".format(
                    kind='folder' if data['is_dir'] else 'file',
                    path=path,
                ),
                code=http.client.NOT_FOUND,
            )

        # Note: folders return a flat list of children here (not a folder
        # object), so _construct_metadata is intentionally not used.
        if data['is_dir']:
            ret = []
            for item in data['contents']:
                if item['is_dir']:
                    ret.append(DropboxFolderMetadata(item, self.folder))
                else:
                    ret.append(DropboxFileMetadata(item, self.folder))
            return ret

        return DropboxFileMetadata(data, self.folder)

    @asyncio.coroutine
    def revisions(self, path, **kwargs):
        """List up to 250 non-deleted revisions of the file at *path*."""
        response = yield from self.make_request(
            'GET',
            self.build_url('revisions', 'auto', path.full_path, rev_limit=250),
            expects=(200, ),
            throws=exceptions.RevisionsError
        )

        data = yield from response.json()
        return [
            DropboxRevision(item)
            for item in data
            if not item.get('is_deleted')
        ]

    @asyncio.coroutine
    def create_folder(self, path, **kwargs):
        """Create a folder.

        :param str path: The path to create a folder at
        :raises FolderNamingConflict: if something already exists there
        """
        WaterButlerPath.validate_folder(path)

        response = yield from self.make_request(
            'POST',
            self.build_url('fileops', 'create_folder'),
            params={
                'root': 'auto',
                'path': path.full_path
            },
            expects=(200, 403),
            throws=exceptions.CreateFolderError
        )

        data = yield from response.json()

        # Dropbox signals naming conflicts with a 403 plus a message.
        if response.status == 403:
            if 'because a file or folder already exists at path' in data.get('error'):
                raise exceptions.FolderNamingConflict(str(path))
            raise exceptions.CreateFolderError(data, code=403)

        return DropboxFolderMetadata(data, self.folder)

    def can_intra_copy(self, dest_provider, path=None):
        # Copies work across any two Dropbox providers (via copy_ref).
        return type(self) == type(dest_provider)

    def can_intra_move(self, dest_provider, path=None):
        # Moves only work within the same account/provider instance.
        return self == dest_provider

    def _build_content_url(self, *segments, **query):
        return provider.build_url(settings.BASE_CONTENT_URL, *segments, **query)
| |
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import math
import re
import sys
import unicodedata
import six
from muranoagent.openstack.common.gettextutils import _
# Exponent (power of the unit-system base) for each size prefix.
UNIT_PREFIX_EXPONENT = {
    'k': 1,
    'K': 1,
    'Ki': 1,
    'M': 2,
    'Mi': 2,
    'G': 3,
    'Gi': 3,
    'T': 4,
    'Ti': 4,
}
# Per unit system: (base, regex matching "<number><prefix><b|bit|B>").
UNIT_SYSTEM_INFO = {
    'IEC': (1024, re.compile(r'(^[-+]?\d*\.?\d+)([KMGT]i?)?(b|bit|B)$')),
    'SI': (1000, re.compile(r'(^[-+]?\d*\.?\d+)([kMGT])?(b|bit|B)$')),
}

# Lower-cased values recognized by bool_from_string().
TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes')
FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no')

# Used by to_slug(): strip non-word characters, then collapse runs of
# hyphens/whitespace into single hyphens.
SLUGIFY_STRIP_RE = re.compile(r"[^\w\s-]")
SLUGIFY_HYPHENATE_RE = re.compile(r"[-\s]+")

# NOTE(flaper87): The following 3 globals are used by `mask_password`
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']

# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                    r'(<%(key)s>).*?(</%(key)s>)',
                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])',
                    r'([\'"].*?%(key)s[\'"]\s*,\s*\'--?[A-z]+\'\s*,\s*u?[\'"])'
                    '.*?([\'"])',
                    r'(%(key)s\s*--?[A-z]+\s*)\S+(\s*)']

# Pre-compile one masking regex per (key, format) combination.
for key in _SANITIZE_KEYS:
    for pattern in _FORMAT_PATTERNS:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS.append(reg_ex)
def int_from_bool_as_string(subject):
    """Interpret a string as a boolean and return either 1 or 0.

    Any string value in ('True', 'true', 'On', 'on', '1') is interpreted
    as boolean True.

    Useful for JSON-decoded stuff and config file parsing.
    """
    return 1 if bool_from_string(subject) else 0
def bool_from_string(subject, strict=False, default=False):
    """Interpret a string as a boolean.

    A case-insensitive match is performed: 't', 'true', 'on', 'y',
    'yes', and '1' mean True; 'f', 'false', 'off', 'n', 'no', and '0'
    mean False.  With `strict=False` anything else yields *default*;
    with `strict=True` unrecognized values (including None) raise
    ValueError, which is useful for values passed in via an API call.
    """
    if not isinstance(subject, six.string_types):
        subject = six.text_type(subject)

    normalized = subject.strip().lower()
    if normalized in TRUE_STRINGS:
        return True
    if normalized in FALSE_STRINGS:
        return False
    if not strict:
        return default

    acceptable = ', '.join(
        "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS))
    msg = _("Unrecognized value '%(val)s', acceptable values are:"
            " %(acceptable)s") % {'val': subject,
                                  'acceptable': acceptable}
    raise ValueError(msg)
def safe_decode(text, incoming=None, errors='strict'):
    """Decode incoming text/bytes using `incoming` unless already unicode.

    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a unicode `incoming` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be decoded" % type(text))

    if isinstance(text, six.text_type):
        return text

    encoding = incoming or (sys.stdin.encoding or sys.getdefaultencoding())
    try:
        return text.decode(encoding, errors)
    except UnicodeDecodeError:
        # The guessed encoding (typically from an unset/incorrect LANG
        # variable, falling back to ASCII/ANSI) could not decode the
        # bytes; retry with UTF-8, which is an ASCII extension.
        return text.decode('utf-8', errors)
def safe_encode(text, incoming=None,
                encoding='utf-8', errors='strict'):
    """Encode incoming text/bytes using `encoding`.

    If *incoming* is not given, text is expected to be encoded with
    python's current default encoding (`sys.getdefaultencoding`).

    :param incoming: Text's current encoding
    :param encoding: Expected encoding for text (Default UTF-8)
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: text or a bytestring `encoding` encoded
        representation of it.
    :raises TypeError: If text is not an instance of str
    """
    if not isinstance(text, (six.string_types, six.binary_type)):
        raise TypeError("%s can't be encoded" % type(text))

    if not incoming:
        incoming = (sys.stdin.encoding or
                    sys.getdefaultencoding())

    if isinstance(text, six.text_type):
        return text.encode(encoding, errors)
    if text and encoding != incoming:
        # Byte string in a different encoding: decode first, then
        # re-encode with the requested encoding.
        return safe_decode(text, incoming, errors).encode(encoding, errors)
    return text
def string_to_bytes(text, unit_system='IEC', return_int=False):
    """Converts a string into an float representation of bytes.

    The units supported for IEC ::
        Kb(it), Kib(it), Mb(it), Mib(it), Gb(it), Gib(it), Tb(it), Tib(it)
        KB, KiB, MB, MiB, GB, GiB, TB, TiB
    The units supported for SI ::
        kb(it), Mb(it), Gb(it), Tb(it)
        kB, MB, GB, TB
    Note that the SI unit system does not support capital letter 'K'.

    :param text: String input for bytes size conversion.
    :param unit_system: Unit system for byte size conversion.
    :param return_int: If True, returns integer representation of text
                       in bytes. (default: decimal)
    :returns: Numerical representation of text in bytes.
    :raises ValueError: If text has an invalid value.
    """
    try:
        base, reg_ex = UNIT_SYSTEM_INFO[unit_system]
    except KeyError:
        raise ValueError(_('Invalid unit system: "%s"') % unit_system)

    match = reg_ex.match(text)
    if not match:
        raise ValueError(_('Invalid string format: %s') % text)

    magnitude = float(match.group(1))
    unit_prefix = match.group(2)
    # 'b'/'bit' suffixes are bits, not bytes.
    if match.group(3) in ('b', 'bit'):
        magnitude /= 8

    res = magnitude
    if unit_prefix:
        res *= pow(base, UNIT_PREFIX_EXPONENT[unit_prefix])
    return int(math.ceil(res)) if return_int else res
def to_slug(value, incoming=None, errors="strict"):
    """Normalize string.

    Convert to lowercase, remove non-word characters, and convert
    spaces to hyphens.  Inspired by Django's `slugify` filter.

    :param value: Text to slugify
    :param incoming: Text's current encoding
    :param errors: Errors handling policy. See here for valid
        values http://docs.python.org/2/library/codecs.html
    :returns: slugified unicode representation of `value`
    :raises TypeError: If text is not an instance of str
    """
    decoded = safe_decode(value, incoming, errors)
    # NOTE(aababilov): no need to use safe_(encode|decode) here:
    # encodings are always "ascii", error handling is always "ignore"
    # and types are always known (first: unicode; second: str)
    ascii_folded = unicodedata.normalize("NFKD", decoded).encode(
        "ascii", "ignore").decode("ascii")
    stripped = SLUGIFY_STRIP_RE.sub("", ascii_folded).strip().lower()
    return SLUGIFY_HYPHENATE_RE.sub("-", stripped)
def mask_password(message, secret="***"):
    """Replace password with 'secret' in message.

    :param message: The string which includes security information.
    :param secret: value with which to replace passwords.
    :returns: The unicode value of message with the password fields masked.

    For example:

    >>> mask_password("'adminPass' : 'aaaaa'")
    "'adminPass' : '***'"
    >>> mask_password("'admin_pass' : 'aaaaa'")
    "'admin_pass' : '***'"
    >>> mask_password('"password" : "aaaaa"')
    '"password" : "***"'
    >>> mask_password("'original_password' : 'aaaaa'")
    "'original_password' : '***'"
    >>> mask_password("u'original_password' : u'aaaaa'")
    "u'original_password' : u'***'"
    """
    message = six.text_type(message)

    # Fast path: if no sensitive key appears anywhere in the message,
    # skip the regex passes entirely.
    if not any(key in message for key in _SANITIZE_KEYS):
        return message

    replacement = r'\g<1>' + secret + r'\g<2>'
    for pattern in _SANITIZE_PATTERNS:
        message = re.sub(pattern, replacement, message)
    return message
| |
# -*- test-case-name: txdav.who.test.test_delegates -*-
##
# Copyright (c) 2013-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Delegate assignments
"""
from twisted.python.constants import Names, NamedConstant
from twisted.internet.defer import inlineCallbacks, returnValue, succeed, \
DeferredList
from twistedcaldav.config import config
from twistedcaldav.memcacher import Memcacher
from twext.python.log import Logger
from twext.who.idirectory import (
RecordType as BaseRecordType, FieldName, NotAllowedError
)
from twext.who.directory import (
DirectoryService as BaseDirectoryService,
DirectoryRecord as BaseDirectoryRecord
)
from twext.who.expression import MatchExpression, MatchType
log = Logger()
class RecordType(Names):
    """
    Constants for read-only delegates and read-write delegate groups
    """
    # Members of these groups are delegates *of* the parent record.
    readDelegateGroup = NamedConstant()
    readDelegateGroup.description = u"read-delegate-group"
    writeDelegateGroup = NamedConstant()
    writeDelegateGroup.description = u"write-delegate-group"

    # Members of these groups are records that have delegated *to* the
    # parent record.
    readDelegatorGroup = NamedConstant()
    readDelegatorGroup.description = u"read-delegator-group"
    writeDelegatorGroup = NamedConstant()
    writeDelegatorGroup.description = u"write-delegator-group"
class DirectoryRecord(BaseDirectoryRecord):
    """
    Synthetic record for one delegate group.  Its uid has the form
    "<parentUID>#<proxyType>", where the parent is a record in the
    master directory.
    """

    @inlineCallbacks
    def _membersUIDs(self, expanded=False):
        """
        If this is a readDelegateGroup or writeDelegateGroup, the result
        will consist of the UIDs who are delegates *of* this record.

        If this is a readDelegatorGroup or writeDelegatorGroup,
        the results will consist of the UIDs who have delegated *to*
        this record.
        """
        parentUID, _ignore_proxyType = self.uid.split(u"#")
        parentRecord = yield self.service._masterDirectory.recordWithUID(parentUID)

        @inlineCallbacks
        def _members(txn):
            if self.recordType in (
                RecordType.readDelegateGroup, RecordType.writeDelegateGroup
            ):  # Members are delegates of this record
                readWrite = (self.recordType is RecordType.writeDelegateGroup)
                delegateUIDs = yield Delegates._delegatesOfUIDs(txn, parentRecord, readWrite, expanded=expanded)
            else:  # Members have delegated to this record
                readWrite = (self.recordType is RecordType.writeDelegatorGroup)
                delegateUIDs = yield Delegates._delegatedToUIDs(txn, parentRecord, readWrite)
            returnValue(delegateUIDs)

        # Run the lookup inside a store transaction.
        delegateUIDs = yield self.service._store.inTransaction(
            "DirectoryRecord.members", _members
        )
        returnValue(delegateUIDs)

    @inlineCallbacks
    def members(self, expanded=False):
        """
        If this is a readDelegateGroup or writeDelegateGroup, the members
        will consist of the records who are delegates *of* this record.

        If this is a readDelegatorGroup or writeDelegatorGroup,
        the members will consist of the records who have delegated *to*
        this record.
        """
        parentUID, _ignore_proxyType = self.uid.split(u"#")
        delegateUIDs = yield self._membersUIDs(expanded=expanded)
        records = []
        for uid in delegateUIDs:
            # The parent record is excluded from its own group.
            if uid != parentUID:
                record = yield self.service._masterDirectory.recordWithUID(uid)
                if record is not None:
                    records.append(record)
        returnValue(records)

    def expandedMembers(self):
        # Members with nested groups expanded.
        return self.members(expanded=True)

    @inlineCallbacks
    def setMembers(self, memberRecords):
        """
        Replace the members of this group with the new members.

        @param memberRecords: The new members of the group
        @type memberRecords: iterable of L{iDirectoryRecord}s
        """
        # Only delegate (not delegator) groups can be assigned to.
        if self.recordType not in (
            RecordType.readDelegateGroup, RecordType.writeDelegateGroup
        ):
            raise NotAllowedError("Setting members not supported")

        parentUID, _ignore_proxyType = self.uid.split(u"#")
        readWrite = (self.recordType is RecordType.writeDelegateGroup)
        log.info(
            "Setting delegate assignments for {u} ({rw}) to {m}",
            u=parentUID, rw=("write" if readWrite else "read"),
            m=[r.uid for r in memberRecords]
        )
        delegator = (
            yield self.service._masterDirectory.recordWithUID(parentUID)
        )

        def _setMembers(txn):
            return Delegates.setDelegates(txn, delegator, memberRecords, readWrite)

        yield self.service._store.inTransaction(
            "DirectoryRecord.setMembers", _setMembers
        )

    @inlineCallbacks
    def containsUID(self, uid):
        """
        Is the supplied UID an expanded member of this proxy group.

        @param uid: UID to test
        @type uid: L{str}

        @return: result
        @rtype: L{bool}
        """
        delegateUIDs = yield self._membersUIDs(expanded=True)
        returnValue(uid in delegateUIDs)
def recordTypeToProxyType(recordType):
    """Map a delegate-group RecordType to its proxy-type string, or None."""
    mapping = {
        RecordType.readDelegateGroup: "calendar-proxy-read",
        RecordType.writeDelegateGroup: "calendar-proxy-write",
        RecordType.readDelegatorGroup: "calendar-proxy-read-for",
        RecordType.writeDelegatorGroup: "calendar-proxy-write-for",
    }
    return mapping.get(recordType)
def proxyTypeToRecordType(proxyType):
    """Inverse of recordTypeToProxyType: proxy-type string to RecordType."""
    mapping = {
        "calendar-proxy-read": RecordType.readDelegateGroup,
        "calendar-proxy-write": RecordType.writeDelegateGroup,
        "calendar-proxy-read-for": RecordType.readDelegatorGroup,
        "calendar-proxy-write-for": RecordType.writeDelegatorGroup,
    }
    return mapping.get(proxyType)
class DirectoryService(BaseDirectoryService):
    """
    Delegate directory service

    Synthesizes delegate-group records on demand from uids of the form
    "<shortName>#<proxyType>"; actual record data comes from the master
    directory assigned via setMasterDirectory().
    """
    recordType = RecordType

    def __init__(self, realmName, store):
        BaseDirectoryService.__init__(self, realmName)
        self._store = store
        # Set later via setMasterDirectory().
        self._masterDirectory = None

    def setMasterDirectory(self, masterDirectory):
        self._masterDirectory = masterDirectory

    def recordWithShortName(self, recordType, shortName, timeoutSeconds=None):
        # Records are synthesized rather than looked up anywhere.
        uid = shortName + "#" + recordTypeToProxyType(recordType)
        record = DirectoryRecord(self, {
            FieldName.uid: uid,
            FieldName.recordType: recordType,
            FieldName.shortNames: (shortName,),
        })
        return succeed(record)

    def recordWithUID(self, uid, timeoutSeconds=None):
        if "#" not in uid:  # Not a delegate group uid
            return succeed(None)
        uid, proxyType = uid.split("#")
        recordType = proxyTypeToRecordType(proxyType)
        if recordType is None:
            return succeed(None)
        return self.recordWithShortName(
            recordType, uid, timeoutSeconds=timeoutSeconds
        )

    @inlineCallbacks
    def recordsFromExpression(
        self, expression, recordTypes=None, records=None,
        limitResults=None, timeoutSeconds=None
    ):
        """
        It's only ever appropriate to look up delegate group record by
        shortName or uid. When wrapped by an aggregate directory, looking up
        by shortName will already go directly to recordWithShortName. However
        when looking up by UID, it won't. Inspect the expression to see if
        it's one we can handle.
        """
        if isinstance(expression, MatchExpression):
            if (
                (expression.fieldName is FieldName.uid) and
                (expression.matchType is MatchType.equals) and
                ("#" in expression.fieldValue)
            ):
                record = yield self.recordWithUID(
                    expression.fieldValue, timeoutSeconds=timeoutSeconds
                )
                if record is not None:
                    returnValue((record,))
        returnValue(())
class CachingDelegates(object):
"""
Manages access to the store's delegates API, including caching of results.
"""
cacheNotifier = None
class DelegatesMemcacher(Memcacher):
    """
    Memcache wrapper storing delegate 'members' and 'memberships'
    UID sets as comma-joined UTF-8 strings.
    """

    def __init__(self, namespace):
        super(CachingDelegates.DelegatesMemcacher, self).__init__(namespace, key_normalization=True)

    def _key(self, keyname, uid, readWrite, expanded):
        # Key layout: "<kind>[-expanded]:<uid>#<write|read>"
        return "{}{}:{}#{}".format(
            keyname,
            "-expanded" if expanded else "",
            uid.encode("utf-8"),
            "write" if readWrite else "read",
        )

    def _membersKey(self, uid, readWrite, expanded):
        return self._key("members", uid, readWrite, expanded)

    def _membershipsKey(self, uid, readWrite):
        # Membership sets have no expanded variant.
        return self._key("memberships", uid, readWrite, False)

    def setMembers(self, uid, readWrite, members, expanded):
        return self.set(
            self._membersKey(uid, readWrite, expanded),
            ",".join(members).encode("utf-8"),
        )

    def setMemberships(self, uid, readWrite, memberships):
        return self.set(
            self._membershipsKey(uid, readWrite),
            ",".join(memberships).encode("utf-8"),
        )

    @staticmethod
    def _value_decode(value):
        # Distinguish a cache miss (None) from a cached empty set ("").
        if value:
            return set(value.decode("utf-8").split(","))
        elif value is None:
            return None
        else:
            return set()

    @inlineCallbacks
    def getMembers(self, uid, readWrite, expanded):
        value = yield self.get(self._membersKey(uid, readWrite, expanded))
        returnValue(self._value_decode(value))

    @inlineCallbacks
    def getMemberships(self, uid, readWrite):
        value = yield self.get(self._membershipsKey(uid, readWrite))
        returnValue(self._value_decode(value))

    @inlineCallbacks
    def deleteMember(self, uid, readWrite):
        """
        Delete both the regular and expanded keys.
        """
        yield self.delete(self._membersKey(uid, readWrite, False))
        yield self.delete(self._membersKey(uid, readWrite, True))

    @inlineCallbacks
    def deleteMembership(self, uid, readWrite):
        """
        Delete the memberships key (it has no expanded variant).
        """
        yield self.delete(self._membershipsKey(uid, readWrite))
def __init__(self):
self._memcacher = CachingDelegates.DelegatesMemcacher("DelegatesDB")
@inlineCallbacks
def setDelegates(self, txn, delegator, delegates, readWrite):
"""
Sets the full set of delegates for a delegator.
We need to take multiple pods into account by re-directing this request
to the cross-pod conduit if the delegator is not local to this pod.
@param delegator: the delegator's directory record
@type delegator: L{IDirectoryRecord}
@param delegates: the delegates directory records
@type delegates: L{list}} of L{IDirectoryRecord}
@param readWrite: if True, read and write access is granted; read-only
access otherwise
"""
existingDelegates = yield self.delegatesOf(txn, delegator, readWrite)
if delegator.thisServer():
# Remove some
for delegate in set(existingDelegates) - set(delegates):
yield self.removeDelegate(txn, delegator, delegate, readWrite)
for delegate in set(delegates) - set(existingDelegates):
yield self.addDelegate(txn, delegator, delegate, readWrite)
else:
yield self._podSetDelegates(txn, delegator, delegates, readWrite)
@inlineCallbacks
def addDelegate(self, txn, delegator, delegate, readWrite):
"""
Adds "delegate" as a delegate of "delegator". The type of access is
specified by the "readWrite" parameter.
@param delegator: the delegator's directory record
@type delegator: L{IDirectoryRecord}
@param delegate: the delegate's directory record
@type delegate: L{IDirectoryRecord}
@param readWrite: if True, read and write access is granted; read-only
access otherwise
"""
# Never add the delegator as a delegate
if delegator.uid == delegate.uid:
returnValue(None)
existingDelegateUIDs = yield self._delegatesOfUIDs(txn, delegator, readWrite, expanded=True)
if delegate.recordType == BaseRecordType.group:
# find the groupID
group = yield txn.groupByUID(delegate.uid)
yield txn.addDelegateGroup(delegator.uid, group.groupID, readWrite)
else:
yield txn.addDelegate(delegator.uid, delegate.uid, readWrite)
# Make sure notifications are sent
if self.cacheNotifier is not None:
yield self.cacheNotifier.changed("/principals/__uids__/{}/".format(delegator.uid))
yield self.cacheNotifier.changed("/principals/__uids__/{}/".format(delegate.uid))
# Update cache (remove the member cache entry first as we need to recalculate it for
# memberships removal)
yield self._memcacher.deleteMember(delegator.uid, readWrite)
newDelegateUIDs = yield self._delegatesOfUIDs(txn, delegator, readWrite, expanded=True)
for uid in set(newDelegateUIDs) - set(existingDelegateUIDs):
yield self._memcacher.deleteMembership(uid, readWrite)
@inlineCallbacks
def removeDelegate(self, txn, delegator, delegate, readWrite):
"""
Removes "delegate" as a delegate of "delegator". The type of access is
specified by the "readWrite" parameter.
@param delegator: the delegator's directory record
@type delegator: L{IDirectoryRecord}
@param delegate: the delegate's directory record
@type delegate: L{IDirectoryRecord}
@param readWrite: if True, read and write access is revoked; read-only
access otherwise
"""
# Never remove the delegator as a delegate
if delegator.uid == delegate.uid:
returnValue(None)
existingDelegateUIDs = yield self._delegatesOfUIDs(txn, delegator, readWrite, expanded=True)
if delegate.recordType == BaseRecordType.group:
# find the groupID
group = yield txn.groupByUID(delegate.uid)
yield txn.removeDelegateGroup(delegator.uid, group.groupID, readWrite)
else:
yield txn.removeDelegate(delegator.uid, delegate.uid, readWrite)
# Make sure notifications are sent
if self.cacheNotifier is not None:
yield self.cacheNotifier.changed("/principals/__uids__/{}/".format(delegator.uid))
yield self.cacheNotifier.changed("/principals/__uids__/{}/".format(delegate.uid))
# Update cache (remove the member cache entry first as we need to recalculate it for
# memberships removal)
yield self._memcacher.deleteMember(delegator.uid, readWrite)
newDelegateUIDs = yield self._delegatesOfUIDs(txn, delegator, readWrite, expanded=True)
for uid in set(existingDelegateUIDs) - set(newDelegateUIDs):
yield self._memcacher.deleteMembership(uid, readWrite)
@inlineCallbacks
def groupChanged(self, txn, groupID, addedUIDs, removedUIDs):
"""
A group has changed. We need to see which delegators might be using this group
and invalidate caches.
@param groupID: group id of group that changed
@type groupID: L{str}
@param addedUIDs: set of new member UIDs added to the group
@type addedUIDs: L{set} of L{str}
@param removedUIDs: set of old member UIDs removed from the group
@type removedUIDs: L{set} of L{str}
"""
# Remove member cache entry for delegators using the group
delegators = set()
for readWrite in (True, False):
delegators.update((yield txn.delegatorsToGroup(groupID, readWrite)))
for delegator in delegators:
yield self._memcacher.deleteMember(delegator, True)
yield self._memcacher.deleteMember(delegator, False)
# Remove membership cache entries for added/removed delegates
for delegate in (addedUIDs | removedUIDs):
yield self._memcacher.deleteMembership(delegate, True)
yield self._memcacher.deleteMembership(delegate, False)
@inlineCallbacks
def delegatesOf(self, txn, delegator, readWrite, expanded=False):
"""
Return the records of the delegates of "delegator". The type of access
is specified by the "readWrite" parameter.
@param delegator: the delegator's directory record
@type delegator: L{IDirectoryRecord}
@param readWrite: if True, read and write access delegates are returned;
read-only access otherwise
@return: the set of directory records
@rtype: a Deferred which fires a set of L{IDirectoryRecord}
"""
delegateUIDs = yield self._delegatesOfUIDs(txn, delegator, readWrite, expanded)
records = []
directory = delegator.service
for uid in delegateUIDs:
if uid != delegator.uid:
record = (yield directory.recordWithUID(uid))
if record is not None:
records.append(record)
returnValue(records)
@inlineCallbacks
def delegatedTo(self, txn, delegate, readWrite):
"""
Return the records of those who have delegated to "delegate". The type of
access is specified by the "readWrite" parameter.
@param delegate: the delegate's directory record
@type delegate: L{IDirectoryRecord}
@param readWrite: if True, read and write access delegators are returned;
read-only access otherwise
@return: the set of directory records
@rtype: a Deferred which fires a set of L{IDirectoryRecord}
"""
delegatorUIDs = yield self._delegatedToUIDs(txn, delegate, readWrite)
records = []
directory = delegate.service
for uid in delegatorUIDs:
if uid != delegate.uid:
record = (yield directory.recordWithUID(uid))
if record is not None:
records.append(record)
returnValue(records)
@inlineCallbacks
def _delegatesOfUIDs(self, txn, delegator, readWrite, expanded=False):
"""
Return the UIDs of the delegates of "delegator". The type of access
is specified by the "readWrite" parameter.
We need to take multiple pods into account by re-directing this request
to the cross-pod conduit if the delegator is not local to this pod.
@param delegator: the delegator's directory record
@type delegator: L{IDirectoryRecord}
@param readWrite: if True, read and write access delegates are returned;
read-only access otherwise
@return: the set of directory record uids
@rtype: a Deferred which fires a set of L{str}
"""
# Try cache first
delegateUIDs = yield self._memcacher.getMembers(delegator.uid, readWrite, expanded)
if delegateUIDs is not None:
log.debug("_delegatesOfUIDs cached for: {uid} and read-write = {rw} and expanded = {expanded}", uid=delegator.uid, rw=readWrite, expanded=expanded)
returnValue(delegateUIDs)
# Get from the store
log.debug("_delegatesOfUIDs for: {uid} and read-write = {rw} and expanded = {expanded}", uid=delegator.uid, rw=readWrite, expanded=expanded)
if delegator.thisServer():
delegateUIDs = yield txn.delegates(delegator.uid, readWrite, expanded=expanded)
# Cache result - only need to do this on the host
yield self._memcacher.setMembers(delegator.uid, readWrite, delegateUIDs, expanded)
else:
delegateUIDs = yield self._podDelegates(txn, delegator, readWrite, expanded=expanded)
returnValue(delegateUIDs)
@inlineCallbacks
def _delegatedToUIDs(self, txn, delegate, readWrite, onlyThisServer=False):
"""
Return the UIDs of those who have delegated to "delegate". The type of
access is specified by the "readWrite" parameter.
We need to take multiple pods into account by re-directing this request
to the cross-pod conduit if the delegate is not local to this pod.
@param delegate: the delegate's directory record
@type delegate: L{IDirectoryRecord}
@param readWrite: if True, read and write access delegators are returned;
read-only access otherwise
@param onlyThisServer: used when doing the query as part of a cross-pod request since that
should only returns results for this server
@type onlyThisServer: L{bool}
@return: the set of directory record uids
@rtype: a Deferred which fires a set of L{str}
"""
# Try cache first
delegatorUIDs = yield self._memcacher.getMemberships(delegate.uid, readWrite)
if delegatorUIDs is not None:
log.debug("_delegatedToUIDs cached for: {uid} and read-write = {rw}", uid=delegate.uid, rw=readWrite)
returnValue(delegatorUIDs)
# Get from the store
log.debug("_delegatedToUIDs for: {uid} and read-write = {rw}", uid=delegate.uid, rw=readWrite)
delegatorUIDs = (yield txn.delegators(delegate.uid, readWrite))
if not onlyThisServer and config.Servers.Enabled:
delegatorUIDs.update((yield self._podDelegators(txn, delegate, readWrite)))
# Cache result - only need to do this on the host
yield self._memcacher.setMemberships(delegate.uid, readWrite, delegatorUIDs)
returnValue(delegatorUIDs)
def _podSetDelegates(self, txn, delegator, delegates, readWrite):
"""
Sets the full set of delegates for a delegator.
We need to take multiple pods into account by re-directing this request
to the cross-pod conduit if the delegator is not local to this pod.
@param delegator: the delegator's directory record
@type delegator: L{IDirectoryRecord}
@param delegates: the delegates directory records
@type delegates: L{list}} of L{IDirectoryRecord}
@param readWrite: if True, read and write access is granted; read-only
access otherwise
"""
if delegator.server().v5:
return succeed(None)
else:
return txn.store().conduit.send_set_delegates(txn, delegator, delegates, readWrite)
def _podDelegates(self, txn, delegator, readWrite, expanded=False):
"""
Do a cross-pod request to get the delegates for this delegator.
@param delegator: the delegator's directory record
@type delegator: L{IDirectoryRecord}
@param readWrite: if True, read and write access delegates are returned;
read-only access otherwise
@return: the set of directory record uids
@rtype: a Deferred which fires a set of L{str}
"""
log.debug("_podDelegates for: {uid} and read-write = {rw} and expanded = {expanded}", uid=delegator.uid, rw=readWrite, expanded=expanded)
if delegator.server().v5:
return succeed(set())
else:
return txn.store().conduit.send_get_delegates(txn, delegator, readWrite, expanded)
@inlineCallbacks
def _podDelegators(self, txn, delegate, readWrite):
"""
Do a cross-pod request to get the delegators for this delegate. We need to iterate over all
other pod servers to get results from each one.
@param delegate: the delegate's directory record
@type delegate: L{IDirectoryRecord}
@param readWrite: if True, read and write access delegates are returned;
read-only access otherwise
@return: the set of directory record uids
@rtype: a Deferred which fires a set of L{str}
"""
log.debug("_podDelegators for: {uid} and read-write = {rw}", uid=delegate.uid, rw=readWrite)
otherServers = txn.directoryService().serversDB().allServersExceptThis(filter_v5=True)
if len(otherServers) != 0:
results = yield DeferredList([
txn.store().conduit.send_get_delegators(txn, server, delegate, readWrite) for
server in txn.directoryService().serversDB().allServersExceptThis()
], consumeErrors=True)
else:
results = []
delegators = set()
for result in results:
if result and result[0]:
delegators.update(result[1])
returnValue(delegators)
@inlineCallbacks
def invalidateExternalAssignment(self, txn, delegatorUID, readDelegateUID, writeDelegateUID, previousReadDelegateUID, previousWriteDelegateUID):
"""
Invalidate the relevant memcache entries containing delegation info
"""
yield self._memcacher.deleteMember(delegatorUID, False)
yield self._memcacher.deleteMember(delegatorUID, True)
if previousReadDelegateUID:
yield self.deleteMembershipForGroup(txn, previousReadDelegateUID, False)
if previousWriteDelegateUID:
yield self.deleteMembershipForGroup(txn, previousWriteDelegateUID, True)
if readDelegateUID:
yield self.deleteMembershipForGroup(txn, readDelegateUID, False)
if writeDelegateUID:
yield self.deleteMembershipForGroup(txn, writeDelegateUID, True)
@inlineCallbacks
def deleteMembershipForGroup(self, txn, groupUID, readWrite):
if groupUID:
log.debug("Invalidating memcached delegate membership for group {group}, r/w={readWrite}", group=groupUID, readWrite=readWrite)
yield self._memcacher.deleteMembership(groupUID, readWrite)
group = yield txn.groupByUID(groupUID, create=False)
if group is not None:
uids = yield txn.groupMemberUIDs(group.groupID)
for uid in uids:
log.debug("Invalidating memcached delegate membership for user {user}, r/w={readWrite}", user=uid, readWrite=readWrite)
yield self._memcacher.deleteMembership(uid, readWrite)
# Module-level singleton used to access the delegates API with caching.
Delegates = CachingDelegates()
| |
###########################################################################
# Joshua R. Boverhof, LBNL
# See Copyright for copyright notice!
# $Id: WSsecurity.py 1134 2006-02-24 00:23:06Z boverhof $
###########################################################################
import sys, time, warnings
import sha, base64
# twisted & related imports
from zope.interface import classProvides, implements, Interface
from twisted.python import log, failure
from twisted.web.error import NoResource
from twisted.web.server import NOT_DONE_YET
from twisted.internet import reactor
import twisted.web.http
import twisted.web.resource
# ZSI imports
from pyremotevbox.ZSI import _get_element_nsuri_name, EvaluateException, ParseException
from pyremotevbox.ZSI.parse import ParsedSoap
from pyremotevbox.ZSI.writer import SoapWriter
from pyremotevbox.ZSI.TC import _get_global_element_declaration as GED
from pyremotevbox.ZSI import fault
from pyremotevbox.ZSI.wstools.Namespaces import OASIS, DSIG
from WSresource import DefaultHandlerChain, HandlerChainInterface,\
WSAddressCallbackHandler, DataHandler, WSAddressHandler
#
# Global Element Declarations
#
# Global element declarations resolved from the generated WS-Security and
# XML-DSIG schema modules; each is a typecode used to parse the matching
# SOAP header element.
UsernameTokenDec = GED(OASIS.WSSE, "UsernameToken")
SecurityDec = GED(OASIS.WSSE, "Security")
SignatureDec = GED(DSIG.BASE, "Signature")
PasswordDec = GED(OASIS.WSSE, "Password")
NonceDec = GED(OASIS.WSSE, "Nonce")
CreatedDec = GED(OASIS.UTILITY, "Created")

# Fail fast at import time if any required schema module is missing.
if None in [UsernameTokenDec,SecurityDec,SignatureDec,PasswordDec,NonceDec,CreatedDec]:
    raise ImportError, 'required global element(s) unavailable: %s ' %({
        (OASIS.WSSE, "UsernameToken"):UsernameTokenDec,
        (OASIS.WSSE, "Security"):SecurityDec,
        (DSIG.BASE, "Signature"):SignatureDec,
        (OASIS.WSSE, "Password"):PasswordDec,
        (OASIS.WSSE, "Nonce"):NonceDec,
        (OASIS.UTILITY, "Created"):CreatedDec,
        })
#
# Stability: Unstable, Untested, Not Finished.
#
class WSSecurityHandler:
"""Web Services Security: SOAP Message Security 1.0
Class Variables:
debug -- If True provide more detailed SOAP:Fault information to clients.
"""
classProvides(HandlerChainInterface)
debug = True
@classmethod
def processRequest(cls, ps, **kw):
if type(ps) is not ParsedSoap:
raise TypeError,'Expecting ParsedSoap instance'
security = ps.ParseHeaderElements([cls.securityDec])
# Assume all security headers are supposed to be processed here.
for pyobj in security or []:
for any in pyobj.Any or []:
if any.typecode is UsernameTokenDec:
try:
ps = cls.UsernameTokenProfileHandler.processRequest(ps, any)
except Exception, ex:
if cls.debug: raise
raise RuntimeError, 'Unauthorized Username/passphrase combination'
continue
if any.typecode is SignatureDec:
try:
ps = cls.SignatureHandler.processRequest(ps, any)
except Exception, ex:
if cls.debug: raise
raise RuntimeError, 'Invalid Security Header'
continue
raise RuntimeError, 'WS-Security, Unsupported token %s' %str(any)
return ps
@classmethod
def processResponse(cls, output, **kw):
return output
class UsernameTokenProfileHandler:
"""Web Services Security UsernameToken Profile 1.0
Class Variables:
targetNamespace --
"""
classProvides(HandlerChainInterface)
# Class Variables
targetNamespace = OASIS.WSSE
sweepInterval = 60*5
nonces = None
# Set to None to disable
PasswordText = targetNamespace + "#PasswordText"
PasswordDigest = targetNamespace + "#PasswordDigest"
# Override passwordCallback
passwordCallback = lambda cls,username: None
@classmethod
def sweep(cls, index):
"""remove nonces every sweepInterval.
Parameters:
index -- remove all nonces up to this index.
"""
if cls.nonces is None:
cls.nonces = []
seconds = cls.sweepInterval
cls.nonces = cls.nonces[index:]
reactor.callLater(seconds, cls.sweep, len(cls.nonces))
@classmethod
def processRequest(cls, ps, token, **kw):
"""
Parameters:
ps -- ParsedSoap instance
token -- UsernameToken pyclass instance
"""
if token.typecode is not UsernameTokenDec:
raise TypeError, 'expecting GED (%s,%s) representation.' %(
UsernameTokenDec.nspname, UsernameTokenDec.pname)
username = token.Username
# expecting only one password
# may have a nonce and a created
password = nonce = timestamp = None
for any in token.Any or []:
if any.typecode is PasswordDec:
password = any
continue
if any.typecode is NonceTypeDec:
nonce = any
continue
if any.typecode is CreatedTypeDec:
timestamp = any
continue
raise TypeError, 'UsernameTokenProfileHander unexpected %s' %str(any)
if password is None:
raise RuntimeError, 'Unauthorized, no password'
# TODO: not yet supporting complexType simpleContent in pyclass_type
attrs = getattr(password, password.typecode.attrs_aname, {})
pwtype = attrs.get('Type', cls.PasswordText)
# Clear Text Passwords
if cls.PasswordText is not None and pwtype == cls.PasswordText:
if password == cls.passwordCallback(username):
return ps
raise RuntimeError, 'Unauthorized, clear text password failed'
if cls.nonces is None: cls.sweep(0)
if nonce is not None:
if nonce in cls.nonces:
raise RuntimeError, 'Invalid Nonce'
# created was 10 seconds ago or sooner
if created is not None and created < time.gmtime(time.time()-10):
raise RuntimeError, 'UsernameToken created is expired'
cls.nonces.append(nonce)
# PasswordDigest, recommended that implemenations
# require a Nonce and Created
if cls.PasswordDigest is not None and pwtype == cls.PasswordDigest:
digest = sha.sha()
for i in (nonce, created, cls.passwordCallback(username)):
if i is None: continue
digest.update(i)
if password == base64.encodestring(digest.digest()).strip():
return ps
raise RuntimeError, 'Unauthorized, digest failed'
raise RuntimeError, 'Unauthorized, contents of UsernameToken unknown'
@classmethod
def processResponse(cls, output, **kw):
return output
@staticmethod
def hmac_sha1(xml):
return
class SignatureHandler:
"""Web Services Security UsernameToken Profile 1.0
"""
digestMethods = {
DSIG.BASE+"#sha1":sha.sha,
}
signingMethods = {
DSIG.BASE+"#hmac-sha1":hmac_sha1,
}
canonicalizationMethods = {
DSIG.C14N_EXCL:lambda node: Canonicalize(node, unsuppressedPrefixes=[]),
DSIG.C14N:lambda node: Canonicalize(node),
}
@classmethod
def processRequest(cls, ps, signature, **kw):
"""
Parameters:
ps -- ParsedSoap instance
signature -- Signature pyclass instance
"""
if token.typecode is not SignatureDec:
raise TypeError, 'expecting GED (%s,%s) representation.' %(
SignatureDec.nspname, SignatureDec.pname)
si = signature.SignedInfo
si.CanonicalizationMethod
calgo = si.CanonicalizationMethod.get_attribute_Algorithm()
for any in si.CanonicalizationMethod.Any:
pass
# Check Digest
si.Reference
context = XPath.Context.Context(ps.dom, processContents={'wsu':OASIS.UTILITY})
exp = XPath.Compile('//*[@wsu:Id="%s"]' %si.Reference.get_attribute_URI())
nodes = exp.evaluate(context)
if len(nodes) != 1:
raise RuntimeError, 'A SignedInfo Reference must refer to one node %s.' %(
si.Reference.get_attribute_URI())
try:
xml = cls.canonicalizeMethods[calgo](nodes[0])
except IndexError:
raise RuntimeError, 'Unsupported canonicalization algorithm'
try:
digest = cls.digestMethods[salgo]
except IndexError:
raise RuntimeError, 'unknown digestMethods Algorithm'
digestValue = base64.encodestring(digest(xml).digest()).strip()
if si.Reference.DigestValue != digestValue:
raise RuntimeError, 'digest does not match'
if si.Reference.Transforms:
pass
signature.KeyInfo
signature.KeyInfo.KeyName
signature.KeyInfo.KeyValue
signature.KeyInfo.RetrievalMethod
signature.KeyInfo.X509Data
signature.KeyInfo.PGPData
signature.KeyInfo.SPKIData
signature.KeyInfo.MgmtData
signature.KeyInfo.Any
signature.Object
# TODO: Check Signature
signature.SignatureValue
si.SignatureMethod
salgo = si.SignatureMethod.get_attribute_Algorithm()
if si.SignatureMethod.HMACOutputLength:
pass
for any in si.SignatureMethod.Any:
pass
# <SignedInfo><Reference URI="">
exp = XPath.Compile('//child::*[attribute::URI = "%s"]/..' %(
si.Reference.get_attribute_URI()))
nodes = exp.evaluate(context)
if len(nodes) != 1:
raise RuntimeError, 'A SignedInfo Reference must refer to one node %s.' %(
si.Reference.get_attribute_URI())
try:
xml = cls.canonicalizeMethods[calgo](nodes[0])
except IndexError:
raise RuntimeError, 'Unsupported canonicalization algorithm'
# TODO: Check SignatureValue
@classmethod
def processResponse(cls, output, **kw):
return output
class X509TokenProfileHandler:
"""Web Services Security UsernameToken Profile 1.0
"""
targetNamespace = DSIG.BASE
# Token Types
singleCertificate = targetNamespace + "#X509v3"
certificatePath = targetNamespace + "#X509PKIPathv1"
setCerticatesCRLs = targetNamespace + "#PKCS7"
@classmethod
def processRequest(cls, ps, signature, **kw):
return ps
"""
<element name="KeyInfo" type="ds:KeyInfoType"/>
<complexType name="KeyInfoType" mixed="true">
<choice maxOccurs="unbounded">
<element ref="ds:KeyName"/>
<element ref="ds:KeyValue"/>
<element ref="ds:RetrievalMethod"/>
<element ref="ds:X509Data"/>
<element ref="ds:PGPData"/>
<element ref="ds:SPKIData"/>
<element ref="ds:MgmtData"/>
<any processContents="lax" namespace="##other"/>
<!-- (1,1) elements from (0,unbounded) namespaces -->
</choice>
<attribute name="Id" type="ID" use="optional"/>
</complexType>
<element name="Signature" type="ds:SignatureType"/>
<complexType name="SignatureType">
<sequence>
<element ref="ds:SignedInfo"/>
<element ref="ds:SignatureValue"/>
<element ref="ds:KeyInfo" minOccurs="0"/>
<element ref="ds:Object" minOccurs="0" maxOccurs="unbounded"/>
</sequence>
<attribute name="Id" type="ID" use="optional"/>
</complexType>
<element name="SignatureValue" type="ds:SignatureValueType"/>
<complexType name="SignatureValueType">
<simpleContent>
<extension base="base64Binary">
<attribute name="Id" type="ID" use="optional"/>
</extension>
</simpleContent>
</complexType>
<!-- Start SignedInfo -->
<element name="SignedInfo" type="ds:SignedInfoType"/>
<complexType name="SignedInfoType">
<sequence>
<element ref="ds:CanonicalizationMethod"/>
<element ref="ds:SignatureMethod"/>
<element ref="ds:Reference" maxOccurs="unbounded"/>
</sequence>
<attribute name="Id" type="ID" use="optional"/>
</complexType>
"""
class WSSecurityHandlerChainFactory:
    """Factory producing the default WS-Security handler chain."""

    # Chain implementation to instantiate.
    protocol = DefaultHandlerChain

    @classmethod
    def newInstance(cls):
        """Return a new chain: addressing callback, data, security, addressing."""
        handlers = (WSAddressCallbackHandler, DataHandler,
                    WSSecurityHandler, WSAddressHandler())
        return cls.protocol(*handlers)
| |
#!/usr/bin/env python2
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Run transperf's orchestrator."""
import calendar
import datetime
import getopt
import logging
import math
import os
import random
import socket
import sys
import time
import xmlrpclib
from transperf import bits
from transperf import cfgutil
from transperf import ip_modes
from transperf import log
from transperf import parse_ip_map
from transperf import path
from transperf import shell
LOG = logging.getLogger('transperf/orchestrator')
def _load_configs():
    """Loads and yields all the config files in the __config directory.

    Yields:
        One parsed config object per script file, in sorted filename order.
    """
    script_dir = os.path.dirname(os.path.abspath(__file__))
    cfg_files = path.list_files(os.path.join(script_dir, '__config'))
    # Load the configuration scripts (eg, cfg0.py, cfg1.py, ...) in order.
    # Use a context manager so file handles are closed deterministically;
    # the original open(f).read() relied on refcounting to close them.
    cfg_scripts = []
    for cfg_file in sorted(cfg_files):
        with open(cfg_file, 'r') as f:
            cfg_scripts.append(f.read())
    for script in cfg_scripts:
        yield cfgutil.config_from_script(script)
def _allocate_rand_portrange(exp, saddrs):
    """Allocates a random port range for the experiment.

    This is a bit complicated because of the policer tc filters. For an
    experiment that needs P ports, we allocate a range with the next higher
    power of two of P. For example, if we need 6 ports, we allocate a port range
    that can accommodate 8 ports. This is because we need to police a group of
    flows using masked ports.

    To do this we first find the size of the port range. Let's say it requires
    B bits. We fill the higher 16-B bits with a random number. Because the port
    numbers must be larger than 1024, we select a random number larger than
    (1024 >> B) and lower than ((65536-1024) >> B).

    Note: we limit the maximum number of ports at 1024. This gives us a 64 bit
    random ranges at the most least.

    Note: When there is no policer we match on exact ports. Thus, the whole
    range is only matched when we have a policer.

    Args:
        exp: The experiment.
        saddrs: The list of sender addresses.

    Returns:
        Tuples of ports to sender addresses.

    Raises:
        RuntimeError: When encountered a critial error.
    """
    cnt = 0
    for s in range(exp.nsenders()):
        sconns, _, _ = exp.sender_info(s)
        # Count the number of connections in all conns.
        cnt += sum([c[1] for c in sconns])

    # Round up to a power of two so tc filters can match the range via a mask.
    cnt = bits.next_power_of_two(cnt)
    if cnt >= 1024:
        raise RuntimeError('upto 1024 ports is supported')

    # Pick a random, cnt-aligned base so every port falls in [1024, 0xFBFF].
    zbits = bits.trailing_zero_bits(cnt)
    start = 1024 >> zbits
    end = 0xFBFF >> zbits
    next_port = random.randint(start, end) << zbits
    port_to_addr = []
    for s in range(exp.nsenders()):
        sconns, _, _ = exp.sender_info(s)
        for c in sconns:
            # NOTE(review): one port is recorded per connection *tuple* here,
            # while cnt above sums the per-tuple connection counts (c[1]) --
            # confirm callers account for multi-connection tuples.
            port_to_addr.append((next_port, saddrs[s]))
            next_port += 1
    return port_to_addr
def _validate_netperf(exp):
    """Validates netperf binary to make sure it has all the options we need.

    Args:
        exp: The experiment object.

    Returns:
        The error message, if netperf cannot run the experiment.
    """
    # Interval (burst) support only matters if some connection uses bursts.
    burst_conns = [conn for conn in exp.conn.conn_list if conn.burst_tuple()]
    if not burst_conns:
        return None

    # Probe netperf with the interval flags and inspect its complaint, if any.
    output, _, _ = shell.run(path.netperf() + ' -b 1 -w 1 -H 1 -p 1')
    if 'not compiled in' not in output:
        return None
    return 'netperf is not compiled with interval support'
def _run_experiment(exp, out_dir, out_dir_rel, rproxy, sproxies,
                    sslog_interval):
    """Runs an experiment.

    Args:
        exp: The experiment object.
        out_dir: The base output directory for all experiments.
        out_dir_rel: The relative output directory for this experiment.
        rproxy: The receiver proxy.
        sproxies: Tuples of sender address and sender proxy.
        sslog_interval: The time interval in seconds to sample ss log.

    Raises:
        RuntimeError: When encountered a critial error.
    """
    LOG.info('Run experiment, log base dir %s relative dir %s',
             out_dir, out_dir_rel)
    nsenders = exp.nsenders()
    if nsenders > len(sproxies):
        # TODO(soheil): Find a better log message for this.
        raise RuntimeError('experiment %s: needs %d senders' % (exp, nsenders))

    np_err = _validate_netperf(exp)
    if np_err:
        raise RuntimeError('experiment %s: %s' % (exp, np_err))

    # Reset all machines and configure ss sampling before starting.
    rproxy.reset(exp.cmd)
    for _, sproxy in sproxies:
        sproxy.reset(exp.cmd)
        sproxy.set_ss(sslog_interval)

    out_dir_exp = os.path.join(out_dir, '__out', out_dir_rel)
    if not os.path.exists(out_dir_exp):
        os.makedirs(out_dir_exp)

    port_to_addr = _allocate_rand_portrange(exp,
                                            [addr.rsplit(':', 1)[0]
                                             for addr, _ in sproxies])
    LOG.debug('receiver machine_cmds:\n%s', exp.cmds_of_receiver())
    rproxy.set_exp_info(exp.bw_infos(), exp.buf, exp.loss, exp.out_loss,
                        exp.slot_info(), exp.policer_info(), port_to_addr,
                        exp.cmds_of_receiver())

    # Hand each sender its commands/connections and tell the receiver which
    # port window belongs to which sender.
    conn_port = port_to_addr[0][0]
    for j, (_, sproxy) in enumerate(sproxies[:nsenders]):
        sconns, rtts, scmds = exp.sender_info(j)
        # Count the number of connections in all conns.
        LOG.debug('sender(%s), machine_cmds:%s', j, scmds)
        cnt = sum([c[1] for c in sconns])
        err = sproxy.set_cmds(scmds)
        if err:
            raise RuntimeError('error in set_cmds: %s' % err)
        err = sproxy.set_conns(sconns, conn_port)
        if err:
            raise RuntimeError('error in set_conns: %s' % err)
        err = rproxy.set_sender_info(j, (conn_port, conn_port + cnt),
                                     rtts)
        if err:
            raise RuntimeError('error in set_sender_info: %s' % err)
        conn_port += cnt

    try:
        changed = rproxy.setup_ifaces(nsenders)
    except Exception:
        # Fix: catch Exception instead of a bare except so KeyboardInterrupt
        # and SystemExit still propagate. It is very likely the interface has
        # gone down and we received a timeout here. Let's wait for the
        # interface to come back up again.
        changed = True

    # We need to sleep for 30s here because changing LRO/GRO on some platforms
    # can take cycle the interface (down and up again).
    if changed:
        time.sleep(30)

    # 2s grace period for each machine.
    grace = 2 + int(math.ceil(nsenders * 2))
    utc_start = datetime.datetime.utcnow()
    start_ts = calendar.timegm(utc_start.utctimetuple()) + grace
    end_ts = start_ts + exp.dur

    err = rproxy.run(exp.all_tools(), start_ts, exp.dur, nsenders,
                     os.path.join(out_dir_rel, 'R'))
    if err:
        raise RuntimeError('cannot start receiver: %s' % err)
    for j, (_, sproxy) in enumerate(sproxies[:nsenders]):
        err = sproxy.run(start_ts, exp.dur, os.path.join(out_dir_rel, str(j)))
        if err:
            raise RuntimeError('cannot start sender: %s' % err)

    # Wait out the experiment duration, then collect the senders.
    now = calendar.timegm(datetime.datetime.utcnow().utctimetuple())
    if now < end_ts:
        time.sleep(end_ts - now)
    for _, sproxy in sproxies[:nsenders]:
        sproxy.maybe_join()

    exp_if_path = os.path.join(out_dir_exp, 'exp.info')
    LOG.info('Writing experiment info to %s', exp_if_path)
    # Fix: use a context manager so the file is closed even if the write raises.
    with open(exp_if_path, 'w') as expif:
        expif.write(exp.pretty_str())
def print_usage():
    """Prints how to use orch.py."""
    # Triple-quoted literal: the help text layout is part of the string.
    print '''./orch.py [options] -r receiver -s sender1,sender2,...
options:
-v: verbose output.
--sslog_interval seconds: set the time interval between two ss commands,
default 0.1. A value <0 disables ss logging.'''
def _replace_host_with_ip(ipport_str, num_try, ip_mode, ip_map):
"""Replaces hostname part of ipport_str to numeric IP and return.
Expects a string of format host:port, otherwise return as is.
Attempts to resolve up to num_try times when a gaierror is encountered.
Does not work with IPv6.
Args:
ipport_str: host:port string.
num_try: number of attempts to resolve before giving up
ip_mode: Whether we are using IPv4 or IPv6.
ip_map: A map from hostname to IP address.
Returns:
IP:port string
"""
ip_port_split = ipport_str.split(':')
if len(ip_port_split) == 2:
for i in range(num_try):
try:
hostname = ip_port_split[0]
port = ip_port_split[1]
if hostname in ip_map:
host_ip = ip_map[hostname]
else:
host_ip = socket.getaddrinfo(hostname, 0, ip_mode,
socket.SOCK_STREAM,
socket.IPPROTO_TCP)[0][4][0]
return '%s:%s' % (host_ip, port)
except (socket.error, socket.herror,
socket.gaierror, socket.timeout) as err:
# Log and retry
LOG.error('name resolution failed (%d/%d) %s %s',
i + 1, num_try, ip_port_split[0], err)
return ipport_str
def _create_proxy_with_retry(url, num_try):
    """Create xmlrpclib.ServerProxy and verify if online with retry.

    Proxy must have a procedure ping() that returns 0.  Because the remote
    proxy may still be launching, the ping is retried with a one-second
    pause between attempts.

    Args:
        url: url of proxy.
        num_try: number of attempts to ping before giving up.

    Returns:
        xmlrpclib.ServerProxy instance if successful, or None.
    """
    for i in range(num_try):
        try:
            proxy = xmlrpclib.ServerProxy(url, allow_none=True)
            if proxy.ping() == 0:
                return proxy
        # Fix: catch Exception instead of a bare except so that
        # KeyboardInterrupt/SystemExit still abort the retry loop.
        except Exception:
            # Log and retry.
            LOG.error('Proxy not ready yet (%d/%d) %s', i + 1, num_try, url)
            time.sleep(1)
    return None
def main():
    """Parses CLI flags, wires sender/receiver RPC proxies together and
    runs every configured experiment.

    Returns:
        0 on success, -1 on unknown flags.
    """
    ip_mode = socket.AF_INET6
    raddr_val = None
    out_dir = None
    hosts = None
    # Fix: these two were only bound inside their option branches, so
    # omitting -s or --sslog_interval caused a NameError further down.
    # 0.1 is the default advertised by print_usage().
    saddrs = []
    sslog_interval = 0.1
    opts, _ = getopt.getopt(sys.argv[1:], 'vr:s:', ['sslog_interval=',
                                                    'ip_mode=',
                                                    'out_dir=',
                                                    'hosts='])
    for opt, val in opts:
        if opt == '-v':
            # Verbosity is consumed by log.setup_logging(opts) below.
            continue
        elif opt == '-r':
            raddr_val = val
        elif opt == '-s':
            saddrs = val.split(',')
        elif opt == '--out_dir':
            out_dir = os.path.abspath(os.path.expanduser(val))
        elif opt == '--sslog_interval':
            sslog_interval = float(val)
        elif opt == '--ip_mode':
            key = int(val)
            assert key in ip_modes, 'ip_mode must be in %s' % str(
                ip_modes.keys())
            ip_mode = ip_modes[key]
        elif opt == '--hosts':
            hosts = os.path.abspath(os.path.expanduser(val))
        else:
            print_usage()
            return -1
    log.setup_logging(opts)
    assert out_dir is not None, 'Missing output directory'
    assert raddr_val is not None, 'Missing receiver address.'
    if hosts is not None:
        ip_map = parse_ip_map(hosts)
        LOG.info('IP map: %s', ip_map)
    else:
        ip_map = {}
        LOG.info('No hosts provided to orchestrator.')
    raddr = _replace_host_with_ip(raddr_val, 3, ip_mode, ip_map)
    cfgs = _load_configs()
    rproxy = _create_proxy_with_retry('http://%s/' % (raddr), 10)
    sproxies = []
    for addr in saddrs:
        addr = _replace_host_with_ip(addr, 3, ip_mode, ip_map)
        sproxy = _create_proxy_with_retry('http://%s/' % (addr), 10)
        # Strip the :port suffix -- the agents register peers by host only.
        receiver = ':'.join(raddr.split(':')[:-1])
        sender = ':'.join(addr.split(':')[:-1])
        sproxy.register_receiver(receiver)
        rproxy.register_sender(sender)
        sproxies.append((addr, sproxy))
    for i, cfg in enumerate(cfgs):
        for exp in cfg.experiments():
            out_dir_rel = os.path.join(str(i), exp.get_dir_name())
            _run_experiment(exp, out_dir, out_dir_rel, rproxy, sproxies,
                            sslog_interval)
    return 0
# Script entry point: propagate main()'s return value as the exit status.
if __name__ == '__main__':
    sys.exit(main())
| |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import functools
import inspect
import os
from initpy.templates import blank, falcon, flask, tornado_web
from initpy.exceptions import RootPathDoesNotExists
def name_validator(func):
    """Decorator validating the 'name' argument of create_* methods.

    When the wrapped call passes validate=True, the name (minus a
    .py/.html/.txt suffix) is rejected if it contains one of the filtered
    special characters or starts with a digit.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Resolve positional/keyword arguments into one mapping so the
        # decorator works for any create_* signature.
        func_args = inspect.getcallargs(func, *args, **kwargs)
        if func_args.get('validate'):
            _filter = "!@#$%^&*()-+=[]{}|\"'."
            name = func_args.get('name').replace('.py', '')\
                .replace('.html', '')\
                .replace('.txt', '')
            # NOTE(review): an empty name would raise IndexError on
            # name[0] before reaching the intended validation error --
            # confirm whether empty names can occur.
            if (len(list(set(list(name)).intersection(list(_filter)))) > 0
                    or name[0].isdigit()):
                # Derives e.g. InvalidFileName / InvalidFolderName from the
                # wrapped function's name and looks it up in this module's
                # globals.  NOTE(review): only RootPathDoesNotExists is
                # imported here -- verify those exception classes are
                # actually in scope at runtime.
                title = func.__name__.split('_')[1].title()
                raise globals()['Invalid' + title + 'Name']
        return func(*args, **kwargs)
    return wrapper
class Creator(object):
    """Base class that creates files, folders and python packages under a
    validated root path, collecting non-fatal problems in self.errors."""

    # Kept for backward compatibility with code that reads these off the
    # class; each instance gets its own errors list in __init__.
    errors = []
    root_path = None

    def __init__(self, root_path):
        """Stores root_path; raises RootPathDoesNotExists if it is not a
        directory."""
        if not os.path.isdir(root_path):
            raise RootPathDoesNotExists
        self.root_path = root_path
        # Fix: `errors` was a mutable class attribute shared by every
        # instance, so messages from one Creator leaked into all others.
        self.errors = []

    @name_validator
    def create_file(self, _path, name, template, validate=True):
        """Writes template to _path/name (name checked by name_validator)."""
        file_path = os.path.join(_path, name)
        with open(file_path, 'w') as _file:
            _file.write(template)

    @name_validator
    def create_folder(self, _path, name, validate=True):
        """Creates _path/name; records (rather than raises) an error when
        the folder already exists."""
        try:
            folder_path = os.path.join(_path, name)
            os.mkdir(folder_path)
        except OSError:
            self.errors.append('Creating skipped: '+name+' already exists')

    def create_module(self, _path, name, template=blank.python):
        """Creates a python package: a folder plus an __init__.py seeded
        with template (validation skipped for the dunder file name)."""
        self.create_folder(_path, name)
        module_path = os.path.join(_path, name)
        self.create_file(module_path, '__init__.py', template, False)
class FlaskCreator(Creator):
    """Scaffolds a Flask project layout under root_path."""

    def create_app(self, _path, module):
        """Builds the app package: static dir, templates and the module."""
        self.create_module(_path, "app",
                           flask.app_init.substitute(module=module))
        app_root = os.path.join(_path, "app")
        self.create_folder(app_root, "static")
        self.create_templates(app_root, module)
        self.create_app_module(app_root, module)

    def create_app_module(self, _path, name):
        """Creates the blueprint package with views and models stubs."""
        self.create_folder(_path, name)
        pkg = os.path.join(_path, name)
        init_body = flask.module_init.substitute(module=name)
        self.create_file(pkg, "__init__.py", init_body, False)
        self.create_file(pkg, "views.py",
                         flask.module_views.substitute(module=name))
        self.create_file(pkg, "models.py", blank.python)

    def create_templates(self, _path, module):
        """Creates templates/, a base layout, and the module's index page."""
        self.create_folder(_path, "templates")
        tpl_root = os.path.join(_path, "templates")
        self.create_file(tpl_root, "base.html", flask.base_html)
        self.create_folder(tpl_root, module)
        self.create_file(os.path.join(tpl_root, module), "index.html",
                         flask.module_html)

    def create_requirements(self, _path):
        """Creates requirements/dev.txt for the project."""
        self.create_folder(_path, "requirements")
        req_dir = os.path.join(_path, "requirements")
        self.create_file(req_dir, "dev.txt", flask.requirements)

    def create_project(self, name, module):
        """Creates the full project: manage.py, app package, requirements."""
        self.create_folder(self.root_path, name)
        proj_root = os.path.join(self.root_path, name)
        self.create_file(proj_root, "manage.py", flask.manager)
        self.create_app(proj_root, module)
        self.create_requirements(proj_root)
class TornadoCreator(Creator):
    """Scaffolds a Tornado project layout under root_path."""

    def create_handlers(self, _path, name):
        """Creates the handlers package with a handler stub for `name`."""
        self.create_module(_path, "handlers")
        pkg = os.path.join(_path, "handlers")
        self.create_file(pkg, name+".py", tornado_web.tornado_handler)

    def create_requirements(self, _path):
        """Creates requirements/dev.txt for the project."""
        self.create_folder(_path, "requirements")
        req_dir = os.path.join(_path, "requirements")
        self.create_file(req_dir, "dev.txt", tornado_web.requirements)

    def create_project(self, name, module):
        """Creates app.py, urls.py, the handlers package and requirements."""
        self.create_folder(self.root_path, name)
        proj_root = os.path.join(self.root_path, name)
        self.create_file(proj_root, "app.py", tornado_web.tornado_app)
        urls_body = tornado_web.tornado_urls.substitute(module=module)
        self.create_file(proj_root, "urls.py", urls_body)
        self.create_handlers(proj_root, module)
        self.create_requirements(proj_root)
class FalconCreator(Creator):
    """Scaffolds a Falcon project layout under root_path."""

    def create_app(self, _path, module):
        """Builds the app package: middleware, models and resources."""
        args = dict(module=module, module_title=module.title())
        self.create_module(_path, "app",
                           falcon.app_init.safe_substitute(args))
        app_root = os.path.join(_path, "app")
        self.create_middleware(app_root)
        self.create_models(app_root, module)
        self.create_app_module(app_root, module)

    def create_app_module(self, _path, name):
        """Creates the resources package with a controller for `name`."""
        args = dict(module=name, module_title=name.title())
        self.create_folder(_path, 'resources')
        pkg = os.path.join(_path, 'resources')
        self.create_file(pkg, "__init__.py",
                         falcon.resource_init.safe_substitute(args), False)
        self.create_file(pkg, "{}.py".format(name),
                         falcon.resource_controller.safe_substitute(args))

    def create_models(self, _path, name):
        """Creates the models package with an empty model stub for `name`."""
        self.create_module(_path, "models")
        pkg = os.path.join(_path, "models")
        self.create_file(pkg, "__init__.py", blank.python)
        self.create_file(pkg, "{}.py".format(name), blank.python)

    def create_middleware(self, _path):
        """Creates an empty middleware package."""
        self.create_module(_path, "middleware")
        pkg = os.path.join(_path, "middleware")
        self.create_file(pkg, "__init__.py", blank.python)

    def create_requirements(self, _path):
        """Creates requirements/dev.txt for the project."""
        self.create_folder(_path, "requirements")
        req_dir = os.path.join(_path, "requirements")
        self.create_file(req_dir, "dev.txt", falcon.requirements)

    def create_project(self, name, module):
        """Creates manage.py, the app package and requirements."""
        self.create_folder(self.root_path, name)
        proj_root = os.path.join(self.root_path, name)
        self.create_file(proj_root, "manage.py", falcon.manager)
        self.create_app(proj_root, module)
        self.create_requirements(proj_root)
def downloader(args):
    """Downloads a zip template from args.download and unpacks it into
    ./args.name, preserving the archive's directory structure.

    Best-effort: a bad URL or a non-zip payload prints a colored message
    and returns without raising.
    """
    url = args.download
    from urllib2 import urlopen, HTTPError
    try:
        res = urlopen(url)
    except HTTPError:
        from initpy.prompt import color_print
        color_print("Wrong downloadable url!", "red")
        return
    from initpy.compact import StringIO
    from zipfile import ZipFile, BadZipfile
    try:
        template_zip = ZipFile(StringIO(res.read()))
    except BadZipfile:
        from initpy.prompt import color_print
        color_print("initpy only support zip file!", "red")
        return
    from os import path, getcwd, mkdir
    proj_path = path.join(getcwd(), args.name)
    try:
        mkdir(proj_path)
    except OSError:
        # Folder Exists
        pass
    # NOTE(review): assumes the first archive entry is a single root
    # folder wrapping all other entries -- confirm for arbitrary zips.
    zip_root = template_zip.namelist()[0]
    for fn in template_zip.namelist()[1:]:
        file_name = fn.replace(zip_root, '')
        file_path = path.join(proj_path, file_name)
        if file_path.endswith('/'):
            try:
                mkdir(file_path)
            except OSError:
                # Folder Exists
                pass
        else:
            # Fix: use a context manager so the handle is closed even when
            # the write fails (was open/write/close with no try/finally).
            with open(file_path, 'w') as _file:
                _file.write(template_zip.read(fn))
| |
import numpy as np
import matplotlib.pyplot as plt
import os
import sys
import json
import re
import shutil
from PIL import Image
from PIL import ImageFont, ImageDraw
import caffe
from caffe import layers as L
from caffe import params as P
from vqa_data_provider_layer import VQADataProvider
from vqa_data_provider_layer import VQADataProviderLayer
import config
sys.path.append(config.VQA_TOOLS_PATH)
sys.path.append(config.VQA_EVAL_TOOLS_PATH)
from vqaTools.vqa import VQA
from vqaEvaluation.vqaEval import VQAEval
def visualize_failures(stat_list, mode):
    """Dumps annotated images for a sample of predictions, grouped by
    question type, so failures can be inspected visually.

    Args:
        stat_list: list of dicts with keys qid, q_list, iid, answer,
            ans_list and pred (as built by exec_validation).
        mode: 'val', 'test-dev' or 'test'; selects the COCO image source
            and the output directory.
    """
    def save_qtype(qtype_list, save_filename, mode):
        # Map the mode to an output directory and the image root.
        if mode == 'val':
            savepath = os.path.join('./eval', save_filename)
            # TODO
            img_pre = '/home/dhpseth/vqa/02_tools/VQA/Images/val2014'
        elif mode == 'test-dev':
            savepath = os.path.join('./test-dev', save_filename)
            # TODO
            img_pre = '/home/dhpseth/vqa/02_tools/VQA/Images/test2015'
        elif mode == 'test':
            savepath = os.path.join('./test', save_filename)
            # TODO
            img_pre = '/home/dhpseth/vqa/02_tools/VQA/Images/test2015'
        else:
            raise Exception('Unsupported mode')
        # Always start from an empty output directory.
        if os.path.exists(savepath):
            shutil.rmtree(savepath)
        if not os.path.exists(savepath):
            os.makedirs(savepath)
        for qt in qtype_list:
            count = 0
            for t_question in stat_list:
                # Cap the number of saved images per question type.
                if count < 40 / len(qtype_list):
                    t_question_list = t_question['q_list']
                    # Keep only questions whose first two words match qt.
                    saveflag = (t_question_list[0] == qt[0] and
                                t_question_list[1] == qt[1])
                    if saveflag:
                        t_iid = t_question['iid']
                        if mode == 'val':
                            t_img = Image.open(os.path.join(
                                img_pre,
                                'COCO_val2014_' + str(t_iid).zfill(12) + '.jpg'))
                        elif mode in ('test-dev', 'test'):
                            # Fix: was `mode == 'test-dev' or 'test'`, which
                            # is always true; harmless here only because the
                            # mode was already validated above.
                            t_img = Image.open(os.path.join(
                                img_pre,
                                'COCO_test2015_' + str(t_iid).zfill(12) + '.jpg'))
                        # Overlay the candidate answers on the image.
                        ans_list = t_question['ans_list']
                        draw = ImageDraw.Draw(t_img)
                        for i in range(len(ans_list)):
                            try:
                                draw.text((10, 10 * i), str(ans_list[i]))
                            except Exception:
                                # Some answers cannot be rendered; skip them.
                                pass
                        ans = t_question['answer']
                        pred = t_question['pred']
                        # Filename prefix: '' (no ground truth) / correct /
                        # failure.
                        if ans == -1:
                            pre = ''
                        elif ans == pred:
                            pre = 'correct '
                        else:
                            pre = 'failure '
                        # Slashes would break the save path.
                        ans = re.sub('/', ' ', str(ans))
                        pred = re.sub('/', ' ', str(pred))
                        img_title = pre + str(' '.join(t_question_list)) + '. a_' + \
                            str(ans) + ' p_' + str(pred) + '.png'
                        count += 1
                        print(os.path.join(savepath, img_title))
                        t_img.save(os.path.join(savepath, img_title))
    print('saving colors')  # fix: message previously said 'whatis'
    qt_color_list = [['what', 'color']]
    save_qtype(qt_color_list, 'colors', mode)
    print('saving whatis')
    qt_whatis_list = [['what', 'is'], ['what', 'kind'], ['what', 'are']]
    save_qtype(qt_whatis_list, 'whatis', mode)
    print('saving is')
    qt_is_list = [['is', 'the'], ['is', 'this'], ['is', 'there']]
    save_qtype(qt_is_list, 'is', mode)
    print('saving how many')
    qt_howmany_list = [['how', 'many']]
    save_qtype(qt_howmany_list, 'howmany', mode)
def exec_validation(device_id, mode, it='', visualize=False):
    """Runs the trained net over the given split and collects predictions.

    Args:
        device_id: GPU id for caffe.
        mode: 'val', 'test-dev' or 'test'.
        it: iteration tag embedded in the result filename.
        visualize: also collect per-question stats and dump failure images.

    Returns:
        For mode 'val': (mean_testloss, acc_overall, acc_perQuestionType,
        acc_perAnswerType).  Other modes only write the result JSON and
        return None.
    """
    caffe.set_device(device_id)
    caffe.set_mode_gpu()
    net = caffe.Net('./result/proto_test.prototxt',
                    './result/tmp.caffemodel',
                    caffe.TEST)
    dp = VQADataProvider(mode=mode, batchsize=64)
    total_questions = len(dp.getQuesIds())
    epoch = 0
    pred_list = []
    testloss_list = []
    stat_list = []
    # get_batch_vec flips epoch to 1 once the provider wraps around.
    while epoch == 0:
        t_word, t_cont, t_img_feature, t_answer, t_glove_matrix, \
            t_qid_list, t_iid_list, epoch = dp.get_batch_vec()
        net.blobs['data'].data[...] = np.transpose(t_word, (1, 0))
        net.blobs['cont'].data[...] = np.transpose(t_cont, (1, 0))
        net.blobs['img_feature'].data[...] = t_img_feature
        net.blobs['label'].data[...] = t_answer
        net.blobs['glove'].data[...] = np.transpose(t_glove_matrix, (1, 0, 2))
        net.forward()
        t_pred_list = net.blobs['prediction'].data.argmax(axis=1)
        t_pred_str = [dp.vec_to_answer(pred_symbol) for pred_symbol in t_pred_list]
        testloss_list.append(net.blobs['loss'].data)
        for qid, iid, ans, pred in zip(t_qid_list, t_iid_list,
                                       t_answer.tolist(), t_pred_str):
            pred_list.append({'answer': pred,
                              'question_id': int(dp.getStrippedQuesId(qid))})
            if visualize:
                q_list = dp.seq_to_list(dp.getQuesStr(qid))
                # Fix: was `mode == 'test-dev' or 'test'`, which is always
                # true, so even 'val' runs recorded empty ground-truth
                # answers in stat_list.
                if mode in ('test-dev', 'test'):
                    # No ground truth available on the test splits.
                    ans_str = ''
                    ans_list = [''] * 10
                else:
                    ans_str = dp.vec_to_answer(ans)
                    ans_list = [dp.getAnsObj(qid)[i]['answer'] for i in range(10)]
                stat_list.append({
                    'qid': qid,
                    'q_list': q_list,
                    'iid': iid,
                    'answer': ans_str,
                    'ans_list': ans_list,
                    'pred': pred})
        # Simple in-place progress meter.
        percent = 100 * float(len(pred_list)) / total_questions
        sys.stdout.write('\r' + ('%.2f' % percent) + '%')
        sys.stdout.flush()
    mean_testloss = np.array(testloss_list).mean()
    if mode == 'val':
        valFile = './result/val2015_resfile'
        with open(valFile, 'w') as f:
            json.dump(pred_list, f)
        if visualize:
            visualize_failures(stat_list, mode)
        annFile = config.DATA_PATHS['val']['ans_file']
        quesFile = config.DATA_PATHS['val']['ques_file']
        vqa = VQA(annFile, quesFile)
        vqaRes = vqa.loadRes(valFile, quesFile)
        vqaEval = VQAEval(vqa, vqaRes, n=2)
        vqaEval.evaluate()
        acc_overall = vqaEval.accuracy['overall']
        acc_perQuestionType = vqaEval.accuracy['perQuestionType']
        acc_perAnswerType = vqaEval.accuracy['perAnswerType']
        return mean_testloss, acc_overall, acc_perQuestionType, acc_perAnswerType
    elif mode == 'test-dev':
        filename = './result/vqa_OpenEnded_mscoco_test-dev2015_v3t' + str(it).zfill(8) + '_results'
        with open(filename + '.json', 'w') as f:
            json.dump(pred_list, f)
        if visualize:
            visualize_failures(stat_list, mode)
    elif mode == 'test':
        filename = './result/vqa_OpenEnded_mscoco_test2015_v3c' + str(it).zfill(8) + '_results'
        with open(filename + '.json', 'w') as f:
            json.dump(pred_list, f)
        if visualize:
            visualize_failures(stat_list, mode)
def drawgraph(results, save_question_type_graphs=False):
    """Plots the training curve (losses + overall accuracy) and,
    optionally, per-question-type accuracy graphs, saving them as PNGs.

    Each row of `results` is indexed as:
      0: it, 1: trainloss, 2: testloss, 3: oa_acc, 4: qt_acc, 5: at_acc
    """
    # training curve
    it = np.array([l[0] for l in results])
    loss = np.array([l[1] for l in results])
    valloss = np.array([l[2] for l in results])
    valacc = np.array([l[3] for l in results])
    fig = plt.figure()
    ax1 = fig.add_subplot(111)
    # Twin y-axes: loss on the left, accuracy on the right.
    ax2 = ax1.twinx()
    ax1.plot(it, loss, color='blue', label='train loss')
    ax1.plot(it, valloss, '--', color='blue', label='test loss')
    ax2.plot(it, valacc, color='red', label='acc on val')
    plt.legend(loc='lower left')
    ax1.set_xlabel('Iterations')
    ax1.set_ylabel('Loss Value')
    ax2.set_ylabel('Accuracy on Val [%]')
    # The best accuracy so far is embedded in the filename.
    plt.savefig('./learning_curve max_%2.2f.png' % valacc.max())
    plt.clf()
    plt.close("all")
    # question type
    it = np.array([l[0] for l in results])
    oa_acc = np.array([l[3] for l in results])
    qt_dic_list = [l[4] for l in results]

    def draw_qt_acc(target_key_list, figname):
        # One accuracy curve per question-type key.
        fig = plt.figure()
        for k in target_key_list:
            print((k, type(k)))
            t_val = np.array([qt_dic[k] for qt_dic in qt_dic_list])
            plt.plot(it, t_val, label=str(k))
        plt.legend(fontsize='small')
        plt.ylim(0, 100.)
        #plt.legend(prop={'size':6})
        plt.xlabel('Iterations')
        plt.ylabel('Accuracy on Val [%]')
        plt.savefig(figname, dpi=200)
        plt.clf()
        plt.close("all")

    if save_question_type_graphs:
        # NOTE(review): the slice boundaries below assume a fixed, sorted
        # set of question-type keys grouped by leading word -- confirm
        # they match the dataset's key set.
        s_keys = sorted(qt_dic_list[0].keys())
        draw_qt_acc(s_keys[0:13] + [s_keys[31], ], './ind_qt_are.png')
        draw_qt_acc(s_keys[13:17] + s_keys[49:], './ind_qt_how_where_who_why.png')
        draw_qt_acc(s_keys[17:31] + [s_keys[32], ], './ind_qt_is.png')
        draw_qt_acc(s_keys[33:49], './ind_qt_what.png')
        draw_qt_acc(['what color is the', 'what color are the', 'what color is',
                     'what color', 'what is the color of the'], './qt_color.png')
        draw_qt_acc(['how many', 'how', 'how many people are',
                     'how many people are in'], './qt_number.png')
        draw_qt_acc(['who is', 'why', 'why is the', 'where is the', 'where are the',
                     'which'], './qt_who_why_where_which.png')
        draw_qt_acc(['what is the man', 'is the man', 'are they', 'is he',
                     'is the woman', 'is this person', 'what is the woman', 'is the person',
                     'what is the person'], './qt_human.png')
| |
from __future__ import unicode_literals
import os
import shutil
from unittest import skipIf
from django.core.exceptions import ImproperlyConfigured
from django.core.files import File
from django.core.files.images import ImageFile
from django.test import TestCase
from django.utils._os import upath
# Pillow may be unavailable: importing the models then raises
# ImproperlyConfigured, and every test below is skipped via @skipIf.
try:
    from .models import Image
except ImproperlyConfigured:
    Image = None

if Image:
    from .models import (Person, PersonWithHeight, PersonWithHeightAndWidth,
        PersonDimensionsFirst, PersonTwoImages, TestImageFieldFile)
    from .models import temp_storage_dir
else:
    # Pillow not available, create dummy classes (tests will be skipped anyway)
    class Person():
        pass
    PersonWithHeight = PersonWithHeightAndWidth = PersonDimensionsFirst = Person
    PersonTwoImages = Person
class ImageFieldTestMixin(object):
    """
    Mixin class to provide common functionality to ImageField test classes.
    """

    # Person model to use for tests.
    PersonModel = PersonWithHeightAndWidth
    # File class to use for file instances.
    File = ImageFile

    def setUp(self):
        """
        Creates a pristine temp directory (or deletes and recreates if it
        already exists) that the model uses as its storage directory.

        Sets up two ImageFile instances for use in tests.
        """
        if os.path.exists(temp_storage_dir):
            shutil.rmtree(temp_storage_dir)
        os.mkdir(temp_storage_dir)
        file_path1 = os.path.join(os.path.dirname(upath(__file__)), "4x8.png")
        self.file1 = self.File(open(file_path1, 'rb'))
        file_path2 = os.path.join(os.path.dirname(upath(__file__)), "8x4.png")
        self.file2 = self.File(open(file_path2, 'rb'))

    def tearDown(self):
        """
        Removes temp directory and all its contents.
        """
        # Fix: close the handles opened in setUp so file descriptors are
        # not leaked across tests (previously they were never closed).
        self.file1.close()
        self.file2.close()
        shutil.rmtree(temp_storage_dir)

    def check_dimensions(self, instance, width, height,
                         field_name='mugshot'):
        """
        Asserts that the given width and height values match both the
        field's height and width attributes and the height and width fields
        (if defined) the image field is caching to.

        Note, this method will check for dimension fields named by adding
        "_width" or "_height" to the name of the ImageField. So, the
        models used in these tests must have their fields named
        accordingly.

        By default, we check the field named "mugshot", but this can be
        specified by passing the field_name parameter.
        """
        field = getattr(instance, field_name)
        # Check height/width attributes of field.
        if width is None and height is None:
            self.assertRaises(ValueError, getattr, field, 'width')
            self.assertRaises(ValueError, getattr, field, 'height')
        else:
            self.assertEqual(field.width, width)
            self.assertEqual(field.height, height)
        # Check height/width fields of model, if defined.
        width_field_name = field_name + '_width'
        if hasattr(instance, width_field_name):
            self.assertEqual(getattr(instance, width_field_name), width)
        height_field_name = field_name + '_height'
        if hasattr(instance, height_field_name):
            self.assertEqual(getattr(instance, height_field_name), height)
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldTests(ImageFieldTestMixin, TestCase):
    """
    Tests for ImageField that don't need to be run with each of the
    different test model classes.
    """

    def test_equal_notequal_hash(self):
        """
        Bug #9786: Ensure '==' and '!=' work correctly.
        Bug #9508: make sure hash() works as expected (equal items must
        hash to the same value).
        """
        # Create two Persons with different mugshots.
        p1 = self.PersonModel(name="Joe")
        p1.mugshot.save("mug", self.file1)
        p2 = self.PersonModel(name="Bob")
        p2.mugshot.save("mug", self.file2)
        # Spelled as explicit `==`/`!=` expressions (rather than
        # assertNotEqual) so the field's rich-comparison operators are
        # exercised directly.
        self.assertEqual(p1.mugshot == p2.mugshot, False)
        self.assertEqual(p1.mugshot != p2.mugshot, True)
        # Test again with an instance fetched from the db.
        p1_db = self.PersonModel.objects.get(name="Joe")
        self.assertEqual(p1_db.mugshot == p2.mugshot, False)
        self.assertEqual(p1_db.mugshot != p2.mugshot, True)
        # Instance from db should match the local instance.
        self.assertEqual(p1_db.mugshot == p1.mugshot, True)
        self.assertEqual(hash(p1_db.mugshot), hash(p1.mugshot))
        self.assertEqual(p1_db.mugshot != p1.mugshot, False)

    def test_instantiate_missing(self):
        """
        If the underlying file is unavailable, the object can still be
        instantiated without error.
        """
        p = self.PersonModel(name="Joan")
        p.mugshot.save("shot", self.file1)
        p = self.PersonModel.objects.get(name="Joan")
        path = p.mugshot.path
        # Move the stored file away; the re-fetch below must not fail.
        shutil.move(path, path + '.moved')
        self.PersonModel.objects.get(name="Joan")

    def test_delete_when_missing(self):
        """
        Bug #8175: correctly delete an object where the file no longer
        exists on the file system.
        """
        p = self.PersonModel(name="Fred")
        p.mugshot.save("shot", self.file1)
        os.remove(p.mugshot.path)
        p.delete()

    def test_size_method(self):
        """
        Bug #8534: FileField.size should not leave the file open.
        """
        p = self.PersonModel(name="Joan")
        p.mugshot.save("shot", self.file1)
        # Get a "clean" model instance
        p = self.PersonModel.objects.get(name="Joan")
        # It won't have an opened file.
        self.assertEqual(p.mugshot.closed, True)
        # After asking for the size, the file should still be closed.
        p.mugshot.size
        self.assertEqual(p.mugshot.closed, True)

    def test_pickle(self):
        """
        Tests that ImageField can be pickled, unpickled, and that the
        image of the unpickled version is the same as the original.
        """
        import pickle
        p = Person(name="Joe")
        p.mugshot.save("mug", self.file1)
        dump = pickle.dumps(p)
        # NOTE(review): p2 appears unused below -- presumably leftover
        # scaffolding; confirm before removing.
        p2 = Person(name="Bob")
        p2.mugshot = self.file1
        loaded_p = pickle.loads(dump)
        self.assertEqual(p.mugshot, loaded_p.mugshot)
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldTwoDimensionsTests(ImageFieldTestMixin, TestCase):
    """
    Tests behavior of an ImageField and its dimensions fields.

    Subclasses rerun the same tests against models with fewer/reordered
    dimension fields by overriding PersonModel (and File).
    """

    def test_constructor(self):
        """
        Tests assigning an image field through the model's constructor.
        """
        p = self.PersonModel(name='Joe', mugshot=self.file1)
        self.check_dimensions(p, 4, 8)
        p.save()
        self.check_dimensions(p, 4, 8)

    def test_image_after_constructor(self):
        """
        Tests behavior when image is not passed in constructor.
        """
        p = self.PersonModel(name='Joe')
        # TestImageField value will default to being an instance of its
        # attr_class, a TestImageFieldFile, with name == None, which will
        # cause it to evaluate as False.
        self.assertEqual(isinstance(p.mugshot, TestImageFieldFile), True)
        self.assertEqual(bool(p.mugshot), False)
        # Test setting a fresh created model instance.
        p = self.PersonModel(name='Joe')
        p.mugshot = self.file1
        self.check_dimensions(p, 4, 8)

    def test_create(self):
        """
        Tests assigning an image in Manager.create().
        """
        p = self.PersonModel.objects.create(name='Joe', mugshot=self.file1)
        self.check_dimensions(p, 4, 8)

    def test_default_value(self):
        """
        Tests that the default value for an ImageField is an instance of
        the field's attr_class (TestImageFieldFile in this case) with no
        name (name set to None).
        """
        p = self.PersonModel()
        self.assertEqual(isinstance(p.mugshot, TestImageFieldFile), True)
        self.assertEqual(bool(p.mugshot), False)

    def test_assignment_to_None(self):
        """
        Tests that assigning ImageField to None clears dimensions.
        """
        p = self.PersonModel(name='Joe', mugshot=self.file1)
        self.check_dimensions(p, 4, 8)
        # If image assigned to None, dimension fields should be cleared.
        p.mugshot = None
        self.check_dimensions(p, None, None)
        p.mugshot = self.file2
        self.check_dimensions(p, 8, 4)

    def test_field_save_and_delete_methods(self):
        """
        Tests assignment using the field's save method and deletion using
        the field's delete method.
        """
        p = self.PersonModel(name='Joe')
        p.mugshot.save("mug", self.file1)
        self.check_dimensions(p, 4, 8)
        # A new file should update dimensions.
        p.mugshot.save("mug", self.file2)
        self.check_dimensions(p, 8, 4)
        # Field and dimensions should be cleared after a delete.
        p.mugshot.delete(save=False)
        self.assertEqual(p.mugshot, None)
        self.check_dimensions(p, None, None)

    def test_dimensions(self):
        """
        Checks that dimensions are updated correctly in various situations.
        """
        p = self.PersonModel(name='Joe')
        # Dimensions should get set if file is saved.
        p.mugshot.save("mug", self.file1)
        self.check_dimensions(p, 4, 8)
        # Test dimensions after fetching from database.
        p = self.PersonModel.objects.get(name='Joe')
        # Bug 11084: Dimensions should not get recalculated if file is
        # coming from the database. We test this by checking if the file
        # was opened.
        self.assertEqual(p.mugshot.was_opened, False)
        self.check_dimensions(p, 4, 8)
        # After checking dimensions on the image field, the file will have
        # opened.
        self.assertEqual(p.mugshot.was_opened, True)
        # Dimensions should now be cached, and if we reset was_opened and
        # check dimensions again, the file should not have opened.
        p.mugshot.was_opened = False
        self.check_dimensions(p, 4, 8)
        self.assertEqual(p.mugshot.was_opened, False)
        # If we assign a new image to the instance, the dimensions should
        # update.
        p.mugshot = self.file2
        self.check_dimensions(p, 8, 4)
        # Dimensions were recalculated, and hence file should have opened.
        self.assertEqual(p.mugshot.was_opened, True)
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldNoDimensionsTests(ImageFieldTwoDimensionsTests):
    """
    Tests behavior of an ImageField with no dimension fields.
    """
    # Inherits every test case; only the model under test changes.
    PersonModel = Person
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldOneDimensionTests(ImageFieldTwoDimensionsTests):
    """
    Tests behavior of an ImageField with one dimensions field.
    """
    # Inherits every test case; only the model under test changes.
    PersonModel = PersonWithHeight
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldDimensionsFirstTests(ImageFieldTwoDimensionsTests):
    """
    Tests behavior of an ImageField where the dimensions fields are
    defined before the ImageField.
    """
    # Inherits every test case; only the model under test changes.
    PersonModel = PersonDimensionsFirst
@skipIf(Image is None, "PIL is required to test ImageField")
class ImageFieldUsingFileTests(ImageFieldTwoDimensionsTests):
    """
    Tests behavior of an ImageField when assigning it a File instance
    rather than an ImageFile instance.
    """
    PersonModel = PersonDimensionsFirst
    # Overrides the mixin's ImageFile with the plain File wrapper.
    File = File
@skipIf(Image is None, "PIL is required to test ImageField")
class TwoImageFieldTests(ImageFieldTestMixin, TestCase):
    """
    Tests a model with two ImageFields.
    """

    PersonModel = PersonTwoImages

    def test_constructor(self):
        # Each field caches its own dimensions independently.
        p = self.PersonModel(mugshot=self.file1, headshot=self.file2)
        self.check_dimensions(p, 4, 8, 'mugshot')
        self.check_dimensions(p, 8, 4, 'headshot')
        p.save()
        self.check_dimensions(p, 4, 8, 'mugshot')
        self.check_dimensions(p, 8, 4, 'headshot')

    def test_create(self):
        p = self.PersonModel.objects.create(mugshot=self.file1,
                                            headshot=self.file2)
        self.check_dimensions(p, 4, 8)
        self.check_dimensions(p, 8, 4, 'headshot')

    def test_assignment(self):
        p = self.PersonModel()
        self.check_dimensions(p, None, None, 'mugshot')
        self.check_dimensions(p, None, None, 'headshot')
        p.mugshot = self.file1
        self.check_dimensions(p, 4, 8, 'mugshot')
        self.check_dimensions(p, None, None, 'headshot')
        p.headshot = self.file2
        self.check_dimensions(p, 4, 8, 'mugshot')
        self.check_dimensions(p, 8, 4, 'headshot')
        # Clear the ImageFields one at a time.
        p.mugshot = None
        self.check_dimensions(p, None, None, 'mugshot')
        self.check_dimensions(p, 8, 4, 'headshot')
        p.headshot = None
        self.check_dimensions(p, None, None, 'mugshot')
        self.check_dimensions(p, None, None, 'headshot')

    def test_field_save_and_delete_methods(self):
        p = self.PersonModel(name='Joe')
        p.mugshot.save("mug", self.file1)
        self.check_dimensions(p, 4, 8, 'mugshot')
        self.check_dimensions(p, None, None, 'headshot')
        p.headshot.save("head", self.file2)
        self.check_dimensions(p, 4, 8, 'mugshot')
        self.check_dimensions(p, 8, 4, 'headshot')
        # We can use save=True when deleting the image field with null=True
        # dimension fields and the other field has an image.
        p.headshot.delete(save=True)
        self.check_dimensions(p, 4, 8, 'mugshot')
        self.check_dimensions(p, None, None, 'headshot')
        p.mugshot.delete(save=False)
        self.check_dimensions(p, None, None, 'mugshot')
        self.check_dimensions(p, None, None, 'headshot')

    def test_dimensions(self):
        """
        Checks that dimensions are updated correctly in various situations.
        """
        p = self.PersonModel(name='Joe')
        # Dimensions should get set for the saved file.
        p.mugshot.save("mug", self.file1)
        p.headshot.save("head", self.file2)
        self.check_dimensions(p, 4, 8, 'mugshot')
        self.check_dimensions(p, 8, 4, 'headshot')
        # Test dimensions after fetching from database.
        p = self.PersonModel.objects.get(name='Joe')
        # Bug 11084: Dimensions should not get recalculated if file is
        # coming from the database. We test this by checking if the file
        # was opened.
        self.assertEqual(p.mugshot.was_opened, False)
        self.assertEqual(p.headshot.was_opened, False)
        self.check_dimensions(p, 4, 8, 'mugshot')
        self.check_dimensions(p, 8, 4, 'headshot')
        # After checking dimensions on the image fields, the files will
        # have been opened.
        self.assertEqual(p.mugshot.was_opened, True)
        self.assertEqual(p.headshot.was_opened, True)
        # Dimensions should now be cached, and if we reset was_opened and
        # check dimensions again, the file should not have opened.
        p.mugshot.was_opened = False
        p.headshot.was_opened = False
        self.check_dimensions(p, 4, 8, 'mugshot')
        self.check_dimensions(p, 8, 4, 'headshot')
        self.assertEqual(p.mugshot.was_opened, False)
        self.assertEqual(p.headshot.was_opened, False)
        # If we assign a new image to the instance, the dimensions should
        # update.
        p.mugshot = self.file2
        p.headshot = self.file1
        self.check_dimensions(p, 8, 4, 'mugshot')
        self.check_dimensions(p, 4, 8, 'headshot')
        # Dimensions were recalculated, and hence file should have opened.
        self.assertEqual(p.mugshot.was_opened, True)
        self.assertEqual(p.headshot.was_opened, True)
| |
"""
** deeplean-ai.com **
** dl-lab **
created by :: GauravBh1010tt
"""
import pandas as pd
import numpy as np
import re
#from tqdm import tqdm
from nltk.corpus import wordnet
from nltk import bigrams, trigrams
from collections import Counter, defaultdict
from gensim.models import Word2Vec
from scipy.spatial.distance import cosine as cos
from stop_words import get_stop_words
from gensim import corpora, models
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+')
def tokenize(sent):
    """Splits a sentence into word and punctuation tokens.

    Fix: the pattern was '(\\W+)?' -- an optional group that can match the
    empty string at every position, which modern `re` versions split on
    (scattering the text into single characters).  r'(\\W+)' keeps the
    separator runs as tokens without any zero-width matches.
    """
    return [x.strip() for x in re.split(r'(\W+)', sent) if x.strip()]
# Number of words in a string (returns int).
def length(val):
    """Returns the number of whitespace-separated words in val."""
    tokens = val.split()
    return len(tokens)
# Whether either string contains the other (returns 1 or 0).
def substringCheck(sen_A, sen_B):
    """Returns 1 when one sentence is a substring of the other, else 0."""
    contained = sen_A in sen_B or sen_B in sen_A
    return 1 if contained else 0
# Number of equal word pairs between two sentences (returns int).
def overlap(sen_A, sen_B):
    """Counts pairs (a, b), a from sen_A and b from sen_B, with a == b.

    Every matching pair is counted, so a word occurring m times in one
    sentence and n times in the other contributes m*n.
    """
    words_a = Counter(sen_A.split())
    words_b = Counter(sen_B.split())
    return sum(freq * words_b[word] for word, freq in words_a.items())
# Number of synonyms of sen_A's words appearing in sen_B (returns int).
def overlapSyn(sen_A, sen_B):
    """Counts WordNet lemma names of sen_A's words that occur in sen_B.

    A word is never counted as its own synonym; duplicates across synsets
    are counted each time they appear.
    """
    tokens_a = sen_A.split()
    tokens_b = sen_B.split()
    matches = [lemma
               for word in tokens_a
               for synset in wordnet.synsets(word)
               for lemma in synset.lemma_names()
               if lemma in tokens_b and lemma != word]
    return len(matches)
# Builds a bag-of-words model (returns Counter of word -> weight).
def train_BOW(lst):
    """Counts word occurrences over all sentences and normalises every
    count by the vocabulary size (the number of distinct words)."""
    words = []
    for sentence in lst:
        words.extend(sentence.split())
    bow = Counter(words)
    vocab_size = float(len(set(words)))
    for token in bow:
        bow[token] /= vocab_size
    return bow
# Sum of BOW weights for a sentence (returns float).
def Sum_BOW(sent, dic):
    """Sums dic's weights over the words of sent; unknown words add 0.

    Fix: replaces a bare except (which silently hid *any* error during the
    lookup) with an explicit default via dict.get.
    """
    tot = 0.0
    for word in sent.split():
        tot += dic.get(word, 0.0)
    return tot
# Trains a bigram model (returns dict of dicts of probabilities).
def train_bigram(lst):
    """Builds P(w2 | w1) estimates from whitespace-tokenised sentences.

    Sentences are padded with None at both ends by nltk's bigrams(), so
    model[None][w] is the probability of w starting a sentence.
    """
    model = defaultdict(lambda: defaultdict(lambda: 0))
    for sentence in lst:
        tokens = sentence.split()
        for left, right in bigrams(tokens, pad_right=True, pad_left=True):
            model[left][right] += 1
    # Normalise each left-context's counts into probabilities.
    for left in model:
        denom = float(sum(model[left].values()))
        for right in model[left]:
            model[left][right] /= denom
    return model
# Total bigram probability of a sentence (returns float).
def sum_bigram(sent, model):
    """Sums model[prev][cur] over the sentence, using model[None][w0] for
    the first word.  Missing entries contribute nothing.

    Fix: the bare except is narrowed to KeyError (the only expected error
    when a plain dict model lacks an entry) so genuine bugs are no longer
    swallowed.
    """
    sent = sent.split()
    first = True
    tot = 0
    for i in range(len(sent)):
        try:
            if first:
                tot += model[None][sent[i]]
                first = False
            else:
                tot += model[sent[i-1]][sent[i]]
        except KeyError:
            continue
    return tot
#Training Trigram Model[Returns Dictionary of Dictionaries]:
def train_trigram(lst):
    """Train a padded trigram model; model[(w1, w2)][w3] = P(w3 | w1, w2).

    Fixes two bugs in the original: counts were accumulated under
    ``model[(w1, w2)][w2]`` (dropping w3 entirely), and the trained
    model was never returned.
    """
    model = defaultdict(lambda: defaultdict(lambda: 0))
    for sent in lst:
        tokens = sent.split()
        for w1, w2, w3 in trigrams(tokens, pad_right=True, pad_left=True):
            model[(w1, w2)][w3] += 1
    # normalise each (w1, w2) row into conditional probabilities
    for w1, w2 in model:
        row_total = float(sum(model[(w1, w2)].values()))
        for w3 in model[(w1, w2)]:
            model[(w1, w2)][w3] /= row_total
    return model
#Total Sum Of Trigram Probablity Of A Sentence[Returns Float]:
def sum_trigram(sent, model):
    """Sum trigram probabilities of *sent* under *model*.

    The first two words are scored against the padded (None) contexts;
    missing contexts/words contribute nothing.  Catches only KeyError
    instead of the original bare ``except``.
    """
    words = sent.split()
    first = True
    second = True
    tot = 0
    for i in range(len(words)):
        try:
            if first:
                tot += model[None, None][words[i]]
                first = False
            elif second:
                tot += model[None, words[i-1]][words[i]]
                second = False
            else:
                tot += model[words[i-2], words[i-1]][words[i]]
        except KeyError:
            continue
    return tot
#Word2Vec Training(Returns Vector):
def W2V_train(lst1, lst2):
    """Train a Word2Vec model over the lower-cased token lists of both inputs."""
    sentences = []
    for i in range(len(lst1)):
        sentences.append(lst1[i].split())
        sentences.append(lst2[i].split())
    # lower-case every token in place before training
    for sent in sentences:
        sent[:] = [tok.lower() for tok in sent]
    return Word2Vec(sentences)
#Returns The Difference Between Word2Vec Sum Of All The Words In Two Sentences(Returns Vec):
def W2V_Vec(sent_A, sent_B, vec):
    """Cosine similarity between the summed word vectors of two sentences.

    Words missing from the model and bare punctuation tokens are skipped;
    returns 0.0 when the similarity is undefined (NaN or an error raised
    by cos).  Fixes the original ``elif``, which padded only one of the
    two sentences when both were too short.
    """
    punctuation = set(", . ? ! # $ % ^ & * ( ) { } [ ]".split())
    if len(sent_A) <= 1:
        sent_A += 'none'
    if len(sent_B) <= 1:   # was elif: both sentences may need padding
        sent_B += 'none'
    vec1 = 0
    vec2 = 0
    for word in tokenize(sent_A):
        if word not in punctuation:
            try:
                vec1 += vec[word]   # assumes vec raises KeyError for OOV words
            except KeyError:
                continue
    for word in tokenize(sent_B):
        if word not in punctuation:
            try:
                vec2 += vec[word]
            except KeyError:
                continue
    try:
        result = cos(vec1, vec2)
    except Exception:
        result = 0.0
    return 0.0 if np.isnan(result) else result
#Trains LDA Model (Returns Model):
def LDA_train(doc):
    """Tokenise + stop-word-filter the documents and train a 10-topic LDA model."""
    cleaned = []
    en_stop = get_stop_words('en')
    for d in doc:
        try:
            tokens = tokenizer.tokenize(d.lower())
            cleaned.append([t for t in tokens if not t in en_stop])
        except:
            # best-effort: skip documents that fail to tokenise
            continue
    print("Forming Dictionary.....")
    dictionary = corpora.Dictionary(cleaned)
    print("Forming Corpus.....")
    corpus = [dictionary.doc2bow(text) for text in cleaned]
    print("Training Model.....")
    return models.ldamodel.LdaModel(corpus, num_topics=10, id2word=dictionary, passes=1)
#Returns Average Of Probablity Of Word Present In LDA Model For Input Document(Returns Float):
def LDA(doc1, doc2, lda):
    """Score the topical dissimilarity of two documents under *lda*.

    Builds per-topic weight vectors for both documents from the model's
    top words, picks each document's dominant topic, then counts the
    words of each document that do not belong to the other document's
    dominant topic.  Fixes the original accumulation for doc1, which
    indexed the topic vector by word position (``vec1[j]``) instead of
    topic (``vec1[i]``), inconsistently with the doc2 loop.
    """
    word = pd.DataFrame()
    weight = pd.DataFrame()
    vec1 = [0] * 10
    vec2 = [0] * 10
    # Parse '(p*"w" + p*"w" ...)' topic descriptions into parallel
    # word/weight tables: column i = topic i, row j = j-th top word.
    for i in range(10):
        wrd = []
        wgt = []
        for token in lda.print_topic(i).split():
            if token != '+':
                prob, _, quoted = token.partition("*")
                wrd.append(quoted[1:-1])   # strip the surrounding quotes
                wgt.append(float(prob))
        word[i] = wrd
        weight[i] = wgt
    wrd1 = []
    wrd2 = []
    # Accumulate each document's weight per topic (i indexes the topic).
    for d in doc1.split():
        for i in range(10):
            for j in range(10):
                if d.lower() == word[i][j]:
                    vec1[i] += float(weight[i][j])   # was vec1[j]: wrong index
                    wrd1.append(word[i][j])
    for d in doc2.split():
        for i in range(10):
            for j in range(10):
                if d.lower() == word[i][j]:
                    vec2[i] += float(weight[i][j])
                    wrd2.append(word[i][j])
    # Dominant topic of each document (ties/last max win, as before).
    t1 = t2 = 0
    v1 = 0.0
    v2 = 0.0
    for i in range(10):
        if vec1[i] >= v1:
            t1 = i
            v1 = vec1[i]
        if vec2[i] >= v2:
            t2 = i
            v2 = vec2[i]
    # Count distinct words of each doc differing from the other doc's
    # dominant-topic words.
    w1_new = []
    w2_new = []
    for d in set(wrd1):
        for i in range(10):
            if d != word[t2][i]:
                w1_new.append(d)
    for d in set(wrd2):
        for i in range(10):
            if d != word[t1][i]:
                w2_new.append(d)
    return len(set(w1_new)) + len(set(w2_new))
| |
# Copyright 2017, Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# EDITING INSTRUCTIONS
# This file was generated from the file
# https://github.com/google/googleapis/blob/master/google/cloud/language/v1/language_service.proto,
# and updates to that file get reflected here through a refresh process.
# For the short term, the refresh process will only be runnable by Google engineers.
#
# The only allowed edits are to method and file documentation. A 3-way
# merge preserves those additions if the generated source changes.
"""Accesses the google.cloud.language.v1 LanguageService API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.cloud.gapic.language.v1 import enums
from google.cloud.proto.language.v1 import language_service_pb2
class LanguageServiceClient(object):
    """
    Provides text analysis operations such as sentiment analysis and entity
    recognition.
    """
    # NOTE: generated GAPIC client — per the file header, only method and
    # file documentation may be edited here.
    SERVICE_ADDRESS = 'language.googleapis.com'
    """The default address of the service."""
    DEFAULT_SERVICE_PORT = 443
    """The default port of the service."""
    # The scopes needed to make gRPC calls to all of the methods defined in
    # this service
    _ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
    def __init__(self,
                 service_path=SERVICE_ADDRESS,
                 port=DEFAULT_SERVICE_PORT,
                 channel=None,
                 credentials=None,
                 ssl_credentials=None,
                 scopes=None,
                 client_config=None,
                 app_name=None,
                 app_version='',
                 lib_name=None,
                 lib_version='',
                 metrics_headers=()):
        """Constructor.
        Args:
          service_path (string): The domain name of the API remote host.
          port (int): The port on which to connect to the remote host.
          channel (:class:`grpc.Channel`): A ``Channel`` instance through
            which to make calls.
          credentials (object): The authorization credentials to attach to
            requests. These credentials identify this application to the
            service.
          ssl_credentials (:class:`grpc.ChannelCredentials`): A
            ``ChannelCredentials`` instance for use with an SSL-enabled
            channel.
          scopes (list[string]): A list of OAuth2 scopes to attach to requests.
          client_config (dict):
            A dictionary for call options for each method. See
            :func:`google.gax.construct_settings` for the structure of
            this data. Falls back to the default config if not specified
            or the specified config is missing data points.
          app_name (string): The name of the application calling
            the service. Recommended for analytics purposes.
          app_version (string): The version of the application calling
            the service. Recommended for analytics purposes.
          lib_name (string): The API library software used for calling
            the service. (Unless you are writing an API client itself,
            leave this as default.)
          lib_version (string): The API library software version used
            for calling the service. (Unless you are writing an API client
            itself, leave this as default.)
          metrics_headers (dict): A dictionary of values for tracking
            client library metrics. Ultimately serializes to a string
            (e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
            considered private.
        Returns:
          A LanguageServiceClient object.
        """
        # Unless the calling application specifically requested
        # OAuth scopes, request everything.
        if scopes is None:
            scopes = self._ALL_SCOPES
        # Initialize an empty client config, if none is set.
        if client_config is None:
            client_config = {}
        # Initialize metrics_headers as an ordered dictionary
        # (cuts down on cardinality of the resulting string slightly).
        metrics_headers = collections.OrderedDict(metrics_headers)
        metrics_headers['gl-python'] = platform.python_version()
        # The library may or may not be set, depending on what is
        # calling this client. Newer client libraries set the library name
        # and version.
        if lib_name:
            metrics_headers[lib_name] = lib_version
        # Finally, track the GAPIC package version.
        metrics_headers['gapic'] = pkg_resources.get_distribution(
            'gapic-google-cloud-language-v1', ).version
        # Load the configuration defaults.
        default_client_config = json.loads(
            pkg_resources.resource_string(
                __name__, 'language_service_client_config.json').decode())
        defaults = api_callable.construct_settings(
            'google.cloud.language.v1.LanguageService',
            default_client_config,
            client_config,
            config.STATUS_CODE_NAMES,
            metrics_headers=metrics_headers, )
        # Create the gRPC stub shared by all four service calls below.
        self.language_service_stub = config.create_stub(
            language_service_pb2.LanguageServiceStub,
            channel=channel,
            service_path=service_path,
            service_port=port,
            credentials=credentials,
            scopes=scopes,
            ssl_credentials=ssl_credentials)
        # Wrap each stub method with the retry/timeout settings loaded above.
        self._analyze_sentiment = api_callable.create_api_call(
            self.language_service_stub.AnalyzeSentiment,
            settings=defaults['analyze_sentiment'])
        self._analyze_entities = api_callable.create_api_call(
            self.language_service_stub.AnalyzeEntities,
            settings=defaults['analyze_entities'])
        self._analyze_syntax = api_callable.create_api_call(
            self.language_service_stub.AnalyzeSyntax,
            settings=defaults['analyze_syntax'])
        self._annotate_text = api_callable.create_api_call(
            self.language_service_stub.AnnotateText,
            settings=defaults['annotate_text'])
    # Service calls
    # (thin wrappers that build the request protobuf and invoke the
    # configured GAPIC callable)
    def analyze_sentiment(self, document, encoding_type=None, options=None):
        """
        Analyzes the sentiment of the provided text.
        Example:
          >>> from google.cloud.gapic.language.v1 import language_service_client
          >>> from google.cloud.proto.language.v1 import language_service_pb2
          >>> client = language_service_client.LanguageServiceClient()
          >>> document = language_service_pb2.Document()
          >>> response = client.analyze_sentiment(document)
        Args:
          document (:class:`google.cloud.proto.language.v1.language_service_pb2.Document`): Input document.
          encoding_type (enum :class:`google.cloud.gapic.language.v1.enums.EncodingType`): The encoding type used by the API to calculate sentence offsets.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.
        Returns:
          A :class:`google.cloud.proto.language.v1.language_service_pb2.AnalyzeSentimentResponse` instance.
        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
          :exc:`ValueError` if the parameters are invalid.
        """
        # Create the request object.
        request = language_service_pb2.AnalyzeSentimentRequest(
            document=document, encoding_type=encoding_type)
        return self._analyze_sentiment(request, options)
    def analyze_entities(self, document, encoding_type, options=None):
        """
        Finds named entities (currently proper names and common nouns) in the text
        along with entity types, salience, mentions for each entity, and
        other properties.
        Example:
          >>> from google.cloud.gapic.language.v1 import language_service_client
          >>> from google.cloud.gapic.language.v1 import enums
          >>> from google.cloud.proto.language.v1 import language_service_pb2
          >>> client = language_service_client.LanguageServiceClient()
          >>> document = language_service_pb2.Document()
          >>> encoding_type = enums.EncodingType.NONE
          >>> response = client.analyze_entities(document, encoding_type)
        Args:
          document (:class:`google.cloud.proto.language.v1.language_service_pb2.Document`): Input document.
          encoding_type (enum :class:`google.cloud.gapic.language.v1.enums.EncodingType`): The encoding type used by the API to calculate offsets.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.
        Returns:
          A :class:`google.cloud.proto.language.v1.language_service_pb2.AnalyzeEntitiesResponse` instance.
        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
          :exc:`ValueError` if the parameters are invalid.
        """
        # Create the request object.
        request = language_service_pb2.AnalyzeEntitiesRequest(
            document=document, encoding_type=encoding_type)
        return self._analyze_entities(request, options)
    def analyze_syntax(self, document, encoding_type, options=None):
        """
        Analyzes the syntax of the text and provides sentence boundaries and
        tokenization along with part of speech tags, dependency trees, and other
        properties.
        Example:
          >>> from google.cloud.gapic.language.v1 import language_service_client
          >>> from google.cloud.gapic.language.v1 import enums
          >>> from google.cloud.proto.language.v1 import language_service_pb2
          >>> client = language_service_client.LanguageServiceClient()
          >>> document = language_service_pb2.Document()
          >>> encoding_type = enums.EncodingType.NONE
          >>> response = client.analyze_syntax(document, encoding_type)
        Args:
          document (:class:`google.cloud.proto.language.v1.language_service_pb2.Document`): Input document.
          encoding_type (enum :class:`google.cloud.gapic.language.v1.enums.EncodingType`): The encoding type used by the API to calculate offsets.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.
        Returns:
          A :class:`google.cloud.proto.language.v1.language_service_pb2.AnalyzeSyntaxResponse` instance.
        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
          :exc:`ValueError` if the parameters are invalid.
        """
        # Create the request object.
        request = language_service_pb2.AnalyzeSyntaxRequest(
            document=document, encoding_type=encoding_type)
        return self._analyze_syntax(request, options)
    def annotate_text(self, document, features, encoding_type, options=None):
        """
        A convenience method that provides all the features that analyzeSentiment,
        analyzeEntities, and analyzeSyntax provide in one call.
        Example:
          >>> from google.cloud.gapic.language.v1 import language_service_client
          >>> from google.cloud.gapic.language.v1 import enums
          >>> from google.cloud.proto.language.v1 import language_service_pb2
          >>> client = language_service_client.LanguageServiceClient()
          >>> document = language_service_pb2.Document()
          >>> features = language_service_pb2.AnnotateTextRequest.Features()
          >>> encoding_type = enums.EncodingType.NONE
          >>> response = client.annotate_text(document, features, encoding_type)
        Args:
          document (:class:`google.cloud.proto.language.v1.language_service_pb2.Document`): Input document.
          features (:class:`google.cloud.proto.language.v1.language_service_pb2.AnnotateTextRequest.Features`): The enabled features.
          encoding_type (enum :class:`google.cloud.gapic.language.v1.enums.EncodingType`): The encoding type used by the API to calculate offsets.
          options (:class:`google.gax.CallOptions`): Overrides the default
            settings for this call, e.g, timeout, retries etc.
        Returns:
          A :class:`google.cloud.proto.language.v1.language_service_pb2.AnnotateTextResponse` instance.
        Raises:
          :exc:`google.gax.errors.GaxError` if the RPC is aborted.
          :exc:`ValueError` if the parameters are invalid.
        """
        # Create the request object.
        request = language_service_pb2.AnnotateTextRequest(
            document=document, features=features, encoding_type=encoding_type)
        return self._annotate_text(request, options)
| |
# file xmlmap/fields.py
#
# Copyright 2010 Emory University General Library
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import logging
from lxml import etree
from lxml.builder import ElementMaker
from eulcore.xpath import ast, parse, serialize
from types import ListType, FloatType
__all__ = [
'StringField', 'StringListField',
'IntegerField', 'IntegerListField',
'NodeField', 'NodeListField',
'ItemField', 'SimpleBooleanField',
# NOTE: DateField and DateListField are undertested and underdocumented. If
# you really need them, you should import them explicitly. Or even better,
# flesh them out so they can be properly released.
'SchemaField',
]
logger = logging.getLogger(__name__)
class Field(object):
    """Base class for all xmlmap fields.
    Takes an optional ``required`` value to indicate that the field is required
    or not required in the XML. By default, required is ``None``, which indicates
    that it is unknown whether the field is required or not. The required value
    for an xmlmap field should not conflict with the schema or DTD for that xml,
    if there is one.
    """
    # track each time a Field instance is created, to retain order
    creation_counter = 0
    def __init__(self, xpath, manager, mapper, required=None, verbose_name=None,
                 help_text=None):
        # compile xpath in order to catch an invalid xpath at load time
        etree.XPath(xpath)
        # NOTE: not saving compiled xpath because namespaces must be
        # passed in at compile time when evaluating an etree.XPath on a node
        self.xpath = xpath
        self.manager = manager    # node manager: single- vs list-valued behavior
        self.mapper = mapper      # converts between xml nodes and python values
        self.required = required
        self.verbose_name = verbose_name
        self.help_text = help_text
        # pre-parse the xpath for setters, etc
        self.parsed_xpath = parse(xpath)
        # adjust creation counter, save local copy of current count
        self.creation_counter = Field.creation_counter
        Field.creation_counter += 1
    def get_for_node(self, node, context):
        # retrieve this field's python value from the given node via the manager
        return self.manager.get(self.xpath, node, context, self.mapper, self.parsed_xpath)
    def set_for_node(self, node, context, value):
        # set this field's value in the xml under the given node via the manager
        return self.manager.set(self.xpath, self.parsed_xpath, node, context, self.mapper, value)
    def delete_for_node(self, node, context):
        # remove this field's node/attribute from the xml via the manager
        return self.manager.delete(self.xpath, self.parsed_xpath, node, context, self.mapper)
# data mappers to translate between identified xml nodes and Python values
class Mapper(object):
    """Base mapper; subclasses convert between xml nodes and Python values."""
    def to_xml(self, value):
        # generic serialization: None passes through, everything else
        # is rendered as a unicode string
        if value is None:
            return None
        return unicode(value)
class StringMapper(Mapper):
    """Maps matched nodes to strings via the xpath string() function."""
    XPATH = etree.XPath('string()')
    def __init__(self, normalize=False):
        # optionally collapse whitespace when converting to python
        if normalize:
            self.XPATH = etree.XPath('normalize-space(string())')
    def to_python(self, node):
        # None and plain ('smart') strings pass through unchanged;
        # elements are converted with string()
        if node is None or isinstance(node, basestring):
            return node
        return self.XPATH(node)
class IntegerMapper(Mapper):
    """Maps matched nodes to ints via the xpath number() function."""
    XPATH = etree.XPath('number()')
    def to_python(self, node):
        if node is None:
            return None
        try:
            # strings and xpath floats (e.g. results of count()) convert
            # directly; anything else is evaluated with number() first
            if isinstance(node, (basestring, FloatType)):
                return int(node)
            return int(self.XPATH(node))
        except ValueError:
            # anything that can't be converted to an Integer
            return None
class SimpleBooleanMapper(Mapper):
    """Maps a configured pair of XML values to True/False."""
    XPATH = etree.XPath('string()')
    def __init__(self, true, false):
        self.true = true
        self.false = false
    def to_python(self, node):
        # an absent node means False when no explicit false value is configured
        if node is None and self.false is None:
            return False
        value = node if isinstance(node, basestring) else self.XPATH(node)
        if value == str(self.true):
            return True
        if self.false is not None and value == str(self.false):
            return False
        # neither configured value matched
        raise Exception("Boolean field value '%s' is neither '%s' nor '%s'" % (value, self.true, self.false))
    def to_xml(self, value):
        if value:
            return str(self.true)
        if self.false is not None:
            return str(self.false)
        return None
class DateMapper(object):
    """Maps matched nodes to naive :class:`datetime.datetime` instances.

    Parses ISO-8601-like values; a trailing 'Z' or a numeric timezone
    offset is stripped before parsing, so the result carries no tzinfo.
    """
    XPATH = etree.XPath('string()')
    def to_python(self, node):
        if node is None:
            return None
        if isinstance(node, basestring):
            rep = node
        else:
            rep = self.XPATH(node)
        if rep.endswith('Z'): # strip Z
            rep = rep[:-1]
        if rep[-6] in '+-': # strip tz
            rep = rep[:-6]
        try:
            dt = datetime.strptime(rep, '%Y-%m-%dT%H:%M:%S')
        except ValueError:
            # if initial format fails, attempt to parse with microseconds.
            # (the original ``except ValueError, v`` bound an unused
            # variable and is a syntax error on Python 3)
            dt = datetime.strptime(rep, '%Y-%m-%dT%H:%M:%S.%f')
        return dt
    def to_xml(self, dt):
        # NOTE: untested! this is probably close to what we need, but should be tested
        return unicode(dt.isoformat())
class NullMapper(object):
    """Identity mapper: returns the matched node unchanged."""
    def to_python(self, node):
        return node
class NodeMapper(object):
    """Maps matched nodes to instances of a given node class."""
    def __init__(self, node_class):
        self.node_class = node_class
    def to_python(self, node):
        # wrap the raw node in the configured class, passing None through
        return None if node is None else self.node_class(node)
    def to_xml(self, xmlobject):
        # falsy xmlobjects serialize to None (implicit return)
        if xmlobject:
            return xmlobject.node
# internal xml utility functions for use by managers
def _find_terminal_step(xast):
    """Return the right-most Step of a parsed xpath, or None."""
    # walk down the right side of any '/' or '//' path expressions
    while isinstance(xast, ast.BinaryExpression) and xast.op in ('/', '//'):
        xast = xast.right
    return xast if isinstance(xast, ast.Step) else None
def _find_xml_node(xpath, node, context):
    """Evaluate *xpath* on *node*; return the first match, or None.

    In some cases the xpath returns a scalar value (string/number)
    rather than a node list; a truthy scalar is returned as-is.
    """
    matches = node.xpath(xpath, **context)
    if not matches:
        return None
    if isinstance(matches, ListType):
        return matches[0]
    return matches
def _create_xml_node(xast, node, context, insert_index=None):
    """Create the xml node(s) described by parsed xpath *xast* under *node*.

    Handles simple child/attribute steps (including constructible
    predicates), text() steps (no creation needed - the parent node is
    returned), and '/' paths (missing left segments are created
    recursively).  Raises Exception for any other xpath shape.
    """
    if isinstance(xast, ast.Step):
        if isinstance(xast.node_test, ast.NameTest):
            # check the predicates (if any) to verify they're constructable
            for pred in xast.predicates:
                if not _predicate_is_constructible(pred):
                    msg = ("Missing element for '%s', and node creation is " +
                           "supported only for simple child and attribute " +
                           "nodes with simple predicates.") % (serialize(xast),)
                    raise Exception(msg)
            # create the node itself
            if xast.axis in (None, 'child'):
                new_node = _create_child_node(node, context, xast, insert_index)
            elif xast.axis in ('@', 'attribute'):
                new_node = _create_attribute_node(node, context, xast)
            # and create any nodes necessary for the predicates
            for pred in xast.predicates:
                _construct_predicate(pred, new_node, context)
            return new_node
        # if this is a text() node, we don't need to create anything further
        # return the node that will be parent to text()
        elif _is_text_nodetest(xast):
            return node
    elif isinstance(xast, ast.BinaryExpression):
        if xast.op == '/':
            # create the left segment first (if missing), then recurse right
            left_xpath = serialize(xast.left)
            left_node = _find_xml_node(left_xpath, node, context)
            if left_node is None:
                left_node = _create_xml_node(xast.left, node, context)
            return _create_xml_node(xast.right, left_node, context)
    # anything else, throw an exception:
    msg = ("Missing element for '%s', and node creation is supported " + \
           "only for simple child and attribute nodes.") % (serialize(xast),)
    raise Exception(msg)
def _create_child_node(node, context, step, insert_index=None):
    """Create and attach a child element described by *step* under *node*.

    Honors any namespace prefix on the step; when insert_index is given
    the child is inserted at that position, otherwise it is appended.
    """
    maker_opts = {}
    ns_uri = None
    if 'namespaces' in context:
        maker_opts['nsmap'] = context['namespaces']
        if step.node_test.prefix:
            ns_uri = context['namespaces'][step.node_test.prefix]
    E = ElementMaker(namespace=ns_uri, **maker_opts)
    new_node = E(step.node_test.name)
    if insert_index is None:
        node.append(new_node)
    else:
        node.insert(insert_index, new_node)
    return new_node
def _create_attribute_node(node, context, step):
    """Create an empty attribute described by *step* on *node* and return it."""
    node_name, node_xpath, nsmap = _get_attribute_name(step, context)
    # create an empty attribute, then re-fetch it via xpath so a 'smart'
    # string is returned and can be set normally
    node.set(node_name, '')
    return node.xpath(node_xpath, namespaces=nsmap)[0]
def _predicate_is_constructible(pred):
    """Determine whether an xpath predicate is simple enough to construct.

    Only simple child/attribute name-test steps, '/' paths over them,
    and '=' comparisons against literals or variables qualify.  NOTE:
    any unrecognised predicate shape falls through to the final
    ``return True``, so unknown constructs are assumed constructible.
    """
    if isinstance(pred, ast.Step):
        # only child and attribute for now
        if pred.axis not in (None, 'child', '@', 'attribute'):
            return False
        # no node tests for now: only name tests
        if not isinstance(pred.node_test, ast.NameTest):
            return False
        # only constructible if its own predicates are
        if any((not _predicate_is_constructible(sub_pred)
                for sub_pred in pred.predicates)):
            return False
    elif isinstance(pred, ast.BinaryExpression):
        if pred.op == '/':
            # path expressions are constructible if each side is
            if not _predicate_is_constructible(pred.left):
                return False
            if not _predicate_is_constructible(pred.right):
                return False
        elif pred.op == '=':
            # = expressions are constructible for now only if the left side
            # is constructible and the right side is a literal or variable
            if not _predicate_is_constructible(pred.left):
                return False
            if not isinstance(pred.right,
                              (int, long, basestring, ast.VariableReference)):
                return False
    # otherwise, i guess we're ok
    return True
def _construct_predicate(xast, node, context):
    """Create the xml described by an xpath predicate under *node*.

    Handles simple steps, '/' path predicates, and '=' value
    assignments; returns the leaf node created.
    """
    if isinstance(xast, ast.Step):
        return _create_xml_node(xast, node, context)
    elif isinstance(xast, ast.BinaryExpression):
        if xast.op == '/':
            left_leaf = _construct_predicate(xast.left, node, context)
            # BUG FIX: the original passed the undefined name 'left_node'
            # here, raising NameError for any '/' path predicate.
            right_node = _construct_predicate(xast.right, left_leaf, context)
            return right_node
        elif xast.op == '=':
            left_leaf = _construct_predicate(xast.left, node, context)
            step = _find_terminal_step(xast.left)
            if isinstance(xast.right, ast.VariableReference):
                name = xast.right.name
                ctxval = context.get(name, None)
                if ctxval is None:
                    # variable names parse as (prefix, localname) pairs;
                    # fall back to the local name lookup
                    ctxval = context[name[1]]
                xvalue = str(ctxval)
            else:
                xvalue = str(xast.right)
            _set_in_xml(left_leaf, xvalue, context, step)
            return left_leaf
def _set_in_xml(node, val, context, step):
    """Set the value of a matched node (element, attribute, or text).

    Elements accept either another element (text, children, and
    attributes are grafted in) or a string; lxml 'smart' strings are set
    on their parent as text or as an attribute depending on the terminal
    *step*.
    """
    # node could be either an element or an attribute
    if isinstance(node, etree._Element): # if it's an element
        if isinstance(val, etree._Element):
            # remove node children and graft val children in.
            node.clear()
            node.text = val.text
            for child in val:
                node.append(child)
            # copy attributes too; the original loop variable shadowed
            # the 'val' parameter, which worked but was fragile
            for attr_name, attr_val in val.attrib.iteritems():
                node.set(attr_name, attr_val)
        else: # set node contents to string val
            if not list(node): # no child elements
                node.text = val
            else:
                raise Exception("Cannot set string value - not a text node!")
    # by default, etree returns a "smart" string for attributes and text.
    # if it's not an element (above) then it is either a text node
    # or an attribute
    elif hasattr(node, 'getparent'):
        # if node test is text(), set the text of the parent node
        if _is_text_nodetest(step):
            node.getparent().text = val
        # otherwise, treat it as an attribute
        else:
            attribute, node_xpath, nsmap = _get_attribute_name(step, context)
            node.getparent().set(attribute, val)
def _remove_xml(xast, node, context):
    'Remove a node or attribute; returns True when something is deleted'
    if isinstance(xast, ast.Step):
        if isinstance(xast.node_test, ast.NameTest):
            # dispatch on axis: child element vs attribute
            if xast.axis in (None, 'child'):
                return _remove_child_node(node, context, xast)
            if xast.axis in ('@', 'attribute'):
                return _remove_attribute_node(node, context, xast)
        elif _is_text_nodetest(xast):
            # special case: text() can't be removed outright, so at
            # least clear out any value in the text node
            node.text = ''
            return True
    elif isinstance(xast, ast.BinaryExpression) and xast.op == '/':
        # descend the left side of the path, then remove on the right
        left_node = _find_xml_node(serialize(xast.left), node, context)
        if left_node is not None:
            return _remove_xml(xast.right, left_node, context)
    return False
def _remove_child_node(node, context, xast):
    """Remove the first child of *node* matching *xast*; True if removed."""
    child = _find_xml_node(serialize(xast), node, context)
    if child is not None:
        node.remove(child)
        return True
def _remove_attribute_node(node, context, xast):
    """Remove the attribute described by *xast*; True if it was removed.

    Previously a missing attribute raised KeyError; now it is reported
    as not-removed (falsy) so callers can warn instead of crashing.
    """
    node_name, node_xpath, nsmap = _get_attribute_name(xast, context)
    if node_name in node.attrib:
        del node.attrib[node_name]
        return True
    return False
def _get_attribute_name(step, context):
# calculate attribute name, xpath, and nsmap based on node info and context namespaces
if not step.node_test.prefix:
nsmap = {}
ns_uri = None
node_name = step.node_test.name
node_xpath = '@%s' % node_name
else:
# if node has a prefix, the namespace *should* be defined in context
if 'namespaces' in context and step.node_test.prefix in context['namespaces']:
ns_uri = context['namespaces'][step.node_test.prefix]
else:
ns_uri = None
# we could throw an exception here if ns_uri wasn't found, but
# for now assume the user knows what he's doing...
node_xpath = '@%s:%s' % (step.node_test.prefix, step.node_test.name)
node_name = '{%s}%s' % (ns_uri, step.node_test.name)
nsmap = {step.node_test.prefix: ns_uri}
return node_name, node_xpath, nsmap
def _is_text_nodetest(step):
'''Fields selected with an xpath of text() need special handling; Check if
a xpath step is a text() node test. '''
try:
return step.node_test.name == 'text'
except:
pass
return False
# managers to map operations to either a single identified node or a
# list of them
class SingleNodeManager(object):
    """Map field get/set/create/delete operations onto a single matched xml node."""
    def __init__(self, instantiate_on_get=False):
        # DEPRECATED: don't use instantiate_on_get. Use create_for_node() as
        # described in XmlObjectType.__new__ comments and used by NodeField.
        self.instantiate_on_get = instantiate_on_get
    def get(self, xpath, node, context, mapper, xast):
        # return the mapped python value of the first xpath match
        match = _find_xml_node(xpath, node, context)
        if match is None and self.instantiate_on_get:
            return mapper.to_python(_create_xml_node(xast, node, context))
        # else, non-None match, or not instantiate
        return mapper.to_python(match)
    def set(self, xpath, xast, node, context, mapper, value):
        # setting None removes the node; otherwise create it if missing
        # and write the mapped value into the xml
        xvalue = mapper.to_xml(value)
        match = _find_xml_node(xpath, node, context)
        if xvalue is None:
            # match must be None. if it exists, delete it.
            if match is not None:
                removed = _remove_xml(xast, node, context)
                # if a node can't be removed, warn since it could have unexpected results
                if not removed:
                    logger.warn('''Could not remove xml for '%s' from %r''' % \
                        (serialize(xast), node))
        else:
            if match is None:
                match = _create_xml_node(xast, node, context)
            # terminal (rightmost) step informs how we update the xml
            step = _find_terminal_step(xast)
            _set_in_xml(match, xvalue, context, step)
    def create(self, xpath, xast, node, context):
        # most clients will want to use get() or set(), but occasially we
        # just want a basic node to match the xpath.
        match = _find_xml_node(xpath, node, context)
        if match is not None:
            return match
        return _create_xml_node(xast, node, context)
    def delete(self, xpath, xast, node, context, mapper):
        # remove the matched node, if any; no-op when nothing matches
        match = _find_xml_node(xpath, node, context)
        # match must be None. if it exists, delete it.
        if match is not None:
            _remove_xml(xast, node, context)
class NodeList(object):
"""Custom List-like object to handle ListFields like :class:`IntegerListField`,
:class:`StringListField`, and :class:`NodeListField`, which allows for getting,
setting, and deleting list members. :class:`NodeList` should **not** be
initialized directly, but instead should only be accessed as the return type
from a ListField.
Supports common list functions and operators, including the following: len();
**in**; equal and not equal comparison to standard python Lists. Items can
be retrieved, set, and deleted by index, but slice indexing is not supported.
Supports the methods that Python documentation indicates should be provided
by Mutable sequences, with the exceptions of reverse and sort; in the
particular case of :class:`NodeListField`, it is unclear how a list of
:class:`~eulcore.xmlmap.XmlObject` should be sorted, or whether or not such
a thing would be useful or meaningful for XML content.
When a new element is appended to a :class:`~eulcore.xmlmap.fields.NodeList`,
it will be added to the XML immediately after the last element in the list.
In the case of an empty list, the new content will be appended at the end of
the appropriate XML parent node. For XML content where element order is important
for schema validity, extra care may be required when constructing content.
"""
def __init__(self, xpath, node, context, mapper, xast):
self.xpath = xpath
self.node = node
self.context = context
self.mapper = mapper
self.xast = xast
@property
def matches(self):
# current matches from the xml tree
# NOTE: retrieving from the xml every time rather than caching
# because the xml document could change, and we want the latest data
return self.node.xpath(self.xpath, **self.context)
@property
def data(self):
# data in list form - basis for several other list-y functions
return [ self.mapper.to_python(match) for match in self.matches ]
def __str__(self):
return str(self.data)
def __repr__(self):
return str(self.data)
def __len__(self):
return len(self.data)
def __contains__(self, item):
return item in self.data
def __iter__(self):
for item in self.matches:
yield self.mapper.to_python(item)
def __eq__(self, other):
# FIXME: is any other comparison possible ?
return self.data == other
def __ne__(self, other):
return self.data != other
def _check_key_type(self, key):
# check argument type for getitem, setitem, delitem
if not isinstance(key, (slice, int, long)):
raise TypeError
assert not isinstance(key, slice), "Slice indexing is not supported"
def __getitem__(self, key):
self._check_key_type(key)
return self.mapper.to_python(self.matches[key])
def __setitem__(self, key, value):
    """Set the value at index *key*.

    Setting at exactly ``len(self)`` (one past the end) creates a new
    xml node at the append position; any index further past the end
    raises IndexError. Existing indexes are updated in place.
    """
    self._check_key_type(key)
    if key == len(self.matches):
        # just after the end of the list - create a new node
        if len(self.matches):
            # if there are existing nodes, use last element in list
            # to determine where the new node should be created
            last_item = self.matches[-1]
            position = last_item.getparent().index(last_item)
            insert_index = position + 1
        else:
            # empty list: None means append at the end of the parent node
            insert_index = None
        match = _create_xml_node(self.xast, self.node, self.context, insert_index)
    elif key > len(self.matches):
        raise IndexError("Can't set at index %d - out of range" % key )
    else:
        # existing index: update that node in place
        match = self.matches[key]
    if isinstance(self.mapper, NodeMapper):
        # if this is a NodeListField, the value should be an xmlobject
        # replace the indexed node with the node specified
        # NOTE: lxml does not require dom-style import before append/replace
        match.getparent().replace(match, value.node)
    else: # not a NodeListField - set single-node value in xml
        # terminal (rightmost) step informs how we update the xml
        step = _find_terminal_step(self.xast)
        _set_in_xml(match, self.mapper.to_xml(value), self.context, step)
def __delitem__(self, key):
    """Remove the xml node at index *key* from its parent element."""
    self._check_key_type(key)
    if key >= len(self.matches):
        raise IndexError("Can't delete at index %d - out of range" % key )
    target = self.matches[key]
    target.getparent().remove(target)
# according to python docs, Mutable sequences should provide the following methods:
# append, count, index, extend, insert, pop, remove, reverse and sort
# NOTE: not implementing sort/reverse at this time; not clear
def count(self, x):
"Return the number of times x appears in the list."
return self.data.count(x)
def append(self, x):
    "Add an item to the end of the list."
    # assigning one index past the end triggers new-node creation
    self[len(self)] = x
def index(self, x):
"""Return the index in the list of the first item whose value is x,
or error if there is no such item."""
return self.data.index(x)
def remove(self, x):
    """Remove the first item from the list whose value is x,
    or error if there is no such item."""
    position = self.index(x)
    del self[position]
def pop(self, i=None):
    """Remove the item at the given position in the list, and return it.
    If no index is specified, removes and returns the last item in the list."""
    index = len(self) - 1 if i is None else i
    value = self[index]
    del self[index]
    return value
def extend(self, list):
    """Extend the list by appending all the items in the given list."""
    # NOTE: parameter name shadows the builtin ``list``; kept unchanged
    # for backwards compatibility with keyword callers
    for element in list:
        self.append(element)
def insert(self, i, x):
    """Insert an item (x) at a given position (i).

    Inserting at ``len(self)`` (including into an empty list) is
    equivalent to :meth:`append`; otherwise a new xml node is created
    at the position of the current i-th node and then populated via
    the normal ``__setitem__`` logic.
    """
    if i == len(self): # end of list or empty list: append
        self.append(x)
    elif len(self.matches) > i:
        # create a new xml node at the requested position
        insert_index = self.matches[i].getparent().index(self.matches[i])
        _create_xml_node(self.xast, self.node, self.context, insert_index)
        # then use default set logic
        self[i] = x
    else:
        raise IndexError("Can't insert '%s' at index %d - list length is only %d" \
            % (x, i, len(self)))
class NodeListManager(object):
    """Manager for list-valued xmlmap fields: builds, clears, and
    replaces :class:`NodeList` instances for a mapped xpath."""

    def get(self, xpath, node, context, mapper, xast):
        "Return a :class:`NodeList` for the mapped xpath on *node*."
        return NodeList(xpath, node, context, mapper, xast)

    def delete(self, xpath, xast, node, context, mapper):
        "Remove every node in the mapped list from the xml."
        current_list = self.get(xpath, node, context, mapper, xast)
        # FIX: previous code iterated the list while removing from it
        # ([current_list.remove(x) for x in current_list]), which skips
        # every other element; pop until empty instead
        while len(current_list):
            current_list.pop()

    def set(self, xpath, xast, node, context, mapper, value):
        "Replace the current list contents with the values in *value*."
        current_list = self.get(xpath, node, context, mapper, xast)
        # for each value in the new list, set the equivalent value
        # in the NodeList
        for i in range(len(value)):
            current_list[i] = value[i]
        # remove any extra values from the end of the current list
        while len(current_list) > len(value):
            current_list.pop()
# finished field classes mixing a manager and a mapper
class StringField(Field):
    """Map an XPath expression to a single Python string.

    Evaluates to `None` when the XPath expression matches nothing.

    :param normalize: normalize whitespace in the string contents
        (default: do not normalize)
    :param choices: optional list of choices restricting possible values

    Supports setting values for attributes, empty nodes, or text-only nodes.
    """
    def __init__(self, xpath, normalize=False, choices=None, *args, **kwargs):
        self.choices = choices
        # FIXME: handle at a higher level, common to all/more field types?
        # does choice list need to be checked in the python ?
        mapper = StringMapper(normalize=normalize)
        super(StringField, self).__init__(
            xpath, manager=SingleNodeManager(), mapper=mapper,
            *args, **kwargs)
class StringListField(Field):
    """Map an XPath expression to a list of Python strings.

    Evaluates to an empty list when the XPath expression matches nothing.

    :param normalize: normalize whitespace in the string contents
        (default: do not normalize)
    :param choices: optional list of choices restricting possible values

    Actual return type is :class:`~eulcore.xmlmap.fields.NodeList`, which
    behaves like a regular Python list and supports set and delete.
    """
    def __init__(self, xpath, normalize=False, choices=None, *args, **kwargs):
        self.choices = choices
        mapper = StringMapper(normalize=normalize)
        super(StringListField, self).__init__(
            xpath, manager=NodeListManager(), mapper=mapper,
            *args, **kwargs)
class IntegerField(Field):
    """Map an XPath expression to a single Python integer.

    Evaluates to `None` when the XPath expression matches nothing.
    Supports setting values for attributes, empty nodes, or text-only nodes.
    """
    def __init__(self, xpath, *args, **kwargs):
        super(IntegerField, self).__init__(
            xpath, manager=SingleNodeManager(), mapper=IntegerMapper(),
            *args, **kwargs)
class IntegerListField(Field):
    """Map an XPath expression to a list of Python integers.

    Evaluates to an empty list when the XPath expression matches nothing.
    Actual return type is :class:`~eulcore.xmlmap.fields.NodeList`, which
    behaves like a regular Python list and supports set and delete.
    """
    def __init__(self, xpath, *args, **kwargs):
        super(IntegerListField, self).__init__(
            xpath, manager=NodeListManager(), mapper=IntegerMapper(),
            *args, **kwargs)
class SimpleBooleanField(Field):
    """Map an XPath expression to a Python boolean.

    :param true: value stored in / compared against the xml for ``True``
    :param false: value stored in / compared against the xml for ``False``

    Only handles simple booleans that can be read and set via string
    comparison. Supports setting values for attributes, empty nodes, or
    text-only nodes.
    """
    def __init__(self, xpath, true, false, *args, **kwargs):
        mapper = SimpleBooleanMapper(true, false)
        super(SimpleBooleanField, self).__init__(
            xpath, manager=SingleNodeManager(), mapper=mapper,
            *args, **kwargs)
class DateField(Field):
    """Map an XPath expression to a single Python `datetime.datetime`.

    Evaluates to `None` when the XPath expression matches nothing.

    .. WARNING::
        DateField processing is minimal, undocumented, and liable to change.
        It is not part of any official release. Use it at your own risk.
    """
    def __init__(self, xpath, *args, **kwargs):
        super(DateField, self).__init__(
            xpath, manager=SingleNodeManager(), mapper=DateMapper(),
            *args, **kwargs)
class DateListField(Field):
    """Map an XPath expression to a list of Python `datetime.datetime`
    objects.

    Evaluates to an empty list when the XPath expression matches nothing.

    .. WARNING::
        DateListField processing is minimal, undocumented, and liable to
        change. It is not part of any official release. Use it at your own
        risk.

    Actual return type is :class:`~eulcore.xmlmap.fields.NodeList`, which
    behaves like a regular Python list and supports set and delete.
    """
    def __init__(self, xpath, *args, **kwargs):
        super(DateListField, self).__init__(
            xpath, manager=NodeListManager(), mapper=DateMapper(),
            *args, **kwargs)
class NodeField(Field):
    """Map an XPath expression to a single :class:`XmlObject` subclass
    instance; evaluates to `None` when the expression matches nothing.

    ``node_class`` is normally a class. As a special exception it may be
    the string ``"self"``, making the field refer recursively to objects
    of its containing :class:`XmlObject` class.

    An :class:`XmlObject` with a NodeField named ``foo`` automatically
    gains a ``create_foo()`` method alongside the ``foo`` property;
    calling it creates the child element if missing and has no effect
    when the element is already present.

    The ``instantiate_on_get`` flag (create a non-existent node when the
    field is accessed) is deprecated: create the node explicitly with
    ``create_foo()`` instead.
    """
    def __init__(self, xpath, node_class, instantiate_on_get=False, *args, **kwargs):
        manager = SingleNodeManager(instantiate_on_get=instantiate_on_get)
        super(NodeField, self).__init__(
            xpath, manager=manager, mapper=NodeMapper(node_class),
            *args, **kwargs)

    def _get_node_class(self):
        return self.mapper.node_class

    def _set_node_class(self, val):
        self.mapper.node_class = val

    # node_class proxies through to the underlying NodeMapper
    node_class = property(_get_node_class, _set_node_class)

    def create_for_node(self, node, context):
        "Create the mapped node under *node* if it does not already exist."
        return self.manager.create(self.xpath, self.parsed_xpath, node, context)
class NodeListField(Field):
    """Map an XPath expression to a list of :class:`XmlObject` subclass
    instances; evaluates to an empty list when the expression matches
    nothing.

    ``node_class`` is normally a class. As a special exception it may be
    the string ``"self"``, making the field refer recursively to objects
    of its containing :class:`XmlObject` class.

    Actual return type is :class:`~eulcore.xmlmap.fields.NodeList`, which
    behaves like a regular Python list and supports set and delete.
    """
    def __init__(self, xpath, node_class, *args, **kwargs):
        super(NodeListField, self).__init__(
            xpath, manager=NodeListManager(), mapper=NodeMapper(node_class),
            *args, **kwargs)

    def _get_node_class(self):
        return self.mapper.node_class

    def _set_node_class(self, val):
        self.mapper.node_class = val

    # node_class proxies through to the underlying NodeMapper
    node_class = property(_get_node_class, _set_node_class)
class ItemField(Field):
    """Access the results of an XPath expression directly: no conversion
    is applied to the evaluated result."""
    def __init__(self, xpath, *args, **kwargs):
        super(ItemField, self).__init__(
            xpath, manager=SingleNodeManager(), mapper=NullMapper(),
            *args, **kwargs)
class SchemaField(Field):
    """Schema-based field. At class definition time, a SchemaField will be
    **replaced** with the appropriate :class:`eulcore.xmlmap.fields.Field`
    type based on the schema type definition.

    Takes an xpath (which will be passed on to the real Field init) and a
    schema type definition name. If the schema type has enumerated
    restricted values, those will be passed as choices to the Field.

    Currently only supports simple string-based schema types.
    """
    def __init__(self, xpath, schema_type, *args, **kwargs):
        self.xpath = xpath
        self.schema_type = schema_type
        # no manager/mapper: this placeholder is swapped for a real Field
        super(SchemaField, self).__init__(xpath, manager=None, mapper=None,
                                          *args, **kwargs)

    def get_field(self, schema):
        """Get the requested type definition from the schema and return the
        appropriate :class:`~eulcore.xmlmap.fields.Field`.

        :param schema: instance of :class:`eulcore.xmlmap.core.XsdSchema`
        :rtype: :class:`eulcore.xmlmap.fields.Field`
        :raises Exception: when the schema base type is not yet supported
        """
        # renamed local from ``type`` to avoid shadowing the builtin
        type_def = schema.get_type(self.schema_type)
        kwargs = {}
        if type_def.restricted_values:
            # field has a restriction with enumerated values - pass as
            # choices to the field. An empty value goes at the beginning of
            # the list for the unset value; for required fields this forces
            # the user to select a value instead of defaulting to the first.
            choices = list(type_def.restricted_values)
            # restricted values could include a blank; if it's there,
            # remove it so we don't end up with two
            if '' in choices:
                choices.remove('')
            choices.insert(0, '')  # blank choice at the beginning of the list
            kwargs['choices'] = choices
        # TODO: possibly also useful to look for pattern restrictions
        basetype = type_def.base_type()
        if basetype == 'string':
            newfield = StringField(self.xpath, required=self.required, **kwargs)
            # copy original creation counter to newly created field
            # to preserve declaration order
            newfield.creation_counter = self.creation_counter
            return newfield
        else:
            raise Exception("basetype %s is not yet supported by SchemaField" % basetype)
# ---------------------------------------------------------------------------
# -*- coding: utf-8 -*-
'''
Specto Add-on
Copyright (C) 2016 mrknow
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,sys,re,urllib,urlparse,datetime
import json
import re
# determine the requested plugin action from the invocation query string
# (when run as a kodi plugin, sys.argv[2] carries the '?action=...' part);
# fall back to None for any malformed/missing invocation
try: action = dict(urlparse.parse_qsl(sys.argv[2].replace('?','')))['action']
except: action = None
from resources.lib.lib import control
from resources.lib.lib import client
from resources.lib.lib import cache
#from resources.lib.lib import favourites
from resources.lib.lib import workers
from resources.lib.sources import looknij
from resources.lib.sources import videostar
from resources.lib.sources import yoy
from resources.lib.sources import weeb
from resources.lib.sources import wizja
from resources.lib.sources import ipla
from resources.lib.sources import telewizjadanet
from resources.lib.sources import pierwsza
from resources.lib.sources import itivi
from resources.lib.lib import views
class tv:
def __init__(self):
self.list = []
self.datetime = (datetime.datetime.utcnow() - datetime.timedelta(hours = 5))
self.systime = (self.datetime).strftime('%Y%m%d%H%M%S%f')
self.telewizjadanet_link = 'http://www.telewizjada.net'
self.videostar_link = 'https://api.videostar.pl'
self.yoy_link = 'http://yoy.tv'
self.weeb_link = 'http://weeb.tv'
self.wizja_link = 'http://wizja.tv'
self.eskago_link = 'http://www.eskago.pl/tv'
self.itivi_link = 'http://itivi.pl/program-telewizyjny/'
self.looknij_link = 'https://looknij.in'
self.ipla = 'http://ipla.tv/'
self.pierwsza_link = 'http://pierwsza.tv'
def get(self, url, idx=True):
    """Build the channel list for *url* by dispatching to the matching
    service scraper, then optionally render it as a directory.

    :param url: a full service url, or a short name such as ``'weeb'``
        that resolves through the corresponding ``<name>_link`` attribute
    :param idx: when True, also render the result via movieDirectory
    :return: the accumulated ``self.list``, or None on error
    """
    try:
        # allow short names: e.g. 'yoy' -> self.yoy_link
        try: url = getattr(self, url + '_link')
        except: pass
        try: u = urlparse.urlparse(url).netloc.lower()
        except: pass
        # NOTE(review): `url in link` is a substring test, not equality;
        # it works because url normally *is* the link attribute -- confirm
        # behavior for partial or overlapping urls
        if url in self.telewizjadanet_link:
            control.log('TUU')
            self.telewizjadanet_list(url)
        if url in self.pierwsza_link:
            self.pierwsza_list(url)
        if url in self.ipla:
            self.ipla_list(url)
        if url in self.itivi_link:
            self.itivi_list(url)
        if url in self.eskago_link:
            control.log('1AAAi %s' % url)
            self.eskago_list(url)
        if url in self.videostar_link:
            self.videostar_list(url)
        if url in self.yoy_link:
            self.yoy_list(url)
        if url in self.weeb_link:
            self.weeb_list(url)
        if url in self.wizja_link:
            self.wizja_list(url)
        if url in self.looknij_link:
            # looknij needs TLS features not present before python 2.7.9
            if sys.version_info < (2, 7, 9):
                mystring = 'Not Supported python version %s.%s.%s Minimum: 2.7.9' % (sys.version_info[:3])
                control.dialog.ok(control.addonInfo('name'), mystring.encode('utf-8'), '')
            self.looknij_list(url)
        if idx == True: self.movieDirectory(self.list)
        return self.list
    except Exception as e:
        control.log('Error: %s' % e)
        pass
def telewizjadanet_list(self, url):
    """Populate self.list with telewizjada.net channels, sorted by title."""
    try:
        self.list = telewizjadanet.chanels()
        import operator
        self.list.sort(key=operator.itemgetter('title'))
        control.log('Ile %s' % len(self.list))
        return self.list
    except Exception as e:
        control.log('ERR TELEWIZJADA %s' % e)
def ipla_list(self, url):
    """Populate self.list with ipla.tv channels, sorted by title."""
    try:
        self.list = ipla.ipla_chanels()
        import operator
        self.list.sort(key=operator.itemgetter('title'))
        return self.list
    except Exception as e:
        control.log('ERR IPLA %s' % e)
def looknij_list(self, url):
    """Populate self.list with looknij.in channels, sorted by title."""
    try:
        # NOTE(review): helper really is named 'weebchanels' in the
        # looknij module -- presumably copied from the weeb scraper
        self.list = looknij.weebchanels()
        import operator
        self.list.sort(key=operator.itemgetter('title'))
        return self.list
    except:
        pass
def itivi_list(self, url):
    """Populate self.list with itivi.pl channels, sorted by title.

    Tries the itivi helper module first; on any failure falls back to
    scraping the channel grid directly from the page markup at *url*.
    """
    items = []
    next = ''
    # primary source: the itivi helper module
    try:
        items = itivi.itivichanels()
        self.list = items
        import operator
        self.list.sort(key=operator.itemgetter('title'))
        return self.list
    except:
        pass
    # fallback: scrape channel anchors straight out of the page html
    try:
        result = client.request(url)
        result = re.compile('<a href="([^"]+)"><img alt="([^"]+)" src="([^"]+)" style="width:155px;height:155px; margin: 30px; border: 1px solid #CCC; border-radius: 30px;"/></a>').findall(result)
        if len(result) > 0:
            for i in result:
                try:
                    control.log('i %s' % i[1])
                    id = str(i[0])
                    id = id.encode('utf-8')
                    title = i[1].replace('Telewizja online - ', '').replace('_', ' ')
                    title = client.replaceHTMLCodes(title)
                    title = title.encode('utf-8')
                    poster = '0'
                    try:
                        poster = i[2]
                    except: pass
                    poster = poster.encode('utf-8')
                    try:
                        fanart = control.addonFanart()
                        fanart = fanart.encode('utf-8')
                    except:
                        fanart = '0'
                        fanart = fanart.encode('utf-8')
                    plot = '0'
                    plot = plot.encode('utf-8')
                    tagline = '0'
                    tagline = client.replaceHTMLCodes(tagline)
                    try: tagline = tagline.encode('utf-8')
                    except: pass
                    self.list.append({'title': title, 'originaltitle': title, 'genre': '0', 'plot': plot, 'name': title, 'tagline': tagline, 'poster': poster, 'fanart': fanart, 'id': id, 'service': 'itivi', 'next': next})
                except:
                    pass
    except Exception as e:
        control.log('Error itiv tv.get2 %s' % e)
    import operator
    self.list.sort(key=operator.itemgetter('title'))
    # FIX: a second, unreachable duplicate `return self.list` was removed
    return self.list
def eskago_list(self, url):
    """Scrape eskago.pl tv channels from the page at *url* into self.list.

    NOTE(review): unlike the other *_list methods the per-item code is
    not wrapped in its own try, so one bad entry aborts the whole loop.
    """
    control.log('AAAi %s' % url)
    items = []
    next = ''
    try:
        result = client.request(url)
        # channel entries: (href, title, alt, img-src, label)
        result = re.compile('''<li><a href="([^"]+)" title="([^"]+)"><i class="big_icon"></i><img alt="([^"]+)" src="([^"]+)"/></a><span>([^"]+)</span></li>''').findall(result)
        control.log('AAAi %s' % result)
        if len(result)>0:
            for i in result:
                control.log('i %s' % i[3])
                id = str(i[0])
                id = id.encode('utf-8')
                title = i[1]
                title = client.replaceHTMLCodes(title)
                title = title.encode('utf-8')
                poster = '0'
                try:
                    poster = i[3]
                except: pass
                poster = poster.encode('utf-8')
                try:
                    fanart = control.addonFanart()
                    fanart = fanart.encode('utf-8')
                except:
                    fanart = '0'
                    fanart = fanart.encode('utf-8')
                    pass
                plot = '0'
                plot = plot.encode('utf-8')
                tagline = '0'
                tagline = client.replaceHTMLCodes(tagline)
                try: tagline = tagline.encode('utf-8')
                except: pass
                self.list.append({'title': title, 'originaltitle': title, 'genre': '0', 'plot': plot, 'name':title, 'tagline': tagline, 'poster': poster, 'fanart': fanart, 'id':id, 'service':'eskago', 'next': next})
                #control.log("##################><><><><> pierwsza item %s" % self.list)
    except Exception as e:
        control.log('Error EskaGo tv.get2 %s' % e)
        pass
    #self.list.sort()
    import operator
    # sort the combined results by channel title
    self.list.sort(key=operator.itemgetter('title'))
    #control.log("##################><><><><> pierwsza item %s" % newlist)
    return self.list
def wizja_list(self, url):
    """Build self.list from wizja.tv channels, sorted by title.

    NOTE(review): the whole loop runs inside one bare try/except, so a
    single bad item silently aborts the method and returns None.
    """
    try:
        next = ''
        items = wizja.wizjachanels()
        for item in items:
            id = item['id']
            title = item['title']
            title = client.replaceHTMLCodes(title)
            poster = '0'
            try:
                poster = item['img']
            except: pass
            poster = poster.encode('utf-8')
            try:
                fanart = control.addonFanart()
                fanart = fanart.encode('utf-8')
            except:
                fanart = '0'
                fanart = fanart.encode('utf-8')
                pass
            plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')
            tagline = None
            # derive tagline from the first sentence of the plot if any
            if tagline == None and not plot == '0': tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
            elif tagline == None: tagline = '0'
            tagline = client.replaceHTMLCodes(tagline)
            try: tagline = tagline.encode('utf-8')
            except: pass
            self.list.append({'title': title, 'originaltitle': title, 'genre': '0', 'plot': plot, 'name':title, 'tagline': tagline, 'poster': poster, 'fanart': fanart, 'id':id, 'service':'wizja', 'next': next})
            #control.log("##################><><><><> pierwsza item %s" % self.list)
        import operator
        self.list.sort(key=operator.itemgetter('title'))
        return self.list
    except:
        pass
def weeb_list(self, url):
    """Populate self.list with weeb.tv channels, sorted by title."""
    try:
        self.list = weeb.weebchanels()
        import operator
        self.list.sort(key=operator.itemgetter('title'))
        return self.list
    except:
        pass
def yoy_list(self, url):
    """Build self.list from yoy.tv channels (channel list cached 4h)."""
    #try:
    next = ''
    items = cache.get(yoy.getchanels, 4)
    #items = yoy.getchanels()
    for item in items:
        id = item['id']
        title = item['title']
        title = client.replaceHTMLCodes(title)
        poster = '0'
        try:
            # cover art follows a predictable url scheme keyed on channel id
            poster = 'http://yoy.tv/channel/covers/%s.jpg' % id
        except: pass
        poster = poster.encode('utf-8')
        try:
            fanart = control.addonFanart()
        except:
            fanart = '0'
            pass
        fanart = fanart.encode('utf-8')
        plot = '0'
        plot = client.replaceHTMLCodes(plot)
        plot = plot.encode('utf-8')
        tagline = None
        # derive tagline from the first sentence of the plot if any
        if tagline == None and not plot == '0': tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
        elif tagline == None: tagline = '0'
        tagline = client.replaceHTMLCodes(tagline)
        try: tagline = tagline.encode('utf-8')
        except: pass
        self.list.append({'title': title, 'originaltitle': title, 'genre': '0', 'plot': plot, 'name':title, 'tagline': tagline, 'poster': poster, 'fanart': fanart, 'id':id, 'service':'yoy', 'next': next})
        #control.log("##################><><><><> yoy item %s" % self.list)
    import operator
    self.list.sort(key=operator.itemgetter('title'))
    return self.list
    #except:
    #    pass
def videostar_list(self, url):
    """Build self.list from videostar.pl channels playable by the account."""
    items = []
    try:
        result = videostar.get('/channels/list/ios-plus')
        result = json.loads(result)
        control.log('A tv.get %s' % result)
        for i in result['channels']:
            control.log('Result %s' % i)
            # keep only channels the current account can actually play
            if i['access_status']== 'subscribed' or i['access_status']== 'free':
                try: items.append(i)
                except: pass
        if len(items) == 0:
            # nothing playable found: fall back to the raw response
            items = result
    except Exception as e:
        control.log('Error tv.get %s' % e)
    next = ''
    for item in items:
        control.log('Result %s' % item)
        try:
            id = str(item['id'])
            id = id.encode('utf-8')
            title = item['name']
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')
            poster = '0'
            try:
                poster = item['thumbnail']
            except: pass
            poster = poster.encode('utf-8')
            try:
                fanart = control.addonFanart()
            except:
                fanart = '0'
                pass
            fanart = fanart.encode('utf-8')
            plot = '0'
            try: plot = item['overview']
            except: pass
            if plot == None: plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')
            try: tagline = item['tagline']
            except: tagline = None
            # first sentence of the plot doubles as the tagline
            if tagline == None and not plot == '0': tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
            elif tagline == None: tagline = '0'
            tagline = client.replaceHTMLCodes(tagline)
            try: tagline = tagline.encode('utf-8')
            except: pass
            self.list.append({'title': title, 'originaltitle': title, 'genre': '0', 'plot': plot, 'name':title, 'tagline': tagline, 'poster': poster, 'fanart': fanart, 'id':id, 'service':'videostar', 'next': next})
            #control.log("##################><><><><> pierwsza item %s" % self.list)
        except Exception as e:
            control.log('Error videostar tv.get2 %s' % e)
            pass
    #self.list.sort()
    import operator
    self.list.sort(key=operator.itemgetter('title'))
    #control.log("##################><><><><> pierwsza item %s" % newlist)
    return self.list
def pierwsza_list(self, url):
    """Build self.list from pierwsza.tv channels, sorted by title."""
    #items = cache.get(pierwsza.chanels, 2)
    items = pierwsza.chanels()
    next = ''
    for item in items:
        try:
            id = str(item['id'])
            id = id.encode('utf-8')
            title = item['name']
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')
            poster = '0'
            try:
                # NOTE(review): 'thumbail' (sic) -- presumably matches the
                # upstream json key spelling; confirm against the service
                poster = item['thumbail']
                poster = self.pierwsza_link+poster
            except: pass
            poster = poster.encode('utf-8')
            try:
                fanart = control.addonFanart()
            except:
                fanart = '0'
                pass
            fanart = fanart.encode('utf-8')
            plot = '0'
            try: plot = item['overview']
            except: pass
            if plot == None: plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')
            try: tagline = item['tagline']
            except: tagline = None
            # first sentence of the plot doubles as the tagline
            if tagline == None and not plot == '0': tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
            elif tagline == None: tagline = '0'
            tagline = client.replaceHTMLCodes(tagline)
            try: tagline = tagline.encode('utf-8')
            except: pass
            self.list.append({'title': title, 'originaltitle': title, 'genre': '0', 'plot': plot, 'name':title, 'tagline': tagline, 'poster': poster, 'fanart': fanart, 'id':id, 'service':'pierwsza', 'next': next})
            #control.log("##################><><><><> pierwsza item %s" % self.list)
        except:
            #control.log("##################><><><><> pierwsza item %s" % newlist)
            pass
    import operator
    self.list.sort(key=operator.itemgetter('title'))
    return self.list
def widget(self):
    """Render a movie widget based on the 'movie_widget' addon setting.

    NOTE(review): self.featured_link / self.trending_link /
    self.added_link are not set in this class's __init__ -- this looks
    copied from a movies module; confirm these attributes exist.
    """
    setting = control.setting('movie_widget')
    if setting == '2':
        self.get(self.featured_link)
    elif setting == '3':
        self.get(self.trending_link)
    else:
        self.get(self.added_link)
def favourites(self):
    """Render favourite movies as a directory.

    NOTE(review): the ``favourites`` module import is commented out at
    the top of this file, so this raises NameError, which the broad
    except silently swallows -- confirm whether this is dead code.
    """
    try:
        items = favourites.getFavourites('movies')
        self.list = [i[1] for i in items]
        for i in self.list:
            # fill in any metadata keys the directory renderer expects
            if not 'name' in i: i['name'] = '%s (%s)' % (i['title'], i['year'])
            try: i['title'] = i['title'].encode('utf-8')
            except: pass
            try: i['name'] = i['name'].encode('utf-8')
            except: pass
            if not 'duration' in i: i['duration'] = '0'
            if not 'imdb' in i: i['imdb'] = '0'
            if not 'tmdb' in i: i['tmdb'] = '0'
            if not 'tvdb' in i: i['tvdb'] = '0'
            if not 'tvrage' in i: i['tvrage'] = '0'
            if not 'poster' in i: i['poster'] = '0'
            if not 'banner' in i: i['banner'] = '0'
            if not 'fanart' in i: i['fanart'] = '0'
        self.worker()
        self.list = sorted(self.list, key=lambda k: k['title'])
        self.movieDirectory(self.list)
    except:
        return
def search(self, query=None):
    """Prompt for (or accept) a search query and render matching movies.

    NOTE(review): self.search_link and self.trakt_user are not defined
    in this class -- this looks copied from a movies module; confirm
    these attributes exist before relying on this method.
    """
    #try:
    if query == None:
        # no query supplied: pop up the on-screen keyboard
        t = control.lang(30201).encode('utf-8')
        k = control.keyboard('', t) ; k.doModal()
        self.query = k.getText() if k.isConfirmed() else None
    else:
        self.query = query
    if (self.query == None or self.query == ''): return
    url = self.search_link % (urllib.quote_plus(self.query))
    self.list = cache.get(self.trakt_list, 0, url, self.trakt_user)
    self.worker()
    self.movieDirectory(self.list)
    return self.list
    #except:
    #    return
def userlists(self):
    """Collect the user's trakt lists (own + liked) and imdb lists and
    render them as a directory of 'movies' actions.

    NOTE(review): ``trakt`` is not among this file's visible imports --
    confirm it is available at runtime.
    """
    try:
        userlists = []
        # bail out early when no trakt credentials are configured
        if trakt.getTraktCredentialsInfo() == False: raise Exception()
        activity = trakt.getActivity()
    except:
        pass
    #control.log('@@ TRAKT LIST %s - %s' %(userlists,activity))
    try:
        if trakt.getTraktCredentialsInfo() == False: raise Exception()
        try:
            # refresh the cache when trakt activity is newer than it
            if activity > cache.timeout(self.trakt_user_list, self.traktlists_link,
                                        self.trakt_user): raise Exception()
            userlists += cache.get(self.trakt_user_list, 720, self.traktlists_link, self.trakt_user)
        except:
            userlists += cache.get(self.trakt_user_list, 0, self.traktlists_link, self.trakt_user)
    except:
        pass
    try:
        self.list = []
        if self.imdb_user == '': raise Exception()
        userlists += cache.get(self.imdb_user_list, 0, self.imdblists_link)
    except:
        pass
    try:
        self.list = []
        if trakt.getTraktCredentialsInfo() == False: raise Exception()
        try:
            # same cache-refresh pattern for the liked lists
            if activity > cache.timeout(self.trakt_user_list, self.traktlikedlists_link,
                                        self.trakt_user): raise Exception()
            userlists += cache.get(self.trakt_user_list, 720, self.traktlikedlists_link, self.trakt_user)
        except:
            userlists += cache.get(self.trakt_user_list, 0, self.traktlikedlists_link, self.trakt_user)
    except:
        pass
    self.list = userlists
    # every collected list is rendered as a 'movies' directory action
    for i in range(0, len(self.list)): self.list[i].update({'image': 'userlists.png', 'action': 'movies'})
    #self.addDirectory(self.list, queue=True)
    self.addDirectory(self.list)
    return self.list
def trakt_list(self, url, user):
    """Fetch a trakt movie list from *url* and append entries to self.list.

    Requests extended info, computes a next-page url (capped at page 4),
    and maps each movie into the directory-item dict format.

    :return: self.list, or None when the initial request fails
    """
    try:
        # request full extended info for each entry
        q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
        q.update({'extended': 'full,images'})
        q = (urllib.urlencode(q)).replace('%2C', ',')
        u = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
        result = trakt.getTrakt(u)
        result = json.loads(result)
        items = []
        for i in result:
            try: items.append(i['movie'])
            except: pass
        if len(items) == 0:
            # responses not wrapped in {'movie': ...} entries
            items = result
    except:
        return
    try:
        # build the url for the following page; stop after page 4
        q = dict(urlparse.parse_qsl(urlparse.urlsplit(url).query))
        p = str(int(q['page']) + 1)
        if p == '5': raise Exception()
        q.update({'page': p})
        q = (urllib.urlencode(q)).replace('%2C', ',')
        next = url.replace('?' + urlparse.urlparse(url).query, '') + '?' + q
        next = next.encode('utf-8')
    except:
        next = ''
    for item in items:
        try:
            title = item['name']
            title = client.replaceHTMLCodes(title)
            title = title.encode('utf-8')
            # FIX: 'name' was never assigned before being used in the dict
            # below, so every iteration raised NameError (swallowed by the
            # bare except) and the list always came back empty
            name = title
            poster = '0'
            try: poster = item['thumbail']
            except: pass
            poster = poster.encode('utf-8')
            fanart = '0'
            try: fanart = item['thumbail']
            except: pass
            fanart = fanart.encode('utf-8')
            plot = '0'
            try: plot = item['overview']
            except: pass
            if plot == None: plot = '0'
            plot = client.replaceHTMLCodes(plot)
            plot = plot.encode('utf-8')
            try: tagline = item['tagline']
            except: tagline = None
            # first sentence of the plot doubles as the tagline
            if tagline == None and not plot == '0': tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
            elif tagline == None: tagline = '0'
            tagline = client.replaceHTMLCodes(tagline)
            try: tagline = tagline.encode('utf-8')
            except: pass
            self.list.append({'title': title, 'originaltitle': title, 'genre': '0', 'plot': plot, 'tagline': tagline, 'name': name, 'poster': poster, 'fanart': fanart, 'next': next})
        except:
            pass
    return self.list
def trakt_user_list(self, url, user):
    """Fetch the named trakt lists for *user* and append them to
    self.list, sorted by name (ignoring a leading article)."""
    # FIX: initialize so a failed request below doesn't leave 'items'
    # unbound and raise NameError at the for loop
    items = []
    try:
        result = trakt.getTrakt(url)
        items = json.loads(result)
    except:
        pass
    for item in items:
        try:
            try:
                name = item['list']['name']
            except:
                name = item['name']
            name = client.replaceHTMLCodes(name)
            name = name.encode('utf-8')
            try:
                url = (trakt.slug(item['list']['user']['username']), item['list']['ids']['slug'])
            except:
                # liked/own lists without a user wrapper belong to 'me'
                url = ('me', item['ids']['slug'])
            url = self.traktlist_link % url
            url = url.encode('utf-8')
            self.list.append({'name': name, 'url': url, 'context': url})
        except:
            pass
    # sort by name, ignoring a leading "the " or "a "
    self.list = sorted(self.list, key=lambda k: re.sub('(^the |^a )', '', k['name'].lower()))
    return self.list
 def imdb_list(self, url, idx=True):
  # Scrape an IMDb list page into self.list. Every entry carries title, year
  # and IMDb id plus best-effort metadata fields ('0' marks "unknown").
  # With idx=False all result pages are fetched; otherwise only the first.
  try:
   if url == self.imdbwatchlist_link:
    # The watchlist URL must first be resolved to its underlying ls... id.
    def imdb_watchlist_id(url):
     return re.compile('/export[?]list_id=(ls\d*)').findall(client.request(url))[0]
    url = cache.get(imdb_watchlist_id, 8640, url)
    url = self.imdblist_link % url
   headers = {'Accept-Language': 'en-US'}
   result = str(client.request(url,headers=headers))
   try:
    if idx == True: raise Exception()
    # idx == False: append every remaining page (100 rows per page).
    pages = client.parseDOM(result, 'div', attrs = {'class': 'desc'})[0]
    pages = re.compile('Page \d+? of (\d*)').findall(pages)[0]
    for i in range(1, int(pages)):
     u = url.replace('&start=1', '&start=%s' % str(i*100+1))
     result += str(client.request(u))
   except:
    pass
   result = result.replace('\n','')
   result = result.decode('iso-8859-1').encode('utf-8')
   # IMDb serves two row markups depending on the list view.
   items = client.parseDOM(result, 'tr', attrs = {'class': '.+?'})
   items += client.parseDOM(result, 'div', attrs = {'class': 'list_item.+?'})
  except:
   return
  try:
   # Work out the "next page" link; empty string when on the last page.
   next = client.parseDOM(result, 'span', attrs = {'class': 'pagination'})
   next += client.parseDOM(result, 'div', attrs = {'class': 'pagination'})
   name = client.parseDOM(next[-1], 'a')[-1]
   if 'laquo' in name: raise Exception()
   next = client.parseDOM(next, 'a', ret='href')[-1]
   next = url.replace(urlparse.urlparse(url).query, urlparse.urlparse(next).query)
   next = client.replaceHTMLCodes(next)
   next = next.encode('utf-8')
  except:
   next = ''
  for item in items:
   try:
    try: title = client.parseDOM(item, 'a')[1]
    except: pass
    try: title = client.parseDOM(item, 'a', attrs = {'onclick': '.+?'})[-1]
    except: pass
    title = client.replaceHTMLCodes(title)
    title = title.encode('utf-8')
    year = client.parseDOM(item, 'span', attrs = {'class': 'year_type'})[0]
    year = re.compile('(\d{4})').findall(year)[-1]
    year = year.encode('utf-8')
    # Skip titles dated after the current year (not yet released).
    if int(year) > int((self.datetime).strftime('%Y')): raise Exception()
    name = '%s (%s)' % (title, year)
    try: name = name.encode('utf-8')
    except: pass
    imdb = client.parseDOM(item, 'a', ret='href')[0]
    imdb = 'tt' + re.sub('[^0-9]', '', imdb.rsplit('tt', 1)[-1])
    imdb = imdb.encode('utf-8')
    poster = '0'
    try: poster = client.parseDOM(item, 'img', ret='src')[0]
    except: pass
    try: poster = client.parseDOM(item, 'img', ret='loadlate')[0]
    except: pass
    # Only sized artwork URLs are usable; force a 500px-wide variant.
    if not ('_SX' in poster or '_SY' in poster): poster = '0'
    poster = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', poster)
    poster = client.replaceHTMLCodes(poster)
    poster = poster.encode('utf-8')
    genre = client.parseDOM(item, 'span', attrs = {'class': 'genre'})
    genre = client.parseDOM(genre, 'a')
    genre = ' / '.join(genre)
    if genre == '': genre = '0'
    genre = client.replaceHTMLCodes(genre)
    genre = genre.encode('utf-8')
    try: duration = re.compile('(\d+?) mins').findall(item)[-1]
    except: duration = '0'
    duration = client.replaceHTMLCodes(duration)
    duration = duration.encode('utf-8')
    try: rating = client.parseDOM(item, 'span', attrs = {'class': 'rating-rating'})[0]
    except: rating = '0'
    try: rating = client.parseDOM(rating, 'span', attrs = {'class': 'value'})[0]
    except: rating = '0'
    if rating == '' or rating == '-': rating = '0'
    rating = client.replaceHTMLCodes(rating)
    rating = rating.encode('utf-8')
    try: votes = client.parseDOM(item, 'div', ret='title', attrs = {'class': 'rating rating-list'})[0]
    except: votes = '0'
    try: votes = re.compile('[(](.+?) votes[)]').findall(votes)[0]
    except: votes = '0'
    if votes == '': votes = '0'
    votes = client.replaceHTMLCodes(votes)
    votes = votes.encode('utf-8')
    try: mpaa = client.parseDOM(item, 'span', attrs = {'class': 'certificate'})[0]
    except: mpaa = '0'
    try: mpaa = client.parseDOM(mpaa, 'span', ret='title')[0]
    except: mpaa = '0'
    if mpaa == '' or mpaa == 'NOT_RATED': mpaa = '0'
    mpaa = mpaa.replace('_', '-')
    mpaa = client.replaceHTMLCodes(mpaa)
    mpaa = mpaa.encode('utf-8')
    # Director/cast share the same credit blocks; split on the 'With:' marker.
    director = client.parseDOM(item, 'span', attrs = {'class': 'credit'})
    director += client.parseDOM(item, 'div', attrs = {'class': 'secondary'})
    try: director = [i for i in director if 'Director:' in i or 'Dir:' in i][0]
    except: director = '0'
    director = director.split('With:', 1)[0].strip()
    director = client.parseDOM(director, 'a')
    director = ' / '.join(director)
    if director == '': director = '0'
    director = client.replaceHTMLCodes(director)
    director = director.encode('utf-8')
    cast = client.parseDOM(item, 'span', attrs = {'class': 'credit'})
    cast += client.parseDOM(item, 'div', attrs = {'class': 'secondary'})
    try: cast = [i for i in cast if 'With:' in i or 'Stars:' in i][0]
    except: cast = '0'
    cast = cast.split('With:', 1)[-1].strip()
    cast = client.replaceHTMLCodes(cast)
    cast = cast.encode('utf-8')
    cast = client.parseDOM(cast, 'a')
    if cast == []: cast = '0'
    plot = '0'
    try: plot = client.parseDOM(item, 'span', attrs = {'class': 'outline'})[0]
    except: pass
    try: plot = client.parseDOM(item, 'div', attrs = {'class': 'item_description'})[0]
    except: pass
    plot = plot.rsplit('<span>', 1)[0].strip()
    if plot == '': plot = '0'
    plot = client.replaceHTMLCodes(plot)
    plot = plot.encode('utf-8')
    fanart = 'http://films4u.org/imdb/bgs/'+imdb+'.jpg'
    fanart = fanart.encode('utf-8')
    # The first sentence of the plot doubles as the tagline.
    tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
    try: tagline = tagline.encode('utf-8')
    except: pass
    self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': '0', 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': '0', 'cast': cast, 'plot': plot, 'tagline': tagline, 'name': name, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'tvrage': '0', 'poster': poster, 'banner': '0', 'fanart': fanart, 'next': next})
   except:
    pass
  return self.list
def imdb_user_list(self, url):
try:
result = client.request(url)
result = result.decode('iso-8859-1').encode('utf-8')
items = client.parseDOM(result, 'div', attrs = {'class': 'list_name'})
except:
pass
for item in items:
try:
name = client.parseDOM(item, 'a')[0]
name = client.replaceHTMLCodes(name)
name = name.encode('utf-8')
url = client.parseDOM(item, 'a', ret='href')[0]
url = url.split('/list/', 1)[-1].replace('/', '')
url = self.imdblist_link % url
url = client.replaceHTMLCodes(url)
url = url.encode('utf-8')
self.list.append({'name': name, 'url': url, 'context': url})
except:
pass
return self.list
 def scn_list(self, url):
  # Scene-release listing. predb_items() scrapes recent releases (cached for
  # 24 hours), predb_list() resolves one (title, year) via the IMDb-by-query
  # API and appends it to self.list. Results are resolved 30 at a time in
  # worker threads; `url` carries the ?start= offset for paging.
  def predb_items():
   # Collect up to 150 de-duplicated (title, year) pairs from the last ~6
   # months of releases across this year and last year.
   try:
    years = [(self.datetime).strftime('%Y'), (self.datetime - datetime.timedelta(days = 365)).strftime('%Y')]
    months = (self.datetime - datetime.timedelta(days = 180)).strftime('%Y%m%d')
    result = ''
    for i in years:
     result += client.request(self.scn_page % (str(i), '1'))
     result += client.request(self.scn_page % (str(i), '2'))
    items = client.parseDOM(result, 'div', attrs = {'class': 'post'})
    items = [(client.parseDOM(i, 'a', attrs = {'class': 'p-title'}), re.compile('(\d{4}-\d{2}-\d{2})').findall(i)) for i in items]
    items = [(i[0][0], i[1][0]) for i in items if len(i[0]) > 0 and len(i[1]) > 0]
    # Strip release tags, keep the embedded year and a numeric post date.
    items = [(re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|3D)(\.|\)|\]|\s)(.+)', '', i[0]), re.compile('[\.|\(|\[|\s](\d{4})[\.|\)|\]|\s]').findall(i[0]), re.sub('[^0-9]', '', i[1])) for i in items]
    items = [(i[0], i[1][-1], i[2]) for i in items if len(i[1]) > 0]
    items = [i for i in items if int(months) <= int(i[2])]
    items = sorted(items,key=lambda x: x[2])[::-1]
    items = [(re.sub('(\.|\(|\[|LIMITED|UNCUT)', ' ', i[0]).strip(), i[1]) for i in items]
    # De-duplicate while preserving order.
    items = [x for y,x in enumerate(items) if x not in items[:y]]
    items = items[:150]
    return items
   except:
    return
  def predb_list(i):
   # Resolve one (title, year) pair via the IMDb query API and append the
   # metadata record to self.list ('0' marks unknown fields).
   try:
    url = self.imdb_by_query % (urllib.quote_plus(i[0]), i[1])
    item = client.request(url, timeout='10')
    item = json.loads(item)
    title = item['Title']
    title = client.replaceHTMLCodes(title)
    title = title.encode('utf-8')
    year = item['Year']
    year = re.sub('[^0-9]', '', str(year))
    year = year.encode('utf-8')
    name = '%s (%s)' % (title, year)
    try: name = name.encode('utf-8')
    except: pass
    imdb = item['imdbID']
    if imdb == None or imdb == '' or imdb == 'N/A': raise Exception()
    imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
    imdb = imdb.encode('utf-8')
    poster = item['Poster']
    if poster == None or poster == '' or poster == 'N/A': poster = '0'
    if not ('_SX' in poster or '_SY' in poster): poster = '0'
    poster = re.sub('_SX\d*|_SY\d*|_CR\d+?,\d+?,\d+?,\d*','_SX500', poster)
    poster = poster.encode('utf-8')
    genre = item['Genre']
    if genre == None or genre == '' or genre == 'N/A': genre = '0'
    genre = genre.replace(', ', ' / ')
    genre = genre.encode('utf-8')
    duration = item['Runtime']
    if duration == None or duration == '' or duration == 'N/A': duration = '0'
    duration = re.sub('[^0-9]', '', str(duration))
    duration = duration.encode('utf-8')
    rating = item['imdbRating']
    if rating == None or rating == '' or rating == 'N/A' or rating == '0.0': rating = '0'
    rating = rating.encode('utf-8')
    votes = item['imdbVotes']
    try: votes = str(format(int(votes),',d'))
    except: pass
    if votes == None or votes == '' or votes == 'N/A': votes = '0'
    votes = votes.encode('utf-8')
    mpaa = item['Rated']
    if mpaa == None or mpaa == '' or mpaa == 'N/A': mpaa = '0'
    mpaa = mpaa.encode('utf-8')
    director = item['Director']
    if director == None or director == '' or director == 'N/A': director = '0'
    director = director.replace(', ', ' / ')
    director = re.sub(r'\(.*?\)', '', director)
    director = ' '.join(director.split())
    director = director.encode('utf-8')
    writer = item['Writer']
    if writer == None or writer == '' or writer == 'N/A': writer = '0'
    writer = writer.replace(', ', ' / ')
    writer = re.sub(r'\(.*?\)', '', writer)
    writer = ' '.join(writer.split())
    writer = writer.encode('utf-8')
    cast = item['Actors']
    if cast == None or cast == '' or cast == 'N/A': cast = '0'
    cast = [x.strip() for x in cast.split(',') if not x == '']
    try: cast = [(x.encode('utf-8'), '') for x in cast]
    except: cast = []
    if cast == []: cast = '0'
    plot = item['Plot']
    if plot == None or plot == '' or plot == 'N/A': plot = '0'
    plot = client.replaceHTMLCodes(plot)
    plot = plot.encode('utf-8')
    # First sentence of the plot doubles as the tagline.
    tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
    try: tagline = tagline.encode('utf-8')
    except: pass
    self.list.append({'title': title, 'originaltitle': title, 'year': year, 'premiered': '0', 'studio': '0', 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline, 'name': name, 'code': imdb, 'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'tvrage': '0', 'poster': poster, 'banner': '0', 'fanart': '0'})
   except:
    pass
  try:
   items = cache.get(predb_items, 24)
   start = re.compile('start=(\d*)').findall(url)[-1]
   start = int(start)
   # 30 entries per page; emit a next link while more remain.
   if len(items) > (start + 30): next = self.scn_link + '?start=%s' % (start + 30)
   else: next = ''
  except:
   return
  threads = []
  for i in range(start - 1, start + 29):
   try: threads.append(workers.Thread(predb_list, items[i]))
   except: pass
  [i.start() for i in threads]
  [i.join() for i in threads]
  for i in range(0, len(self.list)): self.list[i].update({'next': next})
  return self.list
def worker(self):
self.meta = []
total = len(self.list)
#control.log("##################><><><><> WORKER TOTAL %s" % total)
for i in range(0, total): self.list[i].update({'metacache': False})
self.list = metacache.fetch(self.list, self.info_lang)
for r in range(0, total, 25):
threads = []
for i in range(r, r+25):
if i <= total: threads.append(workers.Thread(self.super_info, i))
[i.start() for i in threads]
[i.join() for i in threads]
self.list = [i for i in self.list if not i['imdb'] == '0']
#control.log("##################><><><><> WORKER %s" % str(len(self.meta)))
if len(self.meta) > 0: metacache.insert(self.meta)
 def super_info(self, i):
  # Enrich self.list[i] in place with metadata fetched from the IMDb info
  # API, then queue the result for metacache persistence. Entries already
  # served from the metacache, or without an IMDb id, are skipped. All
  # failures are swallowed (best-effort thread worker).
  try:
   zero ='0'.encode('utf-8')
   if self.list[i]['metacache'] == True: raise Exception()
   try: imdb = self.list[i]['imdb']
   except: imdb = '0'
   if not imdb == '0': url = self.imdb_info_link % imdb
   else: raise Exception()
   item = client.request(url, timeout='10')
   item = json.loads(item)
   imdb = item['imdbID']
   if imdb == '' or imdb == None: imdb = '0'
   if not imdb == '0': imdb = 'tt' + re.sub('[^0-9]', '', str(imdb))
   imdb = imdb.encode('utf-8')
   if not imdb == '0': self.list[i].update({'imdb': imdb, 'code': imdb})
   # NOTE(review): the triple-quoted blocks below are disabled fanart.tv /
   # tmdb lookups kept for reference; they are inert string expressions.
   """
   try:
    #url2 = 'http://webservice.fanart.tv/v3/movies/%s?api_key=%s' % (imdb, self.fanarttv_key)
    #item2 = client.request(url2, timeout='10')
    #item2 = json.loads(item2)
    #control.log("><><><><> ITEM4 %s" % item2['moviebackground'][0]['url'])
   except:
    pass
   try:
    tmdb = item2['tmdb_id']
    if tmdb == '' or tmdb == None: tmdb = '0'
    tmdb = re.sub('[^0-9]', '', str(tmdb))
    tmdb = tmdb.encode('utf-8')
    if not tmdb == '0': self.list[i].update({'tmdb': tmdb})
   except:
    tmdb = zero
   """
   try:
    poster = item['Poster']
    if poster == '' or poster == None: poster = '0'
    #if not poster == '0': poster = '%s%s' % (self.tmdb_poster, poster)
    poster = poster.encode('utf-8')
    if not poster == '0': self.list[i].update({'poster': poster})
   except:
    poster = zero
   """
   try:
    fanart = item2['moviebackground'][0]['url']
    if fanart == '' or fanart == None: fanart = '0'
    #if not fanart == '0': fanart = '%s%s' % (self.tmdb_image, fanart)
    fanart = fanart.encode('utf-8')
    if not fanart == '0' and self.list[i]['fanart'] == '0': self.list[i].update({'fanart': fanart})
   except:
    fanart = zero
   """
   try:
    # Fanart is looked up by IMDb id on a static mirror.
    if not imdb == '0':
     fanart = 'http://films4u.org/imdb/bgs/'+imdb+'.jpg'
     fanart= fanart.encode('utf-8')
    else:
     fanart = zero
   except:
    fanart = zero
   # http://fanart.filmkodi.com/tt0006333.jpg
   try:
    premiered = item['Released']
    premiered = re.compile('(\d{4}-\d{2}-\d{2})').findall(premiered)[0]
   except: premiered = '0'
   if premiered == '' or premiered == None: premiered = '0'
   premiered = premiered.encode('utf-8')
   if not premiered == '0': self.list[i].update({'premiered': premiered})
   #studio = item['production_companies']
   #try: studio = [x['name'] for x in studio][0]
   #except:
   studio = '0'
   #if studio == '' or studio == None: studio = '0'
   studio = studio.encode('utf-8')
   #if not studio == '0': self.list[i].update({'studio': studio})
   try: genre = item['Genre']
   except: genre = '0'
   if genre == '' or genre == None or genre == []: genre = '0'
   genre = genre.encode('utf-8')
   if not genre == '0': self.list[i].update({'genre': genre})
   try: duration = str(item['Runtime'].replace(' min',''))
   except: duration = '0'
   if duration == '' or duration == None: duration = '0'
   duration = duration.encode('utf-8')
   if not duration == '0': self.list[i].update({'duration': duration})
   try: rating = str(item['imdbRating'])
   except: rating = '0'
   if rating == '' or rating == None: rating = '0'
   rating = rating.encode('utf-8')
   if not rating == '0': self.list[i].update({'rating': rating})
   try:
    votes = str(item['imdbVotes'])
    votes = str(format(int(votes),',d'))
   except:
    votes = '0'
   if votes == '' or votes == None: votes = '0'
   votes = votes.encode('utf-8')
   if not votes == '0': self.list[i].update({'votes': votes})
   try:
    # NOTE(review): 'Country' is stored in the mpaa slot — presumably a
    # deliberate repurposing by this skin; verify before changing.
    mpaa = item['Country']
   except:
    mpaa = '0'
   if mpaa == '' or mpaa == None: mpaa = '0'
   mpaa = mpaa.encode('utf-8')
   if not mpaa == '0': self.list[i].update({'mpaa': mpaa})
   try: cast = item['Actors']
   except: cast = '0'
   if cast == None or cast == '' or cast == 'N/A': cast = '0'
   cast = [x.strip() for x in cast.split(',') if not x == '']
   try: cast = [(x.encode('utf-8'), '') for x in cast]
   except: cast = []
   if cast == []: cast = '0'
   if not cast == '0': self.list[i].update({'cast': cast})
   try: writer = item['Writer']
   except: writer = '0'
   if writer == '' or writer == None: writer= '0'
   writer = writer.encode('utf-8').replace(', ', ' / ')
   if len(writer) > 0: self.list[i].update({'writer': writer})
   """
   tagline = item['tagline']
   if (tagline == '' or tagline == None) and not plot == '0': tagline = re.compile('[.!?][\s]{1,2}(?=[A-Z])').split(plot)[0]
   elif tagline == '' or tagline == None: tagline = '0'
   try: tagline = tagline.encode('utf-8')
   except: pass
   if not tagline == '0': self.list[i].update({'tagline': tagline})
   """
   plot = item['Plot']
   if plot == '' or plot == None: plot = '0'
   plot = plot.encode('utf-8')
   if not plot == '0': self.list[i].update({'plot': plot})
   director = item['Director']
   if director == '' or director == None or director == []: director = '0'
   director = director.encode('utf-8')
   if not director == '0': self.list[i].update({'director': director})
   #self.meta.append({'imdb': imdb, 'tmdb': tmdb, 'tvdb': '0', 'lang': self.info_lang, 'item': {'code': imdb, 'imdb': imdb, 'tmdb': tmdb, 'poster': poster, 'fanart': fanart, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': tagline}})
   # Queue the fetched record for metacache.insert() in worker().
   self.meta.append({'imdb': imdb, 'tmdb': '0', 'tvdb': '0', 'lang': self.info_lang, 'item': {'code': imdb, 'imdb': imdb, 'tmdb': '0', 'poster': poster, 'fanart': fanart, 'premiered': premiered, 'studio': studio, 'genre': genre, 'duration': duration, 'rating': rating, 'votes': votes, 'mpaa': mpaa, 'director': director, 'writer': writer, 'cast': cast, 'plot': plot, 'tagline': zero}})
  except:
   pass
 def movieDirectory(self, items):
  # Render a list of movie metadata dicts as a Kodi directory of playable
  # items, then append a "next page" entry and finalise the listing.
  if items == None or len(items) == 0: return
  isFolder = True if control.setting('autoplay') == 'false' and control.setting('host_select') == '1' else False
  isFolder = False if control.window.getProperty('PseudoTVRunning') == 'True' else isFolder
  playbackMenu = control.lang(30204).encode('utf-8') if control.setting('autoplay') == 'true' else control.lang(30203).encode('utf-8')
  # NOTE(review): `action` is not defined anywhere in this method or the
  # visible scope — presumably a module-level global set by the plugin
  # router; if not, this line raises NameError. Verify.
  cacheToDisc = False if not action == 'movieSearch' else True
  addonPoster, addonBanner = control.addonPoster(), control.addonBanner()
  addonFanart, settingFanart = control.addonFanart(), control.setting('fanart')
  sysaddon = sys.argv[0]
  #try:
  # favitems = favourites.getFavourites('movies')
  # favitems = [i[0] for i in favitems]
  #except:
  # pass
  for i in items:
   try:
    label = i['name']
    # NOTE(review): syshandle is (re)bound inside the loop but also used
    # after it; an empty-failure loop would leave it unbound below.
    syshandle = int(sys.argv[1])
    sysname = urllib.quote_plus(label)
    systitle = urllib.quote_plus(i['title'])
    #imdb, tmdb, year = i['imdb'], i['tmdb'], i['year']
    service = i['service']
    poster, fanart = i['poster'], i['fanart']
    if poster == '0': poster = addonPoster
    # Strip unknown ('0') fields before handing the metadata to Kodi.
    meta = dict((k,v) for k, v in i.iteritems() if not v == '0')
    sysmeta = urllib.quote_plus(json.dumps(meta))
    url = '%s?action=play&name=%s&title=%s&service=%s&meta=%s' % (sysaddon, sysname, systitle, service, sysmeta)
    sysurl = urllib.quote_plus(url)
    item = control.item(label=label, iconImage=poster, thumbnailImage=poster)
    try: item.setArt({'poster': poster})
    except: pass
    if settingFanart == 'true' and not fanart == '0':
     item.setProperty('Fanart_Image', fanart)
    elif not addonFanart == None:
     item.setProperty('Fanart_Image', addonFanart)
    # NOTE(review): this unconditionally overrides the isFolder computed
    # from the autoplay/host_select settings above — confirm intended.
    isFolder = False
    item.setInfo(type='Video', infoLabels = meta)
    item.setProperty('IsPlayable', 'true')
    #item.addContextMenuItems(cm, replaceItems=True)
    control.addItem(handle=syshandle, url=url, listitem=item, isFolder=isFolder)
   except:
    pass
  try:
   # "Next page" entry, driven by the 'next' url stored on the first item.
   url = items[0]['next']
   if url == '': raise Exception()
   url = '%s?action=movies&url=%s' % (sysaddon, urllib.quote_plus(url))
   addonNext = control.addonNext()
   item = control.item(label=control.lang(30213).encode('utf-8'), iconImage=addonNext, thumbnailImage=addonNext)
   item.addContextMenuItems([], replaceItems=False)
   if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
   control.addItem(handle=syshandle, url=url, listitem=item, isFolder=True)
  except:
   pass
  control.content(syshandle, 'movies')
  control.directory(syshandle, cacheToDisc=cacheToDisc)
  #control.directory(syshandle)
  #views.setView('movies', {'skin.confluence': 500})
def addDirectory(self, items):
if items == None or len(items) == 0: return
sysaddon = sys.argv[0]
addonFanart = control.addonFanart()
addonThumb = control.addonThumb()
artPath = control.artPath()
for i in items:
try:
try: name = control.lang(i['name']).encode('utf-8')
except: name = i['name']
if i['image'].startswith('http://'): thumb = i['image']
elif not artPath == None: thumb = os.path.join(artPath, i['image'])
else: thumb = addonThumb
url = '%s?action=%s' % (sysaddon, i['action'])
try: url += '&url=%s' % urllib.quote_plus(i['url'])
except: pass
cm = []
try: cm.append((control.lang(30211).encode('utf-8'), 'RunPlugin(%s?action=moviesToLibrary&url=%s)' % (sysaddon, urllib.quote_plus(i['context']))))
except: pass
item = control.item(label=name, iconImage=thumb, thumbnailImage=thumb)
item.addContextMenuItems(cm, replaceItems=False)
if not addonFanart == None: item.setProperty('Fanart_Image', addonFanart)
control.addItem(handle=int(sys.argv[1]), url=url, listitem=item, isFolder=True)
except:
pass
control.directory(int(sys.argv[1]), cacheToDisc=True)
| |
import re
from functools import wraps
import json
import django
from django import template
from django.apps import apps
from django.conf import settings
from django.utils.safestring import mark_for_escaping, mark_safe
from django.utils.html import escape
register = template.Library()
if django.VERSION >= (1, 10):
from django.templatetags.static import static as _static
else:
_static = None
@register.simple_tag
def static(path):
    """
    Resolve *path* to a static URL, preferring the staticfiles app's
    storage-aware helper when that app is installed.

    The resolver is imported lazily on first use and cached in the
    module-level ``_static``. On Django >= 1.9 the legacy gif admin icon is
    transparently mapped to its svg replacement.
    """
    global _static
    if _static is None:
        # Fixed: the app label is 'django.contrib.staticfiles'. The previous
        # 'django-contrib.staticfiles' never matched, so the storage-aware
        # helper was never selected.
        if apps.is_installed('django.contrib.staticfiles'):
            from django.contrib.staticfiles.templatetags.staticfiles import static as _static
        else:
            from django.templatetags.static import static as _static
    if django.VERSION >= (1, 9) and path == 'admin/img/icon-unknown.gif':
        # Django 1.9 replaced the gif admin icons with svg equivalents.
        path = 'admin/img/icon-unknown.svg'
    return _static(path)
@register.filter
def form_index(form):
    """
    The id of the root 'form' element in a formset is the form's 'prefix'
    without the '-' preceding the index of the form. So, for instance, in a case
    where the form's 'id' field has the field name:
        prefix-2-id
    the 'form' element's id would be:
        prefix2
    and the form's index is '2'
    """
    match = re.search(r'\-(\d+)$', form.prefix)
    if match is None:
        raise Exception("Form with invalid prefix passed to templatetag")
    return int(match.group(1))
@register.filter
def strip_parent_name(nested_name, parent_name):
    """Drop a leading "<parent_name> " prefix from *nested_name*, if present."""
    prefix = parent_name + " "
    if nested_name.startswith(prefix):
        return nested_name[len(prefix):]
    return nested_name
strip_parent_name.is_safe = True
# These tags are defined in grappelli.templatetags.grp_tags. The issue is that
# they are wrapped in mark_safe(), so we can't use them reliably inside of
# attributes.
@register.filter
def json_encode(data):
    """JSON-encode *data*, marked for escaping so it is safe in attributes."""
    encoded = json.dumps(data)
    return mark_for_escaping(encoded)
def json_else_list_tag(f):
    """
    Decorator. Registers function as a simple_tag.
    Try: Return value of the decorated function json encoded (and escaped).
    Except: Return []
    """
    @wraps(f)
    def inner(model_admin):
        try:
            return mark_safe(escape(json.dumps(f(model_admin))))
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            return []
    return register.simple_tag(inner)
@json_else_list_tag
def get_safe_related_lookup_fields_fk(model_admin):
    """Related-lookup FK fields of *model_admin* (JSON-encoded by decorator)."""
    lookup_fields = model_admin.related_lookup_fields
    return lookup_fields.get("fk", [])
@json_else_list_tag
def get_safe_related_lookup_fields_m2m(model_admin):
    """Related-lookup M2M fields of *model_admin* (JSON-encoded by decorator)."""
    lookup_fields = model_admin.related_lookup_fields
    return lookup_fields.get("m2m", [])
@json_else_list_tag
def get_safe_related_lookup_fields_generic(model_admin):
    """Related-lookup generic fields of *model_admin* (JSON-encoded by decorator)."""
    lookup_fields = model_admin.related_lookup_fields
    return lookup_fields.get("generic", [])
# AUTOCOMPLETES
@json_else_list_tag
def get_safe_autocomplete_lookup_fields_fk(model_admin):
    """Autocomplete FK fields of *model_admin* (JSON-encoded by decorator)."""
    lookup_fields = model_admin.autocomplete_lookup_fields
    return lookup_fields.get("fk", [])
@json_else_list_tag
def get_safe_autocomplete_lookup_fields_m2m(model_admin):
    """Autocomplete M2M fields of *model_admin* (JSON-encoded by decorator)."""
    lookup_fields = model_admin.autocomplete_lookup_fields
    return lookup_fields.get("m2m", [])
@json_else_list_tag
def get_safe_autocomplete_lookup_fields_generic(model_admin):
    """Autocomplete generic fields of *model_admin* (JSON-encoded by decorator)."""
    lookup_fields = model_admin.autocomplete_lookup_fields
    return lookup_fields.get("generic", [])
@register.filter
def formsetsort(formset, arg):
    """
    Takes a list of formset dicts, returns that list sorted by the sortable
    field named by ``arg``.

    Forms whose position value is empty or "-1" are appended after the
    sorted forms, in their original order. When ``arg`` is falsy the
    formset is returned unchanged.
    """
    if not arg:
        return formset
    positioned = []
    unpositioned = []
    for item in formset:
        position = item.form[arg].data
        if position and position != "-1":
            positioned.append((int(position), item))
        else:
            unpositioned.append(item)
    # Sort on the numeric position only: sorting the (position, item) tuples
    # directly would compare the form objects on ties, which raises
    # TypeError on Python 3.
    positioned.sort(key=lambda pair: pair[0])
    return [pair[1] for pair in positioned] + unpositioned
@register.filter
def cell_count(inline_admin_form):
    """Returns the number of cells used in a tabular inline"""
    total = 1  # hidden cell carrying the hidden 'id' field
    for fieldset in inline_admin_form:
        # Each field occupies one cell.
        total += sum(1 for line in fieldset for field in line)
    if inline_admin_form.formset.can_delete:
        total += 1  # delete checkbox
    return total
class IfConditionNode(template.Node):
    """Render one of two nodelists based on a value fixed at parse time."""

    def __init__(self, nodelist_true, nodelist_false, value):
        self.nodelist_true = nodelist_true
        self.nodelist_false = nodelist_false
        self.value = value

    def render(self, context):
        branch = self.nodelist_true if self.value else self.nodelist_false
        return branch.render(context)
@register.tag
def ifdj110(parser, token):
    """{% ifdj110 %}...{% else %}...{% endifdj110 %} — true on Django 1.10.x."""
    body_true = parser.parse(('else', 'endifdj110'))
    token = parser.next_token()
    if token.contents != 'else':
        body_false = template.NodeList()
    else:
        body_false = parser.parse(('endifdj110',))
        parser.delete_first_token()
    return IfConditionNode(body_true, body_false, django.VERSION[:2] == (1, 10))
@register.tag
def ifnotdj110(parser, token):
    """{% ifnotdj110 %}...{% else %}...{% endifnotdj110 %} — true off Django 1.10.x."""
    body_true = parser.parse(('else', 'endifnotdj110'))
    token = parser.next_token()
    if token.contents != 'else':
        body_false = template.NodeList()
    else:
        body_false = parser.parse(('endifnotdj110',))
        parser.delete_first_token()
    return IfConditionNode(body_true, body_false, django.VERSION[:2] != (1, 10))
@register.tag
def ifsuit(parser, token):
    """{% ifsuit %}...{% else %}...{% endifsuit %} — true when 'suit' is installed."""
    body_true = parser.parse(('else', 'endifsuit'))
    token = parser.next_token()
    if token.contents != 'else':
        body_false = template.NodeList()
    else:
        body_false = parser.parse(('endifsuit',))
        parser.delete_first_token()
    return IfConditionNode(body_true, body_false, 'suit' in settings.INSTALLED_APPS)
@register.tag
def ifnotsuit(parser, token):
    """{% ifnotsuit %}...{% else %}...{% endifnotsuit %} — true when 'suit' is absent."""
    body_true = parser.parse(('else', 'endifnotsuit'))
    token = parser.next_token()
    if token.contents != 'else':
        body_false = template.NodeList()
    else:
        body_false = parser.parse(('endifnotsuit',))
        parser.delete_first_token()
    return IfConditionNode(body_true, body_false, 'suit' not in settings.INSTALLED_APPS)
| |
import six
import unittest
import warnings
try:
from unittest import mock
except ImportError:
import mock
from scrapy.settings import Settings, SettingsAttribute, CrawlerSettings
from . import default_settings
class SettingsAttributeTest(unittest.TestCase):
    """SettingsAttribute.set() honours priorities: >= replaces, < is ignored."""

    def setUp(self):
        self.attribute = SettingsAttribute('value', 10)

    def _assert_state(self, value, priority):
        # Check the attribute's stored value/priority pair in one place.
        self.assertEqual(self.attribute.value, value)
        self.assertEqual(self.attribute.priority, priority)

    def test_set_greater_priority(self):
        # A higher-priority set replaces both value and priority.
        self.attribute.set('value2', 20)
        self._assert_state('value2', 20)

    def test_set_equal_priority(self):
        # An equal-priority set also replaces the value.
        self.attribute.set('value2', 10)
        self._assert_state('value2', 10)

    def test_set_less_priority(self):
        # A lower-priority set leaves the attribute untouched.
        self.attribute.set('value2', 0)
        self._assert_state('value', 10)
class SettingsTest(unittest.TestCase):
    def setUp(self):
        # Fresh Settings per test; tests overwrite `attributes` directly
        # when they need full control of the stored contents.
        self.settings = Settings()
@mock.patch.dict('scrapy.settings.SETTINGS_PRIORITIES', {'default': 10})
@mock.patch('scrapy.settings.default_settings', default_settings)
def test_initial_defaults(self):
settings = Settings()
self.assertEqual(len(settings.attributes), 1)
self.assertIn('TEST_DEFAULT', settings.attributes)
attr = settings.attributes['TEST_DEFAULT']
self.assertIsInstance(attr, SettingsAttribute)
self.assertEqual(attr.value, 'defvalue')
self.assertEqual(attr.priority, 10)
@mock.patch.dict('scrapy.settings.SETTINGS_PRIORITIES', {})
@mock.patch('scrapy.settings.default_settings', {})
def test_initial_values(self):
settings = Settings({'TEST_OPTION': 'value'}, 10)
self.assertEqual(len(settings.attributes), 1)
self.assertIn('TEST_OPTION', settings.attributes)
attr = settings.attributes['TEST_OPTION']
self.assertIsInstance(attr, SettingsAttribute)
self.assertEqual(attr.value, 'value')
self.assertEqual(attr.priority, 10)
def test_set_new_attribute(self):
self.settings.attributes = {}
self.settings.set('TEST_OPTION', 'value', 0)
self.assertIn('TEST_OPTION', self.settings.attributes)
attr = self.settings.attributes['TEST_OPTION']
self.assertIsInstance(attr, SettingsAttribute)
self.assertEqual(attr.value, 'value')
self.assertEqual(attr.priority, 0)
def test_set_instance_identity_on_update(self):
attr = SettingsAttribute('value', 0)
self.settings.attributes = {'TEST_OPTION': attr}
self.settings.set('TEST_OPTION', 'othervalue', 10)
self.assertIn('TEST_OPTION', self.settings.attributes)
self.assertIs(attr, self.settings.attributes['TEST_OPTION'])
    def test_set_calls_settings_attributes_methods_on_update(self):
        # Settings.set() must delegate to SettingsAttribute.set() and never
        # assign the attribute's fields directly, whatever the priority.
        with mock.patch.object(SettingsAttribute, '__setattr__') as mock_setattr, \
                mock.patch.object(SettingsAttribute, 'set') as mock_set:
            attr = SettingsAttribute('value', 10)
            self.settings.attributes = {'TEST_OPTION': attr}
            # Constructing `attr` above already hit the mocks; start clean.
            mock_set.reset_mock()
            mock_setattr.reset_mock()
            for priority in (0, 10, 20):
                self.settings.set('TEST_OPTION', 'othervalue', priority)
                mock_set.assert_called_once_with('othervalue', priority)
                self.assertFalse(mock_setattr.called)
                # Reset between priorities so assert_called_once_with holds.
                mock_set.reset_mock()
                mock_setattr.reset_mock()
def test_setdict_alias(self):
with mock.patch.object(self.settings, 'set') as mock_set:
self.settings.setdict({'TEST_1': 'value1', 'TEST_2': 'value2'}, 10)
self.assertEqual(mock_set.call_count, 2)
calls = [mock.call('TEST_1', 'value1', 10),
mock.call('TEST_2', 'value2', 10)]
mock_set.assert_has_calls(calls, any_order=True)
def test_setmodule_only_load_uppercase_vars(self):
class ModuleMock():
UPPERCASE_VAR = 'value'
MIXEDcase_VAR = 'othervalue'
lowercase_var = 'anothervalue'
self.settings.attributes = {}
self.settings.setmodule(ModuleMock(), 10)
self.assertIn('UPPERCASE_VAR', self.settings.attributes)
self.assertNotIn('MIXEDcase_VAR', self.settings.attributes)
self.assertNotIn('lowercase_var', self.settings.attributes)
self.assertEqual(len(self.settings.attributes), 1)
def test_setmodule_alias(self):
with mock.patch.object(self.settings, 'set') as mock_set:
self.settings.setmodule(default_settings, 10)
mock_set.assert_called_with('TEST_DEFAULT', 'defvalue', 10)
def test_setmodule_by_path(self):
self.settings.attributes = {}
self.settings.setmodule(default_settings, 10)
ctrl_attributes = self.settings.attributes.copy()
self.settings.attributes = {}
self.settings.setmodule(
'tests.test_settings.default_settings', 10)
self.assertItemsEqual(six.iterkeys(self.settings.attributes),
six.iterkeys(ctrl_attributes))
for attr, ctrl_attr in zip(six.itervalues(self.settings.attributes),
six.itervalues(ctrl_attributes)):
self.assertEqual(attr.value, ctrl_attr.value)
self.assertEqual(attr.priority, ctrl_attr.priority)
    def test_get(self):
        # One shared fixture exercising every typed getter, its string/native
        # coercions, and its default-value fallback.
        test_configuration = {
            'TEST_ENABLED1': '1',
            'TEST_ENABLED2': True,
            'TEST_ENABLED3': 1,
            'TEST_DISABLED1': '0',
            'TEST_DISABLED2': False,
            'TEST_DISABLED3': 0,
            'TEST_INT1': 123,
            'TEST_INT2': '123',
            'TEST_FLOAT1': 123.45,
            'TEST_FLOAT2': '123.45',
            'TEST_LIST1': ['one', 'two'],
            'TEST_LIST2': 'one,two',
            'TEST_STR': 'value',
            'TEST_DICT1': {'key1': 'val1', 'ke2': 3},
            'TEST_DICT2': '{"key1": "val1", "ke2": 3}',
        }
        settings = self.settings
        settings.attributes = {key: SettingsAttribute(value, 0) for key, value
                               in six.iteritems(test_configuration)}
        # getbool: truthy/falsy representations plus missing-key defaults.
        self.assertTrue(settings.getbool('TEST_ENABLED1'))
        self.assertTrue(settings.getbool('TEST_ENABLED2'))
        self.assertTrue(settings.getbool('TEST_ENABLED3'))
        self.assertFalse(settings.getbool('TEST_ENABLEDx'))
        self.assertTrue(settings.getbool('TEST_ENABLEDx', True))
        self.assertFalse(settings.getbool('TEST_DISABLED1'))
        self.assertFalse(settings.getbool('TEST_DISABLED2'))
        self.assertFalse(settings.getbool('TEST_DISABLED3'))
        # getint / getfloat: native and string inputs, defaults on miss.
        self.assertEqual(settings.getint('TEST_INT1'), 123)
        self.assertEqual(settings.getint('TEST_INT2'), 123)
        self.assertEqual(settings.getint('TEST_INTx'), 0)
        self.assertEqual(settings.getint('TEST_INTx', 45), 45)
        self.assertEqual(settings.getfloat('TEST_FLOAT1'), 123.45)
        self.assertEqual(settings.getfloat('TEST_FLOAT2'), 123.45)
        self.assertEqual(settings.getfloat('TEST_FLOATx'), 0.0)
        self.assertEqual(settings.getfloat('TEST_FLOATx', 55.0), 55.0)
        # getlist: list input and comma-separated string input.
        self.assertEqual(settings.getlist('TEST_LIST1'), ['one', 'two'])
        self.assertEqual(settings.getlist('TEST_LIST2'), ['one', 'two'])
        self.assertEqual(settings.getlist('TEST_LISTx'), [])
        self.assertEqual(settings.getlist('TEST_LISTx', ['default']), ['default'])
        # plain get / __getitem__: missing keys yield None, not KeyError.
        self.assertEqual(settings['TEST_STR'], 'value')
        self.assertEqual(settings.get('TEST_STR'), 'value')
        self.assertEqual(settings['TEST_STRx'], None)
        self.assertEqual(settings.get('TEST_STRx'), None)
        self.assertEqual(settings.get('TEST_STRx', 'default'), 'default')
        # getdict: dict input, JSON-string input, and invalid input.
        self.assertEqual(settings.getdict('TEST_DICT1'), {'key1': 'val1', 'ke2': 3})
        self.assertEqual(settings.getdict('TEST_DICT2'), {'key1': 'val1', 'ke2': 3})
        self.assertEqual(settings.getdict('TEST_DICT3'), {})
        self.assertEqual(settings.getdict('TEST_DICT3', {'key1': 5}), {'key1': 5})
        self.assertRaises(ValueError, settings.getdict, 'TEST_LIST1')
def test_copy(self):
values = {
'TEST_BOOL': True,
'TEST_LIST': ['one', 'two'],
'TEST_LIST_OF_LISTS': [['first_one', 'first_two'],
['second_one', 'second_two']]
}
self.settings.setdict(values)
copy = self.settings.copy()
self.settings.set('TEST_BOOL', False)
self.assertTrue(copy.get('TEST_BOOL'))
test_list = self.settings.get('TEST_LIST')
test_list.append('three')
self.assertListEqual(copy.get('TEST_LIST'), ['one', 'two'])
test_list_of_lists = self.settings.get('TEST_LIST_OF_LISTS')
test_list_of_lists[0].append('first_three')
self.assertListEqual(copy.get('TEST_LIST_OF_LISTS')[0],
['first_one', 'first_two'])
def test_freeze(self):
self.settings.freeze()
with self.assertRaises(TypeError) as cm:
self.settings.set('TEST_BOOL', False)
self.assertEqual(str(cm.exception),
"Trying to modify an immutable Settings object")
    def test_frozencopy(self):
        """frozencopy() should copy() the settings and freeze the result."""
        # NOTE(review): the inner patch targets 'freeze' on the *mock for
        # copy* rather than on copy's return value, so mock_freeze is not
        # the method frozencopy() actually invokes.  Also,
        # 'assert_call_once' is not a real Mock assertion method (the real
        # one is 'assert_called_once_with'), so these two lines silently
        # pass regardless -- confirm and tighten this test.
        with mock.patch.object(self.settings, 'copy') as mock_copy:
            with mock.patch.object(mock_copy, 'freeze') as mock_freeze:
                mock_object = self.settings.frozencopy()
                mock_copy.assert_call_once()
                mock_freeze.assert_call_once()
                self.assertEqual(mock_object, mock_copy.return_value)
    def test_deprecated_attribute_overrides(self):
        """The deprecated ``overrides`` proxy must emit a deprecation
        warning and read/write the underlying settings."""
        self.settings.set('BAR', 'fuz', priority='cmdline')
        with warnings.catch_warnings(record=True) as w:
            self.settings.overrides['BAR'] = 'foo'
            self.assertIn("Settings.overrides", str(w[0].message))
            self.assertEqual(self.settings.get('BAR'), 'foo')
            self.assertEqual(self.settings.overrides.get('BAR'), 'foo')
            self.assertIn('BAR', self.settings.overrides)
            # update() behaves like a dict update through the proxy.
            self.settings.overrides.update(BAR='bus')
            self.assertEqual(self.settings.get('BAR'), 'bus')
            self.assertEqual(self.settings.overrides.get('BAR'), 'bus')
            # setdefault() must not replace an existing value ...
            self.settings.overrides.setdefault('BAR', 'fez')
            self.assertEqual(self.settings.get('BAR'), 'bus')
            # ... but must set a missing one.
            self.settings.overrides.setdefault('FOO', 'fez')
            self.assertEqual(self.settings.get('FOO'), 'fez')
            self.assertEqual(self.settings.overrides.get('FOO'), 'fez')
def test_deprecated_attribute_defaults(self):
self.settings.set('BAR', 'fuz', priority='default')
with warnings.catch_warnings(record=True) as w:
self.settings.defaults['BAR'] = 'foo'
self.assertIn("Settings.defaults", str(w[0].message))
self.assertEqual(self.settings.get('BAR'), 'foo')
self.assertEqual(self.settings.defaults.get('BAR'), 'foo')
self.assertIn('BAR', self.settings.defaults)
class CrawlerSettingsTest(unittest.TestCase):
    """Tests for the deprecated CrawlerSettings compatibility class."""
    def test_deprecated_crawlersettings(self):
        # Helper: build a CrawlerSettings around an ad-hoc settings module.
        def _get_settings(settings_dict=None):
            settings_module = type('SettingsModuleMock', (object,), settings_dict or {})
            return CrawlerSettings(settings_module)
        # Instantiating CrawlerSettings must emit a deprecation warning.
        with warnings.catch_warnings(record=True) as w:
            settings = _get_settings()
            self.assertIn("CrawlerSettings is deprecated", str(w[0].message))
            # test_global_defaults
            self.assertEqual(settings.getint('DOWNLOAD_TIMEOUT'), 180)
            # test_defaults
            settings.defaults['DOWNLOAD_TIMEOUT'] = '99'
            self.assertEqual(settings.getint('DOWNLOAD_TIMEOUT'), 99)
            # test_settings_module
            settings = _get_settings({'DOWNLOAD_TIMEOUT': '3'})
            self.assertEqual(settings.getint('DOWNLOAD_TIMEOUT'), 3)
            # test_overrides -- overrides take precedence over the module.
            settings = _get_settings({'DOWNLOAD_TIMEOUT': '3'})
            settings.overrides['DOWNLOAD_TIMEOUT'] = '15'
            self.assertEqual(settings.getint('DOWNLOAD_TIMEOUT'), 15)
if __name__ == "__main__":
unittest.main()
| |
# -*- coding: utf-8 -*-
import ast
from PyQt4 import Qt, QtCore
from . import util
CPP_CLASS_HEADER_BASE = '''class {0} {1}{{
public:
{0}();
~{0}();
{2}
}};'''
QT_SIGNAL_HEADER_BASE = '''signals:\n {0}\n'''
QT_SIGNAL_FUNCTION_BASE = 'void {0}({1});'
class ParserBase(object):
    """Common base for AST-node wrappers; stores the wrapped node."""
    def __init__(self, node):
        self.source_node = node
    @property
    def source_node(self):
        """The AST node this parser wraps."""
        return self.__source_node
    @source_node.setter
    def source_node(self, value):
        self.__source_node = value
class PyQtClass(ParserBase):
    """A parsed Python class plus the Qt-relevant pieces extracted from
    it (base classes and signals) for C++ header generation."""
    def __init__(self, node):
        super(PyQtClass, self).__init__(node)
        self.bases = []    # TypeBase instances, one per base class
        self.slots = []    # collected but not rendered yet
        self.signals = []  # PyQtSignal instances from the class body
        self.members = []  # collected but not rendered yet
    def toCppHeader(self):
        """Return the C++ header text for this class."""
        return CPP_CLASS_HEADER_BASE.format(self.name, self._bases(), self._header())
    def _bases(self):
        """Return the C++ inheritance clause, or '' with no bases."""
        if self.bases:
            return ':\n public {}\n'.format(
                ',\n public '.join([x.toStr() for x in self.bases])
            )
        return ''
    def _header(self):
        """Return the class body; currently only the signals section."""
        body = []
        # members
        # signals
        signals = [signal.toCppDefinition() for signal in self.signals]
        if signals:
            body.append(QT_SIGNAL_HEADER_BASE.format('\n '.join(signals)))
        # slots
        return '\n'.join(body)
    @property
    def name(self):
        """Name of the underlying class definition node."""
        return self.source_node.name
    @staticmethod
    def parse(node):
        """Build a PyQtClass from an ast.ClassDef node.

        Raises UnknownType when a base class cannot be resolved.
        """
        result = PyQtClass(node)
        for child in node.bases:
            # Renamed from 'type' so the builtin is not shadowed.
            base_type = TypeParser.parse(child)
            if base_type is None:
                # NOTE(review): 'UnknownType' is rebound later in the
                # module by 'class UnknownType(TypeBase)', so this raise
                # would fail with TypeError at runtime -- the exception
                # class should be renamed.
                raise UnknownType
            result.bases.append(base_type)
        for child in node.body:
            if isinstance(child, ast.Assign):
                result.signals += PyQtSignal.parse(child)
        return result
class PyQtSignal(ParserBase):
    """A pyqtSignal assignment: signal name plus its parameter types."""
    def __init__(self, node, name, params=None):
        super(PyQtSignal, self).__init__(node)
        self.name = name
        # Bug fix: 'params=[]' was a shared mutable default argument;
        # each instance now gets its own list when none is supplied.
        self.params = params if params is not None else []
    def toCppDefinition(self):
        """Render the signal as a C++ declaration, e.g. 'void sig(int);'."""
        return QT_SIGNAL_FUNCTION_BASE.format(
            self.name,
            ', '.join([x.toStr() for x in self.params])
        )
    @property
    def name(self):
        """Signal name (the assignment target identifier)."""
        return self.__name
    @name.setter
    def name(self, name):
        self.__name = name
    @property
    def params(self):
        """List of resolved parameter types (TypeBase instances)."""
        return self.__params
    @params.setter
    def params(self, params):
        self.__params = params
    @classmethod
    def parse(cls, node):
        """Return PyQtSignal objects for an 'x = pyqtSignal(...)' assign.

        Returns [] when the node is not a pyqtSignal assignment; raises
        UnknownType when a signal argument type cannot be resolved.
        """
        # check assign of pyqtSignal
        if not isinstance(node, ast.Assign):
            return []
        # assign targets: collect plain names on the left-hand side
        names = [target.id for target in node.targets
                 if isinstance(target, ast.Name)]
        # check call of pyqtSignal
        if isinstance(node.value, ast.Call) and cls._isPyQtSignal(node.value.func):
            # resolve the signal argument types
            args = []
            for arg in node.value.args:
                arg_type = TypeParser.parse(arg)
                if arg_type is None:
                    raise UnknownType
                args.append(arg_type)
            # one signal object per assignment target
            return [cls(node, name, args) for name in names]
        # is not pyqtSignal
        return []
    @classmethod
    def _isPyQtSignal(cls, node):
        """True when the call target is pyqtSignal or QtCore.pyqtSignal."""
        if isinstance(node, ast.Attribute) and isinstance(node.value, ast.Name):
            return node.value.id == 'QtCore' and node.attr == 'pyqtSignal'
        elif isinstance(node, ast.Name):
            return node.id == 'pyqtSignal'
        return False
class TypeException(Exception):
    """Base class for type-resolution errors in this module."""
    pass
class UnknownType(TypeException):
    """Raised when an AST node cannot be mapped to a known type.

    NOTE(review): this name is later rebound by the
    'class UnknownType(TypeBase)' definition below, so bare
    'raise UnknownType' statements elsewhere actually reference that
    class -- one of the two should be renamed.
    """
    pass
class TypeParser(object):
    """Registry of type parsers, tried in registration order."""
    TYPES = []  # registered parser classes
    @classmethod
    def parse(cls, node):
        """Return the first successful parse of *node*, else None."""
        for candidate in cls.TYPES:
            parsed = candidate.parse(node)
            if parsed is not None:
                return parsed
        return None
    @classmethod
    def register(cls, parser):
        """Append *parser* to the lookup chain."""
        cls.TYPES.append(parser)
class TypeBase(ParserBase):
    """Base for resolved types: wraps a node plus a type name."""
    def __init__(self, node, name):
        super(TypeBase, self).__init__(node)
        self.name = name
    def toStr(self):
        """Return the C++ spelling of this type."""
        return self.name
    @property
    def name(self):
        """The resolved type name."""
        return self.__name
    @name.setter
    def name(self, value):
        self.__name = value
class BasicType(TypeBase):
    """A Python builtin scalar/container type and its C++/Qt mapping."""
    AllTypes = [
        'bool',
        'int',
        'long',
        'float',
        'complex',
        'str',
        'unicode',
        'tuple',
        'list',
        'dict',
    ]
    def toStr(self):
        """Map the Python type name onto a C++/Qt type name."""
        if self.name in ('str', 'unicode'):
            return 'QString'
        if self.name in ('tuple', 'list', 'dict'):
            return 'QVariant'
        if self.name == 'complex':
            return 'std::complex<double>'
        return super(BasicType, self).toStr()
    @classmethod
    def parse(cls, node):
        """Return a BasicType for an ast.Name naming a builtin, else None."""
        if isinstance(node, ast.Name) and node.id in cls.AllTypes:
            return BasicType(node, node.id)
        return None
class PyQtType(TypeBase):
    """A Qt type referenced bare ('QPoint') or as 'QtCore.QPoint'."""
    AllTypes = []  # populated via register() at import time
    @classmethod
    def parse(cls, node):
        """Return a PyQtType for a registered Qt name, else None."""
        if isinstance(node, ast.Attribute) and node.attr in cls.AllTypes:
            return PyQtType(node, node.attr)
        if isinstance(node, ast.Name) and node.id in cls.AllTypes:
            return PyQtType(node, node.id)
        return None
    @classmethod
    def register(cls, name):
        """Record *name* as a known Qt type."""
        cls.AllTypes.append(name)
class UnknownType(TypeBase):
    """Fallback parser: accepts any attribute/name as an opaque type.

    NOTE(review): this definition shadows the UnknownType *exception*
    declared earlier in the module, so 'raise UnknownType' elsewhere
    would try to instantiate this class without arguments (a TypeError)
    -- one of the two should be renamed.
    """
    AllTypes = []
    @classmethod
    def parse(cls, node):
        # Attribute chains become C++ scoped names (a.b -> a::b).
        if isinstance(node, ast.Attribute):
            return UnknownType(node, '::'.join(util.attr2list(node)))
        elif isinstance(node, ast.Name):
            return UnknownType(node, util.name2str(node))
        return None
    @classmethod
    def register(cls, name):
        cls.AllTypes.append(name)
# register
# register
def pyqt_register(base):
    """Register every Qt wrapper type found on *base* with PyQtType.

    Bug fix: the attribute lookup previously used the module-level 'Qt'
    instead of the 'base' parameter; that only worked because the sole
    caller passes Qt itself.
    """
    for name in dir(base):
        if isinstance(getattr(base, name), QtCore.pyqtWrapperType):
            PyQtType.register(name)
pyqt_register(Qt)
TypeParser.register(BasicType)
TypeParser.register(PyQtType)
# UnknownType accepts anything, so it must stay last in the chain.
TypeParser.register(UnknownType)
| |
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Unit tests for the nova-policy-check CLI interfaces.
"""
import fixtures
import mock
from six.moves import StringIO
from nova.cmd import policy_check
import nova.conf
from nova import context as nova_context
from nova import db
from nova import exception
from nova.policies import base as base_policies
from nova.policies import instance_actions as ia_policies
from nova import test
from nova.tests.unit import fake_instance
from nova.tests.unit import policy_fixture
CONF = nova.conf.CONF
class TestPolicyCheck(test.NoDBTestCase):
    """Tests for the PolicyCommands behind the nova-policy-check CLI."""
    def setUp(self):
        super(TestPolicyCheck, self).setUp()
        # Capture stdout so tests can assert on the command's output.
        self.output = StringIO()
        self.useFixture(fixtures.MonkeyPatch('sys.stdout', self.output))
        self.policy = self.useFixture(policy_fixture.RealPolicyFixture())
        self.cmd = policy_check.PolicyCommands()
    @mock.patch.object(policy_check.PolicyCommands, '_filter_rules')
    @mock.patch.object(policy_check.PolicyCommands, '_get_target')
    @mock.patch.object(policy_check.PolicyCommands, '_get_context')
    def test_check(self, mock_get_context, mock_get_target,
                   mock_filter_rules):
        """check() wires context/target into _filter_rules and prints
        one passing rule per line."""
        fake_rules = ['fake:rule', 'faux:roule']
        mock_filter_rules.return_value = fake_rules
        self.cmd.check(target=mock.sentinel.target)
        mock_get_context.assert_called_once_with()
        mock_get_target.assert_called_once_with(mock_get_context.return_value,
                                                mock.sentinel.target)
        mock_filter_rules.assert_called_once_with(
            mock_get_context.return_value, '', mock_get_target.return_value)
        self.assertEqual('\n'.join(fake_rules) + '\n', self.output.getvalue())
    @mock.patch.object(nova_context, 'RequestContext')
    @mock.patch.object(policy_check, 'CONF')
    def test_get_context(self, mock_CONF, mock_RequestContext):
        """_get_context() builds a RequestContext from the os_* options."""
        context = self.cmd._get_context()
        self.assertEqual(mock_RequestContext.return_value, context)
        mock_RequestContext.assert_called_once_with(
            roles=mock_CONF.os_roles,
            user_id=mock_CONF.os_user_id,
            project_id=mock_CONF.os_tenant_id)
    def test_get_target_none(self):
        """No target attributes yields a None target."""
        target = self.cmd._get_target(mock.sentinel.context, None)
        self.assertIsNone(target)
    def test_get_target_invalid_attribute(self):
        """Unknown attribute names raise InvalidAttribute."""
        self.assertRaises(exception.InvalidAttribute, self.cmd._get_target,
                          mock.sentinel.context, ['nope=nada'])
    def test_get_target(self):
        """key=value pairs are parsed into a target dict."""
        expected_target = {
            'project_id': 'fake-proj',
            'user_id': 'fake-user',
            'quota_class': 'fake-quota-class',
            'availability_zone': 'fake-az',
        }
        given_target = ['='.join([key, val])
                        for key, val in expected_target.items()]
        actual_target = self.cmd._get_target(mock.sentinel.context,
                                             given_target)
        self.assertDictEqual(expected_target, actual_target)
    @mock.patch.object(nova_context, 'get_admin_context')
    @mock.patch.object(db, 'instance_get_by_uuid')
    def test_get_target_instance(self, mock_instance_get,
                                 mock_get_admin_context):
        """An instance_id target is resolved via an admin DB lookup."""
        admin_context = nova_context.RequestContext(is_admin=True)
        mock_get_admin_context.return_value = admin_context
        given_target = ['instance_id=fake_id']
        mock_instance_get.return_value = fake_instance.fake_db_instance()
        target = self.cmd._get_target(mock.sentinel.context,
                                      given_target)
        self.assertEqual(target,
                         {'user_id': 'fake-user', 'project_id': 'fake-project'})
        mock_instance_get.assert_called_once_with(admin_context,
                                                  'fake_id')
    def _check_filter_rules(self, context=None, target=None,
                            expected_rules=None):
        # Helper: run _filter_rules for the os-instance-actions rules and
        # compare against the expected passing set (defaults to all rules).
        context = context or nova_context.get_admin_context()
        if expected_rules is None:
            expected_rules = [
                r.name for r in ia_policies.list_rules()]
        passing_rules = self.cmd._filter_rules(
            context, 'os-instance-actions', target)
        self.assertEqual(set(expected_rules), set(passing_rules))
    def test_filter_rules_non_admin(self):
        """A plain context passes only ANY / ADMIN_OR_OWNER rules."""
        context = nova_context.RequestContext()
        rule_conditions = [base_policies.RULE_ANY,
                           base_policies.RULE_ADMIN_OR_OWNER]
        expected_rules = [r.name for r in ia_policies.list_rules() if
                          r.check_str in rule_conditions]
        self._check_filter_rules(context, expected_rules=expected_rules)
    def test_filter_rules_admin(self):
        """An admin context passes every rule."""
        self._check_filter_rules()
    def test_filter_rules_instance_non_admin(self):
        """A non-admin, non-owner context passes only RULE_ANY rules."""
        db_context = nova_context.RequestContext(user_id='fake-user',
                                                 project_id='fake-project')
        instance = fake_instance.fake_instance_obj(db_context)
        context = nova_context.RequestContext()
        expected_rules = [r.name for r in ia_policies.list_rules() if
                          r.check_str == base_policies.RULE_ANY]
        self._check_filter_rules(context, instance, expected_rules)
    def test_filter_rules_instance_admin(self):
        """An admin context passes every rule against an instance."""
        db_context = nova_context.RequestContext(user_id='fake-user',
                                                 project_id='fake-project')
        instance = fake_instance.fake_instance_obj(db_context)
        self._check_filter_rules(target=instance)
    def test_filter_rules_instance_owner(self):
        """The instance owner passes ANY and ADMIN_OR_OWNER rules."""
        db_context = nova_context.RequestContext(user_id='fake-user',
                                                 project_id='fake-project')
        instance = fake_instance.fake_instance_obj(db_context)
        rule_conditions = [base_policies.RULE_ANY,
                           base_policies.RULE_ADMIN_OR_OWNER]
        expected_rules = [r.name for r in ia_policies.list_rules() if
                          r.check_str in rule_conditions]
        self._check_filter_rules(db_context, instance, expected_rules)
    @mock.patch.object(policy_check.config, 'parse_args')
    @mock.patch.object(policy_check, 'CONF')
    def _check_main(self, mock_CONF, mock_parse_args,
                    category_name='check', expected_return_value=0):
        # Helper: run main() with mocked CONF/arg-parsing; verify the
        # return value and that the CLI options were registered.
        mock_CONF.category.name = category_name
        return_value = policy_check.main()
        self.assertEqual(expected_return_value, return_value)
        mock_CONF.register_cli_opts.assert_called_once_with(
            policy_check.cli_opts)
        mock_CONF.register_cli_opt.assert_called_once_with(
            policy_check.category_opt)
    @mock.patch.object(policy_check.version, 'version_string_with_package',
                       return_value="x.x.x")
    def test_main_version(self, mock_version_string):
        """The 'version' category prints the package version."""
        self._check_main(category_name='version')
        self.assertEqual("x.x.x\n", self.output.getvalue())
    @mock.patch.object(policy_check.cmd_common, 'print_bash_completion')
    def test_main_bash_completion(self, mock_print_bash):
        """The 'bash-completion' category prints completion data."""
        self._check_main(category_name='bash-completion')
        mock_print_bash.assert_called_once_with(policy_check.CATEGORIES)
    @mock.patch.object(policy_check.cmd_common, 'get_action_fn')
    def test_main(self, mock_get_action_fn):
        """main() dispatches to the resolved action function."""
        mock_fn = mock.Mock()
        mock_fn_args = [mock.sentinel.arg]
        mock_fn_kwargs = {'key': mock.sentinel.value}
        mock_get_action_fn.return_value = (mock_fn, mock_fn_args,
                                           mock_fn_kwargs)
        self._check_main(expected_return_value=mock_fn.return_value)
        mock_fn.assert_called_once_with(mock.sentinel.arg,
                                        key=mock.sentinel.value)
    @mock.patch.object(policy_check.cmd_common, 'get_action_fn')
    def test_main_error(self, mock_get_action_fn):
        """Exceptions from the action are caught and reported as errors."""
        mock_fn = mock.Mock(side_effect=Exception)
        mock_get_action_fn.return_value = (mock_fn, [], {})
        self._check_main(expected_return_value=1)
        self.assertIn("error: ", self.output.getvalue())
| |
#
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads. A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Try calling `multiprocessing.doc.main()` to read the html
# documentation in in a webbrowser.
#
#
# Copyright (c) 2006-2008, R Oudkerk
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of author nor the names of any contributors may be
# used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
__version__ = '0.70.4.dev0'
__all__ = [
'Process', 'current_process', 'active_children', 'freeze_support',
'Manager', 'Pipe', 'cpu_count', 'log_to_stderr', 'get_logger',
'allow_connection_pickling', 'BufferTooShort', 'TimeoutError',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
'Event', 'Queue', 'JoinableQueue', 'Pool', 'Value', 'Array',
'RawValue', 'RawArray', 'SUBDEBUG', 'SUBWARNING',
]
__author__ = 'R. Oudkerk (r.m.oudkerk@gmail.com)'
#
# Imports
#
import os
import sys
from multiprocess.process import Process, current_process, active_children
from multiprocess.util import SUBDEBUG, SUBWARNING
#
# Exceptions
#
class ProcessError(Exception):
    """Base class for exceptions raised by this package."""
    pass
class BufferTooShort(ProcessError):
    # Mirrors multiprocessing.BufferTooShort: a supplied buffer was too
    # small for the received message.
    pass
class TimeoutError(ProcessError):
    # Mirrors multiprocessing.TimeoutError: a timeout expired.
    pass
class AuthenticationError(ProcessError):
    # Mirrors multiprocessing.AuthenticationError: connection
    # authentication failed.
    pass
import _multiprocess as _multiprocessing
#
# Definitions not depending on native semaphores
#
def Manager():
    '''
    Return a started SyncManager tied to a running server process.

    Its methods such as `Lock()`, `Condition()` and `Queue()` create
    shared objects.
    '''
    from multiprocess.managers import SyncManager
    manager = SyncManager()
    manager.start()
    return manager
def Pipe(duplex=True):
    '''
    Return a pair of connection objects joined by a pipe.
    '''
    from multiprocess.connection import Pipe as _Pipe
    return _Pipe(duplex)
def cpu_count():
    '''
    Return the number of CPUs in the system.

    Raises NotImplementedError when the count cannot be determined.
    '''
    if sys.platform == 'win32':
        # Windows publishes the count in an environment variable.
        try:
            count = int(os.environ['NUMBER_OF_PROCESSORS'])
        except (ValueError, KeyError):
            count = 0
    elif 'bsd' in sys.platform or sys.platform == 'darwin':
        # BSD-style systems expose it through sysctl.
        command = '/sbin/sysctl -n hw.ncpu'
        if sys.platform == 'darwin':
            command = '/usr' + command
        try:
            with os.popen(command) as pipe:
                count = int(pipe.read())
        except ValueError:
            count = 0
    else:
        # Other POSIX systems report it via sysconf.
        try:
            count = os.sysconf('SC_NPROCESSORS_ONLN')
        except (ValueError, OSError, AttributeError):
            count = 0
    if count >= 1:
        return count
    raise NotImplementedError('cannot determine number of cpus')
def freeze_support():
    '''
    Check whether this is a fake forked process in a frozen executable.
    If so then run code specified by commandline and exit.
    '''
    frozen = getattr(sys, 'frozen', False)
    if frozen and sys.platform == 'win32':
        from multiprocess.forking import freeze_support
        freeze_support()
def get_logger():
    '''
    Return the package logger, creating it on first use.
    '''
    from multiprocess.util import get_logger as _get_logger
    return _get_logger()
def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr.
    '''
    from multiprocess.util import log_to_stderr as _log_to_stderr
    return _log_to_stderr(level)
def allow_connection_pickling():
    '''
    Install support for sending connections and sockets between processes
    '''
    # Importing the module registers the necessary reducers as a side
    # effect; the name itself is intentionally unused.
    from multiprocess import reduction
#
# Definitions depending on native semaphores
#
def Lock():
    '''
    Create and return a non-recursive lock object.
    '''
    from multiprocess.synchronize import Lock as _Lock
    return _Lock()
def RLock():
    '''
    Create and return a recursive lock object.
    '''
    from multiprocess.synchronize import RLock as _RLock
    return _RLock()
def Condition(lock=None):
    '''
    Create and return a condition object.
    '''
    from multiprocess.synchronize import Condition as _Condition
    return _Condition(lock)
def Semaphore(value=1):
    '''
    Create and return a semaphore object.
    '''
    from multiprocess.synchronize import Semaphore as _Semaphore
    return _Semaphore(value)
def BoundedSemaphore(value=1):
    '''
    Create and return a bounded semaphore object.
    '''
    from multiprocess.synchronize import BoundedSemaphore as _BoundedSemaphore
    return _BoundedSemaphore(value)
def Event():
    '''
    Create and return an event object.
    '''
    from multiprocess.synchronize import Event as _Event
    return _Event()
def Queue(maxsize=0):
    '''
    Create and return a shared queue object.
    '''
    from multiprocess.queues import Queue as _Queue
    return _Queue(maxsize)
def JoinableQueue(maxsize=0):
    '''
    Create and return a joinable shared queue object.
    '''
    from multiprocess.queues import JoinableQueue as _JoinableQueue
    return _JoinableQueue(maxsize)
def Pool(processes=None, initializer=None, initargs=()):
    '''
    Create and return a process pool object.
    '''
    from multiprocess.pool import Pool as _Pool
    return _Pool(processes, initializer, initargs)
def RawValue(typecode_or_type, *args):
    '''
    Create and return a shared (unsynchronized) ctypes object.
    '''
    from multiprocess.sharedctypes import RawValue as _RawValue
    return _RawValue(typecode_or_type, *args)
def RawArray(typecode_or_type, size_or_initializer):
    '''
    Create and return a shared (unsynchronized) ctypes array.
    '''
    from multiprocess.sharedctypes import RawArray as _RawArray
    return _RawArray(typecode_or_type, size_or_initializer)
def Value(typecode_or_type, *args, **kwds):
    '''
    Create and return a synchronized shared object.
    '''
    from multiprocess.sharedctypes import Value as _Value
    return _Value(typecode_or_type, *args, **kwds)
def Array(typecode_or_type, size_or_initializer, **kwds):
    '''
    Create and return a synchronized shared array.
    '''
    from multiprocess.sharedctypes import Array as _Array
    return _Array(typecode_or_type, size_or_initializer, **kwds)
#
#
#
if sys.platform == 'win32':
    def set_executable(executable):
        '''
        Sets the path to a python.exe or pythonw.exe binary used to run
        child processes on Windows instead of sys.executable.
        Useful for people embedding Python.
        '''
        from multiprocess.forking import set_executable
        set_executable(executable)
    # Only part of the public API on Windows.
    __all__ += ['set_executable']
| |
import os
import re
from statistics import mean
import numpy as np
import scipy
from conch import analyze_segments
from conch.analysis.praat import PraatAnalysisFunction
from conch.analysis.segments import SegmentMapping
from conch.analysis.formants import PraatSegmentFormantTrackFunction, FormantTrackFunction, \
PraatSegmentFormantPointFunction
from pyraat.parse_outputs import parse_point_script_output
from ...exceptions import AcousticError
from ..io import point_measures_from_csv, point_measures_to_csv
from ..classes import Track, TimePoint
def sanitize_bandwidths(value):
    """Cleans bandwidth data from dictionary form.

    Parameters
    ----------
    value : dict
        Observation values produced by reading out from Praat.  Each of
        the keys 'B1', 'B2', 'B3' may hold a scalar, a sequence whose
        first element is the measurement, or None.

    Returns
    -------
    float
        The first bandwidth.
    float
        The second bandwidth.
    float
        The third bandwidth.
    """
    def _unwrap(measurement):
        # Praat output may wrap the value in a sequence; unwrap it, and
        # treat missing (None) measurements as 0.
        try:
            measurement = measurement[0]
        except TypeError:
            pass
        return 0 if measurement is None else measurement
    # The original repeated the same try/except for each band; a helper
    # keeps the three extractions identical by construction.
    return (_unwrap(value['B1']), _unwrap(value['B2']), _unwrap(value['B3']))
def track_nformants(track):
    """Gets the number of formants used to arrive at a given track.

    Parameters
    ----------
    track : dict
        The measured track; formant keys start with 'F' followed by the
        formant index digit.

    Returns
    -------
    int
        The number of formants used to measure that track.
    """
    formant_indices = {int(key[1]) for key in track if key.startswith('F')}
    return max(formant_indices)
def parse_multiple_formant_output(output):
    """Split a multi-pass Praat listing into per-num_formants outputs."""
    # Normalise literal backslash escape sequences -- presumably these
    # appear in escaped Praat output (note: raw strings, not newlines).
    normalised = output.replace(r'\r\n', r'\n')
    results = {}
    # Listings are separated by blank lines.
    for listing in re.split(r'\r?\n\r?\n', normalised):
        parsed = parse_point_script_output(listing)
        results[parsed.pop('num_formants')] = parsed
    return results
def generate_variable_formants_point_function(corpus_context, min_formants, max_formants):
    """Generates a function used to call Praat to measure formants and bandwidths with variable num_formants.

    This specific function returns a single point per formant at a third of the way through the segment.

    Parameters
    ----------
    corpus_context : :class:`~polyglot.corpus.context.CorpusContext`
        The CorpusContext object of the corpus.
    min_formants : int
        The minimum number of formants to measure with on subsequent passes (default is 4).
    max_formants : int
        The maximum number of formants to measure with on subsequent passes (default is 7).

    Returns
    -------
    formant_function : Partial function object
        The function used to call Praat.
    """
    maximum_frequency = 5500
    script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               'multiple_num_formants.praat')
    func = PraatAnalysisFunction(
        script_path, praat_path=corpus_context.config.praat_path,
        arguments=[0.01, 0.025, min_formants, max_formants, maximum_frequency])
    # The script emits one listing per formant count; swap in the parser
    # that splits them apart.
    func._function._output_parse_function = parse_multiple_formant_output
    return func
def generate_formants_point_function(corpus_context, gender=None):
    """Generates a function used to call Praat to measure formants and bandwidths.

    Uses a fixed 5500 Hz ceiling and 5 formants.

    Parameters
    ----------
    corpus_context : :class:`~polyglot.corpus.context.CorpusContext`
        The CorpusContext object of the corpus.
    gender : str, optional
        Currently unused; the frequency ceiling is fixed at 5500 Hz
        regardless of gender.

    Returns
    -------
    formant_function : Partial function object
        The function used to call Praat.
    """
    max_freq = 5500
    formant_function = PraatSegmentFormantPointFunction(praat_path=corpus_context.config.praat_path,
                                                        max_frequency=max_freq, num_formants=5, window_length=0.025,
                                                        time_step=0.01)
    return formant_function
def get_mean_SD(data, prototype_parameters=None):
    """Generates per-vowel-class means and covariance matrices for an
    arbitrary set of parameters (such as F1, F2, F3, B1, B2, B3).

    Parameters
    ----------
    data : dict
        Track data from which means and covariance matrices will be
        generated; maps a segment (supporting seg['label']) to a dict of
        per-parameter measurements.
    prototype_parameters : list of str, optional
        Parameter names to include; defaults to
        ['F1', 'F2', 'F3', 'B1', 'B2', 'B3'].

    Returns
    -------
    metadata : dict
        Means and covariance matrices per vowel class, as
        [means, covariance_matrix_as_nested_list].
    """
    if prototype_parameters is None:
        prototype_parameters = ['F1', 'F2', 'F3', 'B1', 'B2', 'B3']
    metadata = {}
    # Collect the distinct vowel classes present in the data.
    phones = {seg['label'] for seg in data}
    for phone in phones:
        observation_list = []
        for seg, value in data.items():
            if seg['label'] != phone:
                continue
            observation = [value[pp] for pp in prototype_parameters]
            # Missing (falsy) measurements are zeroed.
            observation_list.append([x if x else 0 for x in observation])
        all_means = [mean(x[i] for x in observation_list)
                     for i in range(len(prototype_parameters))]
        observations = np.array(observation_list)
        # Rows of observations.T are variables, columns are observations.
        cov = np.cov(observations.T)
        metadata[phone] = [all_means, cov.tolist()]
    return metadata
def get_mahalanobis(prototype, observation, inverse_covariance):
    """Gets the Mahalanobis distance between an observation and the prototype.

    Parameters
    ----------
    prototype : list
        Prototype data.
    observation : list
        Given observation of a vowel instance.
    inverse_covariance : list
        The inverse of the covariance matrix for the vowel class.

    Returns
    -------
    float
        The Mahalanobis distance for the observation.
    """
    proto_vec = np.array(prototype)
    obs_vec = np.array(observation)
    inv_cov = np.array(inverse_covariance)
    return scipy.spatial.distance.mahalanobis(proto_vec, obs_vec, inv_cov)
def save_formant_point_data(corpus_context, data, num_formants=False):
    """Write point formant measures to CSV and load them into the corpus.

    Parameters
    ----------
    corpus_context : CorpusContext
        The corpus to save into.
    data : dict
        Point measures keyed by segment.
    num_formants : bool
        When True, include the 'num_formants' column.
    """
    header = ['id', 'F1', 'F2', 'F3', 'B1', 'B2', 'B3', 'A1', 'A2', 'A3', 'Ax', 'drop_formant']
    if num_formants:
        header += ['num_formants']
    point_measures_to_csv(corpus_context, data, header)
    header_info = {}
    for h in header:
        if h == 'id':
            continue
        # Bug fix: the original condition 'h != "num_formants" or
        # h != "drop_formant"' is always true, so every column was typed
        # as float and the int branch was unreachable.
        if h in ('num_formants', 'drop_formant'):
            header_info[h] = int
        else:
            header_info[h] = float
    point_measures_from_csv(corpus_context, header_info)
def extract_and_save_formant_tracks(corpus_context, data, num_formants=False, stop_check=None, multiprocessing=True):
    '''This function takes a dictionary with the best parameters for each vowels, then recalculates the formants
    as tracks rather than as points'''
    #Dictionary of segment mapping objects where each n_formants has its own segment mapping object
    segment_mappings = {}
    save_padding = 0.02
    for k, v in data.items():
        # NOTE(review): this widens each segment *in place*, mutating the
        # caller's keys -- confirm callers do not reuse 'data' afterwards.
        k.begin -= save_padding
        k.end += save_padding
        if "num_formants" in v:
            n_formants = v["num_formants"]
        else:
            #There was not enough samples, so we use the default n
            n_formants = 5
        if not n_formants in segment_mappings:
            segment_mappings[n_formants] = SegmentMapping()
        segment_mappings[n_formants].segments.append(k)
    outputs = {}
    # One Praat pass per distinct number of formants.
    for n_formants in segment_mappings:
        func = PraatSegmentFormantTrackFunction(praat_path=corpus_context.config.praat_path,
                                                max_frequency=5500, num_formants=n_formants,
                                                window_length=0.025,
                                                time_step=0.01)
        output = analyze_segments(segment_mappings[n_formants], func,
                                  stop_check=stop_check,
                                  multiprocessing=multiprocessing)  # Analyze the phone
        outputs.update(output)
    formant_tracks = ['F1', 'F2', 'F3', 'B1', 'B2', 'B3']
    # Regroup the per-segment tracks by speaker for saving.
    tracks = {}
    for k, v in outputs.items():
        vowel_id = k.properties["id"]
        track = Track()
        for time, formants in v.items():
            tp = TimePoint(time)
            for f in formant_tracks:
                tp.add_value(f, formants[f])
            track.add(tp)
        if not k["speaker"] in tracks:
            tracks[k["speaker"]] = {}
        tracks[k["speaker"]][k] = track
    # Register the acoustic property on first use.
    if 'formants' not in corpus_context.hierarchy.acoustics:
        corpus_context.hierarchy.add_acoustic_properties(corpus_context, 'formants', [(x, float) for x in formant_tracks])
    for speaker, track_dict in tracks.items():
        corpus_context.save_acoustic_tracks('formants', track_dict, speaker)
def generate_base_formants_function(corpus_context, gender=None, source='praat'):
    """Build the formant-track analysis function for a corpus.

    Parameters
    ----------
    corpus_context : :class:`polyglot.corpus.context.CorpusContext`
        The CorpusContext object of the corpus.
    gender : str
        The gender to use for the function; "M" (male) lowers the
        frequency ceiling to 5000 Hz, otherwise 5500 Hz is used.
    source : str
        When "praat", formants are calculated by Praat over each
        segment; any other value uses the built-in track function.

    Returns
    -------
    formant_function : Partial function object
        The function used to call Praat.
    """
    maximum_frequency = 5000 if gender == 'M' else 5500
    if source == 'praat':
        if getattr(corpus_context.config, 'praat_path', None) is None:
            raise (AcousticError('Could not find the Praat executable'))
        return PraatSegmentFormantTrackFunction(
            praat_path=corpus_context.config.praat_path,
            max_frequency=maximum_frequency, num_formants=5,
            window_length=0.025, time_step=0.01)
    return FormantTrackFunction(max_frequency=maximum_frequency,
                                time_step=0.01, num_formants=5,
                                window_length=0.025)
| |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Argument-less script to select what to run on the buildbots."""
import filecmp
import os
import shutil
import subprocess
import sys
if sys.platform in ['win32', 'cygwin']:
EXE_SUFFIX = '.exe'
else:
EXE_SUFFIX = ''
BUILDBOT_DIR = os.path.dirname(os.path.abspath(__file__))
TRUNK_DIR = os.path.dirname(BUILDBOT_DIR)
ROOT_DIR = os.path.dirname(TRUNK_DIR)
ANDROID_DIR = os.path.join(ROOT_DIR, 'android')
CMAKE_DIR = os.path.join(ROOT_DIR, 'cmake')
CMAKE_BIN_DIR = os.path.join(CMAKE_DIR, 'bin')
OUT_DIR = os.path.join(TRUNK_DIR, 'out')
def CallSubProcess(*args, **kwargs):
  """Wrapper around subprocess.call which treats errors as build exceptions."""
  # Redirect stdin from /dev/null so the child process can never block
  # waiting for interactive input on the bot.
  with open(os.devnull) as devnull_fd:
    retcode = subprocess.call(stdin=devnull_fd, *args, **kwargs)
  if retcode != 0:
    # Emit the buildbot annotator tag marking the step as an exception,
    # then abort the whole build.
    print '@@@STEP_EXCEPTION@@@'
    sys.exit(1)
def PrepareCmake():
  """Build CMake 2.8.8 since the version in Precise is 2.8.7."""
  # A clobber request from the bot drops the whole checkout.
  if os.environ['BUILDBOT_CLOBBER'] == '1':
    print '@@@BUILD_STEP Clobber CMake checkout@@@'
    shutil.rmtree(CMAKE_DIR)

  # We always build CMake 2.8.8, so no need to do anything
  # if the directory already exists.
  if os.path.isdir(CMAKE_DIR):
    return

  print '@@@BUILD_STEP Initialize CMake checkout@@@'
  os.mkdir(CMAKE_DIR)

  # Shallow single-branch clone of the pinned v2.8.8 tag.
  print '@@@BUILD_STEP Sync CMake@@@'
  CallSubProcess(
      ['git', 'clone',
       '--depth', '1',
       '--single-branch',
       '--branch', 'v2.8.8',
       '--',
       'git://cmake.org/cmake.git',
       CMAKE_DIR],
      cwd=CMAKE_DIR)

  print '@@@BUILD_STEP Build CMake@@@'
  CallSubProcess(
      ['/bin/bash', 'bootstrap', '--prefix=%s' % CMAKE_DIR],
      cwd=CMAKE_DIR)
  CallSubProcess(['make', 'cmake'], cwd=CMAKE_DIR)
# Shell prologue that prepares the Android build environment for every
# command run inside the Android tree.
_ANDROID_SETUP = 'source build/envsetup.sh && lunch full-eng'
def PrepareAndroidTree():
  """Prepare an Android tree to run 'android' format tests."""
  if os.environ['BUILDBOT_CLOBBER'] == '1':
    print '@@@BUILD_STEP Clobber Android checkout@@@'
    shutil.rmtree(ANDROID_DIR)

  # (Re)create the directory so that the following steps will succeed.
  if not os.path.isdir(ANDROID_DIR):
    os.mkdir(ANDROID_DIR)

  # We use a manifest from the gyp project listing pinned revisions of AOSP to
  # use, to ensure that we test against a stable target. This needs to be
  # updated to pick up new build system changes sometimes, so we must test if
  # it has changed.
  manifest_filename = 'aosp_manifest.xml'
  gyp_manifest = os.path.join(BUILDBOT_DIR, manifest_filename)
  android_manifest = os.path.join(ANDROID_DIR, '.repo', 'manifests',
                                  manifest_filename)
  manifest_is_current = (os.path.isfile(android_manifest) and
                         filecmp.cmp(gyp_manifest, android_manifest))
  if not manifest_is_current:
    # It's safe to repeat these steps, so just do them again to make sure we are
    # in a good state.
    print '@@@BUILD_STEP Initialize Android checkout@@@'
    CallSubProcess(
        ['repo', 'init',
         '-u', 'https://android.googlesource.com/platform/manifest',
         '-b', 'master',
         '-g', 'all,-notdefault,-device,-darwin,-mips,-x86'],
        cwd=ANDROID_DIR)
    shutil.copy(gyp_manifest, android_manifest)

    print '@@@BUILD_STEP Sync Android@@@'
    CallSubProcess(['repo', 'sync', '-j4', '-m', manifest_filename],
                   cwd=ANDROID_DIR)

  # If we already built the system image successfully and didn't sync to a new
  # version of the source, skip running the build again as it's expensive even
  # when there's nothing to do.
  system_img = os.path.join(ANDROID_DIR, 'out', 'target', 'product', 'generic',
                            'system.img')
  if manifest_is_current and os.path.isfile(system_img):
    return

  print '@@@BUILD_STEP Build Android@@@'
  CallSubProcess(
      ['/bin/bash',
       '-c', '%s && make -j4' % _ANDROID_SETUP],
      cwd=ANDROID_DIR)
def StartAndroidEmulator():
  """Start an android emulator from the built android tree."""
  print '@@@BUILD_STEP Start Android emulator@@@'

  # Restart the adb server inside the configured Android environment.
  CallSubProcess(['/bin/bash', '-c',
                  '%s && adb kill-server ' % _ANDROID_SETUP],
                 cwd=ANDROID_DIR)
  # If taskset is available, use it to force adbd to run only on one core, as,
  # sadly, it improves its reliability (see crbug.com/268450).
  adbd_wrapper = ''
  with open(os.devnull, 'w') as devnull_fd:
    if subprocess.call(['which', 'taskset'], stdout=devnull_fd) == 0:
      adbd_wrapper = 'taskset -c 0'
  CallSubProcess(['/bin/bash', '-c',
                  '%s && %s adb start-server ' % (_ANDROID_SETUP, adbd_wrapper)],
                 cwd=ANDROID_DIR)

  # The emulator must outlive this call, so launch it with Popen (fire and
  # forget) rather than CallSubProcess, then block until adb can see it.
  subprocess.Popen(
      ['/bin/bash', '-c',
       '%s && emulator -no-window' % _ANDROID_SETUP],
      cwd=ANDROID_DIR)
  CallSubProcess(
      ['/bin/bash', '-c',
       '%s && adb wait-for-device' % _ANDROID_SETUP],
      cwd=ANDROID_DIR)
def StopAndroidEmulator():
  """Stop all android emulators."""
  print '@@@BUILD_STEP Stop Android emulator@@@'
  # If this fails, it's because there is no emulator running.
  subprocess.call(['pkill', 'emulator.*'])
def GypTestFormat(title, format=None, msvs_version=None, tests=[]):
"""Run the gyp tests for a given format, emitting annotator tags.
See annotator docs at:
https://sites.google.com/a/chromium.org/dev/developers/testing/chromium-build-infrastructure/buildbot-annotations
Args:
format: gyp format to test.
Returns:
0 for sucesss, 1 for failure.
"""
if not format:
format = title
print '@@@BUILD_STEP ' + title + '@@@'
sys.stdout.flush()
env = os.environ.copy()
if msvs_version:
env['GYP_MSVS_VERSION'] = msvs_version
command = ' '.join(
[sys.executable, 'trunk/gyptest.py',
'--all',
'--passed',
'--format', format,
'--path', CMAKE_BIN_DIR,
'--chdir', 'trunk'] + tests)
if format == 'android':
# gyptest needs the environment setup from envsetup/lunch in order to build
# using the 'android' backend, so this is done in a single shell.
retcode = subprocess.call(
['/bin/bash',
'-c', '%s && cd %s && %s' % (_ANDROID_SETUP, ROOT_DIR, command)],
cwd=ANDROID_DIR, env=env)
else:
retcode = subprocess.call(command, cwd=ROOT_DIR, env=env, shell=True)
if retcode:
# Emit failure tag, and keep going.
print '@@@STEP_FAILURE@@@'
return 1
return 0
def GypBuild():
  # Dump out/ directory.
  print '@@@BUILD_STEP cleanup@@@'
  print 'Removing %s...' % OUT_DIR
  shutil.rmtree(OUT_DIR, ignore_errors=True)
  print 'Done.'

  # Each GypTestFormat call returns 0 or 1; accumulate failures here.
  retcode = 0
  # The Android gyp bot runs on linux so this must be tested first.
  if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-android':
    PrepareAndroidTree()
    StartAndroidEmulator()
    try:
      retcode += GypTestFormat('android')
    finally:
      # Always shut the emulator down, even if the tests raised.
      StopAndroidEmulator()
  elif sys.platform.startswith('linux'):
    retcode += GypTestFormat('ninja')
    retcode += GypTestFormat('make')
    PrepareCmake()
    retcode += GypTestFormat('cmake')
  elif sys.platform == 'darwin':
    retcode += GypTestFormat('ninja')
    retcode += GypTestFormat('xcode')
    retcode += GypTestFormat('make')
  elif sys.platform == 'win32':
    retcode += GypTestFormat('ninja')
    if os.environ['BUILDBOT_BUILDERNAME'] == 'gyp-win64':
      retcode += GypTestFormat('msvs-ninja-2012', format='msvs-ninja',
                               msvs_version='2012',
                               tests=[
                                   'test\generator-output\gyptest-actions.py',
                                   'test\generator-output\gyptest-relocate.py',
                                   'test\generator-output\gyptest-rules.py'])
    retcode += GypTestFormat('msvs-2010', format='msvs', msvs_version='2010')
    retcode += GypTestFormat('msvs-2012', format='msvs', msvs_version='2012')
  else:
    raise Exception('Unknown platform')
  if retcode:
    # TODO(bradnelson): once the annotator supports a postscript (section for
    # after the build proper that could be used for cumulative failures),
    # use that instead of this. This isolates the final return value so
    # that it isn't misattributed to the last stage.
    print '@@@BUILD_STEP failures@@@'
    sys.exit(retcode)
# Entry point: run the full buildbot test sequence for this platform.
if __name__ == '__main__':
  GypBuild()
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2010 OpenStack Foundation
# Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility methods for working with WSGI servers
"""
from __future__ import print_function
import errno
import functools
import os
import signal
import sys
import time
import eventlet
from eventlet.green import socket
from eventlet.green import ssl
import eventlet.greenio
import eventlet.wsgi
import glance_store
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_log import loggers
from oslo_serialization import jsonutils
import routes
import routes.middleware
import six
import webob.dec
import webob.exc
from webob import multidict
from glance.common import exception
from glance.common import utils
from glance import i18n
# Translation shortcuts (message, error, info, warning markers).
_ = i18n._
_LE = i18n._LE
_LI = i18n._LI
_LW = i18n._LW

# Options for the address/port the server binds to.
bind_opts = [
    cfg.StrOpt('bind_host', default='0.0.0.0',
               help=_('Address to bind the server. Useful when '
                      'selecting a particular network interface.')),
    cfg.IntOpt('bind_port',
               help=_('The port on which the server will listen.')),
]

# Low-level TCP/SSL listener socket tuning options.
socket_opts = [
    cfg.IntOpt('backlog', default=4096,
               help=_('The backlog value that will be used when creating the '
                      'TCP listener socket.')),
    cfg.IntOpt('tcp_keepidle', default=600,
               help=_('The value for the socket option TCP_KEEPIDLE. This is '
                      'the time in seconds that the connection must be idle '
                      'before TCP starts sending keepalive probes.')),
    cfg.StrOpt('ca_file', help=_('CA certificate file to use to verify '
                                 'connecting clients.')),
    cfg.StrOpt('cert_file', help=_('Certificate file to use when starting API '
                                   'server securely.')),
    cfg.StrOpt('key_file', help=_('Private key file to use when starting API '
                                  'server securely.')),
]

# Options for the eventlet-based WSGI server itself.
eventlet_opts = [
    cfg.IntOpt('workers', default=processutils.get_worker_count(),
               help=_('The number of child process workers that will be '
                      'created to service requests. The default will be '
                      'equal to the number of CPUs available.')),
    cfg.IntOpt('max_header_line', default=16384,
               help=_('Maximum line size of message headers to be accepted. '
                      'max_header_line may need to be increased when using '
                      'large tokens (typically those generated by the '
                      'Keystone v3 API with big service catalogs')),
    cfg.BoolOpt('http_keepalive', default=True,
                help=_('If False, server will return the header '
                       '"Connection: close", '
                       'If True, server will return "Connection: Keep-Alive" '
                       'in its responses. In order to close the client socket '
                       'connection explicitly after the response is sent and '
                       'read successfully by the client, you simply have to '
                       'set this option to False when you create a wsgi '
                       'server.')),
    cfg.IntOpt('client_socket_timeout', default=900,
               help=_('Timeout for client connections\' socket operations. '
                      'If an incoming connection is idle for this number of '
                      'seconds it will be closed. A value of \'0\' means '
                      'wait forever.')),
]

# Options for the osprofiler integration (registered under [profiler]).
profiler_opts = [
    cfg.BoolOpt("enabled", default=False,
                help=_('If False fully disable profiling feature.')),
    cfg.BoolOpt("trace_sqlalchemy", default=False,
                help=_("If False doesn't trace SQL requests."))
]

LOG = logging.getLogger(__name__)

CONF = cfg.CONF
CONF.register_opts(bind_opts)
CONF.register_opts(socket_opts)
CONF.register_opts(eventlet_opts)
CONF.register_opts(profiler_opts, group="profiler")

# Pools handed out by get_asynchronous_eventlet_pool(); waited on during
# graceful shutdown.
ASYNC_EVENTLET_THREAD_POOL_LIST = []
def get_bind_addr(default_port=None):
    """Return the (host, port) pair the server should bind to.

    Falls back to *default_port* when no port is configured.
    """
    port = CONF.bind_port
    if not port:
        port = default_port
    return (CONF.bind_host, port)
def ssl_wrap_socket(sock):
    """
    Wrap an existing socket in SSL

    :param sock: non-SSL socket to wrap
    :returns: An SSL wrapped socket
    """
    utils.validate_key_cert(CONF.key_file, CONF.cert_file)

    # Client certificates are only required when a CA bundle is configured.
    if CONF.ca_file:
        cert_reqs = ssl.CERT_REQUIRED
        ca_kwargs = {'ca_certs': CONF.ca_file}
    else:
        cert_reqs = ssl.CERT_NONE
        ca_kwargs = {}

    return ssl.wrap_socket(sock,
                           server_side=True,
                           certfile=CONF.cert_file,
                           keyfile=CONF.key_file,
                           cert_reqs=cert_reqs,
                           **ca_kwargs)
def get_socket(default_port):
    """
    Bind socket to bind ip:port in conf

    note: Mostly comes from Swift with a few small changes...

    :param default_port: port to bind to if none is specified in conf
    :returns : a socket object as returned from socket.listen or
               ssl.wrap_socket if conf specifies cert_file
    """
    bind_addr = get_bind_addr(default_port)

    # TODO(jaypipes): eventlet's greened socket module does not actually
    # support IPv6 in getaddrinfo(). We need to get around this in the
    # future or monitor upstream for a fix
    # Pick the first resolved IPv4/IPv6 family for the bind address.
    address_family = [
        addr[0] for addr in socket.getaddrinfo(bind_addr[0],
                                               bind_addr[1],
                                               socket.AF_UNSPEC,
                                               socket.SOCK_STREAM)
        if addr[0] in (socket.AF_INET, socket.AF_INET6)
    ][0]

    use_ssl = CONF.key_file or CONF.cert_file
    # SSL needs both halves of the key pair; having only one is a
    # misconfiguration, not a "no SSL" request.
    if use_ssl and (not CONF.key_file or not CONF.cert_file):
        raise RuntimeError(_("When running server in SSL mode, you must "
                             "specify both a cert_file and key_file "
                             "option value in your configuration file"))

    # Test suites may inject a pre-made socket; otherwise retry binding
    # for up to 30 seconds (the old socket may still be draining).
    sock = utils.get_test_suite_socket()
    retry_until = time.time() + 30

    while not sock and time.time() < retry_until:
        try:
            sock = eventlet.listen(bind_addr,
                                   backlog=CONF.backlog,
                                   family=address_family)
        except socket.error as err:
            # EADDRINUSE is expected while the address frees up; anything
            # else is a genuine failure.
            if err.args[0] != errno.EADDRINUSE:
                raise
            eventlet.sleep(0.1)
    if not sock:
        raise RuntimeError(_("Could not bind to %(host)s:%(port)s after"
                             " trying for 30 seconds") %
                           {'host': bind_addr[0],
                            'port': bind_addr[1]})

    return sock
def set_eventlet_hub():
    """Select an eventlet hub, preferring 'poll' over 'selects'.

    :raises exception.WorkerCreationFailure: when neither hub is usable
        on this platform.
    """
    for hub_name in ('poll', 'selects'):
        try:
            eventlet.hubs.use_hub(hub_name)
            return
        except Exception:
            continue
    msg = _("eventlet 'poll' nor 'selects' hubs are available "
            "on this platform")
    raise exception.WorkerCreationFailure(
        reason=msg)
def initialize_glance_store():
    """Initialize glance store."""
    # Register store config options, construct the store drivers, then
    # check that the configured default store is actually available.
    glance_store.register_opts(CONF)
    glance_store.create_stores(CONF)
    glance_store.verify_default_store()
def get_asynchronous_eventlet_pool(size=1000):
    """Create an eventlet pool and register it for graceful shutdown.

    Every pool handed out here is also recorded in the module-level
    ASYNC_EVENTLET_THREAD_POOL_LIST so that it can be waited on when a
    shutdown signal arrives.

    :param size: eventlet pool size
    :returns: eventlet pool
    """
    global ASYNC_EVENTLET_THREAD_POOL_LIST

    new_pool = eventlet.GreenPool(size=size)
    ASYNC_EVENTLET_THREAD_POOL_LIST.append(new_pool)
    return new_pool
class Server(object):
    """Server class to manage multiple WSGI sockets and applications.

    This class requires initialize_glance_store set to True if
    glance store needs to be initialized.
    """
    def __init__(self, threads=1000, initialize_glance_store=False):
        os.umask(0o27)  # ensure files are created with the correct privileges
        self._logger = logging.getLogger("eventlet.wsgi.server")
        self._wsgi_logger = loggers.WritableLogger(self._logger)
        self.threads = threads
        self.children = set()
        self.stale_children = set()
        self.running = True
        # NOTE(abhishek): Allows us to only re-initialize glance_store when
        # the API's configuration reloads.
        self.initialize_glance_store = initialize_glance_store
        self.pgid = os.getpid()
        try:
            # NOTE(flaper87): Make sure this process
            # runs in its own process group.
            os.setpgid(self.pgid, self.pgid)
        except OSError:
            # NOTE(flaper87): When running glance-control,
            # (glance's functional tests, for example)
            # setpgid fails with EPERM as glance-control
            # creates a fresh session, of which the newly
            # launched service becomes the leader (session
            # leaders may not change process groups)
            #
            # Running glance-(api|registry) is safe and
            # shouldn't raise any error here.
            self.pgid = 0

    def hup(self, *args):
        """
        Reloads configuration files with zero down time
        """
        # Ignore further SIGHUPs while this reload is being processed.
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        raise exception.SIGHUPInterrupt

    def kill_children(self, *args):
        """Kills the entire process group."""
        # Ignore TERM/INT in this (parent) process so it survives long
        # enough to forward the signal to the whole group.
        signal.signal(signal.SIGTERM, signal.SIG_IGN)
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        self.running = False
        os.killpg(self.pgid, signal.SIGTERM)

    def start(self, application, default_port):
        """
        Run a WSGI server with the given application.

        :param application: The application to be run in the WSGI server
        :param default_port: Port to bind to if none is specified in conf
        """
        self.application = application
        self.default_port = default_port
        self.configure()
        self.start_wsgi()

    def start_wsgi(self):
        # Serve in-process when workers == 0; otherwise fork worker children
        # and install the parent's signal handlers.
        if CONF.workers == 0:
            # Useful for profiling, test, debug etc.
            self.pool = self.create_pool()
            self.pool.spawn_n(self._single_run, self.application, self.sock)
            return
        else:
            LOG.info(_LI("Starting %d workers") % CONF.workers)
            signal.signal(signal.SIGTERM, self.kill_children)
            signal.signal(signal.SIGINT, self.kill_children)
            signal.signal(signal.SIGHUP, self.hup)
            while len(self.children) < CONF.workers:
                self.run_child()

    def create_pool(self):
        # Green-thread pool used to serve requests within one process.
        return eventlet.GreenPool(size=self.threads)

    def _remove_children(self, pid):
        # Forget an exited child, whether it is a current worker or one
        # left stale by a configuration reload.
        if pid in self.children:
            self.children.remove(pid)
            LOG.info(_LI('Removed dead child %s') % pid)
        elif pid in self.stale_children:
            self.stale_children.remove(pid)
            LOG.info(_LI('Removed stale child %s') % pid)
        else:
            LOG.warn(_LW('Unrecognised child %s') % pid)

    def _verify_and_respawn_children(self, pid, status):
        # Respawn a replacement worker, unless the child exited with a
        # non-zero status (treated as unrecoverable).
        if len(self.stale_children) == 0:
            LOG.debug('No stale children')
        if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
            LOG.error(_LE('Not respawning child %d, cannot '
                          'recover from termination') % pid)
            if not self.children and not self.stale_children:
                LOG.info(
                    _LI('All workers have terminated. Exiting'))
                self.running = False
        else:
            if len(self.children) < CONF.workers:
                self.run_child()

    def wait_on_children(self):
        # Parent loop: reap exited children until shutdown is requested,
        # respawning workers as needed and handling reload (SIGHUP).
        while self.running:
            try:
                pid, status = os.wait()
                if os.WIFEXITED(status) or os.WIFSIGNALED(status):
                    self._remove_children(pid)
                    self._verify_and_respawn_children(pid, status)
            except OSError as err:
                if err.errno not in (errno.EINTR, errno.ECHILD):
                    raise
            except KeyboardInterrupt:
                LOG.info(_LI('Caught keyboard interrupt. Exiting.'))
                break
            except exception.SIGHUPInterrupt:
                self.reload()
                continue
        eventlet.greenio.shutdown_safe(self.sock)
        self.sock.close()
        LOG.debug('Exited')

    def configure(self, old_conf=None, has_changed=None):
        """
        Apply configuration settings

        :param old_conf: Cached old configuration settings (if any)
        :param has changed: callable to determine if a parameter has changed
        """
        eventlet.wsgi.MAX_HEADER_LINE = CONF.max_header_line
        # A configured timeout of 0 means "wait forever"; eventlet expects
        # None for that.
        self.client_socket_timeout = CONF.client_socket_timeout or None
        self.configure_socket(old_conf, has_changed)
        if self.initialize_glance_store:
            initialize_glance_store()

    def reload(self):
        """
        Reload and re-apply configuration settings

        Existing child processes are sent a SIGHUP signal
        and will exit after completing existing requests.
        New child processes, which will have the updated
        configuration, are spawned. This allows preventing
        interruption to the service.
        """
        def _has_changed(old, new, param):
            old = old.get(param)
            new = getattr(new, param)
            return (new != old)

        old_conf = utils.stash_conf_values()
        has_changed = functools.partial(_has_changed, old_conf, CONF)
        CONF.reload_config_files()
        os.killpg(self.pgid, signal.SIGHUP)
        # Current workers become stale: they finish in-flight requests and
        # exit, while freshly spawned workers pick up the new configuration.
        self.stale_children = self.children
        self.children = set()

        # Ensure any logging config changes are picked up
        logging.setup(CONF, 'glance')

        self.configure(old_conf, has_changed)
        self.start_wsgi()

    def wait(self):
        """Wait until all servers have completed running."""
        try:
            if self.children:
                self.wait_on_children()
            else:
                self.pool.waitall()
        except KeyboardInterrupt:
            pass

    def run_child(self):
        # Fork one worker process; the child serves requests, the parent
        # records the child's PID.
        def child_hup(*args):
            """Shuts down child processes, existing requests are handled."""
            signal.signal(signal.SIGHUP, signal.SIG_IGN)
            eventlet.wsgi.is_accepting = False
            self.sock.close()

        pid = os.fork()
        if pid == 0:
            signal.signal(signal.SIGHUP, child_hup)
            signal.signal(signal.SIGTERM, signal.SIG_DFL)
            # ignore the interrupt signal to avoid a race whereby
            # a child worker receives the signal before the parent
            # and is respawned unnecessarily as a result
            signal.signal(signal.SIGINT, signal.SIG_IGN)
            # The child has no need to stash the unwrapped
            # socket, and the reference prevents a clean
            # exit on sighup
            self._sock = None
            self.run_server()
            LOG.info(_LI('Child %d exiting normally') % os.getpid())
            # self.pool.waitall() is now called in wsgi's server so
            # it's safe to exit here
            sys.exit(0)
        else:
            LOG.info(_LI('Started child %s') % pid)
            self.children.add(pid)

    def run_server(self):
        """Run a WSGI server."""
        if cfg.CONF.pydev_worker_debug_host:
            utils.setup_remote_pydev_debug(cfg.CONF.pydev_worker_debug_host,
                                           cfg.CONF.pydev_worker_debug_port)

        eventlet.wsgi.HttpProtocol.default_request_version = "HTTP/1.0"
        self.pool = self.create_pool()
        try:
            eventlet.wsgi.server(self.sock,
                                 self.application,
                                 log=self._wsgi_logger,
                                 custom_pool=self.pool,
                                 debug=False,
                                 keepalive=CONF.http_keepalive,
                                 socket_timeout=self.client_socket_timeout)
        except socket.error as err:
            # EINVAL is expected when the listening socket is shut down
            # (e.g. during SIGHUP handling); anything else is re-raised.
            if err[0] != errno.EINVAL:
                raise

        # waiting on async pools
        if ASYNC_EVENTLET_THREAD_POOL_LIST:
            for pool in ASYNC_EVENTLET_THREAD_POOL_LIST:
                pool.waitall()

    def _single_run(self, application, sock):
        """Start a WSGI server in a new green thread."""
        LOG.info(_LI("Starting single process server"))
        eventlet.wsgi.server(sock, application, custom_pool=self.pool,
                             log=self._wsgi_logger,
                             debug=False,
                             keepalive=CONF.http_keepalive,
                             socket_timeout=self.client_socket_timeout)

    def configure_socket(self, old_conf=None, has_changed=None):
        """
        Ensure a socket exists and is appropriately configured.

        This function is called on start up, and can also be
        called in the event of a configuration reload.

        When called for the first time a new socket is created.
        If reloading and either bind_host or bind port have been
        changed the existing socket must be closed and a new
        socket opened (laws of physics).

        In all other cases (bind_host/bind_port have not changed)
        the existing socket is reused.

        :param old_conf: Cached old configuration settings (if any)
        :param has changed: callable to determine if a parameter has changed
        """
        # Do we need a fresh socket?
        new_sock = (old_conf is None or (
                    has_changed('bind_host') or
                    has_changed('bind_port')))
        # Will we be using https?
        use_ssl = not (not CONF.cert_file or not CONF.key_file)
        # Were we using https before?
        old_use_ssl = (old_conf is not None and not (
                       not old_conf.get('key_file') or
                       not old_conf.get('cert_file')))
        # Do we now need to perform an SSL wrap on the socket?
        wrap_sock = use_ssl is True and (old_use_ssl is False or new_sock)
        # Do we now need to perform an SSL unwrap on the socket?
        unwrap_sock = use_ssl is False and old_use_ssl is True

        if new_sock:
            self._sock = None
            if old_conf is not None:
                self.sock.close()
            _sock = get_socket(self.default_port)
            _sock.setsockopt(socket.SOL_SOCKET,
                             socket.SO_REUSEADDR, 1)
            # sockets can hang around forever without keepalive
            _sock.setsockopt(socket.SOL_SOCKET,
                             socket.SO_KEEPALIVE, 1)
            self._sock = _sock

        if wrap_sock:
            self.sock = ssl_wrap_socket(self._sock)

        if unwrap_sock:
            self.sock = self._sock

        if new_sock and not use_ssl:
            self.sock = self._sock

        # Pick up newly deployed certs
        if old_conf is not None and use_ssl is True and old_use_ssl is True:
            if has_changed('cert_file') or has_changed('key_file'):
                utils.validate_key_cert(CONF.key_file, CONF.cert_file)
            if has_changed('cert_file'):
                self.sock.certfile = CONF.cert_file
            if has_changed('key_file'):
                self.sock.keyfile = CONF.key_file

        if new_sock or (old_conf is not None and has_changed('tcp_keepidle')):
            # This option isn't available in the OS X version of eventlet
            if hasattr(socket, 'TCP_KEEPIDLE'):
                self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                                     CONF.tcp_keepidle)

        if old_conf is not None and has_changed('backlog'):
            self.sock.listen(CONF.backlog)
class Middleware(object):
    """
    Base WSGI middleware wrapper.

    Subclasses wrap another WSGI application and run ahead of it in the
    pipeline. By default the middleware simply delegates to the wrapped
    app; override process_request/process_response (or __call__ itself)
    to customize behavior.
    """

    def __init__(self, application):
        # The next WSGI application in the chain.
        self.application = application

    @classmethod
    def factory(cls, global_conf, **local_conf):
        """Paste-deploy style factory returning a filter for this class."""
        def _filter(app):
            return cls(app)
        return _filter

    def process_request(self, req):
        """
        Hook invoked with each incoming request.

        Returning None lets the wrapped application handle the request;
        returning a response short-circuits the chain and that response
        is sent back immediately.
        """
        return None

    def process_response(self, response):
        """Hook for post-processing the wrapped application's response."""
        return response

    @webob.dec.wsgify
    def __call__(self, req):
        short_circuit = self.process_request(req)
        if short_circuit:
            return short_circuit
        resp = req.get_response(self.application)
        resp.request = req
        try:
            return self.process_response(resp)
        except webob.exc.HTTPException as exc:
            return exc
class Debug(Middleware):
    """
    Middleware that dumps the request environ, response headers and
    response body to stdout as traffic flows through it.
    """

    @webob.dec.wsgify
    def __call__(self, req):
        banner = "*" * 40
        print(banner + " REQUEST ENVIRON")
        for key, value in req.environ.items():
            print(key, "=", value)
        print('')
        resp = req.get_response(self.application)

        print(banner + " RESPONSE HEADERS")
        for (key, value) in six.iteritems(resp.headers):
            print(key, "=", value)
        print('')

        # Wrap the body iterator so chunks are echoed as they stream out.
        resp.app_iter = self.print_generator(resp.app_iter)
        return resp

    @staticmethod
    def print_generator(app_iter):
        """
        Pass-through iterator that echoes each body chunk to stdout
        while yielding it unchanged.
        """
        print(("*" * 40) + " BODY")
        for part in app_iter:
            sys.stdout.write(part)
            sys.stdout.flush()
            yield part
        print()
class APIMapper(routes.Mapper):
    """
    Handle route matching when url is '' because routes.Mapper returns
    an error in this case.
    """

    def routematch(self, url=None, environ=None):
        """Match *url* against the mapper, special-casing the empty URL.

        :returns: (match dict, route) as produced by routes.
        """
        # BUG FIX: the original compared with `url is ""` — identity
        # comparison against a string literal only works by accident of
        # CPython interning and raises a SyntaxWarning on Python 3.8+.
        # Use equality instead; behavior for None is unchanged.
        if url == "":
            result = self._match("", environ)
            return result[0], result[1]
        return routes.Mapper.routematch(self, url, environ)
class RejectMethodController(object):
    """Controller whose single action rejects the request with HTTP 405."""

    def reject(self, req, allowed_methods, *args, **kwargs):
        """Raise 405 Method Not Allowed, advertising *allowed_methods*."""
        method = req.environ['REQUEST_METHOD']
        LOG.debug("The method %s is not allowed for this resource" % method)
        raise webob.exc.HTTPMethodNotAllowed(
            headers=[('Allow', allowed_methods)])
class Router(object):
    """
    WSGI middleware that maps incoming requests to WSGI apps.
    """

    def __init__(self, mapper):
        """
        Create a router for the given routes.Mapper.

        Each route in `mapper` must specify a 'controller', which is a
        WSGI app to call. You'll probably want to specify an 'action' as
        well and have your controller be a wsgi.Controller, who will route
        the request to the action method.

        Examples:
          mapper = routes.Mapper()
          sc = ServerController()

          # Explicit mapping of one route to a controller+action
          mapper.connect(None, "/svrlist", controller=sc, action="list")

          # Actions are all implicitly defined
          mapper.resource("server", "servers", controller=sc)

          # Pointing to an arbitrary WSGI app. You can specify the
          # {path_info:.*} parameter so the target app can be handed just that
          # section of the URL.
          mapper.connect(None, "/v1.0/{path_info:.*}", controller=BlogApp())
        """
        mapper.redirect("", "/")
        self.map = mapper
        self._router = routes.middleware.RoutesMiddleware(self._dispatch,
                                                          self.map)

    @classmethod
    def factory(cls, global_conf, **local_conf):
        """Paste-deploy factory: build a Router over a fresh APIMapper."""
        return cls(APIMapper())

    @webob.dec.wsgify
    def __call__(self, req):
        """
        Route the incoming request to a controller based on self.map.

        If no match, return either a 404(Not Found) or 501(Not Implemented).
        """
        return self._router

    @staticmethod
    @webob.dec.wsgify
    def _dispatch(req):
        """
        Called by self._router after matching the incoming request to a route
        and putting the information into req.environ. Either returns 404,
        501, or the routed WSGI app's response.
        """
        match = req.environ['wsgiorg.routing_args'][1]
        if match:
            return match['controller']
        # No route matched: distinguish an unknown method (501) from an
        # unknown path (404).
        known_methods = ('GET', 'HEAD', 'POST', 'PUT', 'DELETE', 'PATCH')
        if req.environ['REQUEST_METHOD'] in known_methods:
            return webob.exc.HTTPNotFound()
        return webob.exc.HTTPNotImplemented()
class Request(webob.Request):
    """Add some OpenStack API-specific logic to the base webob.Request."""

    def best_match_content_type(self):
        """Determine the requested response content-type."""
        supported_types = ('application/json',)
        match = self.accept.best_match(supported_types)
        if match:
            return match
        return 'application/json'

    def get_content_type(self, allowed_content_types):
        """Determine content type of the request body."""
        if "Content-Type" not in self.headers:
            raise exception.InvalidContentType(content_type=None)

        declared_type = self.content_type
        if declared_type in allowed_content_types:
            return declared_type
        raise exception.InvalidContentType(content_type=declared_type)

    def best_match_language(self):
        """Determines best available locale from the Accept-Language header.

        :returns: the best language match or None if the 'Accept-Language'
                  header was not available in the request.
        """
        if not self.accept_language:
            return None
        available = i18n.get_available_languages('glance')
        return self.accept_language.best_match(available)

    def get_content_range(self):
        """Return the `Range` in a request."""
        range_str = self.headers.get('Content-Range')
        if range_str is None:
            return None
        parsed = webob.byterange.ContentRange.parse(range_str)
        if parsed is None:
            msg = _('Malformed Content-Range header: %s') % range_str
            raise webob.exc.HTTPBadRequest(explanation=msg)
        return parsed
class JSONRequestDeserializer(object):
    """Deserialize JSON request bodies into action keyword arguments."""

    # Transfer encodings that imply a (possibly) readable entity body.
    valid_transfer_encoding = frozenset(['chunked', 'compress', 'deflate',
                                         'gzip', 'identity'])

    def has_body(self, request):
        """
        Returns whether a Webob.Request object will possess an entity body.

        :param request: Webob.Request object
        """
        encoding = request.headers.get('transfer-encoding', '').lower()
        encoded_and_readable = (encoding in self.valid_transfer_encoding
                                and request.is_body_readable)
        if encoded_and_readable:
            return True
        if request.content_length > 0:
            return True
        return False

    @staticmethod
    def _sanitizer(obj):
        """Sanitizer method that will be passed to jsonutils.loads."""
        return obj

    def from_json(self, datastring):
        """Parse a JSON document, converting errors into HTTP 400."""
        try:
            return jsonutils.loads(datastring, object_hook=self._sanitizer)
        except ValueError:
            msg = _('Malformed JSON in request body.')
            raise webob.exc.HTTPBadRequest(explanation=msg)

    def default(self, request):
        """Return {'body': <parsed JSON>} when a body exists, else {}."""
        if not self.has_body(request):
            return {}
        return {'body': self.from_json(request.body)}
class JSONResponseSerializer(object):
    """Serialize action results into a JSON response body."""

    def _sanitizer(self, obj):
        """Sanitizer method that will be passed to jsonutils.dumps."""
        if hasattr(obj, "to_dict"):
            return obj.to_dict()
        if isinstance(obj, multidict.MultiDict):
            return obj.mixed()
        return jsonutils.to_primitive(obj)

    def to_json(self, data):
        """Render *data* as a JSON string."""
        return jsonutils.dumps(data, default=self._sanitizer)

    def default(self, response, result):
        """Fill *response* with *result* encoded as application/json."""
        response.content_type = 'application/json'
        response.body = self.to_json(result)
def translate_exception(req, e):
    """Translates all translatable elements of the given exception."""
    # The RequestClass attribute in the webob.dec.wsgify decorator
    # does not guarantee that the request object will be a particular
    # type; this check is therefore necessary.
    if not hasattr(req, "best_match_language"):
        return e

    locale = req.best_match_language()
    if isinstance(e, webob.exc.HTTPError):
        e.explanation = i18n.translate(e.explanation, locale)
        e.detail = i18n.translate(e.detail, locale)
        template = getattr(e, 'body_template', None)
        if template:
            e.body_template = i18n.translate(template, locale)
    return e
class Resource(object):
    """
    WSGI app that handles (de)serialization and controller dispatch.

    Reads routing information supplied by RoutesMiddleware and calls
    the requested action method upon its deserializer, controller,
    and serializer. Those three objects may implement any of the basic
    controller action methods (create, update, show, index, delete)
    along with any that may be specified in the api router. A 'default'
    method may also be implemented to be used in place of any
    non-implemented actions. Deserializer methods must accept a request
    argument and return a dictionary. Controller methods must accept a
    request argument. Additionally, they must also accept keyword
    arguments that represent the keys returned by the Deserializer. They
    may raise a webob.exc exception or return a dict, which will be
    serialized by requested content type.
    """

    def __init__(self, controller, deserializer=None, serializer=None):
        """
        :param controller: object that implement methods created by routes lib
        :param deserializer: object that supports webob request deserialization
                             through controller-like actions
        :param serializer: object that supports webob response serialization
                           through controller-like actions
        """
        self.controller = controller
        self.serializer = serializer or JSONResponseSerializer()
        self.deserializer = deserializer or JSONRequestDeserializer()

    @webob.dec.wsgify(RequestClass=Request)
    def __call__(self, request):
        """WSGI method that controls (de)serialization and method dispatch."""
        action_args = self.get_action_args(request.environ)
        action = action_args.pop('action', None)

        try:
            deserialized_request = self.dispatch(self.deserializer,
                                                 action, request)
            action_args.update(deserialized_request)
            action_result = self.dispatch(self.controller, action,
                                          request, **action_args)
        except webob.exc.WSGIHTTPException as e:
            # Re-raise the (locale-)translated exception with the original
            # traceback so the error context is preserved.
            exc_info = sys.exc_info()
            six.reraise(translate_exception(request, e), None, exc_info[2])
        except Exception as e:
            # Any unexpected error becomes a plain 500 (details are logged,
            # not leaked to the client).
            LOG.exception(_LE("Caught error: %s"), six.text_type(e))
            response = webob.exc.HTTPInternalServerError()
            return response

        try:
            response = webob.Response(request=request)
            self.dispatch(self.serializer, action, response, action_result)
            return response
        except webob.exc.WSGIHTTPException as e:
            return translate_exception(request, e)
        except webob.exc.HTTPException as e:
            return e
        # return unserializable result (typically a webob exc)
        except Exception:
            return action_result

    def dispatch(self, obj, action, *args, **kwargs):
        """Find action-specific method on self and call it."""
        try:
            method = getattr(obj, action)
        except AttributeError:
            # Fall back to the object's 'default' handler when it does not
            # implement this specific action.
            method = getattr(obj, 'default')
        return method(*args, **kwargs)

    def get_action_args(self, request_environment):
        """Parse dictionary created by routes library."""
        try:
            args = request_environment['wsgiorg.routing_args'][1].copy()
        except Exception:
            return {}

        # 'controller' and 'format' are routing metadata, not arguments for
        # the action method.
        try:
            del args['controller']
        except KeyError:
            pass

        try:
            del args['format']
        except KeyError:
            pass

        return args
| |
# -*- coding: utf-8 -*-
import os
from django.conf import settings
from django.db import models
from django.contrib.auth.models import User
from datetime import datetime
from django.utils.translation import ugettext, ugettext_lazy as _
from wikis.settings import *
from wikis.managers.articles import ActiveManager
from wikis.files import get_attachment_path
from permissions.models import Permission
if 'djangosphinx' in settings.INSTALLED_APPS:
from djangosphinx.models import SphinxSearch
else:
SphinxSearch = None
if 'taggit' in settings.INSTALLED_APPS:
from taggit.managers import TaggableManager
else:
TaggableManager = None
class ShouldHaveExactlyOneRootSlug(Exception):
    """Raised when no root article (parent=None) can be found; exactly one
    parentless Article is expected to exist (see Article.get_root)."""
    pass
class Article(models.Model):
    """
    Wiki referring to Revision model for actual content.

    'slug' and 'parent' should be maintained centrally, since users
    aren't allowed to change them anyway.
    """
    author = models.ForeignKey(User, blank=True, null=True,
                               related_name="created_articles")
    parent = models.ForeignKey('self', verbose_name=_('Parent cal slug'),
                               help_text=_('Affects URL structure and possibly inherits permissions'),
                               null=True, blank=True, default=None)
    # NOTE: null=True was dropped here -- it has no effect on a
    # ManyToManyField and only triggers a Django warning.
    related = models.ManyToManyField('self', verbose_name=_('Related cals'),
                                     symmetrical=True,
                                     help_text=_('Sets a symmetrical relation other articles'),
                                     blank=True)
    title = models.CharField(max_length=140, verbose_name=_('Cal title'),
                             blank=False)
    slug = models.SlugField(max_length=140, verbose_name=_('slug'),
                            help_text=_('Letters, numbers, underscore and hyphen.'
                                        ' Do not use reserved words \'create\','
                                        ' \'history\' and \'edit\'.'),
                            blank=True)
    created_at = models.DateTimeField(_('created at'), default=datetime.now)
    modified_on = models.DateTimeField(_('modified on'), default=datetime.now)
    # Soft-delete flag; see delete()/reactivate() below.
    is_active = models.BooleanField(default=True)
    permissions = models.ForeignKey(Permission, verbose_name=_('Permissions'),
                                    blank=True, null=True,
                                    help_text=_('Permission group'))
    locked = models.BooleanField(default=False,
                                 verbose_name=_('Locked for editing'))
    locked_by = models.ForeignKey(User, related_name='locked',
                                  blank=True, null=True)
    current_version = models.OneToOneField('Version',
                                           related_name='%(app_label)s_%(class)s_version',
                                           blank=True, null=True, editable=True)
    category = models.CharField(max_length=1, choices=WIKI_CATEGORY)
    if TaggableManager:
        tags = TaggableManager()
    # TODO: add link list
    active = ActiveManager()
    objects = models.Manager()
    if SphinxSearch:
        search_articles = SphinxSearch(
            index='articles articles_delta',
            weights={
                'title': 100,
                'slug': 100,
            },
        )

    class Meta:
        verbose_name = _('Article')
        verbose_name_plural = _('Articles')
        app_label = 'wikis'
        unique_together = (('slug', 'parent'),)

    def __unicode__(self):
        return self.title

    def attachments(self):
        """Return all attachments bound to this article."""
        return Attachment.objects.filter(article__exact=self)

    def attachment_profile(self):
        """Return the thumbnail URL of the newest attachment, or False
        when the article has no attachments."""
        attachments = Attachment.objects.filter(
            article__exact=self).order_by('-uploaded_on')
        if attachments.count() > 0:
            return attachments[0].thumbnail.url
        return False

    @classmethod
    def get_cal_parent(cls, path):
        """
        allows to retrieve the first article in the path, to make it
        as the parent for the current event article.
        otherwise we take the root as the default parent.
        """
        if not path:
            return None
        if int(path[-1].cal_type) > 0:
            try:
                return path[-1].pagecal
            except Article.DoesNotExist:
                return Article.get_cal_parent(path[:-1])
        # Not a pagecal candidate; keep walking up the path.
        return Article.get_cal_parent(path[:-1])

    @classmethod
    def get_root(cls):
        """Return the root article, which should ALWAYS exist..
        except the very first time the Article is loaded, in which
        case the user is prompted to create this article."""
        try:
            return Article.objects.filter(parent__exact=None)[0]
        except IndexError:
            # BUG FIX: only a missing root raises the sentinel; other
            # exceptions (e.g. database errors) now propagate instead of
            # being masked by a bare except.
            raise ShouldHaveExactlyOneRootSlug()

    def get_url(self):
        """Return the cal URL for an article (slash-joined slug chain)."""
        if self.parent:
            return self.parent.get_url() + '/' + self.slug
        return self.slug

    @models.permalink
    def get_absolute_url(self):
        url = 'article_view'
        return (url, [self.get_url()])

    @models.permalink
    def get_edit_url(self):
        url = 'article_edit'
        return (url, [self.get_url()])

    @models.permalink
    def get_tree_view_url(self):
        url = 'article_tree'
        return (url, [self.get_url()])

    @models.permalink
    def get_upload_photo_url(self):
        url = 'article_upload_photo'
        return (url, [self.get_url()])

    @models.permalink
    def get_add_related_url(self):
        url = 'article_related'
        return (url, [self.get_url()])

    @models.permalink
    def get_cancel_url(self):
        url = 'article_cancel'
        return (url, [self.get_url()])

    @models.permalink
    def get_reactivate_url(self):
        url = 'article_reactivate'
        return (url, [self.get_url()])

    @models.permalink
    def get_set_parent_url(self):
        url = 'article_parent'
        return (url, [self.get_url()])

    @classmethod
    def get_url_reverse(cls, path, article, return_list=None):
        """Lookup a URL and return the corresponding set of articles
        in the path, or None when a path segment does not resolve."""
        # BUG FIX: the default used to be a shared mutable list ([]);
        # use None as the sentinel instead.
        if return_list is None:
            return_list = []
        if path == []:
            return return_list + [article]
        # Lookup next child in path
        try:
            a = Article.active.get(parent__exact=article,
                                   slug__exact=str(path[0]))
            return cls.get_url_reverse(path[1:], a, return_list + [article])
        except Article.DoesNotExist:
            # Narrowed from a blanket except: an unresolved segment is the
            # only expected failure (slug/parent are unique together).
            return None

    def can_write_l(self, user):
        """Check write permissions and locked status"""
        return not self.locked and self.permissions.can_write_obj(user)

    def can_attach(self, user):
        """Attaching files requires the same rights as writing."""
        return self.can_write_l(user)

    def delete(self, user):
        """Soft-delete: flag inactive when *user* is the writable author."""
        if self.can_write_l(user) and user == self.author:
            self.is_active = False
            self.save()
            return True
        return False

    def reactivate(self, user):
        """reactivate a cal if deleted"""
        if self.can_write_l(user) and user == self.author:
            self.is_active = True
            self.save()
            return True
        return False

    def edit_relatives(self, relatives):
        """ change permissions to users on the current cal """
        # Diff the stored relation set against the requested one: add the
        # new entries, then drop anything no longer requested.
        old_relatives = list(self.related.all())
        for relative in relatives:
            if relative in old_relatives:
                old_relatives.remove(relative)
            else:
                self.related.add(relative)
        for relative in old_relatives:
            self.related.remove(relative)

    def set_parent(self, parent):
        """ set a parent for the current cal (should be a pagecal) """
        # Ensure doesn't already appended or it's child of the current one or it is itself
        if parent == self:
            return
        if parent.parent == self:
            # the child is currently the parent of the future parent:
            # swap the relationship.
            parent.parent = self.parent
            self.parent = parent
            parent.save()
            self.save()
        elif self.parent == parent:
            # the child parent is the same future parent; nothing to do
            return
        else:
            # the parent and the child aren't related
            self.parent = parent
            self.save()
# Fallback image used when an attachment has no uploaded picture.
DEFAULT_PICTURE = 'cal.gif'


class Attachment(models.Model):
    """Image attached to an Article, with an auto-generated thumbnail."""
    article = models.ForeignKey(Article, verbose_name=_('Article'))
    picture = models.ImageField(upload_to=get_attachment_path,
                                default=DEFAULT_PICTURE,
                                blank=True, null=True)
    thumbnail = models.ImageField(upload_to='uploads/thumbs/articles/',
                                  blank=True, null=True,
                                  editable=False)
    uploaded_by = models.ForeignKey(User, blank=True,
                                    verbose_name=_('Uploaded by'), null=True)
    uploaded_on = models.DateTimeField(default=datetime.now,
                                       verbose_name=_('Upload date'))

    class Meta:
        app_label = 'wikis'

    def save(self, force_insert=False, force_update=False):
        """Save the attachment, regenerating the thumbnail when the
        picture is newer than the existing thumbnail or either file is
        missing on disk."""
        thumb_update = False
        if self.thumbnail:
            try:
                if self.picture:
                    # BUG FIX: compare modification times explicitly.  The
                    # old code compared whole os.stat results, which orders
                    # by st_mode/st_ino first, not by mtime.
                    if (os.stat(self.picture.path).st_mtime >
                            os.stat(self.thumbnail.path).st_mtime):
                        thumb_update = True
                else:
                    self.picture = DEFAULT_PICTURE
                    thumb_update = True
            except OSError:
                # Either file is gone from disk -- rebuild the thumbnail.
                thumb_update = True
        # Parentheses only clarify the precedence the old code relied on.
        if (self.picture and not self.thumbnail) or thumb_update:
            from PIL import Image
            THUMB_SIZE = (200, 200)
            image = Image.open(self.picture)
            # PIL cannot thumbnail palette/CMYK images directly.
            if image.mode not in ('L', 'RGB'):
                image = image.convert('RGB')
            image.thumbnail(THUMB_SIZE, Image.ANTIALIAS)
            (head, tail) = os.path.split(self.picture.path)
            (a, b) = os.path.split(self.picture.name)
            thumb_dir = os.path.join(head, 'uploads', 'thumbs', 'articles')
            if not os.path.isdir(thumb_dir):
                # BUG FIX: makedirs creates missing intermediate
                # directories, where os.mkdir would fail.
                os.makedirs(thumb_dir)
            image.save(os.path.join(thumb_dir, tail))
            self.thumbnail = 'uploads/thumbs/articles/' + b
        # BUG FIX: forward the force flags instead of silently dropping them.
        super(Attachment, self).save(force_insert=force_insert,
                                     force_update=force_update)
| |
#!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2015,2016 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing to setup network devices"""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from brokertest import TestBrokerCommand
# This test case sets up a network that look like the following:
#
# +---> netuc_netmgmt_1a netuc_netmgmt_1b <---+
# | (4.1.0.0/26) (4.1.0.64/26) |
# | |
# | +---> netuc_transit_1a <=> netuc_transit_1b <---+ |
# | | (4.1.1.0/26) (4.1.2.0/26) | |
# | | (vlan101) (vlan201) | |
# | | | |
# + np01fdlr01 +---> netuc_transit_2a <=> netuc_transit_2b <---+ np01fdlr03 +
# | | (4.1.1.64/26) (4.1.2.64/26) | |
# | | (vlan102) (vlan202) | |
# | | | |
# + np01fdlr02 +---> netuc_transit_3a <=> netuc_transit_3b <---+ np01fdlr04 +
# | (4.1.1.128/26) (4.1.2.128/26) |
# | (vlan103) (vlan203) |
# | |
# +---> netuc_transit_4a <=> netuc_transit_4b <---+
# (4.1.1.192/26) (4.1.2.192/26)
# (vlan104) (vlan204)
#
# - Sides:
#   There are two sides in the above diagram. The idea is that each side
#   is serviced by a pair of independent routers. The result is that hosts
#   can connect to two resilient transit subnets (see the <=> on the diagram).
#
# - Routers
#   There are four routers in total (np01fdlr01-04), two on side 'a' and two
#   on side 'b'.
#
# - Management networks:
# Each side has a 'virtual' management network which is used by the router
# loopback addresses. So np01fdlr01/02 use the 'a' side management
# network netuc_netmgmt_1a.
#
# - Transit networks:
#   There are eight transit networks, four on each side. Each belongs to the
#   routers on either side. Each router has a vlan interface on these,
#   where the lowest numbered router takes the lowest numbered address.
#
# - Shared addresses:
#   An HSRP address is shared by the 01/02 and 03/04 router pairs, where
#   an odd numbered router (i.e. 03) is the primary for an odd numbered
#   network (netuc_transit_1b) - and vice versa for even numbered routers.
#
# Test fixture describing the topology drawn above.
#
#   domain   -- DNS domain appended to every device/interface name
#   gateways -- transit network -> ordered (router, vlan interface) pairs;
#               the first pair listed becomes the primary speaker
#   devices  -- router -> model, management interface, and per-interface
#               (network name, host index within that network)
config = {
    'domain': 'net.ms.com',
    'gateways': {
        'netuc_transit_1a': [('np01fdlr01', 'vlan101'), ('np01fdlr02', 'vlan101')],
        'netuc_transit_2a': [('np01fdlr02', 'vlan102'), ('np01fdlr01', 'vlan102')],
        'netuc_transit_3a': [('np01fdlr01', 'vlan103'), ('np01fdlr02', 'vlan103')],
        'netuc_transit_4a': [('np01fdlr02', 'vlan104'), ('np01fdlr01', 'vlan104')],
        'netuc_transit_1b': [('np01fdlr03', 'vlan201'), ('np01fdlr04', 'vlan201')],
        'netuc_transit_2b': [('np01fdlr04', 'vlan202'), ('np01fdlr03', 'vlan202')],
        'netuc_transit_3b': [('np01fdlr03', 'vlan203'), ('np01fdlr04', 'vlan203')],
        'netuc_transit_4b': [('np01fdlr04', 'vlan204'), ('np01fdlr03', 'vlan204')],
    },
    'devices': {
        # Side 'a' routers: management addresses on netuc_netmgmt_1a.
        'np01fdlr01': {
            'model': 'cat6509',
            'mgmt_interface': 'lo0',
            'interfaces': {
                'lo0': {
                    'net': 'netuc_netmgmt_1a',
                    'ipidx': 1,
                },
                'vlan101': {
                    'net': 'netuc_transit_1a',
                    'ipidx': 2,
                },
                'vlan102': {
                    'net': 'netuc_transit_2a',
                    'ipidx': 2,
                },
                'vlan103': {
                    'net': 'netuc_transit_3a',
                    'ipidx': 2,
                },
                'vlan104': {
                    'net': 'netuc_transit_4a',
                    'ipidx': 2,
                }
            }
        },
        'np01fdlr02': {
            'model': 'cat6509',
            'mgmt_interface': 'lo0',
            'interfaces': {
                'lo0': {
                    'net': 'netuc_netmgmt_1a',
                    'ipidx': 2,
                },
                'vlan101': {
                    'net': 'netuc_transit_1a',
                    'ipidx': 3,
                },
                'vlan102': {
                    'net': 'netuc_transit_2a',
                    'ipidx': 3,
                },
                'vlan103': {
                    'net': 'netuc_transit_3a',
                    'ipidx': 3,
                },
                'vlan104': {
                    'net': 'netuc_transit_4a',
                    'ipidx': 3,
                }
            }
        },
        # Side 'b' routers: management addresses on netuc_netmgmt_1b.
        'np01fdlr03': {
            'model': 'cat6509',
            'mgmt_interface': 'lo0',
            'interfaces': {
                'lo0': {
                    'net': 'netuc_netmgmt_1b',
                    'ipidx': 1,
                },
                'vlan201': {
                    'net': 'netuc_transit_1b',
                    'ipidx': 2,
                },
                'vlan202': {
                    'net': 'netuc_transit_2b',
                    'ipidx': 2,
                },
                'vlan203': {
                    'net': 'netuc_transit_3b',
                    'ipidx': 2,
                },
                'vlan204': {
                    'net': 'netuc_transit_4b',
                    'ipidx': 2,
                }
            }
        },
        'np01fdlr04': {
            'model': 'cat6509',
            'mgmt_interface': 'lo0',
            'interfaces': {
                'lo0': {
                    'net': 'netuc_netmgmt_1b',
                    'ipidx': 2,
                },
                'vlan201': {
                    'net': 'netuc_transit_1b',
                    'ipidx': 3,
                },
                'vlan202': {
                    'net': 'netuc_transit_2b',
                    'ipidx': 3,
                },
                'vlan203': {
                    'net': 'netuc_transit_3b',
                    'ipidx': 3,
                },
                'vlan204': {
                    'net': 'netuc_transit_4b',
                    'ipidx': 3,
                }
            }
        }
    }
}
# When running these unit tests on their own, the following flags help
# skip parts of the run that are not generally needed in personal
# development environments:
#flags = ('skip_prereq', 'skip_dsdb', 'skip_add')
#flags = ('skip_prereq', 'skip_dsdb', 'skip_delete')
#flags = ('skip_prereq', 'skip_dsdb')
flags = ()
class TestUsecaseNetworks(TestBrokerCommand):
    """Drive the full lifecycle (setup, add, verify, delete, cleanup) of
    the network topology described by ``config`` above.  Tests run in
    method-name order, so the numeric prefixes encode the required
    sequencing."""
    ########## SETUP STAGE ##########
    def test_100_add_dns_domain(self):
        if 'skip_prereq' in flags:
            return True
        # Register the expected DSDB side effect before issuing the command.
        self.dsdb_expect("add_dns_domain -domain_name %s -comments " % config['domain'])
        self.noouttest(["add", "dns_domain",
                        "--dns_domain", config['domain']])
        self.dsdb_verify()
    def test_100_add_6509_model(self):
        if 'skip_prereq' in flags:
            return True
        self.noouttest(["add_model", "--model", "cat6509",
                        "--vendor", "cisco", "--type=switch-router"])
    ########## ADDITION STAGE ##########
    def test_200_add_networks(self):
        if 'skip_add' in flags:
            return True
        # Only the netuc_* fixture networks belong to this use case.
        for network in self.net:
            if not network.name.startswith('netuc_'):
                continue
            command = ["add_network", "--network=%s" % network.name,
                       "--ip=%s" % network.ip,
                       "--netmask=%s" % network.netmask,
                       "--" + network.loc_type, network.loc_name,
                       "--type=%s" % network.nettype,
                       "--side=%s" % network.side]
            if network.comments:
                command.extend(["--comments", network.comments])
            self.noouttest(command)
            self.check_plenary_exists('network', 'internal', str(network.ip), 'config')
    def test_201_add_routers(self):
        if 'skip_add' in flags:
            return True
        # Create each router with its management (loopback) address only.
        for (name, dev_attrs) in config['devices'].iteritems():
            fqdn = name + '.' + config['domain']
            interface = dev_attrs['mgmt_interface']
            if_attrs = dev_attrs['interfaces'][interface]
            net = self.net[if_attrs['net']]
            ip = net[if_attrs['ipidx']]
            iftype = if_attrs['type'] if 'type' in if_attrs else 'loopback'
            self.dsdb_expect_add(fqdn, ip, interface)
            self.successtest(["add", "network_device", "--type", "misc",
                              "--network_device", fqdn,
                              "--ip", ip, "--interface", interface,
                              "--iftype", iftype,
                              "--%s" % net.loc_type, net.loc_name,
                              "--model", dev_attrs['model']])
            if 'skip_dsdb' not in flags:
                self.dsdb_verify()
    def test_202_add_interfaces(self):
        if 'skip_add' in flags:
            return True
        # Add the non-management (vlan) interfaces, addresses come later.
        for (dev_name, dev_attrs) in config['devices'].iteritems():
            fqdn = dev_name + '.' + config['domain']
            for (if_name, if_attrs) in dev_attrs['interfaces'].iteritems():
                if if_name == dev_attrs['mgmt_interface']:
                    continue
                iftype = if_attrs['type'] if 'type' in if_attrs else "virtual"
                command = ["add", "interface", "--interface", if_name,
                           "--iftype", iftype,
                           "--network_device", fqdn]
                self.noouttest(command)
    def test_203_add_addresses(self):
        if 'skip_add' in flags:
            return True
        for (dev_name, dev_attrs) in config['devices'].iteritems():
            fqdn = dev_name + '.' + config['domain']
            for (if_name, if_attrs) in dev_attrs['interfaces'].iteritems():
                if if_name == dev_attrs['mgmt_interface']:
                    continue
                net = self.net[if_attrs['net']]
                ip = net[if_attrs['ipidx']]
                # Per-interface DNS name, e.g. np01fdlr01-vlan101.<domain>.
                if_fqdn = dev_name + '-' + if_name + '.' + config['domain']
                self.dsdb_expect_add(if_fqdn, ip, if_name, primary=fqdn)
                command = ["add", "interface", "address",
                           "--network_device", fqdn,
                           "--interface", if_name, "--ip", ip]
                self.noouttest(command)
                if 'skip_dsdb' not in flags:
                    self.dsdb_verify()
    def test_204_add_hsrp(self):
        if 'skip_add' in flags:
            return True
        for net_name, gateways in config['gateways'].iteritems():
            net = self.net[net_name]
            ip = net[1]  # Always use first address
            gw_fqdn = '-'.join(net.name.split('_')[1:] + ['gateway']) + '.' + config['domain']
            priority = 100
            for (dev_name, if_name) in gateways:
                fqdn = dev_name + '.' + config['domain']
                # Note, this only happens the first time an address is added
                if priority == 100:
                    self.dsdb_expect_add(gw_fqdn, ip)
                # We only specify the FQDN when creating the first interface
                command = ["add", "interface", "address",
                           "--network_device", fqdn,
                           "--interface", if_name, "--label", "hsrp",
                           "--fqdn", gw_fqdn, "--ip", ip,
                           "--shared", "--priority", priority]
                self.noouttest(command)
                if 'skip_dsdb' not in flags and priority == 100:
                    self.dsdb_verify()
                priority = priority + 1
    def test_205_add_default_route(self):
        if 'skip_add' in flags:
            return True
        for network in self.net:
            if not network.name.startswith('netuc_'):
                continue
            # Management networks carry no shared gateway address.
            if network.nettype == 'management':
                continue
            fqdn = '-'.join(network.name.split('_')[1:] + ['gateway']) + '.' + config['domain']
            command = ["add_router_address", "--fqdn", fqdn]
            self.noouttest(command)
    ########## TESTING STAGE ##########
    def test_301_network_plenary(self):
        # Verify the generated plenary template for each transit network
        # lists both routers as providers of the shared gateway address.
        for (net_name, net_attrs) in config['gateways'].iteritems():
            net = self.net[net_name]
            pri_router = net_attrs[0][0]
            pri_router_fqdn = pri_router + '.' + config['domain']
            pri_router_if = net_attrs[0][1]
            _pri_router_ip = config['devices'][pri_router]['interfaces'][pri_router_if]
            pri_router_ip = self.net[_pri_router_ip['net']][_pri_router_ip['ipidx']]
            sec_router = net_attrs[1][0]
            sec_router_fqdn = sec_router + '.' + config['domain']
            sec_router_if = net_attrs[1][1]
            _sec_router_ip = config['devices'][sec_router]['interfaces'][sec_router_if]
            sec_router_ip = self.net[_sec_router_ip['net']][_sec_router_ip['ipidx']]
            command = ["cat", "--networkip", str(net.ip)]
            out = self.commandtest(command)
            m = lambda x: self.matchoutput(out, x, command)
            m('structure template network/internal/%s/config' % net.ip)
            m('"name" = "%s"' % net.name)
            m('"network" = "%s"' % net.ip)
            m('"netmask" = "%s"' % net.netmask)
            m('"broadcast" = "%s"' % net.broadcast)
            m('"prefix_length" = %d' % net.prefixlen)
            m('"type" = "%s"' % net.nettype)
            m('"side" = "%s"' % net.side)
            m('"sysloc/building" = "%s"' % net.loc_name)
            m('"network_environment/name" = "internal"')
            s = lambda x: self.searchoutput(out, x, command)
            # Regex builders for the pan-language structures in the output.
            # NOTE: the tuple-parameter lambda below is Python 2 only.
            assignment = lambda lhs, rhs: r'"%s"\s*=\s*%s' % (lhs, rhs)
            nlisti = lambda (k, v): r'"%s",\s*"%s"\s*' % (k, v)
            nlist = lambda *l: r'nlist\(\s*' + r',\s*'.join(map(nlisti, l)) + r'\)\s*'
            slist = lambda *l: r'list\(\s*' + r',\s*'.join(l) + r'\)\s*'
            s(assignment(r'router_address/{%s}/providers' % net[1],
                         slist(nlist(("interface", pri_router_if),
                                     ("ip", pri_router_ip),
                                     ("router", pri_router_fqdn)),
                               nlist(("interface", sec_router_if),
                                     ("ip", sec_router_ip),
                                     ("router", sec_router_fqdn)))))
    ########## DELETION STAGE ##########
    def test_804_del_default_route(self):
        if 'skip_delete' in flags:
            return True
        for network in self.net:
            if not network.name.startswith('netuc_'):
                continue
            if network.nettype == 'management':
                continue
            fqdn = '-'.join(network.name.split('_')[1:] + ['gateway']) + '.' + config['domain']
            command = ["del_router_address", "--fqdn", fqdn]
            self.noouttest(command)
    def test_805_del_hsrp(self):
        if 'skip_delete' in flags:
            return True
        for net_name, gateways in config['gateways'].iteritems():
            net = self.net[net_name]
            ip = net[1]  # Always use first address
            if 'skip_dsdb' not in flags:
                self.dsdb_expect_delete(ip)
            for (dev_name, if_name) in gateways:
                fqdn = dev_name + '.' + config['domain']
                # We only specify the FQDN when creating the first interface
                command = ["del", "interface", "address",
                           "--network_device", fqdn,
                           "--interface", if_name,
                           "--ip", ip]
                self.noouttest(command)
            if 'skip_dsdb' not in flags:
                self.dsdb_verify()
    def test_806_del_addresses(self):
        if 'skip_delete' in flags:
            return True
        for (dev_name, dev_attrs) in config['devices'].iteritems():
            fqdn = dev_name + '.' + config['domain']
            for (if_name, if_attrs) in dev_attrs['interfaces'].iteritems():
                if if_name == dev_attrs['mgmt_interface']:
                    continue
                net = self.net[if_attrs['net']]
                ip = net[if_attrs['ipidx']]
                self.dsdb_expect_delete(ip)
                command = ["del", "interface", "address",
                           "--network_device", fqdn,
                           "--interface", if_name, "--ip", ip]
                self.noouttest(command)
                if 'skip_dsdb' not in flags:
                    self.dsdb_verify()
    def test_807_del_interfaces(self):
        if 'skip_delete' in flags:
            return True
        for (dev_name, dev_attrs) in config['devices'].iteritems():
            fqdn = dev_name + '.' + config['domain']
            for (if_name, if_attrs) in dev_attrs['interfaces'].iteritems():
                if if_name == dev_attrs['mgmt_interface']:
                    continue
                command = ["del", "interface", "--interface", if_name,
                           "--network_device", fqdn]
                self.noouttest(command)
    def test_808_del_routers(self):
        if 'skip_delete' in flags:
            return True
        for (name, dev_attrs) in config['devices'].iteritems():
            fqdn = name + '.' + config['domain']
            interface = dev_attrs['mgmt_interface']
            if_attrs = dev_attrs['interfaces'][interface]
            net = self.net[if_attrs['net']]
            ip = net[if_attrs['ipidx']]
            self.dsdb_expect_delete(ip)
            command = "del network_device --network_device %s" % fqdn
            self.noouttest(command.split(" "))
            if 'skip_dsdb' not in flags:
                self.dsdb_verify()
    def test_809_del_networks(self):
        if 'skip_delete' in flags:
            return True
        for network in self.net:
            if not network.name.startswith('netuc_'):
                continue
            command = ["del_network", "--ip=%s" % network.ip]
            self.noouttest(command)
            self.check_plenary_gone('network', 'internal', str(network.ip), 'config')
    ########## CLEANUP STAGE ##########
    def test_900_del_6509_model(self):
        if 'skip_prereq' in flags:
            return True
        command = "del model --model cat6509 --vendor cisco"
        self.noouttest(command.split(" "))
    def test_900_del_dns_domain(self):
        if 'skip_prereq' in flags:
            return True
        self.dsdb_expect("delete_dns_domain -domain_name %s" % config['domain'])
        command = "del dns_domain --dns_domain %s" % config['domain']
        self.noouttest(command.split(" "))
        self.dsdb_verify()
if __name__ == '__main__':
    # Build and run the suite for this use case at verbose level 2.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner(verbosity=2)
    runner.run(loader.loadTestsFromTestCase(TestUsecaseNetworks))
| |
"""
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2011 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Shubhangi Satras, Cisco Systems, Inc.
# @author: Peter Strunk, Cisco Systems, Inc.
# @author: Atul Gaikad, Cisco Systems, Inc.
# @author: Tyler Smith, Cisco Systems, Inc.
#
"""
import unittest
import logging as LOG
from quantum.common import exceptions as exc
from quantum.common import utils
from quantum.plugins.cisco import l2network_plugin_configuration as conf
from quantum.plugins.cisco.common import cisco_constants as const
from quantum.plugins.cisco.common import cisco_credentials as creds
from quantum.plugins.cisco.models import l2network_multi_blade
from quantum.plugins.cisco.db import api as db
from quantum.plugins.cisco.db import l2network_db as cdb
# Configure root logging for the test module (``LOG`` is the logging module).
LOG.basicConfig(level=LOG.WARN)
# NOTE(review): the logger returned here is discarded -- presumably it was
# meant to be bound (e.g. ``LOG = logging.getLogger(__name__)``); confirm.
LOG.getLogger(__name__)
# Set some data to use in tests
tenant_id = "network_admin"
net_name = "TestNetwork1"
new_net_name = "NewTestNetwork1"
# Identifiers deliberately absent from the DB; used by the *DNE tests.
net_id = "44"
port_id = "p0005"
port_state = const.PORT_UP
interface_id = "vif-01"
vlan_id = "102"
def vlan_name(id):
    """Build the canonical VLAN name for a network id.

    Only the first ten characters of *id* contribute to the name.
    """
    prefix = id[:10]
    return "q-{0}vlan".format(prefix)
class TestMultiBlade(unittest.TestCase):
"""
Tests for the multi-blade model for the L2Network plugin
"""
_plugins = {}
_inventory = {}
    def setUp(self):
        """Setup our tests"""
        # Initialize cdb and credentials against an in-memory SQLite DB.
        db.configure_db({'sql_connection': 'sqlite:///:memory:'})
        cdb.initialize()
        creds.Store.initialize()
        # Create a place to store net and port ids for the duration of the test
        self.net_id = 0
        self.port_id = 0
        # Create the multiblade object
        self._l2network_multiblade = l2network_multi_blade. \
            L2NetworkMultiBlade()
        self.plugin_key = "quantum.plugins.cisco.ucs.cisco_ucs_plugin" + \
            ".UCSVICPlugin"
        # Get UCS inventory to make sure all UCSs are affected by tests
        for key in conf.PLUGINS[const.PLUGINS].keys():
            if key in conf.PLUGINS[const.INVENTORY].keys():
                self._inventory[key] = utils.import_object(
                    conf.PLUGINS[const.INVENTORY][key])
        # Number of UCS chassis in the inventory of the 'ucs_plugin' entry.
        self.ucs_count = self._inventory['ucs_plugin'].\
            _inventory.__len__()
def tearDown(self):
"""Tear down our tests"""
try:
port = db.port_get(self.net_id, self.port_id)
self._l2network_multiblade.delete_port([tenant_id, self.net_id,
self.port_id])
except exc.NetworkNotFound:
# We won't always have a port to remove
pass
except exc.PortNotFound:
# We won't always have a port to remove
pass
try:
net = db.network_get(self.net_id)
self._l2network_multiblade.delete_network([tenant_id, self.net_id])
except exc.NetworkNotFound:
# We won't always have a network to remove
pass
db.clear_db()
    def test_create_network(self):
        """Support for the Quantum core API call (create_network)."""
        LOG.debug("test_create_network - START")
        # Create the network in the test DB, then with the model
        self.net_id = db.network_create(tenant_id, net_name)[const.UUID]
        networks = self._l2network_multiblade.create_network([tenant_id,
                                                              net_name,
                                                              self.net_id,
                                                              vlan_name(self.net_id),
                                                              vlan_id])
        cdb.add_vlan_binding(vlan_id, vlan_name(self.net_id), self.net_id)
        # Every returned entry must echo the id/name it was created with.
        for network in networks:
            self.assertEqual(network[const.NET_ID], self.net_id)
            self.assertEqual(network[const.NET_NAME], net_name)
        LOG.debug("test_create_network - END")
    def test_delete_network(self):
        """Support for the Quantum core API call (delete_network)."""
        LOG.debug("test_delete_network - START")
        # Create the network in the test DB, then with the model
        self.net_id = db.network_create(tenant_id, net_name)[const.UUID]
        self._l2network_multiblade.create_network([tenant_id,
                                                   net_name,
                                                   self.net_id,
                                                   vlan_name(self.net_id),
                                                   vlan_id])
        cdb.add_vlan_binding(vlan_id, vlan_name(self.net_id), self.net_id)
        networks = self._l2network_multiblade.delete_network([tenant_id,
                                                              self.net_id])
        # Mirror the deletion in the DB so tearDown finds a clean state.
        cdb.remove_vlan_binding(self.net_id)
        db.network_destroy(self.net_id)
        # The delete result must still describe the removed network.
        for network in networks:
            self.assertEqual(network[const.NET_ID], self.net_id)
            self.assertEqual(network[const.NET_NAME], net_name)
        LOG.debug("test_delete_network - END")
def test_delete_networkDNE(self):
"""Support for the Quantum core API call"""
LOG.debug("test_delete_networkDNE - START")
self.assertRaises(exc.NetworkNotFound,
self._l2network_multiblade.delete_network,
[tenant_id, net_id])
LOG.debug("test_delete_networkDNE - END")
    def test_update_network(self):
        """Support for the Quantum core API call (update_network)."""
        LOG.debug("test_update_network - START")
        self.net_id = db.network_create(tenant_id, net_name)[const.UUID]
        self._l2network_multiblade.create_network([tenant_id,
                                                   net_name,
                                                   self.net_id,
                                                   vlan_name(self.net_id),
                                                   vlan_id])
        cdb.add_vlan_binding(vlan_id, vlan_name(self.net_id), self.net_id)
        # Rename in the DB first, then through the model.
        net_details = db.network_update(self.net_id, tenant_id,
                                        name=new_net_name)
        networks = self._l2network_multiblade.update_network([tenant_id,
                                                              self.net_id,
                                                              {'name': new_net_name}])
        # The update result must carry the new name.
        for network in networks:
            self.assertEqual(network[const.NET_ID], self.net_id)
            self.assertEqual(network[const.NET_NAME], new_net_name)
        LOG.debug("test_update_network - END")
def test_update_networkDNE(self):
"""Support for the Quantum core API call"""
LOG.debug("test_update_networkDNE - START")
self.assertRaises(exc.NetworkNotFound,
self._l2network_multiblade.update_network,
[tenant_id, net_id, {'name': new_net_name}])
LOG.debug("test_update_networkDNE - END")
    def test_get_all_networks(self):
        """Not implemented for this model"""
        # Intentionally empty: the multi-blade model does not implement this.
        pass
    def test_get_network_details(self):
        """Not implemented for this model"""
        # Intentionally empty: the multi-blade model does not implement this.
        pass
    def test_create_port(self):
        """Support for the Quantum core API call (create_port)."""
        LOG.debug("test_create_port - START")
        # A port requires an existing network, so build one first.
        self.net_id = db.network_create(tenant_id, net_name)[const.UUID]
        self._l2network_multiblade.create_network([tenant_id,
                                                   net_name,
                                                   self.net_id,
                                                   vlan_name(self.net_id),
                                                   vlan_id])
        cdb.add_vlan_binding(vlan_id, vlan_name(self.net_id), self.net_id)
        self.port_id = db.port_create(self.net_id, port_state)[const.UUID]
        port = self._l2network_multiblade.create_port([tenant_id,
                                                       self.net_id,
                                                       port_state,
                                                       self.port_id])
        self.assertEqual(self.port_id, port[0][const.PORTID])
        LOG.debug("test_create_port - END")
    def test_delete_port(self):
        """Support for the Quantum core API call (delete_port)."""
        LOG.debug("test_delete_port - START")
        self.net_id = db.network_create(tenant_id, net_name)[const.UUID]
        self._l2network_multiblade.create_network([tenant_id,
                                                   net_name,
                                                   self.net_id,
                                                   vlan_name(self.net_id),
                                                   vlan_id])
        cdb.add_vlan_binding(vlan_id, vlan_name(self.net_id), self.net_id)
        self.port_id = db.port_create(self.net_id, port_state)[const.UUID]
        self._l2network_multiblade.create_port([tenant_id,
                                               self.net_id,
                                               port_state, self.port_id])
        port = self._l2network_multiblade.delete_port([tenant_id,
                                                       self.net_id,
                                                       self.port_id])
        self.assertEqual(self.port_id, port[0][const.PORTID])
        # Recreating port so tear down doesn't cause an error
        self.port_id = db.port_create(self.net_id, port_state)[const.UUID]
        self._l2network_multiblade.create_port([tenant_id,
                                               self.net_id,
                                               port_state, self.port_id])
        LOG.debug("test_delete_port - END")
    def test_get_all_ports(self):
        """Not implemented for this model"""
        # Intentionally empty: the multi-blade model does not implement this.
        pass
    def test_update_port(self):
        """Not implemented for this model"""
        pass
    def test_update_portDNE(self):
        """Not implemented for this model"""
        pass
    def test_update_port_networkDNE(self):
        """Not implemented for this model"""
        pass
    def test_port_details(self):
        """Not implemented for this model"""
        pass
    def test_plug_interface(self):
        """Support for the Quantum core API call (plug_interface)."""
        LOG.debug("test_plug_interface - START")
        # Build the network + port the interface will be plugged into.
        self.net_id = db.network_create(tenant_id, net_name)[const.UUID]
        self._l2network_multiblade.create_network([tenant_id,
                                                   net_name,
                                                   self.net_id,
                                                   vlan_name(self.net_id),
                                                   vlan_id])
        cdb.add_vlan_binding(vlan_id, vlan_name(self.net_id), self.net_id)
        self.port_id = db.port_create(self.net_id, port_state)[const.UUID]
        self._l2network_multiblade.create_port([tenant_id,
                                               self.net_id,
                                               port_state, self.port_id])
        interface = self._l2network_multiblade.plug_interface([tenant_id,
                                self.net_id, self.port_id, interface_id])
        # Record the attachment in the DB as well, then compare both views.
        port = db.port_set_attachment(self.net_id, self.port_id, interface_id)
        self.assertEqual(self.port_id, interface[0][const.PORTID])
        self.assertEqual(port[const.INTERFACEID], interface_id)
        LOG.debug("test_plug_interface - END")
def test_plug_interface_networkDNE(self):
    """Quantum core API: plugging into a nonexistent network raises
    NetworkNotFound."""
    LOG.debug("test_plug_interface_networkDNE - START")
    self.net_id = db.network_create(tenant_id, net_name)[const.UUID]
    self._l2network_multiblade.create_network(
        [tenant_id, net_name, self.net_id,
         vlan_name(self.net_id), vlan_id])
    cdb.add_vlan_binding(vlan_id, vlan_name(self.net_id), self.net_id)
    self.port_id = db.port_create(self.net_id, port_state)[const.UUID]
    self._l2network_multiblade.create_port(
        [tenant_id, self.net_id, port_state, self.port_id])
    # ``net_id`` (module-level, not self.net_id) is presumably a bogus id
    # that was never created — TODO confirm against the fixture constants.
    self.assertRaises(exc.NetworkNotFound,
                      self._l2network_multiblade.plug_interface,
                      [tenant_id, net_id, self.port_id, interface_id])
    LOG.debug("test_plug_interface_networkDNE - END")
def test_plug_interface_portDNE(self):
    """Support for the Quantum core API call.

    Plugging an interface into a port that was never created must raise
    PortNotFound.
    """
    LOG.debug("test_plug_interface_portDNE - START")
    self.net_id = db.network_create(tenant_id, net_name)[const.UUID]
    self._l2network_multiblade.create_network([tenant_id,
                                               net_name,
                                               self.net_id,
                                               vlan_name(self.net_id),
                                               vlan_id])
    cdb.add_vlan_binding(vlan_id, vlan_name(self.net_id), self.net_id)
    # No port is created here on purpose: the module-level ``port_id``
    # does not exist in this network, so plug_interface must fail.
    self.assertRaises(exc.PortNotFound,
                      self._l2network_multiblade.plug_interface,
                      [tenant_id, self.net_id, port_id, interface_id])
    # Bug fix: the closing trace message said "- START"; every other test
    # in this class logs "- END" when it finishes.
    LOG.debug("test_plug_interface_portDNE - END")
def test_unplug_interface(self):
    """Quantum core API: unplugging an attached interface reports the
    affected port."""
    LOG.debug("test_unplug_interface - START")
    # Full fixture: network, vlan binding, port, plugged interface.
    self.net_id = db.network_create(tenant_id, net_name)[const.UUID]
    self._l2network_multiblade.create_network(
        [tenant_id, net_name, self.net_id,
         vlan_name(self.net_id), vlan_id])
    cdb.add_vlan_binding(vlan_id, vlan_name(self.net_id), self.net_id)
    self.port_id = db.port_create(self.net_id, port_state)[const.UUID]
    self._l2network_multiblade.create_port(
        [tenant_id, self.net_id, port_state, self.port_id])
    self._l2network_multiblade.plug_interface(
        [tenant_id, self.net_id, self.port_id, interface_id])
    db.port_set_attachment(self.net_id, self.port_id, interface_id)
    iface = self._l2network_multiblade.unplug_interface(
        [tenant_id, self.net_id, self.port_id])
    self.assertEqual(self.port_id, iface[0][const.PORTID])
    LOG.debug("test_unplug_interface - END")
| |
"""
Tests for the rzlib module.
"""
import py
from rpython.rlib import rzlib
from rpython.rlib.rarithmetic import r_uint
import zlib
# Reference payload shared by the tests below: a short plain string and its
# zlib-compressed form, using the stdlib zlib module as the oracle that the
# rzlib bindings are checked against.
expanded = 'some bytes which will be compressed'
compressed = zlib.compress(expanded)
def test_crc32():
    """
    rzlib.crc32 of a string is its CRC32, returned as an unsigned
    32-bit integer.
    """
    checks = [('', 0),
              ('\0', 3523407757),
              ('hello, world.', 3358036098)]
    for data, value in checks:
        assert rzlib.crc32(data) == r_uint(value)
def test_crc32_start_value():
    """
    A second argument to rzlib.crc32 is used as the starting CRC value,
    so checksums can be computed incrementally.
    """
    checks = [('', 42),
              ('\0', 163128923),
              ('hello, world.', 1090960721)]
    for data, value in checks:
        assert rzlib.crc32(data, 42) == r_uint(value)
    # Feeding a running checksum back in must equal one combined call.
    part1, part2 = 'hello, ', 'world.'
    running = rzlib.crc32(part2, rzlib.crc32(part1))
    assert running == rzlib.crc32(part1 + part2)
def test_adler32():
    """
    rzlib.adler32 of a string is its Adler-32 checksum, returned as an
    unsigned 32-bit integer.
    """
    checks = [('', 1),
              ('\0', 65537),
              ('hello, world.', 571147447),
              ('x' * 23, 2172062409)]
    for data, value in checks:
        assert rzlib.adler32(data) == r_uint(value)
def test_adler32_start_value():
    """
    A second argument to rzlib.adler32 is used as the starting checksum,
    so checksums can be computed incrementally.
    """
    checks = [('', 42),
              ('\0', 2752554),
              ('hello, world.', 606078176),
              ('x' * 23, 2233862898)]
    for data, value in checks:
        assert rzlib.adler32(data, 42) == r_uint(value)
    # Feeding a running checksum back in must equal one combined call.
    part1, part2 = 'hello, ', 'world.'
    running = rzlib.adler32(part2, rzlib.adler32(part1))
    assert running == rzlib.adler32(part1 + part2)
def test_invalidLevel():
    """
    deflateInit() raises ValueError when given an out-of-bounds
    compression level.
    """
    for bad_level in (-2, 10):
        py.test.raises(ValueError, rzlib.deflateInit, bad_level)
def test_deflate_init_end():
    """
    A deflateInit()/deflateEnd() pair works and has no other effect.
    """
    zstream = rzlib.deflateInit()
    rzlib.deflateEnd(zstream)
def test_deflate_set_dictionary():
    """
    Compressing with a preset dictionary produces a stream that inflates
    to Z_NEED_DICT until the same dictionary is supplied to the inflater.
    """
    text = 'abcabc'
    zdict = 'abc'
    stream = rzlib.deflateInit()
    rzlib.deflateSetDictionary(stream, zdict)
    bytes = rzlib.compress(stream, text, rzlib.Z_FINISH)
    rzlib.deflateEnd(stream)

    # Decompress by hand with the raw _inflate() entry point so the
    # Z_NEED_DICT return code is observable (decompress() would hide it).
    stream2 = rzlib.inflateInit()
    from rpython.rtyper.lltypesystem import lltype, rffi, rstr
    from rpython.rtyper.annlowlevel import llstr
    from rpython.rlib.rstring import StringBuilder
    # Raw input buffer holding the compressed bytes.
    with lltype.scoped_alloc(rffi.CCHARP.TO, len(bytes)) as inbuf:
        rstr.copy_string_to_raw(llstr(bytes), inbuf, 0, len(bytes))
        stream2.c_next_in = rffi.cast(rzlib.Bytefp, inbuf)
        rffi.setintfield(stream2, 'c_avail_in', len(bytes))
        # Raw output buffer; 100 bytes is plenty for the 6-byte payload.
        with lltype.scoped_alloc(rffi.CCHARP.TO, 100) as outbuf:
            stream2.c_next_out = rffi.cast(rzlib.Bytefp, outbuf)
            bufsize = 100
            rffi.setintfield(stream2, 'c_avail_out', bufsize)
            # First attempt must stall asking for the dictionary.
            err = rzlib._inflate(stream2, rzlib.Z_SYNC_FLUSH)
            assert err == rzlib.Z_NEED_DICT
            # Provide the dictionary and resume.
            rzlib.inflateSetDictionary(stream2, zdict)
            rzlib._inflate(stream2, rzlib.Z_SYNC_FLUSH)
            # Bytes produced = buffer size minus what is still available.
            avail_out = rffi.cast(lltype.Signed, stream2.c_avail_out)
            result = StringBuilder()
            result.append_charpsize(outbuf, bufsize - avail_out)
    rzlib.inflateEnd(stream2)
    assert result.build() == text
def test_compression():
    """
    rzlib.compress() on a deflate stream reproduces the stdlib zlib
    output for the same input.
    """
    zstream = rzlib.deflateInit()
    data = rzlib.compress(zstream, expanded)
    data += rzlib.compress(zstream, "", rzlib.Z_FINISH)
    rzlib.deflateEnd(zstream)
    assert data == compressed
def test_compression_lots_of_data():
"""
Test compression of more data that fits in a single internal output buffer.
"""
expanded = repr(range(20000))
compressed = zlib.compress(expanded)
print len(expanded), '=>', len(compressed)
stream = rzlib.deflateInit()
bytes = rzlib.compress(stream, expanded, rzlib.Z_FINISH)
rzlib.deflateEnd(stream)
assert bytes == compressed
def test_inflate_init_end():
    """
    An inflateInit()/inflateEnd() pair works and has no other effect.
    """
    zstream = rzlib.inflateInit()
    rzlib.inflateEnd(zstream)
def test_decompression():
    """
    rzlib.decompress() on an inflate stream recovers the original bytes.
    """
    zstream = rzlib.inflateInit()
    part1, done1, unused1 = rzlib.decompress(zstream, compressed)
    part2, done2, unused2 = rzlib.decompress(zstream, "", rzlib.Z_FINISH)
    rzlib.inflateEnd(zstream)
    assert part1 + part2 == expanded
    assert done1 is True
    assert done2 is True
    assert unused1 == 0
    assert unused2 == 0
def test_decompression_lots_of_data():
"""
Test compression of more data that fits in a single internal output buffer.
"""
expanded = repr(range(20000))
compressed = zlib.compress(expanded)
print len(compressed), '=>', len(expanded)
stream = rzlib.inflateInit()
bytes, finished, unused = rzlib.decompress(stream, compressed,
rzlib.Z_FINISH)
rzlib.inflateEnd(stream)
assert bytes == expanded
assert finished is True
assert unused == 0
def test_decompression_truncated_input():
"""
Test that we can accept incomplete input when inflating, but also
detect this situation when using Z_FINISH.
"""
expanded = repr(range(20000))
compressed = zlib.compress(expanded)
print len(compressed), '=>', len(expanded)
stream = rzlib.inflateInit()
data, finished1, unused1 = rzlib.decompress(stream, compressed[:1000])
assert expanded.startswith(data)
assert finished1 is False
assert unused1 == 0
data2, finished2, unused2 = rzlib.decompress(stream, compressed[1000:2000])
data += data2
assert finished2 is False
assert unused2 == 0
assert expanded.startswith(data)
exc = py.test.raises(
rzlib.RZlibError,
rzlib.decompress, stream, compressed[2000:-500], rzlib.Z_FINISH)
msg = "Error -5 while decompressing data: incomplete or truncated stream"
assert str(exc.value) == msg
rzlib.inflateEnd(stream)
def test_decompression_too_much_input():
    """
    Bytes past the end of the compressed stream are reported via the
    ``unused`` return value instead of being decompressed.
    """
    stream = rzlib.inflateInit()
    data1, finished1, unused1 = rzlib.decompress(stream, compressed[:-5])
    assert finished1 is False
    assert unused1 == 0
    data2, finished2, unused2 = rzlib.decompress(stream,
                                                 compressed[-5:] + 'garbage')
    assert finished2 is True
    assert unused2 == len('garbage')
    assert data1 + data2 == expanded
    # Once finished, any further input is entirely "unused".
    data3, finished3, unused3 = rzlib.decompress(stream, 'more_garbage')
    assert finished3 is True
    assert unused3 == len('more_garbage')
    assert data3 == ''
    # Bug fix: this stream came from inflateInit(), so it must be released
    # with inflateEnd(), not deflateEnd().
    rzlib.inflateEnd(stream)
def test_decompress_max_length():
    """
    Test the max_length argument of decompress(): output stops at
    max_length bytes and the unconsumed input is reported.
    """
    stream = rzlib.inflateInit()
    data1, finished1, unused1 = rzlib.decompress(stream, compressed,
                                                 max_length = 17)
    assert data1 == expanded[:17]
    assert finished1 is False
    assert unused1 > 0
    # Feeding back the unconsumed tail completes the stream.
    data2, finished2, unused2 = rzlib.decompress(stream, compressed[-unused1:])
    assert data2 == expanded[17:]
    assert finished2 is True
    assert unused2 == 0
    # Bug fix: this stream came from inflateInit(), so it must be released
    # with inflateEnd(), not deflateEnd().
    rzlib.inflateEnd(stream)
def test_cornercases():
    """
    Test degenerate arguments: empty inputs for both compression and
    decompression, and max_length=0.
    """
    # Compressing a sequence of empty inputs still yields a valid stream.
    stream = rzlib.deflateInit()
    bytes = rzlib.compress(stream, "")
    bytes += rzlib.compress(stream, "")
    bytes += rzlib.compress(stream, "", rzlib.Z_FINISH)
    assert zlib.decompress(bytes) == ""
    rzlib.deflateEnd(stream)

    # Decompressing empty input produces nothing and does not finish.
    stream = rzlib.inflateInit()
    data, finished, unused = rzlib.decompress(stream, "")
    assert data == ""
    assert finished is False
    assert unused > 0 or unused == 0  # placeholder removed below
    assert unused == 0
    buf = compressed
    # With max_length=0 no output may be produced, but input must still be
    # reported back as unused so the caller can retry.
    for i in range(10):
        data, finished, unused = rzlib.decompress(stream, buf, max_length=0)
        assert data == ""
        assert finished is False
        assert unused > 0
        buf = buf[-unused:]
    # Bug fix: the second stream came from inflateInit(), so it must be
    # released with inflateEnd(), not deflateEnd().
    rzlib.inflateEnd(stream)
def test_zlibVersion():
    """The runtime zlib shares its major version with the build-time one."""
    runtime_version = rzlib.zlibVersion()
    expected_major = rzlib.ZLIB_VERSION[0]
    assert runtime_version[0] == expected_major
| |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import warnings
import os
import ctypes
from functools import partial
import numpy as np
from numpy.ctypeslib import ndpointer, load_library
from .core import Kernel, Kernel1D, Kernel2D, MAX_NORMALIZATION
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.console import human_file_size
from astropy import units as u
from astropy.nddata import support_nddata
from astropy.modeling.core import CompoundModel
from astropy.modeling.core import SPECIAL_OPERATORS
from .utils import KernelSizeError, has_even_axis, raise_even_kernel_exception
# Directory containing the compiled "_convolve" C extension shipped with
# this subpackage.
LIBRARY_PATH = os.path.dirname(__file__)

try:
    _convolve = load_library("_convolve", LIBRARY_PATH)
except Exception:
    raise ImportError("Convolution C extension is missing. Try re-building astropy.")

# The GIL is automatically released by default when calling functions imported
# from libraries loaded by ctypes.cdll.LoadLibrary(<path>)

# Declare prototypes
# Boundary None
_convolveNd_c = _convolve.convolveNd_c
_convolveNd_c.restype = None
_convolveNd_c.argtypes = [ndpointer(ctypes.c_double, flags={"C_CONTIGUOUS", "WRITEABLE"}),  # return array
                          ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),  # input array
                          ctypes.c_uint,  # N dim
                          # size array for input and result unless
                          # embed_result_within_padded_region is False,
                          # in which case the result array is assumed to be
                          # input.shape - 2*(kernel.shape//2). Note: integer division.
                          ndpointer(ctypes.c_size_t, flags="C_CONTIGUOUS"),
                          ndpointer(ctypes.c_double, flags="C_CONTIGUOUS"),  # kernel array
                          ndpointer(ctypes.c_size_t, flags="C_CONTIGUOUS"),  # size array for kernel
                          ctypes.c_bool,  # nan_interpolate
                          ctypes.c_bool,  # embed_result_within_padded_region
                          ctypes.c_uint]  # n_threads

# Disabling all doctests in this module until a better way of handling warnings
# in doctests can be determined
__doctest_skip__ = ['*']

# Boundary-handling modes accepted by convolve(); None ignores the edges.
BOUNDARY_OPTIONS = [None, 'fill', 'wrap', 'extend']
def _copy_input_if_needed(input, dtype=float, order='C', nan_treatment=None,
                          mask=None, fill_value=None):
    """
    Return ``input`` as a base `numpy.ndarray` of the requested ``dtype``
    and memory ``order``, copying only when the data must be modified.

    Parameters
    ----------
    input : array_like, `Kernel` or object with a ``unit`` attribute
        Data to convert. Kernels contribute their ``.array``; quantity-like
        objects contribute their ``.value``.
    dtype, order : optional
        Passed through to `numpy.array`.
    nan_treatment : {None, 'interpolate', 'fill'}, optional
        'fill' forces a copy so masked/filled pixels can be overwritten.
    mask : `None` or `numpy.ndarray`, optional
        Anything non-zero in ``mask`` is replaced by ``fill_value``.
    fill_value : scalar, optional
        Replacement value for masked entries.

    Raises
    ------
    TypeError
        If ``input`` cannot be converted to a float array.
    """
    # Alias input
    input = input.array if isinstance(input, Kernel) else input
    # strip quantity attributes
    if hasattr(input, 'unit'):
        input = input.value
    output = input
    # Copy input
    try:
        # Anything that's masked must be turned into NaNs for the interpolation.
        # This requires copying. A copy is also needed for nan_treatment == 'fill'
        # A copy prevents possible function side-effects of the input array.
        if nan_treatment == 'fill' or np.ma.is_masked(input) or mask is not None:
            if np.ma.is_masked(input):
                # ``np.ma.maskedarray.filled()`` returns a copy, however there
                # is no way to specify the return type or order etc. In addition
                # ``np.nan`` is a ``float`` and there is no conversion to an
                # ``int`` type. Therefore, a pre-fill copy is needed for non
                # ``float`` masked arrays. ``subok=True`` is needed to retain
                # ``np.ma.maskedarray.filled()``. ``copy=False`` allows the fill
                # to act as the copy if type and order are already correct.
                output = np.array(input, dtype=dtype, copy=False, order=order, subok=True)
                output = output.filled(fill_value)
            else:
                # Since we're making a copy, we might as well use `subok=False` to save,
                # what is probably, a negligible amount of memory.
                output = np.array(input, dtype=dtype, copy=True, order=order, subok=False)
                if mask is not None:
                    # mask != 0 yields a bool mask for all ints/floats/bool
                    output[mask != 0] = fill_value
        else:
            # The call below is synonymous with np.asanyarray(array, ftype=float, order='C')
            # The advantage of `subok=True` is that it won't copy when array is an ndarray subclass. If it
            # is and `subok=False` (default), then it will copy even if `copy=False`. This uses less memory
            # when ndarray subclasses are passed in.
            output = np.array(input, dtype=dtype, copy=False, order=order, subok=True)
    except (TypeError, ValueError) as e:
        # Bug fix: chain the original exception as the __cause__ instead of
        # stuffing it into the TypeError's args tuple, which produced an
        # unreadable two-element message.
        raise TypeError('input should be a Numpy array or something '
                        'convertible into a float array') from e
    return output
@support_nddata(data='array')
def convolve(array, kernel, boundary='fill', fill_value=0.,
             nan_treatment='interpolate', normalize_kernel=True, mask=None,
             preserve_nan=False, normalization_zero_tol=1e-8):
    """
    Convolve an array with a kernel.

    This routine differs from `scipy.ndimage.convolve` because
    it includes a special treatment for ``NaN`` values. Rather than
    including ``NaN`` values in the array in the convolution calculation, which
    causes large ``NaN`` holes in the convolved array, ``NaN`` values are
    replaced with interpolated values using the kernel as an interpolation
    function.

    Parameters
    ----------
    array : `~astropy.nddata.NDData` or `numpy.ndarray` or array_like
        The array to convolve. This should be a 1, 2, or 3-dimensional array
        or a list or a set of nested lists representing a 1, 2, or
        3-dimensional array.  If an `~astropy.nddata.NDData`, the ``mask`` of
        the `~astropy.nddata.NDData` will be used as the ``mask`` argument.
    kernel : `numpy.ndarray` or `~astropy.convolution.Kernel`
        The convolution kernel. The number of dimensions should match those for
        the array, and the dimensions should be odd in all directions.  If a
        masked array, the masked values will be replaced by ``fill_value``.
    boundary : str, optional
        A flag indicating how to handle boundaries:

        * `None`
            Set the ``result`` values to zero where the kernel
            extends beyond the edge of the array.
        * 'fill'
            Set values outside the array boundary to ``fill_value`` (default).
        * 'wrap'
            Periodic boundary that wrap to the other side of ``array``.
        * 'extend'
            Set values outside the array to the nearest ``array``
            value.
    fill_value : float, optional
        The value to use outside the array when using ``boundary='fill'``
    normalize_kernel : bool, optional
        Whether to normalize the kernel to have a sum of one.
    nan_treatment : {'interpolate', 'fill'}
        interpolate will result in renormalization of the kernel at each
        position ignoring (pixels that are NaN in the image) in both the image
        and the kernel.
        'fill' will replace the NaN pixels with a fixed numerical value (default
        zero, see ``fill_value``) prior to convolution
        Note that if the kernel has a sum equal to zero, NaN interpolation
        is not possible and will raise an exception.
    preserve_nan : bool
        After performing convolution, should pixels that were originally NaN
        again become NaN?
    mask : `None` or `numpy.ndarray`
        A "mask" array.  Shape must match ``array``, and anything that is masked
        (i.e., not 0/`False`) will be set to NaN for the convolution.  If
        `None`, no masking will be performed unless ``array`` is a masked array.
        If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
        masked of it is masked in either ``mask`` *or* ``array.mask``.
    normalization_zero_tol: float, optional
        The absolute tolerance on whether the kernel is different than zero.
        If the kernel sums to zero to within this precision, it cannot be
        normalized. Default is "1e-8".

    Returns
    -------
    result : `numpy.ndarray`
        An array with the same dimensions and as the input array,
        convolved with kernel.  The data type depends on the input
        array type.  If array is a floating point type, then the
        return array keeps the same data type, otherwise the type
        is ``numpy.float``.

    Notes
    -----
    For masked arrays, masked values are treated as NaNs.  The convolution
    is always done at ``numpy.float`` precision.
    """
    # Validate user-facing options up front, before any copying work.
    if boundary not in BOUNDARY_OPTIONS:
        raise ValueError(f"Invalid boundary option: must be one of {BOUNDARY_OPTIONS}")
    if nan_treatment not in ('interpolate', 'fill'):
        raise ValueError("nan_treatment must be one of 'interpolate','fill'")

    # OpenMP support is disabled at the C src code level, changing this will have
    # no effect.
    n_threads = 1

    # Keep refs to originals
    passed_kernel = kernel
    passed_array = array

    # The C routines all need float type inputs (so, a particular
    # bit size, endianness, etc.).  So we have to convert, which also
    # has the effect of making copies so we don't modify the inputs.
    # After this, the variables we work with will be array_internal, and
    # kernel_internal. However -- we do want to keep track of what type
    # the input array was so we can cast the result to that at the end
    # if it's a floating point type.  Don't bother with this for lists --
    # just always push those as float.
    # It is always necessary to make a copy of kernel (since it is modified),
    # but, if we just so happen to be lucky enough to have the input array
    # have exactly the desired type, we just alias to array_internal

    # Convert kernel to ndarray if not already

    # Copy or alias array to array_internal
    array_internal = _copy_input_if_needed(passed_array, dtype=float, order='C',
                                           nan_treatment=nan_treatment, mask=mask,
                                           fill_value=np.nan)
    array_dtype = getattr(passed_array, 'dtype', array_internal.dtype)
    # Copy or alias kernel to kernel_internal
    kernel_internal = _copy_input_if_needed(passed_kernel, dtype=float, order='C',
                                            nan_treatment=None, mask=None,
                                            fill_value=fill_value)

    # Make sure kernel has all odd axes
    if has_even_axis(kernel_internal):
        raise_even_kernel_exception()

    # If both image array and kernel are Kernel instances
    # constrain convolution method
    # This must occur before the main alias/copy of ``passed_kernel`` to
    # ``kernel_internal`` as it is used for filling masked kernels.
    # NOTE(review): despite the comment above, this block currently runs
    # *after* the kernel copy, so the hard-wired fill_value=0 does not reach
    # the masked-kernel fill — confirm intended ordering against upstream.
    if isinstance(passed_array, Kernel) and isinstance(passed_kernel, Kernel):
        warnings.warn("Both array and kernel are Kernel instances, hardwiring "
                      "the following parameters: boundary='fill', fill_value=0,"
                      " normalize_Kernel=True, nan_treatment='interpolate'",
                      AstropyUserWarning)
        boundary = 'fill'
        fill_value = 0
        normalize_kernel = True
        nan_treatment = 'interpolate'

    # -----------------------------------------------------------------------
    # From this point onwards refer only to ``array_internal`` and
    # ``kernel_internal``.
    # Assume both are base np.ndarrays and NOT subclasses e.g. NOT
    # ``Kernel`` nor ``np.ma.maskedarray`` classes.
    # -----------------------------------------------------------------------

    # Check dimensionality
    if array_internal.ndim == 0:
        raise Exception("cannot convolve 0-dimensional arrays")
    elif array_internal.ndim > 3:
        raise NotImplementedError('convolve only supports 1, 2, and 3-dimensional '
                                  'arrays at this time')
    elif array_internal.ndim != kernel_internal.ndim:
        raise Exception('array and kernel have differing number of '
                        'dimensions.')

    array_shape = np.array(array_internal.shape)
    kernel_shape = np.array(kernel_internal.shape)
    pad_width = kernel_shape//2

    # For boundary=None only the center space is convolved. All array indices within a
    # distance kernel.shape//2 from the edge are completely ignored (zeroed).
    # E.g. (1D list) only the indices len(kernel)//2 : len(array)-len(kernel)//2
    # are convolved. It is therefore not possible to use this method to convolve an
    # array by a kernel that is larger (see note below) than the array - as ALL pixels would be ignored
    # leaving an array of only zeros.
    # Note: For even kernels the correctness condition is array_shape > kernel_shape.
    # For odd kernels it is:
    # array_shape >= kernel_shape OR array_shape > kernel_shape-1 OR array_shape > 2*(kernel_shape//2).
    # Since the latter is equal to the former two for even lengths, the latter condition is complete.
    if boundary is None and not np.all(array_shape > 2*pad_width):
        raise KernelSizeError("for boundary=None all kernel axes must be smaller than array's - "
                              "use boundary in ['fill', 'extend', 'wrap'] instead.")

    # NaN interpolation significantly slows down the C convolution
    # computation. Since nan_treatment = 'interpolate', is the default
    # check whether it is even needed, if not, don't interpolate.
    # NB: np.isnan(array_internal.sum()) is faster than np.isnan(array_internal).any()
    nan_interpolate = (nan_treatment == 'interpolate') and np.isnan(array_internal.sum())

    # Check if kernel is normalizable
    if normalize_kernel or nan_interpolate:
        kernel_sum = kernel_internal.sum()
        kernel_sums_to_zero = np.isclose(kernel_sum, 0, atol=normalization_zero_tol)

        if kernel_sum < 1. / MAX_NORMALIZATION or kernel_sums_to_zero:
            raise ValueError("The kernel can't be normalized, because its sum is "
                             "close to zero. The sum of the given kernel is < {}"
                             .format(1. / MAX_NORMALIZATION))

    # Mark the NaN values so we can replace them later if interpolate_nan is
    # not set
    if preserve_nan or nan_treatment == 'fill':
        initially_nan = np.isnan(array_internal)
        if nan_treatment == 'fill':
            array_internal[initially_nan] = fill_value

    # Avoid any memory allocation within the C code. Allocate output array
    # here and pass through instead.
    result = np.zeros(array_internal.shape, dtype=float, order='C')

    embed_result_within_padded_region = True
    array_to_convolve = array_internal
    if boundary in ('fill', 'extend', 'wrap'):
        embed_result_within_padded_region = False
        if boundary == 'fill':
            # This method is faster than using numpy.pad(..., mode='constant')
            array_to_convolve = np.full(array_shape + 2*pad_width, fill_value=fill_value, dtype=float, order='C')
            # Use bounds [pad_width[0]:array_shape[0]+pad_width[0]] instead of [pad_width[0]:-pad_width[0]]
            # to account for when the kernel has size of 1 making pad_width = 0.
            if array_internal.ndim == 1:
                array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0]] = array_internal
            elif array_internal.ndim == 2:
                array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0],
                                  pad_width[1]:array_shape[1]+pad_width[1]] = array_internal
            else:
                array_to_convolve[pad_width[0]:array_shape[0]+pad_width[0],
                                  pad_width[1]:array_shape[1]+pad_width[1],
                                  pad_width[2]:array_shape[2]+pad_width[2]] = array_internal
        else:
            np_pad_mode_dict = {'fill': 'constant', 'extend': 'edge', 'wrap': 'wrap'}
            np_pad_mode = np_pad_mode_dict[boundary]
            pad_width = kernel_shape // 2

            if array_internal.ndim == 1:
                np_pad_width = (pad_width[0],)
            elif array_internal.ndim == 2:
                np_pad_width = ((pad_width[0],), (pad_width[1],))
            else:
                np_pad_width = ((pad_width[0],), (pad_width[1],), (pad_width[2],))

            array_to_convolve = np.pad(array_internal, pad_width=np_pad_width,
                                       mode=np_pad_mode)

    # Hand off to the C extension; sizes are passed as size_t arrays to
    # match the declared prototype.
    _convolveNd_c(result, array_to_convolve,
                  array_to_convolve.ndim,
                  np.array(array_to_convolve.shape, dtype=ctypes.c_size_t, order='C'),
                  kernel_internal,
                  np.array(kernel_shape, dtype=ctypes.c_size_t, order='C'),
                  nan_interpolate, embed_result_within_padded_region,
                  n_threads)

    # So far, normalization has only occured for nan_treatment == 'interpolate'
    # because this had to happen within the C extension so as to ignore
    # any NaNs
    if normalize_kernel:
        if not nan_interpolate:
            result /= kernel_sum
    elif nan_interpolate:
        result *= kernel_sum

    if nan_interpolate and not preserve_nan and np.isnan(result.sum()):
        warnings.warn("nan_treatment='interpolate', however, NaN values detected "
                      "post convolution. A contiguous region of NaN values, larger "
                      "than the kernel size, are present in the input array. "
                      "Increase the kernel size to avoid this.", AstropyUserWarning)

    if preserve_nan:
        result[initially_nan] = np.nan

    # Convert result to original data type
    array_unit = getattr(passed_array, "unit", None)
    if array_unit is not None:
        result <<= array_unit

    if isinstance(passed_array, Kernel):
        if isinstance(passed_array, Kernel1D):
            new_result = Kernel1D(array=result)
        elif isinstance(passed_array, Kernel2D):
            new_result = Kernel2D(array=result)
        else:
            raise TypeError("Only 1D and 2D Kernels are supported.")
        new_result._is_bool = False
        new_result._separable = passed_array._separable
        if isinstance(passed_kernel, Kernel):
            new_result._separable = new_result._separable and passed_kernel._separable
        return new_result
    elif array_dtype.kind == 'f':
        # Try to preserve the input type if it's a floating point type
        # Avoid making another copy if possible
        try:
            return result.astype(array_dtype, copy=False)
        except TypeError:
            return result.astype(array_dtype)
    else:
        return result
@support_nddata(data='array')
def convolve_fft(array, kernel, boundary='fill', fill_value=0.,
nan_treatment='interpolate', normalize_kernel=True,
normalization_zero_tol=1e-8,
preserve_nan=False, mask=None, crop=True, return_fft=False,
fft_pad=None, psf_pad=None, min_wt=0.0, allow_huge=False,
fftn=np.fft.fftn, ifftn=np.fft.ifftn,
complex_dtype=complex):
"""
Convolve an ndarray with an nd-kernel. Returns a convolved image with
``shape = array.shape``. Assumes kernel is centered.
`convolve_fft` is very similar to `convolve` in that it replaces ``NaN``
values in the original image with interpolated values using the kernel as
an interpolation function. However, it also includes many additional
options specific to the implementation.
`convolve_fft` differs from `scipy.signal.fftconvolve` in a few ways:
* It can treat ``NaN`` values as zeros or interpolate over them.
* ``inf`` values are treated as ``NaN``
* (optionally) It pads to the nearest 2^n size to improve FFT speed.
* Its only valid ``mode`` is 'same' (i.e., the same shape array is returned)
* It lets you use your own fft, e.g.,
`pyFFTW <https://pypi.org/project/pyFFTW/>`_ or
`pyFFTW3 <https://pypi.org/project/PyFFTW3/0.2.1/>`_ , which can lead to
performance improvements, depending on your system configuration. pyFFTW3
is threaded, and therefore may yield significant performance benefits on
multi-core machines at the cost of greater memory requirements. Specify
the ``fftn`` and ``ifftn`` keywords to override the default, which is
`numpy.fft.fft` and `numpy.fft.ifft`.
Parameters
----------
array : `numpy.ndarray`
Array to be convolved with ``kernel``. It can be of any
dimensionality, though only 1, 2, and 3d arrays have been tested.
kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
The convolution kernel. The number of dimensions should match those
for the array. The dimensions *do not* have to be odd in all directions,
unlike in the non-fft `convolve` function. The kernel will be
normalized if ``normalize_kernel`` is set. It is assumed to be centered
(i.e., shifts may result if your kernel is asymmetric)
boundary : {'fill', 'wrap'}, optional
A flag indicating how to handle boundaries:
* 'fill': set values outside the array boundary to fill_value
(default)
* 'wrap': periodic boundary
The `None` and 'extend' parameters are not supported for FFT-based
convolution
fill_value : float, optional
The value to use outside the array when using boundary='fill'
nan_treatment : {'interpolate', 'fill'}
``interpolate`` will result in renormalization of the kernel at each
position ignoring (pixels that are NaN in the image) in both the image
and the kernel. ``fill`` will replace the NaN pixels with a fixed
numerical value (default zero, see ``fill_value``) prior to
convolution. Note that if the kernel has a sum equal to zero, NaN
interpolation is not possible and will raise an exception.
normalize_kernel : function or boolean, optional
If specified, this is the function to divide kernel by to normalize it.
e.g., ``normalize_kernel=np.sum`` means that kernel will be modified to be:
``kernel = kernel / np.sum(kernel)``. If True, defaults to
``normalize_kernel = np.sum``.
normalization_zero_tol: float, optional
The absolute tolerance on whether the kernel is different than zero.
If the kernel sums to zero to within this precision, it cannot be
normalized. Default is "1e-8".
preserve_nan : bool
After performing convolution, should pixels that were originally NaN
again become NaN?
mask : `None` or `numpy.ndarray`
A "mask" array. Shape must match ``array``, and anything that is masked
(i.e., not 0/`False`) will be set to NaN for the convolution. If
`None`, no masking will be performed unless ``array`` is a masked array.
If ``mask`` is not `None` *and* ``array`` is a masked array, a pixel is
masked of it is masked in either ``mask`` *or* ``array.mask``.
crop : bool, optional
Default on. Return an image of the size of the larger of the input
image and the kernel.
If the image and kernel are asymmetric in opposite directions, will
return the largest image in both directions.
For example, if an input image has shape [100,3] but a kernel with shape
[6,6] is used, the output will be [100,6].
return_fft : bool, optional
Return the ``fft(image)*fft(kernel)`` instead of the convolution (which is
``ifft(fft(image)*fft(kernel))``). Useful for making PSDs.
fft_pad : bool, optional
Default on. Zero-pad image to the nearest 2^n. With
``boundary='wrap'``, this will be disabled.
psf_pad : bool, optional
Zero-pad image to be at least the sum of the image sizes to avoid
edge-wrapping when smoothing. This is enabled by default with
``boundary='fill'``, but it can be overridden with a boolean option.
``boundary='wrap'`` and ``psf_pad=True`` are not compatible.
min_wt : float, optional
If ignoring ``NaN`` / zeros, force all grid points with a weight less than
this value to ``NaN`` (the weight of a grid point with *no* ignored
neighbors is 1.0).
If ``min_wt`` is zero, then all zero-weight points will be set to zero
instead of ``NaN`` (which they would be otherwise, because 1/0 = nan).
See the examples below.
allow_huge : bool, optional
Allow huge arrays in the FFT? If False, will raise an exception if the
array or kernel size is >1 GB.
fftn : functions, optional
The fft function. Can be overridden to use your own ffts,
e.g. an fftw3 wrapper or scipy's fftn, ``fft=scipy.fftpack.fftn``.
ifftn : functions, optional
The inverse fft function. Can be overridden the same way ``fttn``.
complex_dtype : numpy.complex, optional
Which complex dtype to use. `numpy` has a range of options, from 64 to
256.
Raises
------
ValueError:
If the array is bigger than 1 GB after padding, will raise this exception
unless ``allow_huge`` is True
See Also
--------
convolve:
Convolve is a non-fft version of this code. It is more memory
efficient and for small kernels can be faster.
Returns
-------
default : ndarray
``array`` convolved with ``kernel``. If ``return_fft`` is set, returns
``fft(array) * fft(kernel)``. If crop is not set, returns the
image, but with the fft-padded size instead of the input size
Notes
-----
With ``psf_pad=True`` and a large PSF, the resulting data can become
very large and consume a lot of memory. See Issue
https://github.com/astropy/astropy/pull/4366 for further detail.
Examples
--------
>>> convolve_fft([1, 0, 3], [1, 1, 1])
array([ 1., 4., 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1])
array([ 1., 4., 3.])
>>> convolve_fft([1, 0, 3], [0, 1, 0])
array([ 1., 0., 3.])
>>> convolve_fft([1, 2, 3], [1])
array([ 1., 2., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate')
...
array([ 1., 0., 3.])
>>> convolve_fft([1, np.nan, 3], [0, 1, 0], nan_treatment='interpolate',
... min_wt=1e-8)
array([ 1., nan, 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate')
array([ 1., 4., 3.])
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True)
array([ 1., 2., 3.])
>>> import scipy.fftpack # optional - requires scipy
>>> convolve_fft([1, np.nan, 3], [1, 1, 1], nan_treatment='interpolate',
... normalize_kernel=True,
... fftn=scipy.fftpack.fft, ifftn=scipy.fftpack.ifft)
array([ 1., 2., 3.])
"""
# Checking copied from convolve.py - however, since FFTs have real &
# complex components, we change the types. Only the real part will be
# returned! Note that this always makes a copy.
# Check kernel is kernel instance
if isinstance(kernel, Kernel):
kernel = kernel.array
if isinstance(array, Kernel):
raise TypeError("Can't convolve two kernels with convolve_fft. "
"Use convolve instead.")
if nan_treatment not in ('interpolate', 'fill'):
raise ValueError("nan_treatment must be one of 'interpolate','fill'")
#Get array quantity if it exists
array_unit = getattr(array, "unit", None)
# Convert array dtype to complex
# and ensure that list inputs become arrays
array = _copy_input_if_needed(array, dtype=complex, order='C',
nan_treatment=nan_treatment, mask=mask,
fill_value=np.nan)
kernel = _copy_input_if_needed(kernel, dtype=complex, order='C',
nan_treatment=None, mask=None,
fill_value=0)
# Check that the number of dimensions is compatible
if array.ndim != kernel.ndim:
raise ValueError("Image and kernel must have same number of "
"dimensions")
arrayshape = array.shape
kernshape = kernel.shape
array_size_B = (np.product(arrayshape, dtype=np.int64) *
np.dtype(complex_dtype).itemsize)*u.byte
if array_size_B > 1*u.GB and not allow_huge:
raise ValueError("Size Error: Arrays will be {}. Use "
"allow_huge=True to override this exception."
.format(human_file_size(array_size_B.to_value(u.byte))))
# NaN and inf catching
nanmaskarray = np.isnan(array) | np.isinf(array)
if nan_treatment == 'fill':
array[nanmaskarray] = fill_value
else:
array[nanmaskarray] = 0
nanmaskkernel = np.isnan(kernel) | np.isinf(kernel)
kernel[nanmaskkernel] = 0
if normalize_kernel is True:
if kernel.sum() < 1. / MAX_NORMALIZATION:
raise Exception("The kernel can't be normalized, because its sum is "
"close to zero. The sum of the given kernel is < {}"
.format(1. / MAX_NORMALIZATION))
kernel_scale = kernel.sum()
normalized_kernel = kernel / kernel_scale
kernel_scale = 1 # if we want to normalize it, leave it normed!
elif normalize_kernel:
# try this. If a function is not passed, the code will just crash... I
# think type checking would be better but PEPs say otherwise...
kernel_scale = normalize_kernel(kernel)
normalized_kernel = kernel / kernel_scale
else:
kernel_scale = kernel.sum()
if np.abs(kernel_scale) < normalization_zero_tol:
if nan_treatment == 'interpolate':
raise ValueError('Cannot interpolate NaNs with an unnormalizable kernel')
else:
# the kernel's sum is near-zero, so it can't be scaled
kernel_scale = 1
normalized_kernel = kernel
else:
# the kernel is normalizable; we'll temporarily normalize it
# now and undo the normalization later.
normalized_kernel = kernel / kernel_scale
if boundary is None:
warnings.warn("The convolve_fft version of boundary=None is "
"equivalent to the convolve boundary='fill'. There is "
"no FFT equivalent to convolve's "
"zero-if-kernel-leaves-boundary", AstropyUserWarning)
if psf_pad is None:
psf_pad = True
if fft_pad is None:
fft_pad = True
elif boundary == 'fill':
# create a boundary region at least as large as the kernel
if psf_pad is False:
warnings.warn("psf_pad was set to {}, which overrides the "
"boundary='fill' setting.".format(psf_pad),
AstropyUserWarning)
else:
psf_pad = True
if fft_pad is None:
# default is 'True' according to the docstring
fft_pad = True
elif boundary == 'wrap':
if psf_pad:
raise ValueError("With boundary='wrap', psf_pad cannot be enabled.")
psf_pad = False
if fft_pad:
raise ValueError("With boundary='wrap', fft_pad cannot be enabled.")
fft_pad = False
fill_value = 0 # force zero; it should not be used
elif boundary == 'extend':
raise NotImplementedError("The 'extend' option is not implemented "
"for fft-based convolution")
# find ideal size (power of 2) for fft.
# Can add shapes because they are tuples
if fft_pad: # default=True
if psf_pad: # default=False
# add the dimensions and then take the max (bigger)
fsize = 2 ** np.ceil(np.log2(
np.max(np.array(arrayshape) + np.array(kernshape))))
else:
# add the shape lists (max of a list of length 4) (smaller)
# also makes the shapes square
fsize = 2 ** np.ceil(np.log2(np.max(arrayshape + kernshape)))
newshape = np.full((array.ndim, ), fsize, dtype=int)
else:
if psf_pad:
# just add the biggest dimensions
newshape = np.array(arrayshape) + np.array(kernshape)
else:
newshape = np.array([np.max([imsh, kernsh])
for imsh, kernsh in zip(arrayshape, kernshape)])
# perform a second check after padding
array_size_C = (np.product(newshape, dtype=np.int64) *
np.dtype(complex_dtype).itemsize)*u.byte
if array_size_C > 1*u.GB and not allow_huge:
raise ValueError("Size Error: Arrays will be {}. Use "
"allow_huge=True to override this exception."
.format(human_file_size(array_size_C)))
# For future reference, this can be used to predict "almost exactly"
# how much *additional* memory will be used.
# size * (array + kernel + kernelfft + arrayfft +
# (kernel*array)fft +
# optional(weight image + weight_fft + weight_ifft) +
# optional(returned_fft))
# total_memory_used_GB = (np.product(newshape)*np.dtype(complex_dtype).itemsize
# * (5 + 3*((interpolate_nan or ) and kernel_is_normalized))
# + (1 + (not return_fft)) *
# np.product(arrayshape)*np.dtype(complex_dtype).itemsize
# + np.product(arrayshape)*np.dtype(bool).itemsize
# + np.product(kernshape)*np.dtype(bool).itemsize)
# ) / 1024.**3
# separate each dimension by the padding size... this is to determine the
# appropriate slice size to get back to the input dimensions
arrayslices = []
kernslices = []
for ii, (newdimsize, arraydimsize, kerndimsize) in enumerate(zip(newshape, arrayshape, kernshape)):
center = newdimsize - (newdimsize + 1) // 2
arrayslices += [slice(center - arraydimsize // 2,
center + (arraydimsize + 1) // 2)]
kernslices += [slice(center - kerndimsize // 2,
center + (kerndimsize + 1) // 2)]
arrayslices = tuple(arrayslices)
kernslices = tuple(kernslices)
if not np.all(newshape == arrayshape):
if np.isfinite(fill_value):
bigarray = np.ones(newshape, dtype=complex_dtype) * fill_value
else:
bigarray = np.zeros(newshape, dtype=complex_dtype)
bigarray[arrayslices] = array
else:
bigarray = array
if not np.all(newshape == kernshape):
bigkernel = np.zeros(newshape, dtype=complex_dtype)
bigkernel[kernslices] = normalized_kernel
else:
bigkernel = normalized_kernel
arrayfft = fftn(bigarray)
# need to shift the kernel so that, e.g., [0,0,1,0] -> [1,0,0,0] = unity
kernfft = fftn(np.fft.ifftshift(bigkernel))
fftmult = arrayfft * kernfft
interpolate_nan = (nan_treatment == 'interpolate')
if interpolate_nan:
if not np.isfinite(fill_value):
bigimwt = np.zeros(newshape, dtype=complex_dtype)
else:
bigimwt = np.ones(newshape, dtype=complex_dtype)
bigimwt[arrayslices] = 1.0 - nanmaskarray * interpolate_nan
wtfft = fftn(bigimwt)
# You can only get to this point if kernel_is_normalized
wtfftmult = wtfft * kernfft
wtsm = ifftn(wtfftmult)
# need to re-zero weights outside of the image (if it is padded, we
# still don't weight those regions)
bigimwt[arrayslices] = wtsm.real[arrayslices]
else:
bigimwt = 1
if np.isnan(fftmult).any():
# this check should be unnecessary; call it an insanity check
raise ValueError("Encountered NaNs in convolve. This is disallowed.")
fftmult *= kernel_scale
if array_unit is not None:
fftmult <<= array_unit
if return_fft:
return fftmult
if interpolate_nan:
with np.errstate(divide='ignore', invalid='ignore'):
# divide by zeros are expected here; if the weight is zero, we want
# the output to be nan or inf
rifft = (ifftn(fftmult)) / bigimwt
if not np.isscalar(bigimwt):
if min_wt > 0.:
rifft[bigimwt < min_wt] = np.nan
else:
# Set anything with no weight to zero (taking into account
# slight offsets due to floating-point errors).
rifft[bigimwt < 10 * np.finfo(bigimwt.dtype).eps] = 0.0
else:
rifft = ifftn(fftmult)
if preserve_nan:
rifft[arrayslices][nanmaskarray] = np.nan
if crop:
result = rifft[arrayslices].real
return result
else:
return rifft.real
def interpolate_replace_nans(array, kernel, convolve=convolve, **kwargs):
    """
    Given a data set containing NaNs, replace the NaNs by interpolating from
    neighboring data points with a given kernel.

    Parameters
    ----------
    array : `numpy.ndarray`
        Array to be convolved with ``kernel``. It can be of any
        dimensionality, though only 1, 2, and 3d arrays have been tested.
    kernel : `numpy.ndarray` or `astropy.convolution.Kernel`
        The convolution kernel. The number of dimensions should match those
        for the array. The dimensions *do not* have to be odd in all
        directions, unlike in the non-fft `convolve` function. The kernel
        will be normalized if ``normalize_kernel`` is set. It is assumed to
        be centered (i.e., shifts may result if your kernel is asymmetric).
        The kernel *must be normalizable* (i.e., its sum cannot be zero).
    convolve : `convolve` or `convolve_fft`
        One of the two convolution functions defined in this package.

    Returns
    -------
    newarray : `numpy.ndarray`
        A copy of the original array with NaN pixels replaced with their
        interpolated counterparts
    """
    nan_mask = np.isnan(array)

    # Nothing to interpolate: hand back an independent copy unchanged.
    if not nan_mask.any():
        return array.copy()

    # Convolve with NaN-interpolation enabled; the smoothed values at the
    # NaN positions are exactly the interpolated replacements we need.
    smoothed = convolve(array, kernel, nan_treatment='interpolate',
                        normalize_kernel=True, preserve_nan=False, **kwargs)

    # Only the NaN pixels are replaced; finite pixels keep their values.
    result = array.copy()
    result[nan_mask] = smoothed[nan_mask]
    return result
def convolve_models(model, kernel, mode='convolve_fft', **kwargs):
    """
    Convolve two models using `~astropy.convolution.convolve_fft`.

    Parameters
    ----------
    model : `~astropy.modeling.core.Model`
        Functional model
    kernel : `~astropy.modeling.core.Model`
        Convolution kernel
    mode : str
        Keyword representing which function to use for convolution.
            * 'convolve_fft' : use `~astropy.convolution.convolve_fft` function.
            * 'convolve' : use `~astropy.convolution.convolve`.
    kwargs : dict
        Keyword arguments to be passed either to `~astropy.convolution.convolve`
        or `~astropy.convolution.convolve_fft` depending on ``mode``.

    Returns
    -------
    default : CompoundModel
        Convolved model
    """
    # Register the requested convolution function (with the user's kwargs
    # pre-bound) as a special operator; the returned CompoundModel refers to
    # it by the ``mode`` name.
    if mode == 'convolve_fft':
        SPECIAL_OPERATORS['convolve_fft'] = partial(convolve_fft, **kwargs)
    elif mode == 'convolve':
        SPECIAL_OPERATORS['convolve'] = partial(convolve, **kwargs)
    else:
        raise ValueError(f'Mode {mode} is not supported.')
    return CompoundModel(mode, model, kernel)
| |
# Copyright 2014 CloudFounders NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file violates a lot of PEP8-rules which are required to work with the non-PEP8 compliant Arakoon client files
# Do not correct the violations unless you're sure what you're doing.
import os
import time
import signal
import subprocess
import ConfigParser
from arakoon.Arakoon import ArakoonClientConfig, ArakoonClient
from arakoon.ArakoonManagement import ArakoonManagement, ArakoonCluster, logging
# Root of the OpenvStorage configuration tree.
config_dir = '/opt/OpenvStorage/config'


def which_arakoon():
    """Return the name of the arakoon binary to invoke."""
    return 'arakoon'
class ArakoonManagementEx(ArakoonManagement):
    """
    Extension of Incubaid's ArakoonManagement class for OpenvStorage.
    """

    def __init__(self):
        """
        No-op constructor; the base initializer is deliberately not invoked.
        """
        pass

    def getCluster(self, cluster_name):
        """
        Returns a configuration helper for the given cluster.
        @type cluster_name: string
        @return a helper to config that cluster
        """
        return ArakoonClusterEx(cluster_name)

    def listClusters(self):
        """
        Returns a list with the existing clusters.
        """
        return os.listdir(os.path.join(config_dir, 'arakoon'))
class ArakoonClusterEx(ArakoonCluster):
    """
    Overrides Incubaid's ArakoonCluster class.
    A few remarks:
    * Don't call super, as it makes a lot of assumptions
    * Make sure to validate all inherited calls before usage, as they might not work, or make wrong assumptions
    """

    def __init__(self, cluster_name):
        """
        Initialize cluster constructor.
        """
        self.__validateName(cluster_name)
        # There's a difference between the clusterId and the cluster's name.
        # The name is used to construct the path to find the config file.
        # the id is what's inside the cfg file and what you need to provide to a client that want's to talk to the cluster.
        self._clusterName = cluster_name
        self._binary = which_arakoon()
        self._arakoonDir = '{0}/arakoon'.format(config_dir)

    def __validateName(self, name):
        """
        Validates a cluster/node name: must be a non-empty string without
        spaces, commas or hash characters.
        """
        if name is None or name.strip() == '':
            raise Exception('A name should be passed. An empty name is not an option')
        if not isinstance(name, str):
            raise Exception('Name should be of type string')
        for char in [' ', ',', '#']:
            if char in name:
                raise Exception('Name should not contain %s' % char)

    @staticmethod
    def _read_config_file(path):
        """
        Reads a config file
        """
        c_parser = ConfigParser.ConfigParser()
        c_parser.read(path)
        # Stash the origin path so _write_config_file can save it back later.
        c_parser._path = path
        return c_parser

    @staticmethod
    def _write_config_file(config_object):
        """
        Writes a configuration object (back to the path recorded by
        _read_config_file, or an explicitly assigned ``_path``).
        """
        if not hasattr(config_object, '_path'):
            raise RuntimeError('The given configuration object does not contain a _path')
        with open(config_object._path, 'wb') as config_file:
            config_object.write(config_file)

    def _getConfigFilePath(self):
        """
        Returns the directory holding this cluster's configuration files.
        """
        return '{0}/{1}'.format(self._arakoonDir, self._clusterName)

    def _getConfigFile(self):
        """
        Returns the parsed server configuration (<cluster>/<cluster>.cfg).
        """
        return ArakoonClusterEx._read_config_file('{0}/{1}.cfg'.format(self._getConfigFilePath(), self._clusterName))

    def _getClientConfigFile(self):
        """
        Returns the parsed client configuration (<cluster>/<cluster>_client.cfg).
        """
        return ArakoonClusterEx._read_config_file('{0}/{1}_client.cfg'.format(self._getConfigFilePath(), self._clusterName))

    def _changeTlogCompression(self, nodes, value):
        """
        Sets the 'tlog_compression' option to ``value`` for the given nodes
        (all nodes when ``nodes`` is None).
        """
        if nodes is None:
            nodes = self.listNodes()
        else:
            for n in nodes:
                self.__validateName(n)
        config = self._getConfigFile()
        for n in nodes:
            # The legacy boolean flag is superseded by 'tlog_compression'.
            if 'disable_tlog_compression' in config.options(n):
                config.remove_option(n, 'disable_tlog_compression')
            config.set(n, 'tlog_compression', value)
        ArakoonClusterEx._write_config_file(config)

    def enableTlogCompression(self, nodes=None, compressor='bz2'):
        """
        Enables tlog compression for the given nodes (this is enabled by default)
        @param nodes List of node names
        @param compressor one of 'bz2', 'snappy', 'none'
        """
        self._changeTlogCompression(nodes, compressor)

    def _changeFsync(self, nodes, value):
        """
        Sets the 'fsync' option to ``value`` for the given nodes
        (all nodes when ``nodes`` is None).
        """
        if nodes is None:
            nodes = self.listNodes()
        else:
            for n in nodes:
                self.__validateName(n)
        config = self._getConfigFile()
        for node in nodes:
            config.set(node, 'fsync', value)
        ArakoonClusterEx._write_config_file(config)

    def getClientConfig(self):
        """
        Get an object that contains all node information in the supplied cluster
        @return dict the dict can be used as param for the ArakoonConfig object
        """
        config = self._getClientConfigFile()
        clientconfig = dict()
        nodes = self.__getNodes(config)
        for name in nodes:
            # The 'ip' option may hold a comma-separated list of addresses.
            ips = config.get(name, 'ip')
            ip_list = ips.split(',')
            port = int(config.get(name, 'client_port'))
            clientconfig[name] = (ip_list, port)
        return clientconfig

    def getClient(self):
        """
        Returns an ArakoonClient configured for this cluster.
        """
        config = self.getClientConfig()
        client = ArakoonClient(ArakoonClientConfig(self._clusterName, config))
        return client

    def getNodeConfig(self, name):
        """
        Get the parameters of a node section
        @param name the name of the node
        @return dict keys and values of the nodes parameters
        """
        self.__validateName(name)
        config = self._getConfigFile()
        if config.has_section(name):
            return dict(config.items(name))
        else:
            raise Exception('No node with name %s configured' % name)

    def createDirs(self, name):
        """
        Create the Directories for a local arakoon node in the supplied cluster
        @param name: the name of the node as configured in the config file
        """
        self.__validateName(name)
        config = self._getConfigFile()
        if config.has_section(name):
            home = config.get(name, 'home')
            subprocess.call(['mkdir', '-p', home])
            # Optional directories are only created when configured.
            for option in ['tlog_dir', 'tlf_dir', 'head_dir']:
                if config.has_option(name, option):
                    option_dir = config.get(name, option)
                    subprocess.call(['mkdir', '-p', option_dir])
            log_dir = config.get(name, 'log_dir')
            subprocess.call(['mkdir', '-p', log_dir])
            return
        msg = 'No node %s configured' % name
        raise Exception(msg)

    def addLocalNode(self, name, config_filename=None):
        """
        Add a node to the list of nodes that have to be started locally
        from the supplied cluster
        @param name: the name of the node as configured in the config file
        @param config_filename: the filename to store the new config to (if none, the existing one is updated)
        """
        self.__validateName(name)
        config = self._getConfigFile()
        config_name = self._servernodes()
        # NOTE(review): membership test on a ConfigParser instance - confirm
        # this behaves as intended on the ConfigParser version in use.
        if name in config:
            config_name_path = os.path.join(self._clusterPath, config_name)
            nodesconfig = ArakoonClusterEx._read_config_file(config_name_path)
            if not nodesconfig.has_section('global'):
                nodesconfig.add_section('global')
                nodesconfig.set('global', 'cluster', '')
            nodes = self.__getNodes(nodesconfig)
            if name in nodes:
                raise Exception('node %s already present' % name)
            nodes.append(name)
            # NOTE(review): a Python list is stored here while __getNodes
            # parses a comma-separated string - verify the round-trip.
            nodesconfig.set('global', 'cluster', nodes)
            if config_filename:
                nodesconfig._path = config_filename
                if not os.path.exists(os.path.dirname(config_filename)):
                    os.makedirs(os.path.dirname(config_filename))
            ArakoonClusterEx._write_config_file(nodesconfig)
            return
        raise Exception('No node %s' % name)

    def listLocalNodes(self):
        """
        Get a list of the local nodes in the supplied cluster
        @return list of strings containing the node names
        """
        config_name = self._servernodes()
        config_name_path = '{0}/{1}.cfg'.format(self._getConfigFilePath(), config_name)
        config = ArakoonClusterEx._read_config_file(config_name_path)
        return self.__getNodes(config)

    def __getNodes(self, config):
        """
        Returns the node names listed in the comma-separated 'cluster'
        option of the 'global' section (empty list when absent).
        """
        if config.has_section('global') and config.has_option('global', 'cluster'):
            return [node.strip() for node in config.get('global', 'cluster').split(',')]
        return []

    def start(self, daemon=True):
        """
        start all nodes in the cluster
        """
        rcs = {}
        # Repair tlogs before (re)starting; stops running nodes if needed.
        from ovs.extensions.db.arakoon.CheckArakoonTlogMark import CheckArakoonTlogMark
        CheckArakoonTlogMark().fixtlogs(self._clusterName, always_stop=True)
        for name in self.listLocalNodes():
            rcs[name] = self._startOneEx(name, daemon)
        return rcs

    def _cmd(self, name):
        """
        Builds the command line used to start the given node.
        """
        r = [self._binary, '--node', name, '-config',
             '%s/%s.cfg' % (self._getConfigFilePath(), self._clusterName),
             '-start']
        return r

    def _startOneEx(self, name, daemon):
        """
        Starts a single node unless it is already running.
        Returns the subprocess return code, or None when already running.
        """
        if self._getStatusOne(name):
            return
        config = self.getNodeConfig(name)
        cmd = []
        if 'wrapper' in config:
            # Optional wrapper command is prefixed to the arakoon invocation.
            cmd = config['wrapper'].split(' ')
        command = self._cmd(name)
        cmd.extend(command)
        if daemon:
            cmd.append('-daemonize')
        logging.debug('calling: %s', str(cmd))
        return subprocess.call(cmd, close_fds=True)

    def _stopOne(self, name):
        """
        Stops a single node, escalating from repeated pkill over SIGUSR2
        (crash-log dump request) to a hard 'kill -9'.
        Returns 0 on a clean stop, 9 when a hard kill was required.
        """
        line = self._cmdLine(name)
        cmd = ['pkill', '-f', line]
        logging.debug("stopping '%s' with: %s" % (name, ' '.join(cmd)))
        rc = subprocess.call(cmd, close_fds=True)
        logging.debug('%s=>rc=%i' % (cmd, rc))
        i = 0
        while self._getStatusOne(name):
            rc = subprocess.call(cmd, close_fds=True)
            logging.debug('%s=>rc=%i' % (cmd, rc))
            time.sleep(1)
            i += 1
            logging.debug("'%s' is still running... waiting" % name)
            if i == 10:
                # After ten attempts, ask the process to dump crash log
                # information, then force-kill it.
                msg = "Requesting '%s' to dump crash log information" % name
                logging.debug(msg)
                subprocess.call(['pkill', '-%d' % signal.SIGUSR2, '-f', line], close_fds=True)
                time.sleep(1)
                logging.debug("stopping '%s' with kill -9" % name)
                rc = subprocess.call(['pkill', '-9', '-f', line], close_fds=True)
                if rc == 0:
                    # Signal to the caller that a hard kill was needed.
                    rc = 9
                cnt = 0
                while self._getStatusOne(name):
                    logging.debug("'%s' is STILL running... waiting" % name)
                    time.sleep(1)
                    cnt += 1
                    if cnt > 10:
                        break
                break
            else:
                subprocess.call(cmd, close_fds=True)
        if rc < 9:
            rc = 0  # might be we looped one time too many.
        return rc

    def _getStatusOne(self, name):
        """
        Returns True when exactly one matching process runs, False when none.
        Raises when multiple processes match (after logging their cmdlines).
        """
        line = self._cmdLine(name)
        cmd = ['pgrep', '-fn', line]
        proc = subprocess.Popen(cmd, close_fds=True, stdout=subprocess.PIPE)
        pids = proc.communicate()[0]
        pid_list = pids.split()
        lenp = len(pid_list)
        if lenp == 1:
            result = True
        elif lenp == 0:
            result = False
        else:
            # Multiple matches is an inconsistent state: log each candidate's
            # command line for debugging, then bail out.
            for pid in pid_list:
                try:
                    f = open('/proc/%s/cmdline' % pid, 'r')
                    startup = f.read()
                    f.close()
                    logging.debug('pid=%s; cmdline=%s', pid, startup)
                except:
                    pass
            raise Exception('multiple matches', pid_list)
        return result

    def writeClientConfig(self, config=None, config_filename=None):
        """
        Write Arakoon Cluster client config to file
        @param config: arakoon client config for this cluster (if none, will be retrieved from current cluster config)
        @param config_filename: the filename to store the config to (if none, the existing one is updated)
        """
        if not config_filename:
            client_config = self._getClientConfigFile()
        else:
            client_config = ArakoonClusterEx._read_config_file(config_filename)
        if not config:
            config = self.getClientConfig()
        if not client_config.has_section('global'):
            client_config.add_section('global')
        client_config.set('global', 'cluster_id', self._clusterName)
        client_config.set('global', 'cluster', config.keys())
        # Python 2 dict API (iteritems); values are (ip_list, client_port).
        for node, node_config in config.iteritems():
            if not client_config.has_section(node):
                client_config.add_section(node)
            client_config.set(node, 'name', node)
            client_config.set(node, 'ip', node_config[0][0])
            client_config.set(node, 'client_port', node_config[1])
        ArakoonClusterEx._write_config_file(client_config)
if __name__ == '__main__':
    # Command-line entry point: start or stop all local nodes of a cluster.
    from optparse import OptionParser

    option_parser = OptionParser(description='Arakoon Management')
    option_parser.add_option('--stop', dest='start_stop', action='store_false', default=None, help='Stop arakoon')
    option_parser.add_option('--start', dest='start_stop', action='store_true', default=None, help='Start arakoon')
    option_parser.add_option('-c', '--cluster', dest='cluster', help='Name of arakoon cluster')
    opts, _ = option_parser.parse_args()

    if not opts.cluster:
        option_parser.error('No arakoon cluster specified')
    if opts.start_stop is None:
        option_parser.error('No action specified')

    cluster = ArakoonManagementEx().getCluster(opts.cluster)
    if opts.start_stop:
        cluster.start(False)
    else:
        cluster.stop()
| |
"""Tests for certbot.util."""
import argparse
import errno
from importlib import reload as reload_module
import io
import sys
import unittest
from certbot import errors
from certbot.compat import filesystem
from certbot.compat import os
import certbot.tests.util as test_util
try:
import mock
except ImportError: # pragma: no cover
from unittest import mock
class EnvNoSnapForExternalCallsTest(unittest.TestCase):
    """Tests for certbot.util.env_no_snap_for_external_calls."""

    @classmethod
    def _call(cls):
        from certbot.util import env_no_snap_for_external_calls
        return env_no_snap_for_external_calls()

    def test_removed(self):
        # When both snap markers are set, the snap prefix is stripped from PATH.
        real_path = os.environ['PATH']
        fake_env = os.environ.copy()
        fake_env['PATH'] = 'RANDOM_NONSENSE_GARBAGE/blah/blah:' + real_path
        fake_env['SNAP'] = 'RANDOM_NONSENSE_GARBAGE'
        fake_env['CERTBOT_SNAPPED'] = 'True'
        with mock.patch('certbot.compat.os.environ.copy', return_value=fake_env):
            self.assertEqual(self._call()['PATH'], real_path)

    def test_noop(self):
        # PATH stays untouched unless BOTH snap markers are present.
        pristine = os.environ.copy()
        pristine['PATH'] = 'RANDOM_NONSENSE_GARBAGE/blah/blah:' + pristine['PATH']
        fake_env = pristine.copy()
        with mock.patch('certbot.compat.os.environ.copy', return_value=fake_env):
            # contains neither necessary key
            fake_env.pop('SNAP', None)
            fake_env.pop('CERTBOT_SNAPPED', None)
            self.assertEqual(self._call()['PATH'], pristine['PATH'])
            # contains only one necessary key
            fake_env['SNAP'] = 'RANDOM_NONSENSE_GARBAGE'
            self.assertEqual(self._call()['PATH'], pristine['PATH'])
            del fake_env['SNAP']
            fake_env['CERTBOT_SNAPPED'] = 'True'
            self.assertEqual(self._call()['PATH'], pristine['PATH'])
class RunScriptTest(unittest.TestCase):
    """Tests for certbot.util.run_script."""

    @classmethod
    def _call(cls, params):
        from certbot.util import run_script
        return run_script(params)

    @mock.patch("certbot.util.subprocess.run")
    def test_default(self, mock_run):
        """These will be changed soon enough with reload."""
        mock_run().returncode = 0
        mock_run().stdout = "stdout"
        mock_run().stderr = "stderr"
        stdout, stderr = self._call(["test"])
        self.assertEqual(stdout, "stdout")
        self.assertEqual(stderr, "stderr")

    @mock.patch("certbot.util.subprocess.run")
    def test_bad_process(self, mock_run):
        # An OSError from subprocess must surface as SubprocessError.
        mock_run.side_effect = OSError
        with self.assertRaises(errors.SubprocessError):
            self._call(["test"])

    @mock.patch("certbot.util.subprocess.run")
    def test_failure(self, mock_run):
        # A non-zero exit status is reported as SubprocessError as well.
        mock_run().returncode = 1
        with self.assertRaises(errors.SubprocessError):
            self._call(["test"])
class ExeExistsTest(unittest.TestCase):
    """Tests for certbot.util.exe_exists."""

    @classmethod
    def _call(cls, exe):
        from certbot.util import exe_exists
        return exe_exists(exe)

    def test_exe_exists(self):
        with mock.patch("certbot.util.filesystem.is_executable") as mock_is_exec:
            mock_is_exec.return_value = True
            self.assertTrue(self._call("/path/to/exe"))

    def test_exe_not_exists(self):
        with mock.patch("certbot.util.filesystem.is_executable") as mock_is_exec:
            mock_is_exec.return_value = False
            self.assertFalse(self._call("/path/to/exe"))
class LockDirUntilExit(test_util.TempDirTestCase):
    """Tests for certbot.util.lock_dir_until_exit."""

    @classmethod
    def _call(cls, *args, **kwargs):
        from certbot.util import lock_dir_until_exit
        return lock_dir_until_exit(*args, **kwargs)

    def setUp(self):
        super().setUp()
        # reset global state from other tests
        import certbot.util
        reload_module(certbot.util)

    @mock.patch('certbot.util.logger')
    @mock.patch('certbot.util.atexit_register')
    def test_it(self, mock_register, mock_logger):
        # Locking three times (twice for the same dir) must register the
        # atexit cleanup hook exactly once and track each dir only once.
        subdir = os.path.join(self.tempdir, 'subdir')
        filesystem.mkdir(subdir)
        self._call(self.tempdir)
        self._call(subdir)
        self._call(subdir)
        self.assertEqual(mock_register.call_count, 1)
        registered_func = mock_register.call_args[0][0]
        from certbot import util
        # Despite lock_dir_until_exit has been called twice to subdir, its lock should have been
        # added only once. So we expect to have two lock references: for self.tempdir and subdir
        self.assertEqual(len(util._LOCKS), 2)  # pylint: disable=protected-access
        registered_func()  # Exception should not be raised
        # Logically, logger.debug, that would be invoked in case of unlock failure,
        # should never been called.
        self.assertEqual(mock_logger.debug.call_count, 0)
class SetUpCoreDirTest(test_util.TempDirTestCase):
    """Tests for certbot.util.make_or_verify_core_dir."""

    def _call(self, *args, **kwargs):
        from certbot.util import set_up_core_dir
        return set_up_core_dir(*args, **kwargs)

    @mock.patch('certbot.util.lock_dir_until_exit')
    def test_success(self, mock_lock):
        # The directory is created and locked exactly once.
        target = os.path.join(self.tempdir, 'new')
        self._call(target, 0o700, False)
        self.assertTrue(os.path.exists(target))
        self.assertEqual(mock_lock.call_count, 1)

    @mock.patch('certbot.util.make_or_verify_dir')
    def test_failure(self, mock_make_or_verify):
        # OS-level failures are wrapped in certbot's own Error type.
        mock_make_or_verify.side_effect = OSError
        with self.assertRaises(errors.Error):
            self._call(self.tempdir, 0o700, False)
class MakeOrVerifyDirTest(test_util.TempDirTestCase):
    """Tests for certbot.util.make_or_verify_dir.
    Note that it is not possible to test for a wrong directory owner,
    as this testing script would have to be run as root.
    """

    def setUp(self):
        super().setUp()
        # Pre-create a directory with a known mode for the "existing" cases.
        self.existing_dir = os.path.join(self.tempdir, "foo")
        filesystem.mkdir(self.existing_dir, 0o600)

    def _call(self, directory, mode):
        from certbot.util import make_or_verify_dir
        return make_or_verify_dir(directory, mode, strict=True)

    def test_creates_dir_when_missing(self):
        new_dir = os.path.join(self.tempdir, "bar")
        self._call(new_dir, 0o650)
        self.assertTrue(os.path.isdir(new_dir))
        self.assertTrue(filesystem.check_mode(new_dir, 0o650))

    def test_existing_correct_mode_does_not_fail(self):
        self._call(self.existing_dir, 0o600)
        self.assertTrue(filesystem.check_mode(self.existing_dir, 0o600))

    def test_existing_wrong_mode_fails(self):
        with self.assertRaises(errors.Error):
            self._call(self.existing_dir, 0o400)

    def test_reraises_os_error(self):
        with mock.patch.object(filesystem, "makedirs") as mock_makedirs:
            mock_makedirs.side_effect = OSError()
            with self.assertRaises(OSError):
                self._call("bar", 12312312)
class UniqueFileTest(test_util.TempDirTestCase):
    """Tests for certbot.util.unique_file."""

    def setUp(self):
        super().setUp()
        self.default_name = os.path.join(self.tempdir, "foo.txt")

    def _call(self, mode=0o600):
        from certbot.util import unique_file
        return unique_file(self.default_name, mode)

    def test_returns_fd_for_writing(self):
        fd, name = self._call()
        fd.write("bar")
        fd.close()
        with open(name) as f:
            self.assertEqual(f.read(), "bar")

    def test_right_mode(self):
        # Each call must honor the mode it was given.
        fd1, name1 = self._call(0o700)
        fd2, name2 = self._call(0o600)
        self.assertTrue(filesystem.check_mode(name1, 0o700))
        self.assertTrue(filesystem.check_mode(name2, 0o600))
        fd1.close()
        fd2.close()

    def test_default_exists(self):
        # Repeated calls must produce distinct files in the same directory,
        # each ending with the requested basename.
        fd1, name1 = self._call()  # create 0000_foo.txt
        fd2, name2 = self._call()
        fd3, name3 = self._call()
        self.assertNotEqual(name1, name2)
        self.assertNotEqual(name1, name3)
        self.assertNotEqual(name2, name3)
        self.assertEqual(os.path.dirname(name1), self.tempdir)
        self.assertEqual(os.path.dirname(name2), self.tempdir)
        self.assertEqual(os.path.dirname(name3), self.tempdir)
        # BUG FIX: previously this read os.path.basename(name2), so name1's
        # basename was never actually checked.
        basename1 = os.path.basename(name1)
        self.assertTrue(basename1.endswith("foo.txt"))
        basename2 = os.path.basename(name2)
        self.assertTrue(basename2.endswith("foo.txt"))
        basename3 = os.path.basename(name3)
        self.assertTrue(basename3.endswith("foo.txt"))
        fd1.close()
        fd2.close()
        fd3.close()
# On Python 3 the builtin ``file`` no longer exists; fall back to
# ``io.TextIOWrapper``, the type returned by ``open`` in text mode.
# (The redundant local ``import io`` was removed - io is imported at the
# top of this module.)
try:
    file_type = file  # Python 2 builtin
except NameError:
    file_type = io.TextIOWrapper  # type: ignore
class UniqueLineageNameTest(test_util.TempDirTestCase):
    """Tests for certbot.util.unique_lineage_name."""

    def _call(self, filename, mode=0o777):
        from certbot.util import unique_lineage_name
        return unique_lineage_name(self.tempdir, filename, mode)

    def test_basic(self):
        handle, path = self._call("wow")
        self.assertIsInstance(handle, file_type)
        self.assertEqual(os.path.join(self.tempdir, "wow.conf"), path)
        handle.close()

    def test_multiple(self):
        # Ten calls for the same name must yield numbered variants.
        created = [self._call("wow") for _ in range(10)]
        handle, name = created[-1]
        self.assertIsInstance(handle, file_type)
        self.assertIsInstance(name, str)
        self.assertIn("wow-0009.conf", name)
        for handle, _ in created:
            handle.close()

    def test_failure(self):
        with mock.patch("certbot.compat.filesystem.open",
                        side_effect=OSError(errno.EIO)):
            with self.assertRaises(OSError):
                self._call("wow")
class SafelyRemoveTest(test_util.TempDirTestCase):
    """Tests for certbot.util.safely_remove."""

    def setUp(self):
        super().setUp()
        self.path = os.path.join(self.tempdir, "foo")

    def _call(self):
        from certbot.util import safely_remove
        return safely_remove(self.path)

    def test_exists(self):
        # Create an empty file, then remove it.
        open(self.path, "w").close()
        self._call()
        self.assertFalse(os.path.exists(self.path))

    def test_missing(self):
        # Removing a non-existent path must not raise.
        self._call()
        self.assertFalse(os.path.exists(self.path))

    def test_other_error_passthrough(self):
        # Errors other than "missing file" must propagate unchanged.
        with mock.patch("certbot.util.os.remove") as mock_remove:
            mock_remove.side_effect = OSError
            with self.assertRaises(OSError):
                self._call()
class SafeEmailTest(unittest.TestCase):
    """Test safe_email."""

    @classmethod
    def _call(cls, addr):
        from certbot.util import safe_email
        return safe_email(addr)

    def test_valid_emails(self):
        for addr in ("certbot@certbot.org",
                     "tbd.ade@gmail.com",
                     "abc_def.jdk@hotmail.museum"):
            self.assertTrue(self._call(addr), "%s failed." % addr)

    def test_invalid_emails(self):
        for addr in ("certbot@certbot..org",
                     ".tbd.ade@gmail.com",
                     "~/abc_def.jdk@hotmail.museum"):
            self.assertFalse(self._call(addr), "%s failed." % addr)
class AddDeprecatedArgumentTest(unittest.TestCase):
    """Test add_deprecated_argument."""

    def setUp(self):
        self.parser = argparse.ArgumentParser()

    def _call(self, argument_name, nargs):
        from certbot.util import add_deprecated_argument
        add_deprecated_argument(self.parser.add_argument, argument_name, nargs)

    def test_warning_no_arg(self):
        # Using a deprecated zero-arg flag must emit exactly one warning
        # that names the flag.
        self._call("--old-option", 0)
        with mock.patch("warnings.warn") as mock_warn:
            self.parser.parse_args(["--old-option"])
        self.assertEqual(mock_warn.call_count, 1)
        self.assertIn("is deprecated", mock_warn.call_args[0][0])
        self.assertIn("--old-option", mock_warn.call_args[0][0])

    def test_warning_with_arg(self):
        # Same warning behavior when the deprecated flag consumes a value.
        self._call("--old-option", 1)
        with mock.patch("warnings.warn") as mock_warn:
            self.parser.parse_args(["--old-option", "42"])
        self.assertEqual(mock_warn.call_count, 1)
        self.assertIn("is deprecated", mock_warn.call_args[0][0])
        self.assertIn("--old-option", mock_warn.call_args[0][0])

    def test_help(self):
        # Deprecated options must be hidden from -h/--help output.
        self._call("--old-option", 2)
        stdout = io.StringIO()
        with mock.patch("sys.stdout", new=stdout):
            try:
                self.parser.parse_args(["-h"])
            except SystemExit:
                pass
        self.assertNotIn("--old-option", stdout.getvalue())

    def test_set_constant(self):
        """Test when ACTION_TYPES_THAT_DONT_NEED_A_VALUE is a set.
        This variable is a set in configargparse versions < 0.12.0.
        """
        self._test_constant_common(set)

    def test_tuple_constant(self):
        """Test when ACTION_TYPES_THAT_DONT_NEED_A_VALUE is a tuple.
        This variable is a tuple in configargparse versions >= 0.12.0.
        """
        self._test_constant_common(tuple)

    def _test_constant_common(self, typ):
        # The deprecated action type must be registered with configargparse
        # exactly once, no matter how many deprecated arguments are added.
        with mock.patch("certbot.util.configargparse") as mock_configargparse:
            mock_configargparse.ACTION_TYPES_THAT_DONT_NEED_A_VALUE = typ()
            self._call("--old-option", 1)
            self._call("--old-option2", 2)
        self.assertEqual(
            len(mock_configargparse.ACTION_TYPES_THAT_DONT_NEED_A_VALUE), 1)
class EnforceLeValidity(unittest.TestCase):
    """Tests for certbot.util.enforce_le_validity."""

    def _call(self, domain):
        from certbot.util import enforce_le_validity
        return enforce_le_validity(domain)

    def _assert_rejected(self, domain):
        # Names Let's Encrypt cannot issue for must raise ConfigurationError.
        self.assertRaises(errors.ConfigurationError, self._call, domain)

    def test_sanity(self):
        self._assert_rejected(u"..")

    def test_invalid_chars(self):
        self._assert_rejected(u"hello_world.example.com")

    def test_leading_hyphen(self):
        self._assert_rejected(u"-a.example.com")

    def test_trailing_hyphen(self):
        self._assert_rejected(u"a-.example.com")

    def test_one_label(self):
        self._assert_rejected(u"com")

    def test_valid_domain(self):
        self.assertEqual(self._call(u"example.com"), u"example.com")

    def test_input_with_scheme(self):
        for url in (u"http://example.com", u"https://example.com"):
            self._assert_rejected(url)

    def test_valid_input_with_scheme_name(self):
        # "http" is a legal label when it is not used as a URL scheme.
        self.assertEqual(self._call(u"http.example.com"), u"http.example.com")
class EnforceDomainSanityTest(unittest.TestCase):
    """Tests for certbot.util.enforce_domain_sanity."""

    def _call(self, domain):
        from certbot.util import enforce_domain_sanity
        return enforce_domain_sanity(domain)

    def _assert_rejected(self, domain):
        self.assertRaises(errors.ConfigurationError, self._call, domain)

    def test_nonascii_str(self):
        self._assert_rejected(u"eichh\u00f6rnchen.example.com".encode("utf-8"))

    def test_nonascii_unicode(self):
        self._assert_rejected(u"eichh\u00f6rnchen.example.com")

    def test_too_long(self):
        # 256 characters exceeds the overall domain-name length limit.
        self._assert_rejected(u"a" * 256)

    def test_not_too_long(self):
        self._call(u"{0}.{1}.{2}.{3}".format("a" * 63, "b" * 63, "c" * 63, "d" * 63))

    def test_empty_label(self):
        self._assert_rejected(u"fizz..example.com")

    def test_empty_trailing_label(self):
        self._assert_rejected(u"example.com..")

    def test_long_label_1(self):
        # A single label may be at most 63 characters.
        self._assert_rejected(u"a" * 64)

    def test_long_label_2(self):
        self._assert_rejected(u"{0}.{1}.com".format(u"a" * 64, u"b" * 63))

    def test_not_long_label(self):
        self._call(u"{0}.{1}.com".format(u"a" * 63, u"b" * 63))

    def test_empty_domain(self):
        self._assert_rejected(u"")

    def test_punycode_ok(self):
        # Punycode is now legal, so no longer an error; instead check
        # that it's _not_ an error (at the initial sanity check stage)
        self._call('this.is.xn--ls8h.tld')
class IsWildcardDomainTest(unittest.TestCase):
    """Tests for is_wildcard_domain."""

    def setUp(self):
        self.wildcard = u"*.example.org"
        self.no_wildcard = u"example.org"

    def _call(self, domain):
        from certbot.util import is_wildcard_domain
        return is_wildcard_domain(domain)

    def test_no_wildcard(self):
        # Both str and bytes inputs must be handled.
        for candidate in (self.no_wildcard, self.no_wildcard.encode()):
            self.assertFalse(self._call(candidate))

    def test_wildcard(self):
        for candidate in (self.wildcard, self.wildcard.encode()):
            self.assertTrue(self._call(candidate))
class OsInfoTest(unittest.TestCase):
    """Test OS / distribution detection"""

    # Decorator order matters: the Linux skip guard wraps the distro mock,
    # so non-Linux platforms never construct the mock at all.
    @mock.patch("certbot.util.distro")
    @unittest.skipUnless(sys.platform.startswith("linux"), "requires Linux")
    def test_systemd_os_release_like(self, m_distro):
        """A space-separated ID_LIKE value is split into individual ids."""
        import certbot.util as cbutil
        m_distro.like.return_value = "first debian third"
        id_likes = cbutil.get_systemd_os_like()
        self.assertEqual(len(id_likes), 3)
        self.assertIn("debian", id_likes)

    @mock.patch("certbot.util.distro")
    @unittest.skipUnless(sys.platform.startswith("linux"), "requires Linux")
    def test_get_os_info_ua(self, m_distro):
        """get_os_info_ua falls back to get_python_os_info for empty names."""
        import certbot.util as cbutil
        with mock.patch('platform.system_alias',
                        return_value=('linux', '42', '42')):
            m_distro.version.return_value = "1.0"
            # empty value on first call for fallback to "get_python_os_info" in get_os_info_ua
            m_distro.name.side_effect = ["", "something", "something"]
            self.assertEqual(cbutil.get_os_info_ua(),
                             " ".join(cbutil.get_python_os_info(pretty=True)))
            # A non-empty distro name is returned as-is.
            m_distro.name.side_effect = ["whatever"]
            self.assertEqual(cbutil.get_os_info_ua(), "whatever")

    @mock.patch("certbot.util.distro")
    @unittest.skipUnless(sys.platform.startswith("linux"), "requires Linux")
    def test_get_os_info(self, m_distro):
        """get_os_info reports (distro id, distro version)."""
        import certbot.util as cbutil
        with mock.patch("platform.system") as mock_platform:
            m_distro.id.return_value = "name"
            m_distro.version.return_value = "version"
            mock_platform.return_value = "linux"
            self.assertEqual(cbutil.get_os_info(), ("name", "version"))
            m_distro.id.return_value = "something"
            m_distro.version.return_value = "else"
            self.assertEqual(cbutil.get_os_info(), ("something", "else"))

    def test_non_systemd_os_info(self):
        """Platform probing paths used when python-distro is unavailable."""
        import certbot.util as cbutil
        with mock.patch('certbot.util._USE_DISTRO', False):
            # Unknown systems: the platform name is simply lower-cased.
            with mock.patch('platform.system_alias',
                            return_value=('NonSystemD', '42', '42')):
                self.assertEqual(cbutil.get_python_os_info()[0], 'nonsystemd')

            # macOS: the version comes from a subprocess call.
            with mock.patch('platform.system_alias',
                            return_value=('darwin', '', '')):
                with mock.patch("subprocess.run") as run_mock:
                    run_mock().stdout = '42.42.42'
                    self.assertEqual(cbutil.get_python_os_info()[0], 'darwin')
                    self.assertEqual(cbutil.get_python_os_info()[1], '42.42.42')

            # FreeBSD: only the major version number is kept.
            with mock.patch('platform.system_alias',
                            return_value=('freebsd', '9.3-RC3-p1', '')):
                self.assertEqual(cbutil.get_python_os_info(), ("freebsd", "9"))

            # Windows: the release comes from win32_ver.
            with mock.patch('platform.system_alias',
                            return_value=('windows', '', '')):
                with mock.patch('platform.win32_ver',
                                return_value=('4242', '95', '2', '')):
                    self.assertEqual(cbutil.get_python_os_info(),
                                     ("windows", "95"))

    @mock.patch("certbot.util.distro")
    @unittest.skipUnless(sys.platform.startswith("linux"), "requires Linux")
    def test_python_os_info_notfound(self, m_distro):
        """An empty distro id falls back to the generic "linux" name."""
        import certbot.util as cbutil
        m_distro.id.return_value = ""
        m_distro.version.return_value = ""
        self.assertEqual(cbutil.get_python_os_info()[0], "linux")

    @mock.patch("certbot.util.distro")
    @unittest.skipUnless(sys.platform.startswith("linux"), "requires Linux")
    def test_python_os_info_custom(self, m_distro):
        """A custom distro id/version pair is passed through unchanged."""
        import certbot.util as cbutil
        m_distro.id.return_value = "testdist"
        m_distro.version.return_value = "42"
        self.assertEqual(cbutil.get_python_os_info(), ("testdist", "42"))
class GetStrictVersionTest(unittest.TestCase):
    """Tests for certbot.util.get_strict_version."""

    @classmethod
    def _call(cls, *args, **kwargs):
        from certbot.util import get_strict_version
        return get_strict_version(*args, **kwargs)

    def test_it(self):
        # The helper is deprecated and must warn with its own name.
        with self.assertWarnsRegex(DeprecationWarning, "get_strict_version"):
            self._call("1.2.3")
class AtexitRegisterTest(unittest.TestCase):
    """Tests for certbot.util.atexit_register."""

    def setUp(self):
        self.func = mock.MagicMock()
        self.args = ('hi',)
        self.kwargs = {'answer': 42}

    @classmethod
    def _call(cls, *args, **kwargs):
        from certbot.util import atexit_register
        return atexit_register(*args, **kwargs)

    def test_called(self):
        # Matching PID: the registered callback runs with the original
        # positional and keyword arguments.
        self._test_common(os.getpid())
        self.func.assert_called_with(*self.args, **self.kwargs)

    def test_not_called(self):
        # Mismatched PID (e.g. a forked child): the callback must not run.
        self._test_common(initial_pid=-1)
        self.assertIs(self.func.called, False)

    def _test_common(self, initial_pid):
        """Register self.func via atexit_register and invoke the wrapper."""
        with mock.patch('certbot.util._INITIAL_PID', initial_pid):
            with mock.patch('certbot.util.atexit') as mock_atexit:
                self._call(self.func, *self.args, **self.kwargs)
                # _INITIAL_PID must be mocked when calling atexit_func
                self.assertTrue(mock_atexit.register.called)
                args, kwargs = mock_atexit.register.call_args
                atexit_func = args[0]
                atexit_func(*args[1:], **kwargs)
class ParseLooseVersionTest(unittest.TestCase):
    """Test for certbot.util.parse_loose_version.

    These tests are based on the original tests for
    distutils.version.LooseVersion at
    https://github.com/python/cpython/blob/v3.10.0/Lib/distutils/tests/test_version.py#L58-L81.
    """

    @classmethod
    def _call(cls, *args, **kwargs):
        from certbot.util import parse_loose_version
        return parse_loose_version(*args, **kwargs)

    def test_less_than(self):
        for lower, higher in (('1.5.1', '1.5.2b2'),
                              ('3.4j', '1996.07.12'),
                              ('2g6', '11g'),
                              ('0.960923', '2.2beta29'),
                              ('1.13++', '5.5.kw')):
            self.assertLess(self._call(lower), self._call(higher))

    def test_equal(self):
        self.assertEqual(self._call('8.02'), self._call('8.02'))

    def test_greater_than(self):
        for bigger, smaller in (('161', '3.10a'),
                                ('3.2.pl0', '3.1.1.6')):
            self.assertGreater(self._call(bigger), self._call(smaller))
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()  # pragma: no cover
| |
# encoding: utf-8
from south.v2 import DataMigration
class Migration(DataMigration):
    """No-op South data migration.

    Both directions intentionally do nothing; this migration exists only to
    record the frozen ORM snapshot in ``models`` below, which South uses to
    build the fake ORM passed to subsequent migrations.
    """

    def forwards(self, orm):
        """Forward step: intentionally a no-op."""
        pass

    def backwards(self, orm):
        """Backward step: intentionally a no-op."""
        pass

    # Frozen model definitions auto-generated by South; do not edit by hand.
    models = {
        'webinars.account': {
            'Meta': {'object_name': 'Account'},
            'account_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.AccountType']"}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'extra': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_sync_job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.SyncJob']"}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'prevent_unformed_lead_import': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'webinars.accounttype': {
            'Meta': {'object_name': 'AccountType'},
            'can_api_create_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_api_load_event': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_api_register_user': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'can_api_report_views': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'extra_username_label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'listing_priority': ('django.db.models.fields.IntegerField', [], {}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'username_label': ('django.db.models.fields.CharField', [], {'max_length': '64'})
        },
        'webinars.cmsform': {
            'Meta': {'object_name': 'CmsForm'},
            'guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'primary_key': 'True'}),
            'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
            'is_sync_target': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'webinars.event': {
            'Meta': {'object_name': 'Event'},
            '_attended_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            '_attended_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            '_noshow_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            '_registered_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            '_registered_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
            'attended_campaign_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'cms_forms': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['webinars.CmsForm']", 'through': "orm['webinars.EventForm']", 'symmetrical': 'False'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'deleted_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '4096'}),
            'duration': ('django.db.models.fields.IntegerField', [], {}),
            'hashcode': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_sync_job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'null': 'True', 'to': "orm['webinars.SyncJob']"}),
            'mothballed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'noshow_campaign_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'registrants_synced_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'requested_registrants_sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'started_registrants_sync_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'sync_leads_for_all_time': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'time_starts_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'timezone_starts_at': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'unknowable_registrants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
        },
        'webinars.eventform': {
            'Meta': {'object_name': 'EventForm'},
            'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']"}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_last_converted_at': ('sanetime.dj.SaneTimeField', [], {'default': '0'})
        },
        'webinars.hub': {
            'Meta': {'object_name': 'Hub'},
            '_attended_any_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            '_attended_any_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            '_registered_any_criterium_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            '_registered_any_saved_search_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            '_timezone': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'events_synced_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.BigIntegerField', [], {'unique': 'True', 'primary_key': 'True'}),
            'requested_events_sync': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'started_events_sync_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'uninstalled_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
        },
        'webinars.hubspotregistrantsnapshot': {
            'Meta': {'object_name': 'HubSpotRegistrantSnapshot'},
            'attended_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'attended_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'hashcode': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'initial_form_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'registered_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'registered_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
        },
        'webinars.landingpage': {
            'Meta': {'object_name': 'LandingPage'},
            'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']"}),
            'form_title': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'webinars.registrant': {
            'Meta': {'object_name': 'Registrant'},
            'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']", 'null': 'True'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'deleted_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'hashcode': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'updated_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
        },
        'webinars.stagedhubspotregistrant': {
            'Meta': {'object_name': 'StagedHubSpotRegistrant'},
            'attended_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'attended_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'converted_at': ('sanetime.dj.SaneTimeField', [], {}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'form_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'hashcode': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'lead_guid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'null': 'True'}),
            'registered_any': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'registered_this': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
        },
        'webinars.stagedwebexevent': {
            'Meta': {'object_name': 'StagedWebexEvent'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
            'duration': ('django.db.models.fields.IntegerField', [], {}),
            'hashcode': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'session_key': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'time_starts_at': ('sanetime.dj.SaneTimeField', [], {}),
            'timezone_starts_at': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'webinars.stagedwebexregistrant': {
            'Meta': {'object_name': 'StagedWebexRegistrant'},
            'attendee_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'duration': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'hashcode': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
        },
        'webinars.syncjob': {
            'Meta': {'object_name': 'SyncJob'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']", 'null': 'True'}),
            'auto': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']", 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.SyncJob']", 'null': 'True'}),
            'staged_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'})
        },
        'webinars.syncshard': {
            'Meta': {'object_name': 'SyncShard'},
            'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'depth': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'section': ('django.db.models.fields.IntegerField', [], {}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'sync_job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.SyncJob']"})
        },
        'webinars.syncstage': {
            'Meta': {'object_name': 'SyncStage'},
            'cms_form': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.CmsForm']", 'null': 'True'}),
            'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kind': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
            'max_size': ('django.db.models.fields.IntegerField', [], {}),
            'offset': ('django.db.models.fields.IntegerField', [], {}),
            'size': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'subkind': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True'}),
            'sync_job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.SyncJob']"})
        },
        'webinars.task': {
            'Meta': {'object_name': 'Task'},
            'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'created_at': ('sanetime.dj.SaneTimeField', [], {'blank': 'True'}),
            'error': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']", 'null': 'True'}),
            'hub': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Hub']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'priority': ('django.db.models.fields.IntegerField', [], {}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'sync_all_registrants': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'sync_events': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'sync_specific_registrants': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'webinars.taskrunner': {
            'Meta': {'object_name': 'TaskRunner'},
            'completed_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
        },
        'webinars.webexeventsnapshot': {
            'Meta': {'object_name': 'WebexEventSnapshot'},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Account']"}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '4096', 'null': 'True'}),
            'duration': ('django.db.models.fields.IntegerField', [], {}),
            'hashcode': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'time_starts_at': ('sanetime.dj.SaneTimeField', [], {}),
            'timezone_starts_at': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'webinars.webexregistrantsnapshot': {
            'Meta': {'object_name': 'WebexRegistrantSnapshot'},
            'attended_for': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'email': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'event': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['webinars.Event']"}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'hashcode': ('django.db.models.fields.IntegerField', [], {}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
            'remote_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
            'started_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'}),
            'stopped_at': ('sanetime.dj.SaneTimeField', [], {'null': 'True'})
        }
    }

    complete_apps = ['webinars']
| |
#!/usr/bin/env python3
# transpiled with BefunCompile v1.3.0 (c) 2017
def td(a, b):
    """Floor-divide a by b; a zero divisor yields 0 (Befunge division rule)."""
    if b == 0:
        return 0
    return a // b
def tm(a, b):
    """Return a modulo b; a zero divisor yields 0 (Befunge modulo rule)."""
    if b == 0:
        return 0
    return a % b
# Global operand stack; reading from an empty stack yields 0.
s = []

def sp():
    """Pop and return the top of the stack, or 0 when it is empty."""
    global s
    return s.pop() if s else 0

def sa(v):
    """Push v onto the stack."""
    global s
    s.append(v)

def sr():
    """Peek at the top of the stack without popping, or 0 when empty."""
    global s
    return s[-1] if s else 0
# Scratch registers generated by BefunCompile (grid variables of the
# original Befunge program).  Meanings are not documented by the
# transpiler; values below are only the initial states.
x0=2
x1=1073741824  # 2**30 -- presumably the starting bit for the bit-wise sqrt loops below; confirm
x2=37
x3=37
x4=37
x5=5
x6=37
# State handlers for the transpiled program: each _N executes one basic
# block and returns the index of the next handler to run (26 halts the
# dispatcher loop at the bottom of the file).
#
# NOTE(review): several handlers apply true division ("/") to what appear
# to be integer quantities (sp()/2, sp()/4, x6/2) even though the helper
# td() deliberately uses floor division.  Under Python 3 these produce
# floats; presumably the transpiler intended integer division -- confirm
# against BefunCompile's reference output before changing anything.
def _0():
    global t0
    t0=0
    sa(2)
    sa(5)
    return 1
def _1():
    global x0
    sa(x0-1)
    sa(x0-1)
    return 2
def _2():
    return (4)if(sp()!=0)else(3)
def _3():
    global x0
    global t0
    global x5
    sp();
    sp();
    sa(sp()+1)
    sa(sr());
    x0=sr()
    t0=(sr()*3)-1
    sa(sp()*t0)
    sa(sp()/2);
    x5=sr()
    return 1
def _4():
    global x2
    global t0
    global t1
    global x3
    global x6
    global x4
    global x1
    x2=sr()
    t0=(sr()*3)-1
    sa(sp()*t0)
    t1=sp()
    t1=t1/2
    x3=t1
    x6=0
    sa(sp()-t1)
    sa(sp()*24)
    sa(sp()+1)
    x4=sr()
    sa(x1)
    sa((1)if(x1>x4)else(0))
    return 5
def _5():
    return (25)if(sp()!=0)else(6)
def _6():
    sa(sr());
    return 7
def _7():
    return (22)if(sp()!=0)else(8)
def _8():
    global x6
    global t0
    sp();
    sa(sp()-(x6*x6))
    t0=x6
    return (17)if(sp()!=0)else(9)
def _9():
    global t0
    t0=t0%6
    t0=t0-5
    return (17)if((t0)!=0)else(10)
def _10():
    global x3
    global x5
    global t0
    global x6
    global x4
    global x1
    sa(((x3+x5)*24)+1)
    t0=((x3+x5)*24)+1
    x6=0
    x4=t0
    sa(x1)
    sa((1)if(x1>x4)else(0))
    return 11
def _11():
    return (21)if(sp()!=0)else(12)
def _12():
    sa(sr());
    return 13
def _13():
    return (18)if(sp()!=0)else(14)
def _14():
    global x6
    global t0
    sp();
    sa(sp()-(x6*x6))
    t0=x6
    return (17)if(sp()!=0)else(15)
def _15():
    global t0
    t0=t0%6
    t0=t0-5
    return (17)if((t0)!=0)else(16)
def _16():
    global x5
    global x3
    # Emit one result and fall through to the halt state (26).
    print(x5-x3,end=" ",flush=True)
    sp();
    return 26
def _17():
    global x5
    global x2
    sa(x5)
    sa(x2-1)
    sa(x2-1)
    return 2
def _18():
    global x6
    global x4
    return (19)if((sr()+x6)>x4)else(20)
def _19():
    global x6
    x6=x6/2
    sa(sp()/4);
    sa(sr());
    return 13
def _20():
    global t0
    global x6
    global t1
    global x4
    global t2
    t0=sr()+x6
    t1=x4
    t2=t1-t0
    x4=t2
    t0=(sr()*2)+x6
    x6=t0
    x6=x6/2
    sa(sp()/4);
    return 12
def _21():
    global x4
    sa(sp()/4);
    sa((1)if(sr()>x4)else(0))
    return 11
def _22():
    global x6
    global x4
    return (23)if((sr()+x6)>x4)else(24)
def _23():
    global x6
    x6=x6/2
    sa(sp()/4);
    sa(sr());
    return 7
def _24():
    global t0
    global x6
    global t1
    global x4
    global t2
    t0=sr()+x6
    t1=x4
    t2=t1-t0
    x4=t2
    t0=(sr()*2)+x6
    x6=t0
    x6=x6/2
    sa(sp()/4);
    return 6
def _25():
    global x4
    sa(sp()/4);
    sa((1)if(sr()>x4)else(0))
    return 5
# Dispatch table: execution starts in state 0 and follows the index each
# handler returns; returning 26 (one past the table) terminates the run.
m = [_0, _1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12,
     _13, _14, _15, _16, _17, _18, _19, _20, _21, _22, _23, _24, _25]
c = 0
while c < 26:
    c = m[c]()
| |
#!/usr/bin/env python3
"""List all new authors since the given revision. Only tested on Linux."""
import os
import sys
import subprocess
from pathlib import Path
# Print usage and bail out when no revision (or a help flag) was supplied.
if len(sys.argv) < 2 or sys.argv[1] == '-h' or sys.argv[1] == '--help':
    usage = "Usage: authors-since.py revision"
    print(usage)
    sys.exit(1)

# Revision range whose history is inspected (passed straight to git log).
revision = sys.argv[1]

# Accumulators filled in by the update_* helpers below.
previous_authors = set()
recent_authors = set()
authors_with_email = set()
def remove_maintainers(authors):
    """Filter out team/bot entries, keeping only individual authors.

    An entry is kept when it is longer than three characters and its
    lower-cased form contains none of the organisational keywords.

    :param authors: iterable of author-name strings
    :returns: set of individual author names
    """
    keywords = ('maintainer', 'robot', 'group', 'upstream')
    individual_authors = set()
    for author in authors:
        lowered = author.lower()  # lower-case once instead of per keyword
        if len(author) > 3 and not any(word in lowered for word in keywords):
            individual_authors.add(author)
    return individual_authors
def decode_authors(git_output):
    """Turn raw git output bytes into a list of author lines."""
    text = git_output.decode('utf-8')
    return text.strip().split('\n')
def update_previous_authors(revision):
    """Add every author name reachable from *revision* to ``previous_authors``.

    Runs git with an argument vector (shell=False) so a malicious revision
    string cannot inject shell commands; the old ``| sort -u`` pipe is
    unnecessary because the target set deduplicates for free.
    """
    previous_authors_output = subprocess.check_output(
        ['git', 'log', '--pretty=format:%aN', revision])
    previous_authors.update(decode_authors(previous_authors_output))
def update_recent_authors(revision):
    """Add every author name in *revision*'s history to ``recent_authors``.

    Uses an argument vector (shell=False) to avoid shell injection via the
    revision string; deduplication is handled by the target set.
    """
    recent_authors_out = subprocess.check_output(
        ['git', 'log', '--pretty=format:%aN', revision])
    recent_authors.update(decode_authors(recent_authors_out))
def update_authors_with_email(revision):
    """Add ``Name <email>`` entries from *revision* to ``authors_with_email``.

    Uses an argument vector (shell=False) to avoid shell injection via the
    revision string; deduplication is handled by the target set.
    """
    authors_with_email_out = subprocess.check_output(
        ['git', 'log', '--pretty=format:%aN <%aE>', revision])
    authors_with_email.update(decode_authors(authors_with_email_out))
def remote_repository(remote_spec):
    """Extract the GIT_REPOSITORY URL from a remote-module spec.

    The '${git_protocol}' placeholder is resolved to https.  Prints the
    spec and exits the script when no GIT_REPOSITORY entry is present.
    """
    for line in remote_spec.split('\n'):
        tokens = line.split()
        if 'GIT_REPOSITORY' in tokens:
            url = tokens[tokens.index('GIT_REPOSITORY') + 1]
            return url.replace('${git_protocol}', 'https').strip()
    print('GIT_REPOSITORY not found!')
    print('\nRemote spec:')
    print(remote_spec)
    sys.exit(1)
def remote_tag(remote_spec):
    """Extract the GIT_TAG value from a remote-module spec.

    Prints the spec and exits the script when no GIT_TAG entry is present.
    """
    for line in remote_spec.split('\n'):
        tokens = line.split()
        if 'GIT_TAG' in tokens:
            return tokens[tokens.index('GIT_TAG') + 1].strip()
    print('GIT_TAG not found!')
    print('\nRemote spec:')
    print(remote_spec)
    sys.exit(1)
# Scratch area reused across runs for repository clones and generated reports.
scratch_dir = Path('/tmp/AuthorsChangesSince')
if not scratch_dir.exists():
    os.makedirs(scratch_dir)
def format_shortlog(log, commit_link_prefix):
    """Convert `git shortlog --format=%s:%h` output into per-author markdown.

    Commit subjects are bucketed by their ITK-style prefix (ENH, PERF, DOC,
    COMP, BUG, STYLE; anything else is miscellaneous) and each non-empty
    bucket is rendered as a '####' section whose commit links are built
    from *commit_link_prefix*.

    This replaces seven copy-pasted rendering sections with a single
    data-driven loop; output is byte-identical to the original.
    """
    # (prefix, section title) in the historical report order; the None key
    # collects commits with an unrecognized prefix.
    sections = [
        ('ENH', 'Enhancements'),
        ('PERF', 'Performance Improvements'),
        ('DOC', 'Documentation Updates'),
        ('COMP', 'Platform Fixes'),
        ('BUG', 'Bug Fixes'),
        ('STYLE', 'Style Changes'),
        (None, 'Miscellaneous Changes'),
    ]

    def new_buckets():
        # Fresh empty bucket per category for the next author.
        return {prefix: [] for prefix, _ in sections}

    def render_author(buckets):
        # Render every non-empty section for the author just finished,
        # followed by the two blank lines the report format uses.
        rendered = ''
        for prefix, title in sections:
            entries = buckets[prefix]
            if not entries:
                continue
            rendered += '\n#### {0}\n\n'.format(title)
            for description, commit in entries:
                rendered += '- {0}'.format(description)
                rendered += ' ([{0}]({1}{0}))\n'.format(commit,
                                                        commit_link_prefix)
        return rendered + '\n\n'

    output = ''
    current_author = ''
    buckets = new_buckets()
    for line in log.split('\n'):
        if not line:
            # blank separator line
            continue
        if line[:3] != '   ':
            # Author header (shortlog indents commit lines with spaces).
            if current_author:
                output += render_author(buckets)
            current_author = line
            output += '### ' + current_author + '\n'
            buckets = new_buckets()
        else:
            # Commit line: "<PREFIX>: <subject>:<short-hash>"; the last
            # ':'-separated field is the hash (from --format=%s:%h).
            parts = line.split(':')
            prefix = parts[0].strip()
            commit = parts[-1]
            description = parts[1]
            key = prefix if prefix in buckets else None
            buckets[key].append((description, commit))
    # Flush the final author (emits the trailing blank lines even when the
    # log was empty, matching the original behavior).
    output += render_author(buckets)
    return output
# Truncate/create the changelog up front; every write_changelog() call and
# the remote-module loop below append to it.
changelog_file = scratch_dir / 'Changelog.md'
with open(changelog_file, 'w') as fp:
    fp.write('')
def write_changelog(repo_name, commit_link_prefix, git_revision):
    """Append the formatted shortlog for *git_revision* (run in the current
    working directory's repository) to the shared changelog file."""
    raw_log = subprocess.check_output(
        'git shortlog --format=%s:%h --topo-order --no-merges {0}'.format(
            git_revision),
        shell=True).decode('utf-8')
    section = format_shortlog(raw_log, commit_link_prefix)
    with open(changelog_file, 'a') as fp:
        fp.write('{0} Changes Since {1}\n'.format(repo_name, revision))
        fp.write('---------------------------------------------\n\n')
        fp.write(section)
        fp.write('\n\n')
# Timestamp of the reference revision; used to bound history in the other
# repositories, which do not share ITK's commit graph.
revision_time = subprocess.check_output('git show -s --format="%ci" {0}^{{commit}}'.format(revision), shell=True)
revision_time = revision_time.decode('utf-8').strip()
print('Revision time: ' + revision_time + '\n')
# Repository root is assumed to be two levels above this script -- TODO confirm.
itk_dir = Path(os.path.dirname(os.path.abspath(__file__))) / '..' / '..'

# ITK Repository
update_previous_authors(revision)
update_recent_authors(revision + '..')
update_authors_with_email(revision + '..')
commit_link_prefix = 'https://github.com/InsightSoftwareConsortium/ITK/commit/'
write_changelog('ITK', commit_link_prefix, revision + '..')

# ITKExamples Repository -- date-bounded because it has its own history.
os.chdir(scratch_dir)
examples_dir = scratch_dir / 'ITKExamples'
if not examples_dir.exists():
    subprocess.check_call('git clone https://github.com/InsightSoftwareConsortium/ITKExamples', shell=True)
os.chdir(examples_dir)
update_previous_authors('--until="{0}"'.format(revision_time))
update_recent_authors('--since="{0}"'.format(revision_time))
update_authors_with_email('--since="{0}"'.format(revision_time))
commit_link_prefix = 'https://github.com/InsightSoftwareConsortium/ITKExamples/commit/'
write_changelog('ITK Examples', commit_link_prefix, '--since="{0}"'.format(revision_time))

# ITKSoftwareGuide Repository -- same date-bounded treatment.
os.chdir(scratch_dir)
examples_dir = scratch_dir / 'ITKSoftwareGuide'
if not examples_dir.exists():
    subprocess.check_call('git clone https://github.com/InsightSoftwareConsortium/ITKSoftwareGuide', shell=True)
os.chdir(examples_dir)
update_previous_authors('--until="{0}"'.format(revision_time))
update_recent_authors('--since="{0}"'.format(revision_time))
update_authors_with_email('--since="{0}"'.format(revision_time))
commit_link_prefix = 'https://github.com/InsightSoftwareConsortium/ITKSoftwareGuide/commit/'
write_changelog('ITK Software Guide', commit_link_prefix, '--since="{0}"'.format(revision_time))
# Remote modules: scan every remote-module spec file added or modified since
# the revision, clone the module's repository, and append its shortlog
# between the old and new pinned tags to the changelog.
os.chdir(itk_dir)
changed_remotes = subprocess.check_output('git diff-index --diff-filter=AM --name-only {0} -- Modules/Remote/'.format(revision), shell=True).decode('utf-8').strip()
with open(changelog_file, 'a') as fp:
    fp.write('Remote Module Changes Since {0}\n'.format(revision))
    fp.write('---------------------------------------------\n\n')
print('Remote module:')
for remote in changed_remotes.split():
    module_name = remote.split('/')[-1].split('.')[0]
    # Helper files living next to the module specs are not modules.
    if module_name in ['SphinxExamples', 'CMakeLists', 'README']:
        continue
    print(module_name)
    os.chdir(itk_dir)
    # The remote file could have been added or its name changed. Use the oldest
    # commit since revision with the current name.
    old_commit = subprocess.check_output('git rev-list {0}.. -- {1}'.format(revision, remote),
                                         shell=True).decode('utf-8').split()[-1]
    try:
        # Spec as it was just before that commit; falls back to the commit
        # itself when the file did not exist in the parent (newly added).
        remote_spec = subprocess.check_output('git show {0}^:{1}'.format(old_commit,
                                              remote), shell=True).decode('utf-8')
    except subprocess.CalledProcessError:
        remote_spec = subprocess.check_output('git show {0}:{1}'.format(old_commit,
                                              remote), shell=True).decode('utf-8')
    remote_old_tag = remote_tag(remote_spec)
    remote_spec = subprocess.check_output('git show HEAD:{1}'.format(revision,
                                          remote), shell=True).decode('utf-8')
    remote_new_tag = remote_tag(remote_spec)
    remote_repo = remote_repository(remote_spec)
    os.chdir(scratch_dir)
    remote_dir = scratch_dir / remote_repo.split('/')[-1]
    if not remote_dir.exists():
        subprocess.check_call('git clone {0} {1}'.format(remote_repo, remote_dir),
                              shell=True)
    os.chdir(remote_dir)
    # update_previous_authors('..{0}'.format(remote_new_tag))
    update_recent_authors('{0}..{1}'.format(remote_old_tag, remote_new_tag))
    update_authors_with_email('{0}..{1}'.format(remote_old_tag, remote_new_tag))
    log = subprocess.check_output('git shortlog --format=%s:%h --topo-order --no-merges {0}..{1}'.format(remote_old_tag, remote_new_tag), shell=True).decode('utf-8')
    commit_link_prefix = remote_repo.replace('.git', '') + '/commit/'
    formatted_log = format_shortlog(log, commit_link_prefix)
    with open(changelog_file, 'a') as fp:
        fp.write('## {0}:\n'.format(module_name))
        fp.write(formatted_log)
        fp.write('\n')
new_authors = recent_authors.difference(previous_authors)
# Print results
print('\n\nPrevious authors: ' + str(len(previous_authors)))
print('Recent authors: ' + str(len(recent_authors)))
print('\nNew authors: ' + str(len(new_authors)))
with open(scratch_dir / 'NewAuthors.txt', 'w') as fp:
for author in new_authors:
sys.stdout.write(author + ', ')
fp.write(author + ', ')
recent_authors_with_email = []
for author in authors_with_email:
for an in recent_authors:
if author.find(an) != -1:
recent_authors_with_email.append(author)
print('\n\nRecent with emails:')
with open(scratch_dir / 'RecentWithEmails.txt', 'w') as fp:
for author in recent_authors_with_email:
sys.stdout.write(author + ', ')
fp.write(author + ', ')
print('\n\nWith emails oneline:')
with open(scratch_dir / 'WithEmailsOneline.txt', 'w') as fp:
for author in authors_with_email:
sys.stdout.write(author + '\n')
fp.write(author + '\n')
print('\n\nNames:')
with open(scratch_dir / 'Names.txt', 'w') as fp:
for author in authors_with_email:
sys.stdout.write(author.split('<')[0] + '\n')
fp.write(author.split('<')[0] + '\n')
print('\n\nEmails:')
with open(scratch_dir / 'Emails.txt', 'w') as fp:
for author in authors_with_email:
sys.stdout.write(author.split('<')[1][:-1] + '\n')
fp.write(author.split('<')[1][:-1] + '\n')
| |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
import numpy as np
import scipy.stats as sp_stats
# Use the nibabel image object
from nibabel import Nifti1Image as Image
from nibabel.affines import apply_affine
from ..io.nibcompat import get_affine
from ..algorithms.graph.field import field_from_graph_and_data
from ..algorithms.graph.graph import wgraph_from_3d_grid
from ..algorithms.statistics import empirical_pvalue
from .glm import glm
from .group.permutation_test import \
permutation_test_onesample, permutation_test_twosample
# FIXME: rename permutation_test_onesample class
#so that name starts with upper case
###############################################################################
# Cluster statistics
###############################################################################
def bonferroni(p, n):
    """Bonferroni-correct p-value(s) *p* for *n* tests, capped at 1."""
    corrected = p * n
    return np.minimum(1., corrected)
def simulated_pvalue(t, simu_t):
    """Empirical p-value of statistic *t* against sorted null samples *simu_t*."""
    n_simu = float(np.size(simu_t))
    rank = np.searchsorted(simu_t, t)
    return 1 - rank / n_simu
def cluster_stats(zimg, mask, height_th, height_control='fpr',
                  cluster_th=0, nulls=None):
    """
    Return a list of clusters, each cluster being represented by a
    dictionary. Clusters are sorted by descending size order. Within
    each cluster, local maxima are sorted by descending depth order.

    Parameters
    ----------
    zimg: z-score image
    mask: mask image
    height_th: cluster forming threshold
    height_control: string
            false positive control meaning of cluster forming
            threshold: 'fpr'|'fdr'|'bonferroni'|'none'
    cluster_th: cluster size threshold
    nulls : dict, optional
            cluster-level calibration data; recognized keys are 'zmax',
            'smax' and 's' (each None|'rft'|array).  The caller's dict is
            copied, not mutated.

    Returns
    -------
    (clusters, info), or (None, None) when no voxel survives thresholding.

    Notes
    -----
    This works only with three dimensional data
    """
    # BUG FIX: the signature previously used a mutable default `nulls={}`
    # and then wrote the fallback keys into it, so the filled-in defaults
    # leaked across calls (and into dicts passed by callers).  Work on a
    # private copy instead.
    nulls = {} if nulls is None else dict(nulls)

    # Masking
    if len(mask.shape) > 3:
        xyz = np.where((mask.get_data() > 0).squeeze())
        zmap = zimg.get_data().squeeze()[xyz]
    else:
        xyz = np.where(mask.get_data() > 0)
        zmap = zimg.get_data()[xyz]
    xyz = np.array(xyz).T
    nvoxels = np.size(xyz, 0)

    # Thresholding
    if height_control == 'fpr':
        zth = sp_stats.norm.isf(height_th)
    elif height_control == 'fdr':
        zth = empirical_pvalue.gaussian_fdr_threshold(zmap, height_th)
    elif height_control == 'bonferroni':
        zth = sp_stats.norm.isf(height_th / nvoxels)
    else:  ## Brute-force thresholding
        zth = height_th
    pth = sp_stats.norm.sf(zth)
    above_th = zmap > zth
    if len(np.where(above_th)[0]) == 0:
        return None, None  ## FIXME
    zmap_th = zmap[above_th]
    xyz_th = xyz[above_th]

    # Clustering
    ## Extract local maxima and connex components above some threshold
    ff = field_from_graph_and_data(wgraph_from_3d_grid(xyz_th, k=18), zmap_th)
    maxima, depth = ff.get_local_maxima(th=zth)
    labels = ff.cc()
    ## Make list of clusters, each cluster being a dictionary
    clusters = []
    for k in range(labels.max() + 1):
        s = np.sum(labels == k)
        if s >= cluster_th:
            in_cluster = labels[maxima] == k
            m = maxima[in_cluster]
            d = depth[in_cluster]
            # Renamed from `sorted`, which shadowed the builtin.
            descending = d.argsort()[::-1]
            clusters.append({'size': s,
                             'maxima': m[descending],
                             'depth': d[descending]})

    ## Sort clusters by descending size order
    clusters.sort(key=lambda c: c['size'], reverse=True)

    # FDR-corrected p-values
    fdr_pvalue = empirical_pvalue.gaussian_fdr(zmap)[above_th]

    # Default "nulls"
    if 'zmax' not in nulls:
        nulls['zmax'] = 'bonferroni'
    if 'smax' not in nulls:
        nulls['smax'] = None
    if 's' not in nulls:
        nulls['s'] = None

    # Report significance levels in each cluster
    for c in clusters:
        maxima = c['maxima']
        zscore = zmap_th[maxima]
        pval = sp_stats.norm.sf(zscore)
        # Replace array indices with real coordinates
        c['maxima'] = apply_affine(get_affine(zimg), xyz_th[maxima])
        c['zscore'] = zscore
        c['pvalue'] = pval
        c['fdr_pvalue'] = fdr_pvalue[maxima]
        # Voxel-level corrected p-values
        p = None
        if nulls['zmax'] == 'bonferroni':
            p = bonferroni(pval, nvoxels)
        elif isinstance(nulls['zmax'], np.ndarray):
            p = simulated_pvalue(zscore, nulls['zmax'])
        c['fwer_pvalue'] = p
        # Cluster-level p-values (corrected)
        p = None
        if isinstance(nulls['smax'], np.ndarray):
            p = simulated_pvalue(c['size'], nulls['smax'])
        c['cluster_fwer_pvalue'] = p
        # Cluster-level p-values (uncorrected)
        p = None
        if isinstance(nulls['s'], np.ndarray):
            p = simulated_pvalue(c['size'], nulls['s'])
        c['cluster_pvalue'] = p

    # General info
    info = {'nvoxels': nvoxels,
            'threshold_z': zth,
            'threshold_p': pth,
            'threshold_pcorr': bonferroni(pth, nvoxels)}
    return clusters, info
###############################################################################
# Peak_extraction
###############################################################################
def get_3d_peaks(image, mask=None, threshold=0., nn=18, order_th=0):
    """
    returns all the peaks of image that are with the mask
    and above the provided threshold

    Parameters
    ----------
    image, (3d) test image
    mask=None, (3d) mask image
        By default no masking is performed
    threshold=0., float, threshold value above which peaks are considered
    nn=18, int, number of neighbours of the topological spatial model
    order_th=0, int, threshold on topological order to validate the peaks

    Returns
    -------
    peaks, a list of dictionaries, where each dict has the fields:
    vals, map value at the peak
    order, topological order of the peak
    ijk, array of shape (1,3) grid coordinate of the peak
    pos, array of shape (n_maxima,3) mm coordinates (mapped by affine)
    of the peaks
    """
    # Masking
    if mask is not None:
        bmask = mask.get_data().ravel()
        data = image.get_data().ravel()[bmask > 0]
        xyz = np.array(np.where(bmask > 0)).T
    else:
        shape = image.shape
        data = image.get_data().ravel()
        xyz = np.reshape(np.indices(shape), (3, np.prod(shape))).T
    affine = get_affine(image)

    if not (data > threshold).any():
        return None

    # Extract local maxima and connex components above some threshold
    # BUG FIX: the neighbourhood system was hard-coded to k=18, silently
    # ignoring the documented `nn` parameter (default unchanged, so existing
    # callers see identical behavior).
    ff = field_from_graph_and_data(wgraph_from_3d_grid(xyz, k=nn), data)
    maxima, order = ff.get_local_maxima(th=threshold)

    # retain only the maxima greater than the specified order
    maxima = maxima[order > order_th]
    order = order[order > order_th]
    n_maxima = len(maxima)
    if n_maxima == 0:
        # no peak survived the order threshold
        return None

    # reorder the maxima to have decreasing peak value
    vals = data[maxima]
    idx = np.argsort(- vals)
    maxima = maxima[idx]
    order = order[idx]
    vals = data[maxima]
    ijk = xyz[maxima]
    # Map grid coordinates to mm via the affine (homogeneous coordinates).
    pos = np.dot(np.hstack((ijk, np.ones((n_maxima, 1)))), affine.T)[:, :3]
    peaks = [{'val': vals[k], 'order': order[k], 'ijk': ijk[k], 'pos': pos[k]}
             for k in range(n_maxima)]
    return peaks
###############################################################################
# Statistical tests
###############################################################################
def prepare_arrays(data_images, vardata_images, mask_images):
    """Mask the input images and return flat (data, vardata, xyz, mask).

    vardata is None when no variance images are supplied.
    """
    from .mask import intersect_masks
    # Compute mask intersection
    mask = intersect_masks(mask_images, threshold=1.)
    # Compute xyz coordinates from mask
    xyz = np.array(np.where(mask > 0))
    # Prepare data & vardata arrays
    data = np.array([(d.get_data()[xyz[0], xyz[1], xyz[2]]).squeeze()
                     for d in data_images]).squeeze()
    # Identity test instead of `== None` (PEP 8; also avoids elementwise
    # comparison if a sequence is ever passed).
    if vardata_images is None:
        vardata = None
    else:
        vardata = np.array([(d.get_data()[xyz[0], xyz[1], xyz[2]]).squeeze()
                            for d in vardata_images]).squeeze()
    return data, vardata, xyz, mask
def onesample_test(data_images, vardata_images, mask_images, stat_id,
                   permutations=0, cluster_forming_th=0.01):
    """
    Helper function for permutation-based mass univariate onesample
    group analysis.

    Returns (zimg, maskimg) when permutations <= 0, otherwise
    (zimg, maskimg, nulls) where nulls holds the permutation null
    distributions under keys 'zmax', 's' and 'smax'.
    """
    # Prepare arrays
    data, vardata, xyz, mask = prepare_arrays(data_images, vardata_images,
                                              mask_images)
    # Create one-sample permutation test instance
    ptest = permutation_test_onesample(data, xyz, vardata=vardata,
                                       stat_id=stat_id)
    # Compute z-map image (scores written back at the masked coordinates)
    zmap = np.zeros(data_images[0].shape).squeeze()
    zmap[list(xyz)] = ptest.zscore()
    zimg = Image(zmap, get_affine(data_images[0]))
    # Compute mask image
    maskimg = Image(mask.astype(np.int8), get_affine(data_images[0]))
    # Multiple comparisons
    if permutations <= 0:
        return zimg, maskimg
    else:
        # Cluster definition: (threshold, diameter)
        cluster_def = (ptest.height_threshold(cluster_forming_th), None)
        # Calibration
        voxel_res, cluster_res, region_res = \
            ptest.calibrate(nperms=permutations, clusters=[cluster_def])
        nulls = {}
        nulls['zmax'] = ptest.zscore(voxel_res['perm_maxT_values'])
        nulls['s'] = cluster_res[0]['perm_size_values']
        nulls['smax'] = cluster_res[0]['perm_maxsize_values']
        # Return z-map image, mask image and dictionary of null distribution
        # for cluster sizes (s), max cluster size (smax) and max z-score (zmax)
        return zimg, maskimg, nulls
def twosample_test(data_images, vardata_images, mask_images, labels, stat_id,
                   permutations=0, cluster_forming_th=0.01):
    """
    Helper function for permutation-based mass univariate twosample group
    analysis. Labels is a binary vector (1-2). Regions more active for group
    1 than group 2 are inferred.

    Returns (zimg, maskimg) when permutations <= 0, otherwise
    (zimg, maskimg, nulls).
    """
    # Prepare arrays
    data, vardata, xyz, mask = prepare_arrays(data_images, vardata_images,
                                              mask_images)
    # Create two-sample permutation test instance
    # Identity test instead of `== None` (PEP 8), matching prepare_arrays.
    if vardata_images is None:
        ptest = permutation_test_twosample(
            data[labels == 1], data[labels == 2], xyz, stat_id=stat_id)
    else:
        ptest = permutation_test_twosample(
            data[labels == 1], data[labels == 2], xyz,
            vardata1=vardata[labels == 1], vardata2=vardata[labels == 2],
            stat_id=stat_id)
    # Compute z-map image
    zmap = np.zeros(data_images[0].shape).squeeze()
    zmap[list(xyz)] = ptest.zscore()
    zimg = Image(zmap, get_affine(data_images[0]))
    # Compute mask image
    maskimg = Image(mask, get_affine(data_images[0]))
    # Multiple comparisons
    if permutations <= 0:
        return zimg, maskimg
    else:
        # Cluster definition: (threshold, diameter)
        cluster_def = (ptest.height_threshold(cluster_forming_th), None)
        # Calibration
        voxel_res, cluster_res, region_res = \
            ptest.calibrate(nperms=permutations, clusters=[cluster_def])
        nulls = {}
        nulls['zmax'] = ptest.zscore(voxel_res['perm_maxT_values'])
        nulls['s'] = cluster_res[0]['perm_size_values']
        nulls['smax'] = cluster_res[0]['perm_maxsize_values']
        # Return z-map image, mask image and dictionary of null
        # distribution for cluster sizes (s), max cluster size (smax)
        # and max z-score (zmax)
        return zimg, maskimg, nulls
###############################################################################
# Linear model
###############################################################################
def linear_model_fit(data_images, mask_images, design_matrix, vector):
    """
    Helper function for group data analysis using arbitrary design matrix
    """
    # Mask the data; no variance images for a plain GLM fit.
    data, vardata, xyz, mask = prepare_arrays(data_images, None, mask_images)
    # Fit the GLM and evaluate the requested contrast.
    model = glm(data, design_matrix)
    contrast = model.contrast(vector)
    # Write the contrast z-scores back into a full-volume map.
    zmap = np.zeros(data_images[0].shape).squeeze()
    zmap[list(xyz)] = contrast.zscore()
    return Image(zmap, get_affine(data_images[0]))
class LinearModel(object):
    """Mass-univariate GLM fit over one or several (data, design) pairs."""

    def_model = 'spherical'
    def_niter = 2

    def __init__(self, data, design_matrix, mask=None, formula=None,
                 model=def_model, method=None, niter=def_niter):
        # Convert input data and design into sequences
        if not hasattr(data, '__iter__'):
            data = [data]
        if not hasattr(design_matrix, '__iter__'):
            design_matrix = [design_matrix]
        # configure spatial properties
        # the 'sampling' direction is assumed to be the last
        # TODO: check that all input images have the same shape and
        # that it's consistent with the mask
        # BUG FIX: `mask == None` can trigger elementwise comparison on
        # array-like objects; identity test is the correct None check.
        nomask = mask is None
        if nomask:
            self.xyz = None
            self.axis = len(data[0].shape) - 1
        else:
            self.xyz = np.where(mask.get_data() > 0)
            self.axis = 1
        self.spatial_shape = data[0].shape[0: -1]
        self.affine = get_affine(data[0])
        self.glm = []
        for i in range(len(data)):
            if not isinstance(design_matrix[i], np.ndarray):
                raise ValueError('Invalid design matrix')
            if nomask:
                Y = data[i].get_data()
            else:
                Y = data[i].get_data()[self.xyz]
            X = design_matrix[i]
            self.glm.append(glm(Y, X, axis=self.axis,
                                formula=formula, model=model,
                                method=method, niter=niter))

    def dump(self, filename):
        """Dump GLM fit as npz file.
        """
        models = len(self.glm)
        if models == 1:
            self.glm[0].save(filename)
        else:
            # one file per fitted model, suffixed by its index
            for i in range(models):
                self.glm[i].save(filename + str(i))

    def contrast(self, vector):
        """Compute images of contrast and contrast variance.
        """
        # Compute the overall contrast across models
        c = self.glm[0].contrast(vector)
        for g in self.glm[1:]:
            c += g.contrast(vector)

        def affect_inmask(dest, src, xyz):
            # Write `src` back into the full volume when a mask was used.
            # (identity test instead of `xyz == None`)
            if xyz is None:
                dest = src
            else:
                dest[xyz] = src
            return dest

        con = np.zeros(self.spatial_shape)
        con_img = Image(affect_inmask(con, c.effect, self.xyz), self.affine)
        vcon = np.zeros(self.spatial_shape)
        vcon_img = Image(affect_inmask(vcon, c.variance, self.xyz),
                         self.affine)
        z = np.zeros(self.spatial_shape)
        z_img = Image(affect_inmask(z, c.zscore(), self.xyz), self.affine)
        dof = c.dof
        return con_img, vcon_img, z_img, dof
###############################################################################
# Hack to have nose skip onesample_test, which is not a unit test
# (setting __test__ = False opts a callable out of nose's test collection).
onesample_test.__test__ = False
twosample_test.__test__ = False
| |
# Copyright 2012-2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Volume v1 Volume action implementations"""
import logging
import six
from cliff import command
from cliff import lister
from cliff import show
from openstackclient.common import parseractions
from openstackclient.common import utils
class CreateVolume(show.ShowOne):
    """Create new volume"""

    log = logging.getLogger(__name__ + '.CreateVolume')

    def get_parser(self, prog_name):
        parser = super(CreateVolume, self).get_parser(prog_name)
        parser.add_argument(
            'name',
            metavar='<name>',
            help='Name of the volume',
        )
        parser.add_argument(
            '--size',
            metavar='<size>',
            required=True,
            type=int,
            help='New volume size',
        )
        parser.add_argument(
            '--snapshot-id',
            metavar='<snapshot-id>',
            help='ID of the snapshot',
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help='Description of the volume',
        )
        parser.add_argument(
            '--volume-type',
            metavar='<volume-type>',
            help='Type of volume',
        )
        parser.add_argument(
            '--user',
            metavar='<user>',
            help='Specify a different user (admin only)',
        )
        parser.add_argument(
            '--project',
            metavar='<project>',
            help='Specify a different project (admin only)',
        )
        parser.add_argument(
            '--availability-zone',
            metavar='<availability-zone>',
            help='Availability zone to use',
        )
        parser.add_argument(
            '--property',
            metavar='<key=value>',
            action=parseractions.KeyValueAction,
            help='Property to store for this volume '
                 '(repeat option to set multiple properties)',
        )
        parser.add_argument(
            '--image',
            metavar='<image>',
            help='Reference to a stored image',
        )
        parser.add_argument(
            '--source',
            metavar='<volume>',
            help='Source for volume clone',
        )
        return parser

    def take_action(self, parsed_args):
        # BUG FIX: pass parsed_args lazily instead of eager %-formatting so
        # the repr is only built when DEBUG logging is enabled.
        self.log.debug('take_action(%s)', parsed_args)
        identity_client = self.app.client_manager.identity
        volume_client = self.app.client_manager.volume

        # Resolve optional name/ID references to IDs up front.
        source_volume = None
        if parsed_args.source:
            source_volume = utils.find_resource(
                volume_client.volumes,
                parsed_args.source,
            ).id

        project = None
        if parsed_args.project:
            project = utils.find_resource(
                identity_client.tenants, parsed_args.project).id

        user = None
        if parsed_args.user:
            user = utils.find_resource(
                identity_client.users, parsed_args.user).id

        volume = volume_client.volumes.create(
            parsed_args.size,
            parsed_args.snapshot_id,
            source_volume,
            parsed_args.name,
            parsed_args.description,
            parsed_args.volume_type,
            user,
            project,
            parsed_args.availability_zone,
            parsed_args.property,
            parsed_args.image
        )
        # Map 'metadata' column to 'properties'
        volume._info.update(
            {'properties': utils.format_dict(volume._info.pop('metadata'))}
        )

        return zip(*sorted(six.iteritems(volume._info)))
class DeleteVolume(command.Command):
    """Delete volume"""

    log = logging.getLogger(__name__ + '.DeleteVolume')

    def get_parser(self, prog_name):
        parser = super(DeleteVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volume',
            metavar='<volume>',
            help='Name or ID of volume to delete',
        )
        parser.add_argument(
            '--force',
            dest='force',
            action='store_true',
            default=False,
            help='Attempt forced removal of a volume, regardless of state',
        )
        return parser

    def take_action(self, parsed_args):
        # BUG FIX: lazy logging args instead of eager %-formatting.
        self.log.debug('take_action(%s)', parsed_args)
        volume_client = self.app.client_manager.volume
        volume = utils.find_resource(
            volume_client.volumes, parsed_args.volume)
        if parsed_args.force:
            volume_client.volumes.force_delete(volume.id)
        else:
            volume_client.volumes.delete(volume.id)
        return
class ListVolume(lister.Lister):
    """List volumes"""

    log = logging.getLogger(__name__ + '.ListVolume')

    def get_parser(self, prog_name):
        parser = super(ListVolume, self).get_parser(prog_name)
        parser.add_argument(
            '--status',
            metavar='<status>',
            help='Filter results by status',
        )
        parser.add_argument(
            '--name',
            metavar='<name>',
            help='Filter results by name',
        )
        parser.add_argument(
            '--all-projects',
            action='store_true',
            default=False,
            help='Include all projects (admin only)',
        )
        parser.add_argument(
            '--long',
            action='store_true',
            default=False,
            help='Display properties',
        )
        return parser

    def take_action(self, parsed_args):
        # BUG FIX: lazy logging args instead of eager %-formatting.
        self.log.debug('take_action(%s)', parsed_args)
        # --long adds type/bootable/properties columns.
        if parsed_args.long:
            columns = (
                'ID',
                'Display Name',
                'Status',
                'Size',
                'Volume Type',
                'Bootable',
                'Attached to',
                'Metadata',
            )
            column_headers = (
                'ID',
                'Display Name',
                'Status',
                'Size',
                'Type',
                'Bootable',
                'Attached',
                'Properties',
            )
        else:
            columns = (
                'ID',
                'Display Name',
                'Status',
                'Size',
                'Attached to',
            )
            column_headers = (
                'ID',
                'Display Name',
                'Status',
                'Size',
                'Attached',
            )
        search_opts = {
            'all_tenants': parsed_args.all_projects,
            'display_name': parsed_args.name,
            'status': parsed_args.status,
        }

        volume_client = self.app.client_manager.volume
        data = volume_client.volumes.list(search_opts=search_opts)

        return (column_headers,
                (utils.get_item_properties(
                    s, columns,
                    formatters={'Metadata': utils.format_dict},
                ) for s in data))
class SetVolume(command.Command):
    """Set volume properties"""

    log = logging.getLogger(__name__ + '.SetVolume')

    def get_parser(self, prog_name):
        parser = super(SetVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volume',
            metavar='<volume>',
            help='Name or ID of volume to change',
        )
        parser.add_argument(
            '--name',
            metavar='<new-name>',
            help='New volume name',
        )
        parser.add_argument(
            '--description',
            metavar='<new-description>',
            help='New volume description',
        )
        parser.add_argument(
            '--property',
            metavar='<key=value>',
            action=parseractions.KeyValueAction,
            help='Property to add/change for this volume '
                 '(repeat option to set multiple properties)',
        )
        return parser

    def take_action(self, parsed_args):
        # BUG FIX: lazy logging args instead of eager %-formatting.
        self.log.debug('take_action(%s)', parsed_args)
        volume_client = self.app.client_manager.volume
        volume = utils.find_resource(volume_client.volumes, parsed_args.volume)

        if parsed_args.property:
            volume_client.volumes.set_metadata(volume.id, parsed_args.property)

        kwargs = {}
        if parsed_args.name:
            kwargs['display_name'] = parsed_args.name
        if parsed_args.description:
            kwargs['display_description'] = parsed_args.description
        if kwargs:
            volume_client.volumes.update(volume.id, **kwargs)

        # Nothing was changed at all: report it.
        if not kwargs and not parsed_args.property:
            self.app.log.error("No changes requested\n")
        return
class ShowVolume(show.ShowOne):
    """Show specific volume"""

    log = logging.getLogger(__name__ + '.ShowVolume')

    def get_parser(self, prog_name):
        parser = super(ShowVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volume',
            metavar='<volume>',
            help='Name or ID of volume to display',
        )
        return parser

    def take_action(self, parsed_args):
        # BUG FIX: lazy logging args instead of eager %-formatting.
        self.log.debug('take_action(%s)', parsed_args)
        volume_client = self.app.client_manager.volume
        volume = utils.find_resource(volume_client.volumes, parsed_args.volume)

        # Map 'metadata' column to 'properties'
        volume._info.update(
            {'properties': utils.format_dict(volume._info.pop('metadata'))}
        )
        # Expose the admin-only tenant attribute under a friendlier name.
        if 'os-vol-tenant-attr:tenant_id' in volume._info:
            volume._info.update(
                {'project_id': volume._info.pop(
                    'os-vol-tenant-attr:tenant_id')}
            )

        return zip(*sorted(six.iteritems(volume._info)))
class UnsetVolume(command.Command):
    """Unset volume properties"""

    log = logging.getLogger(__name__ + '.UnsetVolume')

    def get_parser(self, prog_name):
        parser = super(UnsetVolume, self).get_parser(prog_name)
        parser.add_argument(
            'volume',
            metavar='<volume>',
            help='Name or ID of volume to change',
        )
        parser.add_argument(
            '--property',
            metavar='<key>',
            action='append',
            default=[],
            help='Property key to remove from volume '
                 '(repeat to set multiple values)',
        )
        return parser

    def take_action(self, parsed_args):
        # BUG FIX: lazy logging args instead of eager %-formatting.
        self.log.debug('take_action(%s)', parsed_args)
        volume_client = self.app.client_manager.volume
        volume = utils.find_resource(
            volume_client.volumes, parsed_args.volume)

        if parsed_args.property:
            volume_client.volumes.delete_metadata(
                volume.id,
                parsed_args.property,
            )
        else:
            self.app.log.error("No changes requested\n")
        return
| |
# Copyright (c) 2014 Huawei Technologies Co., Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the Huawei nas driver module."""
import os
import shutil
import tempfile
import time
import xml.dom.minidom
import ddt
import mock
from oslo_serialization import jsonutils
from manila import context
from manila import db
from manila import exception
from manila.share import configuration as conf
from manila.share.drivers.huawei import huawei_nas
from manila.share.drivers.huawei.v3 import connection
from manila.share.drivers.huawei.v3 import helper
from manila import test
def fake_sleep(time):
    """No-op stand-in for time.sleep() so tests never actually wait."""
    return None
def data_session(url):
    """Return the canned REST login response for a session URL."""
    if url == "/xx/sessions":
        # Full login answer including the token and device id the
        # helper extracts after authenticating.
        data = """{"error":{"code":0},
        "data":{"username":"admin",
        "iBaseToken":"2001031430",
        "deviceid":"210235G7J20000000000"}}"""
    elif url == "sessions":
        data = '{"error":{"code":0},"data":{"ID":11}}'
    return data
def filesystem(method, data, fs_status_flag):
    """Fake the FILESYSTEM REST resource.

    Returns a (response_body, extend_share_flag, shrink_share_flag)
    tuple; the flags record whether an extend/shrink payload was seen.
    """
    extend_share_flag, shrink_share_flag = False, False

    if method == "PUT":
        # Payload decides whether this was an extend, a shrink or a
        # rename; any other payload is echoed back untouched.
        if data == """{"CAPACITY": 8388608}""":
            data = """{"error":{"code":0},
            "data":{"ID":"4",
            "CAPACITY":"8388608"}}"""
            extend_share_flag = True
        elif data == """{"CAPACITY": 2097152}""":
            data = """{"error":{"code":0},
            "data":{"ID":"4",
            "CAPACITY":"2097152"}}"""
            shrink_share_flag = True
        elif data == """{"NAME": "share_fake_manage_uuid"}""":
            data = """{"error":{"code":0},
            "data":{"ID":"4",
            "CAPACITY":"8388608"}}"""
    elif method == "DELETE":
        data = """{"error":{"code":0}}"""
    elif method == "GET":
        # fs_status_flag selects a healthy (thick) vs unhealthy (thin)
        # filesystem description.
        if fs_status_flag:
            data = """{"error":{"code":0},
            "data":{"HEALTHSTATUS":"1",
            "RUNNINGSTATUS":"27",
            "ALLOCTYPE":"1",
            "CAPACITY":"8388608",
            "PARENTNAME":"OpenStack_Pool"}}"""
        else:
            data = """{"error":{"code":0},
            "data":{"HEALTHSTATUS":"0",
            "RUNNINGSTATUS":"27",
            "ALLOCTYPE":"0",
            "CAPACITY":"8388608",
            "PARENTNAME":"OpenStack_Pool"}}"""
    else:
        data = '{"error":{"code":31755596}}'
    return (data, extend_share_flag, shrink_share_flag)
def allow_access(type, method, data):
    """Fake the access-rule creation endpoints.

    For POST requests, returns (response_body, allow_ro_flag,
    allow_rw_flag); for every other method only the error body is
    returned (no tuple), mirroring the endpoint's error path.
    """
    # Build the expected NFS payloads: dump the read-only rule first,
    # then mutate NAME/ACCESSVAL in place for the read-write variant.
    access_nfs = {
        "TYPE": "16409",
        "NAME": "1.2.3.4",
        "PARENTID": "1",
        "ACCESSVAL": "0",
        "SYNC": "0",
        "ALLSQUASH": "1",
        "ROOTSQUASH": "0",
    }
    access_nfs_ro_data = jsonutils.dumps(access_nfs)
    access_nfs["NAME"] = "100.112.0.1"
    access_nfs["ACCESSVAL"] = "1"
    access_nfs_rw_data = jsonutils.dumps(access_nfs)

    # Same trick for the CIFS payloads (PERMISSION distinguishes ro/rw).
    access_cifs = {
        "NAME": "user_name",
        "PARENTID": "2",
        "PERMISSION": "0",
        "DOMAINTYPE": "2",
    }
    access_cifs_ro_data = jsonutils.dumps(access_cifs)
    access_cifs["PERMISSION"] = "5"
    access_cifs_rw_data = jsonutils.dumps(access_cifs)

    if method != "POST":
        return """{"error":{"code":31755596}}"""

    # ro and rw payloads can never match simultaneously, so the flags
    # may be computed independently.
    allow_ro_flag = ((data == access_nfs_ro_data and type == "NFS")
                     or (data == access_cifs_ro_data and type == "CIFS"))
    allow_rw_flag = (not allow_ro_flag
                     and ((data == access_nfs_rw_data and type == 'NFS')
                          or (data == access_cifs_rw_data and type == 'CIFS')))
    if allow_ro_flag or allow_rw_flag:
        data = """{"error":{"code":0}}"""
    else:
        data = """{"error":{"code":31755596}}"""
    return (data, allow_ro_flag, allow_rw_flag)
class FakeHuaweiNasHelper(helper.RestHelper):
    """Fake RestHelper serving canned JSON responses in do_call().

    The *_flag attributes are set by do_call() so tests can assert which
    backend operations were attempted; the remaining switches are preset
    by individual tests to select success vs. error responses.
    """

    def __init__(self, *args, **kwargs):
        helper.RestHelper.__init__(self, *args, **kwargs)
        # Global switch: False makes every call answer with an error.
        self.test_normal = True
        self.deviceid = None
        # Flags recorded by do_call() for later assertions.
        self.delete_flag = False
        self.allow_flag = False
        self.deny_flag = False
        self.create_snapflag = False
        self.setupserver_flag = False
        # Response-selection switches preset by tests.
        self.fs_status_flag = True
        self.create_share_flag = False
        self.snapshot_flag = True
        self.service_status_flag = True
        self.share_exist = True
        self.service_nfs_status_flag = True
        self.create_share_data_flag = False
        self.allow_ro_flag = False
        self.allow_rw_flag = False
        self.extend_share_flag = False
        self.shrink_share_flag = False
        self.test_multi_url_flag = 0

    def _change_file_mode(self, filepath):
        # No-op: avoid touching the local filesystem during tests.
        pass

    def do_call(self, url, data=None, method=None):
        """Dispatch on the REST *url* and return a parsed canned response."""
        # Strip the device prefix so comparisons below can match on the
        # bare resource path.
        url = url.replace('http://100.115.10.69:8082/deviceManager/rest', '')
        url = url.replace('/210235G7J20000000000/', '')
        if self.test_normal:
            # Multi-URL failover simulation: flag 1 always fails with
            # -403, flag 2 only accepts the secondary controller URL.
            if self.test_multi_url_flag == 1:
                data = '{"error":{"code":-403}}'
                res_json = jsonutils.loads(data)
                return res_json
            elif self.test_multi_url_flag == 2:
                if 'http://100.115.10.70:8082/deviceManager/rest' in url:
                    url = url.replace('http://100.115.10.70:8082/'
                                      'deviceManager/rest', '')
                else:
                    data = '{"error":{"code":-403}}'
                    res_json = jsonutils.loads(data)
                    return res_json
            if url == "/xx/sessions" or url == "sessions":
                data = data_session(url)
            if url == "storagepool":
                data = """{"error":{"code":0},
                    "data":[{"USERFREECAPACITY":"2097152",
                    "ID":"1",
                    "NAME":"OpenStack_Pool",
                    "USERTOTALCAPACITY":"4194304",
                    "USAGETYPE":"2",
                    "USERCONSUMEDCAPACITY":"2097152"}]}"""
            if url == "filesystem":
                data = """{"error":{"code":0},"data":{
                    "ID":"4"}}"""
            # Share creation: the two switches select duplicate-share and
            # missing-payload error shapes.
            if url == "NFSHARE" or url == "CIFSHARE":
                if self.create_share_flag:
                    data = '{"error":{"code":31755596}}'
                elif self.create_share_data_flag:
                    data = '{"error":{"code":0}}'
                else:
                    data = """{"error":{"code":0},"data":{
                        "ID":"10"}}"""
            if url == "NFSHARE?range=[100-200]":
                if self.share_exist:
                    data = """{"error":{"code":0},
                        "data":[{"ID":"1",
                        "FSID":"4",
                        "NAME":"test",
                        "SHAREPATH":"/share_fake_uuid/"}]}"""
                else:
                    data = """{"error":{"code":0},
                        "data":[{"ID":"1",
                        "FSID":"",
                        "NAME":"test",
                        "SHAREPATH":"/share_fake_uuid_fail/"}]}"""
            if url == "CIFSHARE?range=[100-200]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"2",
                    "FSID":"4",
                    "NAME":"test",
                    "SHAREPATH":"/share_fake_uuid/"}]}"""
            if url == "NFSHARE?range=[0-100]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"1",
                    "FSID":"4",
                    "NAME":"test_fail",
                    "SHAREPATH":"/share_fake_uuid_fail/"}]}"""
            if url == "CIFSHARE?range=[0-100]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"2",
                    "FSID":"4",
                    "NAME":"test_fail",
                    "SHAREPATH":"/share_fake_uuid_fail/"}]}"""
            if url == "NFSHARE/1" or url == "CIFSHARE/2":
                data = """{"error":{"code":0}}"""
                self.delete_flag = True
            if url == "FSSNAPSHOT":
                data = """{"error":{"code":0},"data":{
                    "ID":"3"}}"""
                self.create_snapflag = True
            if url == "FSSNAPSHOT/4@share_snapshot_fake_snapshot_uuid":
                if self.snapshot_flag:
                    data = """{"error":{"code":0},"data":{"ID":"3"}}"""
                else:
                    data = '{"error":{"code":1073754118}}'
                self.delete_flag = True
            if url == "FSSNAPSHOT/3":
                data = """{"error":{"code":0}}"""
                self.delete_flag = True
            if url == "NFS_SHARE_AUTH_CLIENT":
                data, self.allow_ro_flag, self.allow_rw_flag = \
                    allow_access('NFS', method, data)
                self.allow_flag = True
            if url == "CIFS_SHARE_AUTH_CLIENT":
                data, self.allow_ro_flag, self.allow_rw_flag = \
                    allow_access('CIFS', method, data)
                self.allow_flag = True
            if url == "FSSNAPSHOT?TYPE=48&PARENTID=4"\
                      "&&sortby=TIMESTAMP,d&range=[0-2000]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"3",
                    "NAME":"share_snapshot_fake_snapshot_uuid"}]}"""
                self.delete_flag = True
            # Access-client listings: the [0-100] page returns non-matching
            # names, the [100-200] page the expected ones.
            if url == "NFS_SHARE_AUTH_CLIENT?"\
                      "filter=PARENTID::1&range=[0-100]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"0",
                    "NAME":"100.112.0.1_fail"}]}"""
            if url == "CIFS_SHARE_AUTH_CLIENT?"\
                      "filter=PARENTID::2&range=[0-100]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"0",
                    "NAME":"user_name_fail"}]}"""
            if url == "NFS_SHARE_AUTH_CLIENT?"\
                      "filter=PARENTID::1&range=[100-200]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"5",
                    "NAME":"100.112.0.1"}]}"""
            if url == "CIFS_SHARE_AUTH_CLIENT?"\
                      "filter=PARENTID::2&range=[100-200]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"6",
                    "NAME":"user_name"}]}"""
            if url == "NFS_SHARE_AUTH_CLIENT/5"\
                    or url == "CIFS_SHARE_AUTH_CLIENT/6":
                data = """{"error":{"code":0}}"""
                self.deny_flag = True
            if url == "NFSHARE/count" or url == "CIFSHARE/count":
                data = """{"error":{"code":0},"data":{
                    "COUNT":"196"}}"""
            if url == "NFS_SHARE_AUTH_CLIENT/count?filter=PARENTID::1"\
                    or url == "CIFS_SHARE_AUTH_CLIENT/count?filter="\
                              "PARENTID::2":
                data = """{"error":{"code":0},"data":{
                    "COUNT":"196"}}"""
            # Service status: RUNNINGSTATUS "2" means running, "1" stopped.
            if url == "CIFSSERVICE":
                if self.service_status_flag:
                    data = """{"error":{"code":0},"data":{
                        "RUNNINGSTATUS":"2"}}"""
                else:
                    data = """{"error":{"code":0},"data":{
                        "RUNNINGSTATUS":"1"}}"""
            if url == "NFSSERVICE":
                if self.service_nfs_status_flag:
                    data = """{"error":{"code":0},
                        "data":{"RUNNINGSTATUS":"2",
                        "SUPPORTV3":"true",
                        "SUPPORTV4":"true"}}"""
                else:
                    data = """{"error":{"code":0},
                        "data":{"RUNNINGSTATUS":"1",
                        "SUPPORTV3":"true",
                        "SUPPORTV4":"true"}}"""
                self.setupserver_flag = True
            if url == "FILESYSTEM?range=[0-8191]":
                data = """{"error":{"code":0},
                    "data":[{"ID":"4",
                    "NAME":"share_fake_uuid"}]}"""
            if url == "filesystem/4":
                data, self.extend_share_flag, self.shrink_share_flag = (
                    filesystem(method, data, self.fs_status_flag))
                self.delete_flag = True
        else:
            data = '{"error":{"code":31755596}}'
        res_json = jsonutils.loads(data)
        return res_json
class FakeHuaweiNasDriver(huawei_nas.HuaweiNasDriver):
    """HuaweiNasDriver wired to the fake V3 storage connection."""

    def __init__(self, *args, **kwargs):
        super(FakeHuaweiNasDriver, self).__init__(*args, **kwargs)
        # Replace the real plugin with the fake connection so no device
        # is ever contacted.
        self.plugin = FakeV3StorageConnection(self.configuration)
class FakeV3StorageConnection(connection.V3StorageConnection):
    """V3StorageConnection backed by the fake REST helper."""

    def __init__(self, configuration):
        super(FakeV3StorageConnection, self).__init__(configuration)
        self.configuration = configuration
        # All REST traffic goes through the canned-response helper.
        self.helper = FakeHuaweiNasHelper(self.configuration)
@ddt.ddt
class HuaweiShareDriverTestCase(test.TestCase):
"""Tests GenericShareDriver."""
def setUp(self):
    """Build a fake Huawei driver plus the canned share/snapshot fixtures."""
    super(HuaweiShareDriverTestCase, self).setUp()
    self._context = context.get_admin_context()
    # Per-test conf file in a throw-away directory, removed on cleanup.
    self.tmp_dir = tempfile.mkdtemp()
    self.fake_conf_file = self.tmp_dir + '/manila_huawei_conf.xml'
    self.addCleanup(shutil.rmtree, self.tmp_dir)
    self.create_fake_conf_file(self.fake_conf_file)
    self.addCleanup(os.remove, self.fake_conf_file)

    def _safe_get(opt):
        # Late-binding closure: reads whatever self.configuration is
        # at call time.
        return getattr(self.configuration, opt)
    self.configuration = mock.Mock(spec=conf.Configuration)
    self.configuration.safe_get = mock.Mock(side_effect=_safe_get)
    self.configuration.network_config_group = 'fake_network_config_group'
    self.configuration.share_backend_name = 'fake_share_backend_name'
    self.configuration.huawei_share_backend = 'V3'
    self.configuration.manila_huawei_conf_file = self.fake_conf_file
    self.configuration.driver_handles_share_servers = False
    self._helper_fake = mock.Mock()
    self.mock_object(huawei_nas.importutils, 'import_object',
                     mock.Mock(return_value=self._helper_fake))
    self.mock_object(time, 'sleep', fake_sleep)
    self.driver = FakeHuaweiNasDriver(configuration=self.configuration)
    self.driver.plugin.helper.test_normal = True
    # NOTE: the 'host' key was previously duplicated in this literal
    # (same value twice); the redundant entry has been removed.
    self.share_nfs = {
        'id': 'fake_uuid',
        'project_id': 'fake_tenant_id',
        'display_name': 'fake',
        'name': 'share-fake-uuid',
        'size': 1,
        'share_proto': 'NFS',
        'share_network_id': 'fake_net_id',
        'share_server_id': 'fake-share-srv-id',
        'export_locations': [
            {'path': '100.115.10.68:/share_fake_uuid'},
        ],
        'host': 'fake_host@fake_backend#OpenStack_Pool',
        'share_type_id': 'fake_id',
    }
    self.share_manage_nfs = {
        'id': 'fake_uuid',
        'project_id': 'fake_tenant_id',
        'display_name': 'fake',
        'name': 'share-fake-manage-uuid',
        'size': 1,
        'share_proto': 'NFS',
        'share_network_id': 'fake_net_id',
        'share_server_id': 'fake-share-srv-id',
        'export_locations': [
            {'path': '100.115.10.68:/share_fake_uuid'},
        ],
        'host': 'fake_host@fake_backend#OpenStack_Pool',
        'share_type_id': 'fake_id',
    }
    self.share_pool_name_not_match = {
        'id': 'fake_uuid',
        'project_id': 'fake_tenant_id',
        'display_name': 'fake',
        'name': 'share-fake-manage-uuid',
        'size': 1,
        'share_proto': 'NFS',
        'share_network_id': 'fake_net_id',
        'share_server_id': 'fake-share-srv-id',
        'export_locations': [
            {'path': '100.115.10.68:/share_fake_uuid'},
        ],
        'host': 'fake_host@fake_backend#OpenStack_Pool_not_match',
        'share_type_id': 'fake_id',
    }
    self.share_proto_fail = {
        'id': 'fake_uuid',
        'project_id': 'fake_tenant_id',
        'display_name': 'fake',
        'name': 'share-fake-uuid',
        'size': 1,
        'share_proto': 'proto_fail',
        'share_network_id': 'fake_net_id',
        'share_server_id': 'fake-share-srv-id',
        'host': 'fake_host@fake_backend#OpenStack_Pool',
    }
    self.share_cifs = {
        'id': 'fake_uuid',
        'project_id': 'fake_tenant_id',
        'display_name': 'fake',
        'name': 'share-fake-uuid',
        'size': 1,
        'share_proto': 'CIFS',
        'share_network_id': 'fake_net_id',
        'share_server_id': 'fake-share-srv-id',
        'export_locations': [
            {'path': 'share_fake_uuid'},
        ],
        'host': 'fake_host@fake_backend#OpenStack_Pool',
        'share_type_id': 'fake_id',
    }
    self.share_manage_cifs = {
        'id': 'fake_uuid',
        'project_id': 'fake_tenant_id',
        'display_name': 'fake',
        'name': 'share-fake-manage-uuid',
        'size': 1,
        'share_proto': 'CIFS',
        'share_network_id': 'fake_net_id',
        'share_server_id': 'fake-share-srv-id',
        'export_locations': [
            {'path': '\\\\100.115.10.68\\share_fake_uuid'},
        ],
        'host': 'fake_host@fake_backend#OpenStack_Pool',
        'share_type_id': 'fake_id',
    }
    self.nfs_snapshot = {
        'id': 'fake_snapshot_uuid',
        'share_name': 'share_fake_uuid',
        'share_id': 'fake_uuid',
        'display_name': 'snapshot',
        'name': 'fake_snapshot_name',
        'share_size': 1,
        'size': 1,
        'share_proto': 'NFS',
    }
    self.cifs_snapshot = {
        'id': 'fake_snapshot_uuid',
        'share_name': 'share_fake_uuid',
        'share_id': 'fake_uuid',
        'display_name': 'snapshot',
        'name': 'fake_snapshot_name',
        'share_size': 1,
        'size': 1,
        'share_proto': 'CIFS',
    }
    self.security_service = {
        'id': 'fake_id',
        'domain': 'FAKE',
        'server': 'fake_server',
        'user': 'fake_user',
        'password': 'fake_password',
    }
    self.access_ip = {
        'access_type': 'ip',
        'access_to': '100.112.0.1',
        'access_level': 'rw',
    }
    self.access_user = {
        'access_type': 'user',
        'access_to': 'user_name',
        'access_level': 'rw',
    }
    self.driver_options = {
        'volume_id': 'fake',
    }
    self.share_server = None
    self.driver._licenses = ['fake']
    self.network_info = {
        'server_id': 'fake_server_id',
        'cidr': '10.0.0.0/24',
        'security_services': ['fake_ldap', 'fake_kerberos', 'fake_ad', ],
        'segmentation_id': '1000',
        'network_allocations': [
            {'id': 'fake_na_id_1', 'ip_address': 'fake_ip_1', },
            {'id': 'fake_na_id_2', 'ip_address': 'fake_ip_2', },
        ],
    }
    self.share_nfs_host_not_exist = {
        'id': 'fake_uuid',
        'project_id': 'fake_tenant_id',
        'display_name': 'fake',
        'name': 'share-fake-uuid',
        'size': 1,
        'share_proto': 'NFS',
        'share_network_id': 'fake_net_id',
        'share_server_id': 'fake-share-srv-id',
        'host': 'fake_host@fake_backend#',
    }
    self.share_nfs_storagepool_fail = {
        'id': 'fake_uuid',
        'project_id': 'fake_tenant_id',
        'display_name': 'fake',
        'name': 'share-fake-uuid',
        'size': 1,
        'share_proto': 'NFS',
        'share_network_id': 'fake_net_id',
        'share_server_id': 'fake-share-srv-id',
        'host': 'fake_host@fake_backend#OpenStack_Pool2',
    }
    fake_extra_specs = {
        u'driver_handles_share_servers': u'False',
    }
    fake_share_type_id = u'fake_id'
    self.fake_type_extra = {
        'test_with_extra': {
            'created_at': 'fake_time',
            'deleted': '0',
            'deleted_at': None,
            'extra_specs': fake_extra_specs,
            'required_extra_specs': {},
            'id': fake_share_type_id,
            'name': u'test_with_extra',
            'updated_at': None
        }
    }
def test_conf_product_fail(self):
    """A conf file missing the Product node must fail validation."""
    self.recreate_fake_conf_file(product_flag=False)
    plugin = self.driver.plugin
    plugin.configuration.manila_huawei_conf_file = self.fake_conf_file
    self.assertRaises(exception.InvalidInput, plugin.check_conf_file)
def test_conf_pool_node_fail(self):
    """A conf file missing the pool node must fail validation."""
    self.recreate_fake_conf_file(pool_node_flag=False)
    plugin = self.driver.plugin
    plugin.configuration.manila_huawei_conf_file = self.fake_conf_file
    self.assertRaises(exception.InvalidInput, plugin.check_conf_file)
def test_conf_username_fail(self):
    """A conf file missing the UserName node must fail validation."""
    self.recreate_fake_conf_file(username_flag=False)
    plugin = self.driver.plugin
    plugin.configuration.manila_huawei_conf_file = self.fake_conf_file
    self.assertRaises(exception.InvalidInput, plugin.check_conf_file)
def test_conf_timeout_fail(self):
    """Without a Timeout node the default of 60 seconds is used."""
    self.recreate_fake_conf_file(timeout_flag=False)
    plugin = self.driver.plugin
    plugin.configuration.manila_huawei_conf_file = self.fake_conf_file
    self.assertEqual(60, plugin._get_timeout())
def test_conf_wait_interval_fail(self):
    """Without a WaitInterval node the default of 3 seconds is used."""
    self.recreate_fake_conf_file(wait_interval_flag=False)
    plugin = self.driver.plugin
    plugin.configuration.manila_huawei_conf_file = self.fake_conf_file
    self.assertEqual(3, plugin._get_wait_interval())
def test_get_backend_driver_fail(self):
    """A None conf-file path makes backend selection raise InvalidInput."""
    self.driver.plugin.configuration.manila_huawei_conf_file = None
    self.assertRaises(exception.InvalidInput,
                      self.driver.get_backend_driver)
def test_get_backend_driver_fail_driver_none(self):
    """A conf file without a Product node yields no backend driver."""
    self.recreate_fake_conf_file(product_flag=False)
    plugin = self.driver.plugin
    plugin.configuration.manila_huawei_conf_file = self.fake_conf_file
    self.assertRaises(exception.InvalidInput,
                      self.driver.get_backend_driver)
def test_create_share_nfs_alloctype_fail(self):
    """An unknown AllocType in the conf file breaks share creation."""
    self.recreate_fake_conf_file(alloctype_value='alloctype_fail')
    plugin = self.driver.plugin
    plugin.configuration.manila_huawei_conf_file = self.fake_conf_file
    plugin.helper.login()
    self.assertRaises(exception.InvalidShare, self.driver.create_share,
                      self._context, self.share_nfs, self.share_server)
def test_create_share_storagepool_not_exist(self):
    """A host string with an empty pool name raises InvalidHost."""
    self.driver.plugin.helper.login()
    self.assertRaises(exception.InvalidHost, self.driver.create_share,
                      self._context, self.share_nfs_host_not_exist,
                      self.share_server)
def test_create_share_nfs_storagepool_fail(self):
    """A pool name unknown to the backend raises InvalidHost."""
    self.driver.plugin.helper.login()
    self.assertRaises(exception.InvalidHost, self.driver.create_share,
                      self._context, self.share_nfs_storagepool_fail,
                      self.share_server)
def test_create_share_nfs_no_data_fail(self):
    """A share-create response without a data body raises InvalidShare."""
    helper = self.driver.plugin.helper
    helper.create_share_data_flag = True
    helper.login()
    self.assertRaises(exception.InvalidShare, self.driver.create_share,
                      self._context, self.share_nfs, self.share_server)
def test_read_xml_fail(self):
    """Reading a None conf-file path raises InvalidInput."""
    plugin = self.driver.plugin
    plugin.configuration.manila_huawei_conf_file = None
    self.assertRaises(exception.InvalidInput, plugin.helper._read_xml)
def test_connect_fail(self):
    """Connecting without any configuration raises InvalidInput."""
    self.driver.plugin.configuration = None
    self.assertRaises(exception.InvalidInput, self.driver.plugin.connect)
def test_login_success(self):
    """A successful login returns the backend device id."""
    self.assertEqual("210235G7J20000000000",
                     self.driver.plugin.helper.login())
def test_check_for_setup_success(self):
    """check_for_setup_error passes when both services run."""
    self.driver.plugin.helper.login()
    self.driver.check_for_setup_error()
def test_check_for_setup_service_down(self):
    """A stopped CIFS service does not fail the setup check."""
    helper = self.driver.plugin.helper
    helper.service_status_flag = False
    helper.login()
    self.driver.check_for_setup_error()
def test_check_for_setup_nfs_down(self):
    """A stopped NFS service does not fail the setup check."""
    helper = self.driver.plugin.helper
    helper.service_nfs_status_flag = False
    helper.login()
    self.driver.check_for_setup_error()
def test_check_for_setup_service_false(self):
    """A backend error during the service query raises InvalidShare."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare,
                      self.driver.check_for_setup_error)
def test_create_share_nfs_alloctype_thin_success(self):
    """Creating a thin-provisioned NFS share returns its export path."""
    self.recreate_fake_conf_file(alloctype_value='Thin')
    plugin = self.driver.plugin
    plugin.configuration.manila_huawei_conf_file = self.fake_conf_file
    plugin.helper.login()
    location = self.driver.create_share(self._context, self.share_nfs,
                                        self.share_server)
    self.assertEqual("100.115.10.68:/share_fake_uuid", location)
def test_shrink_share_success(self):
    """Shrinking a share issues the capacity-reduce call."""
    helper = self.driver.plugin.helper
    helper.shrink_share_flag = False
    helper.login()
    self.driver.shrink_share(self.share_nfs, 1, self.share_server)
    self.assertTrue(helper.shrink_share_flag)
def test_shrink_share_fail(self):
    """A backend error during shrink raises InvalidShare."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare, self.driver.shrink_share,
                      self.share_nfs, 1, self.share_server)
def test_shrink_share_size_fail(self):
    """Shrinking to more than the used size raises InvalidShare."""
    self.driver.plugin.helper.login()
    self.assertRaises(exception.InvalidShare, self.driver.shrink_share,
                      self.share_nfs, 5, self.share_server)
def test_shrink_share_alloctype_fail(self):
    """Shrinking a filesystem with a bad status raises InvalidShare."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.fs_status_flag = False
    self.assertRaises(exception.InvalidShare, self.driver.shrink_share,
                      self.share_nfs, 1, self.share_server)
def test_shrink_share_not_exist(self):
    """Shrinking a share that is absent on the backend fails."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.share_exist = False
    self.assertRaises(exception.InvalidShare, self.driver.shrink_share,
                      self.share_nfs, 1, self.share_server)
def test_extend_share_success(self):
    """Extending a share issues the capacity-grow call."""
    helper = self.driver.plugin.helper
    helper.extend_share_flag = False
    helper.login()
    self.driver.extend_share(self.share_nfs, 4, self.share_server)
    self.assertTrue(helper.extend_share_flag)
def test_extend_share_fail(self):
    """A backend error during extend raises InvalidShare."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare, self.driver.extend_share,
                      self.share_nfs, 4, self.share_server)
def test_extend_share_not_exist(self):
    """Extending a share that is absent raises InvalidShareAccess."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.share_exist = False
    self.assertRaises(exception.InvalidShareAccess,
                      self.driver.extend_share,
                      self.share_nfs, 4, self.share_server)
def test_create_share_nfs_success(self):
    """Creating an NFS share returns the NFS export location."""
    self.driver.plugin.helper.login()
    location = self.driver.create_share(self._context, self.share_nfs,
                                        self.share_server)
    self.assertEqual("100.115.10.68:/share_fake_uuid", location)
def test_create_share_cifs_success(self):
    """Creating a CIFS share returns the UNC export location."""
    self.driver.plugin.helper.login()
    location = self.driver.create_share(self._context, self.share_cifs,
                                        self.share_server)
    self.assertEqual("\\\\100.115.10.68\\share_fake_uuid", location)
def test_login_fail(self):
    """A backend error during login raises InvalidShare."""
    helper = self.driver.plugin.helper
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare, helper.login)
def test_create_share_nfs_fs_fail(self):
    """A filesystem-create error surfaces as InvalidShare (NFS)."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare, self.driver.create_share,
                      self._context, self.share_nfs, self.share_server)
def test_create_share_nfs_status_fail(self):
    """An unhealthy filesystem status aborts NFS share creation."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.fs_status_flag = False
    self.assertRaises(exception.InvalidShare, self.driver.create_share,
                      self._context, self.share_nfs, self.share_server)
def test_create_share_cifs_fs_fail(self):
    """A filesystem-create error surfaces as InvalidShare (CIFS)."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare, self.driver.create_share,
                      self._context, self.share_cifs, self.share_server)
def test_create_share_cifs_fail(self):
    """A share-create backend error raises InvalidShare (CIFS)."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.create_share_flag = True
    self.assertRaises(exception.InvalidShare, self.driver.create_share,
                      self._context, self.share_cifs, self.share_server)
def test_create_share_nfs_fail(self):
    """A share-create backend error raises InvalidShare (NFS)."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.create_share_flag = True
    self.assertRaises(exception.InvalidShare, self.driver.create_share,
                      self._context, self.share_nfs, self.share_server)
def test_delete_share_nfs_success(self):
    """Deleting an NFS share reaches the backend delete call."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.delete_flag = False
    self.driver.delete_share(self._context, self.share_nfs,
                             self.share_server)
    self.assertTrue(helper.delete_flag)
def test_check_snapshot_id_exist_fail(self):
    """A backend error during the snapshot lookup raises InvalidShare."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare,
                      helper._check_snapshot_id_exist, "4")
def test_delete_share_nfs_fail_not_exist(self):
    """Deleting an absent share still removes the filesystem."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.delete_flag = False
    helper.share_exist = False
    self.driver.delete_share(self._context, self.share_nfs,
                             self.share_server)
    self.assertTrue(helper.delete_flag)
def test_delete_share_cifs_success(self):
    """Deleting a CIFS share reaches the backend delete call."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.delete_flag = False
    self.driver.delete_share(self._context, self.share_cifs,
                             self.share_server)
    self.assertTrue(helper.delete_flag)
def test_get_network_allocations_number(self):
    """The driver needs no network allocations (DHSS=False)."""
    self.assertEqual(0, self.driver.get_network_allocations_number())
def test_create_share_from_snapshot(self):
    """Creating a share from a snapshot is not implemented."""
    self.assertRaises(NotImplementedError,
                      self.driver.create_share_from_snapshot,
                      self._context, self.share_nfs, self.nfs_snapshot,
                      self.share_server)
def test_get_share_stats_refresh_pool_not_exist(self):
    """Stats refresh without a configured pool raises InvalidInput."""
    plugin = self.driver.plugin
    plugin.helper.login()
    self.recreate_fake_conf_file(pool_node_flag=False)
    plugin.configuration.manila_huawei_conf_file = self.fake_conf_file
    self.assertRaises(exception.InvalidInput,
                      self.driver._update_share_stats)
def test_get_share_stats_refresh(self):
    """_update_share_stats publishes backend- and pool-level stats."""
    self.driver.plugin.helper.login()
    self.driver._update_share_stats()
    # Pool figures derive from the fake storagepool response
    # (capacities reported in sectors, converted to GiB).
    expected = {
        "share_backend_name": "fake_share_backend_name",
        "driver_handles_share_servers": False,
        "vendor_name": 'Huawei',
        "driver_version": '1.1',
        "storage_protocol": 'NFS_CIFS',
        'reserved_percentage': 0,
        'total_capacity_gb': 0.0,
        'free_capacity_gb': 0.0,
        'QoS_support': False,
        "pools": [{
            'pool_name': 'OpenStack_Pool',
            'total_capacity_gb': 2,
            'free_capacity_gb': 1,
            'allocated_capacity_gb': 1,
            'QoS_support': False,
            'reserved_percentage': 0,
        }],
    }
    self.assertEqual(expected, self.driver._stats)
def test_allow_access_proto_fail(self):
    """Allowing access on an unknown protocol raises InvalidInput."""
    self.driver.plugin.helper.login()
    self.assertRaises(exception.InvalidInput, self.driver.allow_access,
                      self._context, self.share_proto_fail,
                      self.access_ip, self.share_server)
def test_allow_access_ip_rw_success(self):
    """Allowing rw IP access on an NFS share sets the helper flags."""
    helper = self.driver.plugin.helper
    helper.login()
    # Reset the flags on the fake helper -- the object asserted below.
    # Previously they were reset on the test case itself, which had no
    # effect on what the assertions read.
    helper.allow_flag = False
    helper.allow_rw_flag = False
    self.driver.allow_access(self._context,
                             self.share_nfs,
                             self.access_ip,
                             self.share_server)
    self.assertTrue(helper.allow_flag)
    self.assertTrue(helper.allow_rw_flag)
def test_allow_access_ip_ro_success(self):
    """Allowing ro IP access on an NFS share sets the helper flags."""
    access_ro = {
        'access_type': 'ip',
        'access_to': '1.2.3.4',
        'access_level': 'ro',
    }
    helper = self.driver.plugin.helper
    helper.login()
    # Reset the flags on the fake helper (previously reset on the test
    # case itself, which the assertions below never read).
    helper.allow_flag = False
    helper.allow_ro_flag = False
    self.driver.allow_access(self._context,
                             self.share_nfs,
                             access_ro,
                             self.share_server)
    self.assertTrue(helper.allow_flag)
    self.assertTrue(helper.allow_ro_flag)
def test_allow_access_user_rw_success(self):
    """Allowing rw user access on a CIFS share sets the helper flags."""
    helper = self.driver.plugin.helper
    helper.login()
    # Reset the flags on the fake helper (previously reset on the test
    # case itself, which the assertions below never read).
    helper.allow_flag = False
    helper.allow_rw_flag = False
    self.driver.allow_access(self._context, self.share_cifs,
                             self.access_user, self.share_server)
    self.assertTrue(helper.allow_flag)
    self.assertTrue(helper.allow_rw_flag)
def test_allow_access_user_ro_success(self):
    """Allowing ro user access on a CIFS share sets the helper flags."""
    access_ro = {
        'access_type': 'user',
        'access_to': 'user_name',
        'access_level': 'ro',
    }
    helper = self.driver.plugin.helper
    helper.login()
    # Reset the flags on the fake helper (previously reset on the test
    # case itself, which the assertions below never read).
    helper.allow_flag = False
    helper.allow_ro_flag = False
    self.driver.allow_access(self._context, self.share_cifs,
                             access_ro, self.share_server)
    self.assertTrue(helper.allow_flag)
    self.assertTrue(helper.allow_ro_flag)
def test_allow_access_level_fail(self):
    """An unknown access level raises InvalidShareAccess."""
    access_fail = {
        'access_type': 'user',
        'access_to': 'user_name',
        'access_level': 'fail',
    }
    self.driver.plugin.helper.login()
    self.assertRaises(exception.InvalidShareAccess,
                      self.driver.allow_access, self._context,
                      self.share_cifs, access_fail, self.share_server)
def test_get_share_client_type_fail(self):
    """An unknown protocol has no access-client URL type."""
    helper = self.driver.plugin.helper
    self.assertRaises(exception.InvalidInput,
                      helper._get_share_client_type, 'fake_proto')
@ddt.data("NFS", "CIFS")
def test_get_share_url_type(self, share_proto):
    """The share URL type is the protocol name with 'HARE' appended."""
    helper = self.driver.plugin.helper
    self.assertEqual(share_proto + 'HARE',
                     helper._get_share_url_type(share_proto))
def test_get_location_path_fail(self):
    """An unknown protocol has no export-location format."""
    self.assertRaises(exception.InvalidShareAccess,
                      self.driver.plugin._get_location_path,
                      'share-fake-uuid', 'fake_proto')
def test_allow_access_ip_proto_fail(self):
    """A user rule on an NFS share raises InvalidShareAccess."""
    self.driver.plugin.helper.login()
    self.assertRaises(exception.InvalidShareAccess,
                      self.driver.allow_access, self._context,
                      self.share_nfs, self.access_user, self.share_server)
def test_allow_access_user_proto_fail(self):
    """An IP rule on a CIFS share raises InvalidShareAccess."""
    self.driver.plugin.helper.login()
    self.assertRaises(exception.InvalidShareAccess,
                      self.driver.allow_access, self._context,
                      self.share_cifs, self.access_ip, self.share_server)
def test_deny_access_ip_proto_fail(self):
    """Denying a user rule on an NFS share is a silent no-op."""
    self.driver.plugin.helper.login()
    result = self.driver.deny_access(self._context, self.share_nfs,
                                     self.access_user, self.share_server)
    # assertIsNone is the precise check (identity, not equality).
    self.assertIsNone(result)
def test_deny_access_user_proto_fail(self):
    """Denying an IP rule on a CIFS share is a silent no-op."""
    self.driver.plugin.helper.login()
    result = self.driver.deny_access(self._context, self.share_cifs,
                                     self.access_ip, self.share_server)
    # assertIsNone is the precise check (identity, not equality).
    self.assertIsNone(result)
def test_allow_access_ip_share_not_exist(self):
    """Allowing access on an absent share raises InvalidShareAccess."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.share_exist = False
    self.assertRaises(exception.InvalidShareAccess,
                      self.driver.allow_access, self._context,
                      self.share_nfs, self.access_ip, self.share_server)
def test_deny_access_ip_share_not_exist(self):
    """Denying access on an absent share completes without error."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.share_exist = False
    self.driver.deny_access(self._context, self.share_nfs,
                            self.access_ip, self.share_server)
def test_allow_access_ip_fail(self):
    """A backend error while allowing IP access raises InvalidShare."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare,
                      self.driver.allow_access, self._context,
                      self.share_nfs, self.access_ip, self.share_server)
def test_allow_access_user_fail(self):
    """A backend error while allowing user access raises InvalidShare."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare,
                      self.driver.allow_access, self._context,
                      self.share_cifs, self.access_user, self.share_server)
def test_deny_access_ip_success(self):
    """Denying IP access on an NFS share reaches the backend."""
    helper = self.driver.plugin.helper
    helper.login()
    # Reset the flag on the fake helper -- the object asserted below.
    # Previously it was reset on the test case itself, a no-op.
    helper.deny_flag = False
    self.driver.deny_access(self._context, self.share_nfs,
                            self.access_ip, self.share_server)
    self.assertTrue(helper.deny_flag)
def test_deny_access_user_success(self):
    """Denying user access on a CIFS share reaches the backend."""
    helper = self.driver.plugin.helper
    helper.login()
    # Reset the flag on the fake helper -- the object asserted below.
    # Previously it was reset on the test case itself, a no-op.
    helper.deny_flag = False
    self.driver.deny_access(self._context, self.share_cifs,
                            self.access_user, self.share_server)
    self.assertTrue(helper.deny_flag)
def test_deny_access_ip_fail(self):
    """A backend error while denying IP access raises InvalidShare."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare,
                      self.driver.deny_access, self._context,
                      self.share_nfs, self.access_ip, self.share_server)
def test_deny_access_user_fail(self):
    """A backend error while denying user access raises InvalidShare."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare,
                      self.driver.deny_access, self._context,
                      self.share_cifs, self.access_user, self.share_server)
def test_create_nfs_snapshot_success(self):
    """Creating an NFS snapshot reaches the backend snapshot call."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.create_snapflag = False
    self.driver.create_snapshot(self._context, self.nfs_snapshot,
                                self.share_server)
    self.assertTrue(helper.create_snapflag)
def test_create_nfs_snapshot_share_not_exist(self):
    """Snapshotting an absent share raises InvalidInput."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.share_exist = False
    self.assertRaises(exception.InvalidInput,
                      self.driver.create_snapshot, self._context,
                      self.nfs_snapshot, self.share_server)
def test_create_cifs_snapshot_success(self):
    """Creating a CIFS snapshot reaches the backend snapshot call."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.create_snapflag = False
    self.driver.create_snapshot(self._context, self.cifs_snapshot,
                                self.share_server)
    self.assertTrue(helper.create_snapflag)
def test_delete_snapshot_success(self):
    """Deleting an existing snapshot reaches the backend delete call."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.delete_flag = False
    helper.snapshot_flag = True
    self.driver.delete_snapshot(self._context, self.nfs_snapshot,
                                self.share_server)
    self.assertTrue(helper.delete_flag)
def test_delete_snapshot_not_exist_success(self):
    """Deleting a missing snapshot still completes the delete path."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.delete_flag = False
    helper.snapshot_flag = False
    self.driver.delete_snapshot(self._context, self.nfs_snapshot,
                                self.share_server)
    self.assertTrue(helper.delete_flag)
def test_create_nfs_snapshot_fail(self):
    """A backend error while creating an NFS snapshot fails."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare,
                      self.driver.create_snapshot, self._context,
                      self.nfs_snapshot, self.share_server)
def test_create_cifs_snapshot_fail(self):
    """A backend error while creating a CIFS snapshot fails."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare,
                      self.driver.create_snapshot, self._context,
                      self.cifs_snapshot, self.share_server)
def test_delete_nfs_snapshot_fail(self):
    """A backend error while deleting an NFS snapshot fails."""
    helper = self.driver.plugin.helper
    helper.login()
    helper.test_normal = False
    self.assertRaises(exception.InvalidShare,
                      self.driver.delete_snapshot, self._context,
                      self.nfs_snapshot, self.share_server)
    def test_delete_cifs_snapshot_fail(self):
        """Backend error during CIFS snapshot deletion raises InvalidShare."""
        self.driver.plugin.helper.login()
        # test_normal=False makes the fake backend return an error status.
        self.driver.plugin.helper.test_normal = False
        self.assertRaises(exception.InvalidShare,
                          self.driver.delete_snapshot, self._context,
                          self.cifs_snapshot, self.share_server)
@ddt.data({"share_proto": "NFS",
"path": ["100.115.10.68:/share_fake_manage_uuid"]},
{"share_proto": "CIFS",
"path": ["\\\\100.115.10.68\\share_fake_manage_uuid"]})
@ddt.unpack
def test_manage_share_nfs_success(self, share_proto, path):
if share_proto == "NFS":
share = self.share_manage_nfs
elif share_proto == "CIFS":
share = self.share_manage_cifs
share_type = self.fake_type_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
share_info = self.driver.manage_existing(share,
self.driver_options)
self.assertEqual(4, share_info["size"])
self.assertEqual(path,
share_info["export_locations"])
@ddt.data({"flag": "share_not_exist", "exc": exception.InvalidShare},
{"flag": "fs_status_error", "exc": exception.InvalidShare},
{"flag": "poolname_not_match", "exc": exception.InvalidHost})
@ddt.unpack
def test_manage_share_fail(self, flag, exc):
share = None
if flag == "share_not_exist":
self.driver.plugin.helper.share_exist = False
share = self.share_nfs
elif flag == "fs_status_error":
self.driver.plugin.helper.fs_status_flag = False
share = self.share_nfs
elif flag == "poolname_not_match":
share = self.share_pool_name_not_match
self.driver.plugin.helper.login()
share_type = self.fake_type_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.assertRaises(exc,
self.driver.manage_existing,
share,
self.driver_options)
@ddt.data({"share_proto": "NFS",
"export_path": "fake_ip:/share_fake_uuid"},
{"share_proto": "NFS", "export_path": "fake_ip:/"},
{"share_proto": "NFS",
"export_path": "100.112.0.1://share_fake_uuid"},
{"share_proto": "NFS", "export_path": None},
{"share_proto": "NFS", "export_path": "\\share_fake_uuid"},
{"share_proto": "CIFS",
"export_path": "\\\\fake_ip\\share_fake_uuid"},
{"share_proto": "CIFS",
"export_path": "\\dd\\100.115.10.68\\share_fake_uuid"})
@ddt.unpack
def test_manage_export_path_fail(self, share_proto, export_path):
share_manage_nfs_export_path_fail = {
'id': 'fake_uuid',
'project_id': 'fake_tenant_id',
'display_name': 'fake',
'name': 'share-fake-manage-uuid',
'size': 1,
'share_proto': share_proto,
'share_network_id': 'fake_net_id',
'share_server_id': 'fake-share-srv-id',
'export_locations': [
{'path': export_path},
],
'host': 'fake_host@fake_backend#OpenStack_Pool',
'share_type_id': 'fake_id'
}
share_type = self.fake_type_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
self.assertRaises(exception.InvalidInput,
self.driver.manage_existing,
share_manage_nfs_export_path_fail,
self.driver_options)
def test_manage_logical_port_ip_fail(self):
self.recreate_fake_conf_file(logical_port_ip="")
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin.helper.login()
share_type = self.fake_type_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.assertRaises(exception.InvalidInput,
self.driver.manage_existing,
self.share_nfs,
self.driver_options)
def test_manage_existing_share_type_mismatch(self):
fake_extra_specs = {
u'driver_handles_share_servers': u'True',
}
fake_share_type_id = u'fake_id'
fake_type_mismatch_extra = {
'test_with_extra': {
'created_at': 'fake_time',
'deleted': '0',
'deleted_at': None,
'extra_specs': fake_extra_specs,
'required_extra_specs': {},
'id': fake_share_type_id,
'name': u'test_with_extra',
'updated_at': None
}
}
share_type = fake_type_mismatch_extra['test_with_extra']
self.mock_object(db,
'share_type_get',
mock.Mock(return_value=share_type))
self.driver.plugin.helper.login()
self.assertRaises(exception.ManageExistingShareTypeMismatch,
self.driver.manage_existing,
self.share_nfs,
self.driver_options)
    def test_get_pool_success(self):
        """get_pool resolves the pool name via the backend helper.

        NOTE(review): the fixture name suggests the share's host field lacks
        a pool component, forcing a backend lookup — confirm against setUp.
        """
        self.driver.plugin.helper.login()
        pool_name = self.driver.get_pool(self.share_nfs_host_not_exist)
        self.assertEqual('OpenStack_Pool', pool_name)
def test_get_pool_fail(self):
self.driver.plugin.helper.login()
self.driver.plugin.helper.share_exist = False
pool_name = self.driver.get_pool(self.share_nfs_host_not_exist)
self.assertEqual(None, pool_name)
def test_multi_resturls_success(self):
self.recreate_fake_conf_file(multi_url=True)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_multi_url_flag = 2
location = self.driver.create_share(self._context, self.share_nfs,
self.share_server)
self.assertEqual("100.115.10.68:/share_fake_uuid", location)
def test_multi_resturls_fail(self):
self.recreate_fake_conf_file(multi_url=True)
self.driver.plugin.configuration.manila_huawei_conf_file = (
self.fake_conf_file)
self.driver.plugin.helper.login()
self.driver.plugin.helper.test_multi_url_flag = 1
self.assertRaises(exception.InvalidShare,
self.driver.create_share,
self._context,
self.share_nfs,
self.share_server)
def create_fake_conf_file(self, fake_conf_file,
product_flag=True, username_flag=True,
pool_node_flag=True, timeout_flag=True,
wait_interval_flag=True,
alloctype_value='Thick',
multi_url=False,
logical_port_ip='100.115.10.68'):
doc = xml.dom.minidom.Document()
config = doc.createElement('Config')
doc.appendChild(config)
storage = doc.createElement('Storage')
config.appendChild(storage)
controllerip0 = doc.createElement('LogicalPortIP')
controllerip0_text = doc.createTextNode(logical_port_ip)
controllerip0.appendChild(controllerip0_text)
storage.appendChild(controllerip0)
if product_flag:
product_text = doc.createTextNode('V3')
else:
product_text = doc.createTextNode('V3_fail')
product = doc.createElement('Product')
product.appendChild(product_text)
storage.appendChild(product)
if username_flag:
username_text = doc.createTextNode('admin')
else:
username_text = doc.createTextNode('')
username = doc.createElement('UserName')
username.appendChild(username_text)
storage.appendChild(username)
userpassword = doc.createElement('UserPassword')
userpassword_text = doc.createTextNode('Admin@storage')
userpassword.appendChild(userpassword_text)
storage.appendChild(userpassword)
url = doc.createElement('RestURL')
if multi_url:
url_text = doc.createTextNode('http://100.115.10.69:8082/'
'deviceManager/rest/;'
'http://100.115.10.70:8082/'
'deviceManager/rest/')
else:
url_text = doc.createTextNode('http://100.115.10.69:8082/'
'deviceManager/rest/')
url.appendChild(url_text)
storage.appendChild(url)
lun = doc.createElement('Filesystem')
config.appendChild(lun)
storagepool = doc.createElement('StoragePool')
if pool_node_flag:
pool_text = doc.createTextNode('OpenStack_Pool;OpenStack_Pool2; ;')
else:
pool_text = doc.createTextNode('')
storagepool.appendChild(pool_text)
timeout = doc.createElement('Timeout')
if timeout_flag:
timeout_text = doc.createTextNode('0')
else:
timeout_text = doc.createTextNode('')
timeout.appendChild(timeout_text)
waitinterval = doc.createElement('WaitInterval')
if wait_interval_flag:
waitinterval_text = doc.createTextNode('0')
else:
waitinterval_text = doc.createTextNode('')
waitinterval.appendChild(waitinterval_text)
alloctype = doc.createElement('AllocType')
alloctype_text = doc.createTextNode(alloctype_value)
alloctype.appendChild(alloctype_text)
lun.appendChild(timeout)
lun.appendChild(alloctype)
lun.appendChild(waitinterval)
lun.appendChild(storagepool)
prefetch = doc.createElement('Prefetch')
prefetch.setAttribute('Type', '0')
prefetch.setAttribute('Value', '0')
lun.appendChild(prefetch)
fakefile = open(fake_conf_file, 'w')
fakefile.write(doc.toprettyxml(indent=''))
fakefile.close()
    def recreate_fake_conf_file(self, product_flag=True, username_flag=True,
                                pool_node_flag=True, timeout_flag=True,
                                wait_interval_flag=True,
                                alloctype_value='Thick',
                                multi_url=False,
                                logical_port_ip='100.115.10.68'):
        """Write a fresh fake config XML into a new temp dir for this test.

        The resulting path is stored in ``self.fake_conf_file``.  Cleanups
        run LIFO, so the file is removed before the directory tree.
        """
        self.tmp_dir = tempfile.mkdtemp()
        self.fake_conf_file = self.tmp_dir + '/manila_huawei_conf.xml'
        self.addCleanup(shutil.rmtree, self.tmp_dir)
        self.create_fake_conf_file(self.fake_conf_file, product_flag,
                                   username_flag, pool_node_flag,
                                   timeout_flag, wait_interval_flag,
                                   alloctype_value, multi_url,
                                   logical_port_ip)
        self.addCleanup(os.remove, self.fake_conf_file)
| |
"""
Function descriptors.
"""
from collections import defaultdict
import sys
from numba.core import types, itanium_mangler
from numba.core.utils import _dynamic_modname, _dynamic_module
def default_mangler(name, argtypes):
    """Mangle *name* with *argtypes* into a unique symbol name using the
    Itanium C++ ABI scheme (see numba.core.itanium_mangler)."""
    return itanium_mangler.mangle(name, argtypes)
def qualifying_prefix(modname, qualname):
    """
    Returns a new string that is used for the first half of the mangled name.
    """
    # XXX choose a different convention for object mode
    if modname:
        return '{}.{}'.format(modname, qualname)
    return qualname
class FunctionDescriptor(object):
    """
    Base class for function descriptors: an object used to carry
    useful metadata about a natively callable function.

    Note that while `FunctionIdentity` denotes a Python function
    which is being concretely compiled by Numba, `FunctionDescriptor`
    may be more "abstract": e.g. a function decorated with `@generated_jit`.
    """
    # __slots__ keeps instances compact; every attribute assigned in
    # __init__ must be listed here.
    __slots__ = ('native', 'modname', 'qualname', 'doc', 'typemap',
                 'calltypes', 'args', 'kws', 'restype', 'argtypes',
                 'mangled_name', 'unique_name', 'env_name', 'global_dict',
                 'inline', 'noalias')

    def __init__(self, native, modname, qualname, unique_name, doc,
                 typemap, restype, calltypes, args, kws, mangler=None,
                 argtypes=None, inline=False, noalias=False, env_name=None,
                 global_dict=None):
        """Store function metadata and compute the mangled symbol names.

        If *argtypes* is None it is reconstructed from *typemap*; if
        *env_name* is None it is derived by mangling ".NumbaEnv.<prefix>".
        """
        self.native = native
        self.modname = modname
        self.global_dict = global_dict
        self.qualname = qualname
        self.unique_name = unique_name
        self.doc = doc
        # XXX typemap and calltypes should be on the compile result,
        # not the FunctionDescriptor
        self.typemap = typemap
        self.calltypes = calltypes
        self.args = args
        self.kws = kws
        self.restype = restype
        # Argument types
        if argtypes is not None:
            assert isinstance(argtypes, tuple), argtypes
            self.argtypes = argtypes
        else:
            # Get argument types from the type inference result
            # (note the "arg.FOO" convention as used in typeinfer
            self.argtypes = tuple(self.typemap['arg.' + a] for a in args)
        mangler = default_mangler if mangler is None else mangler
        # The mangled name *must* be unique, else the wrong function can
        # be chosen at link time.
        qualprefix = qualifying_prefix(self.modname, self.unique_name)
        self.mangled_name = mangler(qualprefix, self.argtypes)
        if env_name is None:
            env_name = mangler(".NumbaEnv.{}".format(qualprefix),
                               self.argtypes)
        self.env_name = env_name
        self.inline = inline
        self.noalias = noalias

    def lookup_globals(self):
        """
        Return the global dictionary of the function.
        It may not match the Module's globals if the function is created
        dynamically (i.e. exec)
        """
        # global_dict is only populated for dynamically created functions
        # (see _get_function_info); otherwise fall back to the module.
        return self.global_dict or self.lookup_module().__dict__

    def lookup_module(self):
        """
        Return the module in which this function is supposed to exist.
        This may be a dummy module if the function was dynamically
        generated. Raise exception if the module can't be found.
        """
        if self.modname == _dynamic_modname:
            return _dynamic_module
        else:
            try:
                return sys.modules[self.modname]
            except Exception:
                raise ModuleNotFoundError(
                    f"can't compile {self.qualname}: "
                    f"import of module {self.modname} failed")

    def lookup_function(self):
        """
        Return the original function object described by this object.
        """
        return getattr(self.lookup_module(), self.qualname)

    @property
    def llvm_func_name(self):
        """
        The LLVM-registered name for the raw function.
        """
        return self.mangled_name

    # XXX refactor this
    @property
    def llvm_cpython_wrapper_name(self):
        """
        The LLVM-registered name for a CPython-compatible wrapper of the
        raw function (i.e. a PyCFunctionWithKeywords).
        """
        return itanium_mangler.prepend_namespace(self.mangled_name,
                                                 ns='cpython')

    @property
    def llvm_cfunc_wrapper_name(self):
        """
        The LLVM-registered name for a C-compatible wrapper of the
        raw function.
        """
        return 'cfunc.' + self.mangled_name

    def __repr__(self):
        return "<function descriptor %r>" % (self.unique_name)

    @classmethod
    def _get_function_info(cls, func_ir):
        """
        Returns
        -------
        qualname, unique_name, modname, doc, args, kws, globals

        ``unique_name`` must be a unique name.
        """
        func = func_ir.func_id.func
        qualname = func_ir.func_id.func_qualname
        # XXX to func_id
        modname = func.__module__
        doc = func.__doc__ or ''
        args = tuple(func_ir.arg_names)
        kws = ()        # TODO
        global_dict = None

        if modname is None:
            # Dynamically generated function.
            modname = _dynamic_modname
            # Retain a reference to the dictionary of the function.
            # This disables caching, serialization and pickling.
            global_dict = func_ir.func_id.func.__globals__

        unique_name = func_ir.func_id.unique_name

        return qualname, unique_name, modname, doc, args, kws, global_dict

    @classmethod
    def _from_python_function(cls, func_ir, typemap, restype,
                              calltypes, native, mangler=None,
                              inline=False, noalias=False):
        # Shared constructor used by the PythonFunctionDescriptor
        # factory classmethods below.
        (qualname, unique_name, modname, doc, args, kws, global_dict,
         ) = cls._get_function_info(func_ir)
        self = cls(native, modname, qualname, unique_name, doc,
                   typemap, restype, calltypes,
                   args, kws, mangler=mangler, inline=inline, noalias=noalias,
                   global_dict=global_dict)
        return self
class PythonFunctionDescriptor(FunctionDescriptor):
    """
    A FunctionDescriptor subclass for Numba-compiled functions.
    """
    __slots__ = ()

    @classmethod
    def from_specialized_function(cls, func_ir, typemap, restype, calltypes,
                                  mangler, inline, noalias):
        """
        Build a FunctionDescriptor for a given specialization of a Python
        function (in nopython mode).
        """
        return cls._from_python_function(func_ir, typemap, restype, calltypes,
                                         native=True, mangler=mangler,
                                         inline=inline, noalias=noalias)

    @classmethod
    def from_object_mode_function(cls, func_ir):
        """
        Build a FunctionDescriptor for an object mode variant of a Python
        function.
        """
        # In object mode everything is a generic Python object: the
        # defaultdict makes every typemap lookup yield types.pyobject.
        typemap = defaultdict(lambda: types.pyobject)
        calltypes = typemap.copy()
        restype = types.pyobject
        return cls._from_python_function(func_ir, typemap, restype, calltypes,
                                         native=False)
class ExternalFunctionDescriptor(FunctionDescriptor):
    """
    A FunctionDescriptor subclass for opaque external functions
    (e.g. raw C functions).
    """
    __slots__ = ()

    def __init__(self, name, restype, argtypes):
        # External functions carry no declared parameter names, so
        # synthesize positional placeholders ("arg0", "arg1", ...).
        args = ["arg%d" % i for i in range(len(argtypes))]
        # The identity mangler keeps the external symbol name untouched.
        identity_mangler = lambda a, x: a
        super().__init__(native=True, modname=None, qualname=name,
                         unique_name=name, doc='', typemap=None,
                         restype=restype, calltypes=None, args=args,
                         kws=None, mangler=identity_mangler,
                         argtypes=argtypes)
| |
# Natural Language Toolkit: Corpus Access
#
# Copyright (C) 2001 University of Pennsylvania
# Author: Edward Loper <edloper@ldc.upenn.edu>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
#
# $Id: __init__.py,v 1.2 2005/04/06 20:43:23 adastra Exp $
#
# Goals for this module:
#
# 1. we want corpus access to be easy. In particular, we don't want
# people to need to think about file formats, or which tokenizers
# etc to use.
#
# 2. we want corpus access to be consistent (each corpus is accessed
# using the same basic system).
#
# 3. we want corpus access to be portable (eg not dependent on where
# files are installed).
#
# 4. we want to make it easy to tell NLTK about new corpora
#
# 4a. it should be possible to tell NLTK about an existing corpus
# without moving the corpus, or editing its contents in any way.
#
# 5. we want to be able to handle a wide variety of formats, including
# formats where multiple files are used to represent the same data
# (eg standoff annotation).
"""
Access to NLTK's standard distribution of corpora. Each corpus is
accessed by an instance of a C{CorpusReader} class. For information
about using these corpora, see the reference documentation for
L{CorpusReaderI}. The following corpus readers are currently defined:
- L{twenty_newsgroups}: A collection of approximately 20,000
Usenet newsgroup documents, partitioned (nearly) evenly across 20
different newsgroups.
- L{brown}: Approximately 1,000,000 words of part-of-speech tagged
text. The text consists of excerpts from 500 English prose
documents, all printed in the United States in 1961. Each excerpt
is approximately 2000 words long. The excerpts are grouped into 15
topical categories that cover a wide range of genres and styles.
- L{genesis}: A collection of six translations of the book of Genesis.
The texts are taken from several different languages,
and range from 1,500 words to 4,000 words each.
- L{gutenberg}: A collection of fourteen public-domain English etexts
from Project Gutenberg. The texts are taken from seven different
authors, and range from 7,500 words to 800,000 words.
- L{roget}: The 11th edition of Roget's Thesaurus of English Words
and Phrases, from Project Gutenberg. Each item in this corpus
corresponds to a single thesaurus entry from Roget's thesaurus.
- L{words}: A list of about 45,000 unique words and word forms.
The word list contains mostly English words and names, but also
contains some non-English words that occur frequently in English
text.
- L{chunking}: A collection of 11,000 chunked, tagged sentences from
the CoNLL 2000 chunking evaluation.
- L{ppattatch}: Information about approximately 30,000 instances
of potentially ambiguous prepositional phrase attachments. For
each instance, the corpus specifies the verb and noun that the
prepositional phrase might potentially attach to; and the
preposition and head noun of the prepositional phrase. I{The
reader for this corpus is not implemented yet.}
- L{reuters}: A collection of approximately 21,500 documents that
appeared on the Reuters newswire in 1987. The documents were
assembled and indexed with categories by Reuters. I{The reader
for this corpus is not implemented yet.}
- L{treebank}: A collection of hand-annotated parse trees for
English text. I{This corpus can only be distributed through
LDC; it is therefore not included as part of the standard NLTK
corpus set. However, the C{treebank} object will provide access
to this corpus, if it is installed.}
- L{semcor}: A tagged corpora of approximately 200,000 words, where
each word type is tagged with its part of speech and sense
identifier in WordNet.
- L{senseval}: A set of corpora, each containing a set of contexts
in which a specific ambiguous word appears. Each instance is tagged
with a sense identifier. The ambiguous words used are line/N,
interest/N, hard/A and serve/V.
- L{levin}: The index from Beth Levin's verb classification text,
indicating in which sections a given verb appears. The sectioning of
her text corresponds to different generalisations over verbs.
@group Corpus Readers: twenty_newsgroups, treebank, words, reuters,
ppatttach, brown, gutenberg
@var twenty_newsgroups: A collection of approximately 20,000
Usenet newsgroup documents, partitioned (nearly) evenly across 20
different newsgroups.
@var treebank: A collection of hand-annotated parse trees for
english text.
@var words: A list of English words and word forms.
@var reuters: A collection of documents that appeared on Reuters
newswire in 1987.
@var ppattach: Information about potentially ambiguous prepositional
phrase attachments.
@var brown: Approximately 1,000,000 words of part-of-speech tagged
text.
@var gutenberg: A collection of fourteen public-domain English etexts
from Project Gutenberg.
@var semcor: A corpus of 200,000 words, each tagged with its WordNet sense.
@var senseval: A collection of texts, each consisting of a set of instances
of a given ambiguous word, each tagged with its correct sense.
@var levin: The index from Beth Levin's verb classification text,
indicating in which sections a given verb appears.
@todo: Add default basedir for OS-X?
@variable _BASEDIR: The base directory for NLTK's standard distribution
of corpora. This is read from the environment variable
C{NLTK_CORPORA}, if it exists; otherwise, it is given a
system-dependant default value. C{_BASEDIR}'s value can be changed
with the L{set_basedir()} function.
@type _BASEDIR: C{string}
"""
import sys, os.path, re
from nltk.token import *
from nltk.tokenizer import RegexpTokenizer
from nltk.tokenreader import *
from nltk.tokenreader import sense
from nltk.tokenreader import tokenizerbased
#################################################################
# Base Directory for Corpora
#################################################################
def set_basedir(path):
    """
    Set the path to the directory where NLTK looks for corpora.

    @type path: C{string}
    @param path: The path to the directory where NLTK should look for
        corpora.
    """
    # Corpus readers re-check this value lazily (they compare their cached
    # basedir against get_basedir() on access), so changing it after import
    # re-roots every reader.
    global _BASEDIR
    _BASEDIR = path
def get_basedir():
    """
    @return: The path of the directory where NLTK looks for corpora.
    @rtype: C{string}
    """
    # _BASEDIR is always assigned by the platform-detection code below
    # before this module finishes importing.
    return _BASEDIR
# Find a default base directory: the NLTK_CORPORA environment variable
# wins; otherwise probe a list of conventional install locations.
# NOTE: the original used os.environ.has_key(), which is Python-2-only;
# the "in" operator below is equivalent and works on Python 2.2+ and 3.
if 'NLTK_CORPORA' in os.environ:
    set_basedir(os.environ['NLTK_CORPORA'])
elif sys.platform.startswith('win'):
    if os.path.isdir(os.path.join(sys.prefix, 'nltk')):
        set_basedir(os.path.join(sys.prefix, 'nltk'))
    elif os.path.isdir(os.path.join(sys.prefix, 'lib', 'nltk')):
        set_basedir(os.path.join(sys.prefix, 'lib', 'nltk'))
    else:
        # Nothing found; fall back to the conventional location anyway.
        set_basedir(os.path.join(sys.prefix, 'nltk'))
elif os.path.isdir('/usr/lib/nltk'):
    set_basedir('/usr/lib/nltk')
elif os.path.isdir('/usr/local/lib/nltk'):
    set_basedir('/usr/local/lib/nltk')
elif os.path.isdir('/usr/share/nltk'):
    set_basedir('/usr/share/nltk')
else:
    # Default that _initialize() will report as "not installed" if absent.
    set_basedir('/usr/lib/nltk')
#################################################################
# Corpus Reader Interface
#################################################################
class CorpusReaderI:
    """
    An accessor for a collection of natural language data. This
    collection is organized as a set of named X{items}. Typically,
    each item corresponds to a single file, and contains a single
    coherent text; but some corpora are divided into items along
    different lines.

    A corpus can optionally contain a set of X{groups}, or collections
    of related items. The items within a single group all have
    the same format; but different groups may have different formats.
    The set of a corpus's groups are often (but not always) mutually
    exclusive. For a description of the groups that are available for
    a specific corpus, use the L{description()} method.

    The L{groups} method returns a list of the groups that are defined
    for a C{CorpusReader}'s corpus. The L{items()} method returns a
    list of the names of the items in a group or corpus. The
    X{item reader} methods (listed below) are used to read the
    contents of individual items. The following example
    demonstrates the use of a C{Corpus}:

        >>> for newsgroup in twenty_newsgroups.groups():
        ...     for item in twenty_newsgroups.items(newsgroup):
        ...         do_something(newsgroup.tokenize(item), newsgroup)

    Some corpora do not implement all of the item reader methods; if
    a corpus doesn't implement an item reader method, then that
    method will raise a C{NotImplementedError}. Some corpora define
    new item reader methods, for reading their contents in specific
    formats; see the documentation for individual implementations of
    the C{CorpusReaderI} interface for information about new item reader
    methods.

    @group Basic Item Access: items, read, tokenize, xread, xtokenize
    @group Raw Item Access: path, open, raw_read, raw_tokenize,
        raw_xtokenize
    @group Structured Groups: groups
    @group Metadata: name, description, licence, copyright,
        __str__, __repr__
    """
    #////////////////////////////////////////////////////////////
    #// Corpus Information/Metadata
    #////////////////////////////////////////////////////////////
    # NOTE: the abstract methods below used the Python-2-only
    # "raise Error, 'msg'" form; the parenthesized call form used here is
    # equivalent and valid on both Python 2 and Python 3.
    def name(self):
        """
        @return: The name of this C{CorpusReader}'s corpus.
        @rtype: C{string}
        """
        raise AssertionError('CorpusReaderI is an abstract interface')

    def description(self):
        """
        @return: A description of the contents of this
            C{CorpusReader}'s corpus; or C{None} if no description is
            available.
        @rtype: C{string} or C{None}
        """
        return None

    def license(self):
        """
        @return: Information about the license governing the use of
            this C{CorpusReader}'s corpus.
        @rtype: C{string}
        """
        return 'Unknown'

    def copyright(self):
        """
        @return: A copyright notice for this C{CorpusReader}'s corpus.
        @rtype: C{string}
        """
        return 'Unknown'

    def installed(self):
        """
        @return: True if this corpus is installed.
        @rtype: C{boolean}
        """
        raise AssertionError('CorpusReaderI is an abstract interface')

    #////////////////////////////////////////////////////////////
    #// Data access (items)
    #////////////////////////////////////////////////////////////
    def items(self, group=None):
        """
        @return: A list of the names of the items contained in the
            specified group, or in the entire corpus if no group is
            specified.
        @rtype: C{list} of C{string}
        """
        raise AssertionError('CorpusReaderI is an abstract interface')

    def read(self, item):
        """
        @return: A token containing the contents of the given item.
        @param item: The name of the item to read.
        @rtype: L{Token<nltk.token.Token>}
        """
        raise NotImplementedError('This corpus does not implement read()')

    def xread(self, item):
        """
        @return: A token containing the contents of the given item,
            with properties stored as iterators (where applicable).
        @param item: The name of the item to read.
        @rtype: L{Token<nltk.token.Token>}
        """
        # Fixed: the original message wrongly said "read()" here.
        raise NotImplementedError('This corpus does not implement xread()')

    #////////////////////////////////////////////////////////////
    #// Raw Item access
    #////////////////////////////////////////////////////////////
    def path(self, item):
        """
        @return: The path of a file containing the given item.
        @param item: The name of the requested item
        @rtype: C{string}
        """
        raise NotImplementedError('This corpus does not implement path()')

    def open(self, item):
        """
        @return: A read-mode C{file} object for the given item.
        @param item: The name of the item to read.
        @rtype: C{file}
        """
        raise NotImplementedError('This corpus does not implement open()')

    def raw_read(self, item):
        """
        @return: A string containing the contents of the given item.
        @param item: The name of the item to read.
        @rtype: C{string}
        """
        raise NotImplementedError('This corpus does not implement raw_read()')

    #////////////////////////////////////////////////////////////
    #// Structure access (groups)
    #////////////////////////////////////////////////////////////
    def groups(self):
        """
        @return: A list of the names of the groups contained in this
            C{CorpusReader}'s corpus.
        @rtype: C{list} of L{string}
        """
        raise AssertionError('CorpusReaderI is an abstract interface')

    #////////////////////////////////////////////////////////////
    #// Printing
    #////////////////////////////////////////////////////////////
    def __repr__(self):
        """
        @return: A single-line description of this corpus.
        """
        # Local renamed from "str" to avoid shadowing the builtin.
        s = self._name
        try:
            items = self.items()
            groups = self.groups()
            if items:
                if groups:
                    s += (' (contains %d items; %d groups)' %
                          (len(items), len(groups)))
                else:
                    s += ' (contains %d items)' % len(items)
            elif groups:
                s += ' (contains %d groups)' % len(groups)
        except IOError:
            # items()/groups() raise IOError when the corpus isn't installed.
            s += ' (not installed)'
        return '<Corpus: %s>' % s

    def __str__(self):
        """
        @return: A multi-line description of this corpus.
        """
        # Strip the '<Corpus: ' prefix and trailing '>' from repr().
        s = repr(self)[9:-1]
        if self._description:
            s += ':\n'
            s += '\n'.join([' '+line for line
                            in self._description.split('\n')])
        return s
#################################################################
# General-purpose Corpus Implementation
#################################################################
class SimpleCorpusReader(CorpusReaderI):
"""
A general-purpose implementation of the C{CorpusReader} interface
that defines the set of items and the contents of groups with
regular expressions over filenames. The C{SimpleCorpusReader}
implementation is suitable for defining corpus readers for corpora
where:
- Each item consists of the text in a single file.
- Every item has the same format.
- The filenames of items can be distinguished from the
filenames of metadata files with a regular expression.
- The set items in each group can be distinguished with
a single regular expression.
For the purposes of defining regular expressions over path names,
use the forward-slash character (C{'/'}) to delimit directories.
C{SimpleCorpusReader} will automatically convert this to the
appropriate path delimiter for the operating system.
"""
def __init__(self,
# Basic Information
name, rootdir, items_regexp,
# Grouping
groups=None,
# Meta-data
description=None, description_file=None,
license_file=None,
copyright_file=None,
# Formatting meta-data
token_reader=None,
token_reader_expects_source=True,
property_names={}):
"""
Construct a new corpus reader. The parameters C{description},
C{description_file}, C{license_file}, and C{copyright_file}
specify optional metadata. For the corpus reader's
description, you should use C{description} or
C{description_file}, but not both.
@group Basic Information: name, rootdir, items_regexp
@group Grouping: groups
@group Meta-data: description, license, copyright,
description_file, license_file, copyright_file
@group Formatting Meta-data: token_reader
@type name: C{string}
@param name: The name of the corpus. This name is used for
printing the corpus reader, and for constructing
locations. It should usually be identical to the name of
the variable that holds the corpus reader.
@type rootdir: C{string}
@param rootdir: The path to the root directory for the
corpus. If C{rootdir} is a relative path, then it is
interpreted relative to the C{nltk.corpus} base directory
(as returned by L{nltk.corpus.get_basedir()}).
@type items_regexp: C{regexp} or C{string}
@param items_regexp: A regular expression over paths that
defines the set of files that should be listed as
entities for the corpus. The paths that this is tested
against are all relative to the corpus's root directory.
Use the forward-slash character (C{'/'} to delimit
subdirectories; C{SimpleCorpusReader} will automatically convert
this to the appropriate path delimiter for the operating
system.
@type groups: C{list} of C{(string, regexp)} tuples
@param groups: A list specifying the groups for the corpus.
Each element in this list should be a pair
C{(M{groupname}, M{regexp})}, where C{M{groupname}} is the
name of a group; and C{M{regexp}} is a regular expression
over paths that defines the set of files that should be
listed as entities for that group. The paths that these
regular expressions are tested against are all relative
to the corpus's root directory. Use the forward-slash
character (C{'/'} to delimit subdirectories;
C{SimpleCorpusReader} will automatically convert this to the
appropriate path delimiter for the operating system.
@type description: C{string}
@param description: A description of the corpus.
@type description_file: C{string}
@param description_file: The path to a file containing a
description of the corpus. If this is a relative path,
then it is interpreted relative to the corpus's root
directory.
@type license_file: C{string}
@param license_file: The path to a file containing licensing
information about the corpus. If this is a relative
path, then it is interpreted relative to the corpus's root
directory.
@type copyright_file: C{string}
@param copyright_file: The path to a file containing a
copyright notice for the corpus. If this is a relative
path, then it is interpreted relative to the corpus's root
directory.
@type token_reader: L{TokenReaderI}
@param token_reader: The default token_reader that should be
used for the corpus reader's L{read_token} method.
@type token_reader_expects_source: C{bool}
@param token_reader_expects_source: If true, then pass a source
argument to the token reader's C{read_token} method.
"""
if token_reader is None:
token_reader = WhitespaceSeparatedTokenReader(
SUBTOKENS='WORDS')
# Compile regular expressions.
if isinstance(items_regexp, type('')):
items_regexp = re.compile(items_regexp)
if groups is None: groups = []
else: groups = groups[:]
for i in range(len(groups)):
if isinstance(groups[i][1], type('')):
groups[i] = (groups[i][0], re.compile(groups[i][1]))
# Save parameters
self._name = name
self._original_rootdir = rootdir
self._items_regexp = items_regexp
self._grouplists = groups
self._description = description
self._description_file = description_file
self._license = None
self._license_file = license_file
self._copyright = None
self._copyright_file = copyright_file
self._token_reader = token_reader
self._token_reader_expects_source = token_reader_expects_source
self._property_names = property_names
# Postpone actual initialization until the corpus is accessed;
# this gives the user a chance to call set_basedir(), and
# prevents "import nltk.corpus" from raising an exception.
# We'll also want to re-initialize the corpus if basedir
# ever changes.
self._basedir = None
self._rootdir = None
self._items = None
self._groups = None
#////////////////////////////////////////////////////////////
#// Initialization
#////////////////////////////////////////////////////////////
def _initialize(self):
    """Lazily locate the corpus on disk and build its item/group lists.

    Does nothing if already initialized for the current base directory;
    re-initializes if the base directory has changed since the last call.
    Raises IOError if the corpus directory is not installed.
    """
    # If we're already initialized, then do nothing.
    if self._basedir == get_basedir(): return
    # Make sure the corpus is installed.
    self._basedir = get_basedir()
    self._rootdir = os.path.join(self._basedir, self._original_rootdir)
    if not os.path.isdir(self._rootdir):
        raise IOError('%s is not installed' % self._name)
    # Find the files that are items.
    # NOTE(review): the regexp is tested against the bare filename here,
    # but the constructor docstring says it is matched against the path
    # relative to the root directory -- confirm which is intended (e.g.
    # the '20_newsgroups' pattern '.*/.*' can never match a bare
    # filename).
    self._items = [os.path.join(path[len(self._rootdir):],file)
                   for path,dirs,files in os.walk(self._rootdir) for file in files
                   if self._items_regexp.match(file)]
    # Find the files for each group.
    self._groups = {}
    for (groupname, regexp) in self._grouplists:
        self._groups[groupname] = [f for f in self._items
                                   if regexp.match(f)]
    # Read metadata from files.  Missing or unreadable metadata files are
    # silently ignored; the corresponding attribute stays None.
    if self._description is None and self._description_file is not None:
        path = os.path.join(self._rootdir, self._description_file)
        try: self._description = open(path).read()
        except IOError: pass
    if self._license is None and self._license_file is not None:
        path = os.path.join(self._rootdir, self._license_file)
        try: self._license = open(path).read()
        except IOError: pass
    if self._copyright is None and self._copyright_file is not None:
        path = os.path.join(self._rootdir, self._copyright_file)
        try: self._copyright = open(path).read()
        except IOError: pass
#////////////////////////////////////////////////////////////
#// Corpus Information/Metadata
#////////////////////////////////////////////////////////////
def name(self):
    """Return this corpus's name.  Does not require initialization."""
    return self._name
def description(self):
    """Return a description of the corpus (initializing on demand)."""
    self._initialize()
    return self._description
def license(self):
    """Return the corpus's licensing information (initializing on demand)."""
    self._initialize()
    return self._license
def copyright(self):
    """Return the corpus's copyright notice (initializing on demand)."""
    self._initialize()
    return self._copyright
def installed(self):
    """Return 1 if this corpus can be initialized (i.e., its files are
    present on disk); 0 otherwise."""
    try:
        self._initialize()
    except IOError:
        return 0
    return 1
#////////////////////////////////////////////////////////////
#// Data access (items)
#////////////////////////////////////////////////////////////
def items(self, group=None):
    """Return the corpus's items.

    @param group: If None, return the list of all items; otherwise
        return a tuple of the items in the named group (an empty tuple
        if the group is unknown).
    """
    self._initialize()
    if group is None: return self._items
    # Bug fix: the original wrote ``tuple(self._groups.get(group)) or ()``,
    # which raises TypeError (tuple(None)) for an unknown group because
    # tuple() is applied before the ``or`` fallback.  Supply the empty
    # default inside get() instead.
    return tuple(self._groups.get(group, ()))
def read(self, item, *reader_args, **reader_kwargs):
    """
    Read the given item with the corpus's token reader.

    @param reader_args, reader_kwargs: Arguments that are passed on
        to the corpus reader's C{TokenReader}.
    """
    text = self.raw_read(item)
    reader = self._token_reader
    if not self._token_reader_expects_source:
        return reader.read_token(text, *reader_args, **reader_kwargs)
    source = '%s/%s' % (self._name, item)
    return reader.read_token(text, source=source,
                             *reader_args, **reader_kwargs)
def xread(self, item, *reader_args, **reader_kwargs):
    """Read the given item; this default implementation provides no
    iterator support and simply delegates to L{read}."""
    return self.read(item, *reader_args, **reader_kwargs)
def path(self, item):
    """Return the filesystem path of the given item."""
    self._initialize()
    item_path = os.path.join(self._rootdir, item)
    return item_path
def open(self, item):
    """Return a read-mode file object for the given item.

    (Inside this method, ``open`` resolves to the builtin, not to this
    method, since methods don't shadow globals within their own body.)
    """
    self._initialize()
    return open(self.path(item))
def raw_read(self, item):
    """Return the raw text contents of the given item.

    Closes the file handle explicitly instead of leaking it until
    garbage collection (the original returned ``self.open(item).read()``
    without ever closing the file).
    """
    f = self.open(item)
    try:
        return f.read()
    finally:
        f.close()
#////////////////////////////////////////////////////////////
#// Structure access (groups)
#////////////////////////////////////////////////////////////
def groups(self):
    """Return the names of the groups defined for this corpus."""
    self._initialize()
    return self._groups.keys()
#################################################################
# The standard corpora
#################################################################
# Each reader is constructed lazily: no disk access happens until the
# corpus object is actually used.  Temporary `groups` lists are built,
# passed to the reader, and then deleted from the module namespace.

###################################################
## 20 Newsgroups
# One group per newsgroup; each group matches every file in that
# newsgroup's subdirectory.
groups = [(ng, ng+'/.*') for ng in '''
alt.atheism rec.autos sci.space comp.graphics rec.motorcycles
soc.religion.christian comp.os.ms-windows.misc rec.sport.baseball
talk.politics.guns comp.sys.ibm.pc.hardware rec.sport.hockey
talk.politics.mideast comp.sys.mac.hardware sci.crypt
talk.politics.misc comp.windows.x sci.electronics
talk.religion.misc misc.forsale sci.med'''.split()]
twenty_newsgroups = SimpleCorpusReader(
    '20_newsgroups', '20_newsgroups/', '.*/.*', groups,
    description_file='../20_newsgroups.readme')
del groups # delete temporary variable

###################################################
## Brown
# Genre groups keyed by the two-letter filename prefixes used by the
# Brown corpus (ca.. = press reportage, cb.. = editorial, etc.).
groups = [('press: reportage', r'ca\d\d'), ('press: editorial', r'cb\d\d'),
          ('press: reviews', r'cc\d\d'), ('religion', r'cd\d\d'),
          ('skill and hobbies', r'ce\d\d'), ('popular lore', r'cf\d\d'),
          ('belles-lettres', r'cg\d\d'),
          ('miscellaneous: government & house organs', r'ch\d\d'),
          ('learned', r'cj\d\d'), ('fiction: general', r'ck\d\d'),
          ('fiction: mystery', r'cl\d\d'), ('fiction: science', r'cm\d\d'),
          ('fiction: adventure', r'cn\d\d'), ('fiction: romance', r'cp\d\d'),
          ('humor', r'cr\d\d')]
brown = SimpleCorpusReader(
    'brown', 'brown/', r'c\w\d\d', groups, description_file='README',
    token_reader=TaggedTokenReader(SUBTOKENS='WORDS'))
del groups # delete temporary variable

###################################################
## Genesis
genesis = SimpleCorpusReader(
    'genesis', 'genesis/', r'.*\.txt', description_file='README')

###################################################
## Gutenberg
# Grouped by author (filename prefix).
groups = [('austen', 'austen-.*'), ('bible', 'bible-.*'),
          ('blake', 'blake-.*'), ('chesterton', 'chesterton-.*'),
          ('milton', 'milton-.*'), ('shakespeare', 'shakespeare-.*'),
          ('whitman', 'whitman-.*')]
gutenberg = SimpleCorpusReader(
    'gutenberg', 'gutenberg/', r'.*\.txt', groups, description_file='README')
del groups # delete temporary variable

###################################################
## PP Attachment
# Placeholder: a description string stands in for the unimplemented reader.
ppattach = '''\
[CORPUS READER NOT IMPLEMENTED YET]
Information about approximately 30,000 instances of potentially
ambigous prepositional phrase attachments. For each instance, the
corpus specifies the verb and noun that the prepositional phrase might
potentially attach to; and the preposition and head noun of the
prepositional phrase.'''

###################################################
## Roget
from nltk.corpus.rogetreader import RogetCorpusReader
roget = RogetCorpusReader('roget', 'roget/', 'roget15a.txt')

###################################################
## Words corpus (just English at this point)
words = SimpleCorpusReader(
    'words', 'words/', r'words', description_file='README')

###################################################
## Stopwords corpus
stopwords = SimpleCorpusReader(
    'stopwords', 'stopwords/', r'[a-z]+', description_file='README')

###################################################
## CONLL 2000 Chunking data corpus
chunking = SimpleCorpusReader(
    'chunking', 'chunking/', r'.*\.txt', None, description_file='README',
    token_reader=ConllTokenReader())

###################################################
## IEER Named Entity data corpus
# NOTE(review): these group patterns should be raw strings (r'APW_\d*');
# '\d' in a plain string relies on unknown escapes passing through.
groups = [('APW', 'APW_\d*'), ('NYT', 'NYT_\d*')]
ieer = SimpleCorpusReader(
    'ieer', 'ieer/', r'(APW|NYT)_\d+', groups, description_file='README',
    token_reader=IeerTokenReader())
del groups # delete temporary variable

###################################################
## Treebank (fragment distributed with NLTK)
from nltk.corpus.tree import TreebankCorpusReader
treebank = TreebankCorpusReader('treebank', 'treebank/', False,
                                description_file='README')

###################################################
## Semcor corpus
# Disabled: the reader below depends on SemcorTokenizer, which is not
# currently wired in.
#from nltk.sense import SemcorTokenizer
description = """
WordNet semantic concordance data. This is comprised of extracts from the
Brown corpus, with each word tagged with its WordNet 1.7 tag.
"""
semcor = None
#semcor = SimpleCorpusReader(
#    'semcor', 'semcor1.7/', r'brown./tagfiles/.*', description=description,
#    default_tokenizer = SemcorTokenizer())

###################################################
## Senseval corpus
#from nltk.sense import SensevalTokenizer
senseval = None
SensevalTokenReader = tokenizerbased.TokenizerBasedTokenReader(sense.SensevalTokenizer())
senseval = SimpleCorpusReader(
    'senseval', 'senseval/', r'.*\.pos', description_file='README',
    token_reader = SensevalTokenReader)

###################################################
## Names corpus
names = SimpleCorpusReader(
    'names', 'names/', r'.*\.txt', description_file='README',
    token_reader = NewlineSeparatedTokenReader(SUBTOKENS='NAMES'))

###################################################
## Reuters corpus
# Placeholder: a description string stands in for the unimplemented reader.
reuters = '''\
[CORPUS READER NOT IMPLEMENTED YET]
A collection of approximately 21,500 documents that appeared on the
Reuters newswire in 1987. The documents were assembled and indexed
with categories by Reuters.'''
# Not supported yet

###################################################
## Levin corpus
levin = None
# class _LevinTokenizer(TokenizerI):
#     # [XX] add_locs & add_contexts are ignored!
#     def tokenize(self, token, add_locs=False, add_contexts=False):
#         token['VERB_DICT'] = {}
#         for line in token['TEXT'].split('\n'):
#             items = line.split(':')
#             if len(items) == 2:
#                 verb, indices = items
#                 indices = filter(lambda x: x, re.split(r'[,\s]*', indices))
#                 token['VERB_DICT'][verb.strip()] = indices
# levin = SimpleCorpusReader(
#     'levin', 'levin/', 'verbs', description_file='README',
#     default_tokenizer = _LevinTokenizer())
#################################################################
# Demonstration
#################################################################
def _truncate_repr(obj, width, indent, lines=1):
n = width-indent
s = repr(obj)
if len(s) > n*lines:
s = s[:n*lines-3] + '...'
s = re.sub('(.{%d})' % n, '\\1\n'+' '*indent, s)
return s.rstrip()
def _xtokenize_repr(token, width, indent, lines=1):
n = width-indent
s = '<'+'['
for subtok in token['SUBTOKENS']:
s += '%r, ' % subtok
if len(s) > n*lines:
s = s[:n*lines-3]+'...'
break
else: s = s[:-2] + ']'+'>'
s = re.sub('(.{%d})' % n, '\\1\n'+' '*indent, s)
return s.rstrip()
def _test_corpus(corpus):
    """Print a summary of the given corpus's metadata, item/group lists,
    and the contents of its first item.  (Python 2 print statements.)"""
    if corpus is None:
        print '(skipping None)'
        return
    print '='*70
    print corpus.name().center(70)
    print '-'*70
    print 'description() => ' + _truncate_repr(corpus.description(), 70,17)
    print 'license() => ' + _truncate_repr(corpus.license(), 70,17)
    print 'copyright() => ' + _truncate_repr(corpus.copyright(), 70,17)
    print 'items() => ' + _truncate_repr(corpus.items(), 70,17)
    print 'groups() => ' + _truncate_repr(corpus.groups(), 70,17)
    # Read the first item to show a sample of the corpus contents.
    item = corpus.items()[0]
    contents = corpus.read(item)
    print 'read(e0) => ' + _truncate_repr(contents, 70,17)
    # Disabled: tokenize/xtokenize demonstration from the older API.
    # try:
    #     tokrepr = _xtokenize_repr(corpus.xtokenize(item), 70,17,2)
    #     print 'xtokenize(e0) => ' + tokrepr
    # except NotImplementedError:
    #     tokrepr = _truncate_repr(corpus.tokenize(item), 70,17,2)
    #     print 'tokenize(e0) => ' + tokrepr
def _test_treebank():
    """Demonstrate the treebank corpus, including one item from each of
    its four groups (raw/tagged/parsed/merged).  (Python 2 prints.)"""
    _test_corpus(treebank)
    print '-'*70
    print "r, t, p, m = [treebank.items(group)[0] for group in"
    print " 'raw', 'tagged', 'parsed', 'merged']"
    r = treebank.items('raw')[0]
    t = treebank.items('tagged')[0]
    p = treebank.items('parsed')[0]
    m = treebank.items('merged')[0]
    for (name, item) in zip('rtpm', (r, t, p, m)):
        contents = treebank.read(item)
        print 'read(%s) => %s' % (name, _truncate_repr(contents, 70,17))
    # Disabled: tokenize/xtokenize demonstration from the older API.
    #try:
    #    tok = treebank.xtokenize(item)
    #    tokrepr = _xtokenize_repr(tok.exclude('LOC'), 70,17,2)
    #    print 'xtokenize(%s) => %s' % (name, tokrepr)
    #except NotImplementedError:
    #    tok = treebank.tokenize(item)
    #    tokrepr = _truncate_repr(tok.exclude('LOC'), 70,17,2)
    #    print 'tokenize(%s) => %s' % (name, tokrepr)
def demo():
    """
    Demonstrate corpus access for each of the defined corpora.
    """
    # Same corpora, in the same order, as the original statement list.
    for corpus in (twenty_newsgroups, brown, gutenberg, roget,
                   words, semcor, senseval, chunking):
        _test_corpus(corpus)
    _test_treebank()
    print('='*70)
if __name__ == '__main__':
    # Run the corpus demonstration when this module is executed directly.
    demo()
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for py_func op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import queue
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.client import session as session_lib
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.platform import test
class PyOpTest(test.TestCase):
  """Tests for `script_ops.py_func`: wrapping a Python callable as a TF op."""

  def testBasic(self):
    """py_func handles scalars, arrays, exotic dtypes, literals, lists,
    and tuples for both inputs and outputs."""

    def my_func(x, y):
      return np.sinh(x) + np.cosh(y)

    # single type
    with self.test_session():
      x = constant_op.constant(1.0, dtypes.float32)
      y = constant_op.constant(2.0, dtypes.float32)
      # Tout given as a bare dtype -> output is a single tensor.
      z = script_ops.py_func(my_func, [x, y], dtypes.float32)
      self.assertEqual(z.eval(), my_func(1.0, 2.0).astype(np.float32))

    # scalar
    with self.test_session():
      x = constant_op.constant(1.0, dtypes.float32)
      y = constant_op.constant(2.0, dtypes.float32)
      # Tout given as a list -> output is a list of tensors.
      z = script_ops.py_func(my_func, [x, y], [dtypes.float32])
      self.assertEqual(z[0].eval(), my_func(1.0, 2.0).astype(np.float32))

    # array
    with self.test_session():
      x = constant_op.constant([1.0, 2.0], dtypes.float64)
      y = constant_op.constant([2.0, 3.0], dtypes.float64)
      z = script_ops.py_func(my_func, [x, y], [dtypes.float64])
      self.assertAllEqual(z[0].eval(),
                          my_func([1.0, 2.0], [2.0, 3.0]).astype(np.float64))

    # a bit exotic type (complex64)
    with self.test_session():
      x = constant_op.constant(1 + 2j, dtypes.complex64)
      y = constant_op.constant(3 + 4j, dtypes.complex64)
      z, = script_ops.py_func(my_func, [x, y], [dtypes.complex64])
      self.assertAllClose(z.eval(), my_func(1 + 2j, 3 + 4j))

    # a bit exotic function (rfft)
    with self.test_session():
      x = constant_op.constant([1., 2., 3., 4.], dtypes.float32)

      def rfft(x):
        return np.fft.rfft(x).astype(np.complex64)

      y, = script_ops.py_func(rfft, [x], [dtypes.complex64])
      self.assertAllClose(y.eval(), np.fft.rfft([1., 2., 3., 4.]))

    # returns a python literal.
    with self.test_session():

      def literal(x):
        return 1.0 if x == 0.0 else 0.0

      x = constant_op.constant(0.0, dtypes.float64)
      y, = script_ops.py_func(literal, [x], [dtypes.float64])
      self.assertAllClose(y.eval(), 1.0)

    # returns a list
    with self.test_session():

      def list_func(x):
        return [x, x + 1]

      x = constant_op.constant(0.0, dtypes.float64)
      y, z = script_ops.py_func(list_func, [x], [dtypes.float64] * 2)
      self.assertAllClose(y.eval(), 0.0)
      self.assertAllClose(z.eval(), 1.0)

    # returns a tuple
    with self.test_session():

      def tuple_func(x):
        return x, x + 1

      x = constant_op.constant(0.0, dtypes.float64)
      y, z = script_ops.py_func(tuple_func, [x], [dtypes.float64] * 2)
      self.assertAllClose(y.eval(), 0.0)
      self.assertAllClose(z.eval(), 1.0)

    # returns a tuple, Tout and inp a tuple
    with self.test_session():
      x = constant_op.constant(0.0, dtypes.float64)
      y, z = script_ops.py_func(tuple_func, (x,), (dtypes.float64,
                                                   dtypes.float64))
      self.assertAllClose(y.eval(), 0.0)
      self.assertAllClose(z.eval(), 1.0)

  def testStrings(self):
    """String tensors round-trip through py_func as numpy byte strings."""

    def read_fixed_length_numpy_strings():
      return np.array([b" there"])

    def read_and_return_strings(x, y):
      return x + y

    with self.test_session():
      x = constant_op.constant([b"hello", b"hi"], dtypes.string)
      y, = script_ops.py_func(read_fixed_length_numpy_strings, [],
                              [dtypes.string])
      z, = script_ops.py_func(read_and_return_strings, [x, y], [dtypes.string])
      self.assertListEqual(list(z.eval()), [b"hello there", b"hi there"])

  def testStringPadding(self):
    """Returned string lists keep their individual (unpadded) values."""
    correct = [b"this", b"is", b"a", b"test"]
    with self.test_session():
      s, = script_ops.py_func(lambda: [correct], [], [dtypes.string])
      self.assertAllEqual(s.eval(), correct)

  def testLarge(self):
    """Repeated evaluation with large (1M-element) tensors works."""
    with self.test_session() as sess:
      x = array_ops.zeros([1000000], dtype=np.float32)
      y = script_ops.py_func(lambda x: x + 1, [x], [dtypes.float32])
      z = script_ops.py_func(lambda x: x * 2, [x], [dtypes.float32])
      for _ in xrange(100):
        sess.run([y[0].op, z[0].op])

  def testNoInput(self):
    """A py_func with no inputs still produces its output."""
    with self.test_session():
      x, = script_ops.py_func(lambda: 42.0, [], [dtypes.float64])
      self.assertAllClose(x.eval(), 42.0)

  def testCleanup(self):
    """The py_func registry does not grow unboundedly across graphs."""
    for _ in xrange(1000):
      g = ops.Graph()
      with g.as_default():
        c = constant_op.constant([1.], dtypes.float32)
        _ = script_ops.py_func(lambda x: x + 1, [c], [dtypes.float32])
    # If cleanup failed we'd have ~1000 registered functions here.
    self.assertTrue(script_ops._py_funcs.size() < 100)

  def testAlias(self):
    """Using a returned numpy array downstream must not mutate it."""
    with self.test_session():
      np_array = np.array([1.0, 2.0], dtype=np.float32)
      tf_array = script_ops.py_func(lambda: np_array, [], [dtypes.float32])
      value = tf_array + constant_op.constant([2.0, 3.0], dtype=dtypes.float32)
      value.op.run()
      self.assertAllEqual(np_array, [1.0, 2.0])

  def testBadNumpyReturnType(self):
    """Structured numpy dtypes in the return value raise Unimplemented."""
    with self.test_session():

      def bad():
        # Structured numpy arrays aren't supported.
        return np.array([], dtype=[("foo", np.float32)])

      y, = script_ops.py_func(bad, [], [dtypes.float32])

      with self.assertRaisesRegexp(errors.UnimplementedError,
                                   "Unsupported numpy type"):
        y.eval()

  def testBadReturnType(self):
    """Non-array python objects in the return value raise Unimplemented."""
    with self.test_session():

      def bad():
        # Non-string python objects aren't supported.
        return {"foo": dtypes.float32}

      z, = script_ops.py_func(bad, [], [dtypes.int64])

      with self.assertRaisesRegexp(errors.UnimplementedError,
                                   "Unsupported object type"):
        z.eval()

  def testReturnInput(self):
    """Returning an input alias must copy, not reuse the feed's memory."""
    with self.test_session():

      def ident(x):
        return x[0]

      p = array_ops.placeholder(dtypes.float32)

      # Create a numpy array aliasing a tensor and a tensor aliasing this array
      z, = script_ops.py_func(ident, [p], [dtypes.float32])
      z += 0.0  # Makes sure we release the tensor aliasing the numpy array x[0]
                # above instead of using its memory as the return value of
                # session.run
      self.assertEqual(0.0, z.eval(feed_dict={p: [0.0]}))

  def testStateful(self):
    """A stateful py_func is re-run on every session.run call."""
    # Not using self.test_session(), which disables optimization.
    with session_lib.Session() as sess:
      producer = iter(range(3))
      x, = script_ops.py_func(lambda: next(producer), [], [dtypes.int64])
      self.assertEqual(sess.run(x), 0)
      self.assertEqual(sess.run(x), 1)
      self.assertEqual(sess.run(x), 2)

  def testStateless(self):
    """A stateless py_func may be constant-folded: same value each run."""
    # Not using self.test_session(), which disables optimization.
    with session_lib.Session() as sess:
      producer = iter(range(3))
      x, = script_ops.py_func(
          lambda: next(producer), [], [dtypes.int64], stateful=False)
      self.assertEqual(sess.run(x), 0)
      self.assertEqual(sess.run(x), 0)
      self.assertEqual(sess.run(x), 0)

  def testGradientFunction(self):
    """py_func ops have no registered gradient function."""
    # Input to tf.py_func is necessary, otherwise get_gradient_function()
    # returns None per default.
    a = constant_op.constant(0)
    x, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64])
    y, = script_ops.py_func(lambda a: 0, [a], [dtypes.int64], stateful=False)
    self.assertEqual(None, ops.get_gradient_function(x.op))
    self.assertEqual(None, ops.get_gradient_function(y.op))

  def testCOrder(self):
    """Fortran-ordered numpy outputs are converted correctly."""
    with self.test_session():
      val = [[1, 2], [3, 4]]
      x, = script_ops.py_func(lambda: np.array(val, order="F"), [],
                              [dtypes.int64])
      self.assertAllEqual(val, x.eval())

  def testParallel(self):
    # Tests that tf.py_func's can run in parallel if they release the GIL.
    with self.test_session() as session:
      q = queue.Queue(1)

      def blocking_put():
        q.put(42)
        q.join()  # Wait for task_done().
        return 42

      def blocking_get():
        v = q.get(block=True)  # Wait for put().
        q.task_done()
        return v

      x, = script_ops.py_func(blocking_put, [], [dtypes.int64])
      y, = script_ops.py_func(blocking_get, [], [dtypes.int64])

      # This will result in a deadlock if the py_func's don't run in parallel.
      session.run([x, y])

  def testNoReturnValueStateful(self):
    """A stateful py_func with no outputs still runs for its side effects."""

    class State(object):

      def __init__(self):
        self._value = np.array([1], np.int64)

      def _increment(self, diff):
        self._value += diff

      def increment(self, diff):
        return script_ops.py_func(self._increment, [diff], [], stateful=True)

      @property
      def value(self):
        return self._value

    with self.test_session() as sess:
      s = State()
      op = s.increment(constant_op.constant(2, dtypes.int64))
      ret = sess.run(op)
      self.assertIsNone(ret)
      self.assertAllEqual([3], s.value)

  def testNoReturnValueStateless(self):
    """A stateless py_func with no outputs evaluates to an empty list."""

    def do_nothing(unused_x):
      pass

    f = script_ops.py_func(
        do_nothing, [constant_op.constant(3, dtypes.int64)], [], stateful=False)
    with self.test_session() as sess:
      self.assertEqual(sess.run(f), [])

  def _testExceptionHandling(self, py_exp, tf_exp):
    """Assert that raising `py_exp` inside the func surfaces as `tf_exp`."""

    def raise_exception():
      raise py_exp("blah")  # pylint: disable=not-callable

    f = script_ops.py_func(raise_exception, [], [])
    with self.test_session() as sess:
      with self.assertRaisesRegexp(tf_exp, "blah"):
        sess.run(f)

  def testExceptionHandling(self):
    """Python exceptions map onto the corresponding TF error classes."""
    self._testExceptionHandling(ValueError, errors.InvalidArgumentError)
    self._testExceptionHandling(TypeError, errors.InvalidArgumentError)
    self._testExceptionHandling(StopIteration, errors.OutOfRangeError)
    self._testExceptionHandling(MemoryError, errors.ResourceExhaustedError)
    self._testExceptionHandling(NotImplementedError, errors.UnimplementedError)

    class WeirdError(Exception):
      pass

    # Unrecognized exception types fall back to UnknownError.
    self._testExceptionHandling(WeirdError, errors.UnknownError)
if __name__ == "__main__":
test.main()
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities related to distributed training."""
# pylint:disable=protected-access
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.distribute import distribute_coordinator_context as dc_context
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.distribute import reduce_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import metrics as metrics_module
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.distribute import distributed_training_utils as dist_utils
from tensorflow.python.keras.engine import training_utils_v1
from tensorflow.python.keras.optimizer_v2 import optimizer_v2
from tensorflow.python.keras.utils import tf_contextlib
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_concat_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import nest
def set_weights(distribution_strategy, dist_model, weights):
  """Sets the weights of the replicated models.

  The weights of the replicated models are set to the weights of the original
  model. The weights of the replicated model are Mirrored variables and hence
  we need to use the `update` call within a DistributionStrategy scope.

  Args:
    distribution_strategy: DistributionStrategy used to distribute training
        and validation.
    dist_model: The replicated models on the different devices.
    weights: The weights of the original model.
  """
  pending_assigns = []
  remaining = weights
  for layer in dist_model.layers:
    param_count = len(layer.weights)
    # Consume this layer's slice of the flat weight list.
    layer_values, remaining = remaining[:param_count], remaining[param_count:]
    for variable, value in zip(layer.weights, layer_values):
      if ops.executing_eagerly_outside_functions():
        variable.assign(value)
      else:
        pending_assigns.append(
            distribution_strategy.unwrap(variable.assign(value)))
  if not ops.executing_eagerly_outside_functions():
    # In graph mode, run all the queued assign ops in a single session call.
    K.get_session(pending_assigns).run(pending_assigns)
def unwrap_values(distribution_strategy, grouped_inputs, grouped_outputs,
                  grouped_updates=None, grouped_session_args=None,
                  with_loss_tensor=False):
  """Unwrap the list of values contained in the PerReplica parameters.

  This function calls `flatten_per_replica_values` to parse each of the input
  parameters into a list of values on the different devices. If we set
  `with_loss_tensor` to be True, we also call `reduce` on the list of losses on
  the different devices to give us one loss tensor.

  Args:
    distribution_strategy: DistributionStrategy used to distribute training and
        validation.
    grouped_inputs: PerReplica inputs returned from the train or test function
        that we ran on each device.
    grouped_outputs: PerReplica outputs returned from the train or test function
        that we ran on each device.
    grouped_updates: PerReplica updates returned from the train or test function
        that we ran on each device.
    grouped_session_args: PerReplica session args returned from the train or
        test function that we ran on each device.
    with_loss_tensor: Boolean that indicates if we need to add the reduced loss
        tensor as one of the outputs.

  Returns:
    Values of each of the PerReplica parameters.
  """
  # Unwrap per device values returned from each model's train function.
  # This will be used to construct the main train function.
  unwrapped_inputs = flatten_per_replica_values(distribution_strategy,
                                                grouped_inputs)
  unwrapped_outputs = unwrap_outputs(distribution_strategy, grouped_outputs,
                                     with_loss_tensor)
  unwrapped_updates = (
      flatten_per_replica_values(distribution_strategy, grouped_updates)
      if grouped_updates else None)

  unwrapped_session_args = {}
  if grouped_session_args:
    # Only 'feed_dict' and 'fetches' are unwrapped; other keys are dropped,
    # matching the original behavior.
    for key in ('feed_dict', 'fetches'):
      grouped_value = grouped_session_args.get(key)
      if grouped_value:
        unwrapped_session_args[key] = flatten_per_replica_values(
            distribution_strategy, grouped_value)

  # TODO(priyag): Return only non empty/None values
  return (unwrapped_inputs, unwrapped_outputs, unwrapped_updates,
          unwrapped_session_args)
def unwrap_output_dict(strategy, grouped_outputs, mode):
  """Unwrap the list of outputs contained in the PerReplica parameters."""
  if mode == ModeKeys.PREDICT:
    return flatten_per_replica_values(strategy, grouped_outputs)

  # In the case of fit/eval, the grouped_outputs is a dict, whereas in predict,
  # the output has the same structure as the model output.  They therefore
  # need to be treated differently.
  summed_loss = strategy.reduce(reduce_util.ReduceOp.SUM,
                                grouped_outputs['total_loss'][0], axis=None)
  per_output_losses = flatten_per_replica_values(
      strategy, grouped_outputs['output_losses'])
  metric_values = flatten_per_replica_values(strategy,
                                             grouped_outputs['metrics'])
  summed_batch_size = strategy.reduce(reduce_util.ReduceOp.SUM,
                                      grouped_outputs['batch_size'], axis=None)

  if (dist_utils.is_tpu_strategy(strategy) and
      ops.executing_eagerly_outside_functions()):
    # Choose 1 value per replica in the TPU case since all replicas produce the
    # same output.  Only done in eager mode for now, since this function is
    # shared between graph and eager and the graph path doesn't use
    # experimental_run; remove once the graph code path converges.
    stride = strategy.num_replicas_in_sync
    per_output_losses = per_output_losses[::stride]
    metric_values = metric_values[::stride]

  return {'total_loss': [summed_loss],
          'output_losses': per_output_losses,
          'metrics': metric_values,
          'batch_size': summed_batch_size}
def unwrap_outputs(distribution_strategy, grouped_outputs,
                   with_loss_tensor=False):
  """Unwrap the list of outputs contained in the PerReplica parameters.

  This function calls `flatten_per_replica_values` to parse each of the input
  parameters into a list of outputs on the different devices. If we set
  `with_loss_tensor` to be True, we also call `reduce` on the list of losses on
  the different devices to give us one loss tensor.

  Args:
    distribution_strategy: DistributionStrategy used to distribute training and
        validation.
    grouped_outputs: PerReplica outputs returned from the train or test function
        that we ran on each device.
    with_loss_tensor: Boolean that indicates if we need to add the reduced loss
        tensor as one of the outputs.

  Returns:
    Values of each of the PerReplica outputs.
  """
  if not with_loss_tensor:
    return flatten_per_replica_values(distribution_strategy, grouped_outputs)

  if not isinstance(grouped_outputs, list):
    grouped_outputs = [grouped_outputs]
  # The first grouped output is the loss: reduce it before adding it to the
  # list of fetches.
  reduced_loss = distribution_strategy.reduce(reduce_util.ReduceOp.SUM,
                                              grouped_outputs[0], axis=None)
  flattened = flatten_per_replica_values(distribution_strategy,
                                         grouped_outputs[1:])

  if (dist_utils.is_tpu_strategy(distribution_strategy) and
      ops.executing_eagerly_outside_functions()):
    # Choose 1 value per replica in the TPU case since all replicas produce the
    # same output.  Only done in eager mode for now, since this function is
    # shared between graph and eager and the graph path doesn't use
    # experimental_run; remove once the graph code path converges.
    flattened = flattened[::distribution_strategy.num_replicas_in_sync]
  return [reduced_loss] + flattened
def flatten_per_replica_values(distribution_strategy, per_replica_values):
  """Unwraps and flattens a nest of PerReplica parameters.

  PerReplica values have one value associated with each device. Each entry in
  the PerReplica dict has a device `key` and the corresponding value on the
  device as the `value`. In this function we take a PerReplica value or a list
  of PerReplica values and return all the values in the PerReplica dict.

  Args:
    distribution_strategy: DistributionStrategy used to distribute training and
        validation.
    per_replica_values: List of PerReplica object or a single PerReplica object.

  Returns:
    List of values of all the PerReplica objects.
  """
  # Flatten the nest, then unwrap each PerReplica value into its per-device
  # components, concatenating everything into one flat list.
  flat_values = []
  for per_replica in nest.flatten(per_replica_values):
    flat_values.extend(distribution_strategy.unwrap(per_replica))
  return flat_values
def validate_callbacks(input_callbacks, optimizer):
  """Validate whether given callbacks are supported by DistributionStrategy.

  Args:
    input_callbacks: List of callbacks passed by the user to fit.
    optimizer: Optimizer instance used to train the model.

  Raises:
    ValueError: If `LearningRateScheduler` or `ReduceLROnPlateau` is one of the
      callbacks passed.
    ValueError: If `write_grads` is one of the parameters passed as part of the
      TensorBoard callback.
  """
  if not input_callbacks:
    return
  lr_schedule_callbacks = (callbacks.LearningRateScheduler,
                           callbacks.ReduceLROnPlateau)
  for callback in input_callbacks:
    # LR-scheduling callbacks need a V2 optimizer to work with a strategy.
    if (isinstance(callback, lr_schedule_callbacks) and
        not isinstance(optimizer, optimizer_v2.OptimizerV2)):
      raise ValueError('You must specify a Keras Optimizer V2 when using '
                       '%s callback with DistributionStrategy.' % callback)
    # If users want to use the TensorBoard callback they cannot use certain
    # features of the callback that involve accessing model attributes and
    # running ops.
    if (isinstance(callback, callbacks.TensorBoard) and
        getattr(callback, 'write_grads', False)):
      logging.warning(
          UserWarning(
              '`write_grads` in the TensorBoard callback is not supported '
              'when using DistributionStrategy. Setting `write_grads` '
              'to `False`.'))
      callback.write_grads = False
def validate_distributed_dataset_inputs(distribution_strategy, x, y,
                                        sample_weights=None):
  """Validate all the components of a DistributedValue Dataset input.

  Args:
    distribution_strategy: The current DistributionStrategy used to call
      `fit`/`evaluate`.
    x: Input Dataset DistributedValue object. For example, when we use
      `MirroredStrategy` this is a PerReplica object with a tensor for each
      device set in the dict. x can also be a tuple or dict. The keys of the
      dict should match the names of the input layers of the model.
    y: Target Dataset DistributedValue object. For example, when we use
      `MirroredStrategy` this is a PerReplica object with a tensor for each
      device set in the dict. y can also be a tuple or dict. The keys of the
      dict should match the names of the output layers of the model.
    sample_weights: Sample weights Dataset DistributedValue object. For example,
      when we use `MirroredStrategy` this is a PerReplica object with a tensor
      for each device set in the dict.

  Returns:
    The unwrapped values list of the x and y DistributedValues inputs.

  Raises:
    ValueError: If x and y do not have support for being evaluated as tensors.
      or if x and y contain elements that are not tensors or if x and y
      contain elements that have a shape or dtype mismatch.
  """
  # Inputs and targets must come from a `tf.data.Dataset` when using a
  # DistributionStrategy; each optional component is validated only when
  # present.
  def _validate_optional(values):
    if values is None:
      return None
    return validate_per_replica_inputs(distribution_strategy, values)

  x_values_list = validate_per_replica_inputs(distribution_strategy, x)
  y_values_list = _validate_optional(y)
  sample_weights_list = _validate_optional(sample_weights)

  # Return the unwrapped values to avoid calling `unwrap` a second time.
  return x_values_list, y_values_list, sample_weights_list
def validate_per_replica_inputs(distribution_strategy, x):
  """Validates PerReplica dataset input list.

  Args:
    distribution_strategy: The current DistributionStrategy used to call
      `fit`, `evaluate` and `predict`.
    x: A list of PerReplica objects that represent the input or
      target values.

  Returns:
    List containing the first element of each of the PerReplica objects in
    the input list.

  Raises:
    ValueError: If any of the objects in the `per_replica_list` is not a tensor.
  """
  # Convert the inputs and targets into a list of PerReplica objects.
  per_replica_list = nest.flatten(x, expand_composites=True)
  x_values_list = []
  # Use a dedicated loop variable instead of re-using (and shadowing) the
  # parameter `x`, which made the error path and helper calls confusing.
  for per_replica in per_replica_list:
    # At this point each entry should contain only tensors.
    x_values = distribution_strategy.unwrap(per_replica)
    for value in x_values:
      if not tensor_util.is_tensor(value):
        raise ValueError('Dataset input to the model should be tensors instead '
                         'they are of type {}'.format(type(value)))

    if not context.executing_eagerly():
      # Validate that the shape and dtype of all the elements in x are the same.
      validate_all_tensor_shapes(per_replica, x_values)
      validate_all_tensor_types(per_replica, x_values)

    x_values_list.append(x_values[0])
  return x_values_list
def validate_all_tensor_types(x, x_values):
  """Raises ValueError if elements of `x_values` do not share one dtype."""
  expected_dtype = x_values[0].dtype
  for value in x_values[1:]:
    if value.dtype != expected_dtype:
      raise ValueError('Input tensor dtypes do not match for distributed tensor'
                       ' inputs {}'.format(x))
def validate_all_tensor_shapes(x, x_values):
  """Raises ValueError unless every element of `x_values` has the same shape."""
  # Compare each element's static shape against the first element's shape.
  expected_shape = x_values[0].shape.as_list()
  for value in x_values[1:]:
    if value.shape.as_list() != expected_shape:
      raise ValueError('Input tensor shapes do not match for distributed tensor'
                       ' inputs {}'.format(x))
def _wait_for_variable_initialization(session):
  """Utility to wait for variables to be initialized.

  Blocks until every variable in the current Keras graph that is not already
  flagged with `_keras_initialized` reports as initialized in `session`
  (e.g. after the chief worker initializes them in multi-worker training).

  Args:
    session: Session used to poll variable initialization state.
  """
  all_variables = K._get_variables(K.get_graph())  # pylint: disable=protected-access
  candidate_vars = []
  # Only poll variables this process has not already seen initialized.
  for v in all_variables:
    if not getattr(v, '_keras_initialized', False):
      candidate_vars.append(v)

  if not candidate_vars:
    return

  # Busy-wait: re-run the is-initialized predicates until all candidates are
  # up. Variables are flagged so later calls skip them.
  while True:
    is_initialized = session.run(
        [variables.is_variable_initialized(v) for v in candidate_vars])
    uninitialized_vars = []
    for flag, v in zip(is_initialized, candidate_vars):
      if not flag:
        uninitialized_vars.append(v)
      v._keras_initialized = True  # pylint: disable=protected-access
    if not uninitialized_vars:
      break
def init_restore_or_wait_for_variables():
  """Initialize or restore variables or wait for variables to be initialized."""
  session = K._get_session()  # pylint: disable=protected-access
  in_worker_context = multi_worker_util.has_worker_context()
  if in_worker_context and not multi_worker_util.should_load_checkpoint():
    # Non-checkpoint-loading workers wait for the chief to initialize.
    _wait_for_variable_initialization(session)
  else:
    # TODO(yuefengz): if checkpoints exist, restore from checkpoint.
    K._initialize_variables(session)  # pylint: disable=protected-access
def validate_inputs(x, y):
  """Validate inputs when using DistributionStrategy.

  Args:
    x: Model Inputs.
    y: Model Targets.

  Raises:
    ValueError: if input is not a Dataset or a numpy array(when we use
      MirroredStrategy).
  """
  # Reject raw iterators for either component; only datasets/arrays are
  # supported.
  for component in (x, y):
    if isinstance(component, iterator_ops.Iterator):
      raise ValueError('`DistributionStrategy` does not support inputs of type '
                       'Iterator. You must pass a `tf.data.Dataset` object or a '
                       'numpy array as input.')
def is_dataset_shape_fully_defined(dataset):
  """Returns True iff every output shape of `dataset` is fully defined."""
  shapes = nest.flatten(dataset_ops.get_legacy_output_shapes(dataset))
  return all(s.is_fully_defined() for s in shapes)
def process_batch_and_step_size(strategy,
                                inputs,
                                batch_size,
                                steps_per_epoch,
                                mode,
                                validation_split=0.):
  """Process the batch size and step size based on input and dist strategy."""
  first_x_value = nest.flatten(inputs)[0]
  if not isinstance(first_x_value, np.ndarray):
    # Non-numpy inputs (e.g. datasets) are passed through unchanged.
    return batch_size, steps_per_epoch

  num_samples = first_x_value.shape[0]
  if validation_split and 0. < validation_split < 1.:
    num_samples = int(num_samples * (1 - validation_split))
  # Until support for partial batch is implemented across all
  # functions and distribution strategy, we pass `mode` to selectively
  # relax the constraint to consume all the training samples.
  steps_per_epoch, batch_size = get_input_params(
      strategy, num_samples, steps_per_epoch, batch_size, mode=mode)
  return batch_size, steps_per_epoch
def get_input_params(distribution_strategy,
                     num_samples,
                     steps,
                     batch_size,
                     mode=None):
  """Calculate the number of batches and steps/steps_per_epoch.

  Args:
    distribution_strategy: The DistributionStrategy used to compile the model.
    num_samples: The number of samples from which we determine the batch size
      and steps.
    steps: The specified number of steps.
    batch_size: The specified batch_size.
    mode: ModeKey representing whether input will be used for training,
      evaluation, or prediction. This is used to relax the constraints on
      consuming all the training samples to keep compatibility till we support
      partial batches. If none, then partial batches are not allowed.

  Returns:
    steps: The steps or steps_per_epoch argument depending on if a user is
      calling `fit`, `evaluate` or `predict`. If the is_training flag is set
      we don't require the number of samples to be used completely.
    batch_size: The batch size to be used in model iterations.

  Raises:
    ValueError: If the number of batches or steps evaluates to 0.
  """
  # TODO(b/118776054): Use global batch size for Keras/DS support.
  # Currently this is only supported in TPUStrategy and CoreMirroredStrategy.
  use_per_replica_batch = not dist_utils.global_batch_size_supported(
      distribution_strategy)

  # TODO(b/128995245): In eager mode, uneven batch sizes are allowed except for
  # `fit()` on TPUStrategy.
  # In graph mode, the zero batch case in batch norm is not handled due to
  # XLA-GPU regression. Uneven batch sizes are not allowed except
  # for `test()` and `predict()` on TPUStrategy.
  if context.executing_eagerly():
    allow_partial_batch = (
        mode != ModeKeys.TRAIN or
        not dist_utils.is_tpu_strategy(distribution_strategy))
  else:
    allow_partial_batch = (
        mode == ModeKeys.TRAIN or
        ((mode == ModeKeys.PREDICT or mode == ModeKeys.TEST) and
         dist_utils.is_tpu_strategy(distribution_strategy)))

  if steps is None:
    # Derive steps from num_samples and a (possibly defaulted) batch size.
    if batch_size is None:
      # If neither the batch size or number of steps are set. We choose the
      # global batch size as the minimum of number of samples and 32. 32 is
      # chosen to provide backward compatibility.
      global_batch_size = min(num_samples, 32)
    else:
      # If the user provided the batch size we need to handle the case
      # between different strategies that use the global/per-replica batch size
      global_batch_size = batch_size
      if use_per_replica_batch:
        global_batch_size *= distribution_strategy.num_replicas_in_sync
    if allow_partial_batch:
      steps = np.ceil(num_samples / global_batch_size).astype(int)
    else:
      if num_samples % global_batch_size:
        raise ValueError('The number of samples %s is not divisible by '
                         'batch size %s.' % (num_samples, global_batch_size))
      steps = num_samples // global_batch_size
  else:
    # Steps given: derive batch size, or validate the provided one fits.
    if batch_size is None:
      # We calculate the batch size based on the number of steps specified
      if num_samples % steps:
        raise ValueError('The number of samples %s is not divisible by '
                         'steps %s. Please change the number of steps to a '
                         'value that can consume all the samples' % (
                             num_samples, steps))
      global_batch_size = num_samples // steps
    else:
      # If the user provided the batch size we need to handle the case
      # between different strategies that use the global/per-replica batch size
      global_batch_size = batch_size
      if use_per_replica_batch:
        global_batch_size *= distribution_strategy.num_replicas_in_sync

      min_num_samples = global_batch_size * steps
      # With partial batches allowed, only the final batch may be short.
      if allow_partial_batch:
        min_num_samples = global_batch_size * (steps-1) + 1 if steps > 1 else 0

      if num_samples < min_num_samples:
        raise ValueError('Number of samples %s is less than samples required '
                         'for specified batch_size %s and steps %s' % (
                             num_samples, global_batch_size, steps))

  # We need to return the per replica or global batch size based on the strategy
  if use_per_replica_batch:
    if global_batch_size % distribution_strategy.num_replicas_in_sync:
      raise ValueError(
          'The batch size (%s) could not be sharded evenly across the sync '
          'replicas (%s) in the distribution strategy.' % (
              global_batch_size, distribution_strategy.num_replicas_in_sync))
    batch_size = global_batch_size // distribution_strategy.num_replicas_in_sync
  else:
    batch_size = global_batch_size

  return steps, batch_size
def get_batch_dimension(iterator):
  """Returns the static batch dimension of `iterator`'s first component."""
  shapes = nest.flatten(dataset_ops.get_legacy_output_shapes(iterator))
  # Take the batch size from the first element, as it should be the same for
  # all.
  first_dims = shapes[0].dims
  if not first_dims:
    return None
  return first_dims[0]
def get_iterator(dataset, distribution_strategy):
  """Creates a dataset iterator under the strategy scope and initializes it.

  Args:
    dataset: The `tf.data.Dataset` to iterate over.
    distribution_strategy: Strategy used to create the iterator.

  Returns:
    The initialized distributed dataset iterator.
  """
  with distribution_strategy.scope():
    iterator = distribution_strategy.make_dataset_iterator(dataset)
  initialize_iterator(iterator, distribution_strategy)
  return iterator
def initialize_iterator(iterator, distribution_strategy):
  """Runs the iterator's initializer (session run only needed in graph mode).

  Args:
    iterator: Distributed dataset iterator whose initializer should run.
    distribution_strategy: Strategy whose scope the init op is created in.
  """
  with distribution_strategy.scope():
    init_op = control_flow_ops.group(iterator.initializer)
    if not context.executing_eagerly():
      # In graph mode the initializer op must be run explicitly in a session.
      K.get_session((init_op,)).run(init_op)
def _get_input_from_iterator(iterator, model):
  """Get elements from the iterator and verify the input shape and type."""
  next_element = iterator.get_next()

  # `len(nest.flatten(x))` is going to not count empty elements such as {}.
  # len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is
  # going to get flattened in `_prepare_feed_values` to work around that. Empty
  # elements are going to get filtered out as part of the flattening.
  num_flat_elements = len(nest.flatten(next_element))
  if num_flat_elements == len(model.inputs):
    # Inputs only.
    x = next_element
    y = None
    sample_weights = None
  elif num_flat_elements == len(model.inputs) + len(model.outputs):
    # Inputs and targets.
    x, y = next_element
    sample_weights = None
  else:
    # Inputs, targets and sample weights.
    x, y, sample_weights = next_element

  # Validate that all the elements in x and y are of the same type and shape.
  validate_distributed_dataset_inputs(
      model._distribution_strategy, x, y, sample_weights)
  return x, y, sample_weights
def _prepare_feed_values(model, inputs, targets, sample_weights, mode):
  """Prepare feed values to the model execution function.

  Arguments:
    model: Model to prepare feed values for.
    inputs: List or dict of model inputs.
    targets: Optional list of model targets.
    sample_weights: Optional list of sample weight arrays.
    mode: One of ModeKeys.TRAIN/ModeKeys.TEST/ModeKeys.PREDICT.

  Returns:
    Feed values for the model in the given mode.

  Raises:
    ValueError: If sample weights are passed under TPUStrategy.
    NotImplementedError: If sample weights are used with eager cloning.
  """
  strategy = model._distribution_strategy
  # `inputs` here is actually the iterator; extract the actual components.
  inputs, targets, sample_weights = _get_input_from_iterator(inputs, model)
  if dist_utils.is_tpu_strategy(strategy):
    if sample_weights is not None:
      raise ValueError('TPUStrategy does not support sample weights.')

  # When the inputs are dict, then we want to flatten it in the same order as
  # the input layers, such that the data are fed into the input layers in the
  # correct order.
  if isinstance(inputs, dict):
    inputs = [inputs[key] for key in model._feed_input_names]
  if is_distributing_by_cloning(model):
    inputs = flatten_per_replica_values(strategy, inputs)
    targets = flatten_per_replica_values(strategy, targets)
    # Expand 1-dimensional inputs.
    # TODO(b/124535720): Remove once this standarize data logic is shared with
    # main flow.
    inputs, targets = nest.map_structure(
        training_utils_v1.standardize_single_array, (inputs, targets))
  else:
    inputs = training_utils_v1.ModelInputs(inputs).as_list()

  if mode == ModeKeys.PREDICT:
    # Predict has no targets or sample weights.
    sample_weights = []
    targets = []
  elif sample_weights is not None and is_distributing_by_cloning(model):
    if context.executing_eagerly() and not model._compile_distribution:
      raise NotImplementedError('`sample_weight` is not supported when using '
                                'tf.distribute.Strategy in eager mode and '
                                'cloning=True.')
    sample_weights = flatten_per_replica_values(strategy, sample_weights)

  ins = [inputs, targets, sample_weights]
  return tuple(ins)
def is_distributing_by_cloning(model):
  """Decide whether this model is going to be distributed via cloning.

  We are going to distribute the model by cloning in graph mode.

  Args:
    model: Keras model to distribute.

  Returns:
    True if the `model` is going to be distributed using cloning and False
    otherwise.
  """
  # NOTE(review): `context.executing_eagerly` is referenced without being
  # called, so this operand is always truthy and the branch reduces to the
  # TPU-strategy check alone. The attached bug id suggests this may be a
  # deliberate workaround (never clone on TPUStrategy) — confirm before
  # changing to `context.executing_eagerly()`.
  if (dist_utils.is_tpu_strategy(model._distribution_strategy) and
      context.executing_eagerly):  # b/137580852
    return False
  elif ops.executing_eagerly_outside_functions():
    return bool(model._compile_distribution)
  return True
def _custom_compile_for_predict(model):
"""Custom compile for TPU predict mode."""
if not model.built:
# Model is not compilable because it does not know its number of inputs
# and outputs, nor their shapes and names. We will compile after the first
# time the model gets called on training data.
return
model._is_compiled = True
model.total_loss = None
model.train_function = None
model.test_function = None
model.predict_function = None
def _build_network_on_replica(model, mode, inputs=None, targets=None):
  """Build an updated model on replicas.

  We create a new Keras model while sharing the variables from the old graph.
  Building a new sub-graph is required since the original keras model creates
  placeholders for the input and the output that are not accessible till we
  call iterator.get_next() inside the step_fn for `fit`/`evaluate`/`predict`.

  The sharing of weights and layers between the old and the new model guarantee
  that we're using Strategy variables and any updates on either model are
  reflected correctly in callbacks and loop iterations.

  We need to make sure we share the optimizers between the old and the new model
  as well so that optimizer state is not lost if the user is running fit
  multiple times.

  Args:
    model: Model to be replicated across Replicas
    mode: Which of fit/eval/predict is building the distributed network
    inputs: Input variables to be passed to the model
    targets: Target tensor to be passed to model.compile

  Returns:
    A new model with shared layers with the old model.
  """
  # Need to do imports here since we run into a circular dependency error.
  from tensorflow.python.keras import models  # pylint: disable=g-import-not-at-top
  from tensorflow.python.keras.engine import sequential  # pylint: disable=g-import-not-at-top

  # We rely on the internal methods to avoid having share_weights weights in the
  # public API.
  if isinstance(model, sequential.Sequential):
    updated_model = models._clone_sequential_model(
        model, input_tensors=inputs, layer_fn=models.share_weights)
  else:
    updated_model = models._clone_functional_model(
        model, input_tensors=inputs, layer_fn=models.share_weights)
    # Callable losses added directly to a functional Model need to be added
    # here.
    updated_model._callable_losses = model._callable_losses

  # Recast all low precision outputs back to float32 since we only casted
  # the inputs to bfloat16 and not targets. This is done so that we can preserve
  # precision when calculating the loss value.
  def _upcast_low_precision_outputs(output):
    if output.dtype == dtypes.bfloat16:
      return math_ops.cast(output, dtypes.float32)
    else:
      return output
  updated_model.outputs = [_upcast_low_precision_outputs(o)
                           for o in updated_model.outputs]

  if isinstance(targets, tuple):
    targets = nest.flatten(targets)

  if mode == ModeKeys.PREDICT and inputs is not None:  # TPU predict case
    _custom_compile_for_predict(updated_model)
  else:
    # Re-compile the clone with the ORIGINAL model's optimizer so optimizer
    # state is shared between the two models.
    updated_model.compile(
        model.optimizer,
        model.loss,
        metrics=metrics_module.clone_metrics(model._compile_metrics),
        loss_weights=model.loss_weights,
        sample_weight_mode=model.sample_weight_mode,
        weighted_metrics=metrics_module.clone_metrics(
            model._compile_weighted_metrics),
        target_tensors=targets)
  return updated_model
def _build_distributed_network(model, strategy, mode, inputs=None,
                               targets=None):
  """Create a cloned model on each replica."""
  replica_args = (model, mode, inputs, targets)
  with K.get_graph().as_default(), strategy.scope():
    distributed_model = strategy.extended.call_for_each_replica(
        _build_network_on_replica, args=replica_args)
    set_distributed_model(model, mode, distributed_model)
def _clone_and_build_model(model, mode, inputs=None, targets=None):
  """Clone and build the given keras_model.

  Args:
    model: Keras model to clone and compile.
    mode: One of ModeKeys.TRAIN/TEST/PREDICT.
    inputs: Optional input tensors for the cloned model.
    targets: Optional target tensors passed to compile.

  Returns:
    The cloned and compiled model.
  """
  # We need to set the import here since we run into a circular dependency
  # error.
  from tensorflow.python.keras import models  # pylint: disable=g-import-not-at-top
  cloned_model = models.clone_model(model, input_tensors=inputs)

  # Compile and build model.
  if isinstance(model.optimizer, optimizers.TFOptimizer):
    optimizer = model.optimizer
  else:
    # Rebuild the optimizer from its config so each clone gets a fresh copy.
    optimizer_config = model.optimizer.get_config()
    optimizer = model.optimizer.__class__.from_config(optimizer_config)

  # Recast all low precision outputs back to float32 since we only casted
  # the inputs to bfloat16 and not targets. This is done so that we can preserve
  # precision when calculating the loss value.
  def _upcast_low_precision_outputs(output):
    if output.dtype == dtypes.bfloat16:
      return math_ops.cast(output, dtypes.float32)
    else:
      return output
  cloned_model.outputs = [_upcast_low_precision_outputs(o)
                          for o in cloned_model.outputs]

  if isinstance(targets, tuple):
    targets = nest.flatten(targets)
  if mode == ModeKeys.PREDICT and inputs is not None:  # TPU predict case
    _custom_compile_for_predict(cloned_model)
  else:
    cloned_model.compile(
        optimizer,
        model.loss,
        metrics=metrics_module.clone_metrics(model._compile_metrics),
        loss_weights=model.loss_weights,
        sample_weight_mode=model.sample_weight_mode,
        weighted_metrics=metrics_module.clone_metrics(
            model._compile_weighted_metrics),
        target_tensors=targets)
  return cloned_model
def clone_model_on_replicas(model, strategy, mode, inputs=None, targets=None):
  """Create a cloned model on each replica."""
  clone_args = (model, mode, inputs, targets)
  with K.get_graph().as_default(), strategy.scope():
    distributed_model = strategy.extended.call_for_each_replica(
        _clone_and_build_model, args=clone_args)
    set_distributed_model(model, mode, distributed_model)
  if mode == ModeKeys.TRAIN:
    # The callback model tracks the training replica so callbacks observe
    # current weights.
    model._make_callback_model(distributed_model)
def _make_execution_function(model, mode):
  """Makes or reuses function to run one step of distributed model execution."""
  if is_distributing_by_cloning(model):
    return _make_execution_function_with_cloning(model, mode)

  # Without cloning, the per-mode execution function is cached on the model.
  cached_function = get_distributed_function(model, mode)
  if cached_function:
    return cached_function

  new_function = _make_execution_function_without_cloning(model, mode)
  set_distributed_function(model, mode, new_function)
  return new_function
def _make_execution_function_without_cloning(model, mode):
  """Creates a function to run one step of distributed model execution.

  Args:
    model: Keras model whose step function is built.
    mode: One of ModeKeys.TRAIN/TEST/PREDICT.

  Returns:
    A callable taking an `input_fn` that runs one distributed step and
    returns the (reduced/unwrapped) outputs.
  """
  strategy = model._distribution_strategy

  with strategy.scope():
    per_replica_function = _make_replica_execution_function(model, mode)

    def distributed_function(input_fn):
      """A single step of the distributed execution across replicas."""
      x, y, sample_weights = input_fn()
      # Call `Model.{train,test,predict}_on_batch` on every replica passing
      # PerReplicas as arguments. On every replica inside this call, each
      # PerReplica object will return the value for that replica. The outputs
      # are PerReplicas too.
      outputs = strategy.run(per_replica_function, args=(x, y, sample_weights))
      # Out of PerReplica outputs reduce or pick values to return.
      all_outputs = unwrap_outputs(
          strategy, outputs, with_loss_tensor=(mode != ModeKeys.PREDICT))
      return all_outputs

    if not model.run_eagerly:
      # Compile the step into a tf.function, then eval tensors to numpy.
      distributed_function = def_function.function(distributed_function)
      def execution_function(input_fn):
        # `numpy` translates Tensors to values in Eager mode.
        return [out.numpy() for out in distributed_function(input_fn)]
    else:
      execution_function = distributed_function

    return execution_function
def _make_replica_execution_function(model, mode):
  """A single step of the distributed execution on a replica."""
  if mode == ModeKeys.PREDICT:
    # Predict ignores targets and sample weights.
    def _predict_step(x, y=None, sample_weights=None):
      del y, sample_weights
      return model.predict_on_batch(x)
    return _predict_step

  if mode == ModeKeys.TRAIN:
    step_fn = model.train_on_batch
  else:
    step_fn = model.test_on_batch
  # `reset_metrics` is set to False to maintain stateful metrics across
  # batch-level calls.
  return functools.partial(step_fn, reset_metrics=False)
def _make_replicated_models_with_cloning(model, mode):
  """Build models on each replica."""
  strategy = model._distribution_strategy
  # If distributed_model is not built, create one for `mode`; the builder
  # depends on whether compile-time distribution was requested.
  if model._compile_distribution:
    builder = clone_model_on_replicas
  else:
    builder = _build_distributed_network
  builder(model, strategy, mode)
def _make_execution_function_with_cloning(model, mode):
  """Clones or re-uses models to run one step of distributed model execution.

  Args:
    model: Keras model being distributed.
    mode: One of ModeKeys.TRAIN/TEST/PREDICT.

  Returns:
    The (possibly cached) distributed execution function for `mode`.
  """
  distributed_model = get_distributed_model(model, mode)
  # TODO(b/134069401): Create a cache for the distributed model and exec
  # function that incorporates additional attributes to be part of the cache key
  # than just the mode.
  # If distributed model for a particular `mode` is already built, use the
  # `_distributed_function` on that distributed model.
  # If you have updated the sample_weight_mode on the model, then you will need
  # to recompile metrics and recreate the execution function. This is indicated
  # by the `_recompile_exec_function` property.
  # Fix: the cached attribute is `_distributed_function` (set below); the old
  # check looked up `_distribution_function`, which is never set, so the cache
  # was never hit and the function was rebuilt on every call.
  if (distributed_model and hasattr(distributed_model, '_distributed_function')
      and not (hasattr(distributed_model, '_recompile_exec_function') and
               distributed_model._recompile_exec_function)):
    return distributed_model._distributed_function

  if not distributed_model:
    _make_replicated_models_with_cloning(model, mode)
    distributed_model = get_distributed_model(model, mode)
  assert distributed_model

  # Also create an execution function on that distributed model.
  if context.executing_eagerly():
    distributed_function = _make_eager_execution_function(model, mode)
  else:
    distributed_function = _make_graph_execution_function(model, mode)

  # We cache the distributed execution function on the model since creating
  # distributed models and execution functions are expensive.
  distributed_model._distributed_function = distributed_function
  distributed_model._recompile_exec_function = False
  return distributed_function
def _make_graph_execution_function(model, mode):
  """Makes function to run one step of distributed model in graph mode.

  Args:
    model: Keras model whose distributed clone supplies the per-replica ops.
    mode: One of ModeKeys.TRAIN/TEST/PREDICT.

  Returns:
    A `K.function` that runs one distributed step across all replicas.
  """

  def _per_replica_function(model):
    # Collect the per-replica execution function's tensors and ops.
    f = model._make_execution_function(mode)
    return (f.inputs, f.outputs, f.updates_op, f.session_kwargs)

  strategy = model._distribution_strategy
  with strategy.scope():
    # Create train ops on each of the devices when we call
    # `_per_replica_fit_function`.
    (grouped_inputs, grouped_outputs, grouped_updates,
     grouped_session_args) = strategy.extended.call_for_each_replica(
         _per_replica_function, args=(get_distributed_model(model, mode),))

    # Initialize the variables in the replicated model. This is necessary for
    # multi-worker training because on some workers, initialization is not
    # needed. This method does initialization or waiting for initialization
    # according to the context object of distribute coordinator.
    init_restore_or_wait_for_variables()

    # Unwrap all the per device values returned from `call_for_each_replica`.
    # Unwrapping per device values gives you a list of values that can be
    # used to construct a new train function that is composed of update ops on
    # all the devices over which the model is distributed.
    (all_inputs, all_outputs, all_updates, all_session_args) = unwrap_values(
        strategy,
        grouped_inputs,
        grouped_outputs,
        grouped_updates,
        grouped_session_args,
        with_loss_tensor=(mode != ModeKeys.PREDICT))

    return K.function(
        all_inputs,
        all_outputs,
        updates=all_updates,
        name='distributed_{}_function'.format(mode),
        **all_session_args)
def _make_eager_execution_function(model, mode):
  """Makes function to run one step of distributed model eager execution.

  Args:
    model: Keras model whose distributed clone supplies per-replica tensors.
    mode: One of ModeKeys.TRAIN/TEST/PREDICT.

  Returns:
    A `K.function` (built in a separate FuncGraph) for one distributed step.
  """

  def _per_replica_function(model):
    # Only inputs/outputs are needed; eager mode has no update ops to fetch.
    f = model._make_execution_function(mode)
    return (f.inputs, f.outputs)

  # NOTE(priyag): Try creating a new FuncGraph within DS scope instead of using
  # the global one.
  strategy = model._distribution_strategy
  global_graph = K.get_graph()

  with global_graph.as_default(), strategy.scope():
    # First we gather the relevant portions of the model across all replicas.
    # `K._scratch_graph(global_graph)` signals to Keras that it should not
    # lift to a separate graph when creating the per-replica functions.
    with K._scratch_graph(global_graph):
      # Create train ops on each of the devices when we call
      # `_per_replica_fit_function`.
      grouped = strategy.extended.call_for_each_replica(
          _per_replica_function, args=(get_distributed_model(model, mode),))
      grouped_inputs, grouped_outputs = grouped

      # Unwrap all the per device values returned from `call_for_each_replica`.
      # Unwrapping per device values gives you a list of values that can be
      # used to construct a new train function that is composed of
      # inputs/outputs on all the devices over which the model is distributed.
      (all_inputs, all_outputs, _, _) = unwrap_values(
          strategy,
          grouped_inputs,
          grouped_outputs,
          with_loss_tensor=(mode != ModeKeys.PREDICT))

    # Finally, a joint Keras function is created; this one will be created in
    # a separate FuncGraph.
    return K.function(
        all_inputs,
        all_outputs,
        name='eager_distributed_{}_function'.format(mode))
def _copy_weights_to_distributed_model(original_model, mode):
  """Copies weights from original model to distributed models."""
  strategy = original_model._distribution_strategy
  if not strategy:
    return
  # Copy the weights from the original model to each of the replicated
  # models.
  distributed_model = get_distributed_model(original_model, mode)
  first_replica_model = strategy.unwrap(distributed_model)[0]
  set_weights(strategy, first_replica_model, original_model.get_weights())
def _copy_weights_to_original_model(model, mode):
  """Copies weights from first distributed model back to original model."""
  if not (model._distribution_strategy and mode == ModeKeys.TRAIN):
    return
  distributed_model = get_distributed_model(model, mode)
  first_replica = model._distribution_strategy.unwrap(distributed_model)[0]
  model.set_weights(first_replica.get_weights())
def _per_replica_aggregate_batch(strategy, batch_outs, model, mode):
  """Aggregates the per-replica batch-level outputs from a distributed step."""
  if strategy is None or mode != ModeKeys.PREDICT:
    return batch_outs

  num_replicas = strategy.num_replicas_in_sync
  total_batch_outs = []
  # `batch_outs` holds `num_replicas` consecutive entries per model output;
  # concatenate each group along the batch dimension.
  for i in range(len(model.outputs)):
    replica_outs = batch_outs[i * num_replicas:(i + 1) * num_replicas]
    total_batch_outs.append(
        concat_along_batch_dimension(nest.flatten(replica_outs)))
  return total_batch_outs
def _reset_metrics(model):
  """Resets metrics on the first replica of every cached distributed model."""
  if not model._distribution_strategy:
    return
  for mode in (ModeKeys.TRAIN, ModeKeys.TEST, ModeKeys.PREDICT):
    distributed_model = get_distributed_model(model, mode)
    if distributed_model:
      first_replica = model._distribution_strategy.unwrap(distributed_model)[0]
      first_replica.reset_metrics()
def get_distributed_model(model, mode):
  """Returns the cached distributed model for `mode`, or None if absent."""
  return model._distributed_model_cache.get(_generate_cache_key(mode))
def set_distributed_model(model, mode, distributed_model):
  """Caches `distributed_model` on `model` under the key for `mode`."""
  cache_key = _generate_cache_key(mode)
  model._distributed_model_cache[cache_key] = distributed_model
def get_distributed_function(model, mode):
  """Returns the cached distributed function for `mode`, or None if absent."""
  return model._distributed_function_cache.get(_generate_cache_key(mode))
def set_distributed_function(model, mode, distributed_function):
  """Caches `distributed_function` on `model` under the key for `mode`."""
  cache_key = _generate_cache_key(mode)
  model._distributed_function_cache[cache_key] = distributed_function
def _generate_cache_key(mode):
key = hash(mode)
return key
@tf_contextlib.contextmanager
def distributed_scope(strategy, learning_phase):
  """Context manager combining the strategy scope with a learning phase.

  Args:
    strategy: DistributionStrategy whose scope to enter.
    learning_phase: Learning phase value (e.g. 1 for training, 0 otherwise).

  Yields:
    Nothing; the body runs inside both scopes.
  """
  with strategy.scope(), K.learning_phase_scope(learning_phase):
    yield
def is_current_worker_chief():
  """Returns True iff the current distribute-coordinator worker is chief."""
  worker_context = dc_context.get_current_worker_context()
  return worker_context.is_chief
def filter_distributed_callbacks(callbacks_list, model):
  """Filter Callbacks based on the worker context when running multi-worker.

  Arguments:
    callbacks_list: A list of `Callback` instances.
    model: Keras model instance.

  Returns:
    The list of `Callback` instances that should be run on this worker.

  Raises:
    ValueError: If called while Keras is not in multi worker mode.
  """
  if not model._in_multi_worker_mode():
    raise ValueError(
        'filter_distributed_callbacks() should only be called when Keras '
        'is in multi worker mode.')

  callbacks_list = callbacks_list or []
  if not any(
      isinstance(c, callbacks.ModelCheckpoint) for c in callbacks_list):
    # TODO(rchao): Consider providing a ModelCheckpoint here if the user
    # fails to (possibly with tempfile directory).
    logging.warning('ModelCheckpoint callback is not provided. '
                    'Workers will need to restart training if any fails.')

  # `callbacks_list` was normalized to a list above, so the former
  # `callbacks_list is None` guard was dead code and has been removed.
  if is_current_worker_chief():
    return callbacks_list

  # Some Callbacks should only run on the chief worker.
  return [
      callback for callback in callbacks_list if not callback._chief_worker_only
  ]  # pylint: disable=protected-access
def _update_sample_weight_modes(model, mode, sample_weights):
    """Update sample_weight_mode of the distributed model.

    Only applies in cloning-based distribution; lazily creates the replicated
    models for `mode` if they do not exist yet, then propagates per-replica
    sample weights into each model copy.
    """
    if is_distributing_by_cloning(model):
        distributed_model = get_distributed_model(model, mode)
        if not distributed_model:
            # First use for this mode: build the replicated models now.
            _make_replicated_models_with_cloning(model, mode)
            distributed_model = get_distributed_model(model, mode)
        # Flag a recompile if any training endpoint's sample_weight setup
        # no longer matches what was compiled.
        distributed_model._recompile_exec_function = any(
            [e.sample_weights_mismatch() for e in model._training_endpoints])
        if sample_weights:
            distributed_models = flatten_per_replica_values(
                model._distribution_strategy, distributed_model)
            # sample_weights is a tuple of 1 list where the number of elements in the
            # list is equal to the number of replicas in sync.
            sample_weights = sample_weights[0]
            if sample_weights and None not in sample_weights:
                # Pair each replica's model copy with its own sample weight.
                for m, sw in zip(distributed_models, sample_weights):
                    m._update_sample_weight_modes(sample_weights=[sw])
def concat_along_batch_dimension(outputs):
    """Concats prediction outputs along the batch dimension."""
    first = outputs[0]
    if isinstance(first, sparse_tensor.SparseTensor):
        result = sparse_ops.sparse_concat_v2(axis=0, sp_inputs=outputs)
    elif isinstance(first, ragged_tensor.RaggedTensor):
        result = ragged_concat_ops.concat(outputs, axis=0)
    else:
        # Dense outputs: plain numpy concatenation along axis 0.
        result = np.concatenate(outputs)
    return result
| |
# -*- coding: utf-8 -*-
from django.conf import settings
from django.test.signals import template_rendered
from django.core.handlers.wsgi import WSGIHandler
from django.test import TestCase, TransactionTestCase
from django.test.client import store_rendered_templates
from django.utils.functional import curry
from django.utils.importlib import import_module
from django.core import signals
try:
from django.db import close_old_connections
except ImportError:
from django.db import close_connection
close_old_connections = None
try:
from django.core.servers.basehttp import AdminMediaHandler as StaticFilesHandler
except ImportError:
from django.contrib.staticfiles.handlers import StaticFilesHandler
from webtest import TestApp
try:
from webtest.utils import NoDefault
except ImportError:
NoDefault = ''
from django_webtest.response import DjangoWebtestResponse
from django_webtest.compat import to_string, to_wsgi_safe_string
class DjangoTestApp(TestApp):
    """WebTest ``TestApp`` wired to a Django WSGI application.

    Extends WebTest's app with: a ``user`` keyword on the HTTP verb methods
    (recorded in the WSGI environ for django-webtest's auth middleware),
    template/context capture onto the response object, and protection of
    Django's request_started/request_finished connection-closing signals so
    transactional tests keep working.
    """
    response_class = DjangoWebtestResponse

    def __init__(self, extra_environ=None, relative_to=None):
        super(DjangoTestApp, self).__init__(self.get_wsgi_handler(), extra_environ, relative_to)

    def get_wsgi_handler(self):
        # Wrap the Django handler so static files are served in tests too.
        return StaticFilesHandler(WSGIHandler())

    def _update_environ(self, environ, user):
        # Record the requesting user in the environ so that
        # WebtestUserMiddleware can log it in on the Django side.
        if user:
            environ = environ or {}
            username = _get_username(user)
            environ['WEBTEST_USER'] = to_wsgi_safe_string(username)
        return environ

    def do_request(self, req, status, expect_errors):
        """Perform the request, attaching rendered template/context data
        to the response and temporarily disabling Django's per-request
        connection-closing signals."""
        # Django closes the database connection after every request;
        # this breaks the use of transactions in your tests.
        if close_old_connections is not None:  # Django 1.6+
            signals.request_started.disconnect(close_old_connections)
            signals.request_finished.disconnect(close_old_connections)
        else:  # Django < 1.6
            signals.request_finished.disconnect(close_connection)
        try:
            req.environ.setdefault('REMOTE_ADDR', '127.0.0.1')
            # is this a workaround for https://code.djangoproject.com/ticket/11111 ?
            req.environ['REMOTE_ADDR'] = to_string(req.environ['REMOTE_ADDR'])
            req.environ['PATH_INFO'] = to_string(req.environ['PATH_INFO'])
            # Curry a data dictionary into an instance of the template renderer
            # callback function.
            data = {}
            on_template_render = curry(store_rendered_templates, data)
            template_rendered.connect(on_template_render)
            response = super(DjangoTestApp, self).do_request(req, status, expect_errors)
            # Add any rendered template detail to the response.
            # If there was only one template rendered (the most likely case),
            # flatten the list to a single element.
            def flattend(detail):
                if len(data[detail]) == 1:
                    return data[detail][0]
                return data[detail]
            response.context = None
            response.template = None
            response.templates = data.get('templates', None)
            if data.get('context'):
                response.context = flattend('context')
            if data.get('template'):
                response.template = flattend('template')
            elif data.get('templates'):
                response.template = flattend('templates')
            response.__class__ = self.response_class
            return response
        finally:
            # Always reconnect the signals disconnected above, even on error.
            if close_old_connections:  # Django 1.6+
                signals.request_started.connect(close_old_connections)
                signals.request_finished.connect(close_old_connections)
            else:  # Django < 1.6
                signals.request_finished.connect(close_connection)

    def get(self, url, params=None, headers=None, extra_environ=None,
            status=None, expect_errors=False, user=None, auto_follow=False,
            content_type=None):
        # NOTE(review): `content_type` is accepted but not forwarded to the
        # superclass here — confirm whether that is intentional.
        extra_environ = self._update_environ(extra_environ, user)
        response = super(DjangoTestApp, self).get(
            url, params, headers, extra_environ, status, expect_errors)
        # Optionally follow 3xx redirects until a non-redirect response.
        is_redirect = lambda r: r.status_int >= 300 and r.status_int < 400
        while auto_follow and is_redirect(response):
            response = response.follow()
        return response

    def post(self, url, params='', headers=None, extra_environ=None,
             status=None, upload_files=None, expect_errors=False,
             content_type=None, user=None):
        extra_environ = self._update_environ(extra_environ, user)
        return super(DjangoTestApp, self).post(
            url, params, headers, extra_environ, status,
            upload_files, expect_errors, content_type)

    def put(self, url, params='', headers=None, extra_environ=None,
            status=None, upload_files=None, expect_errors=False,
            content_type=None, user=None):
        extra_environ = self._update_environ(extra_environ, user)
        return super(DjangoTestApp, self).put(
            url, params, headers, extra_environ, status,
            upload_files, expect_errors, content_type)

    def patch(self, url, params='', headers=None, extra_environ=None,
              status=None, upload_files=None, expect_errors=False,
              content_type=None, user=None):
        extra_environ = self._update_environ(extra_environ, user)
        return super(DjangoTestApp, self).patch(
            url, params, headers, extra_environ, status,
            upload_files, expect_errors, content_type)

    def options(self, url, params='', headers=None, extra_environ=None,
                status=None, upload_files=None, expect_errors=False,
                content_type=None, user=None):
        # NOTE(review): unlike the other verbs, `upload_files`,
        # `expect_errors` and `content_type` are not forwarded — confirm
        # against the installed webtest version's OPTIONS signature.
        extra_environ = self._update_environ(extra_environ, user)
        return super(DjangoTestApp, self).options(
            url, params, headers, extra_environ, status)

    def delete(self, url, params=NoDefault, headers=None, extra_environ=None,
               status=None, expect_errors=False,
               content_type=None, user=None):
        extra_environ = self._update_environ(extra_environ, user)
        return super(DjangoTestApp, self).delete(
            url, params, headers, extra_environ, status,
            expect_errors, content_type)

    @property
    def session(self):
        """
        Obtains the current session variables.

        Returns a SessionStore built from the session cookie, or an empty
        dict when sessions are not installed or no cookie is present.
        """
        if 'django.contrib.sessions' in settings.INSTALLED_APPS:
            engine = import_module(settings.SESSION_ENGINE)
            cookie = self.cookies.get(settings.SESSION_COOKIE_NAME, None)
            if cookie:
                return engine.SessionStore(cookie)
        return {}
class WebTestMixin(object):
    """Mixin that patches Django settings and provides ``self.app``
    (a :class:`DjangoTestApp`) for each test run.

    Settings are patched before the test and restored afterwards; override
    the class attributes below to customize behaviour.
    """
    extra_environ = {}   # extra WSGI environ passed to every request
    csrf_checks = True   # set False to install the CSRF-disabling middleware
    setup_auth = True    # set False to skip webtest auth middleware/backend
    app_class = DjangoTestApp

    def _patch_settings(self):
        '''
        Patches settings to add support for django-webtest authorization
        and (optional) to disable CSRF checks.
        '''
        # Save originals so _unpatch_settings can restore them exactly.
        self._DEBUG_PROPAGATE_EXCEPTIONS = settings.DEBUG_PROPAGATE_EXCEPTIONS
        self._MIDDLEWARE_CLASSES = settings.MIDDLEWARE_CLASSES[:]
        self._AUTHENTICATION_BACKENDS = settings.AUTHENTICATION_BACKENDS[:]
        # Ensure mutable lists so middleware/backends can be inserted.
        settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
        settings.AUTHENTICATION_BACKENDS = list(settings.AUTHENTICATION_BACKENDS)
        settings.DEBUG_PROPAGATE_EXCEPTIONS = True
        if not self.csrf_checks:
            self._disable_csrf_checks()
        if self.setup_auth:
            self._setup_auth()

    def _unpatch_settings(self):
        ''' Restores settings to before-patching state '''
        settings.MIDDLEWARE_CLASSES = self._MIDDLEWARE_CLASSES
        settings.AUTHENTICATION_BACKENDS = self._AUTHENTICATION_BACKENDS
        settings.DEBUG_PROPAGATE_EXCEPTIONS = self._DEBUG_PROPAGATE_EXCEPTIONS

    def _setup_auth(self):
        ''' Setups django-webtest authorization '''
        self._setup_auth_middleware()
        self._setup_auth_backend()

    def _disable_csrf_checks(self):
        # Prepend the CSRF-disabling middleware once, if not already present.
        disable_csrf_middleware = 'django_webtest.middleware.DisableCSRFCheckMiddleware'
        if not disable_csrf_middleware in settings.MIDDLEWARE_CLASSES:
            settings.MIDDLEWARE_CLASSES.insert(0, disable_csrf_middleware)

    def _setup_auth_middleware(self):
        webtest_auth_middleware = 'django_webtest.middleware.WebtestUserMiddleware'
        django_auth_middleware = 'django.contrib.auth.middleware.AuthenticationMiddleware'
        if django_auth_middleware not in settings.MIDDLEWARE_CLASSES:
            # There can be a custom AuthenticationMiddleware subclass or replacement,
            # we can't compute its index so just put our auth middleware to the end.
            # If appending causes problems _setup_auth_middleware method can
            # be overriden by a subclass.
            settings.MIDDLEWARE_CLASSES.append(webtest_auth_middleware)
        else:
            # Install directly after Django's auth middleware.
            index = settings.MIDDLEWARE_CLASSES.index(django_auth_middleware)
            settings.MIDDLEWARE_CLASSES.insert(index+1, webtest_auth_middleware)

    def _setup_auth_backend(self):
        # The webtest backend must be tried first so WEBTEST_USER wins.
        backend_name = 'django_webtest.backends.WebtestUserBackend'
        settings.AUTHENTICATION_BACKENDS.insert(0, backend_name)

    def renew_app(self):
        """
        Resets self.app (drops the stored state): cookies, etc.
        Note: this renews only self.app, not the responses fetched by self.app.
        """
        self.app = self.app_class(extra_environ=self.extra_environ)

    def __call__(self, result=None):
        # Patch settings and create a fresh app around every test execution.
        self._patch_settings()
        self.renew_app()
        res = super(WebTestMixin, self).__call__(result)
        self._unpatch_settings()
        return res
class WebTest(WebTestMixin, TestCase):
    """Django ``TestCase`` with django-webtest support (``self.app``)."""
    pass
class TransactionWebTest(WebTestMixin, TransactionTestCase):
    """Django ``TransactionTestCase`` with django-webtest support."""
    pass
def _get_username(user):
"""
Return user's username. ``user`` can be standard Django User
instance, a custom user model or just an username (as string).
"""
if hasattr(user, 'get_username'): # custom user, django 1.5+
return user.get_username()
elif hasattr(user, 'username'): # standard User
return user.username
else: # assume user is just an username
return user
| |
import os
import sys
from optparse import OptionParser
import imp
import django
from django.core.management.base import BaseCommand, CommandError, handle_default_options
# For backwards compatibility: get_version() used to be in this module.
get_version = django.get_version
# A cache of loaded commands, so that call_command
# doesn't have to reload every time it's called.
_commands = None
def find_commands(management_dir):
    """
    Given a path to a management directory, returns a list of all the command
    names that are available.
    Returns an empty list if no commands are defined.
    """
    command_dir = os.path.join(management_dir, 'commands')
    try:
        entries = os.listdir(command_dir)
    except OSError:
        # Missing/unreadable commands directory means no commands.
        return []
    return [entry[:-3] for entry in entries
            if entry.endswith('.py') and not entry.startswith('_')]
def find_management_module(app_name):
    """
    Determines the path to the management module for the given app_name,
    without actually importing the application or the management module.
    Raises ImportError if the management module cannot be found for any reason.
    """
    # NOTE: this function uses Python 2-only syntax (`except ImportError,e`)
    # like the rest of this module; it will not parse under Python 3.
    parts = app_name.split('.')
    parts.append('management')
    parts.reverse()
    part = parts.pop()
    path = None
    # When using manage.py, the project module is added to the path,
    # loaded, then removed from the path. This means that
    # testproject.testapp.models can be loaded in future, even if
    # testproject isn't in the path. When looking for the management
    # module, we need look for the case where the project name is part
    # of the app_name but the project directory itself isn't on the path.
    try:
        f, path, descr = imp.find_module(part,path)
    except ImportError,e:
        # If we're already inside the project directory, the first path
        # component won't be importable; tolerate that case only.
        if os.path.basename(os.getcwd()) != part:
            raise e
    # Walk the remaining dotted parts down to .../management.
    while parts:
        part = parts.pop()
        f, path, descr = imp.find_module(part, path and [path] or None)
    return path
def load_command_class(app_name, name):
    """
    Given a command name and an application name, returns the Command
    class instance. All errors raised by the import process
    (ImportError, AttributeError) are allowed to propagate.
    """
    module_path = '%s.management.commands.%s' % (app_name, name)
    command_module = __import__(module_path, {}, {}, ['Command'])
    return command_module.Command()
def get_commands():
    """
    Returns a dictionary mapping command names to their callback applications.
    This works by looking for a management.commands package in django.core, and
    in each installed application -- if a commands package exists, all commands
    in that package are registered.
    Core commands are always included. If a settings module has been
    specified, user-defined commands will also be included, the
    startproject command will be disabled, and the startapp command
    will be modified to use the directory in which the settings module appears.
    The dictionary is in the format {command_name: app_name}. Key-value
    pairs from this dictionary can then be used in calls to
    load_command_class(app_name, command_name)
    If a specific version of a command must be loaded (e.g., with the
    startapp command), the instantiated module can be placed in the
    dictionary in place of the application name.
    The dictionary is cached on the first call and reused on subsequent
    calls.
    """
    global _commands
    if _commands is None:
        # `__path__` implies this module is a package __init__; seed the
        # cache with Django's built-in commands.
        _commands = dict([(name, 'django.core') for name in find_commands(__path__[0])])
        # Find the installed apps
        try:
            from django.conf import settings
            apps = settings.INSTALLED_APPS
        except (AttributeError, EnvironmentError, ImportError):
            apps = []
        # Find the project directory
        try:
            from django.conf import settings
            # NOTE: setup_environ is defined later in this module; that is
            # fine because the name is resolved at call time.
            project_directory = setup_environ(
                __import__(
                    settings.SETTINGS_MODULE, {}, {},
                    (settings.SETTINGS_MODULE.split(".")[-1],)
                )
            )
        except (AttributeError, EnvironmentError, ImportError):
            project_directory = None
        # Find and load the management module for each installed app.
        for app_name in apps:
            try:
                path = find_management_module(app_name)
                _commands.update(dict([(name, app_name)
                                       for name in find_commands(path)]))
            except ImportError:
                pass # No management module - ignore this app
        if project_directory:
            # Remove the "startproject" command from self.commands, because
            # that's a django-admin.py command, not a manage.py command.
            del _commands['startproject']
            # Override the startapp command so that it always uses the
            # project_directory, not the current working directory
            # (which is default).
            from django.core.management.commands.startapp import ProjectCommand
            _commands['startapp'] = ProjectCommand(project_directory)
    return _commands
def call_command(name, *args, **options):
    """
    Calls the given command, with the given options and args/kwargs.
    This is the primary API you should use for calling specific commands.
    Some examples:
        call_command('syncdb')
        call_command('shell', plain=True)
        call_command('sqlall', 'myapp')

    Raises CommandError if the command name is unknown.
    """
    try:
        app_name = get_commands()[name]
        if isinstance(app_name, BaseCommand):
            # If the command is already loaded, use it directly.
            klass = app_name
        else:
            klass = load_command_class(app_name, name)
    except KeyError:
        # Python 2-style raise, consistent with the rest of this module.
        raise CommandError, "Unknown command: %r" % name
    return klass.execute(*args, **options)
class LaxOptionParser(OptionParser):
    """
    An option parser that doesn't raise any errors on unknown options.
    This is needed because the --settings and --pythonpath options affect
    the commands (and thus the options) that are available to the user.

    Unknown options and positional arguments are collected into the leftover
    args list instead of aborting the parse.
    """
    def error(self, msg):
        # Swallow parse errors entirely; unknown options must not abort.
        pass

    def print_help(self):
        """Output nothing.
        The lax options are included in the normal option parser, so under
        normal usage, we don't need to print the lax options.
        """
        pass

    def print_lax_help(self):
        """Output the basic options available to every command.
        This just redirects to the default print_help() behaviour.
        """
        OptionParser.print_help(self)

    def _process_args(self, largs, rargs, values):
        """
        Overrides OptionParser._process_args to exclusively handle default
        options and ignore args and other options.
        This overrides the behavior of the super class, which stop parsing
        at the first unrecognized option.
        """
        while rargs:
            arg = rargs[0]
            try:
                if arg[0:2] == "--" and len(arg) > 2:
                    # process a single long option (possibly with value(s))
                    # the superclass code pops the arg off rargs
                    self._process_long_opt(rargs, values)
                elif arg[:1] == "-" and len(arg) > 1:
                    # process a cluster of short options (possibly with
                    # value(s) for the last one only)
                    # the superclass code pops the arg off rargs
                    self._process_short_opts(rargs, values)
                else:
                    # it's either a non-default option or an arg
                    # either way, add it to the args list so we can keep
                    # dealing with options
                    del rargs[0]
                    # Jump to the except handler so the arg is preserved.
                    # (The original code did `raise error` with an undefined
                    # name, relying on the resulting NameError being caught
                    # by the bare except below; raise explicitly instead.)
                    raise Exception
            except:
                largs.append(arg)
class ManagementUtility(object):
    """
    Encapsulates the logic of the django-admin.py and manage.py utilities.
    A ManagementUtility has a number of commands, which can be manipulated
    by editing the self.commands dictionary.
    """
    def __init__(self, argv=None):
        # Default to the process argv; prog_name is used in help output.
        self.argv = argv or sys.argv[:]
        self.prog_name = os.path.basename(self.argv[0])

    def main_help_text(self):
        """
        Returns the script's main help text, as a string.
        """
        usage = ['',"Type '%s help <subcommand>' for help on a specific subcommand." % self.prog_name,'']
        usage.append('Available subcommands:')
        # NOTE: relies on Python 2 dict.keys() returning a list
        # (list.sort() would fail on a Python 3 view object).
        commands = get_commands().keys()
        commands.sort()
        for cmd in commands:
            usage.append('  %s' % cmd)
        return '\n'.join(usage)

    def fetch_command(self, subcommand):
        """
        Tries to fetch the given subcommand, printing a message with the
        appropriate command called from the command line (usually
        "django-admin.py" or "manage.py") if it can't be found.
        """
        try:
            app_name = get_commands()[subcommand]
            if isinstance(app_name, BaseCommand):
                # If the command is already loaded, use it directly.
                klass = app_name
            else:
                klass = load_command_class(app_name, subcommand)
        except KeyError:
            # Unknown subcommand: report and exit with an error status.
            sys.stderr.write("Unknown command: %r\nType '%s help' for usage.\n" % \
                (subcommand, self.prog_name))
            sys.exit(1)
        return klass

    def execute(self):
        """
        Given the command-line arguments, this figures out which subcommand is
        being run, creates a parser appropriate to that command, and runs it.
        """
        # Preprocess options to extract --settings and --pythonpath.
        # These options could affect the commands that are available, so they
        # must be processed early.
        parser = LaxOptionParser(usage="%prog subcommand [options] [args]",
                                 version=get_version(),
                                 option_list=BaseCommand.option_list)
        try:
            options, args = parser.parse_args(self.argv)
            handle_default_options(options)
        except:
            pass # Ignore any option errors at this point.
        try:
            subcommand = self.argv[1]
        except IndexError:
            sys.stderr.write("Type '%s help' for usage.\n" % self.prog_name)
            sys.exit(1)
        if subcommand == 'help':
            # args includes the program name and 'help', so a target
            # subcommand (if any) lives at args[2].
            if len(args) > 2:
                self.fetch_command(args[2]).print_help(self.prog_name, args[2])
            else:
                parser.print_lax_help()
                sys.stderr.write(self.main_help_text() + '\n')
                sys.exit(1)
        # Special-cases: We want 'django-admin.py --version' and
        # 'django-admin.py --help' to work, for backwards compatibility.
        elif self.argv[1:] == ['--version']:
            # LaxOptionParser already takes care of printing the version.
            pass
        elif self.argv[1:] == ['--help']:
            parser.print_lax_help()
            sys.stderr.write(self.main_help_text() + '\n')
        else:
            self.fetch_command(subcommand).run_from_argv(self.argv)
def setup_environ(settings_mod):
    """
    Configures the runtime environment. This can also be used by external
    scripts wanting to set up a similar environment to manage.py.
    Returns the project directory (assuming the passed settings module is
    directly in the project directory).

    Side effects: appends/pops sys.path and sets DJANGO_SETTINGS_MODULE.
    """
    # Add this project to sys.path so that it's importable in the conventional
    # way. For example, if this file (manage.py) lives in a directory
    # "myproject", this code would add "/path/to/myproject" to sys.path.
    project_directory, settings_filename = os.path.split(settings_mod.__file__)
    if project_directory == os.curdir or not project_directory:
        project_directory = os.getcwd()
    project_name = os.path.basename(project_directory)
    settings_name = os.path.splitext(settings_filename)[0]
    # Temporarily put the project's parent on sys.path just long enough to
    # import the project package itself.
    sys.path.append(os.path.join(project_directory, os.pardir))
    project_module = __import__(project_name, {}, {}, [''])
    sys.path.pop()
    # Set DJANGO_SETTINGS_MODULE appropriately.
    os.environ['DJANGO_SETTINGS_MODULE'] = '%s.%s' % (project_name, settings_name)
    return project_directory
def execute_from_command_line(argv=None):
    """
    A simple method that runs a ManagementUtility.
    """
    ManagementUtility(argv).execute()
def execute_manager(settings_mod, argv=None):
    """
    Like execute_from_command_line(), but for use by manage.py, a
    project-specific django-admin.py utility.
    """
    # Configure the environment first so commands see the right settings.
    setup_environ(settings_mod)
    ManagementUtility(argv).execute()
| |
#!/usr/bin/env python
__author__ = 'Ian Katz, Michael Meisinger'
import re
from ooi import logging
from ooi.logging import log
from pyon.util.containers import get_ion_ts, DotDict
from pyon.core.exception import BadRequest, Inconsistent, NotFound
from pyon.core.registry import getextends
from pyon.ion.resource import LCE, RT, PRED
from pyon.util.config import Config
# Common resource type and association definitions
# Module-level, lazily-built cache of label/predicate lookup tables that is
# shared by every EnhancedResourceRegistryClient instance (see __init__).
errc_lookups = None
class EnhancedResourceRegistryClient(object):
"""
This class provides enhanced resource registry client functionality by wrapping the "real" client.
Specifically, this class adds more succinct interaction with the resource registry in assign and find operations.
This class analyzes the allowable resource/predicate relations to allow the following:
* assigning/unassigning one resource to another and letting this class figure out the allowed predicate
* assigning and validating that only one subject (or object) association exists
* finding objects or subjects between two resource types and letting the class figure out the allowed predicate
* finding a single object or subject and letting the class do the error checking for len(results) == 1
* all of the above find ops, but with resource_id instead of full resource
Examples:
# assigning
self.assign_instrument_model_to_instrument_agent(instrument_model_id, instrument_agent_id)
self.assign_one_instrument_model_to_instrument_device(instrument_model_id, instrument_device_id)
self.assign_instrument_device_to_one_platform_device(instrument_device_id, platform_device_id)
self.unassign_instrument_model_from_instrument_device(instrument_model_id, instrument_device_id)
# find objects
self.find_instrument_models_of_instrument_device(instrument_device_id) # returns list
self.find_instrument_model_of_instrument_device(instrument_device_id) # returns IonObject or raises NotFound
self.find_instrument_devices_by_instrument_model(instrument_model_id) # returns list
self.find_instrument_device_by_instrument_model(instrument_model_id) # returns IonObject or raises NotFound
# find subjects
self.find_instrument_model_ids_of_instrument_device(instrument_device_id) # returns list
self.find_instrument_model_id_of_instrument_device(instrument_device_id) # returns string or raises NotFound
self.find_instrument_device_ids_by_instrument_model(instrument_model_id) # returns list
self.find_instrument_device_id_by_instrument_model(instrument_model_id) # returns string or raises NotFound
Breaking Ambiguity:
assign/unassign method names can also include "_with_has_model" ("_with_", and the predicate type with underscores)
find method name can include "_using_has_model" ("_using_", and the predicate type with underscores)
"""
def __init__(self, rr_client):
self.id = id(self)
log.debug("EnhancedResourceRegistryClient init")
self.RR = rr_client
global errc_lookups
if not errc_lookups:
errc_lookups = self._build_lookups()
self.__dict__.update(errc_lookups)
self._cached_dynamics = {}
self._cached_predicates = {}
self._cached_predicates = {}
self._cached_resources = {}
self._all_cached_resources = {}
log.debug("done init")
    @classmethod
    def _build_lookups(cls):
        """Build the label/predicate lookup tables shared by all instances.

        Returns a dict with resource<->label maps, predicate<->label maps,
        and the subject/object -> predicates table.
        """
        lookup_dict = {}
        log.debug("Generating lookup tables for %s resources and their labels", len(RT))
        # Only string values are real type names; skip any non-string entries.
        lookup_dict["resource_to_label"] = dict([(v, cls._uncamel(v)) for v in RT.values() if type("") == type(v)])
        lookup_dict["label_to_resource"] = dict([(cls._uncamel(v), v) for v in RT.values() if type("") == type(v)])
        log.debug("Generating lookup tables for %s predicates and their labels", len(PRED.values()))
        lookup_dict["predicate_to_label"] = dict([(v, cls._uncamel(v)) for v in PRED.values() if type("") == type(v)])
        lookup_dict["label_to_predicate"] = dict([(cls._uncamel(v), v) for v in PRED.values() if type("") == type(v)])
        log.debug("Generating predicate lookup table")
        lookup_dict["predicates_for_subj_obj"] = cls._build_predicate_list()
        return lookup_dict
@classmethod
def _build_predicate_list(cls):
"""
Create a master dict of dicts of lists in self.predicates_for_subj_obj
self.predicates_for_subj_obj[RT.SubjectType][RT.ObjectType] = [PRED.typeOfPred1, PRED.typeOfPred2]
"""
pred_lookup = {}
# if no extends are found, just return the base type as a list
def my_getextends(iontype):
try:
return getextends(iontype)
except KeyError:
return [iontype]
# read associations yaml and expand all domain/range pairs
assoc_defs = Config(["res/config/associations.yml"]).data['AssociationDefinitions']
for ad in assoc_defs:
predicate = ad['predicate']
domain = ad['domain']
range = ad['range']
for d in domain:
for ad in my_getextends(d):
for r in range:
for ar in my_getextends(r):
pred_lookup.setdefault(ad, {}).setdefault(ar, set()).add(predicate)
return pred_lookup
    def __getattr__(self, item):
        """
        anything we can't puzzle out gets passed along to the real RR client

        Attribute names matching the dynamic naming conventions (assign_*,
        unassign_*, find_*) are compiled into functions and cached; anything
        else is looked up on the wrapped RR client and cached too.
        """
        # don't waste time looking up function names twice
        # (note: __getattr__ only fires for names not found normally, and
        # _cached_dynamics is set in __init__, so no recursion here)
        if item in self._cached_dynamics:
            return self._cached_dynamics[item]
        # Each generator tries to parse `item` as one naming convention and
        # returns a bound function on success, or None on mismatch.
        dynamic_fns = [
            self._make_dynamic_assign_function,                # understand assign_x_x_to_y_y_with_some_predicate(o, s) functions
            self._make_dynamic_assign_single_object_function,  # understand assign_one_x_x_to_y_y_with_some_predicate(o, s) functions
            self._make_dynamic_assign_single_subject_function, # understand assign_x_x_to_one_y_y_with_some_predicate(o, s) functions
            self._make_dynamic_unassign_function,              # understand unassign_x_x_to_y_y_with_some_predicate(o, s) functions
            self._make_dynamic_find_objects_function,          # understand find_x_xs_by_y_y_using_some_predicate(s) functions
            self._make_dynamic_find_subjects_function,         # understand find_x_xs_by_y_y_using_some_predicate(o) functions
            self._make_dynamic_find_object_function,           # understand find_x_x_by_y_y_using_some_predicate(s) functions
            self._make_dynamic_find_subject_function,          # understand find_x_x_by_y_y_using_some_predicate(o) functions
            self._make_dynamic_find_object_ids_function,       # understand find_x_x_ids_by_y_y_using_some_predicate(s) functions
            self._make_dynamic_find_subject_ids_function,      # understand find_x_x_ids_by_y_y_using_some_predicate(o) functions
            self._make_dynamic_find_object_id_function,        # understand find_x_x_id_by_y_y_using_some_predicate(s) functions
            self._make_dynamic_find_subject_id_function,       # understand find_x_x_id_by_y_y_using_some_predicate(o) functions
        ]
        # try parsing against all the dynamic functions to see if one works
        for gen_fn in dynamic_fns:
            fn = gen_fn(item)
            if fn is None:
                log.trace("dynamic function match fail")
            else:
                log.trace("dynamic function match for %s", item)
                self._cached_dynamics[item] = fn
                return fn
        # Not a dynamic name: fall through to the wrapped client.
        log.trace("Getting %s attribute from self.RR", item)
        if not hasattr(self.RR, item):
            raise AttributeError(("The method '%s' could not be parsed as a dynamic function and does not exist " +
                                  "in the Resource Registry Client (%s)") % (item, type(self.RR).__name__))
        ret = getattr(self.RR, item)
        log.trace("Got attribute from self.RR: %s", type(ret).__name__)
        self._cached_dynamics[item] = ret
        return ret
def create(self, resource_obj=None, specific_type=None):
"""
create a single object of the predefined type
@param resource_obj an IonObject resource of the proper type
@param specific_type the name of an Ion type (e.g. RT.Resource)
@retval the resource ID
"""
if resource_obj is None:
resource_obj = {}
# Validate the input
self._check_type(resource_obj, specific_type, "to be created")
self._check_name(resource_obj, "to be created")
#persist
#primary_object_obj = IonObject(self.iontype, primary_object)
resource_id, _ = self.RR.create(resource_obj)
return resource_id
    def read(self, resource_id='', specific_type=None):
        """
        read a single object of the predefined type, using the cache if possible
        (original docstring incorrectly said "update" / "to be deleted")
        @param resource_id the id to be read
        @param specific_type the name of an Ion type (e.g. RT.Resource)
        """
        # Fast path: serve from the all-resources cache when present.
        if resource_id in self._all_cached_resources:
            resource_obj = self._all_cached_resources[resource_id]
            self._check_type(resource_obj, specific_type, "to be read")
            log.debug("Returning cached %s object", specific_type)
            return resource_obj
        resource_obj = self.RR.read(resource_id)
        self._check_type(resource_obj, specific_type, "to be read")
        # Populate the per-type cache only for types we are caching.
        if specific_type in self._cached_resources:
            log.debug("Adding cached %s object", specific_type)
            self._add_resource_to_cache(specific_type, resource_obj)
        return resource_obj
    def read_mult(self, resource_ids=None, specific_type=None):
        """Read multiple resources by id, serving from caches when possible.

        @param resource_ids list of resource ids to read
        @param specific_type the name of an Ion type (e.g. RT.Resource), or None
        @retval list of resource objects aligned with resource_ids
        """
        if resource_ids is None:
            resource_ids = []
        # First try the all-resources cache for every id.
        found_resources = [self._all_cached_resources.get(rid, None) for rid in resource_ids]
        missing_resources = [resource_ids[i] for i, robj in enumerate(found_resources) if robj is None]
        if not missing_resources:
            for robj in found_resources:
                self._check_type(robj, specific_type, "to be read")
            return found_resources
        # normal case, check return types
        if not specific_type in self._cached_resources:
            ret = self.RR.read_mult(resource_ids)
            if None is not specific_type:
                if not all([r.type_ == specific_type for r in ret]):
                    raise BadRequest("Expected %s resources from read_mult, but received different type" %
                                     specific_type)
            return ret
        log.debug("Returning cached %s resources", specific_type)
        cache = self._cached_resources[specific_type]
        # fill in any holes that we can
        misses = [x for x in resource_ids if x not in cache.by_id]
        if misses:
            log.debug("Attempting to fill in %s cache misses", len(misses))
            misses_objs = self.RR.read_mult(misses)
            for mo in misses_objs:
                if None is not mo:
                    self._add_resource_to_cache(specific_type, mo)
        return [cache.by_id.get(r, None) for r in resource_ids]
def update(self, resource_obj=None, specific_type=None):
"""
update a single object of the predefined type
@param resource_obj the updated resource
@param specific_type the name of an Ion type (e.g. RT.Resource)
"""
if None == resource_obj: resource_obj = {}
self._check_type(resource_obj, specific_type, "to be updated")
if not hasattr(resource_obj, "_id") or "" == resource_obj._id:
raise BadRequest("The _id field was not set in the "
+ "%s resource to be updated" % type(resource_obj).__name__)
#if the name is being changed, make sure it's not
# being changed to a duplicate
self._check_name(resource_obj, "to be updated")
#persist
return self.RR.update(resource_obj)
    def retire(self, resource_id='', specific_type=None):
        # Alias: "retire" is implemented as an LCS delete.
        return self.lcs_delete(resource_id, specific_type)
    def lcs_delete(self, resource_id='', specific_type=None):
        """
        alias for LCS delete -- the default "delete operation" in ION
        @param resource_id the id to be deleted
        @param specific_type the name of an Ion type (e.g. RT.Resource)
        """
        # Type validation requires a read; only do it when a type was given.
        if None is not specific_type:
            resource_obj = self.RR.read(resource_id)
            self._check_type(resource_obj, specific_type, "to be retired")
        self.RR.lcs_delete(resource_id)
        return
    def delete(self, resource_id):
        # Deliberately disabled: callers must choose lcs_delete (retire)
        # or force_delete explicitly.
        raise NotImplementedError("TODO: remove me")
    def force_delete(self, resource_id='', specific_type=None):
        """
        delete a single object of the predefined type
        AND its history
        AND any associations to/from it
        (i.e., NOT retiring!)
        @param resource_id the id to be deleted
        @param specific_type the name of an Ion type (e.g. RT.Resource)
        """
        #primary_object_obj = self.RR.read(primary_object_id)
        # Type validation requires a read; only do it when a type was given.
        if None is not specific_type:
            resource_obj = self.RR.read(resource_id)
            self._check_type(resource_obj, specific_type, "to be deleted")
        # Note: delete automatically retires associations
        self.RR.delete(resource_id)
    def delete_association(self, subject_id='', association_type='', object_id=''):
        """
        delete an association
        @param subject_id the resource ID of the predefined type
        @param association_type the predicate
        @param object_id the resource ID of the type to be joined
        @todo check for errors
        """
        # Guard: both ids must be plain strings (Py2-style string check).
        assert(type("") == type(subject_id) == type(object_id))
        # Look up the association record, then delete it by object.
        assoc = self.RR.get_association(subject=subject_id,
                                        predicate=association_type,
                                        object=object_id)
        self.RR.delete_association(assoc)
def find_resource_by_name(self, resource_type, name, id_only=False):
rsrcs = self.find_resources_by_name(resource_type, name, id_only)
if 1 == len(rsrcs):
return rsrcs[0]
elif 1 < len(rsrcs):
raise Inconsistent("Expected 1 %s with name '%s', got %d" %
(resource_type, name, len(rsrcs)))
else:
raise NotFound("Expected 1 %s with name '%s', got %d" %
(resource_type, name, len(rsrcs)))
def find_resources_by_name(self, resource_type, name, id_only=False):
assert name
if resource_type not in self._cached_resources:
log.warn("Using find_resources_by_name on resource type %s, which was not cached", resource_type)
ret, _ = self.RR.find_resources(restype=resource_type, name=name, id_only=id_only)
return ret
if not name in self._cached_resources[resource_type].by_name:
log.debug("The %s resource with name '%s' was not in the cache", resource_type, name)
return []
log.debug("Returning object(s) from cache")
objs = self._cached_resources[resource_type].by_name[name]
if id_only:
return [obj._id for obj in objs]
else:
return objs
def find_subjects(self, subject_type='', predicate='', object='', id_only=False):
assert subject_type != ''
assert predicate != ''
object_id, object_type = self._extract_id_and_type(object)
if not self.has_cached_predicate(predicate):
ret, _ = self.RR.find_subjects(subject_type=subject_type,
predicate=predicate,
object=object_id,
id_only=id_only)
return ret
log.debug("Using %s cached results for 'find (%s) subjects'", len(self._cached_predicates[predicate]), predicate)
def filter_fn(assoc):
if object != assoc.o:
return False
if "" != subject_type and subject_type != assoc.st:
return False
return True
log.debug("Checking object_id=%s, subject_type=%s", object_id, subject_type)
preds = self._cached_predicates[predicate]
time_search_start = get_ion_ts()
subject_ids = [a.s for a in self.filter_cached_associations(predicate, filter_fn)]
time_search_stop = get_ion_ts()
total_time = int(time_search_stop) - int(time_search_start)
log.debug("Processed %s %s predicates for %s subjects in %s seconds",
len(preds),
predicate,
len(subject_ids),
total_time / 1000.0)
if id_only:
return subject_ids
else:
log.debug("getting full subject IonObjects with read_mult")
return self.read_mult(subject_ids, subject_type)
def find_objects(self, subject, predicate, object_type='', id_only=False):
subject_id, subject_type = self._extract_id_and_type(subject)
if not self.has_cached_predicate(predicate):
ret, _ = self.RR.find_objects(subject=subject_id,
predicate=predicate,
object_type=object_type,
id_only=id_only)
return ret
log.debug("Using %s cached results for 'find (%s) objects'", len(self._cached_predicates[predicate]), predicate)
def filter_fn(assoc):
if subject_id != assoc.s:
return False
if "" != object_type and object_type != assoc.ot:
return False
return True
log.debug("Checking subject_id=%s, object_type=%s", subject_id, object_type)
preds = self._cached_predicates[predicate]
time_search_start = get_ion_ts()
object_ids = [a.o for a in self.filter_cached_associations(predicate, filter_fn)]
time_search_stop = get_ion_ts()
total_time = int(time_search_stop) - int(time_search_start)
log.debug("Processed %s %s predicates for %s objects in %s seconds",
len(preds),
predicate,
len(object_ids),
total_time / 1000.0)
if id_only:
return object_ids
else:
log.debug("getting full object IonObjects with read_mult")
return self.read_mult(object_ids)
def find_subject(self, subject_type='', predicate='', object='', id_only=False):
assert subject_type != ''
assert predicate != ''
object_id, object_type = self._extract_id_and_type(object)
idstring = ""
if id_only: idstring = " ID"
findop_name = "Find %s subject%s by %s object using predicate %s" % (subject_type,
idstring,
object_type,
predicate)
return self._find_subject_(findop_name, subject_type, predicate, object_id, object_type, id_only)
def _find_subject_(self, findop_name, subject_type, predicate, object_id, object_type, id_only):
objs = self.find_subjects(subject_type=subject_type,
predicate=predicate,
object=object_id,
id_only=id_only)
if 1 == len(objs):
return objs[0]
elif 1 < len(objs):
raise Inconsistent("Expected 1 %s as subject of %s '%s', got %d in '%s'" %
(subject_type, object_type, str(object_id), len(objs), findop_name))
else:
raise NotFound("Expected 1 %s as subject of %s '%s' in '%s'" %
(subject_type, object_type, str(object_id), findop_name))
def find_object(self, subject, predicate, object_type='', id_only=False):
subject_id, subject_type = self._extract_id_and_type(subject)
idstring = ""
if id_only: idstring = " ID"
findop_name = "Find %s subject%s by %s object using predicate %s" % (subject_type,
idstring,
object_type,
predicate)
return self._find_object_(findop_name, subject_id, subject_type, predicate, object_type, id_only)
def _find_object_(self, findop_name, subject_id, subject_type, predicate, object_type, id_only):
objs = self.find_objects(subject=subject_id,
predicate=predicate,
object_type=object_type,
id_only=id_only)
if 1 == len(objs):
return objs[0]
elif 1 < len(objs):
raise Inconsistent("Expected 1 %s as object of %s '%s', got %d in '%s'" %
(object_type, subject_type, str(subject_id), len(objs), findop_name))
else:
raise NotFound("Expected 1 %s as object of %s '%s' in '%s'" %
(object_type, subject_type, str(subject_id), findop_name))
def delete_object_associations(self, subject_id='', association_type=''):
"""
delete all assocations of a given type that are attached as objects to the given subject
"""
log.debug("Deleting all %s object associations from subject with id='%s'",
association_type,
subject_id)
associations = self.RR.find_associations(subject=subject_id, predicate=association_type)
for a in associations:
self.RR.delete_association(a)
def delete_subject_associations(self, association_type='', object_id=''):
"""
delete all assocations of a given type that are attached as subjects to the given object
"""
log.debug("Deleting all %s associations to object with id='%s'",
association_type,
object_id)
associations = self.RR.find_associations(object=object_id, predicate=association_type)
for a in associations:
self.RR.delete_association(a)
def advance_lcs(self, resource_id, transition_event):
"""
attempt to advance the lifecycle state of a resource
@resource_id the resource id
@new_state the new lifecycle state
"""
assert type(resource_id) is str
assert type(transition_event) is str
log.debug("Moving resource life cycle with transition event=%s", transition_event)
ret = self.RR.execute_lifecycle_transition(resource_id=resource_id,
transition_event=transition_event)
log.info("lifecycle transition=%s resulted in lifecycle state=%s", transition_event, str(ret))
return ret
def cache_predicate(self, predicate):
"""
Save all associations of a given predicate type to memory, for in-memory find_subjects/objects ops
This is a PREFETCH operation, and EnhancedResourceRegistryClient objects that use the cache functionality
should NOT be persisted across service calls.
"""
#log.debug("Caching predicates: %s", predicate)
if self.has_cached_predicate(predicate):
#log.debug("Reusing prior cached predicate %s", predicate)
return
time_caching_start = get_ion_ts()
preds = self.RR.find_associations(predicate=predicate, id_only=False)
time_caching_stop = get_ion_ts()
total_time = int(time_caching_stop) - int(time_caching_start)
log.debug("Cached predicate %s with %s resources in %s seconds", predicate, len(preds), total_time / 1000.0)
self._cached_predicates[predicate] = preds
def filter_cached_associations(self, predicate, is_match_fn):
if not self.has_cached_predicate(predicate):
raise BadRequest("Attempted to filter cached associations of uncached predicate '%s'" % predicate)
return [a for a in self._cached_predicates[predicate] if is_match_fn(a)]
def get_cached_associations(self, predicate):
return self.filter_cached_associations(predicate, lambda x: True)
def _add_resource_to_cache(self, resource_type, resource_obj):
self._cached_resources[resource_type].by_id[resource_obj._id] = resource_obj
self._cached_resources[resource_type].by_name.setdefault(resource_obj.name, []).append(resource_obj)
self._all_cached_resources[resource_obj._id] = resource_obj
def cache_resources(self, resource_type, specific_ids=None):
"""
Save all resources of a given type to memory, for in-memory lookup ops
This is a PREFETCH operation, and EnhancedResourceRegistryClient objects that use the cache functionality
should NOT be kept across service calls.
"""
#log.info("Caching resources: %s", resource_type)
#log.debug("This cache is %s", self)
time_caching_start = get_ion_ts()
resource_objs = []
if specific_ids is None:
resource_objs, _ = self.RR.find_resources(restype=resource_type, id_only=False)
else:
assert type(specific_ids) is list
if specific_ids:
resource_objs = self.RR.read_mult(specific_ids)
lookups = DotDict()
lookups.by_id = {}
lookups.by_name = {}
self._cached_resources[resource_type] = lookups
for r in resource_objs:
self._add_resource_to_cache(resource_type, r)
time_caching_stop = get_ion_ts()
total_time = int(time_caching_stop) - int(time_caching_start)
#log.info("Cached %s %s resources in %s seconds", len(resource_objs), resource_type, total_time / 1000.0)
def has_cached_predicate(self, predicate):
return predicate in self._cached_predicates
def has_cached_resource(self, resource_type):
return resource_type in self._cached_resources
def clear_cached_predicate(self, predicate=None):
if None is predicate:
self._cached_predicates = {}
elif predicate in self._cached_predicates:
del self._cached_predicates[predicate]
def clear_cached_resource(self, resource_type=None):
if None is resource_type:
self._cached_resources = {}
self._all_cached_resources = {}
elif resource_type in self._cached_resources:
del self._cached_resources[resource_type]
del_list = [i for i, o in self._all_cached_resources.iteritems() if o.type_ == resource_type]
for i in del_list:
del self._all_cached_resources[i]
@classmethod
def _uncamel(cls, name):
"""
convert CamelCase to camel_case, from http://stackoverflow.com/a/1176023/2063546
"""
log.trace("name is %s: '%s'" % (type(name).__name__, name))
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def _extract_id_and_type(self, id_or_obj):
"""
figure out whether a subject/object is an IonObject or just an ID
"""
if hasattr(id_or_obj, "_id"):
log.debug("find_object for IonObject")
the_id = id_or_obj._id
the_type = type(id_or_obj).__name__
else:
the_id = id_or_obj
the_type = "(Unspecified IonObject)"
if log.isEnabledFor(logging.DEBUG):
try:
the_obj = self.RR.read(the_id)
the_type = type(the_obj).__name__
except:
pass
return the_id, the_type
def _parse_function_name_for_subj_pred_obj(self, genre, fn_name, regexp, required_fields=None, group_names=None):
"""
parse a function name into subject/predicate/object, as well as their CamelCase equivalents
extracts subject, object, and predicate from a function name. predicate is optional, and if missing
then this function will attempt to look it up in the list of predicates for the given subject and
object. the function raises error messages if the function name is parsed correctly but yields no
matches in the RT and PRED lists.
@param genre string, an identifier for what kind of function we're parsing, used for debug messages
@param fn_name string, the function name coming from getattr
@param regexp string, the regexp (containing groups) to parse the fn_name
@param required_fields list, the list of what groups should be "not None" to accept the parse
@param group_names dict mapping of "subject", "object", and "predicate" to their group names
"""
if None is group_names: group_names = {}
if None is required_fields: required_fields = []
log.trace("Attempting parse %s as %s", fn_name, genre)
m = re.match(regexp, fn_name)
if None is m: return None
for r in required_fields:
if None is m.group(r): return None
log.debug("parsed '%s' as %s", fn_name, genre)
ret = {}
for name, idx in group_names.iteritems():
if None is idx:
ret[name] = None
else:
ret[name] = m.group(idx)
obj = ret["object"]
subj = ret["subject"]
pred = ret["predicate"]
if not subj in self.label_to_resource:
log.debug("Attempted to use dynamic %s with unknown subject '%s'", genre, subj)
return None
if not obj in self.label_to_resource:
log.debug("Attempted to use dynamic %s with unknown object '%s'", genre, obj)
return None
isubj = self.label_to_resource[subj]
iobj = self.label_to_resource[obj]
# code won't execute because getextends(Resource) puts ALL resources in a domain position
# if isubj not in self.predicates_for_subj_obj:
# log.debug("Dynamic %s wanted 1 predicate choice for associating %s to %s, no domain" %
# (genre, subj, obj))
# return None
if iobj not in self.predicates_for_subj_obj[isubj]:
log.debug("Dynamic %s wanted 1 predicate choice for associating %s to %s, no range" %
(genre, subj, obj))
return None
if pred is not None:
log.debug("supplied pred is %s", pred)
if not pred in self.label_to_predicate:
raise BadRequest("Attempted to use dynamic %s between %s and %s with unknown predicate '%s'" %
(genre, isubj, iobj, pred))
#return None
ipred = self.label_to_predicate[pred]
if not ipred in self.predicates_for_subj_obj[isubj][iobj]:
raise BadRequest("Attempted to use dynamic %s between %s and %s with disallowed predicate '%s'" %
(genre, isubj, ipred, ipred))
else:
log.debug("no supplied predicate, picking from choices: %s" % self.predicates_for_subj_obj[isubj][iobj])
if len(self.predicates_for_subj_obj[isubj][iobj]) != 1:
raise BadRequest("Dynamic %s wanted 1 predicate choice for associating %s to %s, got %s" %
(genre, subj, obj, self.predicates_for_subj_obj[isubj][iobj]))
ipred = self.predicates_for_subj_obj[isubj][iobj][0]
ret["RT.object"] = iobj
ret["RT.subject"] = isubj
ret["PRED.predicate"] = ipred
return ret
def _make_dynamic_assign_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("assign function w/pred",
item,
r"(assign_)(\w+)(_to_)(\w+)(_with_)(\w+)",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to create associations %s -> %s -> %s", isubj, ipred, iobj)
def freeze():
def ret_fn(obj_id, subj_id):
log.debug("Dynamically creating association %s -> %s -> %s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj_id, ipred, obj_id)
self.RR.create_association(subj_id, ipred, obj_id)
return ret_fn
ret = freeze()
return ret
def _make_dynamic_assign_single_subject_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("assign single subject function w/pred",
item,
r"(assign_)(\w+)(_to_one_)(\w+)(_with_)(\w+)",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to create associations (1)%s -> %s -> %s", isubj, ipred, iobj)
def freeze():
def ret_fn(obj_id, subj_id):
log.debug("Dynamically creating association (1)%s -> %s -> %s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj_id, ipred, obj_id)
# see if there are any other objects of this type and pred on this subject
existing_subjs = self.find_subjects(isubj, ipred, obj_id, id_only=True)
if len(existing_subjs) > 1:
raise Inconsistent("Multiple %s-%s subjects found associated to the same %s object with id='%s'" %
(isubj, ipred, iobj, obj_id))
if len(existing_subjs) > 0:
try:
self.RR.get_association(subj_id, ipred, obj_id)
except NotFound:
raise BadRequest("Attempted to add a second %s-%s association to a %s with id='%s'" %
(isubj, ipred, iobj, obj_id))
else:
log.debug("Create %s Association (single subject): ALREADY EXISTS", ipred)
return
self.RR.create_association(subj_id, ipred, obj_id)
return ret_fn
ret = freeze()
return ret
def _make_dynamic_assign_single_object_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("assign single object function w/pred",
item,
r"(assign_one_)(\w+)(_to_)(\w+)(_with_)(\w+)",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to create associations %s -> %s -> (1)%s", isubj, ipred, iobj)
def freeze():
def ret_fn(obj_id, subj_id):
log.debug("Dynamically creating association %s -> %s -> (1)%s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj_id, ipred, obj_id)
# see if there are any other objects of this type and pred on this subject
existing_objs = self.find_objects(subj_id, ipred, iobj, id_only=True)
if len(existing_objs) > 1:
raise Inconsistent("Multiple %s-%s objects found with the same %s subject with id='%s'" %
(ipred, iobj, isubj, subj_id))
if len(existing_objs) > 0:
try:
log.debug("get_association gives")
log.debug(self.RR.get_association(subj_id, ipred, obj_id))
except NotFound:
raise BadRequest("Attempted to add a second %s-%s association to a %s with id='%s'" %
(ipred, iobj, isubj, subj_id))
else:
log.debug("Create %s Association (single object): ALREADY EXISTS", ipred)
return
self.RR.create_association(subj_id, ipred, obj_id)
return ret_fn
ret = freeze()
return ret
def _make_dynamic_unassign_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("unassign function w/pred",
item,
r"(unassign_)(\w+)(_from_)(\w+)(_with_)(\w+)",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to delete associations %s -> %s -> %s", isubj, ipred, iobj)
def freeze():
def ret_fn(obj_id, subj_id):
log.debug("Dynamically deleting association %s -> %s -> %s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj_id, ipred, obj_id)
self.delete_association(subj_id, ipred, obj_id)
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_objects_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find objects w/pred function",
item,
r"(find_)(\w+)(s_of_)(\w+)(_using_)(\w+)",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find objects %s -> %s -> %s", isubj, ipred, iobj)
def freeze():
def ret_fn(subj):
log.debug("Dynamically finding objects %s -> %s -> %s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj, ipred, iobj)
subj_id, _ = self._extract_id_and_type(subj)
ret = self.find_objects(subject=subj_id, predicate=ipred, object_type=iobj, id_only=False)
return ret
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_subjects_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find subjects w/pred function",
item,
r"(find_)(\w+)(s_by_)(\w+)(_using_)(\w+)",
[2,3,4,5,6],
{"subject": 2, "predicate": 6, "object": 4})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find subjects %s <- %s <- %s", iobj, ipred, isubj)
def freeze():
def ret_fn(obj):
log.debug("Dynamically finding subjects %s <- %s <- %s", iobj, ipred, isubj)
log.debug("%s <- %s <- %s", obj, ipred, isubj)
obj_id, _ = self._extract_id_and_type(obj)
ret = self.find_subjects(subject_type=isubj, predicate=ipred, object=obj_id, id_only=False)
return ret
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_object_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find object w/pred function",
item,
r"(find_)(\w+)(_of_)(\w+)(_using_)(\w+)",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find object %s -> %s -> %s", isubj, ipred, iobj)
def freeze():
def ret_fn(subj_id):
log.debug("Dynamically finding object %s -> %s -> %s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj_id, ipred, iobj)
ret = self._find_object_(item, subj_id, isubj, ipred, iobj, False)
return ret
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_subject_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find subject w/pred function",
item,
r"(find_)(\w+)(_by_)(\w+)(_using_)(\w+)",
[2,3,4,5,6],
{"subject": 2, "predicate": 6, "object": 4})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find subject %s <- %s <- %s", iobj, ipred, isubj)
def freeze():
def ret_fn(obj_id):
log.debug("Dynamically finding subject %s <- %s <- %s", iobj, ipred, isubj)
log.debug("%s <- %s <- %s", isubj, ipred, obj_id)
ret = self._find_subject_(item, isubj, ipred, obj_id, iobj, False)
return ret
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_object_ids_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find object_ids w/pred function",
item,
r"(find_)(\w+)(_ids_of_)(\w+)(_using_)(\w+)",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find object_ids %s -> %s -> %s", isubj, ipred, iobj)
def freeze():
def ret_fn(subj):
log.debug("Dynamically finding object_ids %s -> %s -> %s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj, ipred, iobj)
subj_id, _ = self._extract_id_and_type(subj)
ret = self.find_objects(subject=subj_id, predicate=ipred, object_type=iobj, id_only=True)
return ret
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_subject_ids_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find subject_ids w/pred function",
item,
r"(find_)(\w+)(_ids_by_)(\w+)(_using_)(\w+)",
[2,3,4,5,6],
{"subject": 2, "predicate": 6, "object": 4})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find subject_ids %s <- %s <- %s", iobj, ipred, isubj)
def freeze():
def ret_fn(obj):
log.debug("Dynamically finding subject_ids %s <- %s <- %s", iobj, ipred, isubj)
log.debug("%s <- %s <- %s", isubj, ipred, obj)
obj_id, _ = self._extract_id_and_type(obj)
ret = self.find_subjects(subject_type=isubj, predicate=ipred, object=obj_id, id_only=True)
return ret
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_object_id_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find object_id w/pred function",
item,
r"(find_)(\w+)(_id_of_)(\w+)(_using_)(\w+)?",
[2,3,4,5,6],
{"subject": 4, "predicate": 6, "object": 2})
if None is inputs:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find object_id %s -> %s -> %s", isubj, ipred, iobj)
def freeze():
def ret_fn(subj_id):
log.debug("Dynamically finding object_id %s -> %s -> %s", isubj, ipred, iobj)
log.debug("%s -> %s -> %s", subj_id, ipred, iobj)
ret = self._find_object_(item, subj_id, isubj, ipred, iobj, True)
return ret
return ret_fn
ret = freeze()
return ret
def _make_dynamic_find_subject_id_function(self, item):
inputs = self._parse_function_name_for_subj_pred_obj("find subject_id w/pred function",
item,
r"(find_)(\w+)(_id_by_)(\w+)(_using_)(\w+)?",
[2,3,4,5,6],
{"subject": 2, "predicate": 6, "object": 4})
if inputs is None:
return None
isubj = inputs["RT.subject"]
iobj = inputs["RT.object"]
ipred = inputs["PRED.predicate"]
log.debug("Making function to find subject_id %s <- %s <- %s", iobj, ipred, isubj)
def freeze():
def ret_fn(obj_id):
log.debug("Dynamically finding subject_id %s <- %s <- %s", iobj, ipred, isubj)
log.debug("%s <- %s <- %s", isubj, ipred, obj_id)
ret = self._find_subject_(item, isubj, ipred, obj_id, iobj, True)
return ret
return ret_fn
ret = freeze()
return ret
def _check_type(self, resource_obj, specific_type, verb):
"""
determine whether the given resource matches the given type (if indeed given)
@param resource_obj the IonObject resource to be checked
@param specific_type a string type, or None
@param verb what will happen to this object (like "to be created")
@raises BadRequest if name exists already or wasn't set
"""
if specific_type is None:
return
resource_type = resource_obj.type_
if resource_type != specific_type:
raise BadRequest("Expected a %s for the resource %s, but received type %s" %
(specific_type, verb, resource_type))
def _check_name(self, resource_obj, verb):
"""
determine whether a resource with the same type and name already exists
@param resource_obj the IonObject resource to be checked
@param verb what will happen to this object (like "to be created")
@raises BadRequest if name exists already or wasn't set
"""
resource_type = resource_obj.type_
if not (hasattr(resource_obj, "name") and "" != resource_obj.name):
raise BadRequest("The name field was not set in the resource %s" % verb)
def pluck(self, resource_id=''):
"""
delete all associations to/from a resource
"""
# find all associations where this is the subject
_, obj_assns = self.RR.find_objects(subject=resource_id, id_only=True)
# find all associations where this is the object
_, sbj_assns = self.RR.find_subjects(object=resource_id, id_only=True)
log.debug("pluck will remove %s subject associations and %s object associations",
len(sbj_assns), len(obj_assns))
for assn in obj_assns:
log.debug("pluck deleting object association %s", assn)
self.RR.delete_association(assn)
for assn in sbj_assns:
log.debug("pluck deleting subject association %s", assn)
self.RR.delete_association(assn)
| |
import functools
import gym
import logging
import numpy as np
import time
from typing import Callable, Dict, List, Optional, Tuple, Type, Union
from ray.rllib.models.modelv2 import ModelV2
from ray.rllib.models.torch.torch_modelv2 import TorchModelV2
from ray.rllib.models.torch.torch_action_dist import TorchDistributionWrapper
from ray.rllib.policy.policy import Policy, LEARNER_STATS_KEY
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.rnn_sequencing import pad_batch_to_sequences_of_same_size
from ray.rllib.policy.view_requirement import ViewRequirement
from ray.rllib.utils import force_list
from ray.rllib.utils.annotations import override, DeveloperAPI
from ray.rllib.utils.framework import try_import_torch
from ray.rllib.utils.schedules import ConstantSchedule, PiecewiseSchedule
from ray.rllib.utils.torch_ops import convert_to_non_torch_type, \
convert_to_torch_tensor
from ray.rllib.utils.tracking_dict import UsageTrackingDict
from ray.rllib.utils.typing import ModelGradients, ModelWeights, \
TensorType, TrainerConfigDict
torch, _ = try_import_torch()
logger = logging.getLogger(__name__)
@DeveloperAPI
class TorchPolicy(Policy):
"""Template for a PyTorch policy and loss to use with RLlib.
This is similar to TFPolicy, but for PyTorch.
Attributes:
observation_space (gym.Space): observation space of the policy.
action_space (gym.Space): action space of the policy.
config (dict): config of the policy.
model (TorchModel): Torch model instance.
dist_class (type): Torch action distribution class.
"""
    @DeveloperAPI
    def __init__(
            self,
            observation_space: gym.spaces.Space,
            action_space: gym.spaces.Space,
            config: TrainerConfigDict,
            *,
            model: ModelV2,
            loss: Callable[[
                Policy, ModelV2, Type[TorchDistributionWrapper], SampleBatch
            ], Union[TensorType, List[TensorType]]],
            action_distribution_class: Type[TorchDistributionWrapper],
            action_sampler_fn: Optional[Callable[[
                TensorType, List[TensorType]
            ], Tuple[TensorType, TensorType]]] = None,
            action_distribution_fn: Optional[Callable[[
                Policy, ModelV2, TensorType, TensorType, TensorType
            ], Tuple[TensorType, Type[TorchDistributionWrapper], List[
                TensorType]]]] = None,
            max_seq_len: int = 20,
            get_batch_divisibility_req: Optional[Callable[[Policy],
                                                          int]] = None,
    ):
        """Build a policy from policy and loss torch modules.
        Note that model will be placed on GPU device if CUDA_VISIBLE_DEVICES
        is set. Only single GPU is supported for now.
        Args:
            observation_space (gym.spaces.Space): observation space of the
                policy.
            action_space (gym.spaces.Space): action space of the policy.
            config (TrainerConfigDict): The Policy config dict.
            model (ModelV2): PyTorch policy module. Given observations as
                input, this module must return a list of outputs where the
                first item is action logits, and the rest can be any value.
            loss (Callable[[Policy, ModelV2, Type[TorchDistributionWrapper],
                SampleBatch], Union[TensorType, List[TensorType]]]): Callable
                that returns a single scalar loss or a list of loss terms.
            action_distribution_class (Type[TorchDistributionWrapper]): Class
                for a torch action distribution.
            action_sampler_fn (Callable[[TensorType, List[TensorType]],
                Tuple[TensorType, TensorType]]): A callable returning a
                sampled action and its log-likelihood given Policy, ModelV2,
                input_dict, explore, timestep, and is_training.
            action_distribution_fn (Optional[Callable[[Policy, ModelV2,
                Dict[str, TensorType], TensorType, TensorType],
                Tuple[TensorType, type, List[TensorType]]]]): A callable
                returning distribution inputs (parameters), a dist-class to
                generate an action distribution object from, and
                internal-state outputs (or an empty list if not applicable).
                Note: No Exploration hooks have to be called from within
                `action_distribution_fn`. It's should only perform a simple
                forward pass through some model.
                If None, pass inputs through `self.model()` to get distribution
                inputs.
                The callable takes as inputs: Policy, ModelV2, input_dict,
                explore, timestep, is_training.
            max_seq_len (int): Max sequence length for LSTM training.
            get_batch_divisibility_req (Optional[Callable[[Policy], int]]]):
                Optional callable that returns the divisibility requirement
                for sample batches given the Policy.
        """
        # Record the framework before base-class init so Policy setup sees it.
        self.framework = "torch"
        super().__init__(observation_space, action_space, config)
        # Pick the compute device; per the docstring only a single GPU is
        # supported, so any available CUDA device maps to "cuda".
        if torch.cuda.is_available():
            logger.info("TorchPolicy running on GPU.")
            self.device = torch.device("cuda")
        else:
            logger.info("TorchPolicy running on CPU.")
            self.device = torch.device("cpu")
        # Move model parameters onto the chosen device.
        self.model = model.to(self.device)
        # Combine view_requirements for Model and Policy.
        self.training_view_requirements = dict(
            **{
                SampleBatch.ACTIONS: ViewRequirement(
                    space=self.action_space, shift=0),
                SampleBatch.REWARDS: ViewRequirement(shift=0),
                SampleBatch.DONES: ViewRequirement(shift=0),
            },
            **self.model.inference_view_requirements)
        # Exploration object is built from the config by the base-class helper.
        self.exploration = self._create_exploration()
        self.unwrapped_model = model  # used to support DistributedDataParallel
        self._loss = loss
        # `self.optimizer()` may return one optimizer or a list; normalize to list.
        self._optimizers = force_list(self.optimizer())
        self.dist_class = action_distribution_class
        self.action_sampler_fn = action_sampler_fn
        self.action_distribution_fn = action_distribution_fn
        # If set, means we are using distributed allreduce during learning.
        self.distributed_world_size = None
        self.max_seq_len = max_seq_len
        # Divisibility requirement: callable of the policy, a fixed int, or
        # None/0 (falls back to 1).
        self.batch_divisibility_req = get_batch_divisibility_req(self) if \
            callable(get_batch_divisibility_req) else \
            (get_batch_divisibility_req or 1)
@override(Policy)
@DeveloperAPI
    def compute_actions(
            self,
            obs_batch: Union[List[TensorType], TensorType],
            state_batches: Optional[List[TensorType]] = None,
            prev_action_batch: Union[List[TensorType], TensorType] = None,
            prev_reward_batch: Union[List[TensorType], TensorType] = None,
            info_batch: Optional[Dict[str, list]] = None,
            episodes: Optional[List["MultiAgentEpisode"]] = None,
            explore: Optional[bool] = None,
            timestep: Optional[int] = None,
            **kwargs) -> \
            Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
        """Computes actions for a (numpy) batch of observations.

        Falls back to `self.config["explore"]` / `self.global_timestep`
        when `explore` / `timestep` are not given. All outputs are
        converted back to non-torch (numpy) types before returning.
        """
        explore = explore if explore is not None else self.config["explore"]
        timestep = timestep if timestep is not None else self.global_timestep
        # Pure inference: no gradients needed.
        with torch.no_grad():
            # Dummy all-1 seq-lens (single timestep per batch row).
            seq_lens = torch.ones(len(obs_batch), dtype=torch.int32)
            # Lazily converts values to torch tensors on `self.device`.
            input_dict = self._lazy_tensor_dict({
                SampleBatch.CUR_OBS: np.asarray(obs_batch),
                "is_training": False,
            })
            if prev_action_batch is not None:
                input_dict[SampleBatch.PREV_ACTIONS] = \
                    np.asarray(prev_action_batch)
            if prev_reward_batch is not None:
                input_dict[SampleBatch.PREV_REWARDS] = \
                    np.asarray(prev_reward_batch)
            # Internal (RNN) state inputs -> torch tensors on our device.
            state_batches = [
                convert_to_torch_tensor(s, self.device)
                for s in (state_batches or [])
            ]
            actions, state_out, extra_fetches, logp = \
                self._compute_action_helper(
                    input_dict, state_batches, seq_lens, explore, timestep)
            # Action-logp and action-prob.
            if logp is not None:
                logp = convert_to_non_torch_type(logp)
                extra_fetches[SampleBatch.ACTION_PROB] = np.exp(logp)
                extra_fetches[SampleBatch.ACTION_LOGP] = logp
            # Convert all torch outputs back to numpy for the caller.
            return convert_to_non_torch_type((actions, state_out,
                                              extra_fetches))
@override(Policy)
    def compute_actions_from_input_dict(
            self,
            input_dict: Dict[str, TensorType],
            explore: bool = None,
            timestep: Optional[int] = None,
            **kwargs) -> \
            Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
        """Computes actions from an already-assembled input dict.

        Unlike `compute_actions`, outputs are left as torch tensors
        (no numpy conversion).
        """
        explore = explore if explore is not None else self.config["explore"]
        timestep = timestep if timestep is not None else self.global_timestep
        with torch.no_grad():
            # Pass lazy (torch) tensor dict to Model as `input_dict`.
            input_dict = self._lazy_tensor_dict(input_dict)
            # Collect internal-state inputs (keys starting with "state_").
            state_batches = [
                input_dict[k] for k in input_dict.keys() if "state_" in k[:6]
            ]
            # Stateful models get all-1 seq-lens (one timestep per row).
            seq_lens = np.array([1] * len(input_dict["obs"])) \
                if state_batches else None
            actions, state_out, extra_fetches, logp = \
                self._compute_action_helper(
                    input_dict, state_batches, seq_lens, explore, timestep)
            # Leave outputs as is (torch.Tensors): Action-logp and action-prob.
            if logp is not None:
                extra_fetches[SampleBatch.ACTION_PROB] = torch.exp(logp)
                extra_fetches[SampleBatch.ACTION_LOGP] = logp
            return actions, state_out, extra_fetches
    def _compute_action_helper(self, input_dict, state_batches, seq_lens,
                               explore, timestep):
        """Shared forward pass logic (w/ and w/o trajectory view API).

        Returns:
            Tuple:
                - actions, state_out, extra_fetches, logp.
        """
        # A custom sampler function takes precedence: it returns actions
        # (and logp) directly, so no action distribution is built here.
        if self.action_sampler_fn:
            action_dist = dist_inputs = None
            state_out = state_batches
            actions, logp, state_out = self.action_sampler_fn(
                self,
                self.model,
                input_dict,
                state_out,
                explore=explore,
                timestep=timestep)
        else:
            # Call the exploration before_compute_actions hook.
            self.exploration.before_compute_actions(
                explore=explore, timestep=timestep)
            # Either a custom fn produces the distribution inputs ...
            if self.action_distribution_fn:
                dist_inputs, dist_class, state_out = \
                    self.action_distribution_fn(
                        self,
                        self.model,
                        input_dict[SampleBatch.CUR_OBS],
                        explore=explore,
                        timestep=timestep,
                        is_training=False)
            # ... or the model's forward pass does (default).
            else:
                dist_class = self.dist_class
                dist_inputs, state_out = self.model(input_dict, state_batches,
                                                    seq_lens)
            # Sanity-check the distribution class before instantiating it.
            if not (isinstance(dist_class, functools.partial)
                    or issubclass(dist_class, TorchDistributionWrapper)):
                raise ValueError(
                    "`dist_class` ({}) not a TorchDistributionWrapper "
                    "subclass! Make sure your `action_distribution_fn` or "
                    "`make_model_and_action_dist` return a correct "
                    "distribution class.".format(dist_class.__name__))
            action_dist = dist_class(dist_inputs, self.model)
            # Get the exploration action from the forward results.
            actions, logp = \
                self.exploration.get_exploration_action(
                    action_distribution=action_dist,
                    timestep=timestep,
                    explore=explore)
        input_dict[SampleBatch.ACTIONS] = actions
        # Add default and custom fetches.
        extra_fetches = self.extra_action_out(input_dict, state_batches,
                                              self.model, action_dist)
        # Action-dist inputs.
        if dist_inputs is not None:
            extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = dist_inputs
        return actions, state_out, extra_fetches, logp
@override(Policy)
@DeveloperAPI
    def compute_log_likelihoods(
            self,
            actions: Union[List[TensorType], TensorType],
            obs_batch: Union[List[TensorType], TensorType],
            state_batches: Optional[List[TensorType]] = None,
            prev_action_batch: Optional[Union[List[TensorType],
                                              TensorType]] = None,
            prev_reward_batch: Optional[Union[List[
                TensorType], TensorType]] = None) -> TensorType:
        """Computes log-probs of `actions`, given `obs_batch`, under this
        Policy's current action distribution.

        Raises:
            ValueError: If a custom `action_sampler_fn` is set without an
                `action_distribution_fn` (no way to build a distribution).
        """
        if self.action_sampler_fn and self.action_distribution_fn is None:
            raise ValueError("Cannot compute log-prob/likelihood w/o an "
                             "`action_distribution_fn` and a provided "
                             "`action_sampler_fn`!")
        with torch.no_grad():
            input_dict = self._lazy_tensor_dict({
                SampleBatch.CUR_OBS: obs_batch,
                SampleBatch.ACTIONS: actions
            })
            if prev_action_batch is not None:
                input_dict[SampleBatch.PREV_ACTIONS] = prev_action_batch
            if prev_reward_batch is not None:
                input_dict[SampleBatch.PREV_REWARDS] = prev_reward_batch
            # Dummy all-1 seq-lens (single timestep per batch row).
            seq_lens = torch.ones(len(obs_batch), dtype=torch.int32)
            # Exploration hook before each forward pass.
            self.exploration.before_compute_actions(explore=False)
            # Action dist class and inputs are generated via custom function.
            if self.action_distribution_fn:
                dist_inputs, dist_class, _ = self.action_distribution_fn(
                    policy=self,
                    model=self.model,
                    obs_batch=input_dict[SampleBatch.CUR_OBS],
                    explore=False,
                    is_training=False)
            # Default action-dist inputs calculation.
            else:
                dist_class = self.dist_class
                dist_inputs, _ = self.model(input_dict, state_batches,
                                            seq_lens)
            action_dist = dist_class(dist_inputs, self.model)
            log_likelihoods = action_dist.logp(input_dict[SampleBatch.ACTIONS])
            return log_likelihoods
@override(Policy)
@DeveloperAPI
    def learn_on_batch(
            self, postprocessed_batch: SampleBatch) -> Dict[str, TensorType]:
        """Runs one SGD step on `postprocessed_batch`; returns stats dict.

        Computes loss(es), runs one backward pass per optimizer, optionally
        all-reduces gradients across workers, then steps all optimizers.
        """
        # Get batch ready for RNNs, if applicable.
        pad_batch_to_sequences_of_same_size(
            postprocessed_batch,
            max_seq_len=self.max_seq_len,
            shuffle=False,
            batch_divisibility_req=self.batch_divisibility_req,
            _use_trajectory_view_api=self.config["_use_trajectory_view_api"],
        )
        train_batch = self._lazy_tensor_dict(postprocessed_batch)
        # Calculate the actual policy loss.
        loss_out = force_list(
            self._loss(self, self.model, self.dist_class, train_batch))
        # Call Model's custom-loss with Policy loss outputs and train_batch.
        if self.model:
            loss_out = self.model.custom_loss(loss_out, train_batch)
        # Give Exploration component that chance to modify the loss (or add
        # its own terms).
        if hasattr(self, "exploration"):
            loss_out = self.exploration.get_exploration_loss(
                loss_out, train_batch)
        # Exactly one loss term per optimizer is required.
        assert len(loss_out) == len(self._optimizers)
        # assert not any(torch.isnan(l) for l in loss_out)
        fetches = self.extra_compute_grad_fetches()
        # Loop through all optimizers.
        grad_info = {"allreduce_latency": 0.0}
        for i, opt in enumerate(self._optimizers):
            # Erase gradients in all vars of this optimizer.
            opt.zero_grad()
            # Recompute gradients of loss over all variables.
            # Keep the graph alive for all but the last backward pass.
            loss_out[i].backward(retain_graph=(i < len(self._optimizers) - 1))
            grad_info.update(self.extra_grad_process(opt, loss_out[i]))
            # If set, sum gradients across all workers (distributed SGD).
            if self.distributed_world_size:
                grads = []
                for param_group in opt.param_groups:
                    for p in param_group["params"]:
                        if p.grad is not None:
                            grads.append(p.grad)
                start = time.time()
                if torch.cuda.is_available():
                    # Sadly, allreduce_coalesced does not work with CUDA yet.
                    for g in grads:
                        torch.distributed.all_reduce(
                            g, op=torch.distributed.ReduceOp.SUM)
                else:
                    torch.distributed.all_reduce_coalesced(
                        grads, op=torch.distributed.ReduceOp.SUM)
                # Divide summed grads by world size -> mean over workers.
                for param_group in opt.param_groups:
                    for p in param_group["params"]:
                        if p.grad is not None:
                            p.grad /= self.distributed_world_size
                grad_info["allreduce_latency"] += time.time() - start
        # Step the optimizer
        for i, opt in enumerate(self._optimizers):
            opt.step()
        grad_info["allreduce_latency"] /= len(self._optimizers)
        grad_info.update(self.extra_grad_info(train_batch))
        if self.model:
            grad_info["model"] = self.model.metrics()
        return dict(fetches, **{LEARNER_STATS_KEY: grad_info})
@override(Policy)
@DeveloperAPI
    def compute_gradients(self,
                          postprocessed_batch: SampleBatch) -> ModelGradients:
        """Computes gradients for `postprocessed_batch` w/o applying them.

        Returns:
            Tuple of (gradients as numpy arrays / None per parameter in
            optimizer order, fetch dict incl. LEARNER_STATS_KEY grad info).
        """
        train_batch = self._lazy_tensor_dict(postprocessed_batch)
        loss_out = force_list(
            self._loss(self, self.model, self.dist_class, train_batch))
        # Exactly one loss term per optimizer is required.
        assert len(loss_out) == len(self._optimizers)
        fetches = self.extra_compute_grad_fetches()
        grad_process_info = {}
        grads = []
        for i, opt in enumerate(self._optimizers):
            opt.zero_grad()
            loss_out[i].backward()
            grad_process_info = self.extra_grad_process(opt, loss_out[i])
            # Note that return values are just references;
            # calling zero_grad will modify the values
            for param_group in opt.param_groups:
                for p in param_group["params"]:
                    if p.grad is not None:
                        grads.append(p.grad.data.cpu().numpy())
                    else:
                        grads.append(None)
        grad_info = self.extra_grad_info(train_batch)
        grad_info.update(grad_process_info)
        return grads, dict(fetches, **{LEARNER_STATS_KEY: grad_info})
@override(Policy)
@DeveloperAPI
def apply_gradients(self, gradients: ModelGradients) -> None:
# TODO(sven): Not supported for multiple optimizers yet.
assert len(self._optimizers) == 1
for g, p in zip(gradients, self.model.parameters()):
if g is not None:
p.grad = torch.from_numpy(g).to(self.device)
self._optimizers[0].step()
@override(Policy)
@DeveloperAPI
def get_weights(self) -> ModelWeights:
return {
k: v.cpu().detach().numpy()
for k, v in self.model.state_dict().items()
}
@override(Policy)
@DeveloperAPI
def set_weights(self, weights: ModelWeights) -> None:
weights = convert_to_torch_tensor(weights, device=self.device)
self.model.load_state_dict(weights)
@override(Policy)
@DeveloperAPI
def is_recurrent(self) -> bool:
return len(self.model.get_initial_state()) > 0
@override(Policy)
@DeveloperAPI
def num_state_tensors(self) -> int:
return len(self.model.get_initial_state())
@override(Policy)
@DeveloperAPI
def get_initial_state(self) -> List[TensorType]:
return [
s.cpu().detach().numpy() for s in self.model.get_initial_state()
]
@override(Policy)
@DeveloperAPI
def get_state(self) -> Union[Dict[str, TensorType], List[TensorType]]:
state = super().get_state()
state["_optimizer_variables"] = []
for i, o in enumerate(self._optimizers):
state["_optimizer_variables"].append(o.state_dict())
return state
@override(Policy)
@DeveloperAPI
def set_state(self, state: object) -> None:
state = state.copy() # shallow copy
# Set optimizer vars first.
optimizer_vars = state.pop("_optimizer_variables", None)
if optimizer_vars:
assert len(optimizer_vars) == len(self._optimizers)
for o, s in zip(self._optimizers, optimizer_vars):
o.load_state_dict(s)
# Then the Policy's (NN) weights.
super().set_state(state)
@DeveloperAPI
    def extra_grad_process(self, optimizer: "torch.optim.Optimizer",
                           loss: TensorType):
        """Called after each optimizer.zero_grad() + loss.backward() call.

        Called for each self._optimizers/loss-value pair.
        Allows for gradient processing before optimizer.step() is called.
        E.g. for gradient clipping.

        Args:
            optimizer (torch.optim.Optimizer): A torch optimizer object.
            loss (TensorType): The loss tensor associated with the optimizer.

        Returns:
            Dict[str, TensorType]: A dict with information on the gradient
                processing step. Empty by default (no processing done).
        """
        return {}
@DeveloperAPI
    def extra_compute_grad_fetches(self) -> Dict[str, any]:
        """Extra values to fetch and return from compute_gradients().

        Override to add custom fetches; by default only an (empty)
        LEARNER_STATS_KEY entry is returned.

        Returns:
            Dict[str, any]: Extra fetch dict to be added to the fetch dict
                of the compute_gradients call.
        """
        return {LEARNER_STATS_KEY: {}}  # e.g, stats, td error, etc.
@DeveloperAPI
    def extra_action_out(
            self, input_dict: Dict[str, TensorType],
            state_batches: List[TensorType], model: TorchModelV2,
            action_dist: TorchDistributionWrapper) -> Dict[str, TensorType]:
        """Returns dict of extra info to include in experience batch.

        Override to add custom outputs (e.g. value-function predictions);
        the default is no extra fetches.

        Args:
            input_dict (Dict[str, TensorType]): Dict of model input tensors.
            state_batches (List[TensorType]): List of state tensors.
            model (TorchModelV2): Reference to the model object.
            action_dist (TorchDistributionWrapper): Torch action dist object
                to get log-probs (e.g. for already sampled actions).

        Returns:
            Dict[str, TensorType]: Extra outputs to return in a
                compute_actions() call (3rd return value).
        """
        return {}
@DeveloperAPI
    def extra_grad_info(self,
                        train_batch: SampleBatch) -> Dict[str, TensorType]:
        """Return dict of extra grad info (stats); empty by default.

        Args:
            train_batch (SampleBatch): The training batch for which to produce
                extra grad info for.

        Returns:
            Dict[str, TensorType]: The info dict carrying grad info per str
                key.
        """
        return {}
@DeveloperAPI
def optimizer(
self
) -> Union[List["torch.optim.Optimizer"], "torch.optim.Optimizer"]:
"""Custom the local PyTorch optimizer(s) to use.
Returns:
Union[List[torch.optim.Optimizer], torch.optim.Optimizer]:
The local PyTorch optimizer(s) to use for this Policy.
"""
if hasattr(self, "config"):
return torch.optim.Adam(
self.model.parameters(), lr=self.config["lr"])
else:
return torch.optim.Adam(self.model.parameters())
@override(Policy)
@DeveloperAPI
    def export_model(self, export_dir: str) -> None:
        """Exports the Policy's model to `export_dir` (not yet supported).

        TODO(sven): implement for torch.

        Raises:
            NotImplementedError: Always (torch export not implemented).
        """
        raise NotImplementedError
@override(Policy)
@DeveloperAPI
    def export_checkpoint(self, export_dir: str) -> None:
        """Exports a checkpoint to `export_dir` (not yet supported).

        TODO(sven): implement for torch.

        Raises:
            NotImplementedError: Always (torch export not implemented).
        """
        raise NotImplementedError
@override(Policy)
@DeveloperAPI
    def import_model_from_h5(self, import_file: str) -> None:
        """Imports weights into torch model.

        Delegates entirely to the model's own `import_from_h5` method.
        """
        return self.model.import_from_h5(import_file)
def _lazy_tensor_dict(self, postprocessed_batch):
train_batch = UsageTrackingDict(postprocessed_batch)
train_batch.set_get_interceptor(
functools.partial(convert_to_torch_tensor, device=self.device))
return train_batch
# TODO: (sven) Unify hyperparam annealing procedures across RLlib (tf/torch)
# and for all possible hyperparams, not just lr.
@DeveloperAPI
class LearningRateSchedule:
    """Mixin for TorchPolicy that adds a learning rate schedule."""
    @DeveloperAPI
    def __init__(self, lr, lr_schedule):
        # `cur_lr` is written into the optimizers' param groups in
        # `optimizer()` below.
        self.cur_lr = lr
        if lr_schedule is None:
            # Fixed learning rate.
            self.lr_schedule = ConstantSchedule(lr, framework=None)
        else:
            # Piecewise schedule; keep the final value past the last step.
            self.lr_schedule = PiecewiseSchedule(
                lr_schedule, outside_value=lr_schedule[-1][-1], framework=None)
    @override(Policy)
    def on_global_var_update(self, global_vars):
        # Re-compute the current lr from the schedule on each update.
        super(LearningRateSchedule, self).on_global_var_update(global_vars)
        self.cur_lr = self.lr_schedule.value(global_vars["timestep"])
    @override(TorchPolicy)
    def optimizer(self):
        # Push the current lr into each optimizer's param groups.
        for opt in self._optimizers:
            for p in opt.param_groups:
                p["lr"] = self.cur_lr
        return self._optimizers
@DeveloperAPI
class EntropyCoeffSchedule:
    """Mixin for TorchPolicy that adds entropy coeff decay."""
    @DeveloperAPI
    def __init__(self, entropy_coeff, entropy_coeff_schedule):
        self.entropy_coeff = entropy_coeff
        if entropy_coeff_schedule is None:
            # No annealing: keep the coefficient fixed.
            self.entropy_coeff_schedule = ConstantSchedule(
                entropy_coeff, framework=None)
        elif isinstance(entropy_coeff_schedule, list):
            # Custom piecewise schedule (same list format as `lr_schedule`);
            # keep the final value past the last timestep.
            self.entropy_coeff_schedule = PiecewiseSchedule(
                entropy_coeff_schedule,
                outside_value=entropy_coeff_schedule[-1][-1],
                framework=None)
        else:
            # Single number: anneal from `entropy_coeff` down to 0.0 over
            # `entropy_coeff_schedule` timesteps, then stay at 0.0.
            self.entropy_coeff_schedule = PiecewiseSchedule(
                [[0, entropy_coeff], [entropy_coeff_schedule, 0.0]],
                outside_value=0.0,
                framework=None)
    @override(Policy)
    def on_global_var_update(self, global_vars):
        # Re-compute the current entropy coefficient from the schedule.
        super(EntropyCoeffSchedule, self).on_global_var_update(global_vars)
        self.entropy_coeff = self.entropy_coeff_schedule.value(
            global_vars["timestep"])
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.sites.models import Site
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
from easy_thumbnails.fields import ThumbnailerImageField
from guardian.shortcuts import get_perms
from userena import settings as userena_settings
from userena.managers import UserenaManager, UserenaBaseProfileManager
from userena.utils import get_gravatar, generate_sha1, get_protocol, \
get_datetime_now, user_model_label
import datetime
from .mail import UserenaConfirmationMail
PROFILE_PERMISSIONS = (
('view_profile', 'Can view profile'),
)
def upload_to_mugshot(instance, filename):
    """
    Builds the upload path for a user's mugshot under
    ``USERENA_MUGSHOT_PATH``, storing the file under a unique hash of the
    user's primary key. This is for privacy reasons so others can't just
    browse through the mugshot directory.
    """
    # Keep only the (lower-cased) extension of the uploaded file.
    extension = filename.split('.')[-1].lower()
    salt, hash = generate_sha1(instance.pk)
    path = userena_settings.USERENA_MUGSHOT_PATH % {
        'username': instance.user.username,
        'id': instance.user.id,
        'date': instance.user.date_joined,
        'date_now': get_datetime_now().date(),
    }
    # Only the first 10 hash characters are used, keeping filenames short.
    return '%(path)s%(hash)s.%(extension)s' % {
        'path': path,
        'hash': hash[:10],
        'extension': extension,
    }
@python_2_unicode_compatible
class UserenaSignup(models.Model):
    """
    Userena model which stores all the necessary information to have a full
    functional user implementation on your Django website.
    """
    # The Django user this signup/activation record belongs to.
    user = models.OneToOneField(user_model_label,
                                verbose_name=_('user'),
                                related_name='userena_signup',
                                on_delete=models.CASCADE)
    last_active = models.DateTimeField(_('last active'),
                                       blank=True,
                                       null=True,
                                       help_text=_('The last date that the user was active.'))
    # Set to ``USERENA_ACTIVATED`` once the account has been activated.
    activation_key = models.CharField(_('activation key'),
                                      max_length=40,
                                      blank=True)
    activation_notification_send = models.BooleanField(_('notification send'),
                                                       default=False,
                                                       help_text=_('Designates whether this user has already got a notification about activating their account.'))
    # Temporary storage for a requested, not-yet-confirmed email change.
    email_unconfirmed = models.EmailField(_('unconfirmed email address'),
                                          blank=True,
                                          help_text=_('Temporary email address when the user requests an email change.'))
    email_confirmation_key = models.CharField(_('unconfirmed email verification key'),
                                              max_length=40,
                                              blank=True)
    email_confirmation_key_created = models.DateTimeField(_('creation date of email confirmation key'),
                                                          blank=True,
                                                          null=True)
    objects = UserenaManager()
    class Meta:
        verbose_name = _('userena registration')
        verbose_name_plural = _('userena registrations')
    def __str__(self):
        return '%s' % self.user.username
    def change_email(self, email):
        """
        Changes the email address for a user.

        A user needs to verify this new email address before it becomes
        active. By storing the new email address in a temporary field --
        ``temporary_email`` -- we are able to set this email address after the
        user has verified it by clicking on the verification URI in the email.
        This email gets sent out by ``send_verification_email``.

        :param email:
            The new email address that the user wants to use.
        """
        self.email_unconfirmed = email
        # Fresh confirmation key, derived from the username.
        salt, hash = generate_sha1(self.user.username)
        self.email_confirmation_key = hash
        self.email_confirmation_key_created = get_datetime_now()
        self.save()
        # Send email for activation
        self.send_confirmation_email()
    def send_confirmation_email(self):
        """
        Sends an email to confirm the new email address.

        This method sends out two emails. One to the new email address that
        contains the ``email_confirmation_key`` which is used to verify this
        email address with :func:`UserenaUser.objects.confirm_email`.

        The other email is to the old email address to let the user know that
        a request is made to change this email address.
        """
        context = {'user': self.user,
                   'without_usernames': userena_settings.USERENA_WITHOUT_USERNAMES,
                   'new_email': self.email_unconfirmed,
                   'protocol': get_protocol(),
                   'confirmation_key': self.email_confirmation_key,
                   'site': Site.objects.get_current()}
        mailer = UserenaConfirmationMail(context=context)
        # Notify the old address first (only if the user has one set).
        mailer.generate_mail("confirmation", "_old")
        if self.user.email:
            mailer.send_mail(self.user.email)
        # Then send the confirmation key to the new, unconfirmed address.
        mailer.generate_mail("confirmation", "_new")
        mailer.send_mail(self.email_unconfirmed)
    def activation_key_expired(self):
        """
        Checks if activation key is expired.

        Returns ``True`` when the ``activation_key`` of the user is expired and
        ``False`` if the key is still valid.

        The key is expired when it's set to the value defined in
        ``USERENA_ACTIVATED`` or ``activation_key_created`` is beyond the
        amount of days defined in ``USERENA_ACTIVATION_DAYS``.
        """
        expiration_days = datetime.timedelta(days=userena_settings.USERENA_ACTIVATION_DAYS)
        expiration_date = self.user.date_joined + expiration_days
        # An already-used key is set to USERENA_ACTIVATED and never expires.
        if self.activation_key == userena_settings.USERENA_ACTIVATED:
            return False
        if get_datetime_now() >= expiration_date:
            return True
        return False
    def send_activation_email(self):
        """
        Sends an activation email to the user.

        This email is sent when the user wants to activate their newly created
        user.
        """
        context = {'user': self.user,
                   'without_usernames': userena_settings.USERENA_WITHOUT_USERNAMES,
                   'protocol': get_protocol(),
                   'activation_days': userena_settings.USERENA_ACTIVATION_DAYS,
                   'activation_key': self.activation_key,
                   'site': Site.objects.get_current()}
        mailer = UserenaConfirmationMail(context=context)
        mailer.generate_mail("activation")
        mailer.send_mail(self.user.email)
@python_2_unicode_compatible
class UserenaBaseProfile(models.Model):
    """ Base model needed for extra profile functionality """
    # Who may view the profile (checked in ``can_view_profile``).
    PRIVACY_CHOICES = (
        ('open', _('Open')),
        ('registered', _('Registered')),
        ('closed', _('Closed')),
    )
    # Thumbnail size/crop settings applied to the mugshot image.
    MUGSHOT_SETTINGS = {'size': (userena_settings.USERENA_MUGSHOT_SIZE,
                                 userena_settings.USERENA_MUGSHOT_SIZE),
                        'crop': userena_settings.USERENA_MUGSHOT_CROP_TYPE}
    mugshot = ThumbnailerImageField(_('mugshot'),
                                    blank=True,
                                    upload_to=upload_to_mugshot,
                                    resize_source=MUGSHOT_SETTINGS,
                                    help_text=_('A personal image displayed in your profile.'))
    privacy = models.CharField(_('privacy'),
                               max_length=15,
                               choices=PRIVACY_CHOICES,
                               default=userena_settings.USERENA_DEFAULT_PRIVACY,
                               help_text=_('Designates who can view your profile.'))
    objects = UserenaBaseProfileManager()
    class Meta:
        """
        Meta options making the model abstract and defining permissions.

        The model is ``abstract`` because it only supplies basic functionality
        to a more custom defined model that extends it. This way there is not
        another join needed.

        We also define custom permissions because we don't know how the model
        that extends this one is going to be called. So we don't know what
        permissions to check. For ex. if the user defines a profile model that
        is called ``MyProfile``, then the permissions would be
        ``add_myprofile`` etc. We want to be able to always check
        ``add_profile``, ``change_profile`` etc.
        """
        abstract = True
        permissions = PROFILE_PERMISSIONS
    def __str__(self):
        return 'Profile of %(username)s' % {'username': self.user.username}
    def get_mugshot_url(self):
        """
        Returns the image containing the mugshot for the user.

        The mugshot can be an uploaded image or a Gravatar.

        Gravatar functionality will only be used when
        ``USERENA_MUGSHOT_GRAVATAR`` is set to ``True``.

        :return:
            ``None`` when Gravatar is not used and no default image is supplied
            by ``USERENA_MUGSHOT_DEFAULT``.
        """
        # First check for a mugshot and if any return that.
        if self.mugshot:
            return self.mugshot.url
        # Use Gravatar if the user wants to.
        if userena_settings.USERENA_MUGSHOT_GRAVATAR:
            return get_gravatar(self.user.email,
                                userena_settings.USERENA_MUGSHOT_SIZE,
                                userena_settings.USERENA_MUGSHOT_DEFAULT)
        # Gravatar not used, check for a default image.
        else:
            # Gravatar-specific keyword defaults are not standalone image
            # URLs, so only return real (non-keyword) default values.
            if userena_settings.USERENA_MUGSHOT_DEFAULT not in ['404', 'mm',
                                                                'identicon',
                                                                'monsterid',
                                                                'wavatar']:
                return userena_settings.USERENA_MUGSHOT_DEFAULT
            else:
                return None
    def get_full_name_or_username(self):
        """
        Returns the full name of the user, or if none is supplied will return
        the username.

        Also looks at ``USERENA_WITHOUT_USERNAMES`` settings to define if it
        should return the username or email address when the full name is not
        supplied.

        :return:
            ``String`` containing the full name of the user. If no name is
            supplied it will return the username or email address depending on
            the ``USERENA_WITHOUT_USERNAMES`` setting.
        """
        user = self.user
        if user.first_name or user.last_name:
            # We will return this as translated string. Maybe there are some
            # countries that first display the last name.
            name = _("%(first_name)s %(last_name)s") % \
                {'first_name': user.first_name,
                 'last_name': user.last_name}
        else:
            # Fallback to the username if usernames are used
            if not userena_settings.USERENA_WITHOUT_USERNAMES:
                name = "%(username)s" % {'username': user.username}
            else:
                name = "%(email)s" % {'email': user.email}
        return name.strip()
    def can_view_profile(self, user):
        """
        Can the :class:`User` view this profile?

        Returns a boolean if a user has the rights to view the profile of this
        user.

        Users are divided into four groups:

            ``Open``
                Everyone can view your profile

            ``Closed``
                Nobody can view your profile.

            ``Registered``
                Users that are registered on the website and signed
                in only.

            ``Admin``
                Special cases like superadmin and the owner of the profile.

        Through the ``privacy`` field an owner of a profile can define what
        they want to show to whom.

        :param user:
            A Django :class:`User` instance.
        """
        # Simple cases first, we don't want to waste CPU and DB hits.
        # Everyone.
        if self.privacy == 'open':
            return True
        # Registered users.
        elif self.privacy == 'registered' \
                and isinstance(user, get_user_model()):
            return True
        # Checks done by guardian for owner and admins.
        elif 'view_profile' in get_perms(user, self):
            return True
        # Fallback to closed profile.
        return False
class UserenaLanguageBaseProfile(UserenaBaseProfile):
    """
    Extends the :class:`UserenaBaseProfile` with a language choice.

    Use this model in combination with ``UserenaLocaleMiddleware`` to
    automatically set the language of users when they are signed in.
    """
    # Preferred UI language; defaults to the base code of the site-wide
    # LANGUAGE_CODE (first two characters, e.g. "en").
    language = models.CharField(_('language'),
                                max_length=5,
                                choices=settings.LANGUAGES,
                                default=settings.LANGUAGE_CODE[:2],
                                help_text=_('Default language.'))
    class Meta:
        abstract = True
        permissions = PROFILE_PERMISSIONS
| |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Type specifications for TensorFlow APIs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import collections
import re
import numpy as np
import six
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import _pywrap_utils
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export
# Use LazyLoader to avoid circular dependencies.
tensor_spec = LazyLoader(
"tensor_spec", globals(),
"tensorflow.python.framework.tensor_spec")
ops = LazyLoader(
"ops", globals(),
"tensorflow.python.framework.ops")
@tf_export("TypeSpec", v1=["TypeSpec", "data.experimental.Structure"])
@six.add_metaclass(abc.ABCMeta)
class TypeSpec(object):
"""Specifies a TensorFlow value type.
A `tf.TypeSpec` provides metadata describing an object accepted or returned
by TensorFlow APIs. Concrete subclasses, such as `tf.TensorSpec` and
`tf.RaggedTensorSpec`, are used to describe different value types.
For example, `tf.function`'s `input_signature` argument accepts a list
(or nested structure) of `TypeSpec`s.
Creating new subclasses of `TypeSpec` (outside of TensorFlow core) is not
currently supported. In particular, we may make breaking changes to the
private methods and properties defined by this base class.
Example:
>>> spec = tf.RaggedTensorSpec(shape=[None, None], dtype=tf.int32)
>>> @tf.function(input_signature=[spec])
... def double(x):
... return x * 2
>>> print(double(tf.ragged.constant([[1, 2], [3]])))
<tf.RaggedTensor [[2, 4], [6]]>
"""
# === Subclassing ===
#
# Each `TypeSpec` subclass must define:
#
# * A "component encoding" for values.
# * A "serialization" for types.
#
# The component encoding for a value is a nested structure of `tf.Tensor`
# or `CompositeTensor` that can be used by the `TypeSpec` to reconstruct
# the value. Each individual `TypeSpec` must use the same nested structure
# for all values -- this structure is defined by the `component_specs`
# attribute. Decomposing values into components, and reconstructing them
# from those components, should be inexpensive. In particular, it should
# *not* require any TensorFlow ops.
#
# The serialization for a `TypeSpec` is a nested tuple of values that can
# be used to reconstruct the `TypeSpec`. See the documentation for
# `_serialize()` for more information.
__slots__ = []
@abc.abstractproperty
  def value_type(self):
    """The Python type for values that are compatible with this TypeSpec.

    In particular, all values that are compatible with this TypeSpec must be an
    instance of this type.

    Abstract property; concrete subclasses must override it.
    """
    raise NotImplementedError("%s.value_type" % type(self).__name__)
def is_compatible_with(self, spec_or_value):
"""Returns true if `spec_or_value` is compatible with this TypeSpec."""
# === Subclassing ===
# If not overridden by subclasses, the default behavior is to convert
# `spec_or_value` to a `TypeSpec` (if it isn't already); and then to
# consider two `TypeSpec`s compatible if they have the same type, and
# the values returned by `_serialize` are compatible (where
# `tf.TensorShape`, `tf.TensorSpec`, and `tf.DType` are checked for
# compatibility using their `is_compatible_with` method; and all other
# types are considered compatible if they are equal).
if not isinstance(spec_or_value, TypeSpec):
spec_or_value = type_spec_from_value(spec_or_value)
if type(self) is not type(spec_or_value):
return False
return self.__is_compatible(self._serialize(),
spec_or_value._serialize()) # pylint: disable=protected-access
  def most_specific_compatible_type(self, other):
    """Returns the most specific TypeSpec compatible with `self` and `other`.

    Args:
      other: A `TypeSpec`.

    Returns:
      A `TypeSpec` that is compatible with both `self` and `other`.

    Raises:
      ValueError: If there is no TypeSpec that is compatible with both `self`
        and `other`.
    """
    # === Subclassing ===
    # If not overridden by a subclass, the default behavior is to raise a
    # `ValueError` if `self` and `other` have different types, or if their type
    # serializations differ by anything other than `TensorShape`s. Otherwise,
    # the two type serializations are combined (using
    # `most_specific_compatible_shape` to combine `TensorShape`s), and the
    # result is used to construct and return a new `TypeSpec`.
    if type(self) is not type(other):
      raise ValueError("No TypeSpec is compatible with both %s and %s" %
                       (self, other))
    merged = self.__most_specific_compatible_type_serialization(
        self._serialize(), other._serialize())  # pylint: disable=protected-access
    return self._deserialize(merged)
def _with_tensor_ranks_only(self):
"""Returns a TypeSpec compatible with `self`, with tensor shapes relaxed.
Returns:
A `TypeSpec` that is compatible with `self`, where any `TensorShape`
information has been relaxed to include only tensor rank (and not
the dimension sizes for individual axes).
"""
# === Subclassing ===
# If not overridden by a subclass, the default behavior is to serialize
# this TypeSpec, relax any TensorSpec or TensorShape values, and
# deserialize the result.
def relax(value):
if isinstance(value, TypeSpec):
return value._with_tensor_ranks_only() # pylint: disable=protected-access
elif (isinstance(value, tensor_shape.TensorShape) and
value.rank is not None):
return tensor_shape.TensorShape([None] * value.rank)
else:
return value
return self._deserialize(nest.map_structure(relax, self._serialize()))
# === Component encoding for values ===
@abc.abstractmethod
def _to_components(self, value):
"""Encodes `value` as a nested structure of `Tensor` or `CompositeTensor`.
Args:
value: A value compatible with this `TypeSpec`. (Caller is responsible
for ensuring compatibility.)
Returns:
A nested structure of `tf.Tensor` or `tf.CompositeTensor` compatible with
`self._component_specs`, which can be used to reconstruct `value`.
"""
# === Subclassing ===
# This method must be inexpensive (do not call TF ops).
raise NotImplementedError("%s._to_components()" % type(self).__name__)
@abc.abstractmethod
def _from_components(self, components):
"""Reconstructs a value from a nested structure of Tensor/CompositeTensor.
Args:
components: A nested structure of `tf.Tensor` or `tf.CompositeTensor`,
compatible with `self._component_specs`. (Caller is responsible for
ensuring compatibility.)
Returns:
A value that is compatible with this `TypeSpec`.
"""
# === Subclassing ===
# This method must be inexpensive (do not call TF ops).
raise NotImplementedError("%s._from_components()" % type(self).__name__)
  @abc.abstractproperty
  def _component_specs(self):
    """A nested structure of TypeSpecs for this type's components.

    Returns:
      A nested structure describing the component encodings that are returned
      by this TypeSpec's `_to_components` method.  In particular, for a
      TypeSpec `spec` and a compatible value `value`, every component spec is
      compatible with the corresponding component, i.e. each entry of

      ```
      nest.map_structure(lambda t, c: t.is_compatible_with(c),
                         spec._component_specs, spec._to_components(value))
      ```

      is True.
    """
    raise NotImplementedError("%s._component_specs()" % type(self).__name__)
# === Tensor list encoding for values ===
def _to_tensor_list(self, value):
"""Encodes `value` as a flat list of `tf.Tensor`.
By default, this just flattens `self._to_components(value)` using
`nest.flatten`. However, subclasses may override this to return a
different tensor encoding for values. In particular, some subclasses
of `BatchableTypeSpec` override this method to return a "boxed" encoding
for values, which then can be batched or unbatched. See
`BatchableTypeSpec` for more details.
Args:
value: A value with compatible this `TypeSpec`. (Caller is responsible
for ensuring compatibility.)
Returns:
A list of `tf.Tensor`, compatible with `self._flat_tensor_specs`, which
can be used to reconstruct `value`.
"""
return nest.flatten(self._to_components(value), expand_composites=True)
def _from_tensor_list(self, tensor_list):
"""Reconstructs a value from a flat list of `tf.Tensor`.
Args:
tensor_list: A flat list of `tf.Tensor`, compatible with
`self._flat_tensor_specs`.
Returns:
A value that is compatible with this `TypeSpec`.
Raises:
ValueError: If `tensor_list` is not compatible with
`self._flat_tensor_specs`.
"""
self.__check_tensor_list(tensor_list)
return self._from_compatible_tensor_list(tensor_list)
def _from_compatible_tensor_list(self, tensor_list):
"""Reconstructs a value from a compatible flat list of `tf.Tensor`.
Args:
tensor_list: A flat list of `tf.Tensor`, compatible with
`self._flat_tensor_specs`. (Caller is responsible for ensuring
compatibility.)
Returns:
A value that is compatible with this `TypeSpec`.
"""
return self._from_components(nest.pack_sequence_as(
self._component_specs, tensor_list, expand_composites=True))
@property
def _flat_tensor_specs(self):
"""A list of TensorSpecs compatible with self._to_tensor_list(v)."""
return nest.flatten(self._component_specs, expand_composites=True)
# === Serialization for types ===
@abc.abstractmethod
def _serialize(self):
"""Returns a nested tuple containing the state of this TypeSpec.
The serialization may contain the following value types: boolean,
integer, string, float, None, `TensorSpec`, `tf.TensorShape`, `tf.DType`,
`np.ndarray`, `TypeSpec`, and nested tuples, namedtuples, dicts, and
OrderedDicts of any of the above.
This method is used to provide default definitions for: equality
testing (__eq__, __ne__), hashing (__hash__), pickling (__reduce__),
string representation (__repr__), `self.is_compatible_with()`,
`self.most_specific_compatible_type()`, and protobuf serialization
(e.g. TensorInfo and StructuredValue).
"""
raise NotImplementedError("%s._serialize()" % type(self).__name__)
@classmethod
def _deserialize(cls, serialization):
"""Reconstructs a TypeSpec from a value returned by `serialize`.
Args:
serialization: A value returned by _serialize. In some contexts,
`namedtuple`s in `serialization` may not have the identical type
that was returned by `_serialize` (but its type will still be a
`namedtuple` type with the same type name and field names). For
example, the code that loads a SavedModel does not have access to
the original `namedtuple` type, so it dynamically creates a new
`namedtuple` type with the same type name and field names as the
original one. If necessary, you can check `serialization` for these
duck-typed `nametuple` types, and restore them to the original type.
(E.g., this would be necessary if you rely on type checks such as
`isinstance` for this `TypeSpec`'s member variables).
Returns:
A `TypeSpec` of type `cls`.
"""
return cls(*serialization)
  # === Operators ===

  def __eq__(self, other):
    # Two TypeSpecs are equal iff they are instances of the exact same class
    # and their canonicalized serializations (__get_cmp_key) match.
    # pylint: disable=protected-access
    return (type(other) is type(self) and
            self.__get_cmp_key() == other.__get_cmp_key())
  def __ne__(self, other):
    # Defined in terms of __eq__ so the two operators always agree.
    return not self == other
  def __hash__(self):
    # Hash the same key used by __eq__, keeping hash/eq consistent.
    return hash(self.__get_cmp_key())
  def __reduce__(self):
    # Pickle as (class, serialized-state); unpickling calls cls(*state),
    # matching the default _deserialize.
    return type(self), self._serialize()
  def __repr__(self):
    # E.g. "SomeSpec(<serialized state>)".
    return "%s%r" % (type(self).__name__, self._serialize())
  # === Legacy Output ===
  # TODO(b/133606651) Document and/or deprecate the legacy_output methods.
  # (These are used by tf.data.)

  def _to_legacy_output_types(self):
    # Subclasses used with tf.data's legacy structure API override this.
    raise NotImplementedError("%s._to_legacy_output_types()" %
                              type(self).__name__)
  def _to_legacy_output_shapes(self):
    # Subclasses used with tf.data's legacy structure API override this.
    raise NotImplementedError("%s._to_legacy_output_shapes()" %
                              type(self).__name__)
  def _to_legacy_output_classes(self):
    # By default, the legacy "class" of a value is its value_type.
    return self.value_type
# === Private Helper Methods ===
def __check_tensor_list(self, tensor_list):
expected = self._flat_tensor_specs
specs = [type_spec_from_value(t) for t in tensor_list]
if len(specs) != len(expected):
raise ValueError("Incompatible input: wrong number of tensors")
for i, (s1, s2) in enumerate(zip(specs, expected)):
if not s1.is_compatible_with(s2):
raise ValueError("Incompatible input: tensor %d (%s) is incompatible "
"with %s" % (i, tensor_list[i], s2))
  def __get_cmp_key(self):
    """Returns a hashable eq-comparable key for `self`."""
    # TODO(b/133606651): Decide whether to cache this value.
    # Pairs the concrete class with a canonicalized serialization so that
    # different subclasses never compare equal.
    return (type(self), self.__make_cmp_key(self._serialize()))
  def __make_cmp_key(self, value):
    """Converts `value` to a hashable key."""
    # Scalars, dtypes and nested TypeSpecs are hashable as-is.
    if isinstance(value,
                  (int, float, bool, np.generic, dtypes.DType, TypeSpec)):
      return value
    if isinstance(value, compat.bytes_or_text_types):
      return value
    if value is None:
      return value
    # Dicts become sorted ((key, value), ...) tuples so that two dicts with
    # the same contents produce the same key.
    if isinstance(value, dict):
      return tuple([
          tuple([self.__make_cmp_key(key),
                 self.__make_cmp_key(value[key])])
          for key in sorted(value.keys())
      ])
    if isinstance(value, tuple):
      return tuple([self.__make_cmp_key(v) for v in value])
    # Lists are tagged with the `list` type object so they never collide with
    # a tuple of the same elements.
    if isinstance(value, list):
      return (list, tuple([self.__make_cmp_key(v) for v in value]))
    if isinstance(value, tensor_shape.TensorShape):
      if value.ndims is None:
        # Note: we include a type object in the tuple, to ensure we can't get
        # false-positive matches (since users can't include type objects).
        return (tensor_shape.TensorShape, None)
      return (tensor_shape.TensorShape, tuple(value.as_list()))
    # Arrays are keyed by shape plus a nested tuple of their elements.
    if isinstance(value, np.ndarray):
      return (np.ndarray, value.shape,
              TypeSpec.__nested_list_to_tuple(value.tolist()))
    raise ValueError("Unsupported value type %s returned by "
                     "%s._serialize" %
                     (type(value).__name__, type(self).__name__))
@staticmethod
def __nested_list_to_tuple(value):
"""Converts a nested list to a corresponding nested tuple."""
if isinstance(value, list):
return tuple(TypeSpec.__nested_list_to_tuple(v) for v in value)
return value
@staticmethod
def __same_types(a, b):
"""Returns whether a and b have the same type, up to namedtuple equivalence.
Consistent with tf.nest.assert_same_structure(), two namedtuple types
are considered the same iff they agree in their class name (without
qualification by module name) and in their sequence of field names.
This makes namedtuples recreated by StructureCoder compatible with their
original Python definition.
Args:
a: a Python object.
b: a Python object.
Returns:
A boolean that is true iff type(a) and type(b) are the same object
or equivalent namedtuple types.
"""
if nest.is_namedtuple(a) and nest.is_namedtuple(b):
return nest.same_namedtuples(a, b)
else:
return type(a) is type(b)
  @staticmethod
  def __is_compatible(a, b):
    """Returns true if the given type serializations compatible."""
    # Nested TypeSpecs delegate to their own compatibility check.
    if isinstance(a, TypeSpec):
      return a.is_compatible_with(b)
    # Everything else must first agree on type (up to namedtuple equivalence).
    if not TypeSpec.__same_types(a, b):
      return False
    # Sequences and mappings are compared element-wise, recursively.
    if isinstance(a, (list, tuple)):
      return (len(a) == len(b) and
              all(TypeSpec.__is_compatible(x, y) for (x, y) in zip(a, b)))
    if isinstance(a, dict):
      return (len(a) == len(b) and sorted(a.keys()) == sorted(b.keys()) and all(
          TypeSpec.__is_compatible(a[k], b[k]) for k in a.keys()))
    # TensorShapes and DTypes define their own notion of compatibility.
    if isinstance(a, (tensor_shape.TensorShape, dtypes.DType)):
      return a.is_compatible_with(b)
    return a == b
@staticmethod
def __most_specific_compatible_type_serialization(a, b):
"""Helper for most_specific_compatible_type.
Combines two type serializations as follows:
* If they are both tuples of the same length, then recursively combine
the respective tuple elements.
* If they are both dicts with the same keys, then recursively combine
the respective dict elements.
* If they are both TypeSpecs, then combine using
TypeSpec.most_specific_compatible_type.
* If they are both TensorShapes, then combine using
TensorShape.most_specific_compatible_shape.
* If they are both TensorSpecs with the same dtype, then combine using
TensorShape.most_specific_compatible_shape to combine shapes.
* If they are equal, then return a.
* If none of the above, then raise a ValueError.
Args:
a: A serialized TypeSpec or nested component from a serialized TypeSpec.
b: A serialized TypeSpec or nested component from a serialized TypeSpec.
Returns:
A value with the same type and structure as `a` and `b`.
Raises:
ValueError: If `a` and `b` are incompatible.
"""
if not TypeSpec.__same_types(a, b):
raise ValueError("Types are not compatible: %r vs %r" % (a, b))
if nest.is_namedtuple(a):
assert a._fields == b._fields # Implied by __same_types(a, b).
return type(a)(*[
TypeSpec.__most_specific_compatible_type_serialization(x, y)
for (x, y) in zip(a, b)])
if isinstance(a, (list, tuple)):
if len(a) != len(b):
raise ValueError("Types are not compatible: %r vs %r" % (a, b))
return tuple(TypeSpec.__most_specific_compatible_type_serialization(x, y)
for (x, y) in zip(a, b))
if isinstance(a, collections.OrderedDict):
a_keys, b_keys = a.keys(), b.keys()
if len(a) != len(b) or a_keys != b_keys:
raise ValueError("Types are not compatible: %r vs %r" % (a, b))
return collections.OrderedDict([
(k,
TypeSpec.__most_specific_compatible_type_serialization(a[k], b[k]))
for k in a_keys
])
if isinstance(a, dict):
a_keys, b_keys = sorted(a.keys()), sorted(b.keys())
if len(a) != len(b) or a_keys != b_keys:
raise ValueError("Types are not compatible: %r vs %r" % (a, b))
return {
k: TypeSpec.__most_specific_compatible_type_serialization(a[k], b[k])
for k in a_keys
}
if isinstance(a, tensor_shape.TensorShape):
return a.most_specific_compatible_shape(b)
if isinstance(a, list):
raise AssertionError("_serialize() should not return list values.")
if isinstance(a, TypeSpec):
return a.most_specific_compatible_type(b)
if a != b:
raise ValueError("Types are not compatible: %r vs %r" % (a, b))
return a
class BatchableTypeSpec(TypeSpec):
  """TypeSpec with a batchable tensor encoding.

  The batchable tensor encoding is a list of `tf.Tensor`s that supports
  batching and unbatching: stacking (or unstacking) values with the same
  `TypeSpec` must be equivalent to stacking (or unstacking) each of their
  tensor lists.  Unlike the component encoding (returned by
  `self._to_components`), the batchable tensor encoding may require using
  encoding/decoding ops.

  Subclasses whose batchable tensor encoding is not simply a flattened
  version of the component encoding must override `_to_tensor_list`,
  `_from_tensor_list`, and `_flat_tensor_specs`.
  """

  __slots__ = []

  @abc.abstractmethod
  def _batch(self, batch_size):
    """Returns a TypeSpec representing a batch of objects with this TypeSpec.

    Args:
      batch_size: An `int` representing the number of elements in a batch,
        or `None` if the batch size may vary.

    Returns:
      A `TypeSpec` representing a batch of objects with this TypeSpec.
    """
    raise NotImplementedError("%s._batch" % type(self).__name__)

  @abc.abstractmethod
  def _unbatch(self):
    """Returns a TypeSpec representing a single element this TypeSpec.

    Returns:
      A `TypeSpec` representing a single element of objects with this TypeSpec.
    """
    raise NotImplementedError("%s._unbatch" % type(self).__name__)

  def _to_batched_tensor_list(self, value):
    """Returns a tensor list encoding for value with rank>0."""
    encoded = self._to_tensor_list(value)
    # Scalar tensors cannot be stacked along a batch dimension.
    for t in encoded:
      if t.shape.ndims == 0:
        raise ValueError("Value %s has insufficient rank for batching." % value)
    return encoded
@tf_export("type_spec_from_value")
def type_spec_from_value(value):
  """Returns a `tf.TypeSpec` that represents the given `value`.

  Examples:

  >>> tf.type_spec_from_value(tf.constant([1, 2, 3]))
  TensorSpec(shape=(3,), dtype=tf.int32, name=None)
  >>> tf.type_spec_from_value(np.array([4.0, 5.0], np.float64))
  TensorSpec(shape=(2,), dtype=tf.float64, name=None)
  >>> tf.type_spec_from_value(tf.ragged.constant([[1, 2], [3, 4, 5]]))
  RaggedTensorSpec(TensorShape([2, None]), tf.int32, 1, tf.int64)

  >>> example_input = tf.ragged.constant([[1, 2], [3]])
  >>> @tf.function(input_signature=[tf.type_spec_from_value(example_input)])
  ... def f(x):
  ...   return tf.reduce_sum(x, axis=1)

  Args:
    value: A value that can be accepted or returned by TensorFlow APIs.
      Accepted types for `value` include `tf.Tensor`, any value that can be
      converted to `tf.Tensor` using `tf.convert_to_tensor`, and any subclass
      of `CompositeTensor` (such as `tf.RaggedTensor`).

  Returns:
    A `TypeSpec` that is compatible with `value`.

  Raises:
    TypeError: If a TypeSpec cannot be built for `value`, because its type
      is not supported.
  """
  # First, try the built-in and registered converters directly.
  direct_spec = _type_spec_from_value(value)
  if direct_spec is not None:
    return direct_spec

  # Fallback: try converting value to a tensor.
  try:
    as_tensor = ops.convert_to_tensor(value)
    converted_spec = _type_spec_from_value(as_tensor)
    if converted_spec is not None:
      return converted_spec
  except (ValueError, TypeError) as e:
    logging.vlog(
        3, "Failed to convert %r to tensor: %s" % (type(value).__name__, e))

  raise TypeError("Could not build a TypeSpec for %r with type %s" %
                  (value, type(value).__name__))
def _type_spec_from_value(value):
  """Returns a `TypeSpec` that represents the given `value`."""
  if isinstance(value, ops.Tensor):
    # Note: we do not include Tensor names when constructing TypeSpecs.
    return tensor_spec.TensorSpec(value.shape, value.dtype)

  if isinstance(value, composite_tensor.CompositeTensor):
    return value._type_spec  # pylint: disable=protected-access

  # If `value` is a list and all of its elements can be represented by the same
  # batchable type spec, then we can represent the entire list using a single
  # type spec that captures the type accurately (unlike the `convert_to_tensor`
  # fallback).
  if isinstance(value, list) and value:
    subspecs = [_type_spec_from_value(v) for v in value]
    if isinstance(subspecs[0], BatchableTypeSpec):
      merged_subspec = subspecs[0]
      try:
        for subspec in subspecs[1:]:
          merged_subspec = merged_subspec.most_specific_compatible_type(subspec)
        # Stack the merged element spec into a batch of len(subspecs) elements.
        return merged_subspec._batch(len(subspecs))  # pylint: disable=protected-access
      except (ValueError, TypeError):
        pass  # incompatible subspecs

  # Registered converters; iterate in reverse so that the most recent
  # registration takes precedence.
  for entry in reversed(_TYPE_CONVERSION_FUNCTION_REGISTRY):
    type_object, converter_fn, allow_subclass = entry
    if ((type(value) is type_object) or  # pylint: disable=unidiomatic-typecheck
        (allow_subclass and isinstance(value, type_object))):
      return converter_fn(value)

  # No converter matched; the public wrapper falls back to convert_to_tensor.
  return None
# Ordered registry of (type_object, converter_fn, allow_subclass) triples.
# Searched in reverse, so the most recent registration wins.
_TYPE_CONVERSION_FUNCTION_REGISTRY = []


def register_type_spec_from_value_converter(type_object, converter_fn,
                                            allow_subclass=False):
  """Registers a function for converting values with a given type to TypeSpecs.

  If multiple registered `type_object`s match a value, then the most recent
  registration takes precedence.  Custom converters should not be defined for
  `CompositeTensor`s; use `CompositeTensor._type_spec` instead.

  Args:
    type_object: A Python `type` object representing the type of values
      accepted by `converter_fn`.
    converter_fn: A function that takes one argument (an instance of the
      type represented by `type_object`) and returns a `TypeSpec`.
    allow_subclass: If true, then use `isinstance(value, type_object)` to
      check for matches. If false, then use `type(value) is type_object`.
  """
  # Unwrap any TF decorators so registration matches the underlying class.
  _, type_object = tf_decorator.unwrap(type_object)
  _TYPE_CONVERSION_FUNCTION_REGISTRY.append(
      (type_object, converter_fn, allow_subclass))
# Make TypeSpec visible to the C++ layer's type-checking utilities.
_pywrap_utils.RegisterType("TypeSpec", TypeSpec)


# Bidirectional registries between TypeSpec subclasses and their globally
# unique names; populated by the `register` decorator below.
_TYPE_SPEC_TO_NAME = {}
_NAME_TO_TYPE_SPEC = {}


# Regular expression for valid TypeSpec names.
_REGISTERED_NAME_RE = re.compile(r"^(\w+\.)+\w+$")
# TODO(b/173744905) tf_export this as "tf.register_type_spec". (And add a
# usage example to the docstring, once the API is public.)
#
# TODO(b/173744905) Update this decorator to apply to ExtensionType rather than
# TypeSpec (once we do refactoring to move to_components/from_components from
# TypeSpec to ExtensionType).
def register(name):
  """Decorator used to register a globally unique name for a TypeSpec subclass.

  Args:
    name: The name of the type spec. Must be globally unique. Must have
      the form `"{project_name}.{type_name}"`. E.g. `"my_project.MyTypeSpec"`.

  Returns:
    A class decorator that registers the decorated class with the given name.

  Raises:
    TypeError: If `name` is not a string.
    ValueError: If `name` does not have the required form.
  """
  if not isinstance(name, str):
    raise TypeError("Expected `name` to be a string; got %r" % (name,))
  if not _REGISTERED_NAME_RE.match(name):
    raise ValueError(
        "Registered name must have the form '{project_name}.{type_name}' "
        "(e.g. 'my_project.MyTypeSpec'); got %r." % name)

  def _do_register(cls):
    # Only TypeSpec subclasses may be registered.
    if not (isinstance(cls, type) and issubclass(cls, TypeSpec)):
      raise TypeError("Expected `cls` to be a TypeSpec; got %r" % (cls,))
    # Each class may be registered under at most one name...
    if cls in _TYPE_SPEC_TO_NAME:
      raise ValueError("Class %s.%s has already been registered with name %s."
                       % (cls.__module__, cls.__name__,
                          _TYPE_SPEC_TO_NAME[cls]))
    # ...and each name may refer to at most one class.
    if name in _NAME_TO_TYPE_SPEC:
      raise ValueError("Name %s has already been registered for class %s.%s."
                       % (name, _NAME_TO_TYPE_SPEC[name].__module__,
                          _NAME_TO_TYPE_SPEC[name].__name__))
    _TYPE_SPEC_TO_NAME[cls] = name
    _NAME_TO_TYPE_SPEC[name] = cls
    return cls

  return _do_register
# TODO(edloper) tf_export this as "tf.get_type_spec_name" (or some similar name)
def get_name(cls):
  """Returns the registered name for TypeSpec `cls`."""
  if not (isinstance(cls, type) and issubclass(cls, TypeSpec)):
    raise TypeError("Expected `cls` to be a TypeSpec; got %r" % (cls,))
  if cls in _TYPE_SPEC_TO_NAME:
    return _TYPE_SPEC_TO_NAME[cls]
  # Not registered via the `register` decorator.
  raise ValueError("TypeSpec %s.%s has not been registered." %
                   (cls.__module__, cls.__name__))
# TODO(edloper) tf_export this as "tf.lookup_type_spec" (or some similar name)
def lookup(name):
  """Returns the TypeSpec that has been registered with name `name`."""
  if not isinstance(name, str):
    raise TypeError("Expected `name` to be a string; got %r" % (name,))
  if name in _NAME_TO_TYPE_SPEC:
    return _NAME_TO_TYPE_SPEC[name]
  # No class was registered under this name.
  raise ValueError("No TypeSpec has been registered with name %r" % (name,))
| |
"""Qubits for quantum computing.
Todo:
* Finish implementing measurement logic. This should include POVM.
* Update docstrings.
* Update tests.
"""
import math
from sympy import Integer, log, Mul, Add, Pow, conjugate
from sympy.core.basic import sympify
from sympy.matrices.matrices import Matrix, zeros
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.hilbert import ComplexSpace
from sympy.physics.quantum.state import Ket, Bra, State
from sympy.physics.quantum.qexpr import QuantumError
from sympy.physics.quantum.represent import represent
from sympy.physics.quantum.matrixutils import (
numpy_ndarray, scipy_sparse_matrix
)
from sympy.mpmath.libmp.libintmath import bitcount
# Public API of this module.
__all__ = [
    'Qubit',
    'QubitBra',
    'IntQubit',
    'IntQubitBra',
    'qubit_to_matrix',
    'matrix_to_qubit',
    'measure_all',
    'measure_partial',
    'measure_partial_oneshot',
    'measure_all_oneshot'
]
#-----------------------------------------------------------------------------
# Qubit Classes
#-----------------------------------------------------------------------------
class QubitState(State):
    """Base class for Qubit and QubitBra.

    Stores a sequence of qubit values (each 0 or 1) as the state's label.
    Bit indexing is little-endian: bit 0 is the least significant qubit,
    i.e. the rightmost entry of the label (see ``__getitem__``).
    """

    #-------------------------------------------------------------------------
    # Initialization/creation
    #-------------------------------------------------------------------------

    @classmethod
    def _eval_args(cls, args):
        """Validate the arguments and canonicalize them to qubit values."""
        # If we are passed a QubitState or subclass, we just take its qubit
        # values directly.
        if len(args) == 1 and isinstance(args[0], QubitState):
            return args[0].qubit_values

        # Turn strings into tuple of strings
        # NOTE: `basestring` implies this file targets Python 2; under
        # Python 3 this would need to be `str`.
        if len(args) == 1 and isinstance(args[0], basestring):
            args = tuple(args[0])

        # Convert to sympy objects (e.g. '0'/'1' -> Integer(0)/Integer(1)).
        args = sympify(args)

        # Validate input (must have 0 or 1 input)
        for element in args:
            if not (element == 1 or element == 0):
                raise ValueError("Qubit values must be 0 or 1, got: %r" % element)
        return args

    @classmethod
    def _eval_hilbert_space(cls, args):
        # n qubits live in the n-fold tensor power of a 2-dim complex space.
        return ComplexSpace(2)**len(args)

    #-------------------------------------------------------------------------
    # Properties
    #-------------------------------------------------------------------------

    @property
    def dimension(self):
        """The number of Qubits in the state."""
        return len(self.qubit_values)

    @property
    def nqubits(self):
        # Alias for dimension.
        return self.dimension

    @property
    def qubit_values(self):
        """Returns the values of the qubits as a tuple."""
        return self.label

    #-------------------------------------------------------------------------
    # Special methods
    #-------------------------------------------------------------------------

    def __len__(self):
        return self.dimension

    def __getitem__(self, bit):
        # Little-endian access: bit 0 maps to the rightmost label entry.
        return self.qubit_values[int(self.dimension-bit-1)]

    #-------------------------------------------------------------------------
    # Utility methods
    #-------------------------------------------------------------------------

    def flip(self, *bits):
        """Flip the bit(s) given."""
        newargs = list(self.qubit_values)
        for i in bits:
            # Translate little-endian bit index into a tuple position.
            bit = int(self.dimension-i-1)
            if newargs[bit] == 1:
                newargs[bit] = 0
            else:
                newargs[bit] = 1
        # Build a new instance of the same (sub)class with flipped values.
        return self.__class__(*tuple(newargs))
class Qubit(QubitState, Ket):
    """A multi-qubit ket in the computational (z) basis.

    We use the normal convention that the least significant qubit is on the
    right, so ``|00001>`` has a 1 in the least significant qubit.

    Parameters
    ==========

    values : list, str
        The qubit values as a list of ints ([0,0,0,1,1,]) or a string ('011').

    Examples
    ========

    Create a qubit in a couple of different ways and look at their attributes:

        >>> from sympy.physics.quantum.qubit import Qubit
        >>> Qubit(0,0,0)
        |000>
        >>> q = Qubit('0101')
        >>> q
        |0101>

        >>> q.nqubits
        4
        >>> len(q)
        4
        >>> q.dimension
        4
        >>> q.qubit_values
        (0, 1, 0, 1)

    We can flip the value of an individual qubit:

        >>> q.flip(1)
        |0111>

    We can take the dagger of a Qubit to get a bra:

        >>> from sympy.physics.quantum.dagger import Dagger
        >>> Dagger(q)
        <0101|
        >>> type(Dagger(q))
        <class 'sympy.physics.quantum.qubit.QubitBra'>

    Inner products work as expected:

        >>> ip = Dagger(q)*q
        >>> ip
        <0101|0101>
        >>> ip.doit()
        1
    """

    @classmethod
    def dual_class(self):
        # The dual of a ket is the corresponding bra class.
        return QubitBra

    def _eval_innerproduct_QubitBra(self, bra, **hints):
        # Computational basis states are orthonormal: <x|y> = delta(x, y).
        if self.label == bra.label:
            return Integer(1)
        else:
            return Integer(0)

    def _represent_default_basis(self, **options):
        return self._represent_ZGate(None, **options)

    def _represent_ZGate(self, basis, **options):
        """Represent this qubits in the computational basis (ZGate).

        Raises
        ======

        ValueError
            If ``options['format']`` is not one of 'sympy', 'numpy' or
            'scipy.sparse'.
        """
        format = options.get('format', 'sympy')
        # Compute the integer index of this basis state: the binary value of
        # the qubit string, least significant qubit last.
        n = 1
        definite_state = 0
        for it in reversed(self.qubit_values):
            definite_state += n*it
            n = n*2
        # Build the standard basis vector with a 1 at that index.
        result = [0]*(2**self.dimension)
        result[int(definite_state)] = 1
        if format == 'sympy':
            return Matrix(result)
        elif format == 'numpy':
            import numpy as np
            return np.matrix(result, dtype='complex').transpose()
        elif format == 'scipy.sparse':
            from scipy import sparse
            return sparse.csr_matrix(result, dtype='complex').transpose()
        else:
            # Previously an unknown format fell through and silently returned
            # None; fail loudly instead.
            raise ValueError('Invalid format: %r' % format)
class QubitBra(QubitState, Bra):
    """A multi-qubit bra in the computational (z) basis.

    We use the normal convention that the least significant qubit is on the
    right, so ``|00001>`` has a 1 in the least significant qubit.

    Parameters
    ==========

    values : list, str
        The qubit values as a list of ints ([0,0,0,1,1,]) or a string ('011').

    See also
    ========

    Qubit: Examples using qubits
    """

    @classmethod
    def dual_class(self):
        # The dual of a bra is the corresponding ket class.
        return Qubit
class IntQubitState(QubitState):
    """A base class for qubits that work with binary representations."""

    @classmethod
    def _eval_args(cls, args):
        # The case of a QubitState instance
        if len(args) == 1 and isinstance(args[0], QubitState):
            return QubitState._eval_args(args)
        # For a single argument, we construct the binary representation of
        # that integer with the minimal number of bits.
        # NOTE: `xrange` implies Python 2; under Python 3 this would need
        # `range`.
        if len(args) == 1 and args[0] > 1:
            #rvalues is the minimum number of bits needed to express the number
            rvalues = reversed(xrange(bitcount(abs(args[0]))))
            qubit_values = [(args[0]>>i)&1 for i in rvalues]
            return QubitState._eval_args(qubit_values)
        # For two numbers, the second number is the number of bits
        # on which it is expressed, so IntQubit(0,5) == |00000>.
        elif len(args) == 2 and args[1] > 1:
            # Reject widths too small to hold the requested value.
            need = bitcount(abs(args[0]))
            if args[1] < need:
                raise ValueError('cannot represent %s with %s bits' % (args[0], args[1]))
            qubit_values = [(args[0]>>i)&1 for i in reversed(range(args[1]))]
            return QubitState._eval_args(qubit_values)
        else:
            # Zero/one values (or other argument shapes) fall back to the
            # plain QubitState handling.
            return QubitState._eval_args(args)

    def as_int(self):
        """Return the numerical value of the qubit."""
        # Accumulate bits from least significant (rightmost) to most.
        number = 0
        n = 1
        for i in reversed(self.qubit_values):
            number += n*i
            n = n<<1
        return number

    def _print_label(self, printer, *args):
        # Print the integer value rather than the raw bit string.
        return str(self.as_int())

    def _print_label_pretty(self, printer, *args):
        label = self._print_label(printer, *args)
        return prettyForm(label)

    # repr and latex printing reuse the plain integer label.
    _print_label_repr = _print_label
    _print_label_latex = _print_label
class IntQubit(IntQubitState, Qubit):
    """A qubit ket that stores integers as binary numbers in qubit values.

    The differences between this class and ``Qubit`` are:

    * The form of the constructor.
    * The qubit values are printed as their corresponding integer, rather
      than the raw qubit values. The internal storage format of the qubit
      values in the same as ``Qubit``.

    Parameters
    ==========

    values : int, tuple
        If a single argument, the integer we want to represent in the qubit
        values. This integer will be represented using the fewest possible
        number of qubits. If a pair of integers, the first integer gives the
        integer to represent in binary form and the second integer gives
        the number of qubits to use.

    Examples
    ========

    Create a qubit for the integer 5:

        >>> from sympy.physics.quantum.qubit import IntQubit
        >>> from sympy.physics.quantum.qubit import Qubit
        >>> q = IntQubit(5)
        >>> q
        |5>

    We can also create an ``IntQubit`` by passing a ``Qubit`` instance.

        >>> q = IntQubit(Qubit('101'))
        >>> q
        |5>
        >>> q.as_int()
        5
        >>> q.nqubits
        3
        >>> q.qubit_values
        (1, 0, 1)

    We can go back to the regular qubit form.

        >>> Qubit(q)
        |101>
    """

    @classmethod
    def dual_class(self):
        # The dual of an integer ket is the integer bra class.
        return IntQubitBra
class IntQubitBra(IntQubitState, QubitBra):
    """A qubit bra that stores integers as binary numbers in qubit values."""

    @classmethod
    def dual_class(self):
        # The dual of an integer bra is the integer ket class.
        return IntQubit
#-----------------------------------------------------------------------------
# Qubit <---> Matrix conversion functions
#-----------------------------------------------------------------------------


def matrix_to_qubit(matrix):
    """Convert from the matrix repr. to a sum of Qubit objects.

    Parameters
    ----------
    matrix : Matrix, numpy.matrix, scipy.sparse
        The matrix to build the Qubit representation of. This works with
        sympy matrices, numpy matrices and scipy.sparse sparse matrices.

    Examples
    --------

    Represent a state and then go back to its qubit form:

        >>> from sympy.physics.quantum.qubit import matrix_to_qubit, Qubit
        >>> from sympy.physics.quantum.gate import Z
        >>> from sympy.physics.quantum.represent import represent
        >>> q = Qubit('01')
        >>> matrix_to_qubit(represent(q))
        |01>
    """
    # Determine the format based on the type of the input matrix
    format = 'sympy'
    if isinstance(matrix, numpy_ndarray):
        format = 'numpy'
    if isinstance(matrix, scipy_sparse_matrix):
        format = 'scipy.sparse'

    # Make sure it is of correct dimensions for a Qubit-matrix representation.
    # This logic should work with sympy, numpy or scipy.sparse matrices.
    if matrix.shape[0] == 1:
        # A row vector corresponds to a bra.
        mlistlen = matrix.shape[1]
        nqubits = log(mlistlen, 2)
        ket = False
        cls = QubitBra
    elif matrix.shape[1] == 1:
        # A column vector corresponds to a ket.
        mlistlen = matrix.shape[0]
        nqubits = log(mlistlen, 2)
        ket = True
        cls = Qubit
    else:
        raise QuantumError(
            'Matrix must be a row/column vector, got %r' % matrix
        )
    # Power-of-two check: log(mlistlen, 2) only simplifies to an Integer
    # when the length is an exact power of two.
    if not isinstance(nqubits, Integer):
        raise QuantumError('Matrix must be a row/column vector of size '
                           '2**nqubits, got: %r' % matrix)
    # Go through each item in matrix, if element is non-zero, make it into a
    # Qubit item times the element.
    result = 0
    for i in range(mlistlen):
        if ket:
            element = matrix[i,0]
        else:
            element = matrix[0,i]
        if format == 'numpy' or format == 'scipy.sparse':
            element = complex(element)
        if element != 0.0:
            # Form Qubit array; 0 in bit-locations where i is 0, 1 in
            # bit-locations where i is 1
            qubit_array = [int(i & (1<<x) != 0) for x in range(nqubits)]
            qubit_array.reverse()
            result = result + element*cls(*qubit_array)

    # If sympy simplified by pulling out a constant coefficient, undo that.
    if isinstance(result, (Mul,Add,Pow)):
        result = result.expand()

    return result
def qubit_to_matrix(qubit, format='sympy'):
    """Converts an Add/Mul of Qubit objects into its matrix representation.

    This function is the inverse of ``matrix_to_qubit`` and is a shorthand
    for ``represent(qubit)``.
    """
    return represent(qubit, format=format)
#-----------------------------------------------------------------------------
# Measurement
#-----------------------------------------------------------------------------


def measure_all(qubit, format='sympy', normalize=True):
    """Perform an ensemble measurement of all qubits.

    Parameters
    ==========

    qubit : Qubit, Add
        The qubit to measure. This can be any Qubit or a linear combination
        of them.
    format : str
        The format of the intermediate matrices to use. Possible values are
        ('sympy','numpy','scipy.sparse'). Currently only 'sympy' is
        implemented.
    normalize : bool
        If True (the default), normalize the state vector before computing
        the measurement probabilities.

    Returns
    =======

    result : list
        A list that consists of primitive states and their probabilities.

    Examples
    ========

        >>> from sympy.physics.quantum.qubit import Qubit, measure_all
        >>> from sympy.physics.quantum.gate import H, X, Y, Z
        >>> from sympy.physics.quantum.qapply import qapply

        >>> c = H(0)*H(1)*Qubit('00')
        >>> c
        H(0)*H(1)*|00>
        >>> q = qapply(c)
        >>> measure_all(q)
        [(|00>, 1/4), (|01>, 1/4), (|10>, 1/4), (|11>, 1/4)]
    """
    m = qubit_to_matrix(qubit, format)

    if format == 'sympy':
        results = []

        if normalize:
            m = m.normalized()

        size = max(m.shape)  # Max of shape to account for bra or ket
        # Use exact integer arithmetic for log2: int(math.log(size)/math.log(2))
        # relies on floating point and can round incorrectly for large powers
        # of two; bit_length() - 1 is floor(log2(size)) exactly.
        nqubits = int(size).bit_length() - 1
        for i in range(size):
            if m[i] != 0.0:
                # Probability of basis state |i> is |amplitude|^2.
                results.append(
                    (Qubit(IntQubit(i, nqubits)), m[i]*conjugate(m[i]))
                )
        return results
    else:
        raise NotImplementedError(
            "This function can't handle non-sympy matrix formats yet"
        )
def measure_partial(qubit, bits, format='sympy', normalize=True):
    """Perform a partial ensemble measure on the specified qubits.

    Parameters
    ==========
    qubits : Qubit
        The qubit to measure. This can be any Qubit or a linear combination
        of them.
    bits : tuple
        The qubits to measure.
    format : str
        The format of the intermediate matrices to use. Possible values are
        ('sympy','numpy','scipy.sparse'). Currently only 'sympy' is
        implemented.

    Returns
    =======
    result : list
        A list that consists of primitive states and their probabilities.

    Examples
    ========
    >>> from sympy.physics.quantum.qubit import Qubit, measure_partial
    >>> from sympy.physics.quantum.gate import H, X, Y, Z
    >>> from sympy.physics.quantum.qapply import qapply
    >>> c = H(0)*H(1)*Qubit('00')
    >>> c
    H(0)*H(1)*|00>
    >>> q = qapply(c)
    >>> measure_partial(q, (0,))
    [(sqrt(2)*|00>/2 + sqrt(2)*|10>/2, 1/2), (sqrt(2)*|01>/2 + sqrt(2)*|11>/2, 1/2)]
    """
    m = qubit_to_matrix(qubit, format)
    # Accept a bare bit index as well as a tuple of indices.
    if isinstance(bits, (int, Integer)):
        bits = (int(bits),)
    if format != 'sympy':
        raise NotImplementedError(
            "This function can't handle non-sympy matrix formats yet"
        )
    if normalize:
        m = m.normalized()
    output = []
    for outcome in _get_possible_outcomes(m, bits):
        # Probability of this configuration of the measured bits is the
        # (un-normalized) inner product of the outcome with itself.
        probability = (outcome.H*outcome)[0]
        if probability == 0:
            # This outcome can never be observed; drop it.
            continue
        state = outcome.normalized() if normalize else outcome
        output.append((matrix_to_qubit(state), probability))
    return output
def measure_partial_oneshot(qubit, bits, format='sympy'):
    """Perform a partial oneshot measurement on the specified qubits.

    A oneshot measurement is equivalent to performing a measurement on a
    quantum system. This type of measurement does not return the probabilities
    like an ensemble measurement does, but rather returns *one* of the
    possible resulting states. The exact state that is returned is determined
    by picking a state randomly according to the ensemble probabilities.

    Parameters
    ----------
    qubits : Qubit
        The qubit to measure. This can be any Qubit or a linear combination
        of them.
    bits : tuple
        The qubits to measure.
    format : str
        The format of the intermediate matrices to use. Possible values are
        ('sympy','numpy','scipy.sparse'). Currently only 'sympy' is
        implemented.

    Returns
    -------
    result : Qubit
        The qubit that the system collapsed to upon measurement.
    """
    import random
    m = qubit_to_matrix(qubit, format)
    if format != 'sympy':
        raise NotImplementedError(
            "This function can't handle non-sympy matrix formats yet"
        )
    m = m.normalized()
    # Sample one outcome: walk the cumulative probability distribution
    # until it crosses a uniform random threshold.
    threshold = random.random()
    cumulative_prob = 0
    for outcome in _get_possible_outcomes(m, bits):
        cumulative_prob += (outcome.H*outcome)[0]
        if cumulative_prob >= threshold:
            return matrix_to_qubit(outcome.normalized())
def _get_possible_outcomes(m, bits):
    """Get the possible states that can be produced in a measurement.

    Parameters
    ----------
    m : Matrix
        The matrix representing the state of the system.
    bits : tuple, list
        Which bits will be measured.

    Returns
    -------
    result : list
        The list of possible states which can occur given this measurement.
        These are un-normalized, so we can derive the probability of finding
        this state by taking the inner product with itself.
    """
    # This is filled with loads of dirty binary tricks...You have been warned
    size = max(m.shape)  # Max of shape to account for bra or ket
    nqubits = int(math.log(size, 2)+.1)  # Number of qubits possible
    # One output state per assignment of the measured bits.
    # output_matrices[k] holds the (un-normalized) state in which the
    # measured bits, read as a binary number, equal k.
    output_matrices = []
    for i in range(1 << len(bits)):
        output_matrices.append(zeros(2**nqubits, 1))
    # bit_masks[j] selects the j-th measured bit out of a basis index.
    bit_masks = []
    for bit in bits:
        bit_masks.append(1 << bit)
    # Route each basis-state amplitude into the output state that matches
    # the values of its measured bits.
    for i in range(2**nqubits):
        outcome_index = 0
        for j in range(len(bit_masks)):
            if i & bit_masks[j]:
                # Bug fix: was `+= j + 1`, which collides for three or more
                # measured bits (e.g. measured bits {0,1} and bit {2} alone
                # both mapped to index 3). The j-th measured bit contributes
                # 2**j to the outcome's binary index. For one or two measured
                # bits the two formulas coincide, so behavior there is
                # unchanged.
                outcome_index += 1 << j
        output_matrices[outcome_index][i] = m[i]
    return output_matrices
def measure_all_oneshot(qubit, format='sympy'):
    """Perform a oneshot ensemble measurement on all qubits.

    A oneshot measurement is equivalent to performing a measurement on a
    quantum system. This type of measurement does not return the probabilities
    like an ensemble measurement does, but rather returns *one* of the
    possible resulting states. The exact state that is returned is determined
    by picking a state randomly according to the ensemble probabilities.

    Parameters
    ----------
    qubits : Qubit
        The qubit to measure. This can be any Qubit or a linear combination
        of them.
    format : str
        The format of the intermediate matrices to use. Possible values are
        ('sympy','numpy','scipy.sparse'). Currently only 'sympy' is
        implemented.

    Returns
    -------
    result : Qubit
        The qubit that the system collapsed to upon measurement.
    """
    import random
    # Bug fix: forward `format` to the conversion. It was previously dropped
    # (`qubit_to_matrix(qubit)`), so the requested format was silently
    # ignored during conversion.
    m = qubit_to_matrix(qubit, format)
    if format == 'sympy':
        m = m.normalized()
        # Walk the cumulative |amplitude|^2 distribution until it crosses a
        # uniform random threshold; `result` is then the measured basis index.
        random_number = random.random()
        total = 0
        result = 0
        for i in m:
            total += i*i.conjugate()
            if total > random_number:
                break
            result += 1
        return Qubit(IntQubit(result, int(math.log(max(m.shape), 2) + .1)))
    else:
        raise NotImplementedError(
            "This function can't handle non-sympy matrix formats yet"
        )
| |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import json
import re
import subprocess
import sys
from pycoin import encoding
from pycoin.ecdsa.secp256k1 import secp256k1_generator
from pycoin.serialize import b2h, h2b
from pycoin.key import Key
from pycoin.key.key_from_text import key_from_text
from pycoin.key.BIP32Node import BIP32Node
from pycoin.networks import full_network_name_for_netcode, network_name_for_netcode, network_codes
from pycoin.networks.default import get_current_netcode
from pycoin.ui import address_for_pay_to_script
from pycoin.tx.pay_to.ScriptPayToAddressWit import ScriptPayToAddressWit
# Compressed (02/03 + 64 hex digits) or uncompressed (04 + 128 hex digits)
# SEC-encoded public key.
SEC_RE = re.compile(r"^(0[23][0-9a-fA-F]{64})|(04[0-9a-fA-F]{128})$")
# 20-byte hash160 rendered as 40 hex characters.
HASH160_RE = re.compile(r"^([0-9a-fA-F]{40})$")
def gpg_entropy():
    """Return 64 bytes of strong entropy from gpg, or b'' if gpg can't run."""
    try:
        proc = subprocess.Popen(
            ["gpg", "--gen-random", "2", "64"], stdout=subprocess.PIPE)
        stdout = proc.communicate()[0]
        return stdout
    except OSError:
        sys.stderr.write("warning: can't open gpg, can't use as entropy source\n")
        return b''
def get_entropy():
    """Collect at least 64 bytes of entropy from gpg and /dev/random.

    Returns the collected bytes; raises OSError when fewer than 64 bytes
    could be gathered from all sources.
    """
    entropy = bytearray()
    try:
        entropy.extend(gpg_entropy())
    except Exception:
        # Bug fix: warnings belong on stderr (gpg_entropy already writes its
        # warning to sys.stderr); these were going to sys.stdout, polluting
        # machine-readable output.
        print("warning: can't use gpg as entropy source", file=sys.stderr)
    try:
        # Robustness: close the device handle deterministically.
        with open("/dev/random", "rb") as f:
            entropy.extend(f.read(64))
    except Exception:
        print("warning: can't use /dev/random as entropy source", file=sys.stderr)
    entropy = bytes(entropy)
    if len(entropy) < 64:
        raise OSError("can't find sources of entropy")
    return entropy
def parse_as_number(s):
    """Parse *s* as a decimal integer, falling back to hexadecimal.

    Returns None when *s* is neither.
    """
    for base in (10, 16):
        try:
            return int(s, base)
        except ValueError:
            continue
    return None
def parse_as_secret_exponent(s):
    """Parse *s* as a secret exponent in (0, curve order); else None."""
    v = parse_as_number(s)
    if v is None:
        return None
    if 0 < v < secp256k1_generator.order():
        return v
    return None
def parse_as_public_pair(s):
    """Parse *s* as an EC public pair "x,y" or "x/y".

    The y part may be a number or the literal "even"/"odd", in which case
    y is recovered from x. Exits the process for an (x, y) pair that is
    not on the curve. Returns None when *s* doesn't look like a pair.

    NOTE(review): the "even"/"odd" branch returns a generator Point while
    the numeric branch returns a plain (x, y) tuple -- callers apparently
    accept both; confirm before unifying.
    """
    for c in ",/":
        if c in s:
            s0, s1 = s.split(c, 1)
            v0 = parse_as_number(s0)
            if v0:
                if s1 in ("even", "odd"):
                    is_y_odd = (s1 == "odd")
                    # presumably y_values_for_x returns (even_y, odd_y),
                    # given the bool index -- confirm against pycoin docs
                    y = secp256k1_generator.y_values_for_x(v0)[is_y_odd]
                    return secp256k1_generator.Point(v0, y)
                v1 = parse_as_number(s1)
                if v1:
                    if not secp256k1_generator.contains_point(v0, v1):
                        sys.stderr.write("invalid (x, y) pair\n")
                        sys.exit(1)
                    return (v0, v1)
def create_wallet_key_output(key, subkey_path, add_output):
    """Emit BIP32 wallet-key fields for *key* via *add_output*.

    Does nothing when *key* is not a BIP32 wallet key.
    """
    if not hasattr(key, "wallet_key"):
        return
    if subkey_path:
        add_output("subkey_path", subkey_path)
    add_output("wallet_key", key.wallet_key(as_private=key.is_private()))
    if key.is_private():
        add_output("public_version", key.wallet_key(as_private=False))
    child_number = key.child_index()
    if child_number >= 0x80000000:
        # Hardened child: show "<index>H" plus the raw child number.
        child_index = "%dH (%d)" % (child_number - 0x80000000, child_number)
    else:
        child_index = "%d" % child_number
    add_output("tree_depth", "%d" % key.tree_depth())
    add_output("fingerprint", b2h(key.fingerprint()))
    add_output("parent_fingerprint", b2h(key.parent_fingerprint()), "parent f'print")
    add_output("child_index", child_index)
    add_output("chain_code", b2h(key.chain_code()))
    add_output("private_key", "yes" if key.is_private() else "no")
def create_public_pair_output(key, add_output):
    """Emit the public-pair and SEC serializations of *key*.

    Does nothing when *key* has no public pair.
    """
    public_pair = key.public_pair()
    if not public_pair:
        return
    px, py = public_pair[0], public_pair[1]
    add_output("public_pair_x", '%d' % px)
    add_output("public_pair_y", '%d' % py)
    add_output("public_pair_x_hex", '%x' % px, " x as hex")
    add_output("public_pair_y_hex", '%x' % py, " y as hex")
    add_output("y_parity", "odd" if (py & 1) else "even")
    add_output("key_pair_as_sec", b2h(key.sec(use_uncompressed=False)))
    add_output("key_pair_as_sec_uncompressed", b2h(key.sec(use_uncompressed=True)), " uncompressed")
def create_hash160_output(key, add_output, output_dict):
    """Emit hash160, address and segwit-address fields for *key*.

    Besides calling *add_output*, also records network-qualified address
    keys (e.g. "BTC_address") directly into *output_dict*.
    """
    network_name = network_name_for_netcode(key._netcode)
    # Compressed/uncompressed hash160 (either may be unavailable).
    hash160_c = key.hash160(use_uncompressed=False)
    hash160_u = key.hash160(use_uncompressed=True)
    hash160 = hash160_c or hash160_u
    if hash160:
        add_output("hash160", b2h(hash160))
    if hash160_c and hash160_u:
        add_output("hash160_uncompressed", b2h(hash160_u), " uncompressed")
    if hash160:
        # Prefer the compressed address; fall back to uncompressed when
        # no compressed hash160 exists.
        address = key.address(use_uncompressed=hash160_c is None)
        add_output("address", address, "%s address" % network_name)
        output_dict["%s_address" % key._netcode] = address
        if hash160_c and hash160_u:
            address = key.address(use_uncompressed=True)
            add_output("address_uncompressed", address, "%s address uncompressed" % network_name)
            output_dict["%s_address_uncompressed" % key._netcode] = address
    # don't print segwit addresses unless we're sure we have a compressed key
    if hash160_c:
        p2aw_script = ScriptPayToAddressWit(b'\0', hash160_c)
        address_segwit = p2aw_script.info()["address_f"](key._netcode)
        if address_segwit:
            # this network seems to support segwit
            add_output("address_segwit", address_segwit, "%s segwit address" % network_name)
            output_dict["%s_address_segwit" % key._netcode] = address_segwit
            p2sh_script = p2aw_script.script()
            p2s_address = address_for_pay_to_script(p2aw_script.script(), key._netcode)
            if p2s_address:
                add_output("p2sh_segwit", p2s_address)
                p2sh_script_hex = b2h(p2sh_script)
                add_output("p2sh_segwit_script", p2sh_script_hex, " corresponding p2sh script")
def create_output(item, key, subkey_path=None):
    """Build (output_dict, output_order) describing *key*.

    output_dict maps normalized json keys to values; output_order lists
    (json_key, human_readable_label) pairs in display order.
    """
    output_dict = {}
    output_order = []

    def add_output(json_key, value=None, human_readable_key=None):
        # A None value records a label-only row in the display order.
        if human_readable_key is None:
            human_readable_key = json_key.replace("_", " ")
        if value:
            output_dict[json_key.strip().lower()] = value
        output_order.append((json_key.lower(), human_readable_key))

    full_network_name = full_network_name_for_netcode(key._netcode)
    add_output("input", item)
    add_output("network", full_network_name)
    add_output("netcode", key._netcode)

    create_wallet_key_output(key, subkey_path, add_output)

    secret_exponent = key.secret_exponent()
    if secret_exponent:
        add_output("secret_exponent", '%d' % secret_exponent)
        add_output("secret_exponent_hex", '%x' % secret_exponent, " hex")
        add_output("wif", key.wif(use_uncompressed=False))
        add_output("wif_uncompressed", key.wif(use_uncompressed=True), " uncompressed")

    create_public_pair_output(key, add_output)
    create_hash160_output(key, add_output, output_dict)
    return output_dict, output_order
def dump_output(output_dict, output_order):
    """Pretty-print *output_dict* in *output_order* with aligned labels."""
    print('')
    width = max(len(label) for _, label in output_order)
    for json_key, label in output_order:
        value = output_dict.get(json_key)
        if value is None:
            # Label-only row (no value recorded for this key).
            print(label)
            continue
        if len(value) > 80:
            # Wrap long values: break after 66 chars with a trailing
            # backslash and indent the continuation under the value column.
            value = "%s\\\n%s%s" % (value[:66], ' ' * (5 + width), value[66:])
        print("%s: %s" % (label.ljust(width + 1), value))
def create_parser():
    """Build the argparse parser for the ku command-line tool."""
    codes = network_codes()
    parser = argparse.ArgumentParser(
        description='Crypto coin utility ku ("key utility") to show'
        ' information about Bitcoin or other cryptocoin data structures.',
        epilog=('Known networks codes:\n  ' +
                ', '.join(['%s (%s)' % (i, full_network_name_for_netcode(i)) for i in codes]))
    )
    # Output-selection flags: show only one field instead of the full dump.
    parser.add_argument('-w', "--wallet", help='show just Bitcoin wallet key', action='store_true')
    parser.add_argument('-W', "--wif", help='show just Bitcoin WIF', action='store_true')
    parser.add_argument('-a', "--address", help='show just Bitcoin address', action='store_true')
    parser.add_argument(
        '-u', "--uncompressed", help='show output in uncompressed form',
        action='store_true')
    parser.add_argument(
        '-P', "--public", help='only show public version of wallet keys',
        action='store_true')
    parser.add_argument('-j', "--json", help='output as JSON', action='store_true')
    parser.add_argument('-s', "--subkey", help='subkey path (example: 0H/2/15-20)')
    # Network selection: default comes from the environment/current netcode.
    parser.add_argument('-n', "--network", help='specify network',
                        default=get_current_netcode(), choices=codes)
    parser.add_argument("--override-network", help='override detected network type',
                        default=None, choices=codes)
    parser.add_argument(
        'item', nargs="+", help='a BIP0032 wallet key string;'
        ' a WIF;'
        ' a bitcoin address;'
        ' an SEC (ie. a 66 hex chars starting with 02, 03 or a 130 hex chars starting with 04);'
        ' the literal string "create" to create a new wallet key using strong entropy sources;'
        ' P:wallet passphrase (NOT RECOMMENDED);'
        ' H:wallet passphrase in hex (NOT RECOMMENDED);'
        ' E:electrum value (either a master public, master private, or initial data);'
        ' secret_exponent (in decimal or hex);'
        ' x,y where x,y form a public pair (y is a number or one of the strings "even" or "odd");'
        ' hash160 (as 40 hex characters)')
    return parser
def prefix_transforms_for_network(network):
    """Return (prefix, parser) pairs for key-creating input strings.

    Each pair maps an input prefix ("P:", "H:", "E:" or the literal
    "create") to a callable that builds a key from the rest of the string,
    using the given *network* netcode.
    """
    def _create_bip32(_):
        # BIP32Node.from_master_secret may raise ValueError for unusable
        # entropy; retry with fresh entropy a bounded number of times.
        max_retries = 64
        for _ in range(max_retries):
            try:
                return BIP32Node.from_master_secret(get_entropy(), netcode=network)
            except ValueError:
                # Idiom fix: the exception object was bound (`as e`) but
                # never used.
                continue
        # Probably a bug if we get here
        raise RuntimeError("can't create BIP32 key")
    return (
        ("P:", lambda s: BIP32Node.from_master_secret(s.encode("utf8"), netcode=network)),
        ("H:", lambda s: BIP32Node.from_master_secret(h2b(s), netcode=network)),
        ("E:", lambda s: key_from_text(s)),
        ("create", _create_bip32),
    )
def parse_prefixes(item, PREFIX_TRANSFORMS):
    """Try each (prefix, transform) on *item*, then Key.from_text.

    Returns the first successfully parsed key, or None.
    """
    for prefix, transform in PREFIX_TRANSFORMS:
        if not item.startswith(prefix):
            continue
        try:
            return transform(item[len(prefix):])
        except Exception:
            # A failed transform is not fatal; keep trying other forms.
            pass
    try:
        return Key.from_text(item)
    except encoding.EncodingError:
        pass
    return None
def parse_key(item, PREFIX_TRANSFORMS, network):
    """Parse *item* into a Key, trying every supported input format.

    Returns None when nothing matches.
    """
    key = parse_prefixes(item, PREFIX_TRANSFORMS)
    if key:
        return key
    if HASH160_RE.match(item):
        return Key(hash160=h2b(item), netcode=network)
    exponent = parse_as_secret_exponent(item)
    if exponent:
        return Key(secret_exponent=exponent, netcode=network)
    if SEC_RE.match(item):
        return Key.from_sec(h2b(item))
    pair = parse_as_public_pair(item)
    if pair:
        return Key(public_pair=pair, netcode=network)
    return None
def generate_output(args, output_dict, output_order):
    """Print key info according to the output-mode flags on *args*."""
    if args.json:
        # the python2 version of json.dumps puts an extra blank prior to the
        # end of each line; the "replace" makes python2 match python3 output.
        text = json.dumps(output_dict, indent=3, sort_keys=True)
        print(text.replace(" \n", "\n"))
    elif args.wallet:
        print(output_dict["wallet_key"])
    elif args.wif:
        print(output_dict["wif_uncompressed" if args.uncompressed else "wif"])
    elif args.address:
        wanted = "address" + ("_uncompressed" if args.uncompressed else "")
        print(output_dict[wanted])
    else:
        dump_output(output_dict, output_order)
def ku(args, parser):
    """Parse each input item into a key and print its information."""
    if args.override_network:
        # Force the network arg to match the override; it is also applied
        # to each decoded key below.
        args.network = args.override_network

    transforms = prefix_transforms_for_network(args.network)
    for item in args.item:
        key = parse_key(item, transforms, args.network)
        if key is None:
            print("can't parse %s" % item, file=sys.stderr)
            continue
        if args.override_network:
            # Override the network value, so we can take the same xpubkey and
            # view what the values would be on each other network type.
            # XXX public interface for this is needed...
            key._netcode = args.override_network
        for subkey in key.subkeys(args.subkey or ""):
            if args.public:
                subkey = subkey.public_copy()
            output_dict, output_order = create_output(item, subkey)
            generate_output(args, output_dict, output_order)
def main():
    """Entry point: parse command-line arguments and dispatch to ku()."""
    parser = create_parser()
    args = parser.parse_args()
    ku(args, parser)
if __name__ == '__main__':
    main()
| |
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
r"""Database of static info for (text) languages (e.g. Python, Perl, ...).
Basic usage:
>>> import langinfo
>>> py = langinfo.langinfo_from_lang("Python")
>>> py.name
'Python'
>>> py.exts
['.py', '.pyw']
>>> py.is_text
True
Advanced usage:
>>> lidb = langinfo.Database()
>>> lidb.langinfo_from_lang("HTML")
<HTML LangInfo>
    >>> lidb.langinfo_from_filename("Makefile")
    <Makefile LangInfo>
    >>> lidb.langinfo_from_ext(".pm")
    <Perl LangInfo>
    >>> lidb.langinfo_from_magic("#!/usr/bin/env ruby")
    <Ruby LangInfo>
    >>> lidb.langinfo_from_doctype(public_id="-//W3C//DTD HTML 4.01//EN")
    <HTML LangInfo>
The advanced usage allows one to customize how the langinfo database is
built. For example, specified 'dirs' will be searched for
'langinfo_*.py' files that can add to the database. (This can be used to
allow Komodo extensions to add/override language info.)
"""
# TODO:
# - add other Komodo languages
# - langinfo_komodo.py, langinfo_apple.py, langinfo_microsoft.py,
# langinfo_adobe.py
# - Python: .pth, .egg-info
# - some XML langs to add: DocBook, Atom, Dita,
# RDF, RSS (various versions?), RelaxNG, XML Schema, XSLT.
# ODF, UBL (these mentioned by Tim Bray, http://www.tbray.org/ongoing/When/200x/2006/01/08/No-New-XML-Languages)
# others?
# TODO: .wiki (for google code)
# TODO: .*rc files?
# TODO: .cvsignore
# TODO: .pyx? .pxd? .pyd? (see tm/check/contrib/pyyaml/ext/)
# TODO: .deb, .rpm
# - .phpt (in PHP tree)
# TODO: http://en.wikipedia.org/wiki/Adobe_Flash#Related_file_formats_and_extensions
# TODO: text .nib's, .plist, .pbxuser, .pbxproj, .m, .strings,
# TODO: "/Library/Application Support/Apple/Developer Tools/Quartz Composer/Clips/Cubic.qtz"
# not recognized as "data", but it *is* by `file`.
# Module version, as a tuple and the equivalent dotted string.
__version_info__ = (1, 0, 0)
__version__ = '.'.join(map(str, __version_info__))
import os
from os.path import join, dirname, abspath, basename, exists
import sys
import re
from pprint import pprint
from glob import glob
import traceback
import logging
import optparse
import types
import struct
import warnings
import operator
#---- exceptions and warnings
class LangInfoError(Exception):
    """Error raised for failed langinfo lookups (e.g. unknown language)."""
    pass
class InvalidLangInfoWarning(Warning):
    """Warning for malformed LangInfo data (e.g. a bad magic-number decl)."""
    pass
# Only warn once per distinct invalid-langinfo message.
warnings.simplefilter("once", InvalidLangInfoWarning)
#---- globals
# Module-level logger; level is left at the default (uncomment to debug).
log = logging.getLogger("langinfo")
# log.setLevel(logging.DEBUG)
#---- module API
def langinfo_from_lang(lang):
    """Return the LangInfo for *lang* from the default database."""
    db = get_default_database()
    return db.langinfo_from_lang(lang)
#---- base LangInfo definition
class LangInfo(object):
"""Base language info class. A subclass of LangInfo defines static
information about a particular text language (e.g. Python, Perl,
CSS, ...).
The following are the "core" attributes for a LangInfo. Subclasses
can feel free to define others, as makes sense for that language.
"""
name = None # a display name (i.e. appropriate for prose, display)
# Used for identifying files of this language.
exts = None
filename_patterns = None
magic_numbers = None
doctypes = None
specialization_hints_from_lang = None
# Values for Emacs `mode` var or Vi modeline `ft' or `filetype',
# other than `name', that identify lang.
emacs_modes = None
vi_filetypes = None
# An optional key for specifying precedence for `magic_numbers`
# usage. If not given the key is `(name, 0)`. Then, for example,
# to ensure magic number checks before Python, one could set
# _magic_number_precedence = ('Python', -1)
_magic_number_precedence = None
# Some languages mandate a default encoding, e.g. for Python it is
# ASCII, for XML UTF-8.
default_encoding = None
encoding_decl_pattern = None # Regex matching an encoding declaration.
# A set of lang names to which this language conforms. For example,
# RDF conforms to XML. See `conforms_to()` below.
#
# This is based on the UTI (Uniform Type Identifier) conforms-to
# idea from Mac OS X:
# http://arstechnica.com/reviews/os/macosx-10-4.ars/11
# http://developer.apple.com/macosx/uniformtypeidentifiers.html
# http://developer.apple.com/documentation/Carbon/Conceptual/understanding_utis/understand_utis_intro/chapter_1_section_1.html
conforms_to_bases = None
# Misc. properties
has_significant_trailing_ws = False
def __init__(self, db):
self._db = db
def __repr__(self):
return "<%s LangInfo>" % self.name
@property
def is_text(self):
"""Convenience property to check if this lang is plain text."""
return self.conforms_to("Text")
def conforms_to(self, lang):
"""Returns True iff this language conforms to the given `lang`."""
if lang == self.name:
return True
if self.conforms_to_bases:
if lang in self.conforms_to_bases:
return True
for base in self.conforms_to_bases:
try:
base_li = self._db.langinfo_from_lang(base)
except LangInfoError:
pass
else:
if base_li.conforms_to(lang):
return True
return False
def conformant_attr(self, attr):
"""Returns the value of the given attr, inheriting from the
`conforms_to_bases` languages if not directly defined for this
language.
"""
if hasattr(self, attr):
val = getattr(self, attr)
if val is not None:
return val
for base in self.conforms_to_bases or []:
try:
base_li = self._db.langinfo_from_lang(base)
except LangInfoError:
pass
else:
val = base_li.conformant_attr(attr)
if val is not None:
return val
return None
#---- LangInfo classes (most are defined in separate langinfo_*.py files)
class TextLangInfo(LangInfo):
    """Plain text: the base language most other text langs conform to."""
    name = "Text"
    exts = ['.txt', '.text']
    filename_patterns = ["README", "COPYING", "LICENSE", "MANIFEST"]
def _generateFallbackKoLangInfo(langinfo_db, koLangInst):
    """Generate a LangInfo instance from the koILanguage instance."""
    class FallbackKoLangInfo(LangInfo):
        # Minimal stand-in: conforms to plain text, defaults to UTF-8.
        conforms_to_bases = ["Text"]
        default_encoding = "utf-8"
        def __init__(self, db, koLang):
            LangInfo.__init__(self, db)
            self.name = koLang.name
            # Only register an extension when the koILanguage defines one.
            if koLang.defaultExtension:
                self.exts = [koLang.defaultExtension]
    return FallbackKoLangInfo(langinfo_db, koLangInst)
#---- the Database
class Database(object):
def __init__(self, dirs=None):
self._langinfo_from_norm_lang = {}
self._langinfo_from_ext = None
self._langinfo_from_filename = None
self._langinfo_from_filename_re = None
self._magic_table = None
self._li_from_doctype_public_id = None
self._li_from_doctype_system_id = None
self._li_from_emacs_mode = None
self._li_from_vi_filetype = None
self._li_from_norm_komodo_lang = None
self._load()
if dirs is None:
dirs = []
dirs.insert(0, dirname(__file__) or os.curdir)
for dir in dirs:
self._load_dir(dir)
self.dirs = dirs
def langinfos(self):
for li in list(self._langinfo_from_norm_lang.values()):
yield li
def langinfo_from_lang(self, lang):
norm_lang = self._norm_lang_from_lang(lang)
if norm_lang not in self._langinfo_from_norm_lang:
raise LangInfoError("no info on %r lang" % lang)
return self._langinfo_from_norm_lang[norm_lang]
def langinfo_from_komodo_lang(self, komodo_lang, tryFallback=True):
"""Return a langinfo for the given Komodo language name.
There are some minor differences in Komodo language names and
those in langinfo (e.g. "Django" in Komodo vs "Django HTML
Template" in langinfo).
"""
if self._li_from_norm_komodo_lang is None:
self._build_tables()
norm_komodo_lang = self._norm_lang_from_lang(komodo_lang)
if norm_komodo_lang in self._li_from_norm_komodo_lang:
return self._li_from_norm_komodo_lang[norm_komodo_lang]
elif norm_komodo_lang in self._langinfo_from_norm_lang:
return self._langinfo_from_norm_lang[norm_komodo_lang]
elif tryFallback:
# If a koILanguage exists for this lang, then create a fallback
# langinfo for it - this occurs when a user has defined an add-on or
# language, but they haven't made a langinfo.py for it.
try:
from xpcom import components
except ImportError:
pass # no xpcom
else:
langSvc = components.classes["@activestate.com/koLanguageRegistryService;1"] \
.getService(components.interfaces.koILanguageRegistryService)
# Note: When the language does not exist, we get a fallback of
# koILang.Text
koLang = langSvc.getLanguage(komodo_lang)
if koLang is not None and koLang.name in (komodo_lang, norm_komodo_lang):
# Someone's defined a koILanguage for this lang - create
# dummy langinfo for it.
log.warn(
"no LangInfo class found for %r, creating a fallback for it",
komodo_lang)
self._langinfo_from_norm_lang[
norm_komodo_lang] = _generateFallbackKoLangInfo(self, koLang)
self._build_tables()
return self.langinfo_from_komodo_lang(komodo_lang, tryFallback=False)
raise LangInfoError("no info on %r komodo lang" % komodo_lang)
def langinfo_from_emacs_mode(self, emacs_mode):
if self._li_from_emacs_mode is None:
self._build_tables()
if emacs_mode in self._li_from_emacs_mode:
return self._li_from_emacs_mode[emacs_mode]
norm_lang = self._norm_lang_from_lang(emacs_mode)
if norm_lang in self._langinfo_from_norm_lang:
return self._langinfo_from_norm_lang[norm_lang]
def langinfo_from_vi_filetype(self, vi_filetype):
if self._li_from_vi_filetype is None:
self._build_tables()
if vi_filetype in self._li_from_vi_filetype:
return self._li_from_vi_filetype[vi_filetype]
norm_lang = self._norm_lang_from_lang(vi_filetype)
if norm_lang in self._langinfo_from_norm_lang:
return self._langinfo_from_norm_lang[norm_lang]
def langinfo_from_ext(self, ext):
"""Return an appropriate LangInfo for the given filename extension,
or None.
"""
if self._langinfo_from_ext is None:
self._build_tables()
if sys.platform in ("win32", "darwin"): # Case-insensitive filesystems.
ext = ext.lower()
return self._langinfo_from_ext.get(ext)
def langinfo_from_filename(self, filename):
"""Return an appropriate LangInfo for the given filename, or None."""
if self._langinfo_from_filename is None:
self._build_tables()
if filename in self._langinfo_from_filename:
return self._langinfo_from_filename[filename]
else:
for regex, li in list(self._langinfo_from_filename_re.items()):
if regex.search(filename):
return li
def langinfo_from_magic(self, head_bytes, shebang_only=False):
"""Attempt to identify the appropriate LangInfo from the magic number
in the file. This mimics some of the behaviour of GNU file.
@param head_bytes {string} is a string of 8-bit char bytes or a
unicode string from the head of the document.
@param shebang_only {boolean} can be set to true to only process
magic number records for shebang lines (a minor perf
improvement).
"""
if self._magic_table is None:
self._build_tables()
for magic_number, li, sort_key in self._magic_table:
try:
start, format, pattern = magic_number
except ValueError:
# Silently drop bogus magic number decls.
continue
if shebang_only and format != "regex":
continue
if format == "string":
end = start + len(pattern)
if head_bytes[start:end] == pattern:
return li
elif format == "regex":
if pattern.search(head_bytes, start):
return li
else: # a struct format
try:
length = struct.calcsize(format)
except struct.error as ex:
warnings.warn("error in %s magic number struct format: %r"
% (li, format),
InvalidLangInfoWarning)
end = start + length
bytes = head_bytes[start:end]
if len(bytes) == length:
if struct.unpack(format, bytes)[0] == pattern:
return li
def langinfo_from_doctype(self, public_id=None, system_id=None):
"""Return a LangInfo instance matching any of the specified
pieces of doctype info, or None if no match is found.
The behaviour when doctype info from multiple LangInfo classes
collide is undefined (in the current impl, the last one wins).
Notes on doctype info canonicalization:
- I'm not sure if there is specified canonicalization of
doctype names or public-ids, but matching is done
case-insensitively here.
- Technically doctype system-id comparison is of URI (with
non-trivial but well-defined canonicalization rules). For
simplicity we just compare case-insensitively.
"""
if self._li_from_doctype_public_id is None:
self._build_tables()
if public_id is not None \
and public_id in self._li_from_doctype_public_id:
return self._li_from_doctype_public_id[public_id]
if system_id is not None \
and system_id in self._li_from_doctype_system_id:
return self._li_from_doctype_system_id[system_id]
def specialized_langinfo_from_content(self, li, text):
hints, specialized_li = self._specialization_hints_from_lang.get(
li.name, (None, None))
if not hints:
return None
for hint_str, hint_re in hints:
if hint_str not in text:
continue
if hint_re and not hint_re.search(text):
continue
return specialized_li
return None
def _build_tables(self):
self._langinfo_from_ext = {}
self._langinfo_from_filename = {}
self._langinfo_from_filename_re = {}
self._magic_table = []
# list of (<magic-tuple>, <langinfo>, <sort-key>)
self._li_from_doctype_public_id = {}
self._li_from_doctype_system_id = {}
self._li_from_emacs_mode = {}
self._li_from_vi_filetype = {}
self._li_from_norm_komodo_lang = {}
self._specialization_hints_from_lang = {
} # <lang> -> (<hint>, <specialized-langinfo>)
for li in list(self._langinfo_from_norm_lang.values()):
if li.exts:
for ext in li.exts:
if not ext.startswith('.'):
log.warn("exts must start with '.': ext %r for "
"lang %r", ext, li.name)
if sys.platform in ("win32", "darwin"):
ext = ext.lower()
do_replace = True
if ext in self._langinfo_from_ext:
current_li = self._langinfo_from_ext[ext]
variant = getattr(li, "is_minor_variant", None)
if variant is not None:
log.debug(
"ext update: ext: %s, %r is a minor variant of %r",
ext, li, current_li)
elif ext.startswith(".py"):
log.debug(
"ext update: ext: %s, %r is *not* a minor variant of %r",
ext, li, current_li)
if variant is not None and variant.name == current_li.name:
log.debug(
"ext update: found variant for ext %s, li:%r, using:%r",
ext, li, current_li)
do_replace = False
else:
variant = getattr(
current_li, "is_minor_variant", None)
if variant is None or variant.name != li.name:
log.debug("ext conflict: %r for %r conflicts "
"with the same for %r (%r wins)", ext, li,
self._langinfo_from_ext[ext], li)
else:
log.debug(
"ext conflict: ext:%s, replace variant %r with %r",
ext, current_li, li)
if do_replace:
self._langinfo_from_ext[ext] = li
if li.filename_patterns:
for pat in li.filename_patterns:
if isinstance(pat, str):
self._langinfo_from_filename[pat] = li
else:
self._langinfo_from_filename_re[pat] = li
if li.magic_numbers:
sort_key = li._magic_number_precedence or (li.name, 0)
for mn in li.magic_numbers:
self._magic_table.append((mn, li, sort_key))
if li.doctypes:
for dt in li.doctypes:
try:
flavour, name, public_id, system_id = dt
except ValueError:
log.debug("invalid doctype tuple for %r: %r "
"(dropping it)", li, dt)
continue
if public_id:
self._li_from_doctype_public_id[public_id] = li
if system_id:
self._li_from_doctype_system_id[system_id] = li
if li.emacs_modes:
for em in li.emacs_modes:
self._li_from_emacs_mode[em] = li
if li.vi_filetypes:
for em in li.vi_filetypes:
self._li_from_vi_filetypes[em] = li
if hasattr(li, "komodo_name"):
norm_komodo_lang = self._norm_lang_from_lang(li.komodo_name)
self._li_from_norm_komodo_lang[norm_komodo_lang] = li
if li.specialization_hints_from_lang:
for lang, hint in list(li.specialization_hints_from_lang.items()):
self._specialization_hints_from_lang[lang] = (hint, li)
self._magic_table.sort(key=operator.itemgetter(2))
def _norm_lang_from_lang(self, lang):
return lang.lower()
def _load(self):
    """Register every LangInfo subclass defined at this module's top level."""
    for attr in list(globals().values()):
        if (isinstance(attr, type)
                and issubclass(attr, LangInfo)
                and attr is not LangInfo):
            norm_lang = self._norm_lang_from_lang(attr.name)
            self._langinfo_from_norm_lang[norm_lang] = attr(self)
def _load_dir(self, d):
    """Load LangInfo classes from `langinfo_*.py` modules in dir `d`.

    Modules that fail to import are logged (and skipped) rather than
    aborting the whole load.
    """
    for path in glob(join(d, "langinfo_*.py")):
        try:
            module = _module_from_path(path)
        except Exception as ex:
            # `Logger.warn` is a deprecated alias; use `warning`.
            log.warning("could not import `%s': %s", path, ex)
            continue
        for name in dir(module):
            attr = getattr(module, name)
            if (not name.startswith("_")  # skip internal bases
                    and isinstance(attr, type)
                    and issubclass(attr, LangInfo)
                    and attr is not LangInfo):
                norm_lang = self._norm_lang_from_lang(attr.name)
                self._langinfo_from_norm_lang[norm_lang] = attr(self)
#---- internal support stuff

# Lazily-created process-wide Database singleton, and the dirs it was built
# from. Managed by set_default_dirs()/get_default_database().
_g_default_database = None
_g_default_dirs = None
def set_default_dirs(dirs):
    """Set the directories used to build the default database.

    Invalidates the cached default database when the dirs actually change.
    """
    global _g_default_dirs, _g_default_database
    if dirs == _g_default_dirs:
        return
    _g_default_dirs = dirs
    _g_default_database = None  # force a lazy rebuild on next access
def get_default_database():
    """Return the shared default Database, creating it lazily on first use."""
    global _g_default_dirs, _g_default_database
    if _g_default_database is not None:
        return _g_default_database
    _g_default_database = Database(dirs=_g_default_dirs)
    return _g_default_database
# Recipe: module_from_path (1.0.1+)
def _module_from_path(path):
import imp
import os
dir = os.path.dirname(path) or os.curdir
name = os.path.splitext(os.path.basename(path))[0]
iinfo = imp.find_module(name, [dir])
return imp.load_module(name, *iinfo)
#---- self-test

def _test():
    """Run this module's doctests."""
    import doctest
    doctest.testmod()
if __name__ == "__main__":
    # Running the module directly executes its doctest-based self-test.
    _test()
| |
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for manipulating blocks and transactions."""
import struct
import time
import unittest
from .address import (
key_to_p2sh_p2wpkh,
key_to_p2wpkh,
script_to_p2sh_p2wsh,
script_to_p2wsh,
)
from .messages import (
CBlock,
COIN,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
hash256,
ser_uint256,
tx_from_hex,
uint256_from_str,
)
from .script import (
CScript,
CScriptNum,
CScriptOp,
OP_1,
OP_CHECKMULTISIG,
OP_RETURN,
OP_TRUE,
)
from .script_util import (
key_to_p2pk_script,
key_to_p2wpkh_script,
script_to_p2wsh_script,
)
from .util import assert_equal
# BIP141: non-witness bytes count WITNESS_SCALE_FACTOR times toward block
# weight.
WITNESS_SCALE_FACTOR = 4
# Maximum legacy signature operations per block (consensus rule).
MAX_BLOCK_SIGOPS = 20000
MAX_BLOCK_SIGOPS_WEIGHT = MAX_BLOCK_SIGOPS * WITNESS_SCALE_FACTOR

# Genesis block time (regtest)
TIME_GENESIS_BLOCK = 1296688602

# Coinbase transaction outputs can only be spent after this number of new blocks (network rule)
COINBASE_MATURITY = 100

# From BIP141: magic prefix of the coinbase witness-commitment output.
WITNESS_COMMITMENT_HEADER = b"\xaa\x21\xa9\xed"

# Baseline getblocktemplate request (the segwit rule must be requested).
NORMAL_GBT_REQUEST_PARAMS = {"rules": ["segwit"]}
# Last block version in use before versionbits (per the constant's name).
VERSIONBITS_LAST_OLD_BLOCK_VERSION = 4
def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl=None, txlist=None):
    """Create a block (with regtest difficulty).

    Args:
        hashprev: previous block hash (int); falls back to
            tmpl['previousblockhash'] parsed as hex.
        coinbase: coinbase tx object; created from tmpl['height'] if omitted.
        ntime: block time; falls back to tmpl['curtime'], then now + 600s.
        version: block version; falls back to tmpl['version'], then the
            last pre-versionbits version.
        tmpl: getblocktemplate-style dict supplying fallback field values.
        txlist: transactions (objects or raw hex strings) appended after
            the coinbase.
    """
    block = CBlock()
    if tmpl is None:
        tmpl = {}
    block.nVersion = version or tmpl.get('version') or VERSIONBITS_LAST_OLD_BLOCK_VERSION
    block.nTime = ntime or tmpl.get('curtime') or int(time.time() + 600)
    block.hashPrevBlock = hashprev or int(tmpl['previousblockhash'], 0x10)
    # PEP 8 (E714): "x is not None", not "not x is None".
    if tmpl and tmpl.get('bits') is not None:
        block.nBits = struct.unpack('>I', bytes.fromhex(tmpl['bits']))[0]
    else:
        block.nBits = 0x207fffff  # difficulty retargeting is disabled in REGTEST chainparams
    if coinbase is None:
        coinbase = create_coinbase(height=tmpl['height'])
    block.vtx.append(coinbase)
    if txlist:
        for tx in txlist:
            if not hasattr(tx, 'calc_sha256'):
                # Accept raw hex strings as well as tx objects.
                tx = tx_from_hex(tx)
            block.vtx.append(tx)
    block.hashMerkleRoot = block.calc_merkle_root()
    block.calc_sha256()
    return block
def get_witness_script(witness_root, witness_nonce):
    """Return the coinbase OP_RETURN script carrying the BIP141 witness commitment."""
    commitment_preimage = ser_uint256(witness_root) + ser_uint256(witness_nonce)
    witness_commitment = uint256_from_str(hash256(commitment_preimage))
    output_data = WITNESS_COMMITMENT_HEADER + ser_uint256(witness_commitment)
    return CScript([OP_RETURN, output_data])
def add_witness_commitment(block, nonce=0):
    """Add a witness commitment to the block's coinbase transaction.

    According to BIP141, blocks with witness rules active must commit to the
    hash of all in-block transactions including witness."""
    # First calculate the merkle root of the block's
    # transactions, with witnesses.
    witness_nonce = nonce
    witness_root = block.calc_witness_merkle_root()
    # witness_nonce should go to coinbase witness.
    block.vtx[0].wit.vtxinwit = [CTxInWitness()]
    block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(witness_nonce)]
    # witness commitment is the last OP_RETURN output in coinbase
    block.vtx[0].vout.append(CTxOut(0, get_witness_script(witness_root, witness_nonce)))
    # Appending the commitment output changed the coinbase txid, so re-hash
    # the coinbase, recompute the merkle root, and re-hash the block header.
    block.vtx[0].rehash()
    block.hashMerkleRoot = block.calc_merkle_root()
    block.rehash()
def script_BIP34_coinbase_height(height):
    """Return a BIP34-style coinbase scriptSig encoding `height`."""
    if height > 16:
        return CScript([CScriptNum(height)])
    # Small heights encode as a single opcode; append OP_1 as a dummy so the
    # scriptSig size exceeds 2 (see bad-cb-length consensus rule).
    encoded = CScriptOp.encode_op_n(height)
    return CScript([encoded, OP_1])
def create_coinbase(height, pubkey=None, extra_output_script=None, fees=0, nValue=50):
    """Create a coinbase transaction.

    If pubkey is passed in, the coinbase output will be a P2PK output;
    otherwise an anyone-can-spend output.

    If extra_output_script is given, make a 0-value output to that
    script. This is useful to pad block weight/sigops as needed.
    """
    coinbase = CTransaction()
    coinbase.vin.append(CTxIn(COutPoint(0, 0xffffffff), script_BIP34_coinbase_height(height), 0xffffffff))
    coinbaseoutput = CTxOut()
    coinbaseoutput.nValue = nValue * COIN
    if nValue == 50:
        # Regtest halving interval is 150 blocks. Use floor division instead
        # of int(height / 150) to avoid the needless float round-trip.
        halvings = height // 150
        coinbaseoutput.nValue >>= halvings
        coinbaseoutput.nValue += fees
    if pubkey is not None:
        coinbaseoutput.scriptPubKey = key_to_p2pk_script(pubkey)
    else:
        coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
    coinbase.vout = [coinbaseoutput]
    if extra_output_script is not None:
        coinbaseoutput2 = CTxOut()
        coinbaseoutput2.nValue = 0
        coinbaseoutput2.scriptPubKey = extra_output_script
        coinbase.vout.append(coinbaseoutput2)
    coinbase.calc_sha256()
    return coinbase
def create_tx_with_script(prevtx, n, script_sig=b"", *, amount, script_pub_key=CScript()):
    """Return one-input, one-output transaction object
    spending the prevtx's n-th output with the given amount.

    Can optionally pass scriptPubKey and scriptSig, default is anyone-can-spend output.
    """
    assert n < len(prevtx.vout)
    tx = CTransaction()
    tx.vin = [CTxIn(COutPoint(prevtx.sha256, n), script_sig, 0xffffffff)]
    tx.vout = [CTxOut(amount, script_pub_key)]
    tx.calc_sha256()
    return tx
def create_transaction(node, txid, to_address, *, amount):
    """ Return signed transaction spending the first output of the
    input txid. Note that the node must have a wallet that can
    sign for the output that is being spent.
    """
    signed_hex = create_raw_transaction(node, txid, to_address, amount=amount)
    return tx_from_hex(signed_hex)
def create_raw_transaction(node, txid, to_address, *, amount):
    """ Return raw signed transaction spending the first output of the
    input txid. Note that the node must have a wallet that can sign
    for the output that is being spent.
    """
    psbt = node.createpsbt(inputs=[{"txid": txid, "vout": 0}], outputs={to_address: amount})
    # Two passes over all loaded wallets (presumably so signatures that
    # depend on other wallets' signatures still converge — confirm).
    for _ in range(2):
        for wallet_name in node.listwallets():
            wallet_rpc = node.get_wallet_rpc(wallet_name)
            psbt = wallet_rpc.walletprocesspsbt(psbt)['psbt']
    final_psbt = node.finalizepsbt(psbt)
    assert_equal(final_psbt["complete"], True)
    return final_psbt['hex']
def get_legacy_sigopcount_block(block, accurate=True):
    """Sum the legacy sigop counts of all transactions in `block`."""
    return sum(get_legacy_sigopcount_tx(tx, accurate) for tx in block.vtx)
def get_legacy_sigopcount_tx(tx, accurate=True):
    """Return the legacy signature-operation count of a single transaction."""
    total = sum(out.scriptPubKey.GetSigOpCount(accurate) for out in tx.vout)
    # scriptSig might be of type bytes, so convert to CScript for the moment
    total += sum(CScript(txin.scriptSig).GetSigOpCount(accurate) for txin in tx.vin)
    return total
def witness_script(use_p2wsh, pubkey):
    """Create a scriptPubKey for a pay-to-witness TxOut.

    This is either a P2WPKH output for the given pubkey, or a P2WSH output of a
    1-of-1 multisig for the given pubkey. Returns the hex encoding of the
    scriptPubKey."""
    if use_p2wsh:
        # 1-of-1 multisig, wrapped in P2WSH. (Local renamed from
        # `witness_script`, which shadowed this function's own name.)
        redeem_script = CScript([OP_1, bytes.fromhex(pubkey), OP_1, OP_CHECKMULTISIG])
        pkscript = script_to_p2wsh_script(redeem_script)
    else:
        # P2WPKH instead
        pkscript = key_to_p2wpkh_script(pubkey)
    return pkscript.hex()
def create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount):
    """Return a transaction (in hex) that spends the given utxo to a segwit output.

    Optionally wrap the segwit output using P2SH."""
    if use_p2wsh:
        program = CScript([OP_1, bytes.fromhex(pubkey), OP_1, OP_CHECKMULTISIG])
        if encode_p2sh:
            addr = script_to_p2sh_p2wsh(program)
        else:
            addr = script_to_p2wsh(program)
    else:
        if encode_p2sh:
            addr = key_to_p2sh_p2wpkh(pubkey)
        else:
            addr = key_to_p2wpkh(pubkey)
    if not encode_p2sh:
        # Sanity-check that the node derives the same witness scriptPubKey.
        assert_equal(node.getaddressinfo(addr)['scriptPubKey'], witness_script(use_p2wsh, pubkey))
    return node.createrawtransaction([utxo], {addr: amount})
def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=True, insert_redeem_script=""):
    """Create a transaction spending a given utxo to a segwit output.

    The output corresponds to the given pubkey: use_p2wsh determines whether to
    use P2WPKH or P2WSH; encode_p2sh determines whether to wrap in P2SH.
    sign=True will have the given node sign the transaction.
    insert_redeem_script will be added to the scriptSig, if given."""
    tx_to_witness = create_witness_tx(node, use_p2wsh, utxo, pubkey, encode_p2sh, amount)
    if sign:
        signed = node.signrawtransactionwithwallet(tx_to_witness)
        # BUG FIX: this previously read `len(["errors"]) == 0`, the length of
        # a literal one-element list (always 1, so always False) — the assert
        # only held when "errors" was absent entirely. Check the actual list.
        assert "errors" not in signed or len(signed["errors"]) == 0
        return node.sendrawtransaction(signed["hex"])
    else:
        if insert_redeem_script:
            tx = tx_from_hex(tx_to_witness)
            tx.vin[0].scriptSig += CScript([bytes.fromhex(insert_redeem_script)])
            tx_to_witness = tx.serialize().hex()
        return node.sendrawtransaction(tx_to_witness)
class TestFrameworkBlockTools(unittest.TestCase):
    def test_create_coinbase(self):
        # The coinbase scriptSig must encode the requested height (BIP34).
        expected_height = 20
        coinbase_tx = create_coinbase(height=expected_height)
        assert_equal(
            CScriptNum.decode(coinbase_tx.vin[0].scriptSig), expected_height)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The Logistic distribution class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import common_shapes
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
class _Logistic(distribution.Distribution):
  """The scalar Logistic distribution with location and scale parameters.

  #### Mathematical details

  The CDF of this distribution is:

  ```cdf(x) = 1/(1+exp(-(x - loc) / scale))```

  with support on (-inf, inf).

  #### Examples

  Examples of initialization of one or a batch of distributions.

  ```python
  # Define a single scalar Logistic distribution.
  dist = tf.contrib.distributions.Logistic(loc=0., scale=3.)

  # Evaluate the cdf at 1, returning a scalar.
  dist.cdf(1.)

  # Define a batch of two scalar valued Logistics.
  # The first has mean 1 and scale 11, the second 2 and 22.
  dist = tf.contrib.distributions.Logistic(loc=[1, 2.], scale=[11, 22.])

  # Evaluate the pdf of the first distribution on 0, and the second on 1.5,
  # returning a length two tensor.
  dist.pdf([0, 1.5])

  # Get 3 samples, returning a 3 x 2 tensor.
  dist.sample([3])
  ```

  Arguments are broadcast when possible.

  ```python
  # Define a batch of two scalar valued Logistics.
  # Both have mean 1, but different scales.
  dist = tf.contrib.distributions.Logistic(loc=1., scale=[11, 22.])

  # Evaluate the pdf of both distributions on the same point, 3.0,
  # returning a length 2 tensor.
  dist.pdf(3.0)
  ```
  """

  def __init__(self,
               loc,
               scale,
               validate_args=False,
               allow_nan_stats=True,
               name="Logistic"):
    """Construct Logistic distributions with mean and scale `loc` and `scale`.

    The parameters `loc` and `scale` must be shaped in a way that supports
    broadcasting (e.g. `loc + scale` is a valid operation).

    Args:
      loc: Floating point tensor, the means of the distribution(s).
      scale: Floating point tensor, the scales of the distribution(s).
        scale must contain only positive values.
      validate_args: `Boolean`, default `False`. Whether to assert that
        `scale > 0`. If `validate_args` is `False`, correct output is not
        guaranteed when input is invalid.
      allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
        exception if a statistic (e.g. mean/mode/etc...) is undefined for any
        batch member. If `True`, batch members with valid parameters leading to
        undefined statistics will return NaN for this statistic.
      name: The name to give Ops created by the initializer.

    Raises:
      TypeError: if loc and scale are different dtypes.
    """
    parameters = locals()
    parameters.pop("self")
    with ops.name_scope(name, values=[loc, scale]) as ns:
      with ops.control_dependencies([check_ops.assert_positive(scale)] if
                                    validate_args else []):
        self._loc = array_ops.identity(loc, name="loc")
        self._scale = array_ops.identity(scale, name="scale")
        contrib_tensor_util.assert_same_float_dtype((self._loc, self._scale))
    super(_Logistic, self).__init__(
        dtype=self._scale.dtype,
        is_continuous=True,
        is_reparameterized=True,
        validate_args=validate_args,
        allow_nan_stats=allow_nan_stats,
        parameters=parameters,
        graph_parents=[self._loc, self._scale],
        name=ns)

  @staticmethod
  def _param_shapes(sample_shape):
    # Both `loc` and `scale` have the same shape as a single sample.
    return dict(
        zip(("loc", "scale"), ([ops.convert_to_tensor(
            sample_shape, dtype=dtypes.int32)] * 2)))

  @property
  def loc(self):
    """Distribution parameter for the location."""
    return self._loc

  @property
  def scale(self):
    """Distribution parameter for scale."""
    return self._scale

  def _batch_shape(self):
    # Dynamic batch shape: the broadcast of loc's and scale's shapes.
    return array_ops.shape(self.loc + self.scale)

  def _get_batch_shape(self):
    # Static batch shape: the broadcast of loc's and scale's static shapes.
    return common_shapes.broadcast_shape(self.loc.get_shape(),
                                         self.scale.get_shape())

  def _event_shape(self):
    # Scalar distribution: empty event shape.
    return constant_op.constant([], dtype=dtypes.int32)

  def _get_event_shape(self):
    return tensor_shape.scalar()

  def _sample_n(self, n, seed=None):
    # Inverse-CDF sampling: if U ~ Uniform(0, 1) then
    # loc + scale * logit(U) is Logistic(loc, scale).
    shape = array_ops.concat(0, ([n], array_ops.shape(self.mean())))
    np_dtype = self.dtype.as_numpy_dtype()
    # Smallest positive representable value, so log(uniform) stays finite.
    minval = np.nextafter(np_dtype(0), np_dtype(1))
    uniform = random_ops.random_uniform(shape=shape,
                                        minval=minval,
                                        maxval=1,
                                        dtype=self.dtype,
                                        seed=seed)
    sampled = math_ops.log(uniform) - math_ops.log(1-uniform)
    return sampled * self.scale + self.loc

  def _log_prob(self, x):
    # log pdf(x) = -z - log(scale) - 2 * softplus(-z), z = (x - loc) / scale.
    z = self._z(x)
    return - z - math_ops.log(self.scale) - 2*nn_ops.softplus(-z)

  def _prob(self, x):
    return math_ops.exp(self._log_prob(x))

  def _log_cdf(self, x):
    # log cdf(x) = log sigmoid(z) = -softplus(-z).
    # FIX: previously returned softplus(-z) (missing leading negation), i.e.
    # a positive value, which cannot be a log-probability.
    return -nn_ops.softplus(-self._z(x))

  def _cdf(self, x):
    return math_ops.sigmoid(self._z(x))

  def _log_survival_function(self, x):
    # log sf(x) = log sigmoid(-z) = -softplus(z).
    # FIX: previously returned softplus(z) (missing leading negation).
    return -nn_ops.softplus(self._z(x))

  def _survival_function(self, x):
    return math_ops.sigmoid(-self._z(x))

  def _entropy(self):
    # Use broadcasting rules to calculate the full broadcast sigma.
    scale = self.scale * array_ops.ones_like(self.loc)
    # Entropy of Logistic(loc, scale) is log(scale) + 2.
    return 2 + math_ops.log(scale)

  def _mean(self):
    return self.loc * array_ops.ones_like(self.scale)

  def _variance(self):
    return math_ops.square(self.std())

  def _std(self):
    # Standard deviation of Logistic(loc, scale) is scale * pi / sqrt(3).
    return self.scale * array_ops.ones_like(self.loc) * math.pi / math.sqrt(3)

  def _mode(self):
    # The Logistic density is symmetric and unimodal: mode == mean == loc.
    return self._mean()

  def _z(self, x):
    """Standardize input `x` to a unit logistic."""
    with ops.name_scope("standardize", values=[x]):
      return (x - self.loc) / self.scale
| |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Models for Oppia users."""
from constants import constants
from core.platform import models
import feconf
from google.appengine.datastore import datastore_query
from google.appengine.ext import ndb
# Fetch the base model classes through the platform registry.
(base_models,) = models.Registry.import_models([models.NAMES.base_model])
class UserSettingsModel(base_models.BaseModel):
    """Settings and preferences for a particular user.

    Instances of this class are keyed by the user id.
    """
    # Email address of the user.
    email = ndb.StringProperty(required=True, indexed=True)
    # User role. Required for authorization. User gets a default role of
    # exploration editor.
    # TODO (1995YogeshSharma): Remove the default value once the one-off
    # migration (to give role to all users) is run.
    role = ndb.StringProperty(
        required=True, indexed=True, default=feconf.ROLE_ID_EXPLORATION_EDITOR)
    # Identifiable username to display in the UI. May be None.
    username = ndb.StringProperty(indexed=True)
    # Normalized username to use for duplicate-username queries. May be None.
    normalized_username = ndb.StringProperty(indexed=True)
    # When the user last agreed to the terms of the site. May be None.
    last_agreed_to_terms = ndb.DateTimeProperty(default=None)
    # When the user last started the state editor tutorial. May be None.
    last_started_state_editor_tutorial = ndb.DateTimeProperty(default=None)  # pylint: disable=invalid-name
    # When the user last logged in. This may be out-of-date by up to
    # feconf.PROXIMAL_TIMEDELTA_SECS seconds.
    last_logged_in = ndb.DateTimeProperty(default=None)
    # When the user last edited an exploration. May be None.
    last_edited_an_exploration = ndb.DateTimeProperty(default=None)
    # When the user last created an exploration. May be None.
    last_created_an_exploration = ndb.DateTimeProperty(default=None)
    # User uploaded profile picture as a dataURI string. May be None.
    profile_picture_data_url = ndb.TextProperty(default=None, indexed=False)
    # The preferred dashboard of the user.
    default_dashboard = ndb.StringProperty(
        default=constants.DASHBOARD_TYPE_LEARNER,
        indexed=True,
        choices=[
            constants.DASHBOARD_TYPE_LEARNER,
            constants.DASHBOARD_TYPE_CREATOR])
    # The preferred dashboard display preference (e.g. card layout).
    creator_dashboard_display_pref = ndb.StringProperty(
        default=constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS['CARD'],
        indexed=True,
        choices=constants.ALLOWED_CREATOR_DASHBOARD_DISPLAY_PREFS.values())
    # User specified biography (to be shown on their profile page).
    user_bio = ndb.TextProperty(indexed=False)
    # Subject interests specified by the user.
    subject_interests = ndb.StringProperty(repeated=True, indexed=True)
    # The time, in milliseconds, when the user first contributed to Oppia.
    # May be None.
    first_contribution_msec = ndb.FloatProperty(default=None)
    # Exploration language preferences specified by the user.
    # TODO(sll): Add another field for the language that the user wants the
    # site to display in. These language preferences are mainly for the purpose
    # of figuring out what to show by default in the library index page.
    preferred_language_codes = ndb.StringProperty(
        repeated=True,
        indexed=True,
        choices=[lc['code'] for lc in constants.ALL_LANGUAGE_CODES])
    # System language preference (for I18N).
    preferred_site_language_code = ndb.StringProperty(
        default=None, choices=[
            language['id'] for language in constants.SUPPORTED_SITE_LANGUAGES])
    # Audio language preference used for audio translations.
    preferred_audio_language_code = ndb.StringProperty(
        default=None, choices=[
            language['id'] for language in constants.SUPPORTED_AUDIO_LANGUAGES])

    @classmethod
    def is_normalized_username_taken(cls, normalized_username):
        """Returns whether or not a given normalized_username is taken.

        Args:
            normalized_username: str. The given user's normalized username.

        Returns:
            bool. Whether the normalized_username has already been taken.
        """
        return bool(cls.get_all().filter(
            cls.normalized_username == normalized_username).get())

    @classmethod
    def get_by_normalized_username(cls, normalized_username):
        """Returns a user model given a normalized username.

        Args:
            normalized_username: str. The user's normalized username.

        Returns:
            UserSettingsModel. The UserSettingsModel instance which contains
            the same normalized_username.
        """
        return cls.get_all().filter(
            cls.normalized_username == normalized_username).get()

    @classmethod
    def get_by_role(cls, role):
        """Returns user models with given role.

        Args:
            role: str. The role ID that is being queried for.

        Returns:
            list(UserSettingsModel). The UserSettingsModel instances which
            have the given role ID.
        """
        return cls.query(cls.role == role).fetch()
class CompletedActivitiesModel(base_models.BaseModel):
    """Keeps track of all the explorations and collections completed by the
    learner.

    Instances of this class are keyed by the user id.
    """
    # IDs of all the explorations completed by the user.
    exploration_ids = ndb.StringProperty(repeated=True, indexed=True)
    # IDs of all the collections completed by the user.
    collection_ids = ndb.StringProperty(repeated=True, indexed=True)
class IncompleteActivitiesModel(base_models.BaseModel):
    """Keeps track of all the activities currently being completed by the
    learner.

    Instances of this class are keyed by the user id.
    """
    # The ids of the explorations partially completed by the user.
    exploration_ids = ndb.StringProperty(repeated=True, indexed=True)
    # The ids of the collections partially completed by the user.
    collection_ids = ndb.StringProperty(repeated=True, indexed=True)
class ExpUserLastPlaythroughModel(base_models.BaseModel):
    """Stores the "last playthrough" information for partially-completed
    explorations.

    Instances of this class have keys of the form
        [user_id].[exploration_id]
    """
    # The user id.
    user_id = ndb.StringProperty(required=True, indexed=True)
    # The exploration id.
    exploration_id = ndb.StringProperty(required=True, indexed=True)
    # The version of the exploration last played by the user.
    last_played_exp_version = ndb.IntegerProperty(default=None)
    # The name of the state at which the learner left the exploration when
    # he/she last played it.
    last_played_state_name = ndb.StringProperty(default=None)

    @classmethod
    def _generate_id(cls, user_id, exploration_id):
        """Returns the datastore key '[user_id].[exploration_id]'."""
        return '%s.%s' % (user_id, exploration_id)

    @classmethod
    def create(cls, user_id, exploration_id):
        """Creates a new ExpUserLastPlaythroughModel instance and returns it.

        Note: the new instance is not saved here; the caller is responsible
        for persisting it.

        Args:
            user_id: str. The id of the user.
            exploration_id: str. The id of the exploration.

        Returns:
            ExpUserLastPlaythroughModel. The newly created
            ExpUserLastPlaythroughModel instance.
        """
        instance_id = cls._generate_id(user_id, exploration_id)
        return cls(
            id=instance_id, user_id=user_id, exploration_id=exploration_id)

    @classmethod
    def get(cls, user_id, exploration_id):
        """Gets the ExpUserLastPlaythroughModel for the given user and
        exploration id.

        Args:
            user_id: str. The id of the user.
            exploration_id: str. The id of the exploration.

        Returns:
            ExpUserLastPlaythroughModel. The ExpUserLastPlaythroughModel
            instance which matches with the given user_id and exploration_id
            (strict=False, so presumably None when no match exists — see the
            base model's get()).
        """
        instance_id = cls._generate_id(user_id, exploration_id)
        return super(ExpUserLastPlaythroughModel, cls).get(
            instance_id, strict=False)
class LearnerPlaylistModel(base_models.BaseModel):
    """Keeps track of all the explorations and collections in the playlist of
    the user.

    Instances of this class are keyed by the user id.
    """
    # IDs of all the explorations in the playlist of the user.
    exploration_ids = ndb.StringProperty(repeated=True, indexed=True)
    # IDs of all the collections in the playlist of the user.
    collection_ids = ndb.StringProperty(repeated=True, indexed=True)
class UserContributionsModel(base_models.BaseModel):
    """Tracks explorations created/edited for a particular user.

    Instances of this class are keyed by the user id.
    """
    # IDs of explorations that this user has created.
    # Includes subsequently deleted and private explorations.
    created_exploration_ids = ndb.StringProperty(
        repeated=True, indexed=True, default=None)
    # IDs of explorations that this user has made a positive
    # (i.e. non-revert) commit to.
    # Includes subsequently deleted and private explorations.
    edited_exploration_ids = ndb.StringProperty(
        repeated=True, indexed=True, default=None)
class UserEmailPreferencesModel(base_models.BaseModel):
    """Email preferences for a particular user.

    Instances of this class are keyed by the user id.
    """
    # The user's preference for receiving general site updates. This is set to
    # None if the user has never set a preference.
    site_updates = ndb.BooleanProperty(indexed=True)
    # The user's preference for receiving email when user is added as a member
    # in exploration. This is set to True when user has never set a preference.
    editor_role_notifications = ndb.BooleanProperty(
        indexed=True, default=feconf.DEFAULT_EDITOR_ROLE_EMAIL_PREFERENCE)
    # The user's preference for receiving email when user receives feedback
    # message for his/her exploration.
    feedback_message_notifications = ndb.BooleanProperty(
        indexed=True, default=feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_PREFERENCE)
    # The user's preference for receiving email when a creator, to which this
    # user has subscribed, publishes an exploration.
    subscription_notifications = ndb.BooleanProperty(
        indexed=True, default=feconf.DEFAULT_SUBSCRIPTION_EMAIL_PREFERENCE)
class UserSubscriptionsModel(base_models.BaseModel):
    """A list of things that a user subscribes to.

    Instances of this class are keyed by the user id.
    """
    # IDs of activities (e.g., explorations) that this user subscribes to.
    # TODO(bhenning): Rename this to exploration_ids and perform a migration.
    activity_ids = ndb.StringProperty(repeated=True, indexed=True)
    # IDs of collections that this user subscribes to.
    collection_ids = ndb.StringProperty(repeated=True, indexed=True)
    # IDs of feedback thread ids that this user subscribes to.
    feedback_thread_ids = ndb.StringProperty(repeated=True, indexed=True)
    # IDs of the creators to whom this user has subscribed. (The previous
    # comment here — "learners who have subscribed to this user" — duplicated
    # UserSubscribersModel.subscriber_ids and contradicted this field's name.)
    creator_ids = ndb.StringProperty(repeated=True, indexed=True)
    # When the user last checked notifications. May be None.
    last_checked = ndb.DateTimeProperty(default=None)
class UserSubscribersModel(base_models.BaseModel):
    """The list of subscribers of the user.

    Instances of this class are keyed by the user id.
    """
    # IDs of the learners who have subscribed to this user.
    subscriber_ids = ndb.StringProperty(repeated=True, indexed=True)
class UserRecentChangesBatchModel(base_models.BaseMapReduceBatchResultsModel):
    """A list of recent changes corresponding to things a user subscribes to.

    This is computed using a MapReduce batch job and may not be up to date.
    Instances of this class are keyed by the user id.
    """
    # The output of the batch job.
    output = ndb.JsonProperty(indexed=False)
    # The time, in milliseconds since the epoch, when the job that computed
    # this batch model was queued.
    job_queued_msec = ndb.FloatProperty(indexed=False)
class UserStatsModel(base_models.BaseMapReduceBatchResultsModel):
    """User-specific statistics keyed by user id.

    Values for total plays and average ratings are recorded by aggregating over
    all explorations owned by a user.
    Impact scores are calculated over explorations for which a user
    is listed as a contributor.

    The impact score for a particular user is defined as:
        Sum of (
            ln(playthroughs) * (ratings_scaler) * (average(ratings) - 2.5))
        * (multiplier),
    where multiplier = 10, and ratings_scaler is .1 * (number of ratings)
    if there are < 10 ratings for that exploration.

    The impact score is 0 for an exploration with 0 playthroughs or with an
    average rating of less than 2.5.
    """
    # The impact score.
    impact_score = ndb.FloatProperty(indexed=True)
    # The total plays of all the explorations.
    total_plays = ndb.IntegerProperty(indexed=True, default=0)
    # The average of average ratings of all explorations.
    average_ratings = ndb.FloatProperty(indexed=True)
    # The number of ratings of all explorations.
    num_ratings = ndb.IntegerProperty(indexed=True, default=0)
    # A list which stores history of creator stats.
    # Each item in the list is a Json object keyed by a datetime string and
    # value as another Json object containing key-value pairs to be stored.
    # [
    #   {
    #     (date_1): {
    #       "average_ratings": 4.3,
    #       "total_plays": 40
    #     }
    #   },
    #   {
    #     (date_2): {
    #       "average_ratings": 4.1,
    #       "total_plays": 60
    #     }
    #   },
    # ]
    weekly_creator_stats_list = ndb.JsonProperty(repeated=True)
    # The version of dashboard stats schema.
    schema_version = (
        ndb.IntegerProperty(
            required=True,
            default=feconf.CURRENT_DASHBOARD_STATS_SCHEMA_VERSION,
            indexed=True))

    @classmethod
    def get_or_create(cls, user_id):
        """Creates a new UserStatsModel instance, if it does not already exist.

        Args:
            user_id: str. The user_id to be associated with the UserStatsModel.

        Returns:
            UserStatsModel. Either an existing one which matches the
            given user_id, or the newly created one if it did not already
            exist. Note that a newly created instance is not saved here;
            the caller is responsible for putting it to the datastore.
        """
        entity = cls.get(user_id, strict=False)
        if not entity:
            entity = cls(id=user_id)
        return entity
class ExplorationUserDataModel(base_models.BaseModel):
    """User-specific data pertaining to a specific exploration.

    Instances of this class have keys of the form
        [USER_ID].[EXPLORATION_ID]
    """
    # The user id.
    user_id = ndb.StringProperty(required=True, indexed=True)
    # The exploration id.
    exploration_id = ndb.StringProperty(required=True, indexed=True)
    # The rating (1-5) the user assigned to the exploration. Note that this
    # represents a rating given on completion of the exploration.
    rating = ndb.IntegerProperty(default=None, indexed=True)
    # When the most recent rating was awarded, or None if not rated.
    rated_on = ndb.DateTimeProperty(default=None, indexed=False)
    # List of uncommitted changes made by the user to the exploration.
    draft_change_list = ndb.JsonProperty(default=None)
    # Timestamp of when the change list was last updated.
    draft_change_list_last_updated = ndb.DateTimeProperty(default=None)
    # The exploration version that this change list applied to.
    draft_change_list_exp_version = ndb.IntegerProperty(default=None)
    # The version of the draft change list which was last saved by the user.
    # Can be zero if the draft is None or if the user has not committed
    # draft changes to this exploration since the draft_change_list_id property
    # was introduced.
    draft_change_list_id = ndb.IntegerProperty(default=0)
    # The user's preference for receiving suggestion emails for this
    # exploration.
    mute_suggestion_notifications = ndb.BooleanProperty(
        default=feconf.DEFAULT_SUGGESTION_NOTIFICATIONS_MUTED_PREFERENCE)
    # The user's preference for receiving feedback emails for this exploration.
    mute_feedback_notifications = ndb.BooleanProperty(
        default=feconf.DEFAULT_FEEDBACK_NOTIFICATIONS_MUTED_PREFERENCE)

    @classmethod
    def _generate_id(cls, user_id, exploration_id):
        """Returns the datastore key '[user_id].[exploration_id]'."""
        return '%s.%s' % (user_id, exploration_id)

    @classmethod
    def create(cls, user_id, exploration_id):
        """Creates a new ExplorationUserDataModel instance and returns it.

        Note that the client is responsible for actually saving this entity to
        the datastore.

        Args:
            user_id: str. The id of the user.
            exploration_id: str. The id of the exploration.

        Returns:
            ExplorationUserDataModel. The newly created
            ExplorationUserDataModel instance.
        """
        instance_id = cls._generate_id(user_id, exploration_id)
        return cls(
            id=instance_id, user_id=user_id, exploration_id=exploration_id)

    @classmethod
    def get(cls, user_id, exploration_id):
        """Gets the ExplorationUserDataModel for the given user and exploration
        ids.

        Args:
            user_id: str. The id of the user.
            exploration_id: str. The id of the exploration.

        Returns:
            ExplorationUserDataModel. The ExplorationUserDataModel instance
            which matches with the given user_id and exploration_id
            (strict=False, so presumably None when no match exists — see the
            base model's get()).
        """
        instance_id = cls._generate_id(user_id, exploration_id)
        return super(ExplorationUserDataModel, cls).get(
            instance_id, strict=False)

    @classmethod
    def get_multi(cls, user_ids, exploration_id):
        """Gets the ExplorationUserDataModels for the given user ids and the
        given exploration id.

        Args:
            user_ids: list(str). A list of user_ids.
            exploration_id: str. The id of the exploration.

        Returns:
            The result of the base model's get_multi() for the generated
            instance ids — one entry per user_id.
        """
        instance_ids = (
            cls._generate_id(user_id, exploration_id) for user_id in user_ids)
        return super(ExplorationUserDataModel, cls).get_multi(
            instance_ids)
class CollectionProgressModel(base_models.BaseModel):
    """Stores progress a user has made within a collection, including all
    explorations which have been completed within the context of the
    collection.

    Please note instances of this progress model will persist even after a
    collection is deleted.

    TODO(bhenning): Implement a job which removes stale versions of this model
    in the data store. That is, it should go through all completion models and
    ensure both the user and collection it is associated with still exist
    within the data store, otherwise it should remove the instance of the
    completion model.
    """
    # The user id.
    user_id = ndb.StringProperty(required=True, indexed=True)
    # The collection id.
    collection_id = ndb.StringProperty(required=True, indexed=True)
    # The list of explorations which have been completed within the context of
    # the collection represented by collection_id.
    completed_explorations = ndb.StringProperty(repeated=True)

    @classmethod
    def _generate_id(cls, user_id, collection_id):
        """Builds the '[user_id].[collection_id]' key for an instance."""
        return '.'.join([user_id, collection_id])

    @classmethod
    def create(cls, user_id, collection_id):
        """Creates a new CollectionProgressModel instance and returns it.

        Note: the client is responsible for actually saving this entity to
        the datastore.

        Args:
            user_id: str. The id of the user.
            collection_id: str. The id of the collection.

        Returns:
            CollectionProgressModel. The newly created
            CollectionProgressModel instance.
        """
        model_id = cls._generate_id(user_id, collection_id)
        return cls(
            id=model_id, user_id=user_id, collection_id=collection_id)

    @classmethod
    def get(cls, user_id, collection_id):
        """Gets the CollectionProgressModel for the given user and collection
        id.

        Args:
            user_id: str. The id of the user.
            collection_id: str. The id of the collection.

        Returns:
            CollectionProgressModel|None. The instance matching the given
            user_id and collection_id, or None if none exists.
        """
        model_id = cls._generate_id(user_id, collection_id)
        return super(CollectionProgressModel, cls).get(
            model_id, strict=False)

    @classmethod
    def get_multi(cls, user_id, collection_ids):
        """Gets the CollectionProgressModels for the given user and
        collection ids.

        Args:
            user_id: str. The id of the user.
            collection_ids: list(str). The ids of the collections.

        Returns:
            list(CollectionProgressModel|None). The instances matching the
            given user_id and collection_ids.
        """
        model_ids = [
            cls._generate_id(user_id, collection_id)
            for collection_id in collection_ids]
        return super(CollectionProgressModel, cls).get_multi(model_ids)

    @classmethod
    def get_or_create(cls, user_id, collection_id):
        """Gets the CollectionProgressModel for the given user and collection
        ids, or creates a new instance if no such instance yet exists within
        the datastore.

        Args:
            user_id: str. The id of the user.
            collection_id: str. The id of the collection.

        Returns:
            CollectionProgressModel. Either the existing instance matching
            the given user_id and collection_id, or a newly created one if
            none existed.
        """
        existing_model = cls.get(user_id, collection_id)
        if existing_model is not None:
            return existing_model
        return cls.create(user_id, collection_id)
class UserQueryModel(base_models.BaseModel):
    """Model for storing result of queries.

    The id of each instance of this model is an alphanumeric id of length 12
    unique to each model instance.
    """
    # Options for a query specified by query submitter.
    # Query option to specify whether user has created or edited one or more
    # explorations in last n days. This only returns users who have ever
    # created or edited at least one exploration.
    inactive_in_last_n_days = ndb.IntegerProperty(default=None)
    # Query option to check whether given user has logged in
    # since last n days.
    has_not_logged_in_for_n_days = ndb.IntegerProperty(default=None)
    # Query option to check whether user has created at least
    # n explorations.
    created_at_least_n_exps = ndb.IntegerProperty(default=None)
    # Query option to check whether user has created fewer than
    # n explorations.
    created_fewer_than_n_exps = ndb.IntegerProperty(default=None)
    # Query option to check if user has edited at least n explorations.
    edited_at_least_n_exps = ndb.IntegerProperty(default=None)
    # Query option to check if user has edited fewer than n explorations.
    edited_fewer_than_n_exps = ndb.IntegerProperty(default=None)
    # List of all user_ids who satisfy all parameters given in above query.
    # This list will be empty initially. Once query has completed its
    # execution this list will be populated with all qualifying user ids.
    user_ids = ndb.JsonProperty(default=[], compressed=True)
    # ID of the user who submitted the query.
    submitter_id = ndb.StringProperty(indexed=True, required=True)
    # ID of the instance of BulkEmailModel which stores information
    # about sent emails.
    sent_email_model_id = ndb.StringProperty(default=None, indexed=True)
    # Current status of the query.
    query_status = ndb.StringProperty(
        indexed=True,
        choices=[
            feconf.USER_QUERY_STATUS_PROCESSING,
            feconf.USER_QUERY_STATUS_COMPLETED,
            feconf.USER_QUERY_STATUS_ARCHIVED,
            feconf.USER_QUERY_STATUS_FAILED
        ])

    @classmethod
    def fetch_page(cls, page_size, cursor):
        """Fetches a list of all query_models sorted by creation date.

        Args:
            page_size: int. The maximum number of entities to be returned.
            cursor: str or None. The list of returned entities starts from
                this datastore cursor.

        Returns:
            3-tuple of (query_models, next_cursor, more) as described in
            fetch_page() at:
            https://developers.google.com/appengine/docs/python/ndb/queryclass,
            where:
                query_models: List of UserQueryModel instances.
                next_cursor: str or None. A query cursor pointing to the next
                    batch of results. If there are no more results, this
                    might be None.
                more: bool. If True, there are probably more results after
                    this batch. If False, there are no further results after
                    this batch.
        """
        start_cursor = datastore_query.Cursor(urlsafe=cursor)
        query_models, next_cursor, more = (
            cls.query().order(-cls.created_on).fetch_page(
                page_size, start_cursor=start_cursor))
        # Only expose a continuation cursor when the datastore indicates more
        # results are likely.
        if next_cursor and more:
            next_cursor_urlsafe = next_cursor.urlsafe()
        else:
            next_cursor_urlsafe = None
        return query_models, next_cursor_urlsafe, more
class UserBulkEmailsModel(base_models.BaseModel):
    """Model to store IDs of BulkEmailModels sent to a user.

    Instances of this class are keyed by the user id.
    """
    # IDs of all BulkEmailModels that correspond to bulk emails sent to this
    # user.
    sent_email_model_ids = ndb.StringProperty(indexed=True, repeated=True)
class UserSkillMasteryModel(base_models.BaseModel):
    """Model for storing a user's degree of mastery of a skill in Oppia.

    This model stores the degree of mastery of each skill for a given user.
    The id for this model is of form '{{USER_ID}}.{{SKILL_ID}}'
    """
    # The user id of the user.
    user_id = ndb.StringProperty(required=True, indexed=True)
    # The skill id for which the degree of mastery is stored.
    skill_id = ndb.StringProperty(required=True, indexed=True)
    # The degree of mastery of the user in the skill.
    degree_of_mastery = ndb.FloatProperty(required=True, indexed=True)

    @classmethod
    def construct_model_id(cls, user_id, skill_id):
        """Returns model id corresponding to user and skill.

        Args:
            user_id: str. The user ID of the user.
            skill_id: str. The unique id of the skill.

        Returns:
            str. The model id corresponding to the given user and skill.
        """
        return '.'.join([user_id, skill_id])
class UserContributionScoringModel(base_models.BaseModel):
    """Model for storing the scores of a user for various suggestions created
    by the user. Users having scores above a particular threshold for a
    category can review suggestions for that category.

    The id for this model is of the form '{{score_category}}.{{user_id}}'.
    """
    # The user id of the user.
    user_id = ndb.StringProperty(required=True, indexed=True)
    # The category of suggestion to score the user on.
    score_category = ndb.StringProperty(required=True, indexed=True)
    # The score of the user for the above category of suggestions.
    score = ndb.FloatProperty(required=True, indexed=True)

    @classmethod
    def get_all_scores_of_user(cls, user_id):
        """Gets all scores for a given user.

        Args:
            user_id: str. The id of the user.

        Returns:
            list(UserContributionScoringModel). All instances for the given
            user.
        """
        user_query = cls.get_all().filter(cls.user_id == user_id)
        return user_query.fetch()

    @classmethod
    def get_all_users_with_score_above_minimum_for_category(
            cls, score_category):
        """Gets all instances which have score above the
        MINIMUM_SCORE_REQUIRED_TO_REVIEW threshold for the given category.

        Args:
            score_category: str. The category being queried.

        Returns:
            list(UserContributionScoringModel). All instances for the given
            category with scores above MINIMUM_SCORE_REQUIRED_TO_REVIEW.
        """
        category_query = cls.get_all().filter(
            cls.score_category == score_category)
        return category_query.filter(
            cls.score >= feconf.MINIMUM_SCORE_REQUIRED_TO_REVIEW).fetch()

    @classmethod
    def _get_instance_id(cls, user_id, score_category):
        """Generates the instance id in the form
        {{score_category}}.{{user_id}}.

        Args:
            user_id: str. The ID of the user.
            score_category: str. The category of suggestion to score the user
                on.

        Returns:
            str. The instance ID for UserContributionScoringModel.
        """
        return '%s.%s' % (score_category, user_id)

    @classmethod
    def get_score_of_user_for_category(cls, user_id, score_category):
        """Gets the score of the user for the given score category.

        Args:
            user_id: str. The ID of the user.
            score_category: str. The category of suggestion to score the user
                on.

        Returns:
            float|None. The score of the user in the given category, or None
            if no entry exists.
        """
        model = cls.get_by_id(cls._get_instance_id(user_id, score_category))
        if model is None:
            return None
        return model.score

    @classmethod
    def create(cls, user_id, score_category, score):
        """Creates a new UserContributionScoringModel entry.

        Args:
            user_id: str. The ID of the user.
            score_category: str. The category of the suggestion.
            score: float. The score of the user.

        Raises:
            Exception: There is already an entry with the given id.
        """
        instance_id = cls._get_instance_id(user_id, score_category)
        if cls.get_by_id(instance_id):
            raise Exception('There is already an entry with the given id: %s' %
                            instance_id)
        entry = cls(
            id=instance_id, user_id=user_id, score_category=score_category,
            score=score)
        entry.put()

    @classmethod
    def increment_score_for_user(cls, user_id, score_category, increment_by):
        """Increment the score of the user in the category by the given
        amount.

        Args:
            user_id: str. The id of the user.
            score_category: str. The category of the suggestion.
            increment_by: float. The amount to increase the score of the user
                by. May be negative, in which case the score is reduced.
        """
        instance_id = cls._get_instance_id(user_id, score_category)
        model = cls.get_by_id(instance_id)
        if model is None:
            # No entry yet: seed a new one with the increment as the score.
            cls.create(user_id, score_category, increment_by)
        else:
            model.score += increment_by
            model.put()
| |
#!/usr/bin/env python
"""Flows for handling the collection for artifacts."""
import re
import logging
from grr.lib import aff4
from grr.lib import artifact
from grr.lib import artifact_lib
from grr.lib import config_lib
from grr.lib import flow
from grr.lib import parsers
from grr.lib import rdfvalue
from grr.proto import flows_pb2
class BootStrapKnowledgeBaseFlow(flow.GRRFlow):
    """Flow that finds core bootstrap artifacts.

    To use artifacts we need to be able to interpolate paths that the
    artifacts use. These are stored in the knowledgebase. However most of the
    things in the knowledge base come from artifacts, which in turn rely on
    facts in the knowledge base. To resolve the dependency loop we rely on a
    couple of core knowledge base values that we call Bootstrap values.

    This flow collects or guesses those Bootstrap values.
    """

    @flow.StateHandler(next_state="ProcessRegStat")
    def Start(self):
        """For each artifact, create subflows for each collector."""
        self.client = aff4.FACTORY.Open(self.client_id, token=self.token)
        self.state.Register("bootstrap_initialized", False)

        if self.client.Get(self.client.Schema.SYSTEM) != "Windows":
            # We don't need bootstrapping for non-windows clients at the
            # moment.
            self.state.bootstrap_initialized = True
            self.CallState(next_state="End")
            return

        # First try querying the registry, this should work fine for live
        # clients but won't support offline clients.
        registry_path = (r"HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows NT"
                         r"\CurrentVersion\SystemRoot")
        reg_pathspec = rdfvalue.PathSpec(
            path=registry_path,
            pathtype=rdfvalue.PathSpec.PathType.REGISTRY)
        self.CallClient("StatFile", pathspec=reg_pathspec,
                        request_data={"bootstrap_var": "system_root"},
                        next_state="ProcessRegStat")

    @flow.StateHandler(next_state="ProcessFileStats")
    def ProcessRegStat(self, responses):
        """Check SystemRoot registry value."""
        if responses.success:
            systemroot = responses.First().registry_data.GetValue()
            if systemroot:
                systemdrive = systemroot[0:2]
                if re.match(r"^[A-Za-z]:$", systemdrive):
                    self.SendReply(rdfvalue.Dict(
                        {"environ_systemroot": systemroot,
                         "environ_systemdrive": systemdrive}))
                    self.state.bootstrap_initialized = True
                    return

        # If registry querying didn't work, we try to guess common paths
        # instead.
        for drive in ["C:", "D:"]:
            drive_pathspec = rdfvalue.PathSpec(
                path=drive, pathtype=rdfvalue.PathSpec.PathType.OS)
            self.CallClient("ListDirectory", pathspec=drive_pathspec,
                            request_data={"bootstrap_var": "system_root"},
                            next_state="ProcessFileStats")

    @flow.StateHandler(next_state="End")
    def ProcessFileStats(self, responses):
        """Extract DataBlob from Stat response."""
        if not responses.success:
            return
        known_roots = ["Windows", "WinNT", "WINNT35", "WTSRV"]
        for response in responses:
            candidate = response.pathspec.path[4:]
            if candidate not in known_roots:
                continue
            systemdrive = response.pathspec.path[1:3]
            systemroot = "%s\\%s" % (systemdrive, candidate)
            self.SendReply(rdfvalue.Dict(
                {"environ_systemroot": systemroot,
                 "environ_systemdrive": systemdrive}))
            self.state.bootstrap_initialized = True
            break

    @flow.StateHandler()
    def End(self):
        """Finalize and test if we succeeded. No notification required."""
        if not self.state.bootstrap_initialized:
            raise flow.FlowError("Could not bootstrap systemroot.")
class ArtifactCollectorFlowArgs(rdfvalue.RDFProtoStruct):
    """Protobuf-backed argument container for ArtifactCollectorFlow."""
    protobuf = flows_pb2.ArtifactCollectorFlowArgs

    def Validate(self):
        """Raises ValueError if no artifacts were requested."""
        if not self.artifact_list:
            raise ValueError("No artifacts to collect.")
class ArtifactCollectorFlow(flow.GRRFlow):
"""Flow that takes a list of artifacts and collects them.
This flow is the core of the Artifact implementation for GRR. Artifacts are
defined using a standardized data format that includes what to collect and
how to process the things collected. This flow takes that data driven format
and makes it useful.
The core functionality of Artifacts is split into Collectors and Processors.
An Artifact defines a set of Collectors that are used to retrieve data from
the client. These can specify collection of files, registry keys, command
output and others. The first part of this flow "Collect" handles running those
collections by issuing GRR flows and client actions.
The results of those are then collected and GRR searches for Processors that
know how to process the output of the Collectors. The Processors all inherit
from the Parser class, and each Parser specifies which Artifacts it knows how
to process.
So this flow hands off the collected rdfvalue results to the Processors which
then return modified or different rdfvalues. These final results are then
either:
1. Sent to the calling flow.
2. Written to a collection.
3. Stored in AFF4 based on a special mapping called the GRRArtifactMappings.
4. A combination of the above.
This is controlled by the flow parameters.
"""
category = "/Collectors/"
args_type = ArtifactCollectorFlowArgs
behaviours = flow.GRRFlow.behaviours + "BASIC"
@flow.StateHandler(next_state=["StartCollection"])
def Start(self):
    """For each artifact, create subflows for each collector.

    Registers all flow-state slots used by the later states, then either
    resolves a knowledge base (from the flow args or from the client) or
    kicks off KnowledgeBaseInitializationFlow before collection starts.
    """
    self.client = aff4.FACTORY.Open(self.client_id, token=self.token)

    # Bookkeeping slots read/updated by the later states of this flow.
    self.state.Register("artifacts_skipped_due_to_condition", [])
    self.state.Register("failed_count", 0)
    self.state.Register("artifacts_failed", [])
    self.state.Register("bootstrap_complete", False)
    self.state.Register("knowledge_base", self.args.knowledge_base)
    self.state.Register("client_anomaly_store", None)

    # Path type used by all file-based collectors in this run.
    if self.args.use_tsk:
        self.state.Register("path_type", rdfvalue.PathSpec.PathType.TSK)
    else:
        self.state.Register("path_type", rdfvalue.PathSpec.PathType.OS)

    if not self.state.knowledge_base:
        # If not provided, get a knowledge base from the client.
        try:
            self.state.knowledge_base = artifact.GetArtifactKnowledgeBase(
                self.client)
        except artifact_lib.KnowledgeBaseUninitializedError:
            # If no-one has ever initialized the knowledge base, we should do
            # so now - unless we are ourselves collecting knowledge-base
            # artifacts, in which case StartCollection handles it.
            if not self._AreArtifactsKnowledgeBaseArtifacts():
                self.CallFlow("KnowledgeBaseInitializationFlow",
                              next_state="StartCollection")
                return

    # In all other cases start the collection state.
    self.CallState(next_state="StartCollection")
@flow.StateHandler(next_state=["ProcessCollected",
                               "ProcessCollectedArtifactFiles",
                               "ProcessRegistryValue", "ProcessBootstrap"])
def StartCollection(self, responses):
    """Start collecting."""
    if not responses.success:
        raise artifact_lib.KnowledgeBaseUninitializedError(
            "Attempt to initialize Knowledge Base failed.")

    if not self.state.knowledge_base:
        self.client = aff4.FACTORY.Open(self.client_id, token=self.token)
        # If we are processing the knowledge base, it still won't exist yet.
        self.state.knowledge_base = artifact.GetArtifactKnowledgeBase(
            self.client, allow_uninitialized=True)

    for name in self.args.artifact_list:
        artifact_obj = self._GetArtifactFromName(name)
        # Ensure artifact has been written sanely. Note that this could be
        # removed if it turns out to be expensive. Artifact tests should
        # catch these.
        artifact_obj.Validate()
        self.Collect(artifact_obj)
def Collect(self, artifact_obj):
    """Collect the raw data from the client for this artifact.

    Args:
      artifact_obj: The artifact definition whose collectors should run.

    Raises:
      RuntimeError: If a collector specifies an unknown action.
    """
    artifact_name = artifact_obj.name

    test_conditions = list(artifact_obj.conditions)
    # Turn supported_os into a condition.
    if artifact_obj.supported_os:
        filt = " OR ".join(
            "os == '%s'" % o for o in artifact_obj.supported_os)
        test_conditions.append(filt)

    # Check each of the conditions match our target.
    for condition in test_conditions:
        if not artifact_lib.CheckCondition(
                condition, self.state.knowledge_base):
            logging.debug("Artifact %s condition %s failed on %s",
                          artifact_name, condition, self.client_id)
            self.state.artifacts_skipped_due_to_condition.append(
                (artifact_name, condition))
            return

    # Maps collector action names to the method that issues the client
    # requests for them. "Bootstrap" is handled separately below as a no-op
    # (bootstrap values are gathered by BootStrapKnowledgeBaseFlow).
    action_dispatch = {
        "RunCommand": lambda c: self.RunCommand(c),
        "GetFile": lambda c: self.GetFiles(c, self.state.path_type),
        "GetFiles": lambda c: self.GetFiles(c, self.state.path_type),
        "Grep": lambda c: self.Grep(c, self.state.path_type),
        "ListFiles": lambda c: self.Glob(c, self.state.path_type),
        "GetRegistryKeys": lambda c: self.Glob(
            c, rdfvalue.PathSpec.PathType.REGISTRY),
        "GetRegistryValue": lambda c: self.GetRegistryValue(c),
        "GetRegistryValues": lambda c: self.GetRegistryValue(c),
        "WMIQuery": lambda c: self.WMIQuery(c),
        "VolatilityPlugin": lambda c: self.VolatilityPlugin(c),
        "CollectArtifacts": lambda c: self.CollectArtifacts(c),
        "CollectArtifactFiles": lambda c: self.CollectArtifactFiles(c),
        "RunGrrClientAction": lambda c: self.RunGrrClientAction(c),
    }

    # Call the collector defined action for each collector.
    for collector in artifact_obj.collectors:
        # Check conditions on the collector; every condition must pass.
        collector_conditions_met = all(
            artifact_lib.CheckCondition(cond, self.state.knowledge_base)
            for cond in (collector.conditions or []))
        if not collector_conditions_met:
            # Fixed typo in the original log message ("conditons").
            logging.debug("Artifact %s no collectors run due to all "
                          "collectors having failing conditions on %s",
                          artifact_name, self.client_id)
            continue

        action_name = collector.action
        self.current_artifact_name = artifact_name
        if action_name == "Bootstrap":
            # Can't do anything with a bootstrap action.
            continue
        handler = action_dispatch.get(action_name)
        if handler is None:
            raise RuntimeError("Invalid action %s in %s" % (action_name,
                                                            artifact_name))
        handler(collector)
def _AreArtifactsKnowledgeBaseArtifacts(self):
    """True iff every requested artifact is a knowledge-base artifact."""
    knowledgebase_list = config_lib.CONFIG["Artifacts.knowledge_base"]
    return all(name in knowledgebase_list
               for name in self.args.artifact_list)
def GetFiles(self, collector, path_type):
    """Schedule a FetchFiles flow for the collector's path(s).

    Args:
      collector: Collector with action "GetFile" (single "path" arg) or
        "GetFiles" ("path_list" arg).
      path_type: The rdfvalue.PathSpec.PathType to fetch with.

    Raises:
      ValueError: If the collector's action is not a file-fetching one.
    """
    if collector.action == "GetFile":
        path_list = [collector.args["path"]]
    elif collector.action == "GetFiles":
        path_list = collector.args["path_list"]
    else:
        # Previously any other action fell through and raised an opaque
        # NameError on path_list; fail explicitly instead.
        raise ValueError(
            "Unexpected action %s in GetFiles" % collector.action)

    new_path_list = []
    for path in path_list:
        # Interpolate any attributes from the knowledgebase.
        new_path_list.extend(artifact_lib.InterpolateKbAttributes(
            path, self.state.knowledge_base))

    self.CallFlow(
        "FetchFiles", paths=new_path_list, pathtype=path_type,
        request_data={"artifact_name": self.current_artifact_name,
                      "collector": collector.ToPrimitiveDict()},
        next_state="ProcessCollected"
    )
def Glob(self, collector, pathtype):
    """Glob paths, return StatEntry objects."""
    interpolated_paths = self.InterpolateList(
        collector.args.get("path_list", []))
    meta = {"artifact_name": self.current_artifact_name,
            "collector": collector.ToPrimitiveDict()}
    self.CallFlow("Glob",
                  paths=interpolated_paths,
                  pathtype=pathtype,
                  request_data=meta,
                  next_state="ProcessCollected")
def Grep(self, collector, pathtype):
    """Grep files in path_list for any matches to content_regex_list."""
    paths = self.InterpolateList(collector.args.get("path_list", []))
    regexes = self.InterpolateList(
        collector.args.get("content_regex_list", []))

    # Build one CONTENTS_REGEX_MATCH filter per (interpolated) regex.
    filters = [
        rdfvalue.FileFinderFilter(
            filter_type=rdfvalue.FileFinderFilter.Type.CONTENTS_REGEX_MATCH,
            contents_regex_match=rdfvalue.FileFinderContentsRegexMatchFilter(
                regex=regex, bytes_before=0, bytes_after=0))
        for regex in regexes]

    self.CallFlow("FileFinder", paths=paths, filters=filters,
                  action=rdfvalue.FileFinderAction(), pathtype=pathtype,
                  request_data={"artifact_name": self.current_artifact_name,
                                "collector": collector.ToPrimitiveDict()},
                  next_state="ProcessCollected")
def GetRegistryValue(self, collector):
    """Retrieve directly specified registry values, returning Stat objects.

    Args:
      collector: Collector with action "GetRegistryValue" (single "path"
        arg) or "GetRegistryValues" ("path_list" arg).

    Raises:
      ValueError: If the collector's action is not a registry-value one.
    """
    if collector.action == "GetRegistryValue":
        path_list = [collector.args["path"]]
    elif collector.action == "GetRegistryValues":
        path_list = collector.args["path_list"]
    else:
        # Previously any other action fell through and raised an opaque
        # NameError on path_list; fail explicitly instead.
        raise ValueError(
            "Unexpected action %s in GetRegistryValue" % collector.action)

    # Interpolation can fan one path template out into several concrete
    # paths; a set deduplicates overlapping expansions.
    new_paths = set()
    for path in path_list:
        expanded_paths = artifact_lib.InterpolateKbAttributes(
            path, self.state.knowledge_base)
        new_paths.update(expanded_paths)

    for new_path in new_paths:
        pathspec = rdfvalue.PathSpec(
            path=new_path, pathtype=rdfvalue.PathSpec.PathType.REGISTRY)
        self.CallClient(
            "StatFile", pathspec=pathspec,
            request_data={"artifact_name": self.current_artifact_name,
                          "collector": collector.ToPrimitiveDict()},
            next_state="ProcessRegistryValue"
        )
def CollectArtifacts(self, collector):
    """Spawn a child ArtifactCollectorFlow for a nested artifact list."""
    meta = {"artifact_name": self.current_artifact_name,
            "collector": collector.ToPrimitiveDict()}
    self.CallFlow("ArtifactCollectorFlow",
                  artifact_list=collector.args["artifact_list"],
                  store_results_in_aff4=False,
                  request_data=meta,
                  next_state="ProcessCollected")
def CollectArtifactFiles(self, collector):
    """Collect files from artifact pathspecs."""
    meta = {"artifact_name": self.current_artifact_name,
            "collector": collector.ToPrimitiveDict()}
    self.CallFlow("ArtifactCollectorFlow",
                  artifact_list=collector.args["artifact_list"],
                  store_results_in_aff4=False,
                  request_data=meta,
                  next_state="ProcessCollectedArtifactFiles")
def RunCommand(self, collector):
    """Run a command."""
    command_args = collector.args.get("args", {})
    self.CallClient("ExecuteCommand",
                    cmd=collector.args["cmd"],
                    args=command_args,
                    request_data={
                        "artifact_name": self.current_artifact_name,
                        "collector": collector.ToPrimitiveDict()},
                    next_state="ProcessCollected")
def WMIQuery(self, collector):
    """Run a Windows WMI Query."""
    # One query template may expand into several concrete queries once
    # knowledge-base attributes are interpolated.
    expanded_queries = artifact_lib.InterpolateKbAttributes(
        collector.args["query"], self.state.knowledge_base)
    for wmi_query in expanded_queries:
        self.CallClient(
            "WmiQuery", query=wmi_query,
            request_data={"artifact_name": self.current_artifact_name,
                          "collector": collector.ToPrimitiveDict()},
            next_state="ProcessCollected"
        )
def VolatilityPlugin(self, collector):
    """Run a Volatility Plugin."""
    plugin_name = collector.args["plugin"]
    request = rdfvalue.VolatilityRequest()
    request.args[plugin_name] = self.InterpolateDict(
        collector.args.get("args", {}))
    self.CallFlow(
        "AnalyzeClientMemory", request=request,
        request_data={"artifact_name": self.current_artifact_name,
                      "vol_plugin": plugin_name,
                      "collector": collector.ToPrimitiveDict()},
        next_state="ProcessCollected"
    )
def _GetSingleExpansion(self, value):
    """Interpolate value and return its single expansion.

    Raises ValueError when interpolation fans out into more than one
    result; callers needing multi-value expansion should pass a list.
    """
    expansions = list(artifact_lib.InterpolateKbAttributes(
        value, self.state.knowledge_base))
    if len(expansions) > 1:
        raise ValueError("Interpolation generated multiple results, use a"
                         " list for multi-value expansions. %s yielded: %s" %
                         (value, expansions))
    return expansions[0]
def InterpolateDict(self, input_dict):
    """Interpolate all items from a dict.

    Args:
      input_dict: dict to interpolate

    Returns:
      original dict with all string values interpolated
    """
    def _expand(value):
        # Strings get single-value knowledge-base interpolation; lists are
        # interpolated element-wise; anything else passes through untouched.
        if isinstance(value, basestring):
            return self._GetSingleExpansion(value)
        if isinstance(value, list):
            return self.InterpolateList(value)
        return value

    return dict((key, _expand(value))
                for key, value in input_dict.items())
def InterpolateList(self, input_list):
    """Interpolate all items from a given collector array.

    Args:
      input_list: list of values to interpolate

    Returns:
      original list of values extended with strings interpolated
    """
    expanded = []
    for item in input_list:
        if isinstance(item, basestring):
            expanded.extend(artifact_lib.InterpolateKbAttributes(
                item, self.state.knowledge_base))
        else:
            # NOTE(review): non-string items are flattened into the result
            # via extend(); presumably they are themselves iterables — a
            # non-iterable here would raise TypeError. Confirm with callers.
            expanded.extend(item)
    return expanded
def RunGrrClientAction(self, collector):
    """Call a GRR Client Action."""
    interpolated_args = self.InterpolateDict(
        collector.args.get("action_args", {}))
    self.CallClient(
        collector.args["client_action"],
        request_data={"artifact_name": self.current_artifact_name,
                      "collector": collector.ToPrimitiveDict()},
        next_state="ProcessCollected",
        **interpolated_args)
@flow.StateHandler(next_state="ProcessCollected")
def ProcessRegistryValue(self, responses):
    """Extract DataBlob from Stat response."""
    # TODO(user): This currently does no transformation.
    message = responses.First()
    if responses.success and message.registry_data:
        self.CallState(next_state="ProcessCollected",
                       request_data=responses.request_data.ToDict(),
                       messages=[message])
    else:
        self.Log("Failed to get registry value %s" %
                 responses.request_data["artifact_name"])
@flow.StateHandler()
def ProcessCollected(self, responses):
    """Each individual collector will call back into here.

    On success, looks up all registered Parser processors for the artifact
    and feeds each response through them; processors flagged with
    process_together are batched and parsed in one call at the end.

    Args:
      responses: Responses from the collection.

    Raises:
      artifact_lib.ArtifactDefinitionError: On bad definition.
      artifact_lib.ArtifactProcessingError: On failure to process.
    """
    flow_name = self.__class__.__name__
    artifact_name = responses.request_data["artifact_name"]
    collector = responses.request_data.GetItem("collector", None)

    if responses.success:
        self.Log("Artifact data collection %s completed successfully in "
                 "flow %s with %d responses", artifact_name, flow_name,
                 len(responses))
    else:
        # Record the failure in flow state and bail out; nothing to parse.
        self.Log("Artifact %s data collection failed. Status: %s.",
                 artifact_name, responses.status)
        self.state.failed_count += 1
        self.state.artifacts_failed.append(artifact_name)
        return

    # Initialize some local non-state saved variables for processing.
    if self.runner.output:
        if self.args.split_output_by_artifact:
            self.output_collection_map = {}

    if self.args.store_results_in_aff4:
        self.aff4_output_map = {}

    # Now process the responses.
    processors = parsers.Parser.GetClassesByArtifact(artifact_name)
    saved_responses = {}
    for response in responses:
        if processors:
            for processor in processors:
                processor_obj = processor()
                if processor_obj.process_together:
                    # Store the response until we have them all.
                    saved_responses.setdefault(
                        processor.__name__, []).append(response)
                else:
                    # Process the response immediately
                    self._ParseResponses(processor_obj, response, responses,
                                         artifact_name, collector)
        else:
            # We don't have any defined processors for this artifact.
            self._ParseResponses(None, response, responses, artifact_name,
                                 collector)

    # If we were saving responses, process them now:
    for processor_name, responses_list in saved_responses.items():
        processor_obj = parsers.Parser.classes[processor_name]()
        self._ParseResponses(processor_obj, responses_list, responses,
                             artifact_name, collector)

    # Flush the results to the objects.
    if self.runner.output:
        self._FinalizeCollection(artifact_name)
    if self.args.store_results_in_aff4:
        self._FinalizeMappedAFF4Locations(artifact_name)
    if self.state.client_anomaly_store:
        self.state.client_anomaly_store.Flush()
@flow.StateHandler(next_state="ProcessCollected")
def ProcessCollectedArtifactFiles(self, responses):
    """Schedule files for download based on pathspec attribute.

    Each response either is a pathspec/path itself, or carries one in the
    field named by the collector's "pathspec_attribute" arg. All resolved
    pathspecs are downloaded in a single MultiGetFile flow.

    Args:
      responses: Response objects from the artifact collector.

    Raises:
      RuntimeError: if pathspec value is not a PathSpec instance and not
        a basestring.
    """
    self.download_list = []
    collector = responses.request_data.GetItem("collector")
    pathspec_attribute = collector["args"].get("pathspec_attribute", None)

    for response in responses:
        if pathspec_attribute:
            if response.HasField(pathspec_attribute):
                pathspec = response.Get(pathspec_attribute)
            else:
                # Response doesn't carry the expected field; skip it.
                self.Log("Missing pathspec field %s: %s", pathspec_attribute,
                         response)
                continue
        else:
            pathspec = response

        if isinstance(pathspec, basestring):
            # Plain string paths are wrapped into a PathSpec; the path type
            # follows the flow-level use_tsk setting.
            pathspec = rdfvalue.PathSpec(path=pathspec)
            if self.args.use_tsk:
                pathspec.pathtype = rdfvalue.PathSpec.PathType.TSK
            else:
                pathspec.pathtype = rdfvalue.PathSpec.PathType.OS
            self.download_list.append(pathspec)
        elif isinstance(pathspec, rdfvalue.PathSpec):
            self.download_list.append(pathspec)
        else:
            raise RuntimeError(
                "Response must be a string path, a pathspec, or have "
                "pathspec_attribute set. Got: %s" % pathspec)

    if self.download_list:
        request_data = responses.request_data.ToDict()
        self.CallFlow("MultiGetFile", pathspecs=self.download_list,
                      request_data=request_data,
                      next_state="ProcessCollected")
    else:
        self.Log("No files to download")
def _GetArtifactReturnTypes(self, collector):
    """Get a list of types we expect to handle from our responses."""
    if not collector:
        # No collector recorded: no type expectations (returns None).
        return None
    return collector["returned_types"]
def _ProcessAnomaly(self, anomaly_value):
    """Write anomalies to the client in the data store."""
    # Lazily create the per-client "anomalies" collection on first use;
    # the flow-state slot is initialized to None in Start.
    if not self.state.client_anomaly_store:
        self.state.client_anomaly_store = aff4.FACTORY.Create(
            self.client_id.Add("anomalies"), "RDFValueCollection",
            token=self.token, mode="rw")
    self.state.client_anomaly_store.Add(anomaly_value)
  def _ParseResponses(self, processor_obj, responses, responses_obj,
                      artifact_name, collector):
    """Create a result parser sending different arguments for diff parsers.

    Dispatches on the parser's concrete type, since each parser family takes
    a different argument signature, then routes every parsed result to the
    reply/collection/AFF4 output channels.

    Args:
      processor_obj: A Processor object that inherits from Parser.
      responses: A list of, or single response depending on the processors
         process_together setting.
      responses_obj: The responses object itself.
      artifact_name: Name of the artifact that generated the responses.
      collector: The collector responsible for producing the responses.

    Raises:
      RuntimeError: On bad parser.
    """
    _ = responses_obj
    if not processor_obj:
      # We don't do any parsing, the results are raw as they came back.
      result_iterator = responses

    else:
      # We have some processors to run.
      if processor_obj.process_together:
        # We are processing things in a group which requires specialized
        # handling by the parser. This is used when multiple responses need to
        # be combined to parse successfully. E.g parsing passwd and shadow files
        # together.
        parse_method = processor_obj.ParseMultiple
      else:
        parse_method = processor_obj.Parse

      if isinstance(processor_obj, parsers.CommandParser):
        # Command processor only supports one response at a time.
        response = responses
        result_iterator = parse_method(
            cmd=response.request.cmd,
            args=response.request.args,
            stdout=response.stdout,
            stderr=response.stderr,
            return_val=response.exit_status,
            time_taken=response.time_used,
            knowledge_base=self.state.knowledge_base)

      elif isinstance(processor_obj, parsers.WMIQueryParser):
        # WMI parsers need the original query for context.
        query = collector["args"]["query"]
        result_iterator = parse_method(query, responses,
                                       self.state.knowledge_base)

      elif isinstance(processor_obj, parsers.FileParser):
        # File parsers receive open AFF4 file objects alongside the responses.
        if processor_obj.process_together:
          file_objects = [aff4.FACTORY.Open(r.aff4path, token=self.token)
                          for r in responses]
          result_iterator = parse_method(responses, file_objects,
                                         self.state.knowledge_base)
        else:
          fd = aff4.FACTORY.Open(responses.aff4path, token=self.token)
          result_iterator = parse_method(responses, fd,
                                         self.state.knowledge_base)

      elif isinstance(processor_obj, (parsers.RegistryParser,
                                      parsers.VolatilityPluginParser,
                                      parsers.RegistryValueParser,
                                      parsers.GenericResponseParser,
                                      parsers.GrepParser)):
        result_iterator = parse_method(responses, self.state.knowledge_base)

      elif isinstance(processor_obj, (parsers.ArtifactFilesParser)):
        result_iterator = parse_method(responses, self.state.knowledge_base,
                                       self.state.path_type)

      else:
        raise RuntimeError("Unsupported parser detected %s" % processor_obj)

    artifact_return_types = self._GetArtifactReturnTypes(collector)

    if result_iterator:
      # If we have a parser, do something with the results it produces.
      for result in result_iterator:
        result_type = result.__class__.__name__
        if result_type == "Anomaly":
          # Anomalies are special results and get handled separately.
          self._ProcessAnomaly(result)
        elif not artifact_return_types or result_type in artifact_return_types:
          self.SendReply(result)    # Send to parent.
          if self.runner.output:
            # Output is set, we need to write to a collection.
            self._WriteResultToCollection(result, artifact_name)
          if self.args.store_results_in_aff4:
            # Write our result back to a mapped location in AFF4 space.
            self._WriteResultToMappedAFF4Location(result)
  def _WriteResultToCollection(self, result, artifact_name):
    """Write any results to the collection."""
    if self.args.split_output_by_artifact:
      # One sub-collection per artifact, created lazily and cached so each
      # collection is opened only once per flow run.
      if self.runner.output and artifact_name not in self.output_collection_map:
        urn = self.runner.output.urn.Add(artifact_name)
        collection = aff4.FACTORY.Create(urn, "RDFValueCollection", mode="rw",
                                         token=self.token)
        # Cache the opened object.
        self.output_collection_map[artifact_name] = collection
      self.output_collection_map[artifact_name].Add(result)
    else:
      # If not split the SendReply handling will take care of collection adding.
      pass
  def _FinalizeCollection(self, artifact_name):
    """Finalize writes to the Collection."""
    total = 0
    if self.args.split_output_by_artifact:
      # Flush every per-artifact sub-collection and sum their sizes.
      for collection in self.output_collection_map.values():
        total += len(collection)
        collection.Flush()
    else:
      if self.runner.output:
        self.runner.output.Flush()
        total += len(self.runner.output)
    if self.runner.output:
      # NOTE(review): in split mode 'total' covers all artifacts' collections
      # (not just artifact_name) and the logged URN is the parent collection —
      # confirm this is the intended log message.
      self.Log("Wrote results from Artifact %s to %s. Collection size %d.",
               artifact_name, self.runner.output.urn, total)
  def _WriteResultToMappedAFF4Location(self, result):
    """If we have a mapping for this result type, write it there."""
    result_type = result.__class__.__name__
    if result_type not in self.aff4_output_map:
      aff4_obj, aff4_attr, operator = (
          self.GetAFF4PathForArtifactResponses(result_type))
      # Cache the opened object.
      self.aff4_output_map[result_type] = (aff4_obj, aff4_attr, operator)
    else:
      aff4_obj, aff4_attr, operator = self.aff4_output_map[result_type]
    if operator == "Append":
      aff4_attr.Append(result)
    elif operator == "Overwrite":
      # We set for each new value, overwriting older ones.
      # NOTE(review): 'result' is never folded into aff4_attr on this path;
      # Set() writes the cached attribute unchanged. Confirm whether result
      # should be assigned to aff4_attr before calling Set().
      aff4_obj.Set(aff4_attr)
  def _FinalizeMappedAFF4Locations(self, artifact_name):
    """Set, flush and log every AFF4 object written to during this run."""
    for aff4_obj, aff4_attr, operator in self.aff4_output_map.values():
      if operator == "Append":
        # For any objects we appended to, we need to do the set now as the new
        # attributes aren't assigned to the AFF4 object yet.
        aff4_obj.Set(aff4_attr)
      aff4_obj.Flush()
      self.Log("Wrote Artifact %s results to %s on %s", artifact_name,
               aff4_obj.urn, aff4_attr.__class__.__name__)
  def GetAFF4PathForArtifactResponses(self, output_type):
    """Use the RDFValue type to find where in AFF4 space to write results.

    Args:
      output_type: The name of a SemanticValue type.

    Returns:
      A tuple of (aff4 object, attribute, operator)

    Raises:
      ArtifactProcessingError: If there is no defined mapping.
    """

    rdf_type = artifact.GRRArtifactMappings.rdf_map.get(output_type)
    if rdf_type is None:
      raise artifact_lib.ArtifactProcessingError(
          "No defined RDF type for %s" % output_type)

    # Mapping entries look like, e.g.:
    # "info/software", "InstalledSoftwarePackages", "INSTALLED_PACKAGES",
    # "Append"
    relative_path, aff4_type, aff4_attribute, operator = rdf_type

    urn = self.client_id.Add(relative_path)
    try:
      result_object = aff4.FACTORY.Open(urn, aff4_type=aff4_type, mode="w",
                                        token=self.token)
    except IOError as e:
      raise artifact_lib.ArtifactProcessingError(
          "Failed to open result object for type %s. %s" % (output_type, e))

    # Instantiate a fresh attribute value from the schema.
    result_attr = getattr(result_object.Schema, aff4_attribute)()
    if not isinstance(result_attr, rdfvalue.RDFValue):
      raise artifact_lib.ArtifactProcessingError(
          "Failed to get attribute %s for output type %s" %
          (aff4_attribute, output_type))

    return result_object, result_attr, operator
def _GetArtifactFromName(self, name):
"""Get an artifact class from the cache in the flow."""
if name in artifact_lib.ArtifactRegistry.artifacts:
return artifact_lib.ArtifactRegistry.artifacts[name]
else:
# If we don't have an artifact, things shouldn't have passed validation
# so we assume its a new one in the datastore.
artifact.LoadArtifactsFromDatastore(token=self.token)
if name not in artifact_lib.ArtifactRegistry.artifacts:
raise RuntimeError("ArtifactCollectorFlow failed due to unknown "
"Artifact %s" % name)
else:
return artifact_lib.ArtifactRegistry.artifacts[name]
  @flow.StateHandler()
  def End(self):
    """Finish the flow, optionally erroring if nothing was collected."""
    # If we got no responses, and user asked for it, we error out.
    collect_count = self.runner.args.request_state.response_count
    if self.args.no_results_errors and collect_count == 0:
      raise artifact_lib.ArtifactProcessingError("Artifact collector returned "
                                                 "0 responses.")
    if self.runner.output:
      urn = self.runner.output.urn
    else:
      # No output collection configured; point the notification at the client.
      urn = self.client_id
    self.Notify("ViewObject", urn,
                "Completed artifact collection of %s. Collected %d. Errors %d."
                % (self.args.artifact_list, collect_count,
                   self.state.failed_count))
| |
from __future__ import unicode_literals
import time
import json
import uuid
import datetime
import boto3
from moto.core import BaseBackend, BaseModel
from .exceptions import (
ResourceNotFoundException,
InvalidParameterException,
ResourceExistsException,
InvalidRequestException,
ClientError
)
from .utils import random_password, secret_arn
class SecretsManager(BaseModel):
    """Minimal model for a Secrets Manager resource; only records its region."""

    def __init__(self, region_name, **kwargs):
        self.region = region_name
class SecretsManagerBackend(BaseBackend):
    """In-memory emulation of the AWS Secrets Manager API.

    Secrets live in ``self.secrets`` keyed by secret name. Each entry holds a
    ``versions`` mapping (version_id -> version dict), a ``default_version_id``
    pointing at the AWSCURRENT version, plus rotation/tag metadata. Most
    public methods return JSON strings shaped like real service responses.
    """

    def __init__(self, region_name=None, **kwargs):
        super(SecretsManagerBackend, self).__init__()
        self.region = region_name
        self.secrets = {}

    def reset(self):
        """Clear all backend state but keep the region (moto convention)."""
        region_name = self.region
        self.__dict__ = {}
        self.__init__(region_name)

    def _is_valid_identifier(self, identifier):
        # This backend only addresses secrets by name, not by full ARN.
        return identifier in self.secrets

    def _unix_time_secs(self, dt):
        """Convert a naive UTC datetime to seconds since the Unix epoch."""
        epoch = datetime.datetime.utcfromtimestamp(0)
        return (dt - epoch).total_seconds()

    def get_secret_value(self, secret_id, version_id, version_stage):
        """Return the secret-value JSON for a version id or staging label.

        Raises:
            ResourceNotFoundException: unknown secret, or no version carries
                the requested staging label.
            InvalidRequestException: the secret is scheduled for deletion.
        """
        if not self._is_valid_identifier(secret_id):
            raise ResourceNotFoundException()

        if not version_id and version_stage:
            # set version_id to match version_stage
            versions_dict = self.secrets[secret_id]['versions']
            for ver_id, ver_val in versions_dict.items():
                if version_stage in ver_val['version_stages']:
                    version_id = ver_id
                    break
            if not version_id:
                raise ResourceNotFoundException()

        # Secrets marked for deletion cannot be read until restored.
        if 'deleted_date' in self.secrets[secret_id]:
            raise InvalidRequestException(
                "An error occurred (InvalidRequestException) when calling the GetSecretValue operation: You tried to \
                perform the operation on a secret that's currently marked deleted."
            )

        secret = self.secrets[secret_id]
        version_id = version_id or secret['default_version_id']
        secret_version = secret['versions'][version_id]

        response_data = {
            "ARN": secret_arn(self.region, secret['secret_id']),
            "Name": secret['name'],
            "VersionId": secret_version['version_id'],
            "VersionStages": secret_version['version_stages'],
            "CreatedDate": secret_version['createdate'],
        }
        # A version normally carries exactly one of string/binary payloads.
        if 'secret_string' in secret_version:
            response_data["SecretString"] = secret_version['secret_string']
        if 'secret_binary' in secret_version:
            response_data["SecretBinary"] = secret_version['secret_binary']

        response = json.dumps(response_data)
        return response

    def create_secret(self, name, secret_string=None, secret_binary=None,
                      tags=None, **kwargs):
        """Create a new secret with an initial AWSCURRENT version.

        ``tags`` defaults to None rather than ``[]`` to avoid the shared
        mutable-default-argument pitfall; None becomes a fresh list in
        ``_add_secret``.
        """
        # error if secret exists
        if name in self.secrets:
            raise ResourceExistsException('A resource with the ID you requested already exists.')

        version_id = self._add_secret(name, secret_string=secret_string,
                                      secret_binary=secret_binary, tags=tags)

        response = json.dumps({
            "ARN": secret_arn(self.region, name),
            "Name": name,
            "VersionId": version_id,
        })
        return response

    def _add_secret(self, secret_id, secret_string=None, secret_binary=None,
                    tags=None, version_id=None, version_stages=None):
        """Create (or replace) a secret version and promote it to AWSCURRENT.

        Returns:
            The version id of the newly stored version.
        """
        if version_stages is None:
            version_stages = ['AWSCURRENT']
        if tags is None:
            # Fresh list per call; a mutable default argument would be shared
            # across secrets and silently mutated by callers.
            tags = []

        if not version_id:
            version_id = str(uuid.uuid4())

        secret_version = {
            'createdate': int(time.time()),
            'version_id': version_id,
            'version_stages': version_stages,
        }
        if secret_string is not None:
            secret_version['secret_string'] = secret_string
        if secret_binary is not None:
            secret_version['secret_binary'] = secret_binary

        if secret_id in self.secrets:
            # remove all old AWSPREVIOUS stages
            for old_version in self.secrets[secret_id]['versions'].values():
                if 'AWSPREVIOUS' in old_version['version_stages']:
                    old_version['version_stages'].remove('AWSPREVIOUS')

            # set old AWSCURRENT secret to AWSPREVIOUS
            previous_current_version_id = self.secrets[secret_id]['default_version_id']
            self.secrets[secret_id]['versions'][previous_current_version_id]['version_stages'] = ['AWSPREVIOUS']

            self.secrets[secret_id]['versions'][version_id] = secret_version
            self.secrets[secret_id]['default_version_id'] = version_id
        else:
            self.secrets[secret_id] = {
                'versions': {
                    version_id: secret_version
                },
                'default_version_id': version_id,
            }

        secret = self.secrets[secret_id]
        secret['secret_id'] = secret_id
        secret['name'] = secret_id
        secret['rotation_enabled'] = False
        secret['rotation_lambda_arn'] = ''
        secret['auto_rotate_after_days'] = 0
        secret['tags'] = tags

        return version_id

    def put_secret_value(self, secret_id, secret_string, version_stages):
        """Store a new version for an existing (or new) secret."""
        version_id = self._add_secret(secret_id, secret_string,
                                      version_stages=version_stages)

        response = json.dumps({
            'ARN': secret_arn(self.region, secret_id),
            'Name': secret_id,
            'VersionId': version_id,
            'VersionStages': version_stages
        })
        return response

    def describe_secret(self, secret_id):
        """Return metadata JSON for a secret (no secret material)."""
        if not self._is_valid_identifier(secret_id):
            raise ResourceNotFoundException

        secret = self.secrets[secret_id]

        response = json.dumps({
            "ARN": secret_arn(self.region, secret['secret_id']),
            "Name": secret['name'],
            "Description": "",
            "KmsKeyId": "",
            "RotationEnabled": secret['rotation_enabled'],
            "RotationLambdaARN": secret['rotation_lambda_arn'],
            "RotationRules": {
                "AutomaticallyAfterDays": secret['auto_rotate_after_days']
            },
            "LastRotatedDate": None,
            "LastChangedDate": None,
            "LastAccessedDate": None,
            "DeletedDate": secret.get('deleted_date', None),
            "Tags": secret['tags']
        })
        return response

    def rotate_secret(self, secret_id, client_request_token=None,
                      rotation_lambda_arn=None, rotation_rules=None):
        """Create a rotated AWSCURRENT version, validating rotation inputs.

        Raises:
            ResourceNotFoundException: unknown secret.
            InvalidRequestException: secret is marked deleted.
            InvalidParameterException: invalid token/ARN/rotation rules.
        """
        rotation_days = 'AutomaticallyAfterDays'

        if not self._is_valid_identifier(secret_id):
            raise ResourceNotFoundException

        if 'deleted_date' in self.secrets[secret_id]:
            raise InvalidRequestException(
                "An error occurred (InvalidRequestException) when calling the RotateSecret operation: You tried to \
                perform the operation on a secret that's currently marked deleted."
            )

        if client_request_token:
            token_length = len(client_request_token)
            if token_length < 32 or token_length > 64:
                msg = (
                    'ClientRequestToken '
                    'must be 32-64 characters long.'
                )
                raise InvalidParameterException(msg)

        if rotation_lambda_arn:
            if len(rotation_lambda_arn) > 2048:
                msg = (
                    'RotationLambdaARN '
                    'must <= 2048 characters long.'
                )
                raise InvalidParameterException(msg)

        if rotation_rules:
            if rotation_days in rotation_rules:
                rotation_period = rotation_rules[rotation_days]
                if rotation_period < 1 or rotation_period > 1000:
                    msg = (
                        'RotationRules.AutomaticallyAfterDays '
                        'must be within 1-1000.'
                    )
                    raise InvalidParameterException(msg)

        secret = self.secrets[secret_id]

        old_secret_version = secret['versions'][secret['default_version_id']]
        new_version_id = client_request_token or str(uuid.uuid4())

        # Pass tags by keyword: previously they were passed positionally into
        # the secret_binary parameter, which corrupted the new version with
        # the tag list and reset the secret's tags to the (shared) default.
        self._add_secret(secret_id,
                         old_secret_version['secret_string'],
                         tags=secret['tags'],
                         version_id=new_version_id,
                         version_stages=['AWSCURRENT'])

        secret['rotation_lambda_arn'] = rotation_lambda_arn or ''
        if rotation_rules:
            secret['auto_rotate_after_days'] = rotation_rules.get(rotation_days, 0)
        if secret['auto_rotate_after_days'] > 0:
            secret['rotation_enabled'] = True

        if 'AWSCURRENT' in old_secret_version['version_stages']:
            old_secret_version['version_stages'].remove('AWSCURRENT')

        response = json.dumps({
            "ARN": secret_arn(self.region, secret['secret_id']),
            "Name": secret['name'],
            "VersionId": new_version_id
        })
        return response

    def get_random_password(self, password_length,
                            exclude_characters, exclude_numbers,
                            exclude_punctuation, exclude_uppercase,
                            exclude_lowercase, include_space,
                            require_each_included_type):
        """Generate a random password JSON, enforcing AWS length limits."""
        # password size must have value less than or equal to 4096
        if password_length > 4096:
            raise ClientError(
                "ClientError: An error occurred (ValidationException) \
                when calling the GetRandomPassword operation: 1 validation error detected: Value '{}' at 'passwordLength' \
                failed to satisfy constraint: Member must have value less than or equal to 4096".format(password_length))
        if password_length < 4:
            raise InvalidParameterException(
                "InvalidParameterException: An error occurred (InvalidParameterException) \
                when calling the GetRandomPassword operation: Password length is too short based on the required types.")

        response = json.dumps({
            "RandomPassword": random_password(password_length,
                                              exclude_characters,
                                              exclude_numbers,
                                              exclude_punctuation,
                                              exclude_uppercase,
                                              exclude_lowercase,
                                              include_space,
                                              require_each_included_type)
        })
        return response

    def list_secret_version_ids(self, secret_id):
        """List all version ids and their staging labels for a secret."""
        secret = self.secrets[secret_id]

        version_list = []
        for version_id, version in secret['versions'].items():
            version_list.append({
                'CreatedDate': int(time.time()),
                'LastAccessedDate': int(time.time()),
                'VersionId': version_id,
                'VersionStages': version['version_stages'],
            })

        response = json.dumps({
            # NOTE(review): real AWS returns a full ARN here; this echoes the
            # bare secret id. Kept as-is for compatibility with callers.
            'ARN': secret['secret_id'],
            'Name': secret['name'],
            'NextToken': '',
            'Versions': version_list,
        })
        return response

    def list_secrets(self, max_results, next_token):
        """Return (secret_list, next_token) describing every stored secret."""
        # TODO implement pagination and limits
        secret_list = []
        for secret in self.secrets.values():
            versions_to_stages = {}
            for version_id, version in secret['versions'].items():
                versions_to_stages[version_id] = version['version_stages']

            secret_list.append({
                "ARN": secret_arn(self.region, secret['secret_id']),
                "DeletedDate": secret.get('deleted_date', None),
                "Description": "",
                "KmsKeyId": "",
                "LastAccessedDate": None,
                "LastChangedDate": None,
                "LastRotatedDate": None,
                "Name": secret['name'],
                "RotationEnabled": secret['rotation_enabled'],
                "RotationLambdaARN": secret['rotation_lambda_arn'],
                "RotationRules": {
                    "AutomaticallyAfterDays": secret['auto_rotate_after_days']
                },
                "SecretVersionsToStages": versions_to_stages,
                "Tags": secret['tags']
            })

        return secret_list, None

    def delete_secret(self, secret_id, recovery_window_in_days,
                      force_delete_without_recovery):
        """Delete (immediately or after a recovery window) a secret.

        Returns:
            (arn, name, deletion_date_unix_secs).
        """
        if not self._is_valid_identifier(secret_id):
            raise ResourceNotFoundException

        if 'deleted_date' in self.secrets[secret_id]:
            raise InvalidRequestException(
                "An error occurred (InvalidRequestException) when calling the DeleteSecret operation: You tried to \
                perform the operation on a secret that's currently marked deleted."
            )

        if recovery_window_in_days and force_delete_without_recovery:
            raise InvalidParameterException(
                "An error occurred (InvalidParameterException) when calling the DeleteSecret operation: You can't \
                use ForceDeleteWithoutRecovery in conjunction with RecoveryWindowInDays."
            )

        if recovery_window_in_days and (recovery_window_in_days < 7 or recovery_window_in_days > 30):
            raise InvalidParameterException(
                "An error occurred (InvalidParameterException) when calling the DeleteSecret operation: The \
                RecoveryWindowInDays value must be between 7 and 30 days (inclusive)."
            )

        deletion_date = datetime.datetime.utcnow()

        if force_delete_without_recovery:
            # Immediate, unrecoverable removal.
            secret = self.secrets.pop(secret_id, None)
        else:
            # Mark for deletion; AWS defaults the recovery window to 30 days.
            deletion_date += datetime.timedelta(days=recovery_window_in_days or 30)
            self.secrets[secret_id]['deleted_date'] = self._unix_time_secs(deletion_date)
            secret = self.secrets.get(secret_id, None)

        if not secret:
            raise ResourceNotFoundException

        arn = secret_arn(self.region, secret['secret_id'])
        name = secret['name']

        return arn, name, self._unix_time_secs(deletion_date)

    def restore_secret(self, secret_id):
        """Undo a pending deletion; returns (arn, name)."""
        if not self._is_valid_identifier(secret_id):
            raise ResourceNotFoundException

        self.secrets[secret_id].pop('deleted_date', None)

        secret = self.secrets[secret_id]

        arn = secret_arn(self.region, secret['secret_id'])
        name = secret['name']

        return arn, name
# Regions in which Secrets Manager is available, per botocore's endpoint data.
available_regions = (
    boto3.session.Session().get_available_regions("secretsmanager")
)
# One backend instance per region, as moto expects for regional services.
secretsmanager_backends = {region: SecretsManagerBackend(region_name=region)
                           for region in available_regions}
| |
import os
import pickle
import unittest
import sys
from collections import defaultdict
import ray
from ray import tune, logger
from ray.tune import Trainable, run_experiments, register_trainable
from ray.tune.error import TuneError
from ray.tune.function_runner import wrap_function
from ray.tune.schedulers.trial_scheduler import FIFOScheduler, TrialScheduler
class FrequentPausesScheduler(FIFOScheduler):
    """Scheduler that pauses every trial after each reported result.

    Forces Tune to repeatedly stop/resume trials, exercising the
    actor-reuse code paths these tests target.
    """

    def on_trial_result(self, trial_runner, trial, result):
        return TrialScheduler.PAUSE
def create_resettable_class():
    """Build a fresh Trainable class whose instances count reset_config calls.

    Instances run two steps, emit their message on stdout/stderr/log, and
    report how many times they have been reset in place.
    """

    class MyResettableClass(Trainable):
        def setup(self, config):
            self.config = config
            self.num_resets = 0
            self.iter = 0
            self.msg = config.get("message", "No message")

        def step(self):
            self.iter += 1
            # Emit on every channel so log-to-file behavior can be asserted.
            print("PRINT_STDOUT: {}".format(self.msg))
            print("PRINT_STDERR: {}".format(self.msg), file=sys.stderr)
            logger.info("LOG_STDERR: {}".format(self.msg))
            return dict(
                id=self.config["id"],
                num_resets=self.num_resets,
                done=self.iter > 1,
                iter=self.iter,
            )

        def save_checkpoint(self, chkpt_dir):
            return {"iter": self.iter}

        def load_checkpoint(self, item):
            self.iter = item["iter"]

        def reset_config(self, new_config):
            # Simulate a trainable that refuses in-place resets.
            if "fake_reset_not_supported" in self.config:
                return False
            self.num_resets += 1
            self.iter = 0
            self.msg = new_config.get("message", "No message")
            return True

    return MyResettableClass
def create_resettable_function(num_resets: defaultdict):
    """Build a function-API trainable whose resets are tallied in *num_resets*.

    Args:
        num_resets: mapping trial_id -> reset count, updated in place.
    """

    def trainable(config, checkpoint_dir=None):
        # Resume from the pickled step counter when a checkpoint is supplied.
        if checkpoint_dir:
            with open(os.path.join(checkpoint_dir, "chkpt"), "rb") as fp:
                step = pickle.load(fp)
        else:
            step = 0

        # Run (at most) two steps, checkpointing and reporting each one.
        while step < 2:
            step += 1
            with tune.checkpoint_dir(step) as checkpoint_dir:
                with open(os.path.join(checkpoint_dir, "chkpt"), "wb") as fp:
                    pickle.dump(step, fp)
            tune.report(**{
                "done": step >= 2,
                "iter": step,
                "id": config["id"]
            })

    trainable = wrap_function(trainable)

    class ResetCountTrainable(trainable):
        def reset_config(self, new_config):
            # Count resets per trial so tests can assert on actor reuse.
            num_resets[self.trial_id] += 1
            return super().reset_config(new_config)

    return ResetCountTrainable
class ActorReuseTest(unittest.TestCase):
    """Tests for Tune's ``reuse_actors`` option.

    FrequentPausesScheduler pauses each trial after every result, forcing
    repeated stop/resume; ``num_resets`` counters then reveal whether
    actors were actually reused instead of recreated.
    """

    def setUp(self):
        ray.init(num_cpus=1, num_gpus=0)
        # Poll trial state quickly so pauses/resumes cycle fast in tests.
        os.environ["TUNE_STATE_REFRESH_PERIOD"] = "0.1"

    def tearDown(self):
        ray.shutdown()

    def _run_trials_with_frequent_pauses(self, trainable, reuse=False):
        # Four single-sample trials (ids 0-3) on one CPU, so trials must
        # share the single available actor slot.
        trials = run_experiments(
            {
                "foo": {
                    "run": trainable,
                    "num_samples": 1,
                    "config": {
                        "id": tune.grid_search([0, 1, 2, 3])
                    },
                }
            },
            reuse_actors=reuse,
            scheduler=FrequentPausesScheduler(),
            verbose=0)
        return trials

    def testTrialReuseDisabled(self):
        # With reuse off, every trial gets a fresh actor: zero resets.
        trials = self._run_trials_with_frequent_pauses(
            create_resettable_class(), reuse=False)
        self.assertEqual([t.last_result["id"] for t in trials], [0, 1, 2, 3])
        self.assertEqual([t.last_result["iter"] for t in trials], [2, 2, 2, 2])
        self.assertEqual([t.last_result["num_resets"] for t in trials],
                         [0, 0, 0, 0])

    def testTrialReuseDisabledFunction(self):
        num_resets = defaultdict(lambda: 0)
        trials = self._run_trials_with_frequent_pauses(
            create_resettable_function(num_resets), reuse=False)
        self.assertEqual([t.last_result["id"] for t in trials], [0, 1, 2, 3])
        self.assertEqual([t.last_result["iter"] for t in trials], [2, 2, 2, 2])
        self.assertEqual([num_resets[t.trial_id] for t in trials],
                         [0, 0, 0, 0])

    def testTrialReuseEnabled(self):
        # With reuse on, the shared actor accumulates resets across trials.
        trials = self._run_trials_with_frequent_pauses(
            create_resettable_class(), reuse=True)
        self.assertEqual([t.last_result["id"] for t in trials], [0, 1, 2, 3])
        self.assertEqual([t.last_result["iter"] for t in trials], [2, 2, 2, 2])
        self.assertEqual([t.last_result["num_resets"] for t in trials],
                         [4, 5, 6, 7])

    def testTrialReuseEnabledFunction(self):
        # Function-API trainables are not reset in place even with reuse on.
        num_resets = defaultdict(lambda: 0)
        trials = self._run_trials_with_frequent_pauses(
            create_resettable_function(num_resets), reuse=True)
        self.assertEqual([t.last_result["id"] for t in trials], [0, 1, 2, 3])
        self.assertEqual([t.last_result["iter"] for t in trials], [2, 2, 2, 2])
        self.assertEqual([num_resets[t.trial_id] for t in trials],
                         [0, 0, 0, 0])

    def testReuseEnabledError(self):
        # reset_config returning False with reuse enabled must raise.
        def run():
            run_experiments(
                {
                    "foo": {
                        "run": create_resettable_class(),
                        "max_failures": 1,
                        "num_samples": 1,
                        "config": {
                            "id": tune.grid_search([0, 1, 2, 3]),
                            "fake_reset_not_supported": True
                        },
                    }
                },
                reuse_actors=True,
                scheduler=FrequentPausesScheduler())

        self.assertRaises(TuneError, lambda: run())

    def testTrialReuseLogToFile(self):
        register_trainable("foo2", create_resettable_class())

        # Log to default files
        [trial1, trial2] = tune.run(
            "foo2",
            config={
                "message": tune.grid_search(["First", "Second"]),
                "id": -1
            },
            log_to_file=True,
            scheduler=FrequentPausesScheduler(),
            reuse_actors=True).trials

        # Check trial 1: its own output only, despite the reused actor.
        self.assertEqual(trial1.last_result["num_resets"], 2)
        self.assertTrue(os.path.exists(os.path.join(trial1.logdir, "stdout")))
        self.assertTrue(os.path.exists(os.path.join(trial1.logdir, "stderr")))
        with open(os.path.join(trial1.logdir, "stdout"), "rt") as fp:
            content = fp.read()
            self.assertIn("PRINT_STDOUT: First", content)
            self.assertNotIn("PRINT_STDOUT: Second", content)
        with open(os.path.join(trial1.logdir, "stderr"), "rt") as fp:
            content = fp.read()
            self.assertIn("PRINT_STDERR: First", content)
            self.assertIn("LOG_STDERR: First", content)
            self.assertNotIn("PRINT_STDERR: Second", content)
            self.assertNotIn("LOG_STDERR: Second", content)

        # Check trial 2
        self.assertEqual(trial2.last_result["num_resets"], 3)
        self.assertTrue(os.path.exists(os.path.join(trial2.logdir, "stdout")))
        self.assertTrue(os.path.exists(os.path.join(trial2.logdir, "stderr")))
        with open(os.path.join(trial2.logdir, "stdout"), "rt") as fp:
            content = fp.read()
            self.assertIn("PRINT_STDOUT: Second", content)
            self.assertNotIn("PRINT_STDOUT: First", content)
        with open(os.path.join(trial2.logdir, "stderr"), "rt") as fp:
            content = fp.read()
            self.assertIn("PRINT_STDERR: Second", content)
            self.assertIn("LOG_STDERR: Second", content)
            self.assertNotIn("PRINT_STDERR: First", content)
            self.assertNotIn("LOG_STDERR: First", content)
if __name__ == "__main__":
    import pytest
    # Forward pytest's exit code so CI sees failures when run as a script.
    sys.exit(pytest.main(["-v", __file__]))
| |
# -*- coding: utf-8 -*-
from __future__ import division
import copy
import functools
import logging
import math
import re
import unicodedata
from elasticsearch import (
ConnectionError,
Elasticsearch,
NotFoundError,
RequestError,
TransportError,
helpers,
)
from modularodm import Q
import six
from framework import sentry
from framework.celery_tasks import app as celery_app
from framework.mongo.utils import paginated
from website import settings
from website.filters import gravatar
from website.models import User, Node
from website.project.licenses import serialize_node_license_record
from website.search import exceptions
from website.search.util import build_query
from website.util import sanitize
from website.views import validate_page_num
logger = logging.getLogger(__name__)
# These are the doc_types that exist in the search database
ALIASES = {
'project': 'Projects',
'component': 'Components',
'registration': 'Registrations',
'user': 'Users',
'total': 'Total',
'file': 'Files',
'institution': 'Institutions',
}
# Prevent tokenizing and stop word removal.
NOT_ANALYZED_PROPERTY = {'type': 'string', 'index': 'not_analyzed'}
# Perform stemming on the field it's applied to.
ENGLISH_ANALYZER_PROPERTY = {'type': 'string', 'analyzer': 'english'}
INDEX = settings.ELASTIC_INDEX
try:
es = Elasticsearch(
settings.ELASTIC_URI,
request_timeout=settings.ELASTIC_TIMEOUT
)
logging.getLogger('elasticsearch').setLevel(logging.WARN)
logging.getLogger('elasticsearch.trace').setLevel(logging.WARN)
logging.getLogger('urllib3').setLevel(logging.WARN)
logging.getLogger('requests').setLevel(logging.WARN)
es.cluster.health(wait_for_status='yellow')
except ConnectionError as e:
message = (
'The SEARCH_ENGINE setting is set to "elastic", but there '
'was a problem starting the elasticsearch interface. Is '
'elasticsearch running?'
)
try:
sentry.log_exception()
sentry.log_message(message)
except AssertionError: # App has not yet been initialized
logger.exception(message)
es = None
def requires_search(func):
    """Decorator: run *func* against Elasticsearch, translating low-level
    elasticsearch client errors into ``website.search.exceptions`` types.

    If the module-level ``es`` client failed to initialize, logs to sentry
    and raises SearchUnavailableError instead of calling *func*.
    """
    @functools.wraps(func)  # preserve the wrapped function's name/docstring
    def wrapped(*args, **kwargs):
        if es is not None:
            try:
                return func(*args, **kwargs)
            except ConnectionError:
                raise exceptions.SearchUnavailableError('Could not connect to elasticsearch')
            except NotFoundError as e:
                raise exceptions.IndexNotFoundError(e.error)
            except RequestError as e:
                # Malformed user queries surface as ParseExceptions from ES.
                if 'ParseException' in e.error:
                    raise exceptions.MalformedQueryError(e.error)
                raise exceptions.SearchException(e.error)
            except TransportError as e:
                # Catch and wrap generic uncaught ES error codes. TODO: Improve fix for https://openscience.atlassian.net/browse/OSF-4538
                raise exceptions.SearchException(e.error)

        sentry.log_message('Elastic search action failed. Is elasticsearch running?')
        raise exceptions.SearchUnavailableError('Failed to connect to elasticsearch')
    return wrapped
@requires_search
def get_aggregations(query, doc_type):
    """Aggregate hit counts per license id for *query*.

    Mutates *query* in place to add the aggregation clause. Returns
    {doc_type: {license_id: doc_count}} plus a 'total' hit count.
    """
    query['aggregations'] = {
        'licenses': {
            'terms': {
                'field': 'license.id'
            }
        }
    }

    # search_type='count' returns aggregations only, no hit documents.
    res = es.search(index=INDEX, doc_type=doc_type, search_type='count', body=query)
    ret = {
        doc_type: {
            item['key']: item['doc_count']
            for item in agg['buckets']
        }
        for doc_type, agg in res['aggregations'].iteritems()  # Python 2 dict API
    }
    ret['total'] = res['hits']['total']
    return ret
@requires_search
def get_counts(count_query, clean=True):
    """Return per-doc_type hit counts for *count_query*.

    Args:
        count_query: ES query body (dict); mutated in place to add the
            terms aggregation on ``_type``.
        clean: unused; kept for backward compatibility with existing callers.

    Returns:
        dict mapping each known doc_type (see ALIASES) to its hit count,
        plus 'total', the sum of those counts.
    """
    count_query['aggregations'] = {
        'counts': {
            'terms': {
                'field': '_type',
            }
        }
    }

    # search_type='count' skips fetching hit documents.
    res = es.search(index=INDEX, doc_type=None, search_type='count', body=count_query)
    counts = {x['key']: x['doc_count'] for x in res['aggregations']['counts']['buckets'] if x['key'] in ALIASES.keys()}

    # sum() consumes the values view directly; no intermediate list needed.
    counts['total'] = sum(counts.values())
    return counts
@requires_search
def get_tags(query, index):
    """Return tag-cloud buckets for *query* via a terms aggregation on tags."""
    query['aggregations'] = {
        'tag_cloud': {
            'terms': {'field': 'tags'}
        }
    }

    raw = es.search(index=index, doc_type=None, body=query)
    return raw['aggregations']['tag_cloud']['buckets']
@requires_search
def search(query, index=None, doc_type='_all'):
    """Search for a query

    :param query: The substring of the username/project name/tag to search for
    :param index:
    :param doc_type:

    :return: List of dictionaries, each containing the results, counts, tags and typeAliases
        results: All results returned by the query, that are within the index and search type
        counts: A dictionary in which keys are types and values are counts for that type, e.g, count['total'] is the sum of the other counts
        tags: A list of tags that are returned by the search query
        typeAliases: the doc_types that exist in the search database
    """
    index = index or INDEX
    # Each sub-query mutates its body, so work on independent copies.
    tag_query = copy.deepcopy(query)
    aggs_query = copy.deepcopy(query)
    count_query = copy.deepcopy(query)

    # Pagination/sort keys don't apply to aggregation-only queries.
    for key in ['from', 'size', 'sort']:
        try:
            del tag_query[key]
            del aggs_query[key]
            del count_query[key]
        except KeyError:
            pass
    tags = get_tags(tag_query, index)
    # Aggregations/counts should not be narrowed by the active filter.
    try:
        del aggs_query['query']['filtered']['filter']
        del count_query['query']['filtered']['filter']
    except KeyError:
        pass
    aggregations = get_aggregations(aggs_query, doc_type=doc_type)
    # NOTE(review): get_counts' second parameter is 'clean', not an index —
    # confirm passing 'index' here is intentional.
    counts = get_counts(count_query, index)

    # Run the real query and get the results
    raw_results = es.search(index=index, doc_type=doc_type, body=query)
    results = [hit['_source'] for hit in raw_results['hits']['hits']]

    return_value = {
        'results': format_results(results),
        'counts': counts,
        'aggs': aggregations,
        'tags': tags,
        'typeAliases': ALIASES
    }
    return return_value
def format_results(results):
    """Post-process raw search hits, attaching URLs and parent metadata.

    User hits get a profile URL, file hits get parent links, and
    project-like hits are fully reshaped by format_result.
    """
    formatted = []
    for hit in results:
        category = hit.get('category')
        if category == 'user':
            hit['url'] = '/profile/' + hit['id']
        elif category == 'file':
            parent = load_parent(hit.get('parent_id'))
            hit['parent_url'] = parent.get('url') if parent else None
            hit['parent_title'] = parent.get('title') if parent else None
        elif category in {'project', 'component', 'registration'}:
            hit = format_result(hit, hit.get('parent_id'))
        formatted.append(hit)
    return formatted
def format_result(result, parent_id=None):
    """Shape a project/component/registration hit for the search UI.

    Parent metadata (when a parent_id is supplied) overrides the hit's own
    registration flag and suppresses its description.
    """
    parent_info = load_parent(parent_id)
    has_parent = parent_info is not None

    formatted_result = {
        'contributors': result['contributors'],
        'wiki_link': result['url'] + 'wiki/',
        # TODO: Remove unescape_entities when mako html safe comes in
        'title': sanitize.unescape_entities(result['title']),
        'url': result['url'],
        'is_component': has_parent,
        'parent_title': sanitize.unescape_entities(parent_info.get('title')) if has_parent else None,
        'parent_url': parent_info.get('url') if has_parent else None,
        'tags': result['tags'],
        'is_registration': (parent_info.get('is_registration') if has_parent
                            else result['is_registration']),
        'is_retracted': result['is_retracted'],
        'is_pending_retraction': result['is_pending_retraction'],
        'embargo_end_date': result['embargo_end_date'],
        'is_pending_embargo': result['is_pending_embargo'],
        'description': None if has_parent else result['description'],
        'category': result.get('category'),
        'date_created': result.get('date_created'),
        'date_registered': result.get('registered_date'),
        'n_wikis': len(result['wikis']),
        'license': result.get('license'),
        'affiliated_institutions': result.get('affiliated_institutions'),
    }

    return formatted_result
def load_parent(parent_id):
    """Load display info for a parent node.

    :param parent_id: id of the parent node; may be None
    :return: None when the node cannot be loaded; otherwise a dict with
        ``title``, ``url``, ``is_registration`` and ``id`` — masked with
        placeholder values when the parent is private.
    """
    parent = Node.load(parent_id)
    if parent is None:
        return None
    # The original code re-tested `parent is not None` here, which is dead
    # after the early return above.
    if parent.is_public:
        return {
            'title': parent.title,
            'url': parent.url,
            'is_registration': parent.is_registration,
            'id': parent._id,
        }
    # Private parents: expose only a placeholder, never real metadata.
    return {
        'title': '-- private project --',
        'url': '',
        'is_registration': None,
        'id': None,
    }
# Node categories that are indexed under the 'component' doc type
# (see get_doctype_from_node below).
COMPONENT_CATEGORIES = set(settings.NODE_CATEGORY_MAP.keys())
def get_doctype_from_node(node):
    """Map a node to the ElasticSearch doc_type under which it is indexed.

    Registrations win over everything; top-level nodes are 'project';
    known component categories collapse to 'component'; anything else
    keeps its own category name.
    """
    if node.is_registration:
        return 'registration'
    if node.parent_node is None:
        # ElasticSearch categorizes top-level projects differently than children
        return 'project'
    if node.category in COMPONENT_CATEGORIES:
        return 'component'
    return node.category
@celery_app.task(bind=True, max_retries=5, default_retry_delay=60)
def update_node_async(self, node_id, index=None, bulk=False):
    """Celery task: (re-)index a single node, retrying on any failure.

    Retries up to 5 times with a 60 second delay (see decorator args).

    :param node_id: id of the node to load and index
    :param index: optional ES index name; defaults inside update_node
    :param bulk: passed through to update_node
    """
    node = Node.load(node_id)
    try:
        update_node(node=node, index=index, bulk=bulk)
    except Exception as exc:
        # Any exception triggers a celery retry rather than failing silently.
        self.retry(exc=exc)
@requires_search
def update_node(node, index=None, bulk=False):
    """Index (or delete) the ES document for a node, plus its files/wikis.

    Side effects: re-indexes every OsfStorage file on the node, and either
    deletes the node's document (deleted/private/archiving nodes) or
    indexes a freshly built document.

    :param node: Node to index
    :param index: ES index name; defaults to the module-level INDEX
    :param bulk: when True, return the document instead of indexing it
        (used by the bulk-update helpers)
    :return: the elastic document dict when bulk=True, else None
    """
    index = index or INDEX
    # Imported locally — presumably to avoid a circular import; TODO confirm.
    from website.addons.wiki.model import NodeWikiPage
    category = get_doctype_from_node(node)
    elastic_document_id = node._id
    parent_id = node.parent_id
    from website.files.models.osfstorage import OsfStorageFile
    # Keep file documents in sync with the node's visibility.
    for file_ in paginated(OsfStorageFile, Q('node', 'eq', node)):
        update_file(file_, index=index)
    if node.is_deleted or not node.is_public or node.archiving:
        delete_doc(elastic_document_id, node)
    else:
        try:
            normalized_title = six.u(node.title)
        except TypeError:
            # Title is already unicode on py2; six.u only accepts str.
            normalized_title = node.title
        # ASCII-fold the title so accented characters are searchable.
        normalized_title = unicodedata.normalize('NFKD', normalized_title).encode('ascii', 'ignore')
        elastic_document = {
            'id': elastic_document_id,
            'contributors': [
                {
                    'fullname': x.fullname,
                    'url': x.profile_url if x.is_active else None
                }
                for x in node.visible_contributors
                if x is not None
            ],
            'title': node.title,
            'normalized_title': normalized_title,
            'category': category,
            'public': node.is_public,
            'tags': [tag._id for tag in node.tags if tag],
            'description': node.description,
            'url': node.url,
            'is_registration': node.is_registration,
            'is_pending_registration': node.is_pending_registration,
            'is_retracted': node.is_retracted,
            'is_pending_retraction': node.is_pending_retraction,
            'embargo_end_date': node.embargo_end_date.strftime('%A, %b. %d, %Y') if node.embargo_end_date else False,
            'is_pending_embargo': node.is_pending_embargo,
            'registered_date': node.registered_date,
            'wikis': {},
            'parent_id': parent_id,
            'date_created': node.date_created,
            'license': serialize_node_license_record(node.license),
            'affiliated_institutions': [inst.name for inst in node.affiliated_institutions],
            'boost': int(not node.is_registration) + 1,  # This is for making registered projects less relevant
        }
        # Retracted registrations must not expose wiki content.
        if not node.is_retracted:
            for wiki in [
                NodeWikiPage.load(x)
                for x in node.wiki_pages_current.values()
            ]:
                elastic_document['wikis'][wiki.page_name] = wiki.raw_text(node)
        if bulk:
            return elastic_document
        else:
            es.index(index=index, doc_type=category, id=elastic_document_id, body=elastic_document, refresh=True)
def bulk_update_nodes(serialize, nodes, index=None):
    """Bulk-update the ES documents for a collection of nodes.

    :param serialize: callable Node -> dict producing a partial document;
        a falsy result skips the node
    :param nodes: projects, components or registrations to update
    :param index: ES index name; defaults to the module-level INDEX
    :return: result of helpers.bulk, or None when there was nothing to do
    """
    index = index or INDEX
    actions = [
        {
            '_op_type': 'update',
            '_index': index,
            '_id': node._id,
            '_type': get_doctype_from_node(node),
            'doc': doc,
        }
        for node, doc in ((node, serialize(node)) for node in nodes)
        if doc
    ]
    if actions:
        return helpers.bulk(es, actions)
def serialize_contributors(node):
    """Build the ``contributors`` fragment of a node's ES document.

    Only active, non-None visible contributors are included.
    """
    contributors = []
    for user in node.visible_contributors:
        if user is None or not user.is_active:
            continue
        contributors.append({
            'fullname': user.fullname,
            'url': user.profile_url if user.is_active else None,
        })
    return {'contributors': contributors}
# Convenience wrapper: bulk-update only the contributors field of many nodes.
bulk_update_contributors = functools.partial(bulk_update_nodes, serialize_contributors)
@requires_search
def update_user(user, index=None):
    """Index (or delete) the ES document for a user.

    Inactive users (merged, deactivated, etc.) have their document removed;
    active users get a fresh document with ASCII-normalized name variants
    for accent-insensitive search.

    :param user: User to index
    :param index: ES index name; defaults to the module-level INDEX
    """
    index = index or INDEX
    if not user.is_active:
        try:
            es.delete(index=index, doc_type='user', id=user._id, refresh=True, ignore=[404])
        except NotFoundError:
            pass
        return
    names = dict(
        fullname=user.fullname,
        given_name=user.given_name,
        family_name=user.family_name,
        middle_names=user.middle_names,
        suffix=user.suffix
    )
    normalized_names = {}
    for key, val in names.items():
        if val is not None:
            try:
                val = six.u(val)
            except TypeError:
                pass  # This is fine, will only happen in 2.x if val is already unicode
            # ASCII-fold so accented names match unaccented queries.
            normalized_names[key] = unicodedata.normalize('NFKD', val).encode('ascii', 'ignore')
    user_doc = {
        'id': user._id,
        'user': user.fullname,
        'normalized_user': normalized_names['fullname'],
        'normalized_names': normalized_names,
        'names': names,
        # Most recent job/school get full weight; the rest are heavily
        # down-boosted via the 'all_*' fields (see create_index mapping).
        'job': user.jobs[0]['institution'] if user.jobs else '',
        'job_title': user.jobs[0]['title'] if user.jobs else '',
        'all_jobs': [job['institution'] for job in user.jobs[1:]],
        'school': user.schools[0]['institution'] if user.schools else '',
        'all_schools': [school['institution'] for school in user.schools],
        'category': 'user',
        'degree': user.schools[0]['degree'] if user.schools else '',
        'social': user.social_links,
        'boost': 2,  # TODO(fabianvf): Probably should make this a constant or something
    }
    es.index(index=index, doc_type='user', body=user_doc, id=user._id, refresh=True)
@requires_search
def update_file(file_, index=None, delete=False):
    """Index (or delete) the ES document for an OsfStorage file.

    The document is removed when deletion is requested explicitly or when
    the owning node is private, deleted, or archiving.

    :param file_: file object to index
    :param index: ES index name; defaults to the module-level INDEX
    :param delete: force removal of the document
    """
    index = index or INDEX
    if not file_.node.is_public or delete or file_.node.is_deleted or file_.node.archiving:
        es.delete(
            index=index,
            doc_type='file',
            id=file_._id,
            refresh=True,
            ignore=[404]
        )
        return
    # We build URLs manually here so that this function can be
    # run outside of a Flask request context (e.g. in a celery task)
    file_deep_url = '/{node_id}/files/{provider}{path}/'.format(
        node_id=file_.node._id,
        provider=file_.provider,
        path=file_.path,
    )
    node_url = '/{node_id}/'.format(node_id=file_.node._id)
    file_doc = {
        'id': file_._id,
        'deep_url': file_deep_url,
        'tags': [tag._id for tag in file_.tags],
        'name': file_.name,
        'category': 'file',
        'node_url': node_url,
        'node_title': file_.node.title,
        'parent_id': file_.node.parent_node._id if file_.node.parent_node else None,
        'is_registration': file_.node.is_registration,
        'is_retracted': file_.node.is_retracted
    }
    es.index(
        index=index,
        doc_type='file',
        body=file_doc,
        id=file_._id,
        refresh=True
    )
@requires_search
def update_institution(institution, index=None):
    """Index (or delete) the ES document for an institution.

    :param institution: institution to index; deleted institutions have
        their document removed instead
    :param index: ES index name; defaults to the module-level INDEX
    """
    index = index or INDEX
    institution_id = institution._id
    if institution.is_deleted:
        # 404 is ignored in case the document was never indexed.
        es.delete(index=index, doc_type='institution', id=institution_id,
                  refresh=True, ignore=[404])
        return
    body = {
        'id': institution_id,
        'url': '/institutions/{}/'.format(institution._id),
        'logo_path': institution.logo_path,
        'category': 'institution',
        'name': institution.name,
    }
    es.index(index=index, doc_type='institution', body=body,
             id=institution_id, refresh=True)
@requires_search
def delete_all():
    """Drop the entire primary search index (all document types)."""
    delete_index(INDEX)
@requires_search
def delete_index(index):
    """Delete the named ES index; a missing index (404) is ignored."""
    es.indices.delete(index, ignore=[404])
@requires_search
def create_index(index=None):
    '''Creates index with some specified mappings to begin with,
    all of which are applied to all projects, components, and registrations.
    '''
    index = index or INDEX
    document_types = ['project', 'component', 'registration', 'user', 'file', 'institution']
    project_like_types = ['project', 'component', 'registration']
    # Fields that get the English analyzer on project-like types.
    analyzed_fields = ['title', 'description']
    es.indices.create(index, ignore=[400])  # HTTP 400 if index already exists
    for type_ in document_types:
        # Base mapping shared by all doc types: exact-match tags and license.
        mapping = {
            'properties': {
                'tags': NOT_ANALYZED_PROPERTY,
                'license': {
                    'properties': {
                        'id': NOT_ANALYZED_PROPERTY,
                        'name': NOT_ANALYZED_PROPERTY,
                    }
                }
            }
        }
        if type_ in project_like_types:
            analyzers = {field: ENGLISH_ANALYZER_PROPERTY
                         for field in analyzed_fields}
            mapping['properties'].update(analyzers)
        if type_ == 'user':
            # Current job/school dominate relevance; historical entries are
            # down-boosted so they barely affect ranking.
            fields = {
                'job': {
                    'type': 'string',
                    'boost': '1',
                },
                'all_jobs': {
                    'type': 'string',
                    'boost': '0.01',
                },
                'school': {
                    'type': 'string',
                    'boost': '1',
                },
                'all_schools': {
                    'type': 'string',
                    'boost': '0.01'
                },
            }
            mapping['properties'].update(fields)
        es.indices.put_mapping(index=index, doc_type=type_, body=mapping, ignore=[400, 404])
@requires_search
def delete_doc(elastic_document_id, node, index=None, category=None):
    """Delete a node's document from ES.

    :param elastic_document_id: document id to delete
    :param node: node the document belongs to (used to derive the doc type)
    :param index: ES index name; defaults to the module-level INDEX
    :param category: explicit doc type; derived from the node when omitted
    """
    index = index or INDEX
    if category is None:
        # BUG FIX: the previous expression
        #   category or 'registration' if node.is_registration else node.project_or_component
        # parsed as `(category or 'registration') if ... else ...`, so an
        # explicitly passed `category` was ignored for non-registrations.
        category = ('registration' if node.is_registration
                    else node.project_or_component)
    es.delete(index=index, doc_type=category, id=elastic_document_id, refresh=True, ignore=[404])
@requires_search
def search_contributor(query, page=0, size=10, exclude=None, current_user=None):
    """Search for contributors to add to a project using elastic search. Request must
    include JSON data with a "query" field.

    :param query: The substring of the username to search for
    :param page: For pagination, the page number to use for results
    :param size: For pagination, the number of results per page
    :param exclude: A list of User objects to exclude from the search
    :param current_user: A User object of the current user
    :return: List of dictionaries, each containing the ID, full name,
        most recent employment and education, gravatar URL of an OSF user
    """
    start = (page * size)
    items = re.split(r'[\s-]+', query)
    exclude = exclude or []
    # ASCII-fold each query term so accented names match unaccented input.
    normalized_items = []
    for item in items:
        try:
            normalized_item = six.u(item)
        except TypeError:
            # Already unicode (py2) — six.u only accepts str.
            normalized_item = item
        normalized_item = unicodedata.normalize('NFKD', normalized_item).encode('ascii', 'ignore')
        normalized_items.append(normalized_item)
    items = normalized_items
    # Fuzzy prefix match on every term, minus explicitly excluded user ids.
    query = ' AND '.join('{}*~'.format(re.escape(item)) for item in items) + \
            ''.join(' NOT id:"{}"'.format(excluded._id) for excluded in exclude)
    results = search(build_query(query, start=start, size=size), index=INDEX, doc_type='user')
    docs = results['results']
    pages = math.ceil(results['counts'].get('user', 0) / size)
    validate_page_num(page, pages)
    users = []
    for doc in docs:
        # TODO: use utils.serialize_user
        user = User.load(doc['id'])
        # BUG FIX: the None check must happen before user._id is read below;
        # previously a stale search hit for an unloadable user raised
        # AttributeError instead of being logged and skipped.
        if user is None:
            logger.error('Could not load user {0}'.format(doc['id']))
            continue
        if current_user and current_user._id == user._id:
            # Sentinel: "self" — the front end renders this specially.
            n_projects_in_common = -1
        elif current_user:
            n_projects_in_common = current_user.n_projects_in_common(user)
        else:
            n_projects_in_common = 0
        if user.is_active:  # exclude merged, unregistered, etc.
            current_employment = None
            education = None
            if user.jobs:
                current_employment = user.jobs[0]['institution']
            if user.schools:
                education = user.schools[0]['institution']
            users.append({
                'fullname': doc['user'],
                'id': doc['id'],
                'employment': current_employment,
                'education': education,
                'n_projects_in_common': n_projects_in_common,
                'gravatar_url': gravatar(
                    user,
                    use_ssl=True,
                    size=settings.PROFILE_IMAGE_MEDIUM
                ),
                'profile_url': user.profile_url,
                'registered': user.is_registered,
                'active': user.is_active
            })
    return {
        'users': users,
        'total': results['counts']['total'],
        'pages': pages,
        'page': page,
    }
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Wrappers around standard crypto data elements.
Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
"""
import base64
import binascii
import io
import os
import typing as ty
from castellan.common import exception as castellan_exception
from castellan.common.objects import passphrase
from castellan import key_manager
from cryptography.hazmat import backends
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives import serialization
from cryptography import x509
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils.secretutils import md5
import paramiko
import nova.conf
from nova import context as nova_context
from nova import exception
from nova.i18n import _
from nova import objects
from nova import utils
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
# Lazily-initialized singleton key manager; see _get_key_manager().
_KEYMGR = None
# Number of random bytes generated for a new vTPM secret passphrase.
_VTPM_SECRET_BYTE_LENGTH = 384
def _get_key_manager():
    """Return the process-wide castellan key manager, creating it on demand."""
    global _KEYMGR
    if _KEYMGR is not None:
        return _KEYMGR
    _KEYMGR = key_manager.API(configuration=CONF)
    return _KEYMGR
def generate_fingerprint(public_key: str) -> str:
    """Return the colon-separated MD5 fingerprint of an SSH public key.

    :param public_key: key in OpenSSH one-line format ('<type> <base64> ...')
    :raises exception.InvalidKeypair: if the key cannot be parsed.
    """
    try:
        # Validate the key material; load_ssh_public_key raises on malformed
        # input. The returned object is unused since pyca/cryptography does
        # not have a fingerprint method.
        serialization.load_ssh_public_key(
            public_key.encode('utf-8'), backends.default_backend())
        key_blob = base64.b64decode(public_key.split(' ')[1])
        digest = md5(key_blob, usedforsecurity=False).hexdigest()
        # Pair up hex digits: 'abcd...' -> 'ab:cd:...'.
        return ':'.join(digest[i:i + 2] for i in range(0, len(digest), 2))
    except Exception:
        raise exception.InvalidKeypair(
            reason=_('failed to generate fingerprint'))
def generate_x509_fingerprint(pem_key: ty.Union[bytes, str]) -> str:
    """Return the colon-separated SHA1 fingerprint of a PEM x509 certificate.

    :param pem_key: PEM-encoded certificate as bytes or text
    :raises exception.InvalidKeypair: if the certificate cannot be parsed.
    """
    try:
        # Keep the encode inside the try so decoding errors also map to
        # InvalidKeypair, matching the historical behavior.
        if isinstance(pem_key, str):
            pem_key = pem_key.encode('utf-8')
        certificate = x509.load_pem_x509_certificate(
            pem_key, backends.default_backend())
        hex_digest = binascii.hexlify(
            certificate.fingerprint(hashes.SHA1())).decode('ascii')
        return ':'.join(
            hex_digest[i:i + 2] for i in range(0, len(hex_digest), 2))
    except (ValueError, TypeError, binascii.Error) as ex:
        raise exception.InvalidKeypair(
            reason=_('failed to generate X509 fingerprint. '
                     'Error message: %s') % ex)
def generate_key_pair(bits: int = 2048) -> ty.Tuple[str, str, str]:
    """Generate an RSA SSH keypair.

    :param bits: RSA modulus size in bits
    :return: tuple of (PEM private key, one-line OpenSSH public key,
        MD5 fingerprint of the public key)
    """
    key = paramiko.RSAKey.generate(bits)
    keyout = io.StringIO()
    key.write_private_key(keyout)
    private_key = keyout.getvalue()
    public_key = '%s %s Generated-by-Nova' % (key.get_name(), key.get_base64())
    fingerprint = generate_fingerprint(public_key)
    return (private_key, public_key, fingerprint)
def ssh_encrypt_text(ssh_public_key: str, text: ty.Union[str, bytes]) -> bytes:
    """Encrypt text with an ssh public key.

    If text is a Unicode string, encode it to UTF-8.

    :raises exception.EncryptionFailure: if the key is invalid or
        encryption fails for any reason.
    """
    payload = text.encode('utf-8') if isinstance(text, str) else text
    try:
        loaded_key = serialization.load_ssh_public_key(
            ssh_public_key.encode('utf-8'), backends.default_backend())
        return loaded_key.encrypt(payload, padding.PKCS1v15())
    except Exception as exc:
        raise exception.EncryptionFailure(reason=str(exc))
def generate_winrm_x509_cert(
    user_id: str,
    bits: int = 2048
) -> ty.Tuple[str, str, str]:
    """Generate a cert for passwordless auth for user in project.

    Shells out to openssl twice: once to create a self-signed cert with
    a client-auth extension, then again to bundle the private key into a
    passwordless PKCS#12 blob.

    :param user_id: identity embedded in the subject CN and UPN SAN
    :param bits: RSA key size in bits
    :return: tuple of (base64 PKCS#12 private key bundle, PEM certificate,
        SHA1 fingerprint of the certificate)
    """
    subject = '/CN=%s' % user_id
    upn = '%s@localhost' % user_id
    # Temp dir holds the key and openssl config; removed automatically.
    with utils.tempdir() as tmpdir:
        keyfile = os.path.abspath(os.path.join(tmpdir, 'temp.key'))
        conffile = os.path.abspath(os.path.join(tmpdir, 'temp.conf'))
        _create_x509_openssl_config(conffile, upn)
        # Self-signed, 10-year cert with the v3_req_client extension from
        # the generated config.
        out, _ = processutils.execute(
            'openssl', 'req', '-x509', '-nodes', '-days', '3650',
            '-config', conffile, '-newkey', 'rsa:%s' % bits,
            '-outform', 'PEM', '-keyout', keyfile, '-subj', subject,
            '-extensions', 'v3_req_client',
            binary=True)
        certificate = out.decode('utf-8')
        # Bundle cert + key into PKCS#12 with an empty export password.
        out, _ = processutils.execute(
            'openssl', 'pkcs12', '-export', '-inkey', keyfile, '-password',
            'pass:', process_input=out, binary=True)
        private_key = base64.b64encode(out).decode('ascii')
        fingerprint = generate_x509_fingerprint(certificate)
    return (private_key, certificate, fingerprint)
def _create_x509_openssl_config(conffile: str, upn: str):
content = ("distinguished_name = req_distinguished_name\n"
"[req_distinguished_name]\n"
"[v3_req_client]\n"
"extendedKeyUsage = clientAuth\n"
"subjectAltName = otherName:""1.3.6.1.4.1.311.20.2.3;UTF8:%s\n")
with open(conffile, 'w') as file:
file.write(content % upn)
def ensure_vtpm_secret(
    context: nova_context.RequestContext,
    instance: 'objects.Instance',
) -> ty.Tuple[str, str]:
    """Communicates with the key manager service to retrieve or create a secret
    for an instance's emulated TPM.

    When creating a secret, its UUID is saved to the instance's system_metadata
    as ``vtpm_secret_uuid``.

    :param context: Nova auth context.
    :param instance: Instance object.
    :return: A tuple comprising (secret_uuid, passphrase).
    :raise: castellan_exception.ManagedObjectNotFoundError if communication
        with the key manager API fails, or if a vtpm_secret_uuid was present in
        the instance's system metadata but could not be found in the key
        manager service.
    """
    key_mgr = _get_key_manager()

    secret_uuid = instance.system_metadata.get('vtpm_secret_uuid')
    if secret_uuid is not None:
        # Try to retrieve the secret from the key manager
        try:
            secret = key_mgr.get(context, secret_uuid)
            # assert secret_uuid == secret.id ?
            LOG.debug(
                "Found existing vTPM secret with UUID %s.",
                secret_uuid, instance=instance)
            return secret.id, secret.get_encoded()
        except castellan_exception.ManagedObjectNotFoundError:
            # A recorded UUID with no backing secret is unrecoverable;
            # re-raise after warning rather than silently minting a new one.
            LOG.warning(
                "Despite being set on the instance, failed to find a vTPM "
                "secret with UUID %s. This should only happen if the secret "
                "was manually deleted from the key manager service. Your vTPM "
                "is likely to be unrecoverable.",
                secret_uuid, instance=instance)
            raise

    # If we get here, the instance has no vtpm_secret_uuid. Create a new one
    # and register it with the key manager.
    # NOTE(review): the passphrase is stored and returned as base64-encoded
    # bytes, whereas the retrieval path above returns secret.get_encoded() —
    # presumably the same encoding round-trips through castellan; confirm.
    secret = base64.b64encode(os.urandom(_VTPM_SECRET_BYTE_LENGTH))
    # Castellan ManagedObject
    cmo = passphrase.Passphrase(
        secret, name="vTPM secret for instance %s" % instance.uuid)
    secret_uuid = key_mgr.store(context, cmo)
    LOG.debug("Created vTPM secret with UUID %s",
              secret_uuid, instance=instance)

    # Persist the UUID so subsequent calls retrieve the same secret.
    instance.system_metadata['vtpm_secret_uuid'] = secret_uuid
    instance.save()
    return secret_uuid, secret
def delete_vtpm_secret(
    context: nova_context.RequestContext,
    instance: 'objects.Instance',
):
    """Communicates with the key manager service to destroy the secret for an
    instance's emulated TPM.

    This operation is idempotent: if the instance never had a vTPM secret, OR
    if the secret has already been deleted, it is a no-op.

    The ``vtpm_secret_uuid`` member of the instance's system_metadata is
    cleared as a side effect of this method.

    :param context: Nova auth context.
    :param instance: Instance object.
    :return: None
    :raise: castellan_exception.ManagedObjectNotFoundError if communication
        with the key manager API.
    """
    secret_uuid = instance.system_metadata.get('vtpm_secret_uuid')
    if not secret_uuid:
        # Nothing recorded on the instance: nothing to delete.
        return

    key_mgr = _get_key_manager()
    try:
        key_mgr.delete(context, secret_uuid)
        LOG.debug("Deleted vTPM secret with UUID %s",
                  secret_uuid, instance=instance)
    except castellan_exception.ManagedObjectNotFoundError:
        # Already gone — treat as success for idempotency.
        LOG.debug("vTPM secret with UUID %s already deleted or never existed.",
                  secret_uuid, instance=instance)

    # Clear the stale UUID regardless of whether the delete found anything.
    del instance.system_metadata['vtpm_secret_uuid']
    instance.save()
| |
"""
Run hugs pipeline.
"""
from __future__ import division, print_function
import os
from time import time
import mpi4py.MPI as MPI
import schwimmbad
from hugs.pipeline import next_gen_search
from hugs.utils import PatchMeta, project_dir
from astropy.table import vstack
import hugs
def ingest_data(args):
    """
    Write data to database with the master process.

    Used as the pool callback: receives one worker's return value
    (success flag, catalogs, metadata) and either ingests the sources into
    the database or records the patch as failed.

    NOTE(review): relies on module globals created in the __main__ block on
    rank 0 (session, all_recovered, all_injected, all_synth_cat,
    failed_patches) — only safe to call from the master process.
    """
    timer = time()
    success, cats, meta_data = args
    sources, recovered, injected, synth_cat = cats
    run_name, tract, patch, patch_meta = meta_data
    db_ingest = hugs.database.HugsIngest(session, run_name)
    if success and (len(sources) > 0):
        db_ingest.add_all(tract, patch, patch_meta, sources.to_pandas())
        # Accumulate synth-recovery catalogs for the end-of-run summaries.
        all_recovered.append(recovered)
        all_injected.append(injected)
        all_synth_cat.append(synth_cat)
    else:
        # Record enough metadata to diagnose the failed/empty patch later.
        failed_patches['tract'].append(tract)
        failed_patches['patch'].append(patch)
        failed_patches['good_data_frac'].append(patch_meta.good_data_frac)
        failed_patches['success'].append(success)
    delta_time = time() - timer
    print('time to ingest =', delta_time)
def worker(p):
    """
    Workers initialize pipe configuration and run pipeline.

    :param p: one row of the patches table (mapping-like) with keys
        'tract', 'patch', 'seed', 'run_name', 'config_fn', 'rerun_path'.
    :return: tuple (success, [sources, recovered, injected, synth_cat],
        [run_name, tract, patch, patch_meta]) consumed by ingest_data.
    """
    rank = MPI.COMM_WORLD.Get_rank()
    if p['seed'] is None:
        # Derive a per-worker seed from time, patch id, and MPI rank so
        # concurrent workers don't share RNG state.
        tract, p1, p2 = p['tract'], int(p['patch'][0]), int(p['patch'][-1])
        seed = [int(time()), tract, p1, p2, rank]
    else:
        seed = p['seed']
    config = hugs.PipeConfig(run_name=p['run_name'],
                             config_fn=p['config_fn'],
                             random_state=seed,
                             rerun_path=p['rerun_path'])
    config.set_patch_id(p['tract'], p['patch'])
    config.logger.info('random seed set to {}'.format(seed))
    results = next_gen_search.run(config)
    pm = results.hugs_exp.patch_meta
    patch_meta = PatchMeta(
        x0 = pm.x0,
        y0 = pm.y0,
        small_frac = pm.small_frac,
        cleaned_frac = pm.cleaned_frac,
        bright_obj_frac = pm.bright_obj_frac,
        good_data_frac = pm.good_data_frac
    )
    meta_data = [
        config.run_name,
        config.tract,
        config.patch,
        patch_meta,
    ]
    if results.success:
        sources = results.sources
        sources['flags'] = sources['flags'].astype(int)
        # Tag synths with their patch and rename coords for cross-matching.
        synth_cat = config.synth_cat
        synth_cat['tract'] = config.tract
        synth_cat['patch'] = config.patch
        synth_cat.rename_column('x', 'x_image')
        synth_cat.rename_column('y', 'y_image')
        # Cross-match detections against injected synths to measure recovery.
        (match, match_synth), _ = hugs.cattools.xmatch(
            sources, synth_cat, max_sep=config.synth_max_match_sep)
        recovered = results.sources[match]
        injected = synth_cat[match_synth]
        injected['tract'] = config.tract
        injected['patch'] = config.patch
        txt = '{} injected, {} recovered'.format(len(synth_cat),
                                                 len(injected))
        config.logger.info(txt)
    else:
        # Pipeline failed for this patch; ship Nones so the master records it.
        sources = None
        recovered = None
        injected = None
        synth_cat = None
    config.logger.info('passing results to master process')
    cats = [sources, recovered, injected, synth_cat]
    return results.success, cats, meta_data
if __name__=='__main__':
    from argparse import ArgumentParser
    from astropy.table import Table

    rank = MPI.COMM_WORLD.Get_rank()
    config_dir = os.path.join(project_dir, 'pipe-configs')

    # parse command-line arguments
    parser = ArgumentParser('Run hugs pipeline')
    parser.add_argument('-t', '--tract', type=int, help='HSC tract')
    parser.add_argument('-p', '--patch', type=str, help='HSC patch')
    parser.add_argument('-c', '--config_fn', help='hugs config file',
                        default=os.path.join(config_dir, 'hugs-run-dev.yml'))
    parser.add_argument('--patches_fn', help='patches file')
    parser.add_argument('-r', '--run_name', type=str, default='synth-run')
    parser.add_argument('--seed', help='rng seed', default=None)
    parser.add_argument('--rerun_path', help='full rerun path', default=None)
    group = parser.add_mutually_exclusive_group()
    group.add_argument('--ncores', default=1, type=int,
                       help='Number of processes (uses multiprocessing).')
    group.add_argument('--mpi', default=False, action="store_true",
                       help="Run with MPI.")
    args = parser.parse_args()
    config_params = hugs.utils.read_config(args.config_fn)
    outdir = config_params['hugs_io']

    #######################################################################
    # run on a single patch
    #######################################################################
    if args.tract is not None:
        assert args.patch is not None
        tract, patch = args.tract, args.patch
        # Single-row patches table so both modes share one code path below.
        patches = Table([[tract], [patch]], names=['tract', 'patch'])
        run_dir_name = '{}-{}-{}'.format(args.run_name, tract, patch)
        outdir = os.path.join(outdir, run_dir_name)
        hugs.utils.mkdir_if_needed(outdir)
        log_fn = os.path.join(outdir, 'hugs-pipe.log')
        patches['outdir'] = outdir
        patches['log_fn'] = log_fn

    #######################################################################
    # OR run on all patches in file
    #######################################################################
    elif args.patches_fn is not None:
        patches = Table.read(args.patches_fn)

        # NOTE(review): output directories are only created on rank 0;
        # presumably workers receive paths via the patches table and the
        # pool, so no other rank needs them at startup — confirm.
        if rank==0:
            time_label = hugs.utils.get_time_label()
            outdir = os.path.join(
                outdir, '{}-{}'.format(args.run_name, time_label))
            hugs.utils.mkdir_if_needed(outdir)
            log_dir = os.path.join(outdir, 'log')
            hugs.utils.mkdir_if_needed(log_dir)
            # One log file per patch.
            log_fn = []
            for tract, patch in patches['tract', 'patch']:
                fn = os.path.join(log_dir, '{}-{}.log'.format(tract, patch))
                log_fn.append(fn)
            patches['outdir'] = outdir
            patches['log_fn'] = log_fn
    else:
        print('\n**** must give tract and patch --or-- a patch file ****\n')
        parser.print_help()
        exit()

    # Broadcast shared per-run settings into every patch row.
    patches['rerun_path'] = args.rerun_path
    patches['seed'] = args.seed
    patches['config_fn'] = args.config_fn
    patches['run_name'] = args.run_name

    if rank==0:
        # master process lists for results
        db_fn = os.path.join(outdir, args.run_name+'.db')
        engine = hugs.database.connect(db_fn, True)
        session = hugs.database.Session()
        all_recovered = []
        all_injected = []
        all_synth_cat = []
        failed_patches = {'tract': [],
                          'patch': [],
                          'good_data_frac': [],
                          'success': []}

    # Fan out patches to workers; ingest_data runs on the master as each
    # worker result arrives.
    pool = schwimmbad.choose_pool(mpi=args.mpi, processes=args.ncores)
    list(pool.map(worker, patches, callback=ingest_data))
    pool.close()

    if rank==0:
        # Write end-of-run summary catalogs next to the database.
        fn = lambda lab: os.path.join(outdir, args.run_name + lab + '.csv')
        if len(all_recovered) > 0:
            all_recovered = vstack(all_recovered)
            all_injected = vstack(all_injected)
            all_synth_cat = vstack(all_synth_cat)
            all_recovered.write(fn('-recovered'), overwrite=True)
            all_injected.write(fn('-injected'), overwrite=True)
            all_synth_cat.write(fn('-synth-cat'), overwrite=True)
        failed_patches = Table(failed_patches)
        failed_patches.write(fn('-failed-patches'), overwrite=True)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.