import time
from piston.instance import shared_steem_instance
from .block import Block
from .utils import parse_time
virtual_operations = [
"fill_convert_request",
"author_reward",
"curation_reward",
"comment_reward",
"liquidity_reward",
"interest",
"fill_vesting_withdraw",
"fill_order",
"shutdown_witness",
"fill_transfer_from_savings",
"hardfork",
"comment_payout_update"
]
class Blockchain(object):
""" This class allows to access the blockchain and read data
from it
:param Steem steem_instance: Steem() instance to use when accesing a RPC
:param str mode: (default) Irreversible block
(``irreversible``) or actual head block (``head``)
"""
def __init__(
self,
steem_instance=None,
mode="irreversible"
):
self.steem = steem_instance or shared_steem_instance()
if mode == "irreversible":
self.mode = 'last_irreversible_block_num'
elif mode == "head":
self.mode = "head_block_number"
else:
raise ValueError("invalid value for 'mode'!")
def info(self):
""" This call returns the *dynamic global properties*
"""
return self.steem.rpc.get_dynamic_global_properties()
def chainParameters(self):
return self.config()["parameters"]
def get_network(self):
return self.steem.rpc.get_network()
def get_chain_properties(self):
return self.steem.rpc.get_chain_properties()
def config(self):
return self.steem.rpc.get_config()
def get_current_block_num(self):
""" This call returns the current block
"""
return self.info().get(self.mode)
def get_current_block(self):
""" This call returns the current block
"""
return Block(self.get_current_block_num(), steem_instance=self.steem)
def block_time(self, block_num):
""" Returns a datetime of the block with the given block
number.
:param int block_num: Block number
"""
return Block(block_num, steem_instance=self.steem).time()
def block_timestamp(self, block_num):
""" Returns the timestamp of the block with the given block
number.
:param int block_num: Block number
"""
return int(Block(block_num, steem_instance=self.steem).time().timestamp())
def blocks(self, start=None, stop=None):
""" Yields blocks starting from ``start``.
:param int start: Starting block
:param int stop: Stop at this block
            .. note:: The ``mode`` set in the constructor decides whether
                to follow the last block ("head") or only blocks that are
                confirmed by 2/3 of all block producers and are thus
                irreversible ("irreversible").
"""
# Let's find out how often blocks are generated!
block_interval = self.config().get("STEEMIT_BLOCK_INTERVAL")
if not start:
start = self.get_current_block_num()
# We are going to loop indefinitely
while True:
retry = False
            # Get the current head block so we know how far to read
head_block = self.get_current_block_num()
            if stop:
head_block = min(stop, head_block)
# Blocks from start until head block
for blocknum in range(start, head_block + 1):
# Get full block
block = self.steem.rpc.get_block(blocknum)
if not block:
start = blocknum
retry = True
break
block.update({"block_num": blocknum})
yield block
if retry:
continue
# Set new start
start = head_block + 1
if stop and start > stop:
break
# Sleep for one block
time.sleep(block_interval)
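    # Example (a minimal sketch, assuming the shared steem instance points
    # at a reachable node; the dict fields follow ``get_block``):
    #   for block in Blockchain().blocks():
    #       print(block["block_num"], len(block["transactions"]))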
def ops(self, start=None, stop=None, only_virtual_ops=False, **kwargs):
""" Yields all operations (including virtual operations) starting from ``start``.
:param int start: Starting block
:param int stop: Stop at this block
            .. note:: The ``mode`` set in the constructor decides whether
                to follow the last block ("head") or only blocks that are
                confirmed by 2/3 of all block producers and are thus
                irreversible ("irreversible").
:param bool only_virtual_ops: Only yield virtual operations
            This call yields elements that look like this and carry
            only one operation each::
{'block': 8411453,
'op': ['vote',
{'author': 'dana-edwards',
'permlink': 'church-encoding-numbers-defined-as-functions',
'voter': 'juanmiguelsalas',
'weight': 6000}],
'op_in_trx': 0,
'timestamp': '2017-01-12T12:26:03',
'trx_id': 'e897886e8b7560f37da31eb1a42177c6f236c985',
'trx_in_block': 1,
'virtual_op': 0}
"""
# Let's find out how often blocks are generated!
block_interval = self.config().get("STEEMIT_BLOCK_INTERVAL")
if not start:
start = self.get_current_block_num()
# We are going to loop indefinitely
while True:
            # Get the current head block so we know how far to read
            head_block = self.get_current_block_num()
            if stop:
                head_block = min(stop, head_block)
# Blocks from start until head block
for blocknum in range(start, head_block + 1):
                # Get all operations contained in this block
                ops = self.get_ops_in_block(blocknum, only_virtual_ops)
for op in ops:
if op:
yield op
# Set new start
start = head_block + 1
if stop and start > stop:
break
# Sleep for one block
time.sleep(block_interval)
    def get_ops_in_block(self, blocknum, only_virtual_ops=False):
        """ Get all operations (including virtual operations) of a block
            :param int blocknum: Block number
            :param bool only_virtual_ops: Only return virtual operations
        """
        # ``get_block`` only carries the signed transactions, so virtual
        # operations would be lost if the list were rebuilt from it.
        # Ask the node directly instead; the response format matches the
        # sample shown in the ``ops`` docstring.
        return self.steem.rpc.get_ops_in_block(blocknum, only_virtual_ops)
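    # Example (a sketch; block 8411453 is the block shown in the ``ops``
    # docstring above):
    #   for op in Blockchain().get_ops_in_block(8411453, only_virtual_ops=True):
    #       print(op["op"][0])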
def stream(self, opNames=[], *args, **kwargs):
""" Yield specific operations (e.g. comments) only
:param array opNames: List of operations to filter for, e.g.
vote, comment, transfer, transfer_to_vesting,
withdraw_vesting, limit_order_create, limit_order_cancel,
feed_publish, convert, account_create, account_update,
witness_update, account_witness_vote, account_witness_proxy,
                pow, custom, report_over_production, fill_convert_request,
                comment_reward, curation_reward, liquidity_reward, interest,
                fill_vesting_withdraw, fill_order
:param int start: Start at this block
:param int stop: Stop at this block
            .. note:: The ``mode`` set in the constructor decides whether
                to follow the last block ("head") or only blocks that are
                confirmed by 2/3 of all block producers and are thus
                irreversible ("irreversible").
            The dict output is formatted such that ``type`` carries the
            operation type, ``timestamp`` and ``block_num`` are taken from
            the block the operation was stored in, and the remaining keys
            depend on the actual operation.
"""
if isinstance(opNames, str):
opNames = [opNames]
if not bool(set(opNames).intersection(virtual_operations)):
# uses get_block instead of get_ops_in_block
for block in self.blocks(*args, **kwargs):
for tx in block.get("transactions"):
for op in tx["operations"]:
if not opNames or op[0] in opNames:
r = {
"type": op[0],
"timestamp": block.get("timestamp"),
"block_num": block.get("block_num")
}
r.update(op[1])
yield r
else:
# uses get_ops_in_block
kwargs["only_virtual_ops"] = not bool(set(opNames).difference(virtual_operations))
for op in self.ops(*args, **kwargs):
if not opNames or op["op"][0] in opNames:
r = {
"type": op["op"][0],
"timestamp": op.get("timestamp"),
"block_num": op.get("block")
}
r.update(op["op"][1])
yield r
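    # Example (a minimal sketch; the printed keys come from the ``vote``
    # operation payload):
    #   for vote in Blockchain().stream(opNames=["vote"]):
    #       print(vote["voter"], "->", vote["author"])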
def replay(self, start_block=1, end_block=None, filter_by=list(), **kwargs):
""" Same as ``stream`` with different prototyp
"""
return self.stream(
opNames=filter_by,
start=start_block,
stop=end_block,
mode=self.mode,
**kwargs
)
    def get_block_from_time(self, timestring, error_margin=10):
        """ Estimate block number from given time
            :param str timestring: String representing time
            :param int error_margin: Estimate block number within this interval (in seconds)
        """
        known_block = self.get_current_block_num()
        known_block_timestamp = self.block_timestamp(known_block)
        timestring_timestamp = parse_time(timestring).timestamp()
        delta = known_block_timestamp - timestring_timestamp
        # Steem produces one block roughly every 3 seconds
        # (``STEEMIT_BLOCK_INTERVAL``), so dividing the time offset by 3
        # approximates the block offset
        block_delta = delta / 3
        guess_block = known_block - block_delta
        guess_block_timestamp = self.block_timestamp(int(guess_block))
        error = timestring_timestamp - guess_block_timestamp
        while abs(error) > error_margin:
            guess_block += error / 3
            guess_block_timestamp = self.block_timestamp(int(guess_block))
            error = timestring_timestamp - guess_block_timestamp
        return int(guess_block)
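    # Example (a sketch; the timestamp reuses the one from the ``ops``
    # docstring sample):
    #   block_num = Blockchain().get_block_from_time("2017-01-12T12:26:03")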
    def get_all_accounts(self, start='', stop='', steps=1e6, **kwargs):
        """ Yields account names between ``start`` and ``stop``.
            :param str start: Start at this account name
            :param str stop: Stop at this account name
            :param int steps: Obtain ``steps`` names with a single call from RPC
        """
        lastname = start
        while True:
            names = self.steem.rpc.lookup_accounts(lastname, int(steps))
            for name in names:
                yield name
                if name == stop:
                    # ``break`` would only leave the inner loop;
                    # ``return`` ends the generator entirely
                    return
            if lastname == names[-1]:
                break
            lastname = names[-1]
            if len(names) < steps:
                break
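# Putting it together (a minimal sketch, assuming ``piston`` is installed
# and the shared steem instance can reach a node):
#   chain = Blockchain(mode="irreversible")
#   print(chain.info()["head_block_number"])
#   for name in chain.get_all_accounts(steps=1000):
#       print(name)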
"""Unit tests for the bytes and bytearray types.
XXX This is a mess. Common tests should be moved to buffer_tests.py,
which itself ought to be unified with string_tests.py (and the latter
should be modernized).
"""
import os
import re
import sys
import copy
import functools
import pickle
import tempfile
import unittest
import test.support
import test.string_tests
import test.buffer_tests
if sys.flags.bytes_warning:
def check_bytes_warnings(func):
@functools.wraps(func)
def wrapper(*args, **kw):
with test.support.check_warnings(('', BytesWarning)):
return func(*args, **kw)
return wrapper
else:
# no-op
def check_bytes_warnings(func):
return func
class Indexable:
def __init__(self, value=0):
self.value = value
def __index__(self):
return self.value
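# Indexable exercises the __index__ protocol used in the tests below: any
# object usable as an integer index can populate a bytes-like object,
# e.g. (a sketch) bytes([Indexable(65)]) == b'A'.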
class BaseBytesTest(unittest.TestCase):
def test_basics(self):
b = self.type2test()
self.assertEqual(type(b), self.type2test)
self.assertEqual(b.__class__, self.type2test)
def test_copy(self):
a = self.type2test(b"abcd")
for copy_method in (copy.copy, copy.deepcopy):
b = copy_method(a)
self.assertEqual(a, b)
self.assertEqual(type(a), type(b))
def test_empty_sequence(self):
b = self.type2test()
self.assertEqual(len(b), 0)
self.assertRaises(IndexError, lambda: b[0])
self.assertRaises(IndexError, lambda: b[1])
self.assertRaises(IndexError, lambda: b[sys.maxsize])
self.assertRaises(IndexError, lambda: b[sys.maxsize+1])
self.assertRaises(IndexError, lambda: b[10**100])
self.assertRaises(IndexError, lambda: b[-1])
self.assertRaises(IndexError, lambda: b[-2])
self.assertRaises(IndexError, lambda: b[-sys.maxsize])
self.assertRaises(IndexError, lambda: b[-sys.maxsize-1])
self.assertRaises(IndexError, lambda: b[-sys.maxsize-2])
self.assertRaises(IndexError, lambda: b[-10**100])
def test_from_list(self):
ints = list(range(256))
b = self.type2test(i for i in ints)
self.assertEqual(len(b), 256)
self.assertEqual(list(b), ints)
def test_from_index(self):
b = self.type2test([Indexable(), Indexable(1), Indexable(254),
Indexable(255)])
self.assertEqual(list(b), [0, 1, 254, 255])
self.assertRaises(ValueError, self.type2test, [Indexable(-1)])
self.assertRaises(ValueError, self.type2test, [Indexable(256)])
def test_from_ssize(self):
self.assertEqual(self.type2test(0), b'')
self.assertEqual(self.type2test(1), b'\x00')
self.assertEqual(self.type2test(5), b'\x00\x00\x00\x00\x00')
self.assertRaises(ValueError, self.type2test, -1)
self.assertEqual(self.type2test('0', 'ascii'), b'0')
self.assertEqual(self.type2test(b'0'), b'0')
self.assertRaises(OverflowError, self.type2test, sys.maxsize + 1)
def test_constructor_type_errors(self):
self.assertRaises(TypeError, self.type2test, 0.0)
class C:
pass
self.assertRaises(TypeError, self.type2test, ["0"])
self.assertRaises(TypeError, self.type2test, [0.0])
self.assertRaises(TypeError, self.type2test, [None])
self.assertRaises(TypeError, self.type2test, [C()])
def test_constructor_value_errors(self):
self.assertRaises(ValueError, self.type2test, [-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize-1])
self.assertRaises(ValueError, self.type2test, [-sys.maxsize-2])
self.assertRaises(ValueError, self.type2test, [-10**100])
self.assertRaises(ValueError, self.type2test, [256])
self.assertRaises(ValueError, self.type2test, [257])
self.assertRaises(ValueError, self.type2test, [sys.maxsize])
self.assertRaises(ValueError, self.type2test, [sys.maxsize+1])
self.assertRaises(ValueError, self.type2test, [10**100])
def test_compare(self):
b1 = self.type2test([1, 2, 3])
b2 = self.type2test([1, 2, 3])
b3 = self.type2test([1, 3])
self.assertEqual(b1, b2)
self.assertTrue(b2 != b3)
self.assertTrue(b1 <= b2)
self.assertTrue(b1 <= b3)
self.assertTrue(b1 < b3)
self.assertTrue(b1 >= b2)
self.assertTrue(b3 >= b2)
self.assertTrue(b3 > b2)
self.assertFalse(b1 != b2)
self.assertFalse(b2 == b3)
self.assertFalse(b1 > b2)
self.assertFalse(b1 > b3)
self.assertFalse(b1 >= b3)
self.assertFalse(b1 < b2)
self.assertFalse(b3 < b2)
self.assertFalse(b3 <= b2)
@check_bytes_warnings
def test_compare_to_str(self):
# Byte comparisons with unicode should always fail!
# Test this for all expected byte orders and Unicode character
# sizes.
self.assertEqual(self.type2test(b"\0a\0b\0c") == "abc", False)
self.assertEqual(self.type2test(b"\0\0\0a\0\0\0b\0\0\0c") == "abc",
False)
self.assertEqual(self.type2test(b"a\0b\0c\0") == "abc", False)
self.assertEqual(self.type2test(b"a\0\0\0b\0\0\0c\0\0\0") == "abc",
False)
self.assertEqual(self.type2test() == str(), False)
self.assertEqual(self.type2test() != str(), True)
def test_reversed(self):
input = list(map(ord, "Hello"))
b = self.type2test(input)
output = list(reversed(b))
input.reverse()
self.assertEqual(output, input)
def test_getslice(self):
def by(s):
return self.type2test(map(ord, s))
b = by("Hello, world")
self.assertEqual(b[:5], by("Hello"))
self.assertEqual(b[1:5], by("ello"))
self.assertEqual(b[5:7], by(", "))
self.assertEqual(b[7:], by("world"))
self.assertEqual(b[7:12], by("world"))
self.assertEqual(b[7:100], by("world"))
self.assertEqual(b[:-7], by("Hello"))
self.assertEqual(b[-11:-7], by("ello"))
self.assertEqual(b[-7:-5], by(", "))
self.assertEqual(b[-5:], by("world"))
self.assertEqual(b[-5:12], by("world"))
self.assertEqual(b[-5:100], by("world"))
self.assertEqual(b[-100:5], by("Hello"))
def test_extended_getslice(self):
# Test extended slicing by comparing with list slicing.
L = list(range(255))
b = self.type2test(L)
indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
for start in indices:
for stop in indices:
# Skip step 0 (invalid)
for step in indices[1:]:
self.assertEqual(b[start:stop:step], self.type2test(L[start:stop:step]))
def test_encoding(self):
sample = "Hello world\n\u1234\u5678\u9abc"
for enc in ("utf-8", "utf-16"):
b = self.type2test(sample, enc)
self.assertEqual(b, self.type2test(sample.encode(enc)))
self.assertRaises(UnicodeEncodeError, self.type2test, sample, "latin-1")
b = self.type2test(sample, "latin-1", "ignore")
self.assertEqual(b, self.type2test(sample[:-3], "utf-8"))
def test_decode(self):
sample = "Hello world\n\u1234\u5678\u9abc\def0\def0"
for enc in ("utf-8", "utf-16"):
b = self.type2test(sample, enc)
self.assertEqual(b.decode(enc), sample)
sample = "Hello world\n\x80\x81\xfe\xff"
b = self.type2test(sample, "latin-1")
self.assertRaises(UnicodeDecodeError, b.decode, "utf-8")
self.assertEqual(b.decode("utf-8", "ignore"), "Hello world\n")
self.assertEqual(b.decode(errors="ignore", encoding="utf-8"),
"Hello world\n")
# Default encoding is utf-8
self.assertEqual(self.type2test(b'\xe2\x98\x83').decode(), '\u2603')
def test_from_int(self):
b = self.type2test(0)
self.assertEqual(b, self.type2test())
b = self.type2test(10)
self.assertEqual(b, self.type2test([0]*10))
b = self.type2test(10000)
self.assertEqual(b, self.type2test([0]*10000))
def test_concat(self):
b1 = self.type2test(b"abc")
b2 = self.type2test(b"def")
self.assertEqual(b1 + b2, b"abcdef")
self.assertEqual(b1 + bytes(b"def"), b"abcdef")
self.assertEqual(bytes(b"def") + b1, b"defabc")
self.assertRaises(TypeError, lambda: b1 + "def")
self.assertRaises(TypeError, lambda: "abc" + b2)
def test_repeat(self):
for b in b"abc", self.type2test(b"abc"):
self.assertEqual(b * 3, b"abcabcabc")
self.assertEqual(b * 0, b"")
self.assertEqual(b * -1, b"")
self.assertRaises(TypeError, lambda: b * 3.14)
self.assertRaises(TypeError, lambda: 3.14 * b)
# XXX Shouldn't bytes and bytearray agree on what to raise?
with self.assertRaises((OverflowError, MemoryError)):
c = b * sys.maxsize
with self.assertRaises((OverflowError, MemoryError)):
b *= sys.maxsize
def test_repeat_1char(self):
self.assertEqual(self.type2test(b'x')*100, self.type2test([ord('x')]*100))
def test_contains(self):
b = self.type2test(b"abc")
self.assertIn(ord('a'), b)
self.assertIn(int(ord('a')), b)
self.assertNotIn(200, b)
self.assertRaises(ValueError, lambda: 300 in b)
self.assertRaises(ValueError, lambda: -1 in b)
self.assertRaises(TypeError, lambda: None in b)
self.assertRaises(TypeError, lambda: float(ord('a')) in b)
self.assertRaises(TypeError, lambda: "a" in b)
for f in bytes, bytearray:
self.assertIn(f(b""), b)
self.assertIn(f(b"a"), b)
self.assertIn(f(b"b"), b)
self.assertIn(f(b"c"), b)
self.assertIn(f(b"ab"), b)
self.assertIn(f(b"bc"), b)
self.assertIn(f(b"abc"), b)
self.assertNotIn(f(b"ac"), b)
self.assertNotIn(f(b"d"), b)
self.assertNotIn(f(b"dab"), b)
self.assertNotIn(f(b"abd"), b)
def test_fromhex(self):
self.assertRaises(TypeError, self.type2test.fromhex)
self.assertRaises(TypeError, self.type2test.fromhex, 1)
self.assertEqual(self.type2test.fromhex(''), self.type2test())
b = bytearray([0x1a, 0x2b, 0x30])
self.assertEqual(self.type2test.fromhex('1a2B30'), b)
self.assertEqual(self.type2test.fromhex(' 1A 2B 30 '), b)
self.assertEqual(self.type2test.fromhex('0000'), b'\0\0')
self.assertRaises(TypeError, self.type2test.fromhex, b'1B')
self.assertRaises(ValueError, self.type2test.fromhex, 'a')
self.assertRaises(ValueError, self.type2test.fromhex, 'rt')
self.assertRaises(ValueError, self.type2test.fromhex, '1a b cd')
self.assertRaises(ValueError, self.type2test.fromhex, '\x00')
self.assertRaises(ValueError, self.type2test.fromhex, '12 \x00 34')
def test_join(self):
self.assertEqual(self.type2test(b"").join([]), b"")
self.assertEqual(self.type2test(b"").join([b""]), b"")
for lst in [[b"abc"], [b"a", b"bc"], [b"ab", b"c"], [b"a", b"b", b"c"]]:
lst = list(map(self.type2test, lst))
self.assertEqual(self.type2test(b"").join(lst), b"abc")
self.assertEqual(self.type2test(b"").join(tuple(lst)), b"abc")
self.assertEqual(self.type2test(b"").join(iter(lst)), b"abc")
self.assertEqual(self.type2test(b".").join([b"ab", b"cd"]), b"ab.cd")
# XXX more...
def test_count(self):
b = self.type2test(b'mississippi')
i = 105
p = 112
w = 119
self.assertEqual(b.count(b'i'), 4)
self.assertEqual(b.count(b'ss'), 2)
self.assertEqual(b.count(b'w'), 0)
self.assertEqual(b.count(i), 4)
self.assertEqual(b.count(w), 0)
self.assertEqual(b.count(b'i', 6), 2)
self.assertEqual(b.count(b'p', 6), 2)
self.assertEqual(b.count(b'i', 1, 3), 1)
self.assertEqual(b.count(b'p', 7, 9), 1)
self.assertEqual(b.count(i, 6), 2)
self.assertEqual(b.count(p, 6), 2)
self.assertEqual(b.count(i, 1, 3), 1)
self.assertEqual(b.count(p, 7, 9), 1)
def test_startswith(self):
b = self.type2test(b'hello')
self.assertFalse(self.type2test().startswith(b"anything"))
self.assertTrue(b.startswith(b"hello"))
self.assertTrue(b.startswith(b"hel"))
self.assertTrue(b.startswith(b"h"))
self.assertFalse(b.startswith(b"hellow"))
self.assertFalse(b.startswith(b"ha"))
with self.assertRaises(TypeError) as cm:
b.startswith([b'h'])
exc = str(cm.exception)
self.assertIn('bytes', exc)
self.assertIn('tuple', exc)
def test_endswith(self):
b = self.type2test(b'hello')
self.assertFalse(bytearray().endswith(b"anything"))
self.assertTrue(b.endswith(b"hello"))
self.assertTrue(b.endswith(b"llo"))
self.assertTrue(b.endswith(b"o"))
self.assertFalse(b.endswith(b"whello"))
self.assertFalse(b.endswith(b"no"))
with self.assertRaises(TypeError) as cm:
b.endswith([b'o'])
exc = str(cm.exception)
self.assertIn('bytes', exc)
self.assertIn('tuple', exc)
def test_find(self):
b = self.type2test(b'mississippi')
i = 105
w = 119
self.assertEqual(b.find(b'ss'), 2)
self.assertEqual(b.find(b'w'), -1)
self.assertEqual(b.find(b'mississippian'), -1)
self.assertEqual(b.find(i), 1)
self.assertEqual(b.find(w), -1)
self.assertEqual(b.find(b'ss', 3), 5)
self.assertEqual(b.find(b'ss', 1, 7), 2)
self.assertEqual(b.find(b'ss', 1, 3), -1)
self.assertEqual(b.find(i, 6), 7)
self.assertEqual(b.find(i, 1, 3), 1)
self.assertEqual(b.find(w, 1, 3), -1)
for index in (-1, 256, sys.maxsize + 1):
self.assertRaisesRegex(
ValueError, r'byte must be in range\(0, 256\)',
b.find, index)
def test_rfind(self):
b = self.type2test(b'mississippi')
i = 105
w = 119
self.assertEqual(b.rfind(b'ss'), 5)
self.assertEqual(b.rfind(b'w'), -1)
self.assertEqual(b.rfind(b'mississippian'), -1)
self.assertEqual(b.rfind(i), 10)
self.assertEqual(b.rfind(w), -1)
self.assertEqual(b.rfind(b'ss', 3), 5)
self.assertEqual(b.rfind(b'ss', 0, 6), 2)
self.assertEqual(b.rfind(i, 1, 3), 1)
self.assertEqual(b.rfind(i, 3, 9), 7)
self.assertEqual(b.rfind(w, 1, 3), -1)
def test_index(self):
b = self.type2test(b'mississippi')
i = 105
w = 119
self.assertEqual(b.index(b'ss'), 2)
self.assertRaises(ValueError, b.index, b'w')
self.assertRaises(ValueError, b.index, b'mississippian')
self.assertEqual(b.index(i), 1)
self.assertRaises(ValueError, b.index, w)
self.assertEqual(b.index(b'ss', 3), 5)
self.assertEqual(b.index(b'ss', 1, 7), 2)
self.assertRaises(ValueError, b.index, b'ss', 1, 3)
self.assertEqual(b.index(i, 6), 7)
self.assertEqual(b.index(i, 1, 3), 1)
self.assertRaises(ValueError, b.index, w, 1, 3)
def test_rindex(self):
b = self.type2test(b'mississippi')
i = 105
w = 119
self.assertEqual(b.rindex(b'ss'), 5)
self.assertRaises(ValueError, b.rindex, b'w')
self.assertRaises(ValueError, b.rindex, b'mississippian')
self.assertEqual(b.rindex(i), 10)
self.assertRaises(ValueError, b.rindex, w)
self.assertEqual(b.rindex(b'ss', 3), 5)
self.assertEqual(b.rindex(b'ss', 0, 6), 2)
self.assertEqual(b.rindex(i, 1, 3), 1)
self.assertEqual(b.rindex(i, 3, 9), 7)
self.assertRaises(ValueError, b.rindex, w, 1, 3)
def test_replace(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.replace(b'i', b'a'), b'massassappa')
self.assertEqual(b.replace(b'ss', b'x'), b'mixixippi')
def test_split(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.split(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.split(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.split(b'w'), [b])
# with keyword args
b = self.type2test(b'a|b|c|d')
self.assertEqual(b.split(sep=b'|'), [b'a', b'b', b'c', b'd'])
self.assertEqual(b.split(b'|', maxsplit=1), [b'a', b'b|c|d'])
self.assertEqual(b.split(sep=b'|', maxsplit=1), [b'a', b'b|c|d'])
self.assertEqual(b.split(maxsplit=1, sep=b'|'), [b'a', b'b|c|d'])
b = self.type2test(b'a b c d')
self.assertEqual(b.split(maxsplit=1), [b'a', b'b c d'])
def test_split_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.split(), [b'arf', b'barf'])
self.assertEqual(b.split(None), [b'arf', b'barf'])
self.assertEqual(b.split(None, 2), [b'arf', b'barf'])
for b in (b'a\x1Cb', b'a\x1Db', b'a\x1Eb', b'a\x1Fb'):
b = self.type2test(b)
self.assertEqual(b.split(), [b])
self.assertEqual(self.type2test(b' a bb c ').split(None, 0), [b'a bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 1), [b'a', b'bb c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 2), [b'a', b'bb', b'c '])
self.assertEqual(self.type2test(b' a bb c ').split(None, 3), [b'a', b'bb', b'c'])
def test_split_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').split, ' ')
def test_split_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.split(), [b'\x1c\x1d\x1e\x1f'])
def test_rsplit(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rsplit(b'i'), [b'm', b'ss', b'ss', b'pp', b''])
self.assertEqual(b.rsplit(b'ss'), [b'mi', b'i', b'ippi'])
self.assertEqual(b.rsplit(b'w'), [b])
# with keyword args
b = self.type2test(b'a|b|c|d')
self.assertEqual(b.rsplit(sep=b'|'), [b'a', b'b', b'c', b'd'])
self.assertEqual(b.rsplit(b'|', maxsplit=1), [b'a|b|c', b'd'])
self.assertEqual(b.rsplit(sep=b'|', maxsplit=1), [b'a|b|c', b'd'])
self.assertEqual(b.rsplit(maxsplit=1, sep=b'|'), [b'a|b|c', b'd'])
b = self.type2test(b'a b c d')
self.assertEqual(b.rsplit(maxsplit=1), [b'a b c', b'd'])
def test_rsplit_whitespace(self):
for b in (b' arf barf ', b'arf\tbarf', b'arf\nbarf', b'arf\rbarf',
b'arf\fbarf', b'arf\vbarf'):
b = self.type2test(b)
self.assertEqual(b.rsplit(), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None), [b'arf', b'barf'])
self.assertEqual(b.rsplit(None, 2), [b'arf', b'barf'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 0), [b' a bb c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 1), [b' a bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 2), [b' a', b'bb', b'c'])
self.assertEqual(self.type2test(b' a bb c ').rsplit(None, 3), [b'a', b'bb', b'c'])
def test_rsplit_string_error(self):
self.assertRaises(TypeError, self.type2test(b'a b').rsplit, ' ')
def test_rsplit_unicodewhitespace(self):
b = self.type2test(b"\x09\x0A\x0B\x0C\x0D\x1C\x1D\x1E\x1F")
self.assertEqual(b.rsplit(), [b'\x1c\x1d\x1e\x1f'])
def test_partition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.partition(b'ss'), (b'mi', b'ss', b'issippi'))
self.assertEqual(b.partition(b'w'), (b'mississippi', b'', b''))
def test_rpartition(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rpartition(b'ss'), (b'missi', b'ss', b'ippi'))
self.assertEqual(b.rpartition(b'i'), (b'mississipp', b'i', b''))
self.assertEqual(b.rpartition(b'w'), (b'', b'', b'mississippi'))
def test_pickling(self):
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
b = self.type2test(b)
ps = pickle.dumps(b, proto)
q = pickle.loads(ps)
self.assertEqual(b, q)
def test_iterator_pickling(self):
for b in b"", b"a", b"abc", b"\xffab\x80", b"\0\0\377\0\0":
it = itorg = iter(self.type2test(b))
data = list(self.type2test(b))
d = pickle.dumps(it)
it = pickle.loads(d)
self.assertEqual(type(itorg), type(it))
self.assertEqual(list(it), data)
it = pickle.loads(d)
try:
next(it)
except StopIteration:
continue
d = pickle.dumps(it)
it = pickle.loads(d)
self.assertEqual(list(it), data[1:])
def test_strip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.strip(b'i'), b'mississipp')
self.assertEqual(b.strip(b'm'), b'ississippi')
self.assertEqual(b.strip(b'pi'), b'mississ')
self.assertEqual(b.strip(b'im'), b'ssissipp')
self.assertEqual(b.strip(b'pim'), b'ssiss')
self.assertEqual(b.strip(b), b'')
def test_lstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.lstrip(b'i'), b'mississippi')
self.assertEqual(b.lstrip(b'm'), b'ississippi')
self.assertEqual(b.lstrip(b'pi'), b'mississippi')
self.assertEqual(b.lstrip(b'im'), b'ssissippi')
self.assertEqual(b.lstrip(b'pim'), b'ssissippi')
def test_rstrip(self):
b = self.type2test(b'mississippi')
self.assertEqual(b.rstrip(b'i'), b'mississipp')
self.assertEqual(b.rstrip(b'm'), b'mississippi')
self.assertEqual(b.rstrip(b'pi'), b'mississ')
self.assertEqual(b.rstrip(b'im'), b'mississipp')
self.assertEqual(b.rstrip(b'pim'), b'mississ')
def test_strip_whitespace(self):
b = self.type2test(b' \t\n\r\f\vabc \t\n\r\f\v')
self.assertEqual(b.strip(), b'abc')
self.assertEqual(b.lstrip(), b'abc \t\n\r\f\v')
self.assertEqual(b.rstrip(), b' \t\n\r\f\vabc')
def test_strip_bytearray(self):
self.assertEqual(self.type2test(b'abc').strip(memoryview(b'ac')), b'b')
self.assertEqual(self.type2test(b'abc').lstrip(memoryview(b'ac')), b'bc')
self.assertEqual(self.type2test(b'abc').rstrip(memoryview(b'ac')), b'ab')
def test_strip_string_error(self):
self.assertRaises(TypeError, self.type2test(b'abc').strip, 'b')
self.assertRaises(TypeError, self.type2test(b'abc').lstrip, 'b')
self.assertRaises(TypeError, self.type2test(b'abc').rstrip, 'b')
def test_center(self):
# Fill character can be either bytes or bytearray (issue 12380)
b = self.type2test(b'abc')
for fill_type in (bytes, bytearray):
self.assertEqual(b.center(7, fill_type(b'-')),
self.type2test(b'--abc--'))
def test_ljust(self):
# Fill character can be either bytes or bytearray (issue 12380)
b = self.type2test(b'abc')
for fill_type in (bytes, bytearray):
self.assertEqual(b.ljust(7, fill_type(b'-')),
self.type2test(b'abc----'))
def test_rjust(self):
# Fill character can be either bytes or bytearray (issue 12380)
b = self.type2test(b'abc')
for fill_type in (bytes, bytearray):
self.assertEqual(b.rjust(7, fill_type(b'-')),
self.type2test(b'----abc'))
def test_ord(self):
b = self.type2test(b'\0A\x7f\x80\xff')
self.assertEqual([ord(b[i:i+1]) for i in range(len(b))],
[0, 65, 127, 128, 255])
def test_maketrans(self):
transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`xyzdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374\375\376\377'
self.assertEqual(self.type2test.maketrans(b'abc', b'xyz'), transtable)
transtable = b'\000\001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037 !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~\177\200\201\202\203\204\205\206\207\210\211\212\213\214\215\216\217\220\221\222\223\224\225\226\227\230\231\232\233\234\235\236\237\240\241\242\243\244\245\246\247\250\251\252\253\254\255\256\257\260\261\262\263\264\265\266\267\270\271\272\273\274\275\276\277\300\301\302\303\304\305\306\307\310\311\312\313\314\315\316\317\320\321\322\323\324\325\326\327\330\331\332\333\334\335\336\337\340\341\342\343\344\345\346\347\350\351\352\353\354\355\356\357\360\361\362\363\364\365\366\367\370\371\372\373\374xyz'
self.assertEqual(self.type2test.maketrans(b'\375\376\377', b'xyz'), transtable)
self.assertRaises(ValueError, self.type2test.maketrans, b'abc', b'xyzq')
self.assertRaises(TypeError, self.type2test.maketrans, 'abc', 'def')
def test_none_arguments(self):
# issue 11828
b = self.type2test(b'hello')
l = self.type2test(b'l')
h = self.type2test(b'h')
x = self.type2test(b'x')
o = self.type2test(b'o')
self.assertEqual(2, b.find(l, None))
self.assertEqual(3, b.find(l, -2, None))
self.assertEqual(2, b.find(l, None, -2))
self.assertEqual(0, b.find(h, None, None))
self.assertEqual(3, b.rfind(l, None))
self.assertEqual(3, b.rfind(l, -2, None))
self.assertEqual(2, b.rfind(l, None, -2))
self.assertEqual(0, b.rfind(h, None, None))
self.assertEqual(2, b.index(l, None))
self.assertEqual(3, b.index(l, -2, None))
self.assertEqual(2, b.index(l, None, -2))
self.assertEqual(0, b.index(h, None, None))
self.assertEqual(3, b.rindex(l, None))
self.assertEqual(3, b.rindex(l, -2, None))
self.assertEqual(2, b.rindex(l, None, -2))
self.assertEqual(0, b.rindex(h, None, None))
self.assertEqual(2, b.count(l, None))
self.assertEqual(1, b.count(l, -2, None))
self.assertEqual(1, b.count(l, None, -2))
self.assertEqual(0, b.count(x, None, None))
self.assertEqual(True, b.endswith(o, None))
self.assertEqual(True, b.endswith(o, -2, None))
self.assertEqual(True, b.endswith(l, None, -2))
self.assertEqual(False, b.endswith(x, None, None))
self.assertEqual(True, b.startswith(h, None))
self.assertEqual(True, b.startswith(l, -2, None))
self.assertEqual(True, b.startswith(h, None, -2))
self.assertEqual(False, b.startswith(x, None, None))
def test_integer_arguments_out_of_byte_range(self):
b = self.type2test(b'hello')
for method in (b.count, b.find, b.index, b.rfind, b.rindex):
self.assertRaises(ValueError, method, -1)
self.assertRaises(ValueError, method, 256)
self.assertRaises(ValueError, method, 9999)
def test_find_etc_raise_correct_error_messages(self):
# issue 11828
b = self.type2test(b'hello')
x = self.type2test(b'x')
self.assertRaisesRegex(TypeError, r'\bfind\b', b.find,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\brfind\b', b.rfind,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bindex\b', b.index,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\brindex\b', b.rindex,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bcount\b', b.count,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bstartswith\b', b.startswith,
x, None, None, None)
self.assertRaisesRegex(TypeError, r'\bendswith\b', b.endswith,
x, None, None, None)
class BytesTest(BaseBytesTest):
type2test = bytes
def test_buffer_is_readonly(self):
fd = os.dup(sys.stdin.fileno())
with open(fd, "rb", buffering=0) as f:
self.assertRaises(TypeError, f.readinto, b"")
def test_custom(self):
class A:
def __bytes__(self):
return b'abc'
self.assertEqual(bytes(A()), b'abc')
class A: pass
self.assertRaises(TypeError, bytes, A())
class A:
def __bytes__(self):
return None
self.assertRaises(TypeError, bytes, A())
# Test PyBytes_FromFormat()
def test_from_format(self):
test.support.import_module('ctypes')
from ctypes import pythonapi, py_object, c_int, c_char_p
PyBytes_FromFormat = pythonapi.PyBytes_FromFormat
PyBytes_FromFormat.restype = py_object
self.assertEqual(PyBytes_FromFormat(b'format'),
b'format')
self.assertEqual(PyBytes_FromFormat(b'%'), b'%')
self.assertEqual(PyBytes_FromFormat(b'%%'), b'%')
self.assertEqual(PyBytes_FromFormat(b'%%s'), b'%s')
self.assertEqual(PyBytes_FromFormat(b'[%%]'), b'[%]')
self.assertEqual(PyBytes_FromFormat(b'%%%c', c_int(ord('_'))), b'%_')
self.assertEqual(PyBytes_FromFormat(b'c:%c', c_int(255)),
b'c:\xff')
self.assertEqual(PyBytes_FromFormat(b's:%s', c_char_p(b'cstr')),
b's:cstr')
class ByteArrayTest(BaseBytesTest):
type2test = bytearray
def test_nohash(self):
self.assertRaises(TypeError, hash, bytearray())
def test_bytearray_api(self):
short_sample = b"Hello world\n"
sample = short_sample + b"\0"*(20 - len(short_sample))
tfn = tempfile.mktemp()
try:
# Prepare
with open(tfn, "wb") as f:
f.write(short_sample)
# Test readinto
with open(tfn, "rb") as f:
b = bytearray(20)
n = f.readinto(b)
self.assertEqual(n, len(short_sample))
self.assertEqual(list(b), list(sample))
# Test writing in binary mode
with open(tfn, "wb") as f:
f.write(b)
with open(tfn, "rb") as f:
self.assertEqual(f.read(), sample)
# Text mode is ambiguous; don't test
finally:
try:
os.remove(tfn)
except os.error:
pass
def test_reverse(self):
b = bytearray(b'hello')
self.assertEqual(b.reverse(), None)
self.assertEqual(b, b'olleh')
b = bytearray(b'hello1') # test even number of items
b.reverse()
self.assertEqual(b, b'1olleh')
b = bytearray()
b.reverse()
self.assertFalse(b)
def test_clear(self):
b = bytearray(b'python')
b.clear()
self.assertEqual(b, b'')
b = bytearray(b'')
b.clear()
self.assertEqual(b, b'')
b = bytearray(b'')
b.append(ord('r'))
b.clear()
b.append(ord('p'))
self.assertEqual(b, b'p')
def test_copy(self):
b = bytearray(b'abc')
bb = b.copy()
self.assertEqual(bb, b'abc')
b = bytearray(b'')
bb = b.copy()
self.assertEqual(bb, b'')
# test that it's indeed a copy and not a reference
b = bytearray(b'abc')
bb = b.copy()
self.assertEqual(b, bb)
self.assertIsNot(b, bb)
bb.append(ord('d'))
self.assertEqual(bb, b'abcd')
self.assertEqual(b, b'abc')
def test_regexps(self):
def by(s):
return bytearray(map(ord, s))
b = by("Hello, world")
self.assertEqual(re.findall(br"\w+", b), [by("Hello"), by("world")])
def test_setitem(self):
b = bytearray([1, 2, 3])
b[1] = 100
self.assertEqual(b, bytearray([1, 100, 3]))
b[-1] = 200
self.assertEqual(b, bytearray([1, 100, 200]))
b[0] = Indexable(10)
self.assertEqual(b, bytearray([10, 100, 200]))
try:
b[3] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[-10] = 0
self.fail("Didn't raise IndexError")
except IndexError:
pass
try:
b[0] = 256
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = Indexable(-1)
self.fail("Didn't raise ValueError")
except ValueError:
pass
try:
b[0] = None
self.fail("Didn't raise TypeError")
except TypeError:
pass
def test_delitem(self):
b = bytearray(range(10))
del b[0]
self.assertEqual(b, bytearray(range(1, 10)))
del b[-1]
self.assertEqual(b, bytearray(range(1, 9)))
del b[4]
self.assertEqual(b, bytearray([1, 2, 3, 4, 6, 7, 8]))
def test_setslice(self):
b = bytearray(range(10))
self.assertEqual(list(b), list(range(10)))
b[0:5] = bytearray([1, 1, 1, 1, 1])
self.assertEqual(b, bytearray([1, 1, 1, 1, 1, 5, 6, 7, 8, 9]))
del b[0:-5]
self.assertEqual(b, bytearray([5, 6, 7, 8, 9]))
b[0:0] = bytearray([0, 1, 2, 3, 4])
self.assertEqual(b, bytearray(range(10)))
b[-7:-3] = bytearray([100, 101])
self.assertEqual(b, bytearray([0, 1, 2, 100, 101, 7, 8, 9]))
b[3:5] = [3, 4, 5, 6]
self.assertEqual(b, bytearray(range(10)))
b[3:0] = [42, 42, 42]
self.assertEqual(b, bytearray([0, 1, 2, 42, 42, 42, 3, 4, 5, 6, 7, 8, 9]))
def test_extended_set_del_slice(self):
indices = (0, None, 1, 3, 19, 300, 1<<333, -1, -2, -31, -300)
for start in indices:
for stop in indices:
# Skip invalid step 0
for step in indices[1:]:
L = list(range(255))
b = bytearray(L)
# Make sure we have a slice of exactly the right length,
# but with different data.
data = L[start:stop:step]
data.reverse()
L[start:stop:step] = data
b[start:stop:step] = data
self.assertEqual(b, bytearray(L))
del L[start:stop:step]
del b[start:stop:step]
self.assertEqual(b, bytearray(L))
def test_setslice_trap(self):
# This test verifies that we correctly handle assigning self
# to a slice of self (the old Lambert Meertens trap).
b = bytearray(range(256))
b[8:] = b
self.assertEqual(b, bytearray(list(range(8)) + list(range(256))))
def test_iconcat(self):
b = bytearray(b"abc")
b1 = b
b += b"def"
self.assertEqual(b, b"abcdef")
self.assertEqual(b, b1)
self.assertTrue(b is b1)
b += b"xyz"
self.assertEqual(b, b"abcdefxyz")
try:
b += ""
except TypeError:
pass
else:
self.fail("bytes += unicode didn't raise TypeError")
def test_irepeat(self):
b = bytearray(b"abc")
b1 = b
b *= 3
self.assertEqual(b, b"abcabcabc")
self.assertEqual(b, b1)
self.assertTrue(b is b1)
def test_irepeat_1char(self):
b = bytearray(b"x")
b1 = b
b *= 100
self.assertEqual(b, b"x"*100)
self.assertEqual(b, b1)
self.assertTrue(b is b1)
def test_alloc(self):
b = bytearray()
alloc = b.__alloc__()
self.assertTrue(alloc >= 0)
seq = [alloc]
for i in range(100):
b += b"x"
alloc = b.__alloc__()
self.assertTrue(alloc >= len(b))
if alloc not in seq:
seq.append(alloc)
def test_extend(self):
orig = b'hello'
a = bytearray(orig)
a.extend(a)
self.assertEqual(a, orig + orig)
self.assertEqual(a[5:], orig)
a = bytearray(b'')
# Test iterators that don't have a __length_hint__
a.extend(map(int, orig * 25))
a.extend(int(x) for x in orig * 25)
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(iter(map(int, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
a.extend(list(map(int, orig * 50)))
self.assertEqual(a, orig * 50)
self.assertEqual(a[-5:], orig)
a = bytearray(b'')
self.assertRaises(ValueError, a.extend, [0, 1, 2, 256])
self.assertRaises(ValueError, a.extend, [0, 1, 2, -1])
self.assertEqual(len(a), 0)
a = bytearray(b'')
a.extend([Indexable(ord('a'))])
self.assertEqual(a, b'a')
def test_remove(self):
b = bytearray(b'hello')
b.remove(ord('l'))
self.assertEqual(b, b'helo')
b.remove(ord('l'))
self.assertEqual(b, b'heo')
self.assertRaises(ValueError, lambda: b.remove(ord('l')))
self.assertRaises(ValueError, lambda: b.remove(400))
self.assertRaises(TypeError, lambda: b.remove('e'))
# remove first and last
b.remove(ord('o'))
b.remove(ord('h'))
self.assertEqual(b, b'e')
self.assertRaises(TypeError, lambda: b.remove(b'e'))
b.remove(Indexable(ord('e')))
self.assertEqual(b, b'')
def test_pop(self):
b = bytearray(b'world')
self.assertEqual(b.pop(), ord('d'))
self.assertEqual(b.pop(0), ord('w'))
self.assertEqual(b.pop(-2), ord('r'))
self.assertRaises(IndexError, lambda: b.pop(10))
self.assertRaises(IndexError, lambda: bytearray().pop())
# test for issue #6846
self.assertEqual(bytearray(b'\xff').pop(), 0xff)
def test_nosort(self):
self.assertRaises(AttributeError, lambda: bytearray().sort())
def test_append(self):
b = bytearray(b'hell')
b.append(ord('o'))
self.assertEqual(b, b'hello')
self.assertEqual(b.append(100), None)
b = bytearray()
b.append(ord('A'))
self.assertEqual(len(b), 1)
self.assertRaises(TypeError, lambda: b.append(b'o'))
b = bytearray()
b.append(Indexable(ord('A')))
self.assertEqual(b, b'A')
def test_insert(self):
b = bytearray(b'msssspp')
b.insert(1, ord('i'))
b.insert(4, ord('i'))
b.insert(-2, ord('i'))
b.insert(1000, ord('i'))
self.assertEqual(b, b'mississippi')
self.assertRaises(TypeError, lambda: b.insert(0, b'1'))
b = bytearray()
b.insert(0, Indexable(ord('A')))
self.assertEqual(b, b'A')
def test_copied(self):
# Issue 4348. Make sure that operations that don't mutate the array
# copy the bytes.
b = bytearray(b'abc')
self.assertFalse(b is b.replace(b'abc', b'cde', 0))
t = bytearray([i for i in range(256)])
x = bytearray(b'')
self.assertFalse(x is x.translate(t))
def test_partition_bytearray_doesnt_share_nullstring(self):
a, b, c = bytearray(b"x").partition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
self.assertTrue(b is not c)
b += b"!"
self.assertEqual(c, b"")
a, b, c = bytearray(b"x").partition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
# Same for rpartition
b, c, a = bytearray(b"x").rpartition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
self.assertTrue(b is not c)
b += b"!"
self.assertEqual(c, b"")
c, b, a = bytearray(b"x").rpartition(b"y")
self.assertEqual(b, b"")
self.assertEqual(c, b"")
def test_resize_forbidden(self):
# #4509: can't resize a bytearray when there are buffer exports, even
# if it wouldn't reallocate the underlying buffer.
# Furthermore, no destructive changes to the buffer may be applied
# before raising the error.
b = bytearray(range(10))
v = memoryview(b)
def resize(n):
b[1:-1] = range(n + 1, 2*n - 1)
resize(10)
orig = b[:]
self.assertRaises(BufferError, resize, 11)
self.assertEqual(b, orig)
self.assertRaises(BufferError, resize, 9)
self.assertEqual(b, orig)
self.assertRaises(BufferError, resize, 0)
self.assertEqual(b, orig)
# Other operations implying resize
self.assertRaises(BufferError, b.pop, 0)
self.assertEqual(b, orig)
self.assertRaises(BufferError, b.remove, b[1])
self.assertEqual(b, orig)
def delitem():
del b[1]
self.assertRaises(BufferError, delitem)
self.assertEqual(b, orig)
# deleting a non-contiguous slice
def delslice():
b[1:-1:2] = b""
self.assertRaises(BufferError, delslice)
self.assertEqual(b, orig)
class AssortedBytesTest(unittest.TestCase):
#
# Test various combinations of bytes and bytearray
#
@check_bytes_warnings
def test_repr_str(self):
for f in str, repr:
self.assertEqual(f(bytearray()), "bytearray(b'')")
self.assertEqual(f(bytearray([0])), "bytearray(b'\\x00')")
self.assertEqual(f(bytearray([0, 1, 254, 255])),
"bytearray(b'\\x00\\x01\\xfe\\xff')")
self.assertEqual(f(b"abc"), "b'abc'")
self.assertEqual(f(b"'"), '''b"'"''') # '''
self.assertEqual(f(b"'\""), r"""b'\'"'""") # '
def test_compare_bytes_to_bytearray(self):
self.assertEqual(b"abc" == bytes(b"abc"), True)
self.assertEqual(b"ab" != bytes(b"abc"), True)
self.assertEqual(b"ab" <= bytes(b"abc"), True)
self.assertEqual(b"ab" < bytes(b"abc"), True)
self.assertEqual(b"abc" >= bytes(b"ab"), True)
self.assertEqual(b"abc" > bytes(b"ab"), True)
self.assertEqual(b"abc" != bytes(b"abc"), False)
self.assertEqual(b"ab" == bytes(b"abc"), False)
self.assertEqual(b"ab" > bytes(b"abc"), False)
self.assertEqual(b"ab" >= bytes(b"abc"), False)
self.assertEqual(b"abc" < bytes(b"ab"), False)
self.assertEqual(b"abc" <= bytes(b"ab"), False)
self.assertEqual(bytes(b"abc") == b"abc", True)
self.assertEqual(bytes(b"ab") != b"abc", True)
self.assertEqual(bytes(b"ab") <= b"abc", True)
self.assertEqual(bytes(b"ab") < b"abc", True)
self.assertEqual(bytes(b"abc") >= b"ab", True)
self.assertEqual(bytes(b"abc") > b"ab", True)
self.assertEqual(bytes(b"abc") != b"abc", False)
self.assertEqual(bytes(b"ab") == b"abc", False)
self.assertEqual(bytes(b"ab") > b"abc", False)
self.assertEqual(bytes(b"ab") >= b"abc", False)
self.assertEqual(bytes(b"abc") < b"ab", False)
self.assertEqual(bytes(b"abc") <= b"ab", False)
def test_doc(self):
self.assertIsNotNone(bytearray.__doc__)
self.assertTrue(bytearray.__doc__.startswith("bytearray("), bytearray.__doc__)
self.assertIsNotNone(bytes.__doc__)
self.assertTrue(bytes.__doc__.startswith("bytes("), bytes.__doc__)
def test_from_bytearray(self):
sample = bytes(b"Hello world\n\x80\x81\xfe\xff")
buf = memoryview(sample)
b = bytearray(buf)
self.assertEqual(b, bytearray(sample))
@check_bytes_warnings
def test_to_str(self):
self.assertEqual(str(b''), "b''")
self.assertEqual(str(b'x'), "b'x'")
self.assertEqual(str(b'\x80'), "b'\\x80'")
self.assertEqual(str(bytearray(b'')), "bytearray(b'')")
self.assertEqual(str(bytearray(b'x')), "bytearray(b'x')")
self.assertEqual(str(bytearray(b'\x80')), "bytearray(b'\\x80')")
def test_literal(self):
tests = [
(b"Wonderful spam", "Wonderful spam"),
(br"Wonderful spam too", "Wonderful spam too"),
(b"\xaa\x00\000\200", "\xaa\x00\000\200"),
(br"\xaa\x00\000\200", r"\xaa\x00\000\200"),
]
for b, s in tests:
self.assertEqual(b, bytearray(s, 'latin-1'))
for c in range(128, 256):
self.assertRaises(SyntaxError, eval,
'b"%s"' % chr(c))
def test_translate(self):
b = b'hello'
ba = bytearray(b)
rosetta = bytearray(range(0, 256))
rosetta[ord('o')] = ord('e')
c = b.translate(rosetta, b'l')
self.assertEqual(b, b'hello')
self.assertEqual(c, b'hee')
c = ba.translate(rosetta, b'l')
self.assertEqual(ba, b'hello')
self.assertEqual(c, b'hee')
c = b.translate(None, b'e')
self.assertEqual(c, b'hllo')
c = ba.translate(None, b'e')
self.assertEqual(c, b'hllo')
self.assertRaises(TypeError, b.translate, None, None)
self.assertRaises(TypeError, ba.translate, None, None)
def test_split_bytearray(self):
self.assertEqual(b'a b'.split(memoryview(b' ')), [b'a', b'b'])
def test_rsplit_bytearray(self):
self.assertEqual(b'a b'.rsplit(memoryview(b' ')), [b'a', b'b'])
def test_return_self(self):
# bytearray.replace must always return a new bytearray
b = bytearray()
self.assertFalse(b.replace(b'', b'') is b)
def test_compare(self):
if sys.flags.bytes_warning:
def bytes_warning():
return test.support.check_warnings(('', BytesWarning))
with bytes_warning():
b'' == ''
with bytes_warning():
b'' != ''
with bytes_warning():
bytearray(b'') == ''
with bytes_warning():
bytearray(b'') != ''
else:
self.skipTest("BytesWarning is needed for this test: use -bb option")
# Optimizations:
# __iter__? (optimization)
# __reversed__? (optimization)
# XXX More string methods? (Those that don't use character properties)
# There are tests in string_tests.py that are more
# comprehensive for things like split, partition, etc.
# Unfortunately they are all bundled with tests that
# are not appropriate for bytes
# I've started porting some of those into bytearray_tests.py, we should port
# the rest that make sense (the code can be cleaned up to use modern
# unittest methods at the same time).
class BytearrayPEP3137Test(unittest.TestCase,
test.buffer_tests.MixinBytesBufferCommonTests):
def marshal(self, x):
return bytearray(x)
def test_returns_new_copy(self):
val = self.marshal(b'1234')
# On immutable types these MAY return a reference to themselves
# but on mutable types like bytearray they MUST return a new copy.
for methname in ('zfill', 'rjust', 'ljust', 'center'):
method = getattr(val, methname)
newval = method(3)
self.assertEqual(val, newval)
self.assertTrue(val is not newval,
methname+' returned self on a mutable object')
for expr in ('val.split()[0]', 'val.rsplit()[0]',
'val.partition(b".")[0]', 'val.rpartition(b".")[2]',
'val.splitlines()[0]', 'val.replace(b"", b"")'):
newval = eval(expr)
self.assertEqual(val, newval)
self.assertTrue(val is not newval,
expr+' returned val on a mutable object')
class FixedStringTest(test.string_tests.BaseTest):
def fixtype(self, obj):
if isinstance(obj, str):
return obj.encode("utf-8")
return super().fixtype(obj)
    # Currently the bytes containment testing uses a single integer
    # value. This may not be the final design, but until then the
    # string-based containment tests are not valid for bytes.
def test_contains(self):
pass
def test_expandtabs(self):
pass
def test_upper(self):
pass
def test_lower(self):
pass
class ByteArrayAsStringTest(FixedStringTest):
type2test = bytearray
contains_bytes = True
class BytesAsStringTest(FixedStringTest):
type2test = bytes
contains_bytes = True
class SubclassTest(unittest.TestCase):
def test_basic(self):
self.assertTrue(issubclass(self.subclass2test, self.type2test))
self.assertIsInstance(self.subclass2test(), self.type2test)
a, b = b"abcd", b"efgh"
_a, _b = self.subclass2test(a), self.subclass2test(b)
# test comparison operators with subclass instances
self.assertTrue(_a == _a)
self.assertTrue(_a != _b)
self.assertTrue(_a < _b)
self.assertTrue(_a <= _b)
self.assertTrue(_b >= _a)
self.assertTrue(_b > _a)
self.assertTrue(_a is not a)
# test concat of subclass instances
self.assertEqual(a + b, _a + _b)
self.assertEqual(a + b, a + _b)
self.assertEqual(a + b, _a + b)
# test repeat
self.assertTrue(a*5 == _a*5)
def test_join(self):
# Make sure join returns a NEW object for single item sequences
# involving a subclass.
# Make sure that it is of the appropriate type.
s1 = self.subclass2test(b"abcd")
s2 = self.type2test().join([s1])
self.assertTrue(s1 is not s2)
self.assertTrue(type(s2) is self.type2test, type(s2))
# Test reverse, calling join on subclass
s3 = s1.join([b"abcd"])
self.assertTrue(type(s3) is self.type2test)
def test_pickle(self):
a = self.subclass2test(b"abcd")
a.x = 10
a.y = self.subclass2test(b"efgh")
for proto in range(pickle.HIGHEST_PROTOCOL + 1):
b = pickle.loads(pickle.dumps(a, proto))
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(type(a), type(b))
self.assertEqual(type(a.y), type(b.y))
def test_copy(self):
a = self.subclass2test(b"abcd")
a.x = 10
a.y = self.subclass2test(b"efgh")
for copy_method in (copy.copy, copy.deepcopy):
b = copy_method(a)
self.assertNotEqual(id(a), id(b))
self.assertEqual(a, b)
self.assertEqual(a.x, b.x)
self.assertEqual(a.y, b.y)
self.assertEqual(type(a), type(b))
self.assertEqual(type(a.y), type(b.y))
class ByteArraySubclass(bytearray):
pass
class BytesSubclass(bytes):
pass
class ByteArraySubclassTest(SubclassTest):
type2test = bytearray
subclass2test = ByteArraySubclass
def test_init_override(self):
class subclass(bytearray):
def __init__(me, newarg=1, *args, **kwargs):
bytearray.__init__(me, *args, **kwargs)
x = subclass(4, b"abcd")
x = subclass(4, source=b"abcd")
self.assertEqual(x, b"abcd")
x = subclass(newarg=4, source=b"abcd")
self.assertEqual(x, b"abcd")
class BytesSubclassTest(SubclassTest):
type2test = bytes
subclass2test = BytesSubclass
def test_main():
test.support.run_unittest(
BytesTest, AssortedBytesTest, BytesAsStringTest,
ByteArrayTest, ByteArrayAsStringTest, BytesSubclassTest,
ByteArraySubclassTest, BytearrayPEP3137Test)
if __name__ == "__main__":
test_main()
from datetime import timedelta
import os
import shutil
import string
import tempfile
import warnings
from django.conf import settings
from django.contrib.sessions.backends.db import SessionStore as DatabaseSession
from django.contrib.sessions.backends.cache import SessionStore as CacheSession
from django.contrib.sessions.backends.cached_db import SessionStore as CacheDBSession
from django.contrib.sessions.backends.file import SessionStore as FileSession
from django.contrib.sessions.backends.signed_cookies import SessionStore as CookieSession
from django.contrib.sessions.models import Session
from django.contrib.sessions.middleware import SessionMiddleware
from django.core.cache import get_cache
from django.core import management
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.http import HttpResponse
from django.test import TestCase, RequestFactory
from django.test.utils import override_settings
from django.utils import six
from django.utils import timezone
from django.utils import unittest
class SessionTestsMixin(object):
# This does not inherit from TestCase to avoid any tests being run with this
# class, which wouldn't work, and to allow different TestCase subclasses to
# be used.
backend = None # subclasses must specify
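    # A concrete test case binds one backend to a real TestCase, e.g.
    # (a sketch of the pattern):
    #   class DatabaseSessionTests(SessionTestsMixin, TestCase):
    #       backend = DatabaseSession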
def setUp(self):
self.session = self.backend()
def tearDown(self):
# NB: be careful to delete any sessions created; stale sessions fill up
# the /tmp (with some backends) and eventually overwhelm it after lots
# of runs (think buildbots)
self.session.delete()
def test_new_session(self):
self.assertFalse(self.session.modified)
self.assertFalse(self.session.accessed)
def test_get_empty(self):
self.assertEqual(self.session.get('cat'), None)
def test_store(self):
self.session['cat'] = "dog"
self.assertTrue(self.session.modified)
self.assertEqual(self.session.pop('cat'), 'dog')
def test_pop(self):
self.session['some key'] = 'exists'
# Need to reset these to pretend we haven't accessed it:
        self.session.accessed = False
        self.session.modified = False
self.assertEqual(self.session.pop('some key'), 'exists')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('some key'), None)
def test_pop_default(self):
self.assertEqual(self.session.pop('some key', 'does not exist'),
'does not exist')
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_setdefault(self):
self.assertEqual(self.session.setdefault('foo', 'bar'), 'bar')
self.assertEqual(self.session.setdefault('foo', 'baz'), 'bar')
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_update(self):
self.session.update({'update key': 1})
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
self.assertEqual(self.session.get('update key', None), 1)
def test_has_key(self):
self.session['some key'] = 1
self.session.modified = False
self.session.accessed = False
self.assertIn('some key', self.session)
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
def test_values(self):
self.assertEqual(list(self.session.values()), [])
self.assertTrue(self.session.accessed)
self.session['some key'] = 1
self.assertEqual(list(self.session.values()), [1])
def test_iterkeys(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iterkeys(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), ['x'])
def test_itervalues(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.itervalues(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [1])
def test_iteritems(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
i = six.iteritems(self.session)
self.assertTrue(hasattr(i, '__iter__'))
self.assertTrue(self.session.accessed)
self.assertFalse(self.session.modified)
self.assertEqual(list(i), [('x', 1)])
def test_clear(self):
self.session['x'] = 1
self.session.modified = False
self.session.accessed = False
self.assertEqual(list(self.session.items()), [('x', 1)])
self.session.clear()
self.assertEqual(list(self.session.items()), [])
self.assertTrue(self.session.accessed)
self.assertTrue(self.session.modified)
def test_save(self):
        if (hasattr(self.session, '_cache') and 'DummyCache' in
                settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND']):
raise unittest.SkipTest("Session saving tests require a real cache backend")
self.session.save()
self.assertTrue(self.session.exists(self.session.session_key))
def test_delete(self):
self.session.save()
self.session.delete(self.session.session_key)
self.assertFalse(self.session.exists(self.session.session_key))
def test_flush(self):
self.session['foo'] = 'bar'
self.session.save()
prev_key = self.session.session_key
self.session.flush()
self.assertFalse(self.session.exists(prev_key))
self.assertNotEqual(self.session.session_key, prev_key)
self.assertTrue(self.session.modified)
self.assertTrue(self.session.accessed)
def test_cycle(self):
self.session['a'], self.session['b'] = 'c', 'd'
self.session.save()
prev_key = self.session.session_key
prev_data = list(self.session.items())
self.session.cycle_key()
self.assertNotEqual(self.session.session_key, prev_key)
self.assertEqual(list(self.session.items()), prev_data)
def test_invalid_key(self):
# Submitting an invalid session key (either by guessing, or if the db has
# removed the key) results in a new key being generated.
try:
session = self.backend('1')
try:
session.save()
except AttributeError:
self.fail("The session object did not save properly. Middleware may be saving cache items without namespaces.")
self.assertNotEqual(session.session_key, '1')
self.assertEqual(session.get('cat'), None)
session.delete()
finally:
# Some backends leave a stale cache entry for the invalid
# session key; make sure that entry is manually deleted
session.delete('1')
def test_session_key_is_read_only(self):
def set_session_key(session):
session.session_key = session._get_new_session_key()
self.assertRaises(AttributeError, set_session_key, self.session)
# Custom session expiry
def test_default_expiry(self):
        # A normal session has a max age equal to settings.SESSION_COOKIE_AGE.
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
# So does a custom session with an idle expiration time of 0 (but it'll
# expire at browser close)
self.session.set_expiry(0)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_custom_expiry_seconds(self):
modification = timezone.now()
self.session.set_expiry(10)
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_timedelta(self):
modification = timezone.now()
# Mock timezone.now, because set_expiry calls it on this code path.
original_now = timezone.now
try:
timezone.now = lambda: modification
self.session.set_expiry(timedelta(seconds=10))
finally:
timezone.now = original_now
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_datetime(self):
modification = timezone.now()
self.session.set_expiry(modification + timedelta(seconds=10))
date = self.session.get_expiry_date(modification=modification)
self.assertEqual(date, modification + timedelta(seconds=10))
age = self.session.get_expiry_age(modification=modification)
self.assertEqual(age, 10)
def test_custom_expiry_reset(self):
self.session.set_expiry(None)
self.session.set_expiry(10)
self.session.set_expiry(None)
self.assertEqual(self.session.get_expiry_age(), settings.SESSION_COOKIE_AGE)
def test_get_expire_at_browser_close(self):
# Tests get_expire_at_browser_close with different settings and different
# set_expiry calls
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=False):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertFalse(self.session.get_expire_at_browser_close())
with override_settings(SESSION_EXPIRE_AT_BROWSER_CLOSE=True):
self.session.set_expiry(10)
self.assertFalse(self.session.get_expire_at_browser_close())
self.session.set_expiry(0)
self.assertTrue(self.session.get_expire_at_browser_close())
self.session.set_expiry(None)
self.assertTrue(self.session.get_expire_at_browser_close())
def test_decode(self):
# Ensure we can decode what we encode
data = {'a test key': 'a test value'}
encoded = self.session.encode(data)
self.assertEqual(self.session.decode(encoded), data)
def test_actual_expiry(self):
# Regression test for #19200
old_session_key = None
new_session_key = None
try:
self.session['foo'] = 'bar'
self.session.set_expiry(-timedelta(seconds=10))
self.session.save()
old_session_key = self.session.session_key
# With an expiry date in the past, the session expires instantly.
new_session = self.backend(self.session.session_key)
new_session_key = new_session.session_key
self.assertNotIn('foo', new_session)
finally:
self.session.delete(old_session_key)
self.session.delete(new_session_key)
class DatabaseSessionTests(SessionTestsMixin, TestCase):
backend = DatabaseSession
def test_session_get_decoded(self):
"""
        Test that we can use Session.get_decoded to retrieve data stored
        in the normal way.
"""
self.session['x'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
self.assertEqual(s.get_decoded(), {'x': 1})
def test_sessionmanager_save(self):
"""
Test SessionManager.save method
"""
# Create a session
self.session['y'] = 1
self.session.save()
s = Session.objects.get(session_key=self.session.session_key)
# Change it
Session.objects.save(s.session_key, {'y': 2}, s.expire_date)
# Clear cache, so that it will be retrieved from DB
del self.session._session_cache
self.assertEqual(self.session['y'], 2)
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.db")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
self.assertEqual(0, Session.objects.count())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the database before clearsessions...
self.assertEqual(2, Session.objects.count())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, Session.objects.count())
@override_settings(USE_TZ=True)
class DatabaseSessionWithTimeZoneTests(DatabaseSessionTests):
pass
class CacheDBSessionTests(SessionTestsMixin, TestCase):
backend = CacheDBSession
@unittest.skipIf('DummyCache' in
settings.CACHES[settings.SESSION_CACHE_ALIAS]['BACKEND'],
"Session saving tests require a real cache backend")
def test_exists_searches_cache_first(self):
self.session.save()
with self.assertNumQueries(0):
self.assertTrue(self.session.exists(self.session.session_key))
def test_load_overlong_key(self):
# Some backends might issue a warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
@override_settings(USE_TZ=True)
class CacheDBSessionWithTimeZoneTests(CacheDBSessionTests):
pass
# Don't need DB flushing for these tests, so can use unittest.TestCase as base class
class FileSessionTests(SessionTestsMixin, unittest.TestCase):
backend = FileSession
def setUp(self):
# Do file session tests in an isolated directory, and kill it after we're done.
self.original_session_file_path = settings.SESSION_FILE_PATH
self.temp_session_store = settings.SESSION_FILE_PATH = tempfile.mkdtemp()
# Reset the file session backend's internal caches
if hasattr(self.backend, '_storage_path'):
del self.backend._storage_path
super(FileSessionTests, self).setUp()
def tearDown(self):
super(FileSessionTests, self).tearDown()
settings.SESSION_FILE_PATH = self.original_session_file_path
shutil.rmtree(self.temp_session_store)
@override_settings(
SESSION_FILE_PATH="/if/this/directory/exists/you/have/a/weird/computer")
def test_configuration_check(self):
del self.backend._storage_path
# Make sure the file backend checks for a good storage dir
self.assertRaises(ImproperlyConfigured, self.backend)
def test_invalid_key_backslash(self):
# Ensure we don't allow directory-traversal
self.assertRaises(SuspiciousOperation,
self.backend("a\\b\\c").load)
def test_invalid_key_forwardslash(self):
# Ensure we don't allow directory-traversal
self.assertRaises(SuspiciousOperation,
self.backend("a/b/c").load)
@override_settings(SESSION_ENGINE="django.contrib.sessions.backends.file")
def test_clearsessions_command(self):
"""
Test clearsessions command for clearing expired sessions.
"""
storage_path = self.backend._get_storage_path()
file_prefix = settings.SESSION_COOKIE_NAME
def count_sessions():
return len([session_file for session_file in os.listdir(storage_path)
if session_file.startswith(file_prefix)])
self.assertEqual(0, count_sessions())
# One object in the future
self.session['foo'] = 'bar'
self.session.set_expiry(3600)
self.session.save()
# One object in the past
other_session = self.backend()
other_session['foo'] = 'bar'
other_session.set_expiry(-3600)
other_session.save()
# Two sessions are in the filesystem before clearsessions...
self.assertEqual(2, count_sessions())
management.call_command('clearsessions')
# ... and one is deleted.
self.assertEqual(1, count_sessions())
class CacheSessionTests(SessionTestsMixin, unittest.TestCase):
backend = CacheSession
def test_load_overlong_key(self):
# Some backends might issue a warning
with warnings.catch_warnings():
warnings.simplefilter("ignore")
self.session._session_key = (string.ascii_letters + string.digits) * 20
self.assertEqual(self.session.load(), {})
def test_default_cache(self):
self.session.save()
self.assertNotEqual(get_cache('default').get(self.session.cache_key), None)
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'sessions': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
}, SESSION_CACHE_ALIAS='sessions')
def test_non_default_cache(self):
self.session.save()
self.assertEqual(get_cache('default').get(self.session.cache_key), None)
self.assertNotEqual(get_cache('sessions').get(self.session.cache_key), None)
class SessionMiddlewareTests(unittest.TestCase):
@override_settings(SESSION_COOKIE_SECURE=True)
def test_secure_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['secure'])
@override_settings(SESSION_COOKIE_HTTPONLY=True)
def test_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertTrue(
response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertIn('httponly',
str(response.cookies[settings.SESSION_COOKIE_NAME]))
@override_settings(SESSION_COOKIE_HTTPONLY=False)
def test_no_httponly_session_cookie(self):
request = RequestFactory().get('/')
response = HttpResponse('Session test')
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
self.assertFalse(response.cookies[settings.SESSION_COOKIE_NAME]['httponly'])
self.assertNotIn('httponly',
str(response.cookies[settings.SESSION_COOKIE_NAME]))
def test_session_save_on_500(self):
request = RequestFactory().get('/')
response = HttpResponse('Horrible error')
response.status_code = 500
middleware = SessionMiddleware()
        # Simulate a request that modifies the session
middleware.process_request(request)
request.session['hello'] = 'world'
# Handle the response through the middleware
response = middleware.process_response(request, response)
# Check that the value wasn't saved above.
self.assertNotIn('hello', request.session.load())
class CookieSessionTests(SessionTestsMixin, TestCase):
backend = CookieSession
def test_save(self):
"""
This test tested exists() in the other session backends, but that
doesn't make sense for us.
"""
pass
def test_cycle(self):
"""
This test tested cycle_key() which would create a new session
key for the same session data. But we can't invalidate previously
signed cookies (other than letting them expire naturally) so
testing for this behavior is meaningless.
"""
pass
@unittest.expectedFailure
def test_actual_expiry(self):
# The cookie backend doesn't handle non-default expiry dates, see #19201
super(CookieSessionTests, self).test_actual_expiry()
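# Sketch (not part of the original suite): thanks to the mixin pattern above,
# a new session backend gets the whole test matrix simply by subclassing
# SessionTestsMixin and pointing `backend` at the backend class. The backend
# name below is hypothetical.
#
#     class MyBackendSessionTests(SessionTestsMixin, unittest.TestCase):
#         backend = MyBackendSession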
# coding: utf-8
from math import pi, sin, cos, acos
import csv
from django.core.exceptions import ObjectDoesNotExist
from django.db import IntegrityError, transaction
from .models import Event, Role, Person, Task
class InternalError(Exception):
pass
def earth_distance(pos1, pos2):
'''Taken from http://www.johndcook.com/python_longitude_latitude.html.'''
# Extract fields.
lat1, long1 = pos1
lat2, long2 = pos2
# Convert latitude and longitude to spherical coordinates in radians.
degrees_to_radians = pi/180.0
# phi = 90 - latitude
phi1 = (90.0 - lat1) * degrees_to_radians
phi2 = (90.0 - lat2) * degrees_to_radians
# theta = longitude
theta1 = long1 * degrees_to_radians
theta2 = long2 * degrees_to_radians
# Compute spherical distance from spherical coordinates.
# For two locations in spherical coordinates
    # (1, theta, phi) and (1, theta', phi')
# cosine( arc length ) = sin phi sin phi' cos(theta-theta') + cos phi cos phi'
# distance = rho * arc length
c = sin(phi1) * sin(phi2) * cos(theta1 - theta2) + cos(phi1) * cos(phi2)
arc = acos(c)
# Multiply by 6373 to get distance in km.
return arc * 6373
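def _earth_distance_example():
    """Sanity-check sketch; not part of the original module.

    The coordinates are illustrative: Paris (48.85, 2.35) to London
    (51.51, -0.13) should come out at roughly 340 km.
    """
    km = earth_distance((48.85, 2.35), (51.51, -0.13))
    assert 300 < km < 400, km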
def upload_person_task_csv(stream):
"""Read people from CSV and return a JSON-serializable list of dicts.
The input `stream` should be a file-like object that returns
Unicode data.
"Serializability" is required because we put this data into session. See
https://docs.djangoproject.com/en/1.7/topics/http/sessions/ for details.
Also return a list of fields from Person.PERSON_UPLOAD_FIELDS for which
no data was given.
"""
result = []
reader = csv.DictReader(stream)
empty_fields = set()
for row in reader:
entry = {}
for col in Person.PERSON_UPLOAD_FIELDS:
try:
entry[col] = row[col].strip()
except (KeyError, IndexError, AttributeError):
                # either `col` is missing from `row`, or `.strip()` fails
                # (e.g. `row[col]` is `None` instead of a string)
entry[col] = None
empty_fields.add(col)
for col in Person.PERSON_TASK_EXTRA_FIELDS:
entry[col] = row.get(col, None)
entry['errors'] = None
result.append(entry)
return result, list(empty_fields)
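# Usage sketch (assumption: Person.PERSON_UPLOAD_FIELDS includes 'personal',
# 'family' and 'email', as the verification code below suggests):
#
#     import io
#     rows, empty = upload_person_task_csv(io.StringIO(
#         u'personal,family,email\n'
#         u'John,Smith,john@example.org\n'
#     ))
#     # rows[0]['email'] == u'john@example.org'; `empty` lists the upload
#     # columns missing from the CSV header.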
def verify_upload_person_task(data):
"""
    Verify that uploaded data is correct. Show errors by populating the
    ``errors`` dictionary item. This function changes ``data`` in place.
"""
errors_occur = False
for item in data:
errors = []
event = item.get('event', None)
if event:
try:
Event.objects.get(slug=event)
except Event.DoesNotExist:
errors.append(u'Event with slug {0} does not exist.'
.format(event))
role = item.get('role', None)
if role:
try:
Role.objects.get(name=role)
except Role.DoesNotExist:
errors.append(u'Role with name {0} does not exist.'
.format(role))
except Role.MultipleObjectsReturned:
errors.append(u'More than one role named {0} exists.'
.format(role))
# check if the user exists, and if so: check if existing user's
# personal and family names are the same as uploaded
email = item.get('email', None)
personal = item.get('personal', None)
middle = item.get('middle', None)
family = item.get('family', None)
person = None
if email:
            # we don't have to create the user if the email already matches
            # an existing Person, but then the uploaded personal and family
            # names should match the database record, too
try:
person = Person.objects.get(email__iexact=email)
for (which, actual, uploaded) in (
('personal', person.personal, personal),
('middle', person.middle, middle),
('family', person.family, family)):
                    if (actual != uploaded and
                            not (actual is None and uploaded == '')):
                        errors.append('{0}: database "{1}" vs uploaded "{2}"'
                                      .format(which, actual, uploaded))
except Person.DoesNotExist:
# in this case we need to add the user
pass
        if person:
            if not any([event, role]):
                errors.append("User exists but no event or role to assign"
                              " the user to was provided")
        if (event and not role) or (role and not event):
            errors.append("Must provide both event ({0}) and role ({1}),"
                          " or neither".format(event, role))
if errors:
errors_occur = True
item['errors'] = errors
return errors_occur
def create_uploaded_persons_tasks(data):
"""
Create persons and tasks from upload data.
"""
# Quick sanity check.
if any([row.get('errors') for row in data]):
raise InternalError('Uploaded data contains errors, cancelling upload')
persons_created = []
tasks_created = []
with transaction.atomic():
for row in data:
try:
fields = {key: row[key] for key in Person.PERSON_UPLOAD_FIELDS}
fields['username'] = create_username(row['personal'],
row['family'])
if fields['email']:
# we should use existing Person or create one
p, created = Person.objects.get_or_create(
email__iexact=fields['email'], defaults=fields
)
if created:
persons_created.append(p)
else:
# we should create a new Person without any email provided
p = Person(**fields)
p.save()
persons_created.append(p)
if row['event'] and row['role']:
e = Event.objects.get(slug=row['event'])
r = Role.objects.get(name=row['role'])
t, created = Task.objects.get_or_create(person=p, event=e,
role=r)
if created:
tasks_created.append(t)
except IntegrityError as e:
raise IntegrityError('{0} (for {1})'.format(str(e), row))
except ObjectDoesNotExist as e:
raise ObjectDoesNotExist('{0} (for {1})'.format(str(e), row))
return persons_created, tasks_created
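# Input sketch (assumption, inferred from the code above): each row carries
# the Person.PERSON_UPLOAD_FIELDS keys plus 'event', 'role' and 'errors':
#
#     {'personal': 'John', 'family': 'Smith', 'email': 'john@example.org',
#      'event': '2016-01-01-somewhere', 'role': 'learner', 'errors': None}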
def create_username(personal, family):
'''Generate unique username.'''
stem = normalize_name(family) + '.' + normalize_name(personal)
counter = None
while True:
try:
if counter is None:
username = stem
counter = 1
else:
counter += 1
username = '{0}.{1}'.format(stem, counter)
Person.objects.get(username=username)
except ObjectDoesNotExist:
break
if any([ord(c) >= 128 for c in username]):
raise InternalError('Normalized username still contains non-normal '
'characters "{0}"'.format(username))
return username
def normalize_name(name):
'''Get rid of spaces, funky characters, etc.'''
name = name.strip()
for (accented, flat) in [(' ', '-')]:
name = name.replace(accented, flat)
    # We use a lower-cased username because it corresponds directly to
    # files Software Carpentry stores about some people - and, as we know,
    # some filesystems are not case-sensitive.
return name.lower()
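def _create_username_example():
    """Sketch; not part of the original module.

    normalize_name lower-cases and turns spaces into dashes, so the stem for
    ('Jan Maria', 'van der Berg') would be 'van-der-berg.jan-maria';
    create_username then appends '.2', '.3', ... until the name is unused.
    """
    assert normalize_name('Van Der Berg') == 'van-der-berg'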
import os
import socket
import time
import types
from OpenSSL import SSL
from netlib.exceptions import HttpReadDisconnect, HttpException
from netlib.tcp import Address
import netlib.tutils
from netlib import tcp, http, socks
from netlib.certutils import SSLCert
from netlib.http import authentication, http1
from netlib.tutils import raises
from pathod import pathoc, pathod
from mitmproxy.builtins import script
from mitmproxy import controller
from mitmproxy.proxy.config import HostMatcher, parse_server_spec
from mitmproxy.models import Error, HTTPResponse, HTTPFlow
from . import tutils, tservers
"""
Note that the choice of response code in these tests matters more than you
might think. libcurl treats a 304 response code differently from, say, a
200 response code - it will correctly terminate a 304 response with no
content-length header, whereas it will block forever waiting for content
for a 200 response.
"""
class CommonMixin:
def test_large(self):
assert len(self.pathod("200:b@50k").content) == 1024 * 50
@staticmethod
def wait_until_not_live(flow):
"""
Race condition: We don't want to replay the flow while it is still live.
"""
s = time.time()
while flow.live:
time.sleep(0.001)
if time.time() - s > 5:
raise RuntimeError("Flow is live for too long.")
def test_replay(self):
assert self.pathod("304").status_code == 304
if isinstance(self, tservers.HTTPUpstreamProxyTest) and self.ssl:
assert len(self.master.state.view) == 2
else:
assert len(self.master.state.view) == 1
l = self.master.state.view[-1]
assert l.response.status_code == 304
l.request.path = "/p/305"
self.wait_until_not_live(l)
rt = self.master.replay_request(l, block=True)
assert l.response.status_code == 305
# Disconnect error
l.request.path = "/p/305:d0"
rt = self.master.replay_request(l, block=True)
assert not rt
if isinstance(self, tservers.HTTPUpstreamProxyTest):
assert l.response.status_code == 502
else:
assert l.error
# Port error
l.request.port = 1
# In upstream mode, we get a 502 response from the upstream proxy server.
# In upstream mode with ssl, the replay will fail as we cannot establish
# SSL with the upstream proxy.
rt = self.master.replay_request(l, block=True)
assert not rt
if isinstance(self, tservers.HTTPUpstreamProxyTest):
assert l.response.status_code == 502
else:
assert l.error
def test_http(self):
f = self.pathod("304")
assert f.status_code == 304
# In Upstream mode with SSL, we may already have a previous CONNECT
# request.
l = self.master.state.view[-1]
assert l.client_conn.address
assert "host" in l.request.headers
assert l.response.status_code == 304
def test_invalid_http(self):
t = tcp.TCPClient(("127.0.0.1", self.proxy.port))
t.connect()
t.wfile.write(b"invalid\r\n\r\n")
t.wfile.flush()
line = t.rfile.readline()
assert (b"Bad Request" in line) or (b"Bad Gateway" in line)
def test_sni(self):
if not self.ssl:
return
f = self.pathod("304", sni="testserver.com")
assert f.status_code == 304
log = self.server.last_log()
assert log["request"]["sni"] == "testserver.com"
class TcpMixin:
def _ignore_on(self):
assert not hasattr(self, "_ignore_backup")
self._ignore_backup = self.config.check_ignore
self.config.check_ignore = HostMatcher(
[".+:%s" % self.server.port] + self.config.check_ignore.patterns)
def _ignore_off(self):
assert hasattr(self, "_ignore_backup")
self.config.check_ignore = self._ignore_backup
del self._ignore_backup
def test_ignore(self):
n = self.pathod("304")
self._ignore_on()
i = self.pathod("305")
i2 = self.pathod("306")
self._ignore_off()
self.master.event_queue.join()
assert n.status_code == 304
assert i.status_code == 305
assert i2.status_code == 306
assert any(f.response.status_code == 304 for f in self.master.state.flows)
assert not any(f.response.status_code == 305 for f in self.master.state.flows)
assert not any(f.response.status_code == 306 for f in self.master.state.flows)
# Test that we get the original SSL cert
if self.ssl:
i_cert = SSLCert(i.sslinfo.certchain[0])
i2_cert = SSLCert(i2.sslinfo.certchain[0])
n_cert = SSLCert(n.sslinfo.certchain[0])
assert i_cert == i2_cert
assert i_cert != n_cert
# Test Non-HTTP traffic
spec = "200:i0,@100:d0" # this results in just 100 random bytes
# mitmproxy responds with bad gateway
assert self.pathod(spec).status_code == 502
self._ignore_on()
with raises(HttpException):
self.pathod(spec) # pathoc tries to parse answer as HTTP
self._ignore_off()
def _tcpproxy_on(self):
assert not hasattr(self, "_tcpproxy_backup")
self._tcpproxy_backup = self.config.check_tcp
self.config.check_tcp = HostMatcher(
[".+:%s" % self.server.port] + self.config.check_tcp.patterns)
def _tcpproxy_off(self):
assert hasattr(self, "_tcpproxy_backup")
self.config.check_tcp = self._tcpproxy_backup
del self._tcpproxy_backup
def test_tcp(self):
n = self.pathod("304")
self._tcpproxy_on()
i = self.pathod("305")
i2 = self.pathod("306")
self._tcpproxy_off()
self.master.event_queue.join()
assert n.status_code == 304
assert i.status_code == 305
assert i2.status_code == 306
assert any(f.response.status_code == 304 for f in self.master.state.flows if isinstance(f, HTTPFlow))
assert not any(f.response.status_code == 305 for f in self.master.state.flows if isinstance(f, HTTPFlow))
assert not any(f.response.status_code == 306 for f in self.master.state.flows if isinstance(f, HTTPFlow))
# Test that we get the original SSL cert
if self.ssl:
i_cert = SSLCert(i.sslinfo.certchain[0])
i2_cert = SSLCert(i2.sslinfo.certchain[0])
n_cert = SSLCert(n.sslinfo.certchain[0])
assert i_cert == i2_cert == n_cert
# Make sure that TCP messages are in the event log.
# Re-enable and fix this when we start keeping TCPFlows in the state.
# assert any("305" in m for m in self.master.tlog)
# assert any("306" in m for m in self.master.tlog)
class AppMixin:
def test_app(self):
ret = self.app("/")
assert ret.status_code == 200
assert b"mitmproxy" in ret.content
class TestHTTP(tservers.HTTPProxyTest, CommonMixin, AppMixin):
def test_app_err(self):
p = self.pathoc()
ret = p.request("get:'http://errapp/'")
assert ret.status_code == 500
assert b"ValueError" in ret.content
def test_invalid_connect(self):
t = tcp.TCPClient(("127.0.0.1", self.proxy.port))
t.connect()
t.wfile.write(b"CONNECT invalid\n\n")
t.wfile.flush()
assert b"Bad Request" in t.rfile.readline()
def test_upstream_ssl_error(self):
p = self.pathoc()
ret = p.request("get:'https://localhost:%s/'" % self.server.port)
assert ret.status_code == 400
def test_connection_close(self):
# Add a body, so we have a content-length header, which combined with
# HTTP1.1 means the connection is kept alive.
response = '%s/p/200:b@1' % self.server.urlbase
        # Let's sanity-check that the connection does indeed stay open by
        # issuing two requests over the same connection
p = self.pathoc()
assert p.request("get:'%s'" % response)
assert p.request("get:'%s'" % response)
# Now check that the connection is closed as the client specifies
p = self.pathoc()
assert p.request("get:'%s':h'Connection'='close'" % response)
# There's a race here, which means we can get any of a number of errors.
# Rather than introduce yet another sleep into the test suite, we just
# relax the Exception specification.
with raises(Exception):
p.request("get:'%s'" % response)
def test_reconnect(self):
req = "get:'%s/p/200:b@1:da'" % self.server.urlbase
p = self.pathoc()
assert p.request(req)
# Server has disconnected. Mitmproxy should detect this, and reconnect.
assert p.request(req)
assert p.request(req)
def test_get_connection_switching(self):
def switched(l):
for i in l:
if "serverdisconnect" in i:
return True
req = "get:'%s/p/200:b@1'"
p = self.pathoc()
assert p.request(req % self.server.urlbase)
assert p.request(req % self.server2.urlbase)
assert switched(self.proxy.tlog)
def test_blank_leading_line(self):
p = self.pathoc()
req = "get:'%s/p/201':i0,'\r\n'"
assert p.request(req % self.server.urlbase).status_code == 201
def test_invalid_headers(self):
p = self.pathoc()
resp = p.request("get:'http://foo':h':foo'='bar'")
assert resp.status_code == 400
def test_stream(self):
self.master.set_stream_large_bodies(1024 * 2)
self.pathod("200:b@1k")
assert not self.master.state.view[-1].response.stream
assert len(self.master.state.view[-1].response.content) == 1024 * 1
self.pathod("200:b@3k")
assert self.master.state.view[-1].response.stream
assert self.master.state.view[-1].response.content is None
self.master.set_stream_large_bodies(None)
def test_stream_modify(self):
s = script.Script(
tutils.test_data.path("data/addonscripts/stream_modify.py")
)
self.master.addons.add(s)
d = self.pathod('200:b"foo"')
assert d.content == b"bar"
self.master.addons.remove(s)
class TestHTTPAuth(tservers.HTTPProxyTest):
def test_auth(self):
self.master.options.auth_singleuser = "test:test"
assert self.pathod("202").status_code == 407
p = self.pathoc()
ret = p.request("""
get
'http://localhost:%s/p/202'
h'%s'='%s'
""" % (
self.server.port,
http.authentication.BasicProxyAuth.AUTH_HEADER,
authentication.assemble_http_basic_auth("basic", "test", "test")
))
assert ret.status_code == 202
class TestHTTPS(tservers.HTTPProxyTest, CommonMixin, TcpMixin):
ssl = True
ssloptions = pathod.SSLOptions(request_client_cert=True)
def test_clientcert_file(self):
try:
self.config.clientcerts = os.path.join(
tutils.test_data.path("data/clientcert"), "client.pem")
f = self.pathod("304")
assert f.status_code == 304
assert self.server.last_log()["request"]["clientcert"]["keyinfo"]
finally:
self.config.clientcerts = None
def test_clientcert_dir(self):
try:
self.config.clientcerts = tutils.test_data.path("data/clientcert")
f = self.pathod("304")
assert f.status_code == 304
assert self.server.last_log()["request"]["clientcert"]["keyinfo"]
finally:
self.config.clientcerts = None
def test_error_post_connect(self):
p = self.pathoc()
assert p.request("get:/:i0,'invalid\r\n\r\n'").status_code == 400
class TestHTTPSCertfile(tservers.HTTPProxyTest, CommonMixin):
ssl = True
certfile = True
def test_certfile(self):
assert self.pathod("304")
class TestHTTPSUpstreamServerVerificationWTrustedCert(tservers.HTTPProxyTest):
"""
Test upstream server certificate verification with a trusted server cert.
"""
ssl = True
ssloptions = pathod.SSLOptions(
cn=b"trusted-cert",
certs=[
("trusted-cert", tutils.test_data.path("data/trusted-server.crt"))
])
def test_verification_w_cadir(self):
        self.config.options.update(
            ssl_verify_upstream_cert=True,
            ssl_verify_upstream_trusted_cadir=tutils.test_data.path(
                "data/trusted-cadir/"
            )
        )
self.pathoc()
def test_verification_w_pemfile(self):
self.config.openssl_verification_mode_server = SSL.VERIFY_PEER
self.config.options.ssl_verify_upstream_trusted_ca = tutils.test_data.path(
"data/trusted-cadir/trusted-ca.pem")
self.pathoc()
class TestHTTPSUpstreamServerVerificationWBadCert(tservers.HTTPProxyTest):
"""
Test upstream server certificate verification with an untrusted server cert.
"""
ssl = True
ssloptions = pathod.SSLOptions(
cn=b"untrusted-cert",
certs=[
("untrusted-cert", tutils.test_data.path("data/untrusted-server.crt"))
])
def _request(self):
p = self.pathoc()
# We need to make an actual request because the upstream connection is lazy-loaded.
return p.request("get:/p/242")
def test_default_verification_w_bad_cert(self):
"""Should use no verification."""
        self.config.options.update(
            ssl_verify_upstream_trusted_ca=tutils.test_data.path(
                "data/trusted-cadir/trusted-ca.pem"
            )
        )
assert self._request().status_code == 242
def test_no_verification_w_bad_cert(self):
        self.config.options.update(
            ssl_verify_upstream_cert=False,
            ssl_verify_upstream_trusted_ca=tutils.test_data.path(
                "data/trusted-cadir/trusted-ca.pem"
            )
        )
assert self._request().status_code == 242
def test_verification_w_bad_cert(self):
        self.config.options.update(
            ssl_verify_upstream_cert=True,
            ssl_verify_upstream_trusted_ca=tutils.test_data.path(
                "data/trusted-cadir/trusted-ca.pem"
            )
        )
assert self._request().status_code == 502
class TestHTTPSNoCommonName(tservers.HTTPProxyTest):
"""
    Test what happens if we get a cert without a common name back.
"""
ssl = True
ssloptions = pathod.SSLOptions(
certs=[
(b"*", tutils.test_data.path("data/no_common_name.pem"))
]
)
def test_http(self):
f = self.pathod("202")
assert f.sslinfo.certchain[0].get_subject().CN == "127.0.0.1"
class TestReverse(tservers.ReverseProxyTest, CommonMixin, TcpMixin):
reverse = True
class TestSocks5(tservers.SocksModeTest):
def test_simple(self):
p = self.pathoc()
p.socks_connect(("localhost", self.server.port))
f = p.request("get:/p/200")
assert f.status_code == 200
def test_with_authentication_only(self):
p = self.pathoc()
f = p.request("get:/p/200")
assert f.status_code == 502
assert b"SOCKS5 mode failure" in f.content
def test_no_connect(self):
"""
mitmproxy doesn't support UDP or BIND SOCKS CMDs
"""
p = self.pathoc()
socks.ClientGreeting(
socks.VERSION.SOCKS5,
[socks.METHOD.NO_AUTHENTICATION_REQUIRED]
).to_file(p.wfile)
socks.Message(
socks.VERSION.SOCKS5,
socks.CMD.BIND,
socks.ATYP.DOMAINNAME,
("example.com", 8080)
).to_file(p.wfile)
p.wfile.flush()
p.rfile.read(2) # read server greeting
f = p.request("get:/p/200") # the request doesn't matter, error response from handshake will be read anyway.
assert f.status_code == 502
assert b"SOCKS5 mode failure" in f.content
class TestHttps2Http(tservers.ReverseProxyTest):
@classmethod
def get_options(cls):
opts = super(TestHttps2Http, cls).get_options()
s = parse_server_spec(opts.upstream_server)
opts.upstream_server = "http://%s" % s.address
return opts
def pathoc(self, ssl, sni=None):
"""
Returns a connected Pathoc instance.
"""
        p = pathoc.Pathoc(
            ("localhost", self.proxy.port), ssl=ssl, sni=sni, fp=None
        )
p.connect()
return p
def test_all(self):
p = self.pathoc(ssl=True)
assert p.request("get:'/p/200'").status_code == 200
def test_sni(self):
p = self.pathoc(ssl=True, sni="example.com")
assert p.request("get:'/p/200'").status_code == 200
assert all("Error in handle_sni" not in msg for msg in self.proxy.tlog)
def test_http(self):
p = self.pathoc(ssl=False)
assert p.request("get:'/p/200'").status_code == 200
class TestTransparent(tservers.TransparentProxyTest, CommonMixin, TcpMixin):
ssl = False
def test_tcp_stream_modify(self):
s = script.Script(
tutils.test_data.path("data/addonscripts/tcp_stream_modify.py")
)
self.master.addons.add(s)
self._tcpproxy_on()
d = self.pathod('200:b"foo"')
self._tcpproxy_off()
assert d.content == b"bar"
self.master.addons.remove(s)
class TestTransparentSSL(tservers.TransparentProxyTest, CommonMixin, TcpMixin):
ssl = True
def test_sslerr(self):
p = pathoc.Pathoc(("localhost", self.proxy.port), fp=None)
p.connect()
r = p.request("get:/")
assert r.status_code == 502
class TestProxy(tservers.HTTPProxyTest):
def test_http(self):
f = self.pathod("304")
assert f.status_code == 304
f = self.master.state.view[0]
assert f.client_conn.address
assert "host" in f.request.headers
assert f.response.status_code == 304
@tutils.skip_appveyor
def test_response_timestamps(self):
# test that we notice at least 1 sec delay between timestamps
# in response object
f = self.pathod("304:b@1k:p50,1")
assert f.status_code == 304
response = self.master.state.view[0].response
# timestamp_start might fire a bit late, so we play safe and only require 300ms.
assert 0.3 <= response.timestamp_end - response.timestamp_start
@tutils.skip_appveyor
def test_request_timestamps(self):
# test that we notice a delay between timestamps in request object
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(("127.0.0.1", self.proxy.port))
# call pathod server, wait a second to complete the request
connection.send(
b"GET http://localhost:%d/p/304:b@1k HTTP/1.1\r\n" %
self.server.port)
time.sleep(1)
connection.send(b"\r\n")
connection.recv(50000)
connection.close()
request, response = self.master.state.view[
0].request, self.master.state.view[0].response
assert response.status_code == 304 # sanity test for our low level request
# timestamp_start might fire a bit late, so we play safe and only require 300ms.
assert 0.3 <= request.timestamp_end - request.timestamp_start
def test_request_tcp_setup_timestamp_presence(self):
        # tests that the client_conn of a tcp connection has a tcp_setup_timestamp
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(("localhost", self.proxy.port))
connection.send(
b"GET http://localhost:%d/p/200:b@1k HTTP/1.1\r\n" %
self.server.port)
connection.send(b"\r\n")
        # a bit hacky: make sure that we don't read just the headers.
recvd = 0
while recvd < 1024:
recvd += len(connection.recv(5000))
connection.send(
b"GET http://localhost:%d/p/200:b@1k HTTP/1.1\r\n" %
self.server.port)
connection.send(b"\r\nb")
recvd = 0
while recvd < 1024:
recvd += len(connection.recv(5000))
connection.close()
first_flow = self.master.state.view[0]
second_flow = self.master.state.view[1]
assert first_flow.server_conn.timestamp_tcp_setup
assert first_flow.server_conn.timestamp_ssl_setup is None
assert second_flow.server_conn.timestamp_tcp_setup
assert first_flow.server_conn.timestamp_tcp_setup == second_flow.server_conn.timestamp_tcp_setup
def test_request_ip(self):
f = self.pathod("200:b@100")
assert f.status_code == 200
f = self.master.state.view[0]
assert f.server_conn.address == ("127.0.0.1", self.server.port)
class TestProxySSL(tservers.HTTPProxyTest):
ssl = True
def test_request_ssl_setup_timestamp_presence(self):
# tests that the ssl timestamp is present when ssl is used
f = self.pathod("304:b@10k")
assert f.status_code == 304
first_flow = self.master.state.view[0]
assert first_flow.server_conn.timestamp_ssl_setup
class MasterRedirectRequest(tservers.TestMaster):
redirect_port = None # Set by TestRedirectRequest
@controller.handler
def request(self, f):
if f.request.path == "/p/201":
# This part should have no impact, but it should also not cause any exceptions.
addr = f.live.server_conn.address
addr2 = Address(("127.0.0.1", self.redirect_port))
f.live.set_server(addr2)
f.live.set_server(addr)
# This is the actual redirection.
f.request.port = self.redirect_port
super(MasterRedirectRequest, self).request(f)
@controller.handler
def response(self, f):
f.response.content = bytes(f.client_conn.address.port)
f.response.headers["server-conn-id"] = str(f.server_conn.source_address.port)
super(MasterRedirectRequest, self).response(f)
class TestRedirectRequest(tservers.HTTPProxyTest):
masterclass = MasterRedirectRequest
ssl = True
def test_redirect(self):
"""
Imagine a single HTTPS connection with three requests:
1. First request should pass through unmodified
2. Second request will be redirected to a different host by an inline script
3. Third request should pass through unmodified
This test verifies that the original destination is restored for the third request.
"""
self.master.redirect_port = self.server2.port
p = self.pathoc()
self.server.clear_log()
self.server2.clear_log()
r1 = p.request("get:'/p/200'")
assert r1.status_code == 200
assert self.server.last_log()
assert not self.server2.last_log()
self.server.clear_log()
self.server2.clear_log()
r2 = p.request("get:'/p/201'")
assert r2.status_code == 201
assert not self.server.last_log()
assert self.server2.last_log()
self.server.clear_log()
self.server2.clear_log()
r3 = p.request("get:'/p/202'")
assert r3.status_code == 202
assert self.server.last_log()
assert not self.server2.last_log()
assert r1.content == r2.content == r3.content
class MasterStreamRequest(tservers.TestMaster):
"""
Enables the stream flag on the flow for all requests
"""
@controller.handler
def responseheaders(self, f):
f.response.stream = True
class TestStreamRequest(tservers.HTTPProxyTest):
masterclass = MasterStreamRequest
def test_stream_simple(self):
p = self.pathoc()
# a request with 100k of data but without content-length
r1 = p.request("get:'%s/p/200:r:b@100k:d102400'" % self.server.urlbase)
assert r1.status_code == 200
assert len(r1.content) > 100000
def test_stream_multiple(self):
p = self.pathoc()
# simple request with streaming turned on
r1 = p.request("get:'%s/p/200'" % self.server.urlbase)
assert r1.status_code == 200
# now send back 100k of data, streamed but not chunked
r1 = p.request("get:'%s/p/201:b@100k'" % self.server.urlbase)
assert r1.status_code == 201
def test_stream_chunked(self):
connection = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
connection.connect(("127.0.0.1", self.proxy.port))
fconn = connection.makefile("rb")
spec = '200:h"Transfer-Encoding"="chunked":r:b"4\\r\\nthis\\r\\n11\\r\\nisatest__reachhex\\r\\n0\\r\\n\\r\\n"'
connection.send(
b"GET %s/p/%s HTTP/1.1\r\n" %
(self.server.urlbase.encode(), spec.encode()))
connection.send(b"\r\n")
resp = http1.read_response_head(fconn)
assert resp.headers["Transfer-Encoding"] == 'chunked'
assert resp.status_code == 200
chunks = list(http1.read_body(fconn, None))
assert chunks == [b"this", b"isatest__reachhex"]
connection.close()
class MasterFakeResponse(tservers.TestMaster):
@controller.handler
def request(self, f):
resp = HTTPResponse.wrap(netlib.tutils.tresp())
f.reply.send(resp)
class TestFakeResponse(tservers.HTTPProxyTest):
masterclass = MasterFakeResponse
def test_fake(self):
f = self.pathod("200")
assert "header-response" in f.headers
class TestServerConnect(tservers.HTTPProxyTest):
masterclass = MasterFakeResponse
ssl = True
@classmethod
def get_options(cls):
opts = tservers.HTTPProxyTest.get_options()
opts.no_upstream_cert = True
return opts
def test_unnecessary_serverconnect(self):
"""A replayed/fake response with no_upstream_cert should not connect to an upstream server"""
assert self.pathod("200").status_code == 200
for msg in self.proxy.tmaster.tlog:
assert "serverconnect" not in msg
class MasterKillRequest(tservers.TestMaster):
@controller.handler
def request(self, f):
f.reply.kill()
class TestKillRequest(tservers.HTTPProxyTest):
masterclass = MasterKillRequest
def test_kill(self):
with raises(HttpReadDisconnect):
self.pathod("200")
# Nothing should have hit the server
assert not self.server.last_log()
class MasterKillResponse(tservers.TestMaster):
@controller.handler
def response(self, f):
f.reply.kill()
class TestKillResponse(tservers.HTTPProxyTest):
masterclass = MasterKillResponse
def test_kill(self):
with raises(HttpReadDisconnect):
self.pathod("200")
# The server should have seen a request
assert self.server.last_log()
class EResolver(tservers.TResolver):
def original_addr(self, sock):
raise RuntimeError("Could not resolve original destination.")
class TestTransparentResolveError(tservers.TransparentProxyTest):
resolver = EResolver
def test_resolve_error(self):
assert self.pathod("304").status_code == 502
class MasterIncomplete(tservers.TestMaster):
@controller.handler
def request(self, f):
resp = HTTPResponse.wrap(netlib.tutils.tresp())
resp.content = None
f.reply.send(resp)
class TestIncompleteResponse(tservers.HTTPProxyTest):
masterclass = MasterIncomplete
def test_incomplete(self):
assert self.pathod("200").status_code == 502
class TestUpstreamProxy(tservers.HTTPUpstreamProxyTest, CommonMixin, AppMixin):
ssl = False
def test_order(self):
self.proxy.tmaster.options.replacements = [
("~q", "foo", "bar"),
("~q", "bar", "baz"),
("~q", "foo", "oh noes!"),
("~s", "baz", "ORLY")
]
p = self.pathoc()
req = p.request("get:'%s/p/418:b\"foo\"'" % self.server.urlbase)
assert req.content == b"ORLY"
assert req.status_code == 418
class TestUpstreamProxySSL(
tservers.HTTPUpstreamProxyTest,
CommonMixin,
TcpMixin):
ssl = True
def _host_pattern_on(self, attr):
"""
Updates config.check_tcp or check_ignore, depending on attr.
"""
assert not hasattr(self, "_ignore_%s_backup" % attr)
backup = []
for proxy in self.chain:
old_matcher = getattr(
proxy.tmaster.server.config,
"check_%s" %
attr)
backup.append(old_matcher)
setattr(
proxy.tmaster.server.config,
"check_%s" % attr,
HostMatcher([".+:%s" % self.server.port] + old_matcher.patterns)
)
setattr(self, "_ignore_%s_backup" % attr, backup)
def _host_pattern_off(self, attr):
backup = getattr(self, "_ignore_%s_backup" % attr)
for proxy in reversed(self.chain):
setattr(
proxy.tmaster.server.config,
"check_%s" % attr,
backup.pop()
)
assert not backup
delattr(self, "_ignore_%s_backup" % attr)
def _ignore_on(self):
super(TestUpstreamProxySSL, self)._ignore_on()
self._host_pattern_on("ignore")
def _ignore_off(self):
super(TestUpstreamProxySSL, self)._ignore_off()
self._host_pattern_off("ignore")
def _tcpproxy_on(self):
super(TestUpstreamProxySSL, self)._tcpproxy_on()
self._host_pattern_on("tcp")
def _tcpproxy_off(self):
super(TestUpstreamProxySSL, self)._tcpproxy_off()
self._host_pattern_off("tcp")
def test_simple(self):
p = self.pathoc()
req = p.request("get:'/p/418:b\"content\"'")
assert req.content == b"content"
assert req.status_code == 418
        # CONNECT and request from pathoc to chain[0]
        assert self.proxy.tmaster.state.flow_count() == 2
        # CONNECT and request from proxy to chain[1]
        assert self.chain[0].tmaster.state.flow_count() == 2
        # request from chain[0] (regular proxy doesn't store CONNECTs)
        assert self.chain[1].tmaster.state.flow_count() == 1
class TestProxyChainingSSLReconnect(tservers.HTTPUpstreamProxyTest):
ssl = True
def test_reconnect(self):
"""
Tests proper functionality of ConnectionHandler.server_reconnect mock.
If we have a disconnect on a secure connection that's transparently proxified to
an upstream http proxy, we need to send the CONNECT request again.
"""
def kill_requests(master, attr, exclude):
k = [0] # variable scope workaround: put into array
_func = getattr(master, attr)
@controller.handler
def handler(*args):
f = args[-1]
k[0] += 1
if not (k[0] in exclude):
f.client_conn.finish()
f.error = Error("terminated")
f.reply.kill()
return _func(f)
setattr(master, attr, types.MethodType(handler, master))
        kill_requests(
            self.chain[1].tmaster,
            "request",
            exclude=[
                # fail first request
                2,  # allow second request
            ]
        )
kill_requests(self.chain[0].tmaster, "request",
exclude=[
1, # CONNECT
# fail first request
3, # reCONNECT
4, # request
])
p = self.pathoc()
req = p.request("get:'/p/418:b\"content\"'")
assert req.content == b"content"
assert req.status_code == 418
        assert self.proxy.tmaster.state.flow_count() == 2  # CONNECT and request
        # CONNECT, failing request, reCONNECT, request
        assert self.chain[0].tmaster.state.flow_count() == 4
        # failing request, request (doesn't store the repeated CONNECTs
        # from chain[0], as it is a regular proxy)
        assert self.chain[1].tmaster.state.flow_count() == 2
assert not self.chain[1].tmaster.state.flows[0].response # killed
assert self.chain[1].tmaster.state.flows[1].response
assert self.proxy.tmaster.state.flows[0].request.first_line_format == "authority"
assert self.proxy.tmaster.state.flows[1].request.first_line_format == "relative"
assert self.chain[0].tmaster.state.flows[
0].request.first_line_format == "authority"
assert self.chain[0].tmaster.state.flows[
1].request.first_line_format == "relative"
assert self.chain[0].tmaster.state.flows[
2].request.first_line_format == "authority"
assert self.chain[0].tmaster.state.flows[
3].request.first_line_format == "relative"
assert self.chain[1].tmaster.state.flows[
0].request.first_line_format == "relative"
assert self.chain[1].tmaster.state.flows[
1].request.first_line_format == "relative"
req = p.request("get:'/p/418:b\"content2\"'")
assert req.status_code == 502
        assert self.proxy.tmaster.state.flow_count() == 3  # + new request
        # + new request, + repeated CONNECT from chain[1] (both terminated)
        assert self.chain[0].tmaster.state.flow_count() == 6
        # nothing new happened here
        assert self.chain[1].tmaster.state.flow_count() == 2
class AddUpstreamCertsToClientChainMixin:
ssl = True
servercert = tutils.test_data.path("data/trusted-server.crt")
ssloptions = pathod.SSLOptions(
cn=b"trusted-cert",
certs=[
(b"trusted-cert", servercert)
]
)
def test_add_upstream_certs_to_client_chain(self):
with open(self.servercert, "rb") as f:
d = f.read()
upstreamCert = SSLCert.from_pem(d)
p = self.pathoc()
upstream_cert_found_in_client_chain = False
for receivedCert in p.server_certs:
if receivedCert.digest('sha256') == upstreamCert.digest('sha256'):
upstream_cert_found_in_client_chain = True
break
        assert (upstream_cert_found_in_client_chain ==
                self.master.options.add_upstream_certs_to_client_chain)
class TestHTTPSAddUpstreamCertsToClientChainTrue(
AddUpstreamCertsToClientChainMixin,
tservers.HTTPProxyTest
):
"""
If --add-server-certs-to-client-chain is True, then the client should
receive the upstream server's certificates
"""
@classmethod
def get_options(cls):
opts = super(tservers.HTTPProxyTest, cls).get_options()
opts.add_upstream_certs_to_client_chain = True
return opts
class TestHTTPSAddUpstreamCertsToClientChainFalse(
AddUpstreamCertsToClientChainMixin,
tservers.HTTPProxyTest
):
"""
If --add-server-certs-to-client-chain is False, then the client should not
receive the upstream server's certificates
"""
@classmethod
def get_options(cls):
opts = super(tservers.HTTPProxyTest, cls).get_options()
opts.add_upstream_certs_to_client_chain = False
return opts
import datetime
import time
from decimal import Decimal, InvalidOperation
from django.conf import settings
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ugettext_noop
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.db.models import Q
from django.contrib.contenttypes import generic
from django.contrib.contenttypes.models import ContentType
from django.utils import formats
from zorna.forms import fields
from zorna.models import ZornaEntity
class FormsWorkspace(ZornaEntity):
name = models.CharField(max_length=150)
slug = models.SlugField(max_length=150)
class Meta:
ordering = ['name']
db_table = settings.TABLE_PREFIX + "forms_workspace"
def __unicode__(self):
return self.name
def get_acl_permissions():
return {
u'manager': ugettext_noop(u'Who can use this workspace'),
}
get_acl_permissions = staticmethod(get_acl_permissions)
class FormsList(models.Model):
"""
"""
name = models.CharField(_("Name"), max_length=255)
description = models.CharField(_(
"Description"), blank=True, max_length=255)
workspace = models.ForeignKey(FormsWorkspace, editable=False)
class Meta:
verbose_name = _('list')
verbose_name_plural = _('lists')
db_table = settings.TABLE_PREFIX + "forms_list"
def __unicode__(self):
return self.name
class FormsListEntry(models.Model):
"""
"""
value = models.CharField(_("Value"), max_length=255)
list = models.ForeignKey(FormsList, editable=False)
class Meta:
        verbose_name = _('list entry')
        verbose_name_plural = _('list entries')
db_table = settings.TABLE_PREFIX + "forms_list_entry"
def __unicode__(self):
return self.value
class FormsForm(ZornaEntity):
"""
"""
name = models.CharField(_("Name"), max_length=255)
slug = models.SlugField(max_length=100, unique=True)
bind_to_account = models.BooleanField(_("Bind to account"), default=False, help_text=_(
"If checked, each record will be linked to user account"))
bind_to_entry = models.CharField(_("Bind to entry"), default='', max_length=255, help_text=_(
"If filled, each record will be linked to target form entry"))
bind_display = models.CharField(_(
"Bind display"), default='', max_length=255, editable=False)
button_text = models.CharField(_(
"Button text"), max_length=50, default=_("Submit"))
description = models.TextField(_('Description'), blank=True)
send_email = models.BooleanField(_("Send email"), default=False, help_text=_(
"If checked, an email will be sent"), editable=False)
email_from = models.EmailField(_("From address"), blank=True, help_text=_(
"The address the email will be sent from"), editable=False)
email_copies = models.CharField(_("Send copies to"), blank=True, help_text=_(
"One or more email addresses, separated by commas"), max_length=200, editable=False)
email_subject = models.CharField(_(
"Subject"), max_length=200, blank=True, editable=False)
email_message = models.TextField(_("Message"), blank=True, editable=False)
template = models.TextField(_('Template'), editable=False, blank=True)
workspace = models.ForeignKey(FormsWorkspace, editable=False)
class Meta:
verbose_name = _('forms form')
        verbose_name_plural = _('forms forms')
db_table = settings.TABLE_PREFIX + "forms_form"
def __unicode__(self):
return self.name
def get_url_path(self):
return reverse('forms_add_form_entry', args=[self.slug])
def get_url_browse_path(self):
return reverse('form_browse_entries_view', args=[self.slug])
def get_acl_permissions():
return {
u'viewer': ugettext_noop(u'Who can see the list of recordings of this form'),
u'creator': ugettext_noop(u'Who can create new records'),
u'modifier': ugettext_noop(u'Who can modify records'),
}
get_acl_permissions = staticmethod(get_acl_permissions)
class FormsFormAction(models.Model):
form = models.ForeignKey(FormsForm, null=True, editable=False)
content_type = models.ForeignKey(ContentType, editable=False)
object_id = models.IntegerField(editable=False)
content_object = generic.GenericForeignKey('content_type', 'object_id')
class Meta:
verbose_name = _('form action')
verbose_name_plural = _('form actions')
db_table = settings.TABLE_PREFIX + "forms_form_actions"
def __unicode__(self):
return u'%s [%s]' % (self.content_object, self.form.name)
class FormsFormActionMessage(models.Model):
form = models.ForeignKey(FormsForm, null=True, editable=False)
message = models.TextField(_("Message"))
class Meta:
verbose_name = _('message form action')
verbose_name_plural = _('message form actions')
db_table = settings.TABLE_PREFIX + "forms_form_action_messages"
def __unicode__(self):
return u'%s' % self.message
class FormsFormActionUrl(models.Model):
form = models.ForeignKey(FormsForm, null=True, editable=False)
url = models.CharField(_("Url"), max_length=255)
class Meta:
verbose_name = _('url form action')
verbose_name_plural = _('url form actions')
db_table = settings.TABLE_PREFIX + "forms_form_action_urls"
def __unicode__(self):
return u'%s' % self.url
class FormsFormPanel(models.Model):
form = models.ForeignKey(FormsForm, null=True, editable=False)
name = models.CharField(_(
"Name"), max_length=255, help_text=_("Control name"))
label = models.CharField(_("Title"), max_length=255, help_text=_(
"Panel title"), null=True, blank=True)
width = models.CharField(_("Width"), max_length=255, help_text=_(
"Width including units (px, %, ...)"), null=True, blank=True)
height = models.CharField(_("Height"), max_length=255, help_text=_(
"Height including units (px, %, ...)"), null=True, blank=True)
margin = models.CharField(_("Margin"), max_length=255, help_text=_(
"Margin including units (px, %, ...)"), null=True, blank=True)
css_class = models.CharField(_("Css Class"), max_length=255, help_text=_(
"Css classes"), null=True, blank=True)
stacked = models.BooleanField(_("Stacked"), help_text=_(
"Label and control are stacked"), default=False)
sort_order = models.IntegerField(_(
'sort order'), default=0, editable=False, help_text='The order you would like panels to be displayed.')
panel_header = models.TextField(_('Panel header'), blank=True)
panel_footer = models.TextField(_('Panel footer'), blank=True)
class Meta:
verbose_name = _('form panel')
verbose_name_plural = _('form panels')
ordering = ['sort_order']
db_table = settings.TABLE_PREFIX + "forms_form_panels"
def __unicode__(self):
return u'%s (%s)' % (self.name, self.label)
class FormsFormFieldManager(models.Manager):
"""
    Only show visible fields when displaying the actual form.
"""
def visible(self):
return self.filter(visible=True)
def not_visible(self):
return self.filter(visible=False)
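# Usage sketch (assumption, not from the source): since FormsFormField sets
# `objects = FormsFormFieldManager()`, a view rendering a form can do
#
#     FormsFormField.objects.visible().filter(form=some_form)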
BORDER_STYLES = (
('', ''),
('none', 'none'),
('dotted', 'dotted'),
('dashed', 'dashed'),
('solid', 'solid'),
('double', 'double'),
('groove', 'groove'),
('ridge', 'ridge'),
('inset', 'inset'),
('outset', 'outset'),
)
LABEL_SIZE = (
('', ''),
('x-small', 'x-small'),
('small', 'small'),
('medium', 'medium'),
('large', 'large'),
('x-large', 'x-large'),
)
class FormsFormField(models.Model):
"""
"""
form = models.ForeignKey(FormsForm, related_name="fields", editable=False)
label = models.CharField(_("Label"), max_length=255)
slug = models.SlugField(max_length=100)
help_text = models.CharField(_("Help text"), blank=True, max_length=255)
required = models.BooleanField(_("Required"), default=True)
visible = models.BooleanField(_("Visible"), default=True)
default_value = models.CharField(_(
"Default value"), blank=True, max_length=255)
sort_order = models.IntegerField(_(
'sort order'), default=0, editable=False, help_text='The order you would like fields to be displayed.')
field_type = models.IntegerField(_(
"Type"), choices=fields.NAMES, default=fields.TEXT)
list = models.ForeignKey(FormsList, null=True, blank=True,)
reference = models.CharField(_(
"Reference"), max_length=255, blank=True, default='')
reference_display = models.CharField(_(
"Reference display"), max_length=255, blank=True, default='')
panel = models.ForeignKey(FormsFormPanel, null=True, blank=True)
visible_in_list = models.BooleanField(_(
"Visible in list"), default=True, editable=False)
sort_order_list = models.IntegerField(_(
'sort order list'), default=0, editable=False, help_text='The order you would like fields to be displayed in lists.')
for_sort = models.BooleanField(_(
"Use this field to sort"), default=False, editable=False)
width = models.CharField(_("Width"), max_length=255, help_text=_(
"Width including units (px, %, ...)"), null=True, blank=True)
margin = models.CharField(_("Margin"), max_length=255, help_text=_(
"Margin including units (px, %, ...)"), null=True, blank=True)
padding = models.CharField(_("Height"), max_length=255, help_text=_(
"Padding including units (px, %, ...)"), null=True, blank=True)
css_class = models.CharField(_("Css Class"), max_length=255, help_text=_(
"Css classes"), null=True, blank=True)
bg_color = models.CharField(_("Background color"), max_length=255, help_text=_(
"Background color ( string or hex )"), null=True, blank=True)
border_width = models.CharField(_("Border width"), max_length=255, help_text=_(
"Border width including units (px, %, ...)"), null=True, blank=True)
border_style = models.CharField(_(
"Border style"), max_length=20, choices=BORDER_STYLES, default='', help_text=_("Border style"), blank=True)
    border_color = models.CharField(_("Border color"), max_length=255, help_text=_(
        "Border color ( string or hex )"), null=True, blank=True)
label_color = models.CharField(_("Label color"), max_length=255, help_text=_(
"Label color ( string or hex )"), null=True, blank=True)
label_size = models.CharField(_(
"Label size"), max_length=20, choices=LABEL_SIZE, default='', help_text=_("Label size"), blank=True)
bold = models.BooleanField(_("Bold"), help_text=_(
"Check this checkbox to make the control's label bold"), default=False)
italic = models.BooleanField(_("Italic"), help_text=_(
"Check this checkbox to italicize the control's label"), default=False)
objects = FormsFormFieldManager()
class Meta:
verbose_name = _('forms field')
verbose_name_plural = _('forms fields')
ordering = ['sort_order']
db_table = settings.TABLE_PREFIX + "forms_form_field"
def __unicode__(self):
return self.label
def get_choices(self, value=False, include_all=''):
if self.list:
if include_all:
yield '', include_all
for v in self.list.formslistentry_set.all():
yield v.value, v.value
elif self.reference and '.' in self.reference:
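            # 'reference' is "form_slug.field_slug": choices are pulled from
            # the entries of the referenced form, ordered by the referenced field.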
r = self.reference.split('.')
try:
form_target = FormsForm.objects.get(slug=r[0])
columns, entries = FormsFieldEntry.objects.forms_get_entries(
form_target, **{'ot': 'asc', 'o': r[1]})
except:
entries = []
if include_all:
yield '', include_all
if self.reference_display:
t = Template(self.reference_display)
else:
t = None
for e in entries:
if value:
f = e[r[1]]['value']
else:
f = e['entity'].pk
if t:
ec = {}
for fd in e['fields']:
ec[fd['slug']] = {'label': fd[
'label'], 'value': fd['value']}
c = Context(ec)
yield f, t.render(c)
else:
yield f, e[r[1]]['value']
def is_a(self, *args):
"""
Helper that returns True if the field's type is given in any arg.
"""
return self.field_type in args
class FormsFormEntry(ZornaEntity):
form = models.ForeignKey(FormsForm, related_name="entries")
account = models.ForeignKey(User, editable=False, null=True, blank=True)
entry = models.ForeignKey('self', editable=False, null=True, blank=True)
class Meta:
verbose_name = _('form entry')
verbose_name_plural = _('form entries')
db_table = settings.TABLE_PREFIX + "forms_form_entry"
def __unicode__(self):
return self.form.name
def get_account_avatar(self):
return self.get_user_avatar(self.account_id)
def get_account_full_name(self):
return self.get_user_full_name(self.account_id)
def get_entries(self, slug):
entries = self.formsformentry_set.filter(form__slug=slug)
return FormsFieldEntry.objects.forms_get_entries(slug, entries=entries)
"""
class FormsFormEntryExtra(models.Model):
entry = models.ForeignKey(FormsFormEntry, null=True, editable=False)
content_type = models.ForeignKey(ContentType, editable=False)
object_id = models.IntegerField(editable=False)
content_object = generic.GenericForeignKey('content_type', 'object_id')
class Meta:
verbose_name = _('forms entry extra info')
verbose_name_plural = _('forms entries extra info')
db_table = settings.TABLE_PREFIX + "forms_form_entry_extra"
def __unicode__(self):
return u'%s [%s]' % (self.content_object, self.message)
"""
class FormsFieldEntryManager(models.Manager):
def forms_get_entries(self, slug_or_form, *args, **kwargs):
if isinstance(slug_or_form, FormsForm):
form = slug_or_form
else:
try:
form = FormsForm.objects.select_related(
depth=1).get(slug=slug_or_form)
except Exception as e:
return [], []
form_fields = form.fields.all()
filterFields = None
form.fields_reference = {}
filterFieldsEntries = None
for f in form_fields:
            if f.slug in kwargs:
                value = kwargs[f.slug]
            else:
                value = None
if '.' in f.reference and f.is_a(*fields.CHOICES):
form.fields_reference[f.pk] = []
for e in f.get_choices():
form.fields_reference[f.pk].append(e)
if value and value == e[1]:
value = e[0]
            if f.slug in kwargs:
if filterFieldsEntries:
filterFieldsEntries = FormsFieldEntry.objects.filter(form_entry__form=form, value=value, field__slug=f.slug,
form_entry__pk__in = [f.form_entry_id for f in filterFieldsEntries])
else:
filterFieldsEntries = FormsFieldEntry.objects.filter(form_entry__form=form, value=value, field__slug=f.slug)
try:
if not filterFieldsEntries.exists():
return [],[]
except:
pass
entries = kwargs.get('entries', None)
f = kwargs.get('f', None)
q = kwargs.get('q', None)
o = kwargs.get('o', None)
ot = kwargs.get('ot', 'asc')
hidden = kwargs.get('hidden', '')
if hidden:
hidden = hidden.split(',')
else:
hidden = []
filter = None
if q:
if q[0] == q[-1] and q[0] in ('"', "'"):
q = q[1:-1]
if f:
filter = Q(field__slug=f) & Q(value__iexact=q)
else:
filter = Q(form_entry__account__last_name__iexact=q) | \
Q(form_entry__account__first_name__iexact=q) | \
Q(value__iexact=q)
else:
if f:
filter = Q(field__slug=f) & Q(value__icontains=q)
else:
filter = Q(form_entry__account__last_name__icontains=q) | \
Q(form_entry__account__first_name__icontains=q) | \
Q(value__icontains=q)
if filter or filterFieldsEntries:
if filter and filterFieldsEntries:
field_entries = FormsFieldEntry.objects.filter(Q(form_entry__form=form) & filter, pk__in = [f.pk for f in filterFieldsEntries])
elif filterFieldsEntries:
field_entries = FormsFieldEntry.objects.filter(Q(form_entry__form=form), pk__in = [f.pk for f in filterFieldsEntries])
else:
field_entries = FormsFieldEntry.objects.filter(Q(
form_entry__form=form) & filter)
filter = Q(form_entry__in=[f.form_entry_id for f in field_entries])
else:
filter = Q(form_entry__form=form)
# Aggregate each column
# FormsFormEntry.objects.filter(form=form).values('fields__field__label').annotate(Avg('fields__value'))
where = kwargs.get('where', '')
if where:
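            # 'where' has the form "form_slug.field_slug:entry_id"; we walk the
            # bind_to_entry chain from this form up to form_slug, building a
            # form_entry__entry__... lookup so that only entries descending
            # from the given parent entry are returned.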
try:
r = where.split(':')
entry_id = r[1]
r = r[0].split('.')
form_slug = r[0]
fr = ''
slug = form.bind_to_entry.split('.')[0]
while True:
f = FormsForm.objects.get(slug=slug)
if not f:
break
fr = fr + '__entry'
if f.slug == r[0] or not f.bind_to_entry:
break
slug = f.bind_to_entry.split('.')[0]
entry = FormsFormEntry.objects.select_related().get(
pk=entry_id)
if entry.form.slug == form_slug:
fr = 'form_entry%s' % fr
filter = filter & Q(**{fr: entry})
else:
return [], []
except:
return [], []
def entry_sort(entry):
try:
return float(entry[o]['value'])
except:
return entry[o]['value'].lower()
if entries is None:
field_entries = FormsFieldEntry.objects.select_related(
depth=1).filter(filter)
else:
field_entries = FormsFieldEntry.objects.select_related(
depth=1).filter(filter, form_entry__in=entries)
columns, entries = forms_format_entries(form, field_entries, hidden)
if o:
try:
entries.sort(
key=entry_sort, reverse=False if ot == 'asc' else True)
            except Exception as e:
                print e
return columns, entries
def forms_get_entry(self, entry):
columns, entries = self.forms_get_entries(
entry.form, entries=[entry.pk])
return columns, entries[0]
class FormsFieldEntry(models.Model):
field = models.ForeignKey(FormsFormField)
form_entry = models.ForeignKey(FormsFormEntry, related_name="fields")
value = models.CharField(max_length=2000)
objects = FormsFieldEntryManager()
class Meta:
verbose_name = _('form entry')
verbose_name_plural = _('form entries')
db_table = settings.TABLE_PREFIX + "forms_field_entry"
def format_field_value(field_entry, field):
type = field.field_type
if type == fields.ZORNA_USER:
        value = User.objects.get(pk=field_entry.value).get_profile().__unicode__()
elif type == fields.FILE:
value = reverse("file_view", args=(field_entry.id,))
elif type in fields.DATES:
value = field_entry.value
elif type in fields.CHOICES:
if field.target_entries:
if type in fields.MULTIPLE_CHOICES:
d = field_entry.value.split(',')
val = []
for v in d:
val.append(field.target_entries[int(v)]['value'])
value = val
else:
value = field.target_entries[int(field_entry.value)]['value']
elif type in fields.MULTIPLE_CHOICES:
d = field_entry.value.split(',')
val = []
for v in d:
val.append(v)
value = val
else:
value = field_entry.value
elif type == fields.DECIMAL:
value = float(field_entry.value)
elif type == fields.INTEGER:
value = int(field_entry.value)
else:
value = field_entry.value
return {'value': value, 'type': fields.NAMES_TPL[type]}
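# e.g. a DECIMAL field entry holding "3.5" yields
# {'value': 3.5, 'type': fields.NAMES_TPL[fields.DECIMAL]}.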
def get_form_field_values_old(field):
return FormsFieldEntry.objects.raw('select * from zorna_forms_field_entry where field_id=%s order by value', [field.pk])
def get_form_field_values(field, visited=None):
    # a fresh list per call avoids the shared mutable default argument
    if visited is None:
        visited = []
    if not field.reference:
return FormsFieldEntry.objects.raw('select * from zorna_forms_field_entry where field_id=%s order by value', [field.pk])
elif field.reference in visited:
# if circular return values of original field
r = visited[0].split('.')
field = FormsFormField.objects.get(form__slug=r[0], slug=r[1])
return FormsFieldEntry.objects.raw('select * from zorna_forms_field_entry where field_id=%s order by value', [field.pk])
else:
visited.append(field.reference)
r = field.reference.split('.')
field = FormsFormField.objects.get(form__slug=r[0], slug=r[1])
return get_form_field_values(field, visited)
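# Illustration (hypothetical slugs): if "forma.city" references "formb.city"
# and "formb.city" references "forma.city" again, the second visit trips the
# 'visited' check and the raw values of the first referenced field are
# returned instead of recursing forever.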
def deepish_copy(org):
    '''
    Much, much faster than deepcopy, for a dict of the simple python types.
    Only the first level is copied; deeper nesting is still shared.
    '''
    out = dict.fromkeys(org)
for k, v in org.iteritems():
try:
out[k] = v.copy() # dicts, sets
except AttributeError:
try:
out[k] = v[:] # lists, tuples, strings, unicode
except TypeError:
out[k] = v # ints
return out
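# Usage sketch for deepish_copy (hypothetical data): the first level is
# copied, so mutating a copied sub-container leaves the original intact:
#   a = {'x': {'v': 1}, 'y': [1, 2], 'z': 3}
#   b = deepish_copy(a)
#   b['x']['v'] = 2   # a['x']['v'] is still 1 ('x' was copied via .copy())
#   b['y'].append(3)  # a['y'] is still [1, 2] ('y' was copied via slicing)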
def forms_format_entries(form, query_set, hidden=[]):
    '''
    Return columns and rows from a FormsFieldEntry queryset.
    If a form is bound to an account, two fields are added:
        - zorna_owner_last_name
        - zorna_owner_first_name
    If a form is bound to another form's field, a new field is added with
    slug='slug of target field'.
    Columns are formatted as follows:
        - columns[slug] = label. This lets you access the label directly
          from the slug.
        - columns['fields'] = [{'slug': slug, 'label': label}, ...] list of columns
    rows is a list where each entry is like this:
        - entry['entity'] is the FormsFormEntry record
        - entry['id'] is the pk of the FormsFormEntry record
        - for each field with slug='slug' in columns:
            - entry[slug] = {'value': value of field, 'type': type of field}
              where type=text|url|date|list
        - entry['fields'] is a list of fields in the same order as columns,
          where each entry is:
            - {'value': value of field, 'type': type of field, 'slug': slug}
              where type=text|url|date|list
    '''
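    # Illustrative shapes (hypothetical slug 'city'):
    #   columns == {'fields': [{'slug': 'city', 'label': 'City', ...}],
    #               'city': 'City'}
    #   entries == [{'entity': <FormsFormEntry>, 'id': 42,
    #                'city': {'value': 'Paris', 'type': 'text', 'slug': 'city'},
    #                'fields': [<the same dicts, in column order>]}]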
cols = {}
columns = {'fields': []}
forms_entry_fields = {}
forms_binds = []
forms_fields = {}
form_fields = FormsFormField.objects.filter(
form=form).order_by('sort_order_list')
for f in form_fields:
forms_fields[f.pk] = f
# if records are bind to users, add owner column
if f.field_type == fields.ZORNA_USER_SINGLETON and form.bind_to_account:
slug = 'zorna_owner'
columns[slug] = _(u'User')
columns['fields'].append({'label': columns[
slug], 'slug': slug, 'type': fields.ZORNA_USER, 'values': []})
cols[slug] = {'label': columns[
slug], 'help_text': '', 'type': fields.ZORNA_USER, 'value': ''}
elif f.field_type == fields.FORM_ENTRY and form.bind_to_entry:
# if records are bind to entries of an another form
if form.bind_to_entry:
bind = form.bind_to_entry.split('.')
tmp = []
while len(bind) == 2:
if bind[1] == 'zorna_owner':
form_target = FormsForm.objects.get(slug=bind[0])
tmp.append([bind, form_target])
bind = form_target.bind_to_entry.split('.')
else:
target_field = FormsFormField.objects.get(
slug=bind[1], form__slug=bind[0])
tmp.append([bind, target_field])
bind = target_field.form.bind_to_entry.split('.')
tmp.reverse()
for b in tmp:
bind = b[0]
if bind[1] == 'zorna_owner':
target_slug = 'zorna_owner_%s' % bind[0]
forms_entry_fields[target_slug] = {}
columns[target_slug] = _("User")
columns['fields'].append({'label': columns[
target_slug], 'slug': target_slug, 'type': fields.ZORNA_USER, 'values': []})
cols[target_slug] = {'label': columns[target_slug], 'help_text': '', 'form_bind': bind[
0], 'field_bind': bind[1], 'type': fields.ZORNA_USER, 'value': ''}
form_target = b[1]
tgcols, tgrows = FormsFieldEntry.objects.forms_get_entries(
form_target, entries=[e.id for e in FormsFormEntry.objects.filter(form=form_target)])
for r in tgrows:
forms_entry_fields[target_slug][r['id']] = r
else:
target_field = b[1]
target_slug = target_field.slug
forms_entry_fields[target_slug] = {}
columns[target_slug] = target_field.label
columns['fields'].append({'label': columns[
target_slug], 'slug': target_slug, 'type': target_field.field_type, 'values': []})
cols[target_slug] = {'label': columns[target_slug], 'help_text': '', 'form_bind': bind[
0], 'field_bind': bind[1], 'type': target_field.field_type, 'value': ''}
tgcols, tgrows = forms_format_entries(
target_field.form, FormsFieldEntry.objects.select_related(depth=1).filter(field=target_field))
for r in tgrows:
forms_entry_fields[target_slug][r['id']] = r
forms_binds.append(target_slug)
else:
columns['fields'].append({
'label': f.label, 'slug': f.slug, 'type': f.field_type, 'total': 0, 'values': []})
columns[f.slug] = f.label
cols[f.slug] = {'label': f.label, 'help_text':
f.help_text, 'type': fields.NAMES_TPL[f.field_type], 'value': ''}
f.target_entries = None
try:
form.fields_reference
except:
form.fields_reference = {}
if f.field_type in fields.CHOICES and '.' in f.reference and f.slug not in hidden:
try:
form.fields_reference[f.pk]
except:
form.fields_reference[f.pk] = [e for e in f.get_choices()]
f.target_entries = {}
for e in form.fields_reference[f.pk]:
# f.target_entries[e.form_entry_id] = {'value': e.value,
# 'type':field_target.field_type}
f.target_entries[e[0]] = {
'value': e[1], 'type': f.field_type}
forms_binds.reverse()
rows = {}
rows_order = []
rows_set = set() # for fast search
for field_entry in query_set:
try:
rows[field_entry.form_entry_id]
except KeyError:
rows[field_entry.form_entry_id] = deepish_copy(cols)
try:
val = format_field_value(field_entry,
forms_fields[field_entry.field_id])
except:
val = {'value': '', 'type':
forms_fields[field_entry.field_id].field_type}
# if type == fields.FILE:
# val['value'] = request.build_absolute_uri(val['value'])
# rows[field_entry.form_entry_id][forms_fields[field_entry.field_id].slug].update(val)
rows[field_entry.form_entry_id][
forms_fields[field_entry.field_id].slug]['value'] = val['value']
rows[field_entry.form_entry_id][
forms_fields[field_entry.field_id].slug]['type'] = val['type']
rows[field_entry.form_entry_id][
forms_fields[field_entry.field_id].slug]['id'] = field_entry.pk
if field_entry.form_entry_id not in rows_set:
rows_order.append(field_entry.form_entry_id)
rows_set.add(field_entry.form_entry_id)
rows[field_entry.form_entry_id][
'zorna_entity'] = field_entry.form_entry
if form.bind_to_account:
# rows[field_entry.form_entry_id]['zorna_owner'].update({'value':field_entry.form_entry.account.get_full_name(),
# 'type':fields.NAMES_TPL[fields.ZORNA_USER]})
rows[field_entry.form_entry_id]['zorna_owner'][
'value'] = field_entry.form_entry.account.get_profile().__unicode__()
rows[field_entry.form_entry_id]['zorna_owner'][
'type'] = fields.NAMES_TPL[fields.ZORNA_USER]
if form.bind_to_entry:
e_id = field_entry.form_entry.entry_id
for t in forms_binds:
if e_id and e_id in forms_entry_fields[t]:
if t.rfind('zorna_owner_') == 0:
# rows[field_entry.form_entry_id][t].update({'value':forms_entry_fields[t][e_id]['zorna_owner']['value'],
# 'type':fields.NAMES_TPL[fields.TEXT],
# 'entry_bind':forms_entry_fields[t][e_id]})
rows[field_entry.form_entry_id][t][
'value'] = forms_entry_fields[t][e_id]['zorna_owner']['value']
rows[field_entry.form_entry_id][t][
'type'] = fields.NAMES_TPL[fields.TEXT]
rows[field_entry.form_entry_id][t][
'entry_bind'] = forms_entry_fields[t][e_id]
else:
# rows[field_entry.form_entry_id][t].update({'value':forms_entry_fields[t][e_id][t]['value'],
# 'type':fields.NAMES_TPL[fields.TEXT],
# 'entry_bind':forms_entry_fields[t][e_id]})
rows[field_entry.form_entry_id][t][
'value'] = forms_entry_fields[t][e_id][t]['value']
rows[field_entry.form_entry_id][t][
'type'] = fields.NAMES_TPL[fields.TEXT]
rows[field_entry.form_entry_id][t][
'entry_bind'] = forms_entry_fields[t][e_id]
e_id = forms_entry_fields[t][e_id]['entity'].entry_id
else:
# rows[field_entry.form_entry_id][t].update({'value':'',
# 'type':fields.NAMES_TPL[fields.TEXT]})
rows[field_entry.form_entry_id][t]['value'] = ''
rows[field_entry.form_entry_id][t][
'type'] = fields.NAMES_TPL[fields.TEXT]
for h in hidden:
if h:
for i in range(len(columns['fields'])):
slug = columns['fields'][i]['slug']
if slug == h:
del columns['fields'][i]
del columns[slug]
break
# reorder records
entries = []
for row in rows_order:
entry = {}
entry['entity'] = rows[row]['zorna_entity']
entry['id'] = rows[row]['zorna_entity'].pk
# for f in FormsFormEntry._meta.fields:
# entry[f.name] = getattr(rows[row]['zorna_entity'], f.name)
r = []
for c in columns['fields']:
if c['type'] in [fields.DECIMAL, fields.INTEGER]:
try:
if c['type'] == fields.INTEGER and rows[row][c['slug']]['value']:
c['total'] = c['total'] + int(
rows[row][c['slug']]['value'])
else:
c['total'] = c['total'] + Decimal(
str(rows[row][c['slug']]['value']))
except InvalidOperation:
pass
elif c['type'] == fields.DATE_TIME:
try:
rows[row][c['slug']]['raw_value'] = datetime.datetime(*time.strptime(rows[row][c['slug']]['value'], "%Y-%m-%d %H:%M:%S")[0:5])
rows[row][c['slug']]['value'] = formats.date_format(
rows[row][c['slug']]['raw_value'], "SHORT_DATETIME_FORMAT")
                except Exception as e:
rows[row][c['slug']]['value'] = ''
elif c['type'] == fields.DATE:
try:
rows[row][c['slug']]['raw_value'] = datetime.datetime(*time.strptime(rows[row][c['slug']]['value'], "%Y-%m-%d")[0:5])
rows[row][c['slug']]['value'] = formats.date_format(
rows[row][c['slug']]['raw_value'], "SHORT_DATE_FORMAT")
                except Exception as e:
rows[row][c['slug']]['value'] = ''
c['values'].append(rows[row][c['slug']]['value'])
entry[c['slug']] = rows[row][c['slug']]
rows[row][c['slug']].update({'slug': c['slug']})
r.append(rows[row][c['slug']])
entry['fields'] = r
entries.append(entry)
return columns, entries
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# MicroPython documentation build configuration file, created by
# sphinx-quickstart on Sun Sep 21 11:42:03 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
# Work out the port to generate the docs for
from collections import OrderedDict
micropy_port = os.getenv('MICROPY_PORT') or 'pyboard'
tags.add('port_' + micropy_port)
ports = OrderedDict((
('unix', 'unix'),
('pyboard', 'the pyboard'),
('wipy', 'the WiPy'),
('esp8266', 'the ESP8266'),
))
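# The port tag set above can be used in the .rst sources to include
# port-specific content, e.g.:
#
#   .. only:: port_pyboard
#
#      This paragraph only appears in the pyboard documentation.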
# The members of the html_context dict are available inside topindex.html
micropy_version = os.getenv('MICROPY_VERSION') or 'latest'
micropy_all_versions = (os.getenv('MICROPY_ALL_VERSIONS') or 'latest').split(',')
url_pattern = '%s/en/%%s/%%s' % (os.getenv('MICROPY_URL_PREFIX') or '/',)
html_context = {
'port':micropy_port,
'port_name':ports[micropy_port],
'port_version':micropy_version,
'all_ports':[
(port_id, url_pattern % (micropy_version, port_id))
for port_id, port_name in ports.items()
],
'all_versions':[
(ver, url_pattern % (ver, micropy_port))
for ver in micropy_all_versions
],
'downloads':[
('PDF', url_pattern % (micropy_version, 'micropython-%s.pdf' % micropy_port)),
],
}
# Specify a custom master document based on the port name,
# e.g. 'pyboard_index' when building the pyboard docs.
master_doc = micropy_port + '_index'
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx_selective_exclude.modindex_exclude',
'sphinx_selective_exclude.eager_only',
'sphinx_selective_exclude.search_auto_exclude',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
#master_doc = 'index'
# General information about the project.
project = 'MicroPython'
copyright = '2014-2018, Damien P. George, Paul Sokolovsky, and contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# We don't follow "The short X.Y version" vs "The full version, including alpha/beta/rc tags"
# breakdown, so use the same version identifier for both to avoid confusion.
version = release = '1.9.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build', '.venv']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# Global include files. Sphinx docs suggest using rst_epilog in preference
# to rst_prolog, so we follow. Absolute paths below mean "from the base
# of the doctree".
rst_epilog = """
.. include:: /templates/replace.inc
"""
# -- Options for HTML output ----------------------------------------------
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
try:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path(), '.']
except:
html_theme = 'default'
html_theme_path = ['.']
else:
html_theme_path = ['.']
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = ['.']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = '../../logo/trans-logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%d %b %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
html_additional_pages = {"index": "topindex.html"}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MicroPythondoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Include 3 levels of headers in PDF ToC
    'preamble': r'\setcounter{tocdepth}{2}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'MicroPython.tex', 'MicroPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'micropython', 'MicroPython Documentation',
['Damien P. George, Paul Sokolovsky, and contributors'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'MicroPython', 'MicroPython Documentation',
'Damien P. George, Paul Sokolovsky, and contributors', 'MicroPython', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/3.5', None)}
# Append the other ports' specific folders/files to the exclude pattern
exclude_patterns.extend([port + '*' for port in ports if port != micropy_port])
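# e.g. when building for 'pyboard' this appends ['unix*', 'wipy*', 'esp8266*'].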
modules_port_specific = {
'pyboard': ['pyb'],
'wipy': ['wipy'],
'esp8266': ['esp'],
}
modindex_exclude = []
for p, l in modules_port_specific.items():
if p != micropy_port:
modindex_exclude += l
# Exclude extra modules per port
modindex_exclude += {
'esp8266': ['cmath', 'select'],
'wipy': ['cmath'],
}.get(micropy_port, [])
from mpf.core.platform import SwitchConfig
from mpf.core.rgb_color import RGBColor
from mpf.exceptions.config_file_error import ConfigFileError
from mpf.tests.MpfTestCase import MpfTestCase, MagicMock, test_config, expect_startup_error
from mpf.tests.loop import MockSerial
class BaseMockFast(MockSerial):
def __init__(self):
super().__init__()
self.type = None
self.queue = []
self.expected_commands = {}
self.ignore_commands = {}
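        # expected_commands maps an incoming command to the response that will
        # be queued for read(); ignore_commands are simply acknowledged.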
def read(self, length):
del length
if not self.queue:
return
msg = (self.queue.pop() + '\r').encode()
return msg
def read_ready(self):
        return bool(self.queue)
def write_ready(self):
return True
def _parse(self, msg):
return False
def write(self, msg):
"""Write message."""
parts = msg.split(b'\r')
# remove last newline
assert parts.pop() == b''
for part in parts:
self._handle_msg(part)
return len(msg)
def _handle_msg(self, msg):
msg_len = len(msg)
cmd = msg.decode()
        # the trailing '\r' was already stripped in write(); ignore init garbage
if cmd == (' ' * 256 * 4):
return msg_len
if cmd[:3] == "WD:" and cmd != "WD:1":
self.queue.append("WD:P")
return msg_len
if cmd in self.ignore_commands:
self.queue.append(cmd[:3] + "P")
return msg_len
if self._parse(cmd):
return msg_len
if cmd in self.expected_commands:
if self.expected_commands[cmd]:
self.queue.append(self.expected_commands[cmd])
del self.expected_commands[cmd]
return msg_len
else:
raise Exception("Unexpected command for " + self.type + ": " + str(cmd))
def stop(self):
pass
class MockFastDmd(BaseMockFast):
def __init__(self):
super().__init__()
self.type = "DMD"
def write(self, msg):
"""Write message."""
parts = msg.split(b'\r')
        # drop the empty chunk left by the trailing newline, if present
        if parts[-1] == b'':
            parts.pop()
for part in parts:
self._handle_msg(part)
return len(msg)
def _handle_msg(self, msg):
msg_len = len(msg)
if msg == (b' ' * 256 * 4):
return msg_len
        cmd = msg
        if cmd[:3] == b"WD:":
self.queue.append("WD:P")
return msg_len
if cmd in self.ignore_commands:
self.queue.append(cmd[:3] + "P")
return msg_len
if cmd in self.expected_commands:
if self.expected_commands[cmd]:
self.queue.append(self.expected_commands[cmd])
del self.expected_commands[cmd]
return msg_len
else:
raise Exception(self.type + ": " + str(cmd))
class MockFastRgb(BaseMockFast):
def __init__(self):
super().__init__()
self.type = "RGB"
self.ignore_commands["L1:23,FF"] = True
self.leds = {}
def _parse(self, cmd):
if cmd[:3] == "RS:":
remaining = cmd[3:]
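            # RS: carries comma-separated "<led><rrggbb>" chunks, e.g.
            # "RS:0A112233,0BFFFFFF" sets LED 0A to 112233 and LED 0B to FFFFFF.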
while True:
self.leds[remaining[0:2]] = remaining[2:8]
remaining = remaining[9:]
if not remaining:
break
self.queue.append("RX:P")
return True
class MockFastNet(BaseMockFast):
def __init__(self):
super().__init__()
self.type = "NET"
class MockFastSeg(BaseMockFast):
def __init__(self):
super().__init__()
self.type = "SEG"
class TestFast(MpfTestCase):
def get_config_file(self):
return 'config.yaml'
def get_machine_path(self):
return 'tests/machine_files/fast/'
def get_platform(self):
return False
def _mock_loop(self):
self.clock.mock_serial("com3", self.seg_cpu)
self.clock.mock_serial("com4", self.net_cpu)
self.clock.mock_serial("com5", self.rgb_cpu)
self.clock.mock_serial("com6", self.dmd_cpu)
def tearDown(self):
self.dmd_cpu.expected_commands = {
b'BL:AA55': "!SRE"
}
self.rgb_cpu.expected_commands = {
"BL:AA55": "!SRE"
}
self.net_cpu.expected_commands = {
"WD:1": "WD:P"
}
super().tearDown()
if not self.startup_error:
self.assertFalse(self.net_cpu.expected_commands)
self.assertFalse(self.rgb_cpu.expected_commands)
self.assertFalse(self.dmd_cpu.expected_commands)
def setUp(self):
self.expected_duration = 2
self.net_cpu = MockFastNet()
self.rgb_cpu = MockFastRgb()
self.dmd_cpu = MockFastDmd()
self.seg_cpu = MockFastSeg()
self.dmd_cpu.expected_commands = {
b'ID:': 'ID:DMD FP-CPU-002-1 00.88',
}
self.rgb_cpu.expected_commands = {
'ID:': 'ID:RGB FP-CPU-002-1 00.89',
"RF:0": "RF:P",
"RA:000000": "RA:P",
"RF:00": "RF:P",
}
self.net_cpu.expected_commands = {
'BR:': '#!B:02', # there might be some garbage in front of the command
'ID:': 'ID:NET FP-CPU-002-1 01.03',
'NN:00': 'NN:00,FP-I/O-3208-2 ,01.00,08,20,04,06,00,00,00,00', # 3208 board
'NN:01': 'NN:01,FP-I/O-0804-1 ,01.00,04,08,04,06,00,00,00,00', # 0804 board
'NN:02': 'NN:02,FP-I/O-1616-2 ,01.00,10,10,04,06,00,00,00,00', # 1616 board
'NN:03': 'NN:03,FP-I/O-1616-2 ,01.00,10,10,04,06,00,00,00,00', # 1616 board
'NN:04': 'NN:04,,,,,,,,,,', # no board
"SA:": "SA:01,00,09,050000000000000000",
"SN:01,01,04,04": "SN:P",
"SN:02,01,04,04": "SN:P",
"SN:03,01,04,04": "SN:P",
"SN:0B,01,04,04": "SN:P",
"SN:0C,01,04,04": "SN:P",
"SN:16,01,04,04": "SN:P",
"SN:07,01,1A,05": "SN:P",
"SN:1A,01,04,04": "SN:P",
"SN:39,01,04,04": "SN:P",
"DN:01,00,00,00": "DN:P",
"DN:04,00,00,00": "DN:P",
"DN:06,00,00,00": "DN:P",
"DN:07,00,00,00": "DN:P",
"DN:11,00,00,00": "DN:P",
"DN:12,00,00,00": "DN:P",
"DN:13,00,00,00": "DN:P",
"DN:16,00,00,00": "DN:P",
"DN:17,00,00,00": "DN:P",
"DN:20,00,00,00": "DN:P",
"DN:21,00,00,00": "DN:P",
"DN:01,C1,00,18,00,FF,FF,00": "DN:P", # configure digital output
"XO:03,7F": "XO:P",
"XO:14,7F": "XO:P"
}
self.seg_cpu.expected_commands = {
'ID:': 'ID:SEG FP-CPU-002-1 00.10',
}
super().setUp()
if not self.startup_error:
self.advance_time_and_run()
self.assertFalse(self.net_cpu.expected_commands)
self.assertFalse(self.rgb_cpu.expected_commands)
self.assertFalse(self.dmd_cpu.expected_commands)
self.assertFalse(self.seg_cpu.expected_commands)
# test io board detection
self.assertEqual(4, len(self.machine.default_platform.io_boards))
self.assertEqual(32, self.machine.default_platform.io_boards[0].switch_count)
self.assertEqual(8, self.machine.default_platform.io_boards[0].driver_count)
self.assertEqual(8, self.machine.default_platform.io_boards[1].switch_count)
self.assertEqual(4, self.machine.default_platform.io_boards[1].driver_count)
self.assertEqual(16, self.machine.default_platform.io_boards[2].switch_count)
self.assertEqual(16, self.machine.default_platform.io_boards[2].driver_count)
self.assertEqual(16, self.machine.default_platform.io_boards[3].switch_count)
self.assertEqual(16, self.machine.default_platform.io_boards[3].driver_count)
self.assertEqual("00.88", self.machine.variables.get_machine_var("fast_dmd_firmware"))
self.assertEqual("FP-CPU-002-1", self.machine.variables.get_machine_var("fast_dmd_model"))
self.assertEqual("00.89", self.machine.variables.get_machine_var("fast_rgb_firmware"))
self.assertEqual("FP-CPU-002-1", self.machine.variables.get_machine_var("fast_rgb_model"))
self.assertEqual("01.03", self.machine.variables.get_machine_var("fast_net_firmware"))
self.assertEqual("FP-CPU-002-1", self.machine.variables.get_machine_var("fast_net_model"))
self.assertEqual("00.10", self.machine.variables.get_machine_var("fast_seg_firmware"))
self.assertEqual("FP-CPU-002-1", self.machine.variables.get_machine_var("fast_seg_model"))
def test_coils(self):
self._test_pulse()
self._test_long_pulse()
self._test_timed_enable()
self._test_default_timed_enable()
self._test_enable_exception()
self._test_allow_enable()
self._test_pwm_ssm()
self._test_coil_configure()
# test hardware scan
info_str = """NET CPU: NET FP-CPU-002-1 01.03
RGB CPU: RGB FP-CPU-002-1 00.89
DMD CPU: DMD FP-CPU-002-1 00.88
Segment Controller: SEG FP-CPU-002-1 00.10
Boards:
Board 0 - Model: FP-I/O-3208-2 Firmware: 01.00 Switches: 32 Drivers: 8
Board 1 - Model: FP-I/O-0804-1 Firmware: 01.00 Switches: 8 Drivers: 4
Board 2 - Model: FP-I/O-1616-2 Firmware: 01.00 Switches: 16 Drivers: 16
Board 3 - Model: FP-I/O-1616-2 Firmware: 01.00 Switches: 16 Drivers: 16
"""
self.assertEqual(info_str, self.machine.default_platform.get_info_string())
def _test_coil_configure(self):
self.assertEqual("FAST Board 0", self.machine.coils["c_test"].hw_driver.get_board_name())
self.assertEqual("FAST Board 3", self.machine.coils["c_flipper_hold"].hw_driver.get_board_name())
# last driver on board
self.net_cpu.expected_commands = {
"DN:2B,00,00,00": "DN:P"
}
coil = self.machine.default_platform.configure_driver(self.machine.coils["c_test"].hw_driver.config, '3-15',
{"connection": "network", "recycle_ms": 10})
self.assertEqual('2B', coil.number)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# board 0 has 8 drivers. configuring driver 9 should not work
with self.assertRaises(AssertionError):
self.machine.default_platform.configure_driver(self.machine.coils["c_test"].hw_driver.config, '0-8',
{"connection": "network", "recycle_ms": 10})
# only boards 0-3 exist
with self.assertRaises(AssertionError):
self.machine.default_platform.configure_driver(self.machine.coils["c_test"].hw_driver.config, '4-0',
{"connection": "network", "recycle_ms": 10})
        # only 8 + 4 + 16 + 16 = 44 = 0x2C drivers exist
with self.assertRaises(AssertionError):
self.machine.default_platform.configure_driver(self.machine.coils["c_test"].hw_driver.config, '44',
{"connection": "network", "recycle_ms": 10})
def _test_pulse(self):
self.net_cpu.expected_commands = {
"DN:04,89,00,10,17,FF,00,00,00": "DN:P"
}
# pulse coil 4
self.machine.coils["c_test"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_long_pulse(self):
# enable command
self.net_cpu.expected_commands = {
"DN:12,C1,00,18,00,FF,FF,00": "DN:P"
}
self.machine.coils["c_long_pulse"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# disable command
self.net_cpu.expected_commands = {
"TN:12,02": "TN:P"
}
self.advance_time_and_run(1)
# pulse_ms is 2000ms, so after 1s, this should not be sent
self.assertTrue(self.net_cpu.expected_commands)
self.advance_time_and_run(1)
# but after 2s, it should be
self.assertFalse(self.net_cpu.expected_commands)
def _test_timed_enable(self):
# enable command
self.net_cpu.expected_commands = {
"DN:16,89,00,10,14,FF,88,C8,00": "DN:P"
}
self.machine.coils["c_timed_enable"].timed_enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_default_timed_enable(self):
# enable command
self.net_cpu.expected_commands = {
"DN:17,89,00,10,14,FF,88,C8,00": "DN:P"
}
self.machine.coils["c_default_timed_enable"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_enable_exception(self):
# enable coil which does not have allow_enable
with self.assertRaises(AssertionError):
self.machine.coils["c_test"].enable()
self.advance_time_and_run(.1)
def _test_allow_enable(self):
self.net_cpu.expected_commands = {
"DN:06,C1,00,18,17,FF,FF,00": "DN:P"
}
self.machine.coils["c_test_allow_enable"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_pwm_ssm(self):
self.net_cpu.expected_commands = {
"DN:13,C1,00,18,0A,FF,84224244,00": "DN:P"
}
self.machine.coils["c_hold_ssm"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def test_nano_reboot(self):
# NANO reboots
self.net_cpu.queue.append("!B:00")
self.advance_time_and_run(.1)
# assert that MPF will stop
self.assertTrue(self.machine.stop_future.done())
def test_rules(self):
self._test_enable_exception_hw_rule()
self._test_two_rules_one_switch()
self._test_hw_rule_pulse()
self._test_hw_rule_pulse_pwm32()
self._test_hw_rule_pulse_inverted_switch()
self._test_hw_rule_same_board()
def _test_hw_rule_same_board(self):
self.net_cpu.expected_commands = {
"DN:21,01,07,10,0A,FF,00,00,14": "DN:P"
}
# coil and switch are on different boards but first 8 switches always work
self.machine.autofire_coils["ac_different_boards"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# switch and coil on board 3. should work
self.net_cpu.expected_commands = {
"DN:21,01,39,10,0A,FF,00,00,14": "DN:P",
"SN:39,01,02,02": "SN:P"
}
self.machine.autofire_coils["ac_board_3"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"DN:10,01,03,10,0A,89,00,00,14": "DN:P",
}
# coil and switch are on different boards
with self.assertRaises(AssertionError):
self.machine.autofire_coils["ac_broken_combination"].enable()
self.advance_time_and_run(.1)
def _test_enable_exception_hw_rule(self):
# enable coil which does not have allow_enable
with self.assertRaises(AssertionError):
self.machine.flippers["f_test_single"].config['main_coil_overwrite']['hold_power'] = 1.0
self.machine.flippers["f_test_single"].enable()
self.machine.flippers["f_test_single"].config['main_coil_overwrite']['hold_power'] = None
def _test_two_rules_one_switch(self):
self.net_cpu.expected_commands = {
"SN:03,01,02,02": "SN:P",
"DN:04,01,03,10,17,FF,00,00,1B": "DN:P",
"DN:06,01,03,10,17,FF,00,00,2E": "DN:P"
}
self.post_event("ac_same_switch")
self.hit_and_release_switch("s_flipper")
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_hw_rule_pulse(self):
self.net_cpu.expected_commands = {
"DN:07,01,16,10,0A,FF,00,00,14": "DN:P", # hw rule
"SN:16,01,02,02": "SN:P" # debounce quick on switch
}
self.machine.autofire_coils["ac_slingshot_test"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"DN:07,81": "DN:P"
}
self.machine.autofire_coils["ac_slingshot_test"].disable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_hw_rule_pulse_pwm32(self):
self.net_cpu.expected_commands = {
"DN:11,89,00,10,0A,AAAAAAAA,00,00,00": "DN:P"
}
self.machine.coils["c_pulse_pwm32_mask"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"DN:11,C1,00,18,0A,AAAAAAAA,4A4A4A4A,00": "DN:P"
}
self.machine.coils["c_pulse_pwm32_mask"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_hw_rule_pulse_inverted_switch(self):
self.net_cpu.expected_commands = {
"DN:07,11,1A,10,0A,FF,00,00,14": "DN:P",
"SN:1A,01,02,02": "SN:P"
}
self.machine.autofire_coils["ac_inverted_switch"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def test_firmware_update(self):
commands = []
def _catch_update(cmd):
commands.append(cmd)
return len(cmd)
parse_func = self.net_cpu.write
self.net_cpu.write = _catch_update
output = self.machine.default_platform.update_firmware()
self.advance_time_and_run()
self.net_cpu.write = parse_func
# check if we send the dummy update
self.assertEqual([b'BL:AA55\r>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
b'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
b'>>>>>>>>>>>>>>>>>>>>>>>>>\rBL:AA55\r<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'
b'<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'
b'<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\rBL:AA55\r>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
b'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>'
b'>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\rDUMMY UPDAT'
b'E\r', b'WD:3e8\r', b'WD:3e8\r'], commands)
expected_output = """NET CPU is version 01.03
Found an update to version 1.04 for the NET CPU. Will flash file firmware/FAST_NET_01_04_00.txt
Update done.
"""
self.assertEqual(expected_output, output)
def test_servo(self):
# go to min position
self.net_cpu.expected_commands = {
"XO:03,00": "XO:P"
}
self.machine.servos["servo1"].go_to_position(0)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# go to max position
self.net_cpu.expected_commands = {
"XO:03,FF": "XO:P"
}
self.machine.servos["servo1"].go_to_position(1)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _switch_hit_cb(self, **kwargs):
self.switch_hit = True
def test_switches(self):
self._test_switch_changes()
self._test_switch_changes_nc()
self._test_switch_configure()
def _test_switch_configure(self):
# last switch on first board
self.net_cpu.expected_commands = {
"SN:1F,01,04,04": "SN:P"
}
self.machine.default_platform.configure_switch('0-31', SwitchConfig(name="", debounce='auto', invert=0), {})
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# next should not work
with self.assertRaises(AssertionError):
self.machine.default_platform.configure_switch('0-32', SwitchConfig(name="", debounce='auto', invert=0), {})
self.net_cpu.expected_commands = {
"SN:47,01,04,04": "SN:P"
}
self.machine.default_platform.configure_switch('3-15', SwitchConfig(name="", debounce='auto', invert=0), {})
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# invalid board
with self.assertRaises(AssertionError):
self.machine.default_platform.configure_switch('4-0', SwitchConfig(name="", debounce='auto', invert=0), {})
# last switch is 0x47. 0x48 = 72
with self.assertRaises(AssertionError):
self.machine.default_platform.configure_switch('72', SwitchConfig(name="", debounce='auto', invert=0), {})
def _test_switch_changes(self):
self.assertSwitchState("s_flipper", 0)
self.assertSwitchState("s_flipper_eos", 1)
self.switch_hit = False
self.advance_time_and_run(1)
self.assertSwitchState("s_test", 0)
self.assertFalse(self.switch_hit)
self.machine.events.add_handler("s_test_active", self._switch_hit_cb)
self.machine.default_platform.process_received_message("-N:07", "NET")
self.advance_time_and_run(1)
self.assertTrue(self.switch_hit)
self.assertSwitchState("s_test", 1)
self.switch_hit = False
self.advance_time_and_run(1)
self.assertFalse(self.switch_hit)
self.assertSwitchState("s_test", 1)
self.machine.default_platform.process_received_message("/N:07", "NET")
self.advance_time_and_run(1)
self.assertFalse(self.switch_hit)
self.assertSwitchState("s_test", 0)
def _test_switch_changes_nc(self):
self.switch_hit = False
self.advance_time_and_run(1)
self.assertSwitchState("s_test_nc", 1)
self.assertFalse(self.switch_hit)
self.advance_time_and_run(1)
self.assertFalse(self.switch_hit)
self.assertSwitchState("s_test_nc", 1)
self.machine.default_platform.process_received_message("-N:1A", "NET")
self.advance_time_and_run(1)
self.assertFalse(self.switch_hit)
self.assertSwitchState("s_test_nc", 0)
self.machine.events.add_handler("s_test_nc_active", self._switch_hit_cb)
self.machine.default_platform.process_received_message("/N:1A", "NET")
self.advance_time_and_run(1)
self.assertSwitchState("s_test_nc", 1)
self.assertTrue(self.switch_hit)
self.switch_hit = False
def test_flipper_single_coil(self):
# manual flip no hw rule
self.net_cpu.expected_commands = {
"DN:20,89,00,10,0A,FF,00,00,00": "DN:P",
}
self.machine.coils["c_flipper_main"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual enable no hw rule
self.net_cpu.expected_commands = {
"DN:20,C1,00,18,0A,FF,01,00": "DN:P"
}
self.machine.coils["c_flipper_main"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual disable no hw rule
self.net_cpu.expected_commands = {
"TN:20,02": "TN:P"
}
self.machine.coils["c_flipper_main"].disable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# flipper rule enable
self.net_cpu.expected_commands = {
"DN:20,01,01,18,0B,FF,01,00,00": "DN:P",
"SN:01,01,02,02": "SN:P"
}
self.machine.flippers["f_test_single"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual flip with hw rule in action
self.net_cpu.expected_commands = {
"DN:20,89,00,10,0A,FF,00,00,00": "DN:P", # configure and pulse
"DN:20,01,01,18,0B,FF,01,00,00": "DN:P", # restore rule
}
self.machine.coils["c_flipper_main"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual flip with hw rule in action without reconfigure (same pulse)
self.net_cpu.expected_commands = {
"TN:20,01": "TN:P", # pulse
}
self.machine.coils["c_flipper_main"].pulse(11)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual enable with hw rule (same pulse)
self.net_cpu.expected_commands = {
"TN:20,03": "TN:P"
}
self.machine.coils["c_flipper_main"].enable(pulse_ms=11)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual disable with hw rule
self.net_cpu.expected_commands = {
"TN:20,02": "TN:P",
"TN:20,00": "TN:P" # reenable autofire rule
}
self.machine.coils["c_flipper_main"].disable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual enable with hw rule (different pulse)
self.net_cpu.expected_commands = {
"DN:20,C1,00,18,0A,FF,01,00": "DN:P", # configure pwm + enable
}
self.machine.coils["c_flipper_main"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual disable with hw rule
self.net_cpu.expected_commands = {
"TN:20,02": "TN:P",
"DN:20,01,01,18,0B,FF,01,00,00": "DN_P", # configure rules
"TN:20,00": "TN:P" # reenable autofire rule
}
self.machine.coils["c_flipper_main"].disable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# disable rule
self.net_cpu.expected_commands = {
"DN:20,81": "DN:P"
}
self.machine.flippers["f_test_single"].disable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual flip no hw rule
self.net_cpu.expected_commands = {
"DN:20,89,00,10,0A,FF,00,00,00": "DN:P"
}
self.machine.coils["c_flipper_main"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# manual flip again with cached config
self.net_cpu.expected_commands = {
"TN:20,01": "TN:P",
}
self.machine.coils["c_flipper_main"].pulse()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def test_flipper_two_coils(self):
# we pulse the main coil (20)
# hold coil (21) is pulsed + enabled
self.net_cpu.expected_commands = {
"DN:20,01,01,18,0A,FF,00,00,00": "DN:P",
"DN:21,01,01,18,0A,FF,01,00,00": "DN:P",
"SN:01,01,02,02": "SN:P",
}
self.machine.flippers["f_test_hold"].enable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"DN:20,81": "DN:P",
"DN:21,81": "DN:P"
}
self.machine.flippers["f_test_hold"].disable()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def test_dmd_update(self):
# test configure
dmd = self.machine.default_platform.configure_dmd()
# test set frame to buffer
frame = bytearray()
for i in range(4096):
frame.append(64 + i % 192)
frame = bytes(frame)
# test draw
self.dmd_cpu.expected_commands = {
b'BM:' + frame: False
}
dmd.update(frame)
self.advance_time_and_run(.1)
self.assertFalse(self.dmd_cpu.expected_commands)
def test_bootloader_crash(self):
# Test that the machine stops if the RGB processor sends a bootloader msg
self.machine.stop = MagicMock()
self.machine.default_platform.process_received_message("!B:00", "RGB")
self.advance_time_and_run(1)
self.assertTrue(self.machine.stop.called)
def test_bootloader_crash_ignored(self):
# Test that RGB processor bootloader msgs can be ignored
self.machine.default_platform.config['ignore_rgb_crash'] = True
self.mock_event('fast_rgb_rebooted')
self.machine.stop = MagicMock()
self.machine.default_platform.process_received_message("!B:00", "RGB")
self.advance_time_and_run(1)
self.assertFalse(self.machine.stop.called)
self.assertEventCalled('fast_rgb_rebooted')
def test_lights_and_leds(self):
self._test_matrix_light()
self._test_pdb_gi_light()
self._test_pdb_led()
def _test_matrix_light(self):
# test enable of matrix light
self.net_cpu.expected_commands = {
"L1:23,FF": "L1:P",
}
self.machine.lights["test_pdb_light"].on()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# test enable of matrix light with brightness
self.net_cpu.expected_commands = {
"L1:23,80": "L1:P",
}
self.machine.lights["test_pdb_light"].on(brightness=128)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# test disable of matrix light
self.net_cpu.expected_commands = {
"L1:23,00": "L1:P",
}
self.machine.lights["test_pdb_light"].off()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# test disable of matrix light with brightness
self.net_cpu.expected_commands = {
"L1:23,00": "L1:P",
}
self.machine.lights["test_pdb_light"].on(brightness=255, fade_ms=100)
self.advance_time_and_run(.02)
self.assertFalse(self.net_cpu.expected_commands)
# step 1
self.net_cpu.expected_commands = {
"L1:23,32": "L1:P",
"L1:23,33": "L1:P",
}
self.advance_time_and_run(.02)
self.assertEqual(1, len(self.net_cpu.expected_commands))
# step 2
self.net_cpu.expected_commands = {
"L1:23,65": "L1:P",
"L1:23,66": "L1:P",
}
self.advance_time_and_run(.02)
self.assertEqual(1, len(self.net_cpu.expected_commands))
# step 3
self.net_cpu.expected_commands = {
"L1:23,98": "L1:P",
"L1:23,99": "L1:P",
}
self.advance_time_and_run(.02)
self.assertEqual(1, len(self.net_cpu.expected_commands))
# step 4
self.net_cpu.expected_commands = {
"L1:23,CB": "L1:P",
"L1:23,CC": "L1:P",
}
self.advance_time_and_run(.02)
self.assertEqual(1, len(self.net_cpu.expected_commands))
# step 5
self.net_cpu.expected_commands = {
"L1:23,FE": "L1:P",
"L1:23,FF": "L1:P",
}
self.advance_time_and_run(.02)
self.assertEqual(1, len(self.net_cpu.expected_commands))
# step 6 if step 5 did not send FF
if "L1:23,FE" not in self.net_cpu.expected_commands:
self.net_cpu.expected_commands = {
"L1:23,FF": "L1:P",
}
self.advance_time_and_run(.02)
self.assertFalse(self.net_cpu.expected_commands)
def _test_pdb_gi_light(self):
# test gi on
device = self.machine.lights["test_gi"]
self.net_cpu.expected_commands = {
"GI:2A,FF": "GI:P",
}
device.on()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"GI:2A,80": "GI:P",
}
device.on(brightness=128)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"GI:2A,F5": "GI:P",
}
device.on(brightness=245)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
# test gi off
self.net_cpu.expected_commands = {
"GI:2A,00": "GI:P",
}
device.off()
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"GI:2A,F5": "GI:P",
}
device.on(brightness=245)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
self.net_cpu.expected_commands = {
"GI:2A,00": "GI:P",
}
device.on(brightness=0)
self.advance_time_and_run(.1)
self.assertFalse(self.net_cpu.expected_commands)
def _test_pdb_led(self):
self.advance_time_and_run()
device = self.machine.lights["test_led"]
device2 = self.machine.lights["test_led2"]
self.assertEqual("000000", self.rgb_cpu.leds['97'])
self.assertEqual("000000", self.rgb_cpu.leds['98'])
# test led on
device.on()
self.advance_time_and_run(1)
self.assertEqual("ffffff", self.rgb_cpu.leds['97'])
self.assertEqual("000000", self.rgb_cpu.leds['98'])
device2.color("001122")
# test led off
device.off()
self.advance_time_and_run(1)
self.assertEqual("000000", self.rgb_cpu.leds['97'])
self.assertEqual("001122", self.rgb_cpu.leds['98'])
# test led color
device.color(RGBColor((2, 23, 42)))
self.advance_time_and_run(1)
self.assertEqual("02172a", self.rgb_cpu.leds['97'])
# test led off
device.off()
self.advance_time_and_run(1)
self.assertEqual("000000", self.rgb_cpu.leds['97'])
self.advance_time_and_run(.02)
# fade led over 100ms
device.color(RGBColor((100, 100, 100)), fade_ms=100)
self.advance_time_and_run(.03)
self.assertTrue(10 < int(self.rgb_cpu.leds['97'][0:2], 16) < 40)
self.assertTrue(self.rgb_cpu.leds['97'][0:2] == self.rgb_cpu.leds['97'][2:4] == self.rgb_cpu.leds['97'][4:6])
self.advance_time_and_run(.03)
self.assertTrue(40 < int(self.rgb_cpu.leds['97'][0:2], 16) < 60)
self.assertTrue(self.rgb_cpu.leds['97'][0:2] == self.rgb_cpu.leds['97'][2:4] == self.rgb_cpu.leds['97'][4:6])
self.advance_time_and_run(.03)
self.assertTrue(60 < int(self.rgb_cpu.leds['97'][0:2], 16) < 90)
self.assertTrue(self.rgb_cpu.leds['97'][0:2] == self.rgb_cpu.leds['97'][2:4] == self.rgb_cpu.leds['97'][4:6])
self.advance_time_and_run(2)
self.assertEqual("646464", self.rgb_cpu.leds['97'])
@expect_startup_error()
@test_config("error_lights.yaml")
def test_light_errors(self):
self.assertIsInstance(self.startup_error, ConfigFileError)
self.assertEqual(7, self.startup_error.get_error_no())
self.assertEqual("light.test_led", self.startup_error.get_logger_name())
self.assertIsInstance(self.startup_error.__cause__, ConfigFileError)
self.assertEqual(9, self.startup_error.__cause__.get_error_no())
self.assertEqual("FAST", self.startup_error.__cause__.get_logger_name())
self.assertEqual("Light syntax is number-channel (but was \"3\") for light test_led.",
self.startup_error.__cause__._message)
"""Support for Frontier Silicon Devices (Medion, Hama, Auna,...)."""
from __future__ import annotations
import logging
from afsapi import AFSAPI
import requests
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SELECT_SOURCE,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
STATE_UNKNOWN,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType
_LOGGER = logging.getLogger(__name__)
SUPPORT_FRONTIER_SILICON = (
SUPPORT_PAUSE
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_STEP
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_SEEK
| SUPPORT_PLAY_MEDIA
| SUPPORT_PLAY
| SUPPORT_STOP
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
)
DEFAULT_PORT = 80
DEFAULT_PASSWORD = "1234"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
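# Example configuration.yaml entry (an illustrative sketch; the host address
# is a hypothetical value, port and password fall back to the defaults above):
#
#   media_player:
#     - platform: frontier_silicon
#       host: 192.168.1.40
#       port: 80
#       password: "1234"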
async def async_setup_platform(
hass: HomeAssistant,
config: ConfigType,
async_add_entities: AddEntitiesCallback,
discovery_info: DiscoveryInfoType | None = None,
) -> None:
"""Set up the Frontier Silicon platform."""
if discovery_info is not None:
async_add_entities(
[AFSAPIDevice(discovery_info["ssdp_description"], DEFAULT_PASSWORD, None)],
True,
)
return
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
password = config.get(CONF_PASSWORD)
name = config.get(CONF_NAME)
try:
async_add_entities(
[AFSAPIDevice(f"http://{host}:{port}/device", password, name)], True
)
_LOGGER.debug("FSAPI device %s:%s -> %s", host, port, password)
except requests.exceptions.RequestException:
_LOGGER.error(
"Could not add the FSAPI device at %s:%s -> %s", host, port, password
)
class AFSAPIDevice(MediaPlayerEntity):
"""Representation of a Frontier Silicon device on the network."""
def __init__(self, device_url, password, name):
"""Initialize the Frontier Silicon API device."""
self._device_url = device_url
self._password = password
self._state = None
self._name = name
self._title = None
self._artist = None
self._album_name = None
self._mute = None
self._source = None
self._source_list = None
self._media_image_url = None
self._max_volume = None
self._volume_level = None
# Properties
@property
def fs_device(self):
"""
Create a fresh fsapi session.
A new session is created for each request in case someone else
connected to the device in between the updates and invalidated the
        existing session (e.g. UNDOK).
"""
return AFSAPI(self._device_url, self._password)
@property
def name(self):
"""Return the device name."""
return self._name
@property
def media_title(self):
"""Title of current playing media."""
return self._title
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._album_name
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def supported_features(self):
"""Flag of media commands that are supported."""
return SUPPORT_FRONTIER_SILICON
@property
def state(self):
"""Return the state of the player."""
return self._state
# source
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def source(self):
"""Name of the current input source."""
return self._source
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._media_image_url
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume_level
async def async_update(self):
"""Get the latest date and update device state."""
fs_device = self.fs_device
if not self._name:
self._name = await fs_device.get_friendly_name()
if not self._source_list:
self._source_list = await fs_device.get_mode_list()
        # The API seems to include 'zero' in the number of steps (e.g. if the
        # range is 0-40, get_volume_steps returns 41), so subtract one to get
        # the max volume. If the call to get_volume_steps fails, this stays
        # falsy and we try again on the next update.
if not self._max_volume:
self._max_volume = int(await fs_device.get_volume_steps() or 1) - 1
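            # e.g. (illustrative): a device reporting 41 steps gives
            # _max_volume == 40, so a raw volume of 20 later maps to a
            # volume_level of 20 / 40 == 0.5.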
if await fs_device.get_power():
status = await fs_device.get_play_status()
self._state = {
"playing": STATE_PLAYING,
"paused": STATE_PAUSED,
"stopped": STATE_IDLE,
"unknown": STATE_UNKNOWN,
None: STATE_IDLE,
}.get(status, STATE_UNKNOWN)
else:
self._state = STATE_OFF
if self._state != STATE_OFF:
info_name = await fs_device.get_play_name()
info_text = await fs_device.get_play_text()
self._title = " - ".join(filter(None, [info_name, info_text]))
self._artist = await fs_device.get_play_artist()
self._album_name = await fs_device.get_play_album()
self._source = await fs_device.get_mode()
self._mute = await fs_device.get_mute()
self._media_image_url = await fs_device.get_play_graphic()
volume = await self.fs_device.get_volume()
# Prevent division by zero if max_volume not known yet
self._volume_level = float(volume or 0) / (self._max_volume or 1)
else:
self._title = None
self._artist = None
self._album_name = None
self._source = None
self._mute = None
self._media_image_url = None
self._volume_level = None
# Management actions
# power control
async def async_turn_on(self):
"""Turn on the device."""
await self.fs_device.set_power(True)
async def async_turn_off(self):
"""Turn off the device."""
await self.fs_device.set_power(False)
async def async_media_play(self):
"""Send play command."""
await self.fs_device.play()
async def async_media_pause(self):
"""Send pause command."""
await self.fs_device.pause()
async def async_media_play_pause(self):
"""Send play/pause command."""
if "playing" in self._state:
await self.fs_device.pause()
else:
await self.fs_device.play()
async def async_media_stop(self):
"""Send play/pause command."""
await self.fs_device.pause()
async def async_media_previous_track(self):
"""Send previous track command (results in rewind)."""
await self.fs_device.rewind()
async def async_media_next_track(self):
"""Send next track command (results in fast-forward)."""
await self.fs_device.forward()
# mute
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._mute
async def async_mute_volume(self, mute):
"""Send mute command."""
await self.fs_device.set_mute(mute)
# volume
async def async_volume_up(self):
"""Send volume up command."""
volume = await self.fs_device.get_volume()
volume = int(volume or 0) + 1
await self.fs_device.set_volume(min(volume, self._max_volume))
async def async_volume_down(self):
"""Send volume down command."""
volume = await self.fs_device.get_volume()
volume = int(volume or 0) - 1
await self.fs_device.set_volume(max(volume, 0))
async def async_set_volume_level(self, volume):
"""Set volume command."""
if self._max_volume: # Can't do anything sensible if not set
volume = int(volume * self._max_volume)
await self.fs_device.set_volume(volume)
async def async_select_source(self, source):
"""Select input source."""
await self.fs_device.set_mode(source)
| |
"""Append module search paths for third-party packages to sys.path.
****************************************************************
* This module is automatically imported during initialization. *
****************************************************************
This will append site-specific paths to the module search path. On
Unix (including Mac OSX), it starts with sys.prefix and
sys.exec_prefix (if different) and appends
lib/python<version>/site-packages as well as lib/site-python.
On other platforms (such as Windows), it tries each of the
prefixes directly, as well as with lib/site-packages appended. The
resulting directories, if they exist, are appended to sys.path, and
also inspected for path configuration files.
If a file named "pyvenv.cfg" exists one directory above sys.executable,
sys.prefix and sys.exec_prefix are set to that directory and
it is also checked for site-packages and site-python (sys.base_prefix and
sys.base_exec_prefix will always be the "real" prefixes of the Python
installation). If "pyvenv.cfg" (a bootstrap configuration file) contains
the key "include-system-site-packages" set to anything other than "false"
(case-insensitive), the system-level prefixes will still also be
searched for site-packages; otherwise they won't.
All of the resulting site-specific directories, if they exist, are
appended to sys.path, and also inspected for path configuration
files.
A path configuration file is a file whose name has the form
<package>.pth; its contents are additional directories (one per line)
to be added to sys.path. Non-existing directories (or
non-directories) are never added to sys.path; no directory is added to
sys.path more than once. Blank lines and lines beginning with
'#' are skipped. Lines starting with 'import' are executed.
For example, suppose sys.prefix and sys.exec_prefix are set to
/usr/local and there is a directory /usr/local/lib/python2.5/site-packages
with three subdirectories, foo, bar and spam, and two path
configuration files, foo.pth and bar.pth. Assume foo.pth contains the
following:
# foo package configuration
foo
bar
bletch
and bar.pth contains:
# bar package configuration
bar
Then the following directories are added to sys.path, in this order:
/usr/local/lib/python2.5/site-packages/bar
/usr/local/lib/python2.5/site-packages/foo
Note that bletch is omitted because it doesn't exist; bar precedes foo
because bar.pth comes alphabetically before foo.pth; and spam is
omitted because it is not mentioned in either path configuration file.
The readline module is also automatically configured to enable
completion for systems that support it. This can be overridden in
sitecustomize, usercustomize or PYTHONSTARTUP.
After these operations, an attempt is made to import a module
named sitecustomize, which can perform arbitrary additional
site-specific customizations. If this import fails with an
ImportError exception, it is silently ignored.
"""
import sys
import os
import builtins
import _sitebuiltins
# Prefixes for site-packages; add additional prefixes like /usr/local here
PREFIXES = [sys.prefix, sys.exec_prefix]
# Enable per user site-packages directory
# set it to False to disable the feature or True to force the feature
ENABLE_USER_SITE = None
# for distutils.commands.install
# These values are initialized by the getuserbase() and getusersitepackages()
# functions, through the main() function when Python starts.
USER_SITE = None
USER_BASE = None
def makepath(*paths):
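    """Join *paths* and return a (dir, normcase(dir)) tuple, making dir
    absolute when possible."""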
dir = os.path.join(*paths)
try:
dir = os.path.abspath(dir)
except OSError:
pass
return dir, os.path.normcase(dir)
def abs_paths():
"""Set all module __file__ and __cached__ attributes to an absolute path"""
for m in set(sys.modules.values()):
if (getattr(getattr(m, '__loader__', None), '__module__', None) !=
'_frozen_importlib'):
continue # don't mess with a PEP 302-supplied __file__
try:
m.__file__ = os.path.abspath(m.__file__)
except (AttributeError, OSError):
pass
try:
m.__cached__ = os.path.abspath(m.__cached__)
except (AttributeError, OSError):
pass
def removeduppaths():
""" Remove duplicate entries from sys.path along with making them
absolute"""
# This ensures that the initial path provided by the interpreter contains
# only absolute pathnames, even if we're running from the build directory.
L = []
known_paths = set()
for dir in sys.path:
# Filter out duplicate paths (on case-insensitive file systems also
# if they only differ in case); turn relative paths into absolute
# paths.
dir, dircase = makepath(dir)
if not dircase in known_paths:
L.append(dir)
known_paths.add(dircase)
sys.path[:] = L
return known_paths
def _init_pathinfo():
"""Return a set containing all existing directory entries from sys.path"""
d = set()
for dir in sys.path:
try:
if os.path.isdir(dir):
dir, dircase = makepath(dir)
d.add(dircase)
except TypeError:
continue
return d
def addpackage(sitedir, name, known_paths):
"""Process a .pth file within the site-packages directory:
For each line in the file, either combine it with sitedir to a path
and add that to known_paths, or execute it if it starts with 'import '.
"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
fullname = os.path.join(sitedir, name)
try:
f = open(fullname, "r")
except OSError:
return
with f:
for n, line in enumerate(f):
if line.startswith("#"):
continue
try:
if line.startswith(("import ", "import\t")):
exec(line)
continue
line = line.rstrip()
dir, dircase = makepath(sitedir, line)
if not dircase in known_paths and os.path.exists(dir):
sys.path.append(dir)
known_paths.add(dircase)
except Exception:
print("Error processing line {:d} of {}:\n".format(n+1, fullname),
file=sys.stderr)
import traceback
for record in traceback.format_exception(*sys.exc_info()):
for line in record.splitlines():
print(' '+line, file=sys.stderr)
print("\nRemainder of file ignored", file=sys.stderr)
break
if reset:
known_paths = None
return known_paths
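# A minimal sketch of what addpackage() does (illustrative; the "extras"
# names are hypothetical):
#
#     import os, sys, tempfile
#     sitedir = tempfile.mkdtemp()
#     os.mkdir(os.path.join(sitedir, "extras"))
#     with open(os.path.join(sitedir, "extras.pth"), "w") as f:
#         f.write("extras\n")
#     addpackage(sitedir, "extras.pth", None)
#     # <sitedir>/extras is now on sys.path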
def addsitedir(sitedir, known_paths=None):
"""Add 'sitedir' argument to sys.path if missing and handle .pth files in
'sitedir'"""
if known_paths is None:
known_paths = _init_pathinfo()
reset = 1
else:
reset = 0
sitedir, sitedircase = makepath(sitedir)
if not sitedircase in known_paths:
sys.path.append(sitedir) # Add path component
known_paths.add(sitedircase)
try:
names = os.listdir(sitedir)
except OSError:
return
names = [name for name in names if name.endswith(".pth")]
for name in sorted(names):
addpackage(sitedir, name, known_paths)
if reset:
known_paths = None
return known_paths
def check_enableusersite():
"""Check if user site directory is safe for inclusion
    The function tests for the command line flag (including the environment
    variable) and checks that the process uid/gid equal the effective uid/gid.
None: Disabled for security reasons
False: Disabled by user (command line option)
True: Safe and enabled
"""
if sys.flags.no_user_site:
return False
if hasattr(os, "getuid") and hasattr(os, "geteuid"):
# check process uid == effective uid
if os.geteuid() != os.getuid():
return None
if hasattr(os, "getgid") and hasattr(os, "getegid"):
# check process gid == effective gid
if os.getegid() != os.getgid():
return None
return True
def getuserbase():
"""Returns the `user base` directory path.
The `user base` directory can be used to store data. If the global
variable ``USER_BASE`` is not initialized yet, this function will also set
it.
"""
global USER_BASE
if USER_BASE is not None:
return USER_BASE
from sysconfig import get_config_var
USER_BASE = get_config_var('userbase')
return USER_BASE
def getusersitepackages():
"""Returns the user-specific site-packages directory path.
If the global variable ``USER_SITE`` is not initialized yet, this
function will also set it.
"""
global USER_SITE
user_base = getuserbase() # this will also set USER_BASE
if USER_SITE is not None:
return USER_SITE
from sysconfig import get_path
if sys.platform == 'darwin':
from sysconfig import get_config_var
if get_config_var('PYTHONFRAMEWORK'):
USER_SITE = get_path('purelib', 'osx_framework_user')
return USER_SITE
USER_SITE = get_path('purelib', '%s_user' % os.name)
return USER_SITE
def addusersitepackages(known_paths):
"""Add a per user site-package to sys.path
Each user has its own python directory with site-packages in the
home directory.
"""
# get the per user site-package path
# this call will also make sure USER_BASE and USER_SITE are set
user_site = getusersitepackages()
if ENABLE_USER_SITE and os.path.isdir(user_site):
addsitedir(user_site, known_paths)
return known_paths
def getsitepackages(prefixes=None):
"""Returns a list containing all global site-packages directories
(and possibly site-python).
For each directory present in ``prefixes`` (or the global ``PREFIXES``),
this function will find its `site-packages` subdirectory depending on the
system environment, and will return a list of full paths.
"""
sitepackages = []
seen = set()
if prefixes is None:
prefixes = PREFIXES
for prefix in prefixes:
if not prefix or prefix in seen:
continue
seen.add(prefix)
if os.sep == '/':
sitepackages.append(os.path.join(prefix, "lib",
"python" + sys.version[:3],
"site-packages"))
sitepackages.append(os.path.join(prefix, "lib", "site-python"))
else:
sitepackages.append(prefix)
sitepackages.append(os.path.join(prefix, "lib", "site-packages"))
if sys.platform == "darwin":
# for framework builds *only* we add the standard Apple
# locations.
from sysconfig import get_config_var
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
sitepackages.append(
os.path.join("/Library", framework,
sys.version[:3], "site-packages"))
return sitepackages
def addsitepackages(known_paths, prefixes=None):
"""Add site-packages (and possibly site-python) to sys.path"""
for sitedir in getsitepackages(prefixes):
if os.path.isdir(sitedir):
if "site-python" in sitedir:
import warnings
warnings.warn('"site-python" directories will not be '
'supported in 3.5 anymore',
DeprecationWarning)
addsitedir(sitedir, known_paths)
return known_paths
def setquit():
"""Define new builtins 'quit' and 'exit'.
These are objects which make the interpreter exit when called.
The repr of each object contains a hint at how it works.
"""
if os.sep == ':':
eof = 'Cmd-Q'
elif os.sep == '\\':
eof = 'Ctrl-Z plus Return'
else:
eof = 'Ctrl-D (i.e. EOF)'
builtins.quit = _sitebuiltins.Quitter('quit', eof)
builtins.exit = _sitebuiltins.Quitter('exit', eof)
def setcopyright():
"""Set 'copyright' and 'credits' in builtins"""
builtins.copyright = _sitebuiltins._Printer("copyright", sys.copyright)
if sys.platform[:4] == 'java':
builtins.credits = _sitebuiltins._Printer(
"credits",
"Jython is maintained by the Jython developers (www.jython.org).")
else:
builtins.credits = _sitebuiltins._Printer("credits", """\
Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information.""")
here = os.path.dirname(os.__file__)
builtins.license = _sitebuiltins._Printer(
"license",
"See http://www.python.org/download/releases/%.5s/license" % sys.version,
["LICENSE.txt", "LICENSE"],
[os.path.join(here, os.pardir), here, os.curdir])
def sethelper():
builtins.help = _sitebuiltins._Helper()
def enablerlcompleter():
"""Enable default readline configuration on interactive prompts, by
registering a sys.__interactivehook__.
If the readline module can be imported, the hook will set the Tab key
as completion key and register ~/.python_history as history file.
    This can be overridden in the sitecustomize or usercustomize module,
or in a PYTHONSTARTUP file.
"""
def register_readline():
import atexit
try:
import readline
import rlcompleter
except ImportError:
return
# Reading the initialization (config) file may not be enough to set a
# completion key, so we set one first and then read the file.
readline_doc = getattr(readline, '__doc__', '')
if readline_doc is not None and 'libedit' in readline_doc:
readline.parse_and_bind('bind ^I rl_complete')
else:
readline.parse_and_bind('tab: complete')
try:
readline.read_init_file()
except OSError:
# An OSError here could have many causes, but the most likely one
# is that there's no .inputrc file (or .editrc file in the case of
# Mac OS X + libedit) in the expected location. In that case, we
# want to ignore the exception.
pass
if readline.get_current_history_length() == 0:
# If no history was loaded, default to .python_history.
# The guard is necessary to avoid doubling history size at
# each interpreter exit when readline was already configured
# through a PYTHONSTARTUP hook, see:
# http://bugs.python.org/issue5845#msg198636
history = os.path.join(os.path.expanduser('~'),
'.python_history')
try:
readline.read_history_file(history)
except IOError:
pass
atexit.register(readline.write_history_file, history)
sys.__interactivehook__ = register_readline
def aliasmbcs():
"""On Windows, some default encodings are not provided by Python,
while they are always available as "mbcs" in each locale. Make
them usable by aliasing to "mbcs" in such a case."""
if sys.platform == 'win32':
import _bootlocale, codecs
enc = _bootlocale.getpreferredencoding(False)
if enc.startswith('cp'): # "cp***" ?
try:
codecs.lookup(enc)
except LookupError:
import encodings
encodings._cache[enc] = encodings._unknown
encodings.aliases.aliases[enc] = 'mbcs'
CONFIG_LINE = r'^(?P<key>(\w|[-_])+)\s*=\s*(?P<value>.*)\s*$'
def venv(known_paths):
global PREFIXES, ENABLE_USER_SITE
env = os.environ
if sys.platform == 'darwin' and '__PYVENV_LAUNCHER__' in env:
executable = os.environ['__PYVENV_LAUNCHER__']
else:
executable = sys.executable
exe_dir, _ = os.path.split(os.path.abspath(executable))
site_prefix = os.path.dirname(exe_dir)
sys._home = None
conf_basename = 'pyvenv.cfg'
candidate_confs = [
conffile for conffile in (
os.path.join(exe_dir, conf_basename),
os.path.join(site_prefix, conf_basename)
)
if os.path.isfile(conffile)
]
if candidate_confs:
import re
config_line = re.compile(CONFIG_LINE)
virtual_conf = candidate_confs[0]
system_site = "true"
with open(virtual_conf) as f:
for line in f:
line = line.strip()
m = config_line.match(line)
if m:
d = m.groupdict()
key, value = d['key'].lower(), d['value']
if key == 'include-system-site-packages':
system_site = value.lower()
elif key == 'home':
sys._home = value
sys.prefix = sys.exec_prefix = site_prefix
# Doing this here ensures venv takes precedence over user-site
addsitepackages(known_paths, [sys.prefix])
    # addsitepackages will process site_prefix again if it's in PREFIXES,
# but that's ok; known_paths will prevent anything being added twice
if system_site == "true":
PREFIXES.insert(0, sys.prefix)
else:
PREFIXES = [sys.prefix]
ENABLE_USER_SITE = False
return known_paths
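# A typical pyvenv.cfg consumed by venv() looks like this (illustrative
# values; only "home" and "include-system-site-packages" are read above):
#
#     home = /usr/local/bin
#     include-system-site-packages = false
#     version = 3.4.0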
def execsitecustomize():
"""Run custom site specific code, if available."""
try:
import sitecustomize
except ImportError:
pass
except Exception as err:
if os.environ.get("PYTHONVERBOSE"):
sys.excepthook(*sys.exc_info())
else:
sys.stderr.write(
"Error in sitecustomize; set PYTHONVERBOSE for traceback:\n"
"%s: %s\n" %
(err.__class__.__name__, err))
def execusercustomize():
"""Run custom user specific code, if available."""
try:
import usercustomize
except ImportError:
pass
except Exception as err:
if os.environ.get("PYTHONVERBOSE"):
sys.excepthook(*sys.exc_info())
else:
sys.stderr.write(
"Error in usercustomize; set PYTHONVERBOSE for traceback:\n"
"%s: %s\n" %
(err.__class__.__name__, err))
def main():
"""Add standard site-specific directories to the module search path.
This function is called automatically when this module is imported,
unless the python interpreter was started with the -S flag.
"""
global ENABLE_USER_SITE
abs_paths()
known_paths = removeduppaths()
known_paths = venv(known_paths)
if ENABLE_USER_SITE is None:
ENABLE_USER_SITE = check_enableusersite()
known_paths = addusersitepackages(known_paths)
known_paths = addsitepackages(known_paths)
setquit()
setcopyright()
sethelper()
enablerlcompleter()
aliasmbcs()
execsitecustomize()
if ENABLE_USER_SITE:
execusercustomize()
# Prevent editing of sys.path when python was started with -S and
# site is imported later.
if not sys.flags.no_site:
main()
def _script():
help = """\
%s [--user-base] [--user-site]
Without arguments print some useful information
With arguments print the value of USER_BASE and/or USER_SITE separated
by '%s'.
Exit codes with --user-base or --user-site:
0 - user site directory is enabled
1 - user site directory is disabled by user
   2 - user site directory is disabled by super user
or for security reasons
>2 - unknown error
"""
args = sys.argv[1:]
if not args:
user_base = getuserbase()
user_site = getusersitepackages()
print("sys.path = [")
for dir in sys.path:
print(" %r," % (dir,))
print("]")
print("USER_BASE: %r (%s)" % (user_base,
"exists" if os.path.isdir(user_base) else "doesn't exist"))
print("USER_SITE: %r (%s)" % (user_site,
"exists" if os.path.isdir(user_site) else "doesn't exist"))
print("ENABLE_USER_SITE: %r" % ENABLE_USER_SITE)
sys.exit(0)
buffer = []
if '--user-base' in args:
buffer.append(USER_BASE)
if '--user-site' in args:
buffer.append(USER_SITE)
if buffer:
print(os.pathsep.join(buffer))
if ENABLE_USER_SITE:
sys.exit(0)
elif ENABLE_USER_SITE is False:
sys.exit(1)
elif ENABLE_USER_SITE is None:
sys.exit(2)
else:
sys.exit(3)
else:
import textwrap
print(textwrap.dedent(help % (sys.argv[0], os.pathsep)))
sys.exit(10)
if __name__ == '__main__':
_script()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Executes task in a Kubernetes POD"""
import re
import warnings
from typing import Any, Dict, Iterable, List, Optional, Tuple
import yaml
from kubernetes.client import CoreV1Api, models as k8s
from airflow.exceptions import AirflowException
from airflow.kubernetes import kube_client, pod_generator, pod_launcher
from airflow.kubernetes.pod_generator import PodGenerator
from airflow.kubernetes.secret import Secret
from airflow.models import BaseOperator
from airflow.providers.cncf.kubernetes.backcompat.backwards_compat_converters import (
convert_affinity,
convert_configmap,
convert_env_vars,
convert_image_pull_secrets,
convert_pod_runtime_info_env,
convert_port,
convert_resources,
convert_toleration,
convert_volume,
convert_volume_mount,
)
from airflow.providers.cncf.kubernetes.backcompat.pod_runtime_info_env import PodRuntimeInfoEnv
from airflow.utils.decorators import apply_defaults
from airflow.utils.helpers import validate_key
from airflow.utils.state import State
from airflow.version import version as airflow_version
class KubernetesPodOperator(BaseOperator): # pylint: disable=too-many-instance-attributes
"""
Execute a task in a Kubernetes Pod
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`apache-airflow:howto/operator:KubernetesPodOperator`
.. note::
If you use `Google Kubernetes Engine <https://cloud.google.com/kubernetes-engine/>`__
and Airflow is not running in the same cluster, consider using
:class:`~airflow.providers.google.cloud.operators.kubernetes_engine.GKEStartPodOperator`, which
simplifies the authorization process.
:param namespace: the namespace to run within kubernetes.
:type namespace: str
:param image: Docker image you wish to launch. Defaults to hub.docker.com,
but fully qualified URLS will point to custom repositories. (templated)
:type image: str
:param name: name of the pod in which the task will run, will be used (plus a random
suffix) to generate a pod id (DNS-1123 subdomain, containing only [a-z0-9.-]).
:type name: str
:param cmds: entrypoint of the container. (templated)
The docker images's entrypoint is used if this is not provided.
:type cmds: list[str]
:param arguments: arguments of the entrypoint. (templated)
The docker image's CMD is used if this is not provided.
:type arguments: list[str]
:param ports: ports for launched pod.
:type ports: list[k8s.V1ContainerPort]
:param volume_mounts: volumeMounts for launched pod.
:type volume_mounts: list[k8s.V1VolumeMount]
:param volumes: volumes for launched pod. Includes ConfigMaps and PersistentVolumes.
:type volumes: list[k8s.V1Volume]
:param env_vars: Environment variables initialized in the container. (templated)
:type env_vars: list[k8s.V1EnvVar]
:param secrets: Kubernetes secrets to inject in the container.
They can be exposed as environment vars or files in a volume.
:type secrets: list[airflow.kubernetes.secret.Secret]
:param in_cluster: run kubernetes client with in_cluster configuration.
:type in_cluster: bool
:param cluster_context: context that points to kubernetes cluster.
Ignored when in_cluster is True. If None, current-context is used.
:type cluster_context: str
:param reattach_on_restart: if the scheduler dies while the pod is running, reattach and monitor
:type reattach_on_restart: bool
:param labels: labels to apply to the Pod. (templated)
:type labels: dict
:param startup_timeout_seconds: timeout in seconds to startup the pod.
:type startup_timeout_seconds: int
:param get_logs: get the stdout of the container as logs of the tasks.
:type get_logs: bool
:param image_pull_policy: Specify a policy to cache or always pull an image.
:type image_pull_policy: str
:param annotations: non-identifying metadata you can attach to the Pod.
Can be a large range of data, and can include characters
that are not permitted by labels.
:type annotations: dict
:param resources: A dict containing resources requests and limits.
Possible keys are request_memory, request_cpu, limit_memory, limit_cpu,
and limit_gpu, which will be used to generate airflow.kubernetes.pod.Resources.
See also kubernetes.io/docs/concepts/configuration/manage-compute-resources-container
:type resources: k8s.V1ResourceRequirements
:param affinity: A dict containing a group of affinity scheduling rules.
:type affinity: k8s.V1Affinity
:param config_file: The path to the Kubernetes config file. (templated)
If not specified, default value is ``~/.kube/config``
:type config_file: str
:param node_selectors: A dict containing a group of scheduling rules.
:type node_selectors: dict
:param image_pull_secrets: Any image pull secrets to be given to the pod.
If more than one secret is required, provide a
comma separated list: secret_a,secret_b
:type image_pull_secrets: List[k8s.V1LocalObjectReference]
:param service_account_name: Name of the service account
:type service_account_name: str
:param is_delete_operator_pod: What to do when the pod reaches its final
state, or the execution is interrupted.
If False (default): do nothing, If True: delete the pod
:type is_delete_operator_pod: bool
:param hostnetwork: If True enable host networking on the pod.
:type hostnetwork: bool
:param tolerations: A list of kubernetes tolerations.
:type tolerations: List[k8s.V1Toleration]
:param security_context: security options the pod should run with (PodSecurityContext).
:type security_context: dict
:param dnspolicy: dnspolicy for the pod.
:type dnspolicy: str
:param schedulername: Specify a schedulername for the pod
:type schedulername: str
:param full_pod_spec: The complete podSpec
:type full_pod_spec: kubernetes.client.models.V1Pod
:param init_containers: init container for the launched Pod
:type init_containers: list[kubernetes.client.models.V1Container]
:param log_events_on_failure: Log the pod's events if a failure occurs
:type log_events_on_failure: bool
:param do_xcom_push: If True, the content of the file
/airflow/xcom/return.json in the container will also be pushed to an
XCom when the container completes.
:type do_xcom_push: bool
:param pod_template_file: path to pod template file (templated)
:type pod_template_file: str
:param priority_class_name: priority class name for the launched Pod
:type priority_class_name: str
:param termination_grace_period: Termination grace period if task killed in UI,
defaults to kubernetes default
:type termination_grace_period: int
"""
template_fields: Iterable[str] = (
'image',
'cmds',
'arguments',
'env_vars',
'labels',
'config_file',
'pod_template_file',
)
# fmt: off
@apply_defaults
def __init__( # pylint: disable=too-many-arguments,too-many-locals
# fmt: on
self,
*,
namespace: Optional[str] = None,
image: Optional[str] = None,
name: Optional[str] = None,
cmds: Optional[List[str]] = None,
arguments: Optional[List[str]] = None,
ports: Optional[List[k8s.V1ContainerPort]] = None,
volume_mounts: Optional[List[k8s.V1VolumeMount]] = None,
volumes: Optional[List[k8s.V1Volume]] = None,
env_vars: Optional[List[k8s.V1EnvVar]] = None,
env_from: Optional[List[k8s.V1EnvFromSource]] = None,
secrets: Optional[List[Secret]] = None,
in_cluster: Optional[bool] = None,
cluster_context: Optional[str] = None,
labels: Optional[Dict] = None,
reattach_on_restart: bool = True,
startup_timeout_seconds: int = 120,
get_logs: bool = True,
image_pull_policy: str = 'IfNotPresent',
annotations: Optional[Dict] = None,
resources: Optional[k8s.V1ResourceRequirements] = None,
affinity: Optional[k8s.V1Affinity] = None,
config_file: Optional[str] = None,
node_selectors: Optional[dict] = None,
node_selector: Optional[dict] = None,
image_pull_secrets: Optional[List[k8s.V1LocalObjectReference]] = None,
service_account_name: str = 'default',
is_delete_operator_pod: bool = False,
hostnetwork: bool = False,
tolerations: Optional[List[k8s.V1Toleration]] = None,
security_context: Optional[Dict] = None,
dnspolicy: Optional[str] = None,
schedulername: Optional[str] = None,
full_pod_spec: Optional[k8s.V1Pod] = None,
init_containers: Optional[List[k8s.V1Container]] = None,
log_events_on_failure: bool = False,
do_xcom_push: bool = False,
pod_template_file: Optional[str] = None,
priority_class_name: Optional[str] = None,
        pod_runtime_info_envs: Optional[List[PodRuntimeInfoEnv]] = None,
termination_grace_period: Optional[int] = None,
        configmaps: Optional[List[str]] = None,
**kwargs,
) -> None:
if kwargs.get('xcom_push') is not None:
raise AirflowException("'xcom_push' was deprecated, use 'do_xcom_push' instead")
super().__init__(resources=None, **kwargs)
self.do_xcom_push = do_xcom_push
self.image = image
self.namespace = namespace
self.cmds = cmds or []
self.arguments = arguments or []
self.labels = labels or {}
self.startup_timeout_seconds = startup_timeout_seconds
self.env_vars = convert_env_vars(env_vars) if env_vars else []
if pod_runtime_info_envs:
self.env_vars.extend([convert_pod_runtime_info_env(p) for p in pod_runtime_info_envs])
self.env_from = env_from or []
if configmaps:
self.env_from.extend([convert_configmap(c) for c in configmaps])
self.ports = [convert_port(p) for p in ports] if ports else []
self.volume_mounts = [convert_volume_mount(v) for v in volume_mounts] if volume_mounts else []
self.volumes = [convert_volume(volume) for volume in volumes] if volumes else []
self.secrets = secrets or []
self.in_cluster = in_cluster
self.cluster_context = cluster_context
self.reattach_on_restart = reattach_on_restart
self.get_logs = get_logs
self.image_pull_policy = image_pull_policy
if node_selectors:
            # node_selectors (plural) does not match the k8s API; kept only
            # for backwards compatibility
warnings.warn("node_selectors is deprecated. Please use node_selector instead.")
self.node_selector = node_selectors or {}
elif node_selector:
self.node_selector = node_selector or {}
else:
self.node_selector = None
self.annotations = annotations or {}
self.affinity = convert_affinity(affinity) if affinity else k8s.V1Affinity()
self.k8s_resources = convert_resources(resources) if resources else {}
self.config_file = config_file
self.image_pull_secrets = convert_image_pull_secrets(image_pull_secrets) if image_pull_secrets else []
self.service_account_name = service_account_name
self.is_delete_operator_pod = is_delete_operator_pod
self.hostnetwork = hostnetwork
self.tolerations = [convert_toleration(toleration) for toleration in tolerations] \
if tolerations else []
self.security_context = security_context or {}
self.dnspolicy = dnspolicy
self.schedulername = schedulername
self.full_pod_spec = full_pod_spec
self.init_containers = init_containers or []
self.log_events_on_failure = log_events_on_failure
self.priority_class_name = priority_class_name
self.pod_template_file = pod_template_file
self.name = self._set_name(name)
self.termination_grace_period = termination_grace_period
self.client: CoreV1Api = None
self.pod: k8s.V1Pod = None
@staticmethod
def create_labels_for_pod(context) -> dict:
"""
Generate labels for the pod to track the pod in case of Operator crash
:param context: task context provided by airflow DAG
:return: dict
"""
labels = {
'dag_id': context['dag'].dag_id,
'task_id': context['task'].task_id,
'execution_date': context['ts'],
'try_number': context['ti'].try_number,
}
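        # e.g. (illustrative values, after the sanitizing loop below):
        # {'dag_id': 'example_dag', 'task_id': 'pod_task',
        #  'execution_date': <safe label value>, 'try_number': '1'}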
        # In the case of sub dags, additionally record the parent dag id
if context['dag'].is_subdag:
labels['parent_dag_id'] = context['dag'].parent_dag.dag_id
# Ensure that label is valid for Kube,
# and if not truncate/remove invalid chars and replace with short hash.
for label_id, label in labels.items():
safe_label = pod_generator.make_safe_label_value(str(label))
labels[label_id] = safe_label
return labels
def execute(self, context) -> Optional[str]:
try:
if self.in_cluster is not None:
client = kube_client.get_kube_client(
in_cluster=self.in_cluster,
cluster_context=self.cluster_context,
config_file=self.config_file,
)
else:
client = kube_client.get_kube_client(
cluster_context=self.cluster_context, config_file=self.config_file
)
self.pod = self.create_pod_request_obj()
self.namespace = self.pod.metadata.namespace
self.client = client
# Add combination of labels to uniquely identify a running pod
labels = self.create_labels_for_pod(context)
label_selector = self._get_pod_identifying_label_string(labels)
pod_list = client.list_namespaced_pod(self.namespace, label_selector=label_selector)
if len(pod_list.items) > 1 and self.reattach_on_restart:
raise AirflowException(
'More than one pod running with labels: '
'{label_selector}'.format(label_selector=label_selector)
)
launcher = pod_launcher.PodLauncher(kube_client=client, extract_xcom=self.do_xcom_push)
if len(pod_list.items) == 1:
try_numbers_match = self._try_numbers_match(context, pod_list.items[0])
final_state, result = self.handle_pod_overlap(
labels, try_numbers_match, launcher, pod_list.items[0]
)
else:
self.log.info("creating pod with labels %s and launcher %s", labels, launcher)
final_state, _, result = self.create_new_pod_for_operator(labels, launcher)
if final_state != State.SUCCESS:
status = self.client.read_namespaced_pod(self.pod.metadata.name, self.namespace)
raise AirflowException(f'Pod {self.pod.metadata.name} returned a failure: {status}')
return result
except AirflowException as ex:
raise AirflowException(f'Pod Launching failed: {ex}')
def handle_pod_overlap(
self, labels: dict, try_numbers_match: bool, launcher: Any, pod: k8s.V1Pod
) -> Tuple[State, Optional[str]]:
"""
In cases where the Scheduler restarts while a KubernetesPodOperator task is running,
this function will either continue to monitor the existing pod or launch a new pod
based on the `reattach_on_restart` parameter.
:param labels: labels used to determine if a pod is repeated
:type labels: dict
:param try_numbers_match: do the try numbers match? Only needed for logging purposes
:type try_numbers_match: bool
:param launcher: PodLauncher
:param pod_list: list of pods found
"""
if try_numbers_match:
log_line = f"found a running pod with labels {labels} and the same try_number."
else:
log_line = f"found a running pod with labels {labels} but a different try_number."
# In case of failed pods, should reattach the first time, but only once
# as the task will have already failed.
if self.reattach_on_restart and not pod.metadata.labels.get("already_checked"):
log_line += " Will attach to this pod and monitor instead of starting new one"
self.log.info(log_line)
self.pod = pod
final_state, result = self.monitor_launched_pod(launcher, pod)
else:
log_line += f"creating pod with labels {labels} and launcher {launcher}"
self.log.info(log_line)
final_state, _, result = self.create_new_pod_for_operator(labels, launcher)
return final_state, result
@staticmethod
def _get_pod_identifying_label_string(labels) -> str:
filtered_labels = {label_id: label for label_id, label in labels.items() if label_id != 'try_number'}
return ','.join([label_id + '=' + label for label_id, label in sorted(filtered_labels.items())])
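    # e.g. (illustrative): the join above yields a selector such as
    # 'dag_id=example_dag,execution_date=...,task_id=pod_task';
    # try_number is filtered out so re-runs match the same pod.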
@staticmethod
def _try_numbers_match(context, pod) -> bool:
return pod.metadata.labels['try_number'] == context['ti'].try_number
def _set_name(self, name):
if self.pod_template_file or self.full_pod_spec:
return None
validate_key(name, max_length=220)
return re.sub(r'[^a-z0-9.-]+', '-', name.lower())
def create_pod_request_obj(self) -> k8s.V1Pod:
"""
Creates a V1Pod based on user parameters. Note that a `pod` or `pod_template_file`
will supersede all other values.
"""
self.log.debug("Creating pod for K8sPodOperator task %s", self.task_id)
if self.pod_template_file:
self.log.debug("Pod template file found, will parse for base pod")
pod_template = pod_generator.PodGenerator.deserialize_model_file(self.pod_template_file)
if self.full_pod_spec:
pod_template = PodGenerator.reconcile_pods(pod_template, self.full_pod_spec)
elif self.full_pod_spec:
pod_template = self.full_pod_spec
else:
pod_template = k8s.V1Pod(metadata=k8s.V1ObjectMeta(name="name"))
pod = k8s.V1Pod(
api_version="v1",
kind="Pod",
metadata=k8s.V1ObjectMeta(
namespace=self.namespace,
labels=self.labels,
name=PodGenerator.make_unique_pod_id(self.name),
annotations=self.annotations,
),
spec=k8s.V1PodSpec(
node_selector=self.node_selector,
affinity=self.affinity,
tolerations=self.tolerations,
init_containers=self.init_containers,
containers=[
k8s.V1Container(
image=self.image,
name="base",
command=self.cmds,
ports=self.ports,
resources=self.k8s_resources,
volume_mounts=self.volume_mounts,
args=self.arguments,
env=self.env_vars,
env_from=self.env_from,
)
],
image_pull_secrets=self.image_pull_secrets,
service_account_name=self.service_account_name,
host_network=self.hostnetwork,
security_context=self.security_context,
dns_policy=self.dnspolicy,
scheduler_name=self.schedulername,
restart_policy='Never',
priority_class_name=self.priority_class_name,
volumes=self.volumes,
),
)
pod = PodGenerator.reconcile_pods(pod_template, pod)
for secret in self.secrets:
self.log.debug("Adding secret to task %s", self.task_id)
pod = secret.attach_to_pod(pod)
if self.do_xcom_push:
self.log.debug("Adding xcom sidecar to task %s", self.task_id)
pod = PodGenerator.add_xcom_sidecar(pod)
return pod
def create_new_pod_for_operator(self, labels, launcher) -> Tuple[State, k8s.V1Pod, Optional[str]]:
"""
Creates a new pod and monitors for duration of task
:param labels: labels used to track pod
:param launcher: pod launcher that will manage launching and monitoring pods
:return:
"""
if not (self.full_pod_spec or self.pod_template_file):
# Add Airflow Version to the label
# And a label to identify that pod is launched by KubernetesPodOperator
self.log.debug("Adding k8spodoperator labels to pod before launch for task %s", self.task_id)
self.labels.update(
{
'airflow_version': airflow_version.replace('+', '-'),
'kubernetes_pod_operator': 'True',
}
)
self.labels.update(labels)
self.pod.metadata.labels = self.labels
self.log.debug("Starting pod:\n%s", yaml.safe_dump(self.pod.to_dict()))
try:
launcher.start_pod(self.pod, startup_timeout=self.startup_timeout_seconds)
final_state, result = launcher.monitor_pod(pod=self.pod, get_logs=self.get_logs)
except AirflowException:
if self.log_events_on_failure:
for event in launcher.read_pod_events(self.pod).items:
self.log.error("Pod Event: %s - %s", event.reason, event.message)
raise
finally:
if self.is_delete_operator_pod:
self.log.debug("Deleting pod for task %s", self.task_id)
launcher.delete_pod(self.pod)
return final_state, self.pod, result
def patch_already_checked(self, pod: k8s.V1Pod):
"""Add an "already tried annotation to ensure we only retry once"""
pod.metadata.labels["already_checked"] = "True"
body = PodGenerator.serialize_pod(pod)
self.client.patch_namespaced_pod(pod.metadata.name, pod.metadata.namespace, body)
def monitor_launched_pod(self, launcher, pod) -> Tuple[State, Optional[str]]:
"""
Monitors a pod to completion that was created by a previous KubernetesPodOperator
:param launcher: pod launcher that will manage launching and monitoring pods
:param pod: podspec used to find pod using k8s API
:return:
"""
try:
(final_state, result) = launcher.monitor_pod(pod, get_logs=self.get_logs)
finally:
if self.is_delete_operator_pod:
launcher.delete_pod(pod)
if final_state != State.SUCCESS:
if self.log_events_on_failure:
for event in launcher.read_pod_events(pod).items:
self.log.error("Pod Event: %s - %s", event.reason, event.message)
self.patch_already_checked(self.pod)
raise AirflowException(f'Pod returned a failure: {final_state}')
return final_state, result
def on_kill(self) -> None:
if self.pod:
pod: k8s.V1Pod = self.pod
namespace = pod.metadata.namespace
name = pod.metadata.name
kwargs = {}
if self.termination_grace_period is not None:
kwargs = {"grace_period_seconds": self.termination_grace_period}
self.client.delete_namespaced_pod(name=name, namespace=namespace, **kwargs)
| |
# -*- coding: utf-8 -*-
"""
InformationMachineAPILib.Controllers.UserScansController
"""
import unirest
from InformationMachineAPILib.APIHelper import APIHelper
from InformationMachineAPILib.Configuration import Configuration
from InformationMachineAPILib.APIException import APIException
from InformationMachineAPILib.Models.UploadReceiptWrapper import UploadReceiptWrapper
from InformationMachineAPILib.Models.UploadReceiptStatusWrapper import UploadReceiptStatusWrapper
from InformationMachineAPILib.Models.UploadBarcodeWrapper import UploadBarcodeWrapper
class UserScansController(object):
"""A Controller to access Endpoints in the InformationMachineAPILib API."""
def __init__(self,
client_id,
client_secret):
"""
Constructor with authentication and configuration parameters
"""
self.__client_id = client_id
self.__client_secret = client_secret
def user_scans_upload_receipt(self,
payload,
user_id):
"""Does a POST request to /v1/users/{user_id}/receipt.
        Upload a receipt with a unique ID ("receipt_id") and associate it with a
        specified user via the "user_id" parameter. Note: the uploaded receipt image
should be Base 64 encoded. For testing purposes you can find our Base
64 encoded logo here:
http://api.iamdata.co/images/base64/encoded_logo.txt
Args:
payload (UploadReceiptRequest): TODO: type description here.
user_id (string): TODO: type description here.
Returns:
UploadReceiptWrapper: Response from the API. Created
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# The base uri for api requests
query_builder = Configuration.BASE_URI
# Prepare query string for API call
query_builder += "/v1/users/{user_id}/receipt"
# Process optional template parameters
query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
"user_id": user_id
})
# Process optional query parameters
query_parameters = {
"client_id": self.__client_id,
"client_secret": self.__client_secret
}
query_builder = APIHelper.append_url_with_query_parameters(query_builder, query_parameters)
# Validate and preprocess url
query_url = APIHelper.clean_url(query_builder)
# Prepare headers
headers = {
"user-agent": "IAMDATA V1",
"accept": "application/json",
"content-type": "application/json; charset=utf-8"
}
# Prepare and invoke the API call request to fetch the response
response = unirest.post(query_url, headers=headers, params=APIHelper.json_serialize(payload))
# Error handling using HTTP status codes
if response.code == 400:
raise APIException("Bad request", 400, response.body)
elif response.code == 401:
raise APIException("Unauthorized", 401, response.body)
elif response.code == 500:
raise APIException("Internal Server Error", 500, response.body)
elif response.code < 200 or response.code > 206: # 200 = HTTP OK
raise APIException("HTTP Response Not OK", response.code, response.body)
# Try to cast response to desired type
if isinstance(response.body, dict):
# Response is already in a dictionary, return the object
return UploadReceiptWrapper(**response.body)
        # If we got here then an error occurred while trying to parse the response
raise APIException("Invalid JSON returned", response.code, response.body)
def user_scans_get_receipt_status(self,
user_id,
receipt_id):
"""Does a GET request to /v1/users/{user_id}/receipt/{receipt_id}.
Receipt statuses: Unknown, Uploaded, Processing, Done, Unreadable,
Duplicate, StoreMissing.
Args:
user_id (string): TODO: type description here.
receipt_id (string): TODO: type description here.
Returns:
UploadReceiptStatusWrapper: Response from the API. OK
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# The base uri for api requests
query_builder = Configuration.BASE_URI
# Prepare query string for API call
query_builder += "/v1/users/{user_id}/receipt/{receipt_id}"
# Process optional template parameters
query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
"user_id": user_id,
"receipt_id": receipt_id
})
# Process optional query parameters
query_parameters = {
"client_id": self.__client_id,
"client_secret": self.__client_secret
}
query_builder = APIHelper.append_url_with_query_parameters(query_builder, query_parameters)
# Validate and preprocess url
query_url = APIHelper.clean_url(query_builder)
# Prepare headers
headers = {
"user-agent": "IAMDATA V1",
"accept": "application/json"
}
# Prepare and invoke the API call request to fetch the response
response = unirest.get(query_url, headers=headers)
# Error handling using HTTP status codes
if response.code == 400:
raise APIException("Bad request", 400, response.body)
elif response.code == 401:
raise APIException("Unauthorized", 401, response.body)
elif response.code == 500:
raise APIException("Internal Server Error", 500, response.body)
elif response.code < 200 or response.code > 206: # 200 = HTTP OK
raise APIException("HTTP Response Not OK", response.code, response.body)
# Try to cast response to desired type
if isinstance(response.body, dict):
# Response is already in a dictionary, return the object
return UploadReceiptStatusWrapper(**response.body)
        # If we got here then an error occurred while trying to parse the response
raise APIException("Invalid JSON returned", response.code, response.body)
def user_scans_upload_barcode(self,
payload,
user_id):
"""Does a POST request to /v1/users/{user_id}/barcode.
        Upload a new product by barcode and associate it with a specified user.
Note: Execution might take up to 15 seconds, depending on whether
barcode exists in database or IM service must gather data around
uploaded barcode. POST payload example: { "bar_code" :
"021130126026", "bar_code_type" : "UPC-A" }
Args:
payload (UploadBarcodeRequest): TODO: type description here.
user_id (string): ID of user in your system
Returns:
UploadBarcodeWrapper: Response from the API. Created
Raises:
APIException: When an error occurs while fetching the data from
the remote API. This exception includes the HTTP Response
code, an error message, and the HTTP body that was received in
the request.
"""
# The base uri for api requests
query_builder = Configuration.BASE_URI
# Prepare query string for API call
query_builder += "/v1/users/{user_id}/barcode"
# Process optional template parameters
query_builder = APIHelper.append_url_with_template_parameters(query_builder, {
"user_id": user_id
})
# Process optional query parameters
query_parameters = {
"client_id": self.__client_id,
"client_secret": self.__client_secret
}
query_builder = APIHelper.append_url_with_query_parameters(query_builder, query_parameters)
# Validate and preprocess url
query_url = APIHelper.clean_url(query_builder)
# Prepare headers
headers = {
"user-agent": "IAMDATA V1",
"accept": "application/json",
"content-type": "application/json; charset=utf-8"
}
# Prepare and invoke the API call request to fetch the response
response = unirest.post(query_url, headers=headers, params=APIHelper.json_serialize(payload))
# Error handling using HTTP status codes
if response.code == 400:
raise APIException("Bad request", 400, response.body)
elif response.code == 401:
raise APIException("Unauthorized", 401, response.body)
elif response.code == 500:
raise APIException("Internal Server Error", 500, response.body)
elif response.code < 200 or response.code > 206: # 200 = HTTP OK
raise APIException("HTTP Response Not OK", response.code, response.body)
# Try to cast response to desired type
if isinstance(response.body, dict):
# Response is already in a dictionary, return the object
return UploadBarcodeWrapper(**response.body)
        # If we got here then an error occurred while trying to parse the response
raise APIException("Invalid JSON returned", response.code, response.body)
| |
# sql/operators.py
# Copyright (C) 2005-2021 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""Defines operators used in SQL expressions."""
from operator import add
from operator import and_
from operator import contains
from operator import eq
from operator import ge
from operator import getitem
from operator import gt
from operator import inv
from operator import le
from operator import lshift
from operator import lt
from operator import mod
from operator import mul
from operator import ne
from operator import neg
from operator import or_
from operator import rshift
from operator import sub
from operator import truediv
from .. import util
if util.py2k:
from operator import div
else:
div = truediv
class Operators(object):
"""Base of comparison and logical operators.
Implements base methods
:meth:`~sqlalchemy.sql.operators.Operators.operate` and
:meth:`~sqlalchemy.sql.operators.Operators.reverse_operate`, as well as
:meth:`~sqlalchemy.sql.operators.Operators.__and__`,
:meth:`~sqlalchemy.sql.operators.Operators.__or__`,
:meth:`~sqlalchemy.sql.operators.Operators.__invert__`.
Usually is used via its most common subclass
:class:`.ColumnOperators`.
"""
__slots__ = ()
def __and__(self, other):
"""Implement the ``&`` operator.
When used with SQL expressions, results in an
AND operation, equivalent to
:func:`_expression.and_`, that is::
a & b
is equivalent to::
from sqlalchemy import and_
and_(a, b)
Care should be taken when using ``&`` regarding
operator precedence; the ``&`` operator has the highest precedence.
        The operands should be enclosed in parentheses if they contain
further sub expressions::
(a == 2) & (b == 4)
"""
return self.operate(and_, other)
def __or__(self, other):
"""Implement the ``|`` operator.
When used with SQL expressions, results in an
OR operation, equivalent to
:func:`_expression.or_`, that is::
a | b
is equivalent to::
from sqlalchemy import or_
or_(a, b)
Care should be taken when using ``|`` regarding
operator precedence; the ``|`` operator has the highest precedence.
        The operands should be enclosed in parentheses if they contain
further sub expressions::
(a == 2) | (b == 4)
"""
return self.operate(or_, other)
def __invert__(self):
"""Implement the ``~`` operator.
When used with SQL expressions, results in a
NOT operation, equivalent to
:func:`_expression.not_`, that is::
~a
is equivalent to::
from sqlalchemy import not_
not_(a)
"""
return self.operate(inv)
def op(
self, opstring, precedence=0, is_comparison=False, return_type=None
):
"""Produce a generic operator function.
e.g.::
somecolumn.op("*")(5)
produces::
somecolumn * 5
This function can also be used to make bitwise operators explicit. For
example::
somecolumn.op('&')(0xff)
is a bitwise AND of the value in ``somecolumn``.
:param operator: a string which will be output as the infix operator
between this element and the expression passed to the
generated function.
:param precedence: precedence to apply to the operator, when
parenthesizing expressions. A lower number will cause the expression
to be parenthesized when applied against another operator with
higher precedence. The default value of ``0`` is lower than all
operators except for the comma (``,``) and ``AS`` operators.
A value of 100 will be higher or equal to all operators, and -100
will be lower than or equal to all operators.
:param is_comparison: if True, the operator will be considered as a
"comparison" operator, that is which evaluates to a boolean
true/false value, like ``==``, ``>``, etc. This flag should be set
so that ORM relationships can establish that the operator is a
comparison operator when used in a custom join condition.
.. versionadded:: 0.9.2 - added the
:paramref:`.Operators.op.is_comparison` flag.
:param return_type: a :class:`.TypeEngine` class or object that will
force the return type of an expression produced by this operator
to be of that type. By default, operators that specify
:paramref:`.Operators.op.is_comparison` will resolve to
:class:`.Boolean`, and those that do not will be of the same
type as the left-hand operand.
.. seealso::
:ref:`types_operators`
:ref:`relationship_custom_operator`
"""
operator = custom_op(opstring, precedence, is_comparison, return_type)
def against(other):
return operator(self, other)
return against
def bool_op(self, opstring, precedence=0):
"""Return a custom boolean operator.
This method is shorthand for calling
:meth:`.Operators.op` and passing the
:paramref:`.Operators.op.is_comparison`
flag with True.
.. seealso::
:meth:`.Operators.op`
"""
return self.op(opstring, precedence=precedence, is_comparison=True)
def operate(self, op, *other, **kwargs):
r"""Operate on an argument.
This is the lowest level of operation, raises
:class:`NotImplementedError` by default.
Overriding this on a subclass can allow common
behavior to be applied to all operations.
For example, overriding :class:`.ColumnOperators`
to apply ``func.lower()`` to the left and right
side::
class MyComparator(ColumnOperators):
def operate(self, op, other):
return op(func.lower(self), func.lower(other))
:param op: Operator callable.
:param \*other: the 'other' side of the operation. Will
be a single scalar for most operations.
:param \**kwargs: modifiers. These may be passed by special
operators such as :meth:`ColumnOperators.contains`.
"""
raise NotImplementedError(str(op))
def reverse_operate(self, op, other, **kwargs):
"""Reverse operate on an argument.
Usage is the same as :meth:`operate`.
"""
raise NotImplementedError(str(op))
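# A minimal usage sketch for ``Operators.op`` (hypothetical column and
# operator names): the returned ``against`` closure binds the custom
# operator to the left-hand element, so it can be stored and reused::
#
#     modulus = somecolumn.op("%", precedence=8)
#     expr = modulus(5)                  # roughly renders ``somecolumn % ?``
#     ilike = somecolumn.bool_op("~~*")  # comparison-typed custom operator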
class custom_op(object):
"""Represent a 'custom' operator.
:class:`.custom_op` is normally instantiated when the
:meth:`.Operators.op` or :meth:`.Operators.bool_op` methods
are used to create a custom operator callable. The class can also be
used directly when programmatically constructing expressions. E.g.
to represent the "factorial" operation::
from sqlalchemy.sql import UnaryExpression
from sqlalchemy.sql import operators
from sqlalchemy import Numeric
unary = UnaryExpression(table.c.somecolumn,
modifier=operators.custom_op("!"),
type_=Numeric)
.. seealso::
:meth:`.Operators.op`
:meth:`.Operators.bool_op`
"""
__name__ = "custom_op"
def __init__(
self,
opstring,
precedence=0,
is_comparison=False,
return_type=None,
natural_self_precedent=False,
eager_grouping=False,
):
self.opstring = opstring
self.precedence = precedence
self.is_comparison = is_comparison
self.natural_self_precedent = natural_self_precedent
self.eager_grouping = eager_grouping
self.return_type = (
return_type._to_instance(return_type) if return_type else None
)
def __eq__(self, other):
return isinstance(other, custom_op) and other.opstring == self.opstring
def __hash__(self):
return id(self)
def __call__(self, left, right, **kw):
return left.operate(self, right, **kw)
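# Note on the semantics defined above: ``custom_op`` equality compares only
# the opstring, while hashing is by instance identity, so two separately
# constructed operators with the same opstring compare equal but occupy
# distinct hash buckets::
#
#     op_a = custom_op("@>", precedence=5, is_comparison=True)
#     op_b = custom_op("@>")
#     op_a == op_b              # True  (same opstring)
#     hash(op_a) == hash(op_b)  # False (id-based hashing)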
class ColumnOperators(Operators):
"""Defines boolean, comparison, and other operators for
:class:`_expression.ColumnElement` expressions.
By default, all methods call down to
:meth:`.operate` or :meth:`.reverse_operate`,
passing in the appropriate operator function from the
Python builtin ``operator`` module or
a SQLAlchemy-specific operator function from
:mod:`sqlalchemy.sql.operators`. For example
the ``__eq__`` function::
def __eq__(self, other):
return self.operate(operators.eq, other)
Where ``operators.eq`` is essentially::
def eq(a, b):
return a == b
The core column expression unit :class:`_expression.ColumnElement`
overrides :meth:`.Operators.operate` and others
to return further :class:`_expression.ColumnElement` constructs,
so that the ``==`` operation above is replaced by a clause
construct.
.. seealso::
:ref:`types_operators`
:attr:`.TypeEngine.comparator_factory`
:class:`.ColumnOperators`
:class:`.PropComparator`
"""
__slots__ = ()
timetuple = None
"""Hack, allows datetime objects to be compared on the LHS."""
def __lt__(self, other):
"""Implement the ``<`` operator.
In a column context, produces the clause ``a < b``.
"""
return self.operate(lt, other)
def __le__(self, other):
"""Implement the ``<=`` operator.
In a column context, produces the clause ``a <= b``.
"""
return self.operate(le, other)
__hash__ = Operators.__hash__
def __eq__(self, other):
"""Implement the ``==`` operator.
In a column context, produces the clause ``a = b``.
If the target is ``None``, produces ``a IS NULL``.
"""
return self.operate(eq, other)
def __ne__(self, other):
"""Implement the ``!=`` operator.
In a column context, produces the clause ``a != b``.
If the target is ``None``, produces ``a IS NOT NULL``.
"""
return self.operate(ne, other)
def is_distinct_from(self, other):
"""Implement the ``IS DISTINCT FROM`` operator.
Renders "a IS DISTINCT FROM b" on most platforms;
on some such as SQLite may render "a IS NOT b".
.. versionadded:: 1.1
"""
return self.operate(is_distinct_from, other)
def is_not_distinct_from(self, other):
"""Implement the ``IS NOT DISTINCT FROM`` operator.
Renders "a IS NOT DISTINCT FROM b" on most platforms;
on some such as SQLite may render "a IS b".
.. versionchanged:: 1.4 The ``is_not_distinct_from()`` operator is
renamed from ``isnot_distinct_from()`` in previous releases.
The previous name remains available for backwards compatibility.
.. versionadded:: 1.1
"""
return self.operate(is_not_distinct_from, other)
# deprecated 1.4; see #5435
isnot_distinct_from = is_not_distinct_from
def __gt__(self, other):
"""Implement the ``>`` operator.
In a column context, produces the clause ``a > b``.
"""
return self.operate(gt, other)
def __ge__(self, other):
"""Implement the ``>=`` operator.
In a column context, produces the clause ``a >= b``.
"""
return self.operate(ge, other)
def __neg__(self):
"""Implement the ``-`` operator.
In a column context, produces the clause ``-a``.
"""
return self.operate(neg)
def __contains__(self, other):
return self.operate(contains, other)
def __getitem__(self, index):
"""Implement the [] operator.
This can be used by some database-specific types
such as PostgreSQL ARRAY and HSTORE.
"""
return self.operate(getitem, index)
def __lshift__(self, other):
"""implement the << operator.
Not used by SQLAlchemy core, this is provided
for custom operator systems which want to use
<< as an extension point.
"""
return self.operate(lshift, other)
def __rshift__(self, other):
"""implement the >> operator.
Not used by SQLAlchemy core, this is provided
for custom operator systems which want to use
>> as an extension point.
"""
return self.operate(rshift, other)
def concat(self, other):
"""Implement the 'concat' operator.
In a column context, produces the clause ``a || b``,
or uses the ``concat()`` operator on MySQL.
"""
return self.operate(concat_op, other)
def like(self, other, escape=None):
r"""Implement the ``like`` operator.
In a column context, produces the expression::
a LIKE other
E.g.::
stmt = select(sometable).\
where(sometable.c.column.like("%foobar%"))
:param other: expression to be compared
:param escape: optional escape character, renders the ``ESCAPE``
keyword, e.g.::
somecolumn.like("foo/%bar", escape="/")
.. seealso::
:meth:`.ColumnOperators.ilike`
"""
return self.operate(like_op, other, escape=escape)
def ilike(self, other, escape=None):
r"""Implement the ``ilike`` operator, e.g. case insensitive LIKE.
In a column context, produces an expression either of the form::
lower(a) LIKE lower(other)
Or on backends that support the ILIKE operator::
a ILIKE other
E.g.::
stmt = select(sometable).\
where(sometable.c.column.ilike("%foobar%"))
:param other: expression to be compared
:param escape: optional escape character, renders the ``ESCAPE``
keyword, e.g.::
somecolumn.ilike("foo/%bar", escape="/")
.. seealso::
:meth:`.ColumnOperators.like`
"""
return self.operate(ilike_op, other, escape=escape)
def in_(self, other):
"""Implement the ``in`` operator.
In a column context, produces the clause ``column IN <other>``.
The given parameter ``other`` may be:
* A list of literal values, e.g.::
stmt.where(column.in_([1, 2, 3]))
In this calling form, the list of items is converted to a set of
bound parameters the same length as the list given::
WHERE COL IN (?, ?, ?)
* A list of tuples may be provided if the comparison is against a
:func:`.tuple_` containing multiple expressions::
from sqlalchemy import tuple_
stmt.where(tuple_(col1, col2).in_([(1, 10), (2, 20), (3, 30)]))
* An empty list, e.g.::
stmt.where(column.in_([]))
In this calling form, the expression renders an "empty set"
expression. These expressions are tailored to individual backends
and are generally trying to get an empty SELECT statement as a
subquery. On SQLite, for example, the expression is::
WHERE col IN (SELECT 1 FROM (SELECT 1) WHERE 1!=1)
.. versionchanged:: 1.4 empty IN expressions now use an
execution-time generated SELECT subquery in all cases.
* A bound parameter, e.g. :func:`.bindparam`, may be used if it
includes the :paramref:`.bindparam.expanding` flag::
stmt.where(column.in_(bindparam('value', expanding=True)))
In this calling form, the expression renders a special non-SQL
placeholder expression that looks like::
WHERE COL IN ([EXPANDING_value])
This placeholder expression is intercepted at statement execution
time to be converted into the variable number of bound parameter
form illustrated earlier. If the statement were executed as::
connection.execute(stmt, {"value": [1, 2, 3]})
The database would be passed a bound parameter for each value::
WHERE COL IN (?, ?, ?)
.. versionadded:: 1.2 added "expanding" bound parameters
If an empty list is passed, a special "empty list" expression,
which is specific to the database in use, is rendered. On
SQLite this would be::
WHERE COL IN (SELECT 1 FROM (SELECT 1) WHERE 1!=1)
.. versionadded:: 1.3 "expanding" bound parameters now support
empty lists
* a :func:`_expression.select` construct, which is usually a
correlated scalar select::
stmt.where(
column.in_(
select(othertable.c.y).
where(table.c.x == othertable.c.x)
)
)
In this calling form, :meth:`.ColumnOperators.in_` renders as given::
WHERE COL IN (SELECT othertable.y
FROM othertable WHERE othertable.x = table.x)
:param other: a list of literals, a :func:`_expression.select`
construct, or a :func:`.bindparam` construct that includes the
:paramref:`.bindparam.expanding` flag set to True.
"""
return self.operate(in_op, other)
def not_in(self, other):
"""implement the ``NOT IN`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.in_`, i.e. ``~x.in_(y)``.
In the case that ``other`` is an empty sequence, the compiler
produces an "empty not in" expression. This defaults to the
expression "1 = 1" to produce true in all cases. The
:paramref:`_sa.create_engine.empty_in_strategy` may be used to
alter this behavior.
.. versionchanged:: 1.4 The ``not_in()`` operator is renamed from
``notin_()`` in previous releases. The previous name remains
available for backwards compatibility.
.. versionchanged:: 1.2 The :meth:`.ColumnOperators.in_` and
:meth:`.ColumnOperators.not_in` operators
now produce a "static" expression for an empty IN sequence
by default.
.. seealso::
:meth:`.ColumnOperators.in_`
"""
return self.operate(not_in_op, other)
# deprecated 1.4; see #5429
notin_ = not_in
def not_like(self, other, escape=None):
"""implement the ``NOT LIKE`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.like`, i.e. ``~x.like(y)``.
.. versionchanged:: 1.4 The ``not_like()`` operator is renamed from
``notlike()`` in previous releases. The previous name remains
available for backwards compatibility.
.. seealso::
:meth:`.ColumnOperators.like`
"""
return self.operate(notlike_op, other, escape=escape)
# deprecated 1.4; see #5435
notlike = not_like
def not_ilike(self, other, escape=None):
"""implement the ``NOT ILIKE`` operator.
This is equivalent to using negation with
:meth:`.ColumnOperators.ilike`, i.e. ``~x.ilike(y)``.
.. versionchanged:: 1.4 The ``not_ilike()`` operator is renamed from
``notilike()`` in previous releases. The previous name remains
available for backwards compatibility.
.. seealso::
:meth:`.ColumnOperators.ilike`
"""
return self.operate(notilike_op, other, escape=escape)
# deprecated 1.4; see #5435
notilike = not_ilike
def is_(self, other):
"""Implement the ``IS`` operator.
Normally, ``IS`` is generated automatically when comparing to a
value of ``None``, which resolves to ``NULL``. However, explicit
usage of ``IS`` may be desirable if comparing to boolean values
on certain platforms.
.. seealso:: :meth:`.ColumnOperators.is_not`
"""
return self.operate(is_, other)
def is_not(self, other):
"""Implement the ``IS NOT`` operator.
Normally, ``IS NOT`` is generated automatically when comparing to a
value of ``None``, which resolves to ``NULL``. However, explicit
usage of ``IS NOT`` may be desirable if comparing to boolean values
on certain platforms.
.. versionchanged:: 1.4 The ``is_not()`` operator is renamed from
``isnot()`` in previous releases. The previous name remains
available for backwards compatibility.
.. seealso:: :meth:`.ColumnOperators.is_`
"""
return self.operate(is_not, other)
# deprecated 1.4; see #5429
isnot = is_not
def startswith(self, other, **kwargs):
r"""Implement the ``startswith`` operator.
Produces a LIKE expression that tests against a match for the start
of a string value::
column LIKE <other> || '%'
E.g.::
stmt = select(sometable).\
where(sometable.c.column.startswith("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
will behave like wildcards as well. For literal string
values, the :paramref:`.ColumnOperators.startswith.autoescape` flag
may be set to ``True`` to apply escaping to occurrences of these
characters within the string value so that they match as themselves
and not as wildcard characters. Alternatively, the
:paramref:`.ColumnOperators.startswith.escape` parameter will establish
a given character as an escape character which can be of use when
the target expression is not a literal string.
:param other: expression to be compared. This is usually a plain
string value, but can also be an arbitrary SQL expression. LIKE
wildcard characters ``%`` and ``_`` are not escaped by default unless
the :paramref:`.ColumnOperators.startswith.autoescape` flag is
set to True.
:param autoescape: boolean; when True, establishes an escape character
within the LIKE expression, then applies it to all occurrences of
``"%"``, ``"_"`` and the escape character itself within the
comparison value, which is assumed to be a literal string and not a
SQL expression.
An expression such as::
somecolumn.startswith("foo%bar", autoescape=True)
Will render as::
somecolumn LIKE :param || '%' ESCAPE '/'
With the value of ``:param`` as ``"foo/%bar"``.
:param escape: a character which when given will render with the
``ESCAPE`` keyword to establish that character as the escape
character. This character can then be placed preceding occurrences
of ``%`` and ``_`` to allow them to act as themselves and not
wildcard characters.
An expression such as::
somecolumn.startswith("foo/%bar", escape="^")
Will render as::
somecolumn LIKE :param || '%' ESCAPE '^'
The parameter may also be combined with
:paramref:`.ColumnOperators.startswith.autoescape`::
somecolumn.startswith("foo%bar^bat", escape="^", autoescape=True)
Where above, the given literal parameter will be converted to
``"foo^%bar^^bat"`` before being passed to the database.
.. seealso::
:meth:`.ColumnOperators.endswith`
:meth:`.ColumnOperators.contains`
:meth:`.ColumnOperators.like`
"""
return self.operate(startswith_op, other, **kwargs)
def endswith(self, other, **kwargs):
r"""Implement the 'endswith' operator.
Produces a LIKE expression that tests against a match for the end
of a string value::
column LIKE '%' || <other>
E.g.::
stmt = select(sometable).\
where(sometable.c.column.endswith("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
will behave like wildcards as well. For literal string
values, the :paramref:`.ColumnOperators.endswith.autoescape` flag
may be set to ``True`` to apply escaping to occurrences of these
characters within the string value so that they match as themselves
and not as wildcard characters. Alternatively, the
:paramref:`.ColumnOperators.endswith.escape` parameter will establish
a given character as an escape character which can be of use when
the target expression is not a literal string.
:param other: expression to be compared. This is usually a plain
string value, but can also be an arbitrary SQL expression. LIKE
wildcard characters ``%`` and ``_`` are not escaped by default unless
the :paramref:`.ColumnOperators.endswith.autoescape` flag is
set to True.
:param autoescape: boolean; when True, establishes an escape character
within the LIKE expression, then applies it to all occurrences of
``"%"``, ``"_"`` and the escape character itself within the
comparison value, which is assumed to be a literal string and not a
SQL expression.
An expression such as::
somecolumn.endswith("foo%bar", autoescape=True)
Will render as::
somecolumn LIKE '%' || :param ESCAPE '/'
With the value of ``:param`` as ``"foo/%bar"``.
:param escape: a character which when given will render with the
``ESCAPE`` keyword to establish that character as the escape
character. This character can then be placed preceding occurrences
of ``%`` and ``_`` to allow them to act as themselves and not
wildcard characters.
An expression such as::
somecolumn.endswith("foo/%bar", escape="^")
Will render as::
somecolumn LIKE '%' || :param ESCAPE '^'
The parameter may also be combined with
:paramref:`.ColumnOperators.endswith.autoescape`::
somecolumn.endswith("foo%bar^bat", escape="^", autoescape=True)
Where above, the given literal parameter will be converted to
``"foo^%bar^^bat"`` before being passed to the database.
.. seealso::
:meth:`.ColumnOperators.startswith`
:meth:`.ColumnOperators.contains`
:meth:`.ColumnOperators.like`
"""
return self.operate(endswith_op, other, **kwargs)
def contains(self, other, **kwargs):
r"""Implement the 'contains' operator.
Produces a LIKE expression that tests against a match for the middle
of a string value::
column LIKE '%' || <other> || '%'
E.g.::
stmt = select(sometable).\
where(sometable.c.column.contains("foobar"))
Since the operator uses ``LIKE``, wildcard characters
``"%"`` and ``"_"`` that are present inside the <other> expression
will behave like wildcards as well. For literal string
values, the :paramref:`.ColumnOperators.contains.autoescape` flag
may be set to ``True`` to apply escaping to occurrences of these
characters within the string value so that they match as themselves
and not as wildcard characters. Alternatively, the
:paramref:`.ColumnOperators.contains.escape` parameter will establish
a given character as an escape character which can be of use when
the target expression is not a literal string.
:param other: expression to be compared. This is usually a plain
string value, but can also be an arbitrary SQL expression. LIKE
wildcard characters ``%`` and ``_`` are not escaped by default unless
the :paramref:`.ColumnOperators.contains.autoescape` flag is
set to True.
:param autoescape: boolean; when True, establishes an escape character
within the LIKE expression, then applies it to all occurrences of
``"%"``, ``"_"`` and the escape character itself within the
comparison value, which is assumed to be a literal string and not a
SQL expression.
An expression such as::
somecolumn.contains("foo%bar", autoescape=True)
Will render as::
somecolumn LIKE '%' || :param || '%' ESCAPE '/'
With the value of ``:param`` as ``"foo/%bar"``.
:param escape: a character which when given will render with the
``ESCAPE`` keyword to establish that character as the escape
character. This character can then be placed preceding occurrences
of ``%`` and ``_`` to allow them to act as themselves and not
wildcard characters.
An expression such as::
somecolumn.contains("foo/%bar", escape="^")
Will render as::
somecolumn LIKE '%' || :param || '%' ESCAPE '^'
The parameter may also be combined with
:paramref:`.ColumnOperators.contains.autoescape`::
somecolumn.contains("foo%bar^bat", escape="^", autoescape=True)
Where above, the given literal parameter will be converted to
``"foo^%bar^^bat"`` before being passed to the database.
.. seealso::
:meth:`.ColumnOperators.startswith`
:meth:`.ColumnOperators.endswith`
:meth:`.ColumnOperators.like`
"""
return self.operate(contains_op, other, **kwargs)
def match(self, other, **kwargs):
"""Implements a database-specific 'match' operator.
:meth:`_sql.ColumnOperators.match` attempts to resolve to
a MATCH-like function or operator provided by the backend.
Examples include:
* PostgreSQL - renders ``x @@ to_tsquery(y)``
* MySQL - renders ``MATCH (x) AGAINST (y IN BOOLEAN MODE)``
.. seealso::
:class:`_mysql.match` - MySQL specific construct with
additional features.
* Oracle - renders ``CONTAINS(x, y)``
* other backends may provide special implementations.
* Backends without any special implementation will emit
the operator as "MATCH". This is compatible with SQLite, for
example.
"""
return self.operate(match_op, other, **kwargs)
def regexp_match(self, pattern, flags=None):
"""Implements a database-specific 'regexp match' operator.
E.g.::
stmt = select(table.c.some_column).where(
table.c.some_column.regexp_match('^(b|c)')
)
:meth:`_sql.ColumnOperators.regexp_match` attempts to resolve to
a REGEXP-like function or operator provided by the backend, however
the specific regular expression syntax and flags available are
**not backend agnostic**.
Examples include:
* PostgreSQL - renders ``x ~ y`` or ``x !~ y`` when negated.
* Oracle - renders ``REGEXP_LIKE(x, y)``
* SQLite - uses SQLite's ``REGEXP`` placeholder operator and calls into
the Python ``re.match()`` builtin.
* other backends may provide special implementations.
* Backends without any special implementation will emit
the operator as "REGEXP" or "NOT REGEXP". This is compatible with
SQLite and MySQL, for example.
Regular expression support is currently implemented for Oracle,
PostgreSQL, MySQL and MariaDB. Partial support is available for
SQLite. Support among third-party dialects may vary.
:param pattern: The regular expression pattern string or column
clause.
:param flags: Any regular expression string flags to apply. Flags
tend to be backend specific. It can be a string or a column clause.
Some backends, like PostgreSQL and MariaDB, may alternatively
specify the flags as part of the pattern.
When using the ignore case flag 'i' in PostgreSQL, the ignore case
regexp match operator ``~*`` or ``!~*`` will be used.
.. versionadded:: 1.4
.. seealso::
:meth:`_sql.ColumnOperators.regexp_replace`
"""
return self.operate(regexp_match_op, pattern, flags=flags)
def regexp_replace(self, pattern, replacement, flags=None):
"""Implements a database-specific 'regexp replace' operator.
E.g.::
stmt = select(
table.c.some_column.regexp_replace(
'b(..)',
'X\1Y',
flags='g'
)
)
:meth:`_sql.ColumnOperators.regexp_replace` attempts to resolve to
a REGEXP_REPLACE-like function provided by the backend, which
usually emits the function ``REGEXP_REPLACE()``. However,
the specific regular expression syntax and flags available are
**not backend agnostic**.
Regular expression replacement support is currently implemented for
Oracle, PostgreSQL, MySQL 8 or greater and MariaDB. Support among
third-party dialects may vary.
:param pattern: The regular expression pattern string or column
clause.
:param replacement: The replacement string or column clause.
:param flags: Any regular expression string flags to apply. Flags
tend to be backend specific. It can be a string or a column clause.
Some backends, like PostgreSQL and MariaDB, may alternatively
specify the flags as part of the pattern.
.. versionadded:: 1.4
.. seealso::
:meth:`_sql.ColumnOperators.regexp_match`
"""
return self.operate(
regexp_replace_op, pattern, replacement=replacement, flags=flags
)
def desc(self):
"""Produce a :func:`_expression.desc` clause against the
parent object."""
return self.operate(desc_op)
def asc(self):
"""Produce a :func:`_expression.asc` clause against the
parent object."""
return self.operate(asc_op)
def nulls_first(self):
"""Produce a :func:`_expression.nulls_first` clause against the
parent object.
.. versionchanged:: 1.4 The ``nulls_first()`` operator is
renamed from ``nullsfirst()`` in previous releases.
The previous name remains available for backwards compatibility.
"""
return self.operate(nulls_first_op)
# deprecated 1.4; see #5435
nullsfirst = nulls_first
def nulls_last(self):
"""Produce a :func:`_expression.nulls_last` clause against the
parent object.
.. versionchanged:: 1.4 The ``nulls_last()`` operator is
renamed from ``nullslast()`` in previous releases.
The previous name remains available for backwards compatibility.
"""
return self.operate(nulls_last_op)
# deprecated 1.4; see #5429
nullslast = nulls_last
def collate(self, collation):
"""Produce a :func:`_expression.collate` clause against
the parent object, given the collation string.
.. seealso::
:func:`_expression.collate`
"""
return self.operate(collate, collation)
def __radd__(self, other):
"""Implement the ``+`` operator in reverse.
See :meth:`.ColumnOperators.__add__`.
"""
return self.reverse_operate(add, other)
def __rsub__(self, other):
"""Implement the ``-`` operator in reverse.
See :meth:`.ColumnOperators.__sub__`.
"""
return self.reverse_operate(sub, other)
def __rmul__(self, other):
"""Implement the ``*`` operator in reverse.
See :meth:`.ColumnOperators.__mul__`.
"""
return self.reverse_operate(mul, other)
def __rdiv__(self, other):
"""Implement the ``/`` operator in reverse.
See :meth:`.ColumnOperators.__div__`.
"""
return self.reverse_operate(div, other)
def __rmod__(self, other):
"""Implement the ``%`` operator in reverse.
See :meth:`.ColumnOperators.__mod__`.
"""
return self.reverse_operate(mod, other)
def between(self, cleft, cright, symmetric=False):
"""Produce a :func:`_expression.between` clause against
the parent object, given the lower and upper range.
"""
return self.operate(between_op, cleft, cright, symmetric=symmetric)
def distinct(self):
"""Produce a :func:`_expression.distinct` clause against the
parent object.
"""
return self.operate(distinct_op)
def any_(self):
"""Produce an :func:`_expression.any_` clause against the
parent object.
See the documentation for :func:`_sql.any_` for examples.
.. note:: be sure not to confuse the newer
:meth:`_sql.ColumnOperators.any_` method with its older
:class:`_types.ARRAY`-specific counterpart, the
:meth:`_types.ARRAY.Comparator.any` method, which has a different
calling syntax and usage pattern.
.. versionadded:: 1.1
"""
return self.operate(any_op)
def all_(self):
"""Produce an :func:`_expression.all_` clause against the
parent object.
See the documentation for :func:`_sql.all_` for examples.
.. note:: be sure not to confuse the newer
:meth:`_sql.ColumnOperators.all_` method with its older
:class:`_types.ARRAY`-specific counterpart, the
:meth:`_types.ARRAY.Comparator.all` method, which has a different
calling syntax and usage pattern.
.. versionadded:: 1.1
"""
return self.operate(all_op)
def __add__(self, other):
"""Implement the ``+`` operator.
In a column context, produces the clause ``a + b``
if the parent object has non-string affinity.
If the parent object has a string affinity,
produces the concatenation operator, ``a || b`` -
see :meth:`.ColumnOperators.concat`.
"""
return self.operate(add, other)
def __sub__(self, other):
"""Implement the ``-`` operator.
In a column context, produces the clause ``a - b``.
"""
return self.operate(sub, other)
def __mul__(self, other):
"""Implement the ``*`` operator.
In a column context, produces the clause ``a * b``.
"""
return self.operate(mul, other)
def __div__(self, other):
"""Implement the ``/`` operator.
In a column context, produces the clause ``a / b``.
"""
return self.operate(div, other)
def __mod__(self, other):
"""Implement the ``%`` operator.
In a column context, produces the clause ``a % b``.
"""
return self.operate(mod, other)
def __truediv__(self, other):
"""Implement the ``//`` operator.
In a column context, produces the clause ``a / b``.
"""
return self.operate(truediv, other)
def __rtruediv__(self, other):
"""Implement the ``//`` operator in reverse.
See :meth:`.ColumnOperators.__truediv__`.
"""
return self.reverse_operate(truediv, other)
_commutative = {eq, ne, add, mul}
_comparison = {eq, ne, lt, gt, ge, le}
def commutative_op(fn):
_commutative.add(fn)
return fn
def comparison_op(fn):
_comparison.add(fn)
return fn
def from_():
raise NotImplementedError()
@comparison_op
def function_as_comparison_op():
raise NotImplementedError()
def as_():
raise NotImplementedError()
def exists():
raise NotImplementedError()
def is_true(a):
raise NotImplementedError()
# 1.4 deprecated; see #5435
istrue = is_true
def is_false(a):
raise NotImplementedError()
# 1.4 deprecated; see #5435
isfalse = is_false
@comparison_op
def is_distinct_from(a, b):
return a.is_distinct_from(b)
@comparison_op
def is_not_distinct_from(a, b):
return a.is_not_distinct_from(b)
# deprecated 1.4; see #5435
isnot_distinct_from = is_not_distinct_from
@comparison_op
def is_(a, b):
return a.is_(b)
@comparison_op
def is_not(a, b):
return a.is_not(b)
# 1.4 deprecated; see #5429
isnot = is_not
def collate(a, b):
return a.collate(b)
def op(a, opstring, b):
return a.op(opstring)(b)
@comparison_op
def like_op(a, b, escape=None):
return a.like(b, escape=escape)
@comparison_op
def not_like_op(a, b, escape=None):
return a.not_like(b, escape=escape)
# 1.4 deprecated; see #5435
notlike_op = not_like_op
@comparison_op
def ilike_op(a, b, escape=None):
return a.ilike(b, escape=escape)
@comparison_op
def not_ilike_op(a, b, escape=None):
return a.not_ilike(b, escape=escape)
# 1.4 deprecated; see #5435
notilike_op = not_ilike_op
@comparison_op
def between_op(a, b, c, symmetric=False):
return a.between(b, c, symmetric=symmetric)
@comparison_op
def not_between_op(a, b, c, symmetric=False):
return ~a.between(b, c, symmetric=symmetric)
# 1.4 deprecated; see #5435
notbetween_op = not_between_op
@comparison_op
def in_op(a, b):
return a.in_(b)
@comparison_op
def not_in_op(a, b):
return a.not_in(b)
# 1.4 deprecated; see #5429
notin_op = not_in_op
def distinct_op(a):
return a.distinct()
def any_op(a):
return a.any_()
def all_op(a):
return a.all_()
def _escaped_like_impl(fn, other, escape, autoescape):
if autoescape:
if autoescape is not True:
util.warn(
"The autoescape parameter is now a simple boolean True/False"
)
if escape is None:
escape = "/"
if not isinstance(other, util.compat.string_types):
raise TypeError("String value expected when autoescape=True")
if escape not in ("%", "_"):
other = other.replace(escape, escape + escape)
other = other.replace("%", escape + "%").replace("_", escape + "_")
return fn(other, escape=escape)
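# Worked example of the autoescape branch above (plain string manipulation,
# using the default "/" escape): "foo%bar_baz" is rewritten so its LIKE
# wildcards match literally::
#
#     >>> s = "foo%bar_baz".replace("/", "//")
#     >>> s.replace("%", "/%").replace("_", "/_")
#     'foo/%bar/_baz'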
@comparison_op
def startswith_op(a, b, escape=None, autoescape=False):
return _escaped_like_impl(a.startswith, b, escape, autoescape)
@comparison_op
def not_startswith_op(a, b, escape=None, autoescape=False):
return ~_escaped_like_impl(a.startswith, b, escape, autoescape)
# 1.4 deprecated; see #5435
notstartswith_op = not_startswith_op
@comparison_op
def endswith_op(a, b, escape=None, autoescape=False):
return _escaped_like_impl(a.endswith, b, escape, autoescape)
@comparison_op
def not_endswith_op(a, b, escape=None, autoescape=False):
return ~_escaped_like_impl(a.endswith, b, escape, autoescape)
# 1.4 deprecated; see #5435
notendswith_op = not_endswith_op
@comparison_op
def contains_op(a, b, escape=None, autoescape=False):
return _escaped_like_impl(a.contains, b, escape, autoescape)
@comparison_op
def not_contains_op(a, b, escape=None, autoescape=False):
return ~_escaped_like_impl(a.contains, b, escape, autoescape)
# 1.4 deprecated; see #5435
notcontains_op = not_contains_op
@comparison_op
def match_op(a, b, **kw):
return a.match(b, **kw)
@comparison_op
def regexp_match_op(a, b, flags=None):
return a.regexp_match(b, flags=flags)
@comparison_op
def not_regexp_match_op(a, b, flags=None):
return ~a.regexp_match(b, flags=flags)
def regexp_replace_op(a, b, replacement, flags=None):
return a.regexp_replace(b, replacement=replacement, flags=flags)
@comparison_op
def not_match_op(a, b, **kw):
return ~a.match(b, **kw)
# 1.4 deprecated; see #5429
notmatch_op = not_match_op
def comma_op(a, b):
raise NotImplementedError()
def filter_op(a, b):
raise NotImplementedError()
def concat_op(a, b):
return a.concat(b)
def desc_op(a):
return a.desc()
def asc_op(a):
return a.asc()
def nulls_first_op(a):
return a.nulls_first()
# 1.4 deprecated; see #5435
nullsfirst_op = nulls_first_op
def nulls_last_op(a):
return a.nulls_last()
# 1.4 deprecated; see #5435
nullslast_op = nulls_last_op
def json_getitem_op(a, b):
raise NotImplementedError()
def json_path_getitem_op(a, b):
raise NotImplementedError()
def is_comparison(op):
return op in _comparison or (
isinstance(op, custom_op) and op.is_comparison
)
def is_commutative(op):
return op in _commutative
def is_ordering_modifier(op):
return op in (asc_op, desc_op, nulls_first_op, nulls_last_op)
def is_natural_self_precedent(op):
return op in _natural_self_precedent or (
isinstance(op, custom_op) and op.natural_self_precedent
)
_booleans = (inv, is_true, is_false, and_, or_)
def is_boolean(op):
return is_comparison(op) or op in _booleans
_mirror = {gt: lt, ge: le, lt: gt, le: ge}
def mirror(op):
"""rotate a comparison operator 180 degrees.
Note this is not the same as negation.
"""
return _mirror.get(op, op)
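# For example, ``mirror(gt) is lt``: swapping the operands of ``a > b``
# yields ``b < a``, whereas negation would yield ``a <= b``.  Operators
# absent from ``_mirror`` (e.g. ``eq``) are returned unchanged.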
_associative = _commutative.union([concat_op, and_, or_]).difference([eq, ne])
def is_associative(op):
return op in _associative
_natural_self_precedent = _associative.union(
[getitem, json_getitem_op, json_path_getitem_op]
)
"""Operators where if we have (a op b) op c, we don't want to
parenthesize (a op b).
"""
_asbool = util.symbol("_asbool", canonical=-10)
_smallest = util.symbol("_smallest", canonical=-100)
_largest = util.symbol("_largest", canonical=100)
_PRECEDENCE = {
from_: 15,
function_as_comparison_op: 15,
any_op: 15,
all_op: 15,
getitem: 15,
json_getitem_op: 15,
json_path_getitem_op: 15,
mul: 8,
truediv: 8,
div: 8,
mod: 8,
neg: 8,
add: 7,
sub: 7,
concat_op: 6,
filter_op: 6,
match_op: 5,
not_match_op: 5,
regexp_match_op: 5,
not_regexp_match_op: 5,
regexp_replace_op: 5,
ilike_op: 5,
not_ilike_op: 5,
like_op: 5,
not_like_op: 5,
in_op: 5,
not_in_op: 5,
is_: 5,
is_not: 5,
eq: 5,
ne: 5,
is_distinct_from: 5,
is_not_distinct_from: 5,
gt: 5,
lt: 5,
ge: 5,
le: 5,
between_op: 5,
not_between_op: 5,
distinct_op: 5,
inv: 5,
is_true: 5,
is_false: 5,
and_: 3,
or_: 2,
comma_op: -1,
desc_op: 3,
asc_op: 3,
collate: 4,
as_: -1,
exists: 0,
_asbool: -10,
_smallest: _smallest,
_largest: _largest,
}
def is_precedent(operator, against):
if operator is against and is_natural_self_precedent(operator):
return False
else:
return _PRECEDENCE.get(
operator, getattr(operator, "precedence", _smallest)
) <= _PRECEDENCE.get(against, getattr(against, "precedence", _largest))
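# Sketch of how the table above drives parenthesization: ``or_`` has
# precedence 2 and ``and_`` has 3, so ``is_precedent(or_, and_)`` is True
# (2 <= 3) and an OR nested under an AND is parenthesized, yielding
# ``a AND (b OR c)``; conversely ``is_precedent(and_, or_)`` is False.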
#!/usr/bin/python
from k5test import *
import os
realm = K5Realm(create_host=False, create_user=False)
def make_client(name):
global realm
realm.addprinc(name, password(name))
ccache = os.path.join(realm.testdir,
'kadmin_ccache_' + name.replace('/', '_'))
realm.kinit(name, password(name),
flags=['-S', 'kadmin/admin', '-c', ccache])
return ccache
def kadmin_as(client, query, **kwargs):
global realm
return realm.run([kadmin, '-c', client] + query, **kwargs)
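# Helper summary: make_client() creates a principal and kinits it against
# the kadmin/admin service into a per-client ccache; kadmin_as() then runs
# a kadmin query authenticated as that client, so each check below
# exercises the ACL entry for exactly one client principal.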
all_add = make_client('all_add')
all_changepw = make_client('all_changepw')
all_delete = make_client('all_delete')
all_inquire = make_client('all_inquire')
all_list = make_client('all_list')
all_modify = make_client('all_modify')
all_rename = make_client('all_rename')
all_wildcard = make_client('all_wildcard')
all_extract = make_client('all_extract')
some_add = make_client('some_add')
some_changepw = make_client('some_changepw')
some_delete = make_client('some_delete')
some_inquire = make_client('some_inquire')
some_modify = make_client('some_modify')
some_rename = make_client('some_rename')
restricted_add = make_client('restricted_add')
restricted_modify = make_client('restricted_modify')
restricted_rename = make_client('restricted_rename')
wctarget = make_client('wctarget')
admin = make_client('user/admin')
none = make_client('none')
restrictions = make_client('restrictions')
onetwothreefour = make_client('one/two/three/four')
realm.run([kadminl, 'addpol', '-minlife', '1 day', 'minlife'])
f = open(os.path.join(realm.testdir, 'acl'), 'w')
f.write('''
all_add a
all_changepw c
all_delete d
all_inquire i
all_list l
all_modify im
all_rename ad
all_wildcard x
all_extract ie
some_add a selected
some_changepw c selected
some_delete d selected
some_inquire i selected
some_modify im selected
some_rename d from
some_rename a to
restricted_add a * +preauth
restricted_modify im * +preauth
restricted_rename ad * +preauth
*/* d *2/*1
# The next line is a regression test for #8154; it is not used directly.
one/*/*/five l
*/two/*/* d *3/*1/*2
*/admin a
wctarget a wild/*
restrictions a type1 -policy minlife
restrictions a type2 -clearpolicy
restrictions a type3 -maxlife 1h -maxrenewlife 2h
''')
f.close()
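# Decoding the wildcard entries above: *N in a target expands to the Nth
# wildcard matched in the client principal, so "*/* d *2/*1" lets client
# a/b delete b/a.  For "*/two/*/* d *3/*1/*2" and client
# one/two/three/four, the wildcards bind *1=one, *2=three, *3=four,
# permitting deletion of four/one/three (exercised near the end of this
# file).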
realm.start_kadmind()
# cpw can generate four different RPC calls depending on options.
realm.addprinc('selected', 'oldpw')
realm.addprinc('unselected', 'oldpw')
for pw in (['-pw', 'newpw'], ['-randkey']):
for ks in ([], ['-e', 'aes256-cts']):
args = pw + ks
kadmin_as(all_changepw, ['cpw'] + args + ['unselected'])
kadmin_as(some_changepw, ['cpw'] + args + ['selected'])
msg = "Operation requires ``change-password'' privilege"
kadmin_as(none, ['cpw'] + args + ['selected'], expected_code=1,
expected_msg=msg)
kadmin_as(some_changepw, ['cpw'] + args + ['unselected'],
expected_code=1, expected_msg=msg)
kadmin_as(none, ['cpw'] + args + ['none'])
realm.run([kadminl, 'modprinc', '-policy', 'minlife', 'none'])
msg = "Current password's minimum life has not expired"
kadmin_as(none, ['cpw'] + args + ['none'], expected_code=1,
expected_msg=msg)
realm.run([kadminl, 'modprinc', '-clearpolicy', 'none'])
realm.run([kadminl, 'delprinc', 'selected'])
realm.run([kadminl, 'delprinc', 'unselected'])
kadmin_as(all_add, ['addpol', 'policy'])
realm.run([kadminl, 'delpol', 'policy'])
kadmin_as(none, ['addpol', 'policy'], expected_code=1,
expected_msg="Operation requires ``add'' privilege")
# addprinc can generate two different RPC calls depending on options.
for ks in ([], ['-e', 'aes256-cts']):
args = ['-pw', 'pw'] + ks
kadmin_as(all_add, ['addprinc'] + args + ['unselected'])
realm.run([kadminl, 'delprinc', 'unselected'])
kadmin_as(some_add, ['addprinc'] + args + ['selected'])
realm.run([kadminl, 'delprinc', 'selected'])
kadmin_as(restricted_add, ['addprinc'] + args + ['unselected'])
realm.run([kadminl, 'getprinc', 'unselected'],
expected_msg='REQUIRES_PRE_AUTH')
realm.run([kadminl, 'delprinc', 'unselected'])
kadmin_as(none, ['addprinc'] + args + ['selected'], expected_code=1,
expected_msg="Operation requires ``add'' privilege")
kadmin_as(some_add, ['addprinc'] + args + ['unselected'], expected_code=1,
expected_msg="Operation requires ``add'' privilege")
realm.addprinc('unselected', 'pw')
kadmin_as(all_delete, ['delprinc', 'unselected'])
realm.addprinc('selected', 'pw')
kadmin_as(some_delete, ['delprinc', 'selected'])
realm.addprinc('unselected', 'pw')
kadmin_as(none, ['delprinc', 'unselected'], expected_code=1,
expected_msg="Operation requires ``delete'' privilege")
kadmin_as(some_delete, ['delprinc', 'unselected'], expected_code=1,
expected_msg="Operation requires ``delete'' privilege")
realm.run([kadminl, 'delprinc', 'unselected'])
kadmin_as(all_inquire, ['getpol', 'minlife'], expected_msg='Policy: minlife')
kadmin_as(none, ['getpol', 'minlife'], expected_code=1,
expected_msg="Operation requires ``get'' privilege")
realm.run([kadminl, 'modprinc', '-policy', 'minlife', 'none'])
kadmin_as(none, ['getpol', 'minlife'], expected_msg='Policy: minlife')
realm.run([kadminl, 'modprinc', '-clearpolicy', 'none'])
realm.addprinc('selected', 'pw')
realm.addprinc('unselected', 'pw')
kadmin_as(all_inquire, ['getprinc', 'unselected'],
expected_msg='Principal: unselected@KRBTEST.COM')
kadmin_as(some_inquire, ['getprinc', 'selected'],
expected_msg='Principal: selected@KRBTEST.COM')
kadmin_as(none, ['getprinc', 'selected'], expected_code=1,
expected_msg="Operation requires ``get'' privilege")
kadmin_as(some_inquire, ['getprinc', 'unselected'], expected_code=1,
expected_msg="Operation requires ``get'' privilege")
kadmin_as(none, ['getprinc', 'none'],
expected_msg='Principal: none@KRBTEST.COM')
realm.run([kadminl, 'delprinc', 'selected'])
realm.run([kadminl, 'delprinc', 'unselected'])
kadmin_as(all_list, ['listprincs'], expected_msg='K/M@KRBTEST.COM')
kadmin_as(none, ['listprincs'], expected_code=1,
expected_msg="Operation requires ``list'' privilege")
realm.addprinc('selected', 'pw')
realm.addprinc('unselected', 'pw')
realm.run([kadminl, 'setstr', 'selected', 'key', 'value'])
realm.run([kadminl, 'setstr', 'unselected', 'key', 'value'])
kadmin_as(all_inquire, ['getstrs', 'unselected'], expected_msg='key: value')
kadmin_as(some_inquire, ['getstrs', 'selected'], expected_msg='key: value')
kadmin_as(none, ['getstrs', 'selected'], expected_code=1,
expected_msg="Operation requires ``get'' privilege")
kadmin_as(some_inquire, ['getstrs', 'unselected'], expected_code=1,
expected_msg="Operation requires ``get'' privilege")
kadmin_as(none, ['getstrs', 'none'], expected_msg='(No string attributes.)')
realm.run([kadminl, 'delprinc', 'selected'])
realm.run([kadminl, 'delprinc', 'unselected'])
out = kadmin_as(all_modify, ['modpol', '-maxlife', '1 hour', 'policy'],
expected_code=1)
if 'Operation requires' in out:
fail('modpol success (acl)')
kadmin_as(none, ['modpol', '-maxlife', '1 hour', 'policy'], expected_code=1,
expected_msg="Operation requires ``modify'' privilege")
realm.addprinc('selected', 'pw')
realm.addprinc('unselected', 'pw')
kadmin_as(all_modify, ['modprinc', '-maxlife', '1 hour', 'unselected'])
kadmin_as(some_modify, ['modprinc', '-maxlife', '1 hour', 'selected'])
kadmin_as(restricted_modify, ['modprinc', '-maxlife', '1 hour', 'unselected'])
realm.run([kadminl, 'getprinc', 'unselected'],
expected_msg='REQUIRES_PRE_AUTH')
kadmin_as(all_inquire, ['modprinc', '-maxlife', '1 hour', 'selected'],
expected_code=1,
expected_msg="Operation requires ``modify'' privilege")
kadmin_as(some_modify, ['modprinc', '-maxlife', '1 hour', 'unselected'],
expected_code=1, expected_msg='Operation requires')
realm.run([kadminl, 'delprinc', 'selected'])
realm.run([kadminl, 'delprinc', 'unselected'])
realm.addprinc('selected', 'pw')
realm.addprinc('unselected', 'pw')
kadmin_as(all_modify, ['purgekeys', 'unselected'])
kadmin_as(some_modify, ['purgekeys', 'selected'])
kadmin_as(none, ['purgekeys', 'selected'], expected_code=1,
expected_msg="Operation requires ``modify'' privilege")
kadmin_as(some_modify, ['purgekeys', 'unselected'], expected_code=1,
expected_msg="Operation requires ``modify'' privilege")
kadmin_as(none, ['purgekeys', 'none'])
realm.run([kadminl, 'delprinc', 'selected'])
realm.run([kadminl, 'delprinc', 'unselected'])
realm.addprinc('from', 'pw')
kadmin_as(all_rename, ['renprinc', 'from', 'to'])
realm.run([kadminl, 'renprinc', 'to', 'from'])
kadmin_as(some_rename, ['renprinc', 'from', 'to'])
realm.run([kadminl, 'renprinc', 'to', 'from'])
kadmin_as(all_add, ['renprinc', 'from', 'to'], expected_code=1,
expected_msg="Insufficient authorization for operation")
kadmin_as(all_delete, ['renprinc', 'from', 'to'], expected_code=1,
expected_msg="Insufficient authorization for operation")
kadmin_as(some_rename, ['renprinc', 'from', 'notto'], expected_code=1,
expected_msg="Insufficient authorization for operation")
realm.run([kadminl, 'renprinc', 'from', 'notfrom'])
kadmin_as(some_rename, ['renprinc', 'notfrom', 'to'], expected_code=1,
expected_msg="Insufficient authorization for operation")
kadmin_as(restricted_rename, ['renprinc', 'notfrom', 'to'], expected_code=1,
expected_msg="Insufficient authorization for operation")
realm.run([kadminl, 'delprinc', 'notfrom'])
realm.addprinc('selected', 'pw')
realm.addprinc('unselected', 'pw')
kadmin_as(all_modify, ['setstr', 'unselected', 'key', 'value'])
kadmin_as(some_modify, ['setstr', 'selected', 'key', 'value'])
kadmin_as(none, ['setstr', 'selected', 'key', 'value'], expected_code=1,
expected_msg="Operation requires ``modify'' privilege")
kadmin_as(some_modify, ['setstr', 'unselected', 'key', 'value'],
expected_code=1, expected_msg='Operation requires')
realm.run([kadminl, 'delprinc', 'selected'])
realm.run([kadminl, 'delprinc', 'unselected'])
kadmin_as(admin, ['addprinc', '-pw', 'pw', 'anytarget'])
realm.run([kadminl, 'delprinc', 'anytarget'])
kadmin_as(wctarget, ['addprinc', '-pw', 'pw', 'wild/card'])
realm.run([kadminl, 'delprinc', 'wild/card'])
kadmin_as(wctarget, ['addprinc', '-pw', 'pw', 'wild/card/extra'],
expected_code=1, expected_msg='Operation requires')
realm.addprinc('admin/user', 'pw')
kadmin_as(admin, ['delprinc', 'admin/user'])
kadmin_as(admin, ['delprinc', 'none'], expected_code=1,
expected_msg='Operation requires')
realm.addprinc('four/one/three', 'pw')
kadmin_as(onetwothreefour, ['delprinc', 'four/one/three'])
kadmin_as(restrictions, ['addprinc', '-pw', 'pw', 'type1'])
realm.run([kadminl, 'getprinc', 'type1'], expected_msg='Policy: minlife')
realm.run([kadminl, 'delprinc', 'type1'])
kadmin_as(restrictions, ['addprinc', '-pw', 'pw', '-policy', 'minlife',
'type2'])
realm.run([kadminl, 'getprinc', 'type2'], expected_msg='Policy: [none]')
realm.run([kadminl, 'delprinc', 'type2'])
kadmin_as(restrictions, ['addprinc', '-pw', 'pw', '-maxlife', '1 minute',
'type3'])
out = realm.run([kadminl, 'getprinc', 'type3'])
if ('Maximum ticket life: 0 days 00:01:00' not in out or
'Maximum renewable life: 0 days 02:00:00' not in out):
fail('restriction (maxlife low, maxrenewlife unspec)')
realm.run([kadminl, 'delprinc', 'type3'])
kadmin_as(restrictions, ['addprinc', '-pw', 'pw', '-maxrenewlife', '1 day',
'type3'])
realm.run([kadminl, 'getprinc', 'type3'],
expected_msg='Maximum renewable life: 0 days 02:00:00')
realm.run([kadminl, 'addprinc', '-pw', 'pw', 'extractkeys'])
kadmin_as(all_wildcard, ['ktadd', '-norandkey', 'extractkeys'],
expected_code=1,
expected_msg="Operation requires ``extract-keys'' privilege")
kadmin_as(all_extract, ['ktadd', '-norandkey', 'extractkeys'])
realm.kinit('extractkeys', flags=['-k'])
os.remove(realm.keytab)
kadmin_as(all_modify, ['modprinc', '+lockdown_keys', 'extractkeys'])
kadmin_as(all_changepw, ['cpw', '-pw', 'newpw', 'extractkeys'],
expected_code=1,
expected_msg="Operation requires ``change-password'' privilege")
kadmin_as(all_changepw, ['cpw', '-randkey', 'extractkeys'])
kadmin_as(all_extract, ['ktadd', '-norandkey', 'extractkeys'], expected_code=1,
expected_msg="Operation requires ``extract-keys'' privilege")
kadmin_as(all_delete, ['delprinc', 'extractkeys'], expected_code=1,
expected_msg="Operation requires ``delete'' privilege")
kadmin_as(all_rename, ['renprinc', 'extractkeys', 'renamedprinc'],
expected_code=1,
expected_msg="Operation requires ``delete'' privilege")
kadmin_as(all_modify, ['modprinc', '-lockdown_keys', 'extractkeys'],
expected_code=1,
expected_msg="Operation requires ``modify'' privilege")
realm.run([kadminl, 'modprinc', '-lockdown_keys', 'extractkeys'])
kadmin_as(all_extract, ['ktadd', '-norandkey', 'extractkeys'])
realm.kinit('extractkeys', flags=['-k'])
os.remove(realm.keytab)
# Verify that self-service key changes require an initial ticket.
realm.run([kadminl, 'cpw', '-pw', password('none'), 'none'])
realm.run([kadminl, 'modprinc', '+allow_tgs_req', 'kadmin/admin'])
realm.kinit('none', password('none'))
realm.run([kvno, 'kadmin/admin'])
msg = 'Operation requires initial ticket'
realm.run([kadmin, '-c', realm.ccache, 'cpw', '-pw', 'newpw', 'none'],
expected_code=1, expected_msg=msg)
realm.run([kadmin, '-c', realm.ccache, 'cpw', '-pw', 'newpw',
'-e', 'aes256-cts', 'none'], expected_code=1, expected_msg=msg)
realm.run([kadmin, '-c', realm.ccache, 'cpw', '-randkey', 'none'],
expected_code=1, expected_msg=msg)
realm.run([kadmin, '-c', realm.ccache, 'cpw', '-randkey', '-e', 'aes256-cts',
'none'], expected_code=1, expected_msg=msg)
success('kadmin ACL enforcement')
#!/usr/bin/env python
#
# getopt_tests.py: testing the svn command line processing
#
# Subversion is a tool for revision control.
# See http://subversion.apache.org for more information.
#
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
######################################################################
# General modules
import sys, re, os.path, logging
logger = logging.getLogger()
# Our testing module
import svntest
######################################################################
# Tests
#----------------------------------------------------------------------
# This directory contains all the expected output from svn.
getopt_output_dir = os.path.join(os.path.dirname(sys.argv[0]),
'getopt_tests_data')
# Naming convention for golden files: take the svn command line as a
# single string and apply the following sed transformations:
# echo svn option1 option2 ... | sed -e 's/ /_/g' -e 's/_--/--/g'
# Then append either _stdout or _stderr for the file descriptor to
# compare against.
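# For example, "svn --version --quiet" maps to the golden files
# getopt_tests_data/svn--version--quiet_stdout and
# getopt_tests_data/svn--version--quiet_stderr (see getopt__version__quiet
# below).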
def load_expected_output(basename):
"load the expected standard output and standard error"
stdout_filename = os.path.join(getopt_output_dir, basename + '_stdout')
stderr_filename = os.path.join(getopt_output_dir, basename + '_stderr')
exp_stdout = open(stdout_filename, 'r').readlines()
exp_stderr = open(stderr_filename, 'r').readlines()
return exp_stdout, exp_stderr
# With plaintext password storage enabled, `svn --version' emits a warning:
warn_line_re = re.compile("WARNING: Plaintext password storage")
# This is a list of lines to delete.
del_lines_res = [
# In 'svn --version', the date line is variable, for example:
# "compiled Apr 5 2002, 10:08:45"
re.compile(r'\s+compiled\s+'),
# Also for 'svn --version':
re.compile(r"\* ra_(neon|local|svn|serf) :"),
re.compile(r" - handles '(https?|file|svn)' scheme"),
re.compile(r" - with Cyrus SASL authentication"),
re.compile(r" - using serf \d+\.\d+\.\d+"),
re.compile(r"\* fs_(base|fs) :"),
]
# This is a list of lines to search and replace text on.
rep_lines_res = [
# In 'svn --version', this line varies, for example:
# "Subversion Client, version 0.10.2-dev (under development)"
# "Subversion Client, version 0.10.2 (r1729)"
(re.compile(r'version \d+\.\d+\.\d+(-[^ ]*)? \(.*\)'),
'version X.Y.Z '),
# The copyright end date keeps changing; fix forever.
(re.compile(r'Copyright \(C\) 20\d\d The Apache '
r'Software Foundation\.'),
'Copyright (C) YYYY The Apache Software Foundation'),
# In 'svn --version --quiet', we print only the version
# number in a single line.
(re.compile(r'^\d+\.\d+\.\d+(-[a-zA-Z0-9]+)?$'), 'X.Y.Z\n'),
# 'svn --help' has a line with the version number.
# It can vary, for example:
# "Subversion command-line client, version 1.1.0."
# "Subversion command-line client, version 1.1.0-dev."
(re.compile(r'Subversion command-line client, '
r'version \d+\.\d+\.\d+(.|-[a-zA-Z0-9]+\.)$'),
'Subversion command-line client, version X.Y.Z.'),
]
# This is a trigger pattern that selects the secondary set of
# delete/replace patterns
switch_res_line = 'System information:'
# This is a list of lines to delete after having seen switch_res_line.
switched_warn_line_re = None
switched_del_lines_res = [
# In svn --version --verbose, dependent libs loaded
# shared libs are optional.
re.compile(r'^\* (loaded|linked)'),
# In svn --version --verbose, remove everything from
# the extended lists
re.compile(r'^ - '),
]
# This is a list of lines to search and replace text on after having
# seen switch_res_line.
switched_rep_lines_res = [
# We don't care about the actual canonical host
(re.compile(r'^\* running on.*$'), '* running on'),
]
def process_lines(lines):
"delete lines that should not be compared and search and replace the rest"
output = [ ]
warn_re = warn_line_re
del_res = del_lines_res
rep_res = rep_lines_res
skip_next_line = 0
for line in lines:
if skip_next_line:
skip_next_line = 0
continue
if line.startswith(switch_res_line):
warn_re = switched_warn_line_re
del_res = switched_del_lines_res
rep_res = switched_rep_lines_res
# Skip these lines from the output list.
delete_line = 0
if warn_re and warn_re.match(line):
delete_line = 1
skip_next_line = 1 # Ignore the empty line after the warning
else:
for delete_re in del_res:
if delete_re.match(line):
delete_line = 1
break
if delete_line:
continue
# Search and replace text on the rest.
for replace_re, replace_str in rep_res:
line = replace_re.sub(replace_str, line)
output.append(line)
return output
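# Sketch: given the rules above, a stdout line such as
# "Subversion Client, version 1.1.0 (r1729)" is normalized to
# "Subversion Client, version X.Y.Z ", keeping the golden files stable
# across builds while still catching genuine output regressions.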
def run_one_test(sbox, basename, *varargs):
"run svn with args and compare against the specified output files"
### no need to use sbox.build() -- we don't need a repos or working copy
### for these tests.
exp_stdout, exp_stderr = load_expected_output(basename)
# special case the 'svn' test so that no extra arguments are added
if basename != 'svn':
exit_code, actual_stdout, actual_stderr = svntest.main.run_svn(1, *varargs)
else:
exit_code, actual_stdout, actual_stderr = svntest.main.run_command(svntest.main.svn_binary,
1, False, *varargs)
# Delete and perform search and replaces on the lines from the
# actual and expected output that may differ between build
# environments.
exp_stdout = process_lines(exp_stdout)
exp_stderr = process_lines(exp_stderr)
actual_stdout = process_lines(actual_stdout)
actual_stderr = process_lines(actual_stderr)
svntest.verify.compare_and_display_lines("Standard output does not match.",
"STDOUT", exp_stdout, actual_stdout)
svntest.verify.compare_and_display_lines("Standard error does not match.",
"STDERR", exp_stderr, actual_stderr)
def getopt_no_args(sbox):
"run svn with no arguments"
run_one_test(sbox, 'svn')
def getopt__version(sbox):
"run svn --version"
run_one_test(sbox, 'svn--version', '--version')
def getopt__version__quiet(sbox):
"run svn --version --quiet"
run_one_test(sbox, 'svn--version--quiet', '--version', '--quiet')
def getopt__version__verbose(sbox):
"run svn --version --verbose"
run_one_test(sbox, 'svn--version--verbose', '--version', '--verbose')
def getopt__help(sbox):
"run svn --help"
run_one_test(sbox, 'svn--help', '--help')
def getopt_help(sbox):
"run svn help"
run_one_test(sbox, 'svn_help', 'help')
def getopt_help_log_switch(sbox):
"run svn help log switch"
run_one_test(sbox, 'svn_help_log_switch', 'help', 'log', 'switch')
def getopt_help_bogus_cmd(sbox):
"run svn help bogus-cmd"
run_one_test(sbox, 'svn_help_bogus-cmd', 'help', 'bogus-cmd')
########################################################################
# Run the tests
# list all tests here, starting with None:
test_list = [ None,
getopt_no_args,
getopt__version,
getopt__version__quiet,
getopt__version__verbose,
getopt__help,
getopt_help,
getopt_help_bogus_cmd,
getopt_help_log_switch,
]
if __name__ == '__main__':
svntest.main.run_tests(test_list)
# NOTREACHED
### End of file.
import numpy as np
import geoprobe
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredSizeBar
import matplotlib as mpl
import scipy.ndimage
import shapely.geometry
import utilities
class Section(object):
"""
An "arbitrary" cross section plot extracted from a geoprobe volume.
"""
def __init__(self, vol, x, y, ax, colormap, zmin=None, zmax=None, ve=2.0,
name='Cross Section', resample_factor=2):
"""
Make a new cross section along `x` and `y` from seismic data in `vol`
on the matplotlib axes `ax`.
Parameters:
-----------
vol : A geoprobe volume object containing the seismic data to
be displayed on the cross section.
x : A sequence of x-coordinates (in inline/crossline) representing
points along the cross section line
y : A sequence of y-coordinates (in inline/crossline) representing
points along the cross section line
ax : A matplotlib axes
colormap : A matplotlib colormap
zmin : The minimum (top) depth/time for the cross section
zmax : The maximum (bottom) depth/time for the cross section
ve : The vertical exaggeration of the displayed cross section. If
the seismic data is in depth, then this is the true vertical
exaggeration.
name : The title of the cross section
resample_factor : Interpolation factor for the "raw" seismic data.
If > 1, the seismic data will be linearly interpolated before
display.
"""
self.ax = ax
self.x, self.y = np.asarray(x), np.asarray(y)
self.vol = vol
self.dxw, self.dyw = self.vol.dxW, self.vol.dyW
if zmin is None:
zmin = self.vol.zmin
if zmax is None:
zmax = self.vol.zmax
self.zmin = max(self.vol.zmin, zmin)
self.zmax = min(self.vol.zmax, zmax)
self.ve = ve
self.colormap = colormap
self.resample_factor = resample_factor
self.name = name
self.horizon_lines = None
self.loc_line = None
def extract_section(self, vol=None):
"""
Extract data along this cross section's profile from a geoprobe volume.
In most cases, this method will only be called internally, and you won't
        need to call it explicitly. However, it is often useful for constructing
"unusual" plots.
Parameters
----------
vol : (optional) A geoprobe volume instance
            Defaults to the geoprobe volume specified at initialization.
Returns
-------
data : A 2D numpy array of seismic data
extent : A 4-element list of the minimum and maximum distances along
this cross section's profile line and the minimum and maximum
z-values of the seismic data.
"""
if vol is None:
vol = self.vol
data, xi, yi = vol.extract_section(self.x, self.y, self.zmin, self.zmax)
data = self.make_raw_image(data)
distance = self.calculate_distance_along_section(xi, yi)
extent = [distance.min(), distance.max(), self.zmin, self.zmax]
return data, extent
def update_position(self, x, y):
"""Not fully implemented."""
# TODO: Finish this...
self.x, self.y = x, y
data, extent = self.extract_section()
self.im.set_data(data)
self.im.set_extent(extent)
def update_horizons(self, hor_set):
"""Not fully implemented."""
# TODO: Finish this...
for line, hor in zip(self.horizon_lines, hor_set.horizons):
x, y = self.slice_horizon(hor)
line.set_data(x, y)
@property
def line(self):
"""
A shapely LineString representing this cross section's profile line.
"""
return shapely.geometry.LineString(zip(self.x, self.y))
def calculate_distance_along_section(self, xi, yi):
"""
Calculates the distance along this cross section's profile line to the
specified point.
Parameters:
-----------
xi, yi : Sequences of x and y coordinates
Returns:
--------
distance : The distance along the profile line to the point
"""
start, _ = self.project_onto(xi[0], yi[0])
distance = np.hypot(self.dxw * np.diff(xi),
self.dyw * np.diff(yi))
distance = np.cumsum(np.r_[start, distance])
return distance
def plot_image(self, **kwargs):
"""
Extract data for the cross section (i.e. call self.extract_section())
        and plot it on the axes from the plot (self.ax) using the appropriate
parameters (e.g. self.colormap, self.ve, etc).
Basically, this displays the seismic data along the cross section.
Usually this will be called by the SectionManager, and you won't need to
call it directly. However, it is not called during initialization of
the Section object to give more flexibility in plotting.
Keyword arguments are passed on to matplotlib's ``imshow``.
"""
data, extent = self.extract_section()
self.im = self.ax.imshow(data, origin='lower', extent=extent,
interpolation='bilinear', aspect=self.ve, cmap=self.colormap,
**kwargs)
if not self.ax.yaxis_inverted():
self.ax.invert_yaxis()
return self.im
def plot_scalebar(self, length=1, title=None, loc='upper right'):
"""
Plot a scalebar on the cross section.
Parameters:
-----------
length : number, default=1
The length of the scale bar in world units (e.g. meters)
title: string, default="{length} world units"
The title/label for the scale bar
loc: int or string
The position of the scale bar. Valid parameters are:
'upper right' : 1,
'upper left' : 2,
'lower left' : 3,
'lower right' : 4,
'right' : 5,
'center left' : 6,
'center right' : 7,
'lower center' : 8,
'upper center' : 9,
'center' : 10,
Returns:
--------
A matplotlib AnchoredSizeBar artist.
"""
if title is None:
title = '{} world units'.format(length)
self.sizebar = AnchoredSizeBar(self.ax.transData, length,
title, loc=loc, pad=0.5,
borderpad=0.5, sep=5, frameon=True)
self.ax.add_artist(self.sizebar)
return self.sizebar
def plot_on_map(self, mapview, **kwargs):
"""
Plot a cross section line for the cross section on the map.
Usually this will be called by the SectionManager, and you won't need to
call it directly.
The style of line plotted is controlled by the supplied keyword
arguments (which are passed on to matplotlib's ``plot``).
Parameters:
-----------
mapview : A section.Map instance.
        Additional keyword arguments are passed on to matplotlib.pyplot.plot.
Returns:
--------
A matplotlib.Line2D artist.
"""
kwargs.pop('label', None)
kwargs['label'] = self.name
self.loc_line, = mapview.ax.plot(self.x, self.y, **kwargs)
return self.loc_line
def seafloor_mute(self, seafloor, pad=0, color='white', value=None):
"""
"Mute" (i.e. plot a filled polygon over) the seismic data above the
seafloor.
Parameters:
-----------
seafloor: string filename or geoprobe.horizon instance
The seafloor horizon (or filename of one) to use.
pad: number
The vertical (above the seafloor) padding in z (time or depth)
units between the actual horizon values and the bottom of the
filled polygon.
color: matplotlib color specifier (e.g. string or tuple of floats)
The color of the filled polygon.
value: number (default=None)
If specified, the "color" kwarg is ignored, and self.colormap
is used to choose the color based on the input "value".
Returns:
--------
A matplotlib PolyCollection artist.
"""
if isinstance(seafloor, basestring):
seafloor = geoprobe.horizon(seafloor)
dist, z = self.slice_horizon(seafloor)
z -= pad
if value is not None:
color = self.im.cmap(self.im.norm(value))
collection = self.ax.fill_between(dist, self.zmin, z, facecolor=color,
edgecolor='none')
return collection
def mark_intersection(self, *args, **kwargs):
"""
Plot a vertical line on this section marking its intersection with or
nearest approach to another cross section (or other linear feature).
Parameters:
-----------
Either another Section object or x and y coordinates of a linear
feature
Additional keyword arguments are passed onto matplotlib's
``axvline``.
Returns:
--------
A matplotlib Line2D artist.
Examples:
---------
Plot the intersection with another Section as a solid blue line.
>>> self.mark_intersection(other_section)
Plot the intersection with another linear feature as a dashed red
line.
>>> self.mark_intersection([x0,x1,x2], [y0,y1,y2], linestyle='--',
... color='red')
"""
if len(args) == 1:
other = args[0]
x, y = other.x, other.y
elif len(args) == 2:
x, y = args
else:
raise ValueError('Input must be either another section or x, y')
x, _ = self.project_onto(x, y)
return self.ax.axvline(x, **kwargs)
def label_endpoints(self, template='XL: {}, IL: {}', **kwargs):
"""
Plot labeled endpoint coordinates on the section.
Additional keyword arguments are passed on to `annotate`, which allows
the position, style, etc of the labels to be controlled. By default,
the labels will be something like "<-- XL: x0, IL: y0" and
"XL: x1, IL: y1 -->", with arrows pointing to each lower corner.
Parameters:
-----------
template : The formatting template for the endpoint coords.
Additional keyword arguments are passed on to `annotate`
Returns:
--------
A 2-item list of matplotlib Annotation objects
"""
kwargs.pop('xy', None)
kwargs['textcoords'] = kwargs.get('textcoords', 'offset points')
kwargs['xycoords'] = kwargs.get('xycoords', 'axes fraction')
kwargs['xytext'] = kwargs.get('xytext', (15, 15))
kwargs['arrowprops'] = kwargs.get('arrowprops', dict(arrowstyle='->'))
self.endpoint_labels = []
for limit, position in zip(self.ax.get_xlim(), ['left', 'right']):
x, y = self.line.interpolate(limit).coords[0]
kwargs['ha'] = position
            if position == 'left':
                kwargs['xy'] = (0, 0)
            if position == 'right':
                kwargs['xy'] = (1, 0)
xtext, ytext = kwargs['xytext']
kwargs['xytext'] = (-xtext, ytext)
anno = self.ax.annotate(template.format(x,y), **kwargs)
self.endpoint_labels.append(anno)
return self.endpoint_labels
def dip_rose(self, pos=(1, 1), values=None, width=40, **kwargs):
"""
Plot a dip rose on the section.
Parameters
----------
`pos` : Position given as a tuple of x,y in axes fraction. The
default position is (1,1) -- The upper-right corner of the plot.
`values` : A sequence of dips to plot. Defaults to 15 degree
increments between 0 and 75 (inclusive).
`width` : The length of the 0 degree bar in points. The default is
40 points.
Additional keyword arguments are passed on to `annotate`.
Returns
-------
A sequence of annotation objects.
"""
def apparent_dip(theta, ve):
theta = np.radians(theta)
dx, dy = np.cos(theta), np.sin(theta)
return np.arctan(ve * dy / dx)
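        # apparent_dip computes arctan(VE * tan(theta)): the dip angle as it
        # appears on a section drawn with vertical exaggeration VE.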
ve = self.ax.get_aspect()
if values is None:
values = range(0, 90, 15)
x, y = pos
dx = -1 if x > 0.5 else 1
dy = -1 if y > 0.5 else 1
artists = []
for theta in values:
app_theta = apparent_dip(theta, ve)
x = width * np.cos(app_theta)
y = width * np.sin(app_theta)
ha = {1:'left', -1:'right'}[dx]
props = dict(xy=pos, xytext=(dx * x, dy * y),
xycoords='axes fraction', textcoords='offset points',
va='center', ha=ha, rotation_mode='anchor',
rotation=dx*dy*np.degrees(app_theta),
arrowprops=dict(arrowstyle='-', shrinkA=0, shrinkB=4))
kwargs.update(props)
artists.append(self.ax.annotate(r'%i$^{\circ}$' % theta, **kwargs))
return artists
def project_onto(self, *args):
"""
Returns the distance along the section to the point/line defined by
        *x*, *y* and the minimum distance between it and the section.
"""
if len(args) == 1:
# Assume a shapely geometry has been passed in
other = args[0]
else:
try:
x, y = args
except ValueError:
raise ValueError('Expecting a shapely geometry or x and y!')
# Try to build a shapely geometry from x, y
try:
length = len(x)
except TypeError:
length = 1
if length > 1:
other = shapely.geometry.LineString(zip(x, y))
other = self.line.intersection(other)
else:
other = shapely.geometry.Point(x, y)
position = self.line.project(other, normalized=True)
total_distance = np.hypot(self.dxw * np.diff(self.x),
self.dyw * np.diff(self.y)).sum()
return position * total_distance, self.line.distance(other)
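    # Usage sketch for project_onto ('sec' being a Section instance; the
    # coordinates are illustrative):
    #
    #   dist, sep = sec.project_onto(120, 240)
    #
    # 'dist' is the along-section distance of the profile point closest to
    # (120, 240) and 'sep' is the minimum separation from the profile line.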
def plot_horizons(self, hor_set):
self.horizon_lines = hor_set.plot(self)
def slice_horizon(self, hor):
"""Slices a geoprobe horizon along the section line. Returns a sequence
of distances along the section and a sequence of z-values."""
# Get only the portions of the section inside the horizon extents
try:
bounds = hor.bounds
except AttributeError:
hor.bounds = utilities.extents_to_poly(*hor.grid_extents)
bounds = hor.bounds
if not self.line.intersects(bounds):
return np.array([]), np.array([])
inside = self.line.intersection(bounds)
x, y = inside.xy
# Extract the section
xmin, xmax, ymin, ymax = hor.grid_extents
x, y = x - xmin, y - ymin
z, xi, yi = geoprobe.utilities.extract_section(hor.grid.T, x, y)
        # Convert the distances back into distances along the full section line
start, _ = self.project_onto(xi[0]+xmin, yi[0]+ymin)
distance = np.hypot(self.dxw * np.diff(xi),
self.dyw * np.diff(yi))
z = np.ma.squeeze(z)
distance = np.cumsum(np.r_[start, distance])
distance = np.ma.masked_array(distance, z.mask)
return distance, z
def make_raw_image(self, data):
if self.resample_factor != 1:
data = scipy.ndimage.interpolation.zoom(data,
(1, self.resample_factor),
output=data.dtype, order=1, prefilter=False)
return data.T
def save(self, filename=None, **kwargs):
if filename is None:
filename = self.name + '.png'
fig = self.ax.figure
fig.savefig(filename, **kwargs)
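# A minimal usage sketch for Section (assumptions: 'seismic.vol' is a valid
# geoprobe volume on disk and matplotlib is available; the filename and
# coordinates are purely illustrative):
#
#   import matplotlib.pyplot as plt
#   vol = geoprobe.volume('seismic.vol')
#   fig, ax = plt.subplots()
#   sec = Section(vol, [100, 250, 400], [200, 220, 260], ax, plt.cm.gray)
#   sec.plot_image()
#   sec.plot_scalebar(length=500, title='500 m')
#   plt.show()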
class Section2D(Section):
"""
A cross section from a 2D line instead of extracted from a 3D volume.
"""
def __init__(self, line, ax, colormap, dx=1.0, dy=1.0, zmin=None, zmax=None,
ve=2.0, name='2D Section', resample_factor=2):
self.dxw, self.dyw = dx, dy
self.x, self.y = line.x, line.y
self.line2d = line
self.ax = ax
if zmin is None:
zmin = self.line2d.zmin
if zmax is None:
zmax = self.line2d.zmax
self.zmin = max(self.line2d.zmin, zmin)
self.zmax = min(self.line2d.zmax, zmax)
self.ve = ve
self.colormap = colormap
self.resample_factor = resample_factor
self.name = name
def extract_section(self):
data = self.line2d.scaled_data
zstart = np.searchsorted(self.line2d.z, self.zmin)
zstop = np.searchsorted(self.line2d.z, self.zmax)
data = data[:, zstart:zstop]
data = self.make_raw_image(data)
distance = self.calculate_distance_along_section(self.x, self.y)
extent = [distance.min(), distance.max(), self.zmin, self.zmax]
return data, extent
class CoRenderedSection(Section):
"""
    A cross section with a second data volume (usually a coherence/semblance
    volume) blended with the seismic data. This essentially treats the coherence
    data as "topography" and overlays a "hillshade" effect on top of the
    original data."""
def __init__(self, vol, coherence_vol, x, y, ax, colormap, **kwargs):
"""
Identical to initializing a Section instance, except for one argument.
In addition to ``vol``, this takes a second geoprobe volume
(``coherence_vol``) to use as the co-rendered data.
"""
self.coherence_vol = coherence_vol
Section.__init__(self, vol, x, y, ax, colormap, **kwargs)
def _make_rgb(self, data, mode='overlay', fraction=0.85, mask=None):
coh_data, coh_extent = self.extract_section(self.coherence_vol)
if mask is None:
mask = coh_data < (coh_data.mean() + 0.5 * coh_data.std())
shader = utilities.Shader(azdeg=90)
rgb = shader.shade(data, coh_data, self.colormap, mask=mask,
fraction=fraction, mode=mode)
return rgb
def plot_image(self, mode='overlay', fraction=0.85, mask=None, **kwargs):
data, extent = self.extract_section()
rgb = self._make_rgb(data, mode, fraction, mask)
self.im = self.ax.imshow(rgb, origin='lower', extent=extent,
interpolation='bilinear', aspect=self.ve, **kwargs)
if not self.ax.yaxis_inverted():
self.ax.invert_yaxis()
return self.im
class SketchSection(Section):
"""
A cross section with the original data filtered to appear similar to a line
drawing.
"""
def plot_image(self, radius=4, **kwargs):
data, extent = self.extract_section()
rgb = self.colormap(data.astype(float) / 255)
rgb = self.sketch_filter(rgb, radius)
self.im = self.ax.imshow(rgb, origin='lower', extent=extent,
interpolation='bilinear', aspect=self.ve, **kwargs)
if not self.ax.yaxis_inverted():
self.ax.invert_yaxis()
return self.im
    def sketch_filter(self, rgb, radius=4):
        # Desaturate the input image.
        hsv = mpl.colors.rgb_to_hsv(rgb[:,:,:3])
        hsv[:,:,1] = 0
        rgb = mpl.colors.hsv_to_rgb(hsv)
        original = rgb
        # Blur, invert, and blend with the original: a "color dodge" style
        # filter that keeps mostly edges, giving a line-drawing look.
        blur = scipy.ndimage.gaussian_filter(rgb, (radius, radius, 0))
        blur = 1 - blur
        rgb = 0.5 * rgb + 0.5 * blur
        rgb = original[:,:,:3] / (1 - rgb)
        # Desaturate again to suppress color artifacts from the division.
        hsv = mpl.colors.rgb_to_hsv(rgb.clip(0, 1))
        hsv[:,:,1] = 0
        return mpl.colors.hsv_to_rgb(hsv)
| |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import six
from heat.common import exception
from heat.engine import node_data
from heat.engine.resources.openstack.heat import resource_chain
from heat.engine import rsrc_defn
from heat.objects import service as service_objects
from heat.tests import common
from heat.tests import utils
RESOURCE_PROPERTIES = {
'group': 'test-group',
}
TEMPLATE = {
'heat_template_version': '2016-04-08',
'resources': {
'test-chain': {
'type': 'OS::Heat::ResourceChain',
'properties': {
'resources': ['OS::Heat::SoftwareConfig',
'OS::Heat::StructuredConfig'],
'concurrent': False,
'resource_properties': RESOURCE_PROPERTIES,
}
}
}
}
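# For reference: with 'concurrent': False, the chain's child template serializes
# the resources by adding depends_on links, roughly like this (a sketch; the
# exact structure is asserted in the tests below):
#
#   {'heat_template_version': '2015-04-30',
#    'resources': {
#        '0': {'type': 'OS::Heat::SoftwareConfig',
#              'properties': RESOURCE_PROPERTIES},
#        '1': {'type': 'OS::Heat::StructuredConfig',
#              'properties': RESOURCE_PROPERTIES,
#              'depends_on': ['0']}}}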
class ResourceChainTest(common.HeatTestCase):
def setUp(self):
super(ResourceChainTest, self).setUp()
self.stack = None # hold on to stack to prevent weakref cleanup
def test_child_template_without_concurrency(self):
# Test
chain = self._create_chain(TEMPLATE)
child_template = chain.child_template()
# Verify
tmpl = child_template.t
self.assertEqual('2015-04-30', tmpl['heat_template_version'])
self.assertEqual(2, len(child_template.t['resources']))
resource = tmpl['resources']['0']
self.assertEqual('OS::Heat::SoftwareConfig', resource['type'])
self.assertEqual(RESOURCE_PROPERTIES, resource['properties'])
self.assertNotIn('depends_on', resource)
resource = tmpl['resources']['1']
self.assertEqual('OS::Heat::StructuredConfig', resource['type'])
self.assertEqual(RESOURCE_PROPERTIES, resource['properties'])
self.assertEqual(['0'], resource['depends_on'])
@mock.patch.object(service_objects.Service, 'active_service_count')
def test_child_template_with_concurrent(self, mock_count):
# Setup
tmpl_def = copy.deepcopy(TEMPLATE)
tmpl_def['resources']['test-chain']['properties']['concurrent'] = True
chain = self._create_chain(tmpl_def)
mock_count.return_value = 5
# Test
child_template = chain.child_template()
# Verify
# Trimmed down version of above that just checks the depends_on
# isn't present
tmpl = child_template.t
resource = tmpl['resources']['0']
self.assertNotIn('depends_on', resource)
resource = tmpl['resources']['1']
self.assertNotIn('depends_on', resource)
@mock.patch.object(service_objects.Service, 'active_service_count')
def test_child_template_with_concurrent_limit(self, mock_count):
tmpl_def = copy.deepcopy(TEMPLATE)
tmpl_def['resources']['test-chain']['properties']['concurrent'] = True
tmpl_def['resources']['test-chain']['properties']['resources'] = [
'OS::Heat::SoftwareConfig', 'OS::Heat::StructuredConfig',
'OS::Heat::SoftwareConfig', 'OS::Heat::StructuredConfig']
chain = self._create_chain(tmpl_def)
mock_count.return_value = 2
child_template = chain.child_template()
tmpl = child_template.t
resource = tmpl['resources']['0']
self.assertNotIn('depends_on', resource)
resource = tmpl['resources']['1']
self.assertNotIn('depends_on', resource)
resource = tmpl['resources']['2']
self.assertEqual(['0'], resource['depends_on'])
resource = tmpl['resources']['3']
self.assertEqual(['1'], resource['depends_on'])
def test_child_template_default_concurrent(self):
# Setup
tmpl_def = copy.deepcopy(TEMPLATE)
tmpl_def['resources']['test-chain']['properties'].pop('concurrent')
chain = self._create_chain(tmpl_def)
# Test
child_template = chain.child_template()
# Verify
# Trimmed down version of above that just checks the depends_on
# isn't present
tmpl = child_template.t
resource = tmpl['resources']['0']
self.assertNotIn('depends_on', resource)
resource = tmpl['resources']['1']
self.assertEqual(['0'], resource['depends_on'])
def test_child_template_empty_resource_list(self):
# Setup
tmpl_def = copy.deepcopy(TEMPLATE)
tmpl_def['resources']['test-chain']['properties']['resources'] = []
chain = self._create_chain(tmpl_def)
# Test
child_template = chain.child_template()
# Verify
tmpl = child_template.t
# No error, but no resources to create
self.assertNotIn('resources', tmpl)
# Sanity check that it's actually a template
self.assertIn('heat_template_version', tmpl)
def test_validate_nested_stack(self):
# Test - should not raise exception
chain = self._create_chain(TEMPLATE)
chain.validate_nested_stack()
def test_validate_reference_attr_with_none_ref(self):
chain = self._create_chain(TEMPLATE)
self.patchobject(chain, 'referenced_attrs',
return_value=set([('config', None)]))
self.assertIsNone(chain.validate())
def test_validate_incompatible_properties(self):
# Tests a resource in the chain that does not support the properties
# specified to each resource.
# Setup
tmpl_def = copy.deepcopy(TEMPLATE)
tmpl_res_prop = tmpl_def['resources']['test-chain']['properties']
res_list = tmpl_res_prop['resources']
res_list.append('OS::Heat::RandomString')
# Test
chain = self._create_chain(tmpl_def)
try:
chain.validate_nested_stack()
self.fail('Exception expected')
except exception.StackValidationFailed as e:
self.assertEqual('property error: '
'resources.test<nested_stack>.resources[2].'
'properties: unknown property group',
e.message.lower())
def test_validate_fake_resource_type(self):
# Setup
tmpl_def = copy.deepcopy(TEMPLATE)
tmpl_res_prop = tmpl_def['resources']['test-chain']['properties']
res_list = tmpl_res_prop['resources']
res_list.append('foo')
# Test
chain = self._create_chain(tmpl_def)
try:
chain.validate_nested_stack()
self.fail('Exception expected')
except exception.StackValidationFailed as e:
self.assertIn('could not be found', e.message.lower())
self.assertIn('foo', e.message)
@mock.patch.object(resource_chain.ResourceChain, 'create_with_template')
def test_handle_create(self, mock_create):
# Tests the handle create is propagated upwards with the
# child template.
# Setup
chain = self._create_chain(TEMPLATE)
# Test
chain.handle_create()
# Verify
expected_tmpl = chain.child_template()
mock_create.assert_called_once_with(expected_tmpl)
@mock.patch.object(resource_chain.ResourceChain, 'update_with_template')
def test_handle_update(self, mock_update):
# Test the handle update is propagated upwards with the child
# template.
# Setup
chain = self._create_chain(TEMPLATE)
# Test
json_snippet = rsrc_defn.ResourceDefinition(
'test-chain', 'OS::Heat::ResourceChain',
TEMPLATE['resources']['test-chain']['properties'])
chain.handle_update(json_snippet, None, None)
# Verify
expected_tmpl = chain.child_template()
mock_update.assert_called_once_with(expected_tmpl)
def test_child_params(self):
chain = self._create_chain(TEMPLATE)
self.assertEqual({}, chain.child_params())
def _create_chain(self, t):
self.stack = utils.parse_stack(t)
snip = self.stack.t.resource_definitions(self.stack)['test-chain']
chain = resource_chain.ResourceChain('test', snip, self.stack)
return chain
def test_get_attribute_convg(self):
cache_data = {'test-chain': node_data.NodeData.from_dict({
'uuid': mock.ANY,
'id': mock.ANY,
'action': 'CREATE',
'status': 'COMPLETE',
'attrs': {'refs': ['rsrc1', 'rsrc2']}
})}
stack = utils.parse_stack(TEMPLATE, cache_data=cache_data)
rsrc = stack.defn['test-chain']
self.assertEqual(['rsrc1', 'rsrc2'], rsrc.FnGetAtt('refs'))
class ResourceChainAttrTest(common.HeatTestCase):
def test_aggregate_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
chain = self._create_dummy_stack()
expected = ['0', '1']
self.assertEqual(expected, chain.FnGetAtt('foo'))
self.assertEqual(expected, chain.FnGetAtt('Foo'))
def test_index_dotted_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
chain = self._create_dummy_stack()
self.assertEqual('0', chain.FnGetAtt('resource.0.Foo'))
self.assertEqual('1', chain.FnGetAtt('resource.1.Foo'))
def test_index_path_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
chain = self._create_dummy_stack()
self.assertEqual('0', chain.FnGetAtt('resource.0', 'Foo'))
self.assertEqual('1', chain.FnGetAtt('resource.1', 'Foo'))
def test_index_deep_path_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
chain = self._create_dummy_stack(expect_attrs={'0': 2, '1': 3})
self.assertEqual(2, chain.FnGetAtt('resource.0',
'nested_dict', 'dict', 'b'))
self.assertEqual(3, chain.FnGetAtt('resource.1',
'nested_dict', 'dict', 'b'))
def test_aggregate_deep_path_attribs(self):
"""Test attribute aggregation.
Test attribute aggregation and that we mimic the nested resource's
attributes.
"""
chain = self._create_dummy_stack(expect_attrs={'0': 3, '1': 3})
expected = [3, 3]
self.assertEqual(expected, chain.FnGetAtt('nested_dict', 'list', 2))
def test_aggregate_refs(self):
"""Test resource id aggregation."""
chain = self._create_dummy_stack()
expected = ['ID-0', 'ID-1']
self.assertEqual(expected, chain.FnGetAtt("refs"))
def test_aggregate_refs_with_index(self):
"""Test resource id aggregation with index."""
chain = self._create_dummy_stack()
expected = ['ID-0', 'ID-1']
self.assertEqual(expected[0], chain.FnGetAtt("refs", 0))
self.assertEqual(expected[1], chain.FnGetAtt("refs", 1))
self.assertIsNone(chain.FnGetAtt("refs", 2))
def test_aggregate_outputs(self):
"""Test outputs aggregation."""
expected = {'0': ['foo', 'bar'], '1': ['foo', 'bar']}
chain = self._create_dummy_stack(expect_attrs=expected)
self.assertEqual(expected, chain.FnGetAtt('attributes', 'list'))
def test_aggregate_outputs_no_path(self):
"""Test outputs aggregation with missing path."""
chain = self._create_dummy_stack()
self.assertRaises(exception.InvalidTemplateAttribute,
chain.FnGetAtt, 'attributes')
def test_index_refs(self):
"""Tests getting ids of individual resources."""
chain = self._create_dummy_stack()
self.assertEqual("ID-0", chain.FnGetAtt('resource.0'))
self.assertEqual("ID-1", chain.FnGetAtt('resource.1'))
ex = self.assertRaises(exception.NotFound, chain.FnGetAtt,
'resource.2')
self.assertIn("Member '2' not found in group resource 'test'",
six.text_type(ex))
def _create_dummy_stack(self, expect_count=2, expect_attrs=None):
self.stack = utils.parse_stack(TEMPLATE)
snip = self.stack.t.resource_definitions(self.stack)['test-chain']
chain = resource_chain.ResourceChain('test', snip, self.stack)
attrs = {}
refids = {}
if expect_attrs is None:
expect_attrs = {}
for index in range(expect_count):
res = str(index)
attrs[index] = expect_attrs.get(res, res)
refids[index] = 'ID-%s' % res
names = [str(name) for name in range(expect_count)]
chain._resource_names = mock.Mock(return_value=names)
self._stub_get_attr(chain, refids, attrs)
return chain
def _stub_get_attr(self, chain, refids, attrs):
def ref_id_fn(res_name):
return refids[int(res_name)]
def attr_fn(args):
res_name = args[0]
return attrs[int(res_name)]
def get_output(output_name):
outputs = chain._nested_output_defns(chain._resource_names(),
attr_fn, ref_id_fn)
op_defns = {od.name: od for od in outputs}
if output_name not in op_defns:
raise exception.NotFound('Specified output key %s not found.' %
output_name)
return op_defns[output_name].get_value()
orig_get_attr = chain.FnGetAtt
def get_attr(attr_name, *path):
if not path:
attr = attr_name
else:
attr = (attr_name,) + path
            # Mock referenced_attrs() so that _nested_output_defns()
# will include the output required for this attribute
chain.referenced_attrs = mock.Mock(return_value=[attr])
# Pass through to actual function under test
return orig_get_attr(attr_name, *path)
chain.FnGetAtt = mock.Mock(side_effect=get_attr)
chain.get_output = mock.Mock(side_effect=get_output)
class ResourceChainAttrFallbackTest(ResourceChainAttrTest):
def _stub_get_attr(self, chain, refids, attrs):
# Raise NotFound when getting output, to force fallback to old-school
# grouputils functions
chain.get_output = mock.Mock(side_effect=exception.NotFound)
def make_fake_res(idx):
fr = mock.Mock()
fr.stack = chain.stack
fr.FnGetRefId.return_value = refids[idx]
fr.FnGetAtt.return_value = attrs[idx]
return fr
fake_res = {str(i): make_fake_res(i) for i in refids}
chain.nested = mock.Mock(return_value=fake_res)
| |
from __future__ import print_function, unicode_literals
import frappe
import pytz
from frappe import _
from frappe.auth import LoginManager
from oauthlib.oauth2.rfc6749.tokens import BearerToken
from oauthlib.oauth2.rfc6749.grant_types import AuthorizationCodeGrant, ImplicitGrant, ResourceOwnerPasswordCredentialsGrant, ClientCredentialsGrant, RefreshTokenGrant
from oauthlib.oauth2 import RequestValidator
from oauthlib.oauth2.rfc6749.endpoints.authorization import AuthorizationEndpoint
from oauthlib.oauth2.rfc6749.endpoints.token import TokenEndpoint
from oauthlib.oauth2.rfc6749.endpoints.resource import ResourceEndpoint
from oauthlib.oauth2.rfc6749.endpoints.revocation import RevocationEndpoint
from oauthlib.common import Request
from six.moves.urllib.parse import parse_qs, urlparse, unquote
def get_url_delimiter(separator_character=" "):
return separator_character
class WebApplicationServer(AuthorizationEndpoint, TokenEndpoint, ResourceEndpoint,
RevocationEndpoint):
"""An all-in-one endpoint featuring Authorization code grant and Bearer tokens."""
def __init__(self, request_validator, token_generator=None,
token_expires_in=None, refresh_token_generator=None, **kwargs):
"""Construct a new web application server.
:param request_validator: An implementation of
oauthlib.oauth2.RequestValidator.
:param token_expires_in: An int or a function to generate a token
expiration offset (in seconds) given a
oauthlib.common.Request object.
:param token_generator: A function to generate a token from a request.
:param refresh_token_generator: A function to generate a token from a
request for the refresh token.
:param kwargs: Extra parameters to pass to authorization-,
token-, resource-, and revocation-endpoint constructors.
"""
implicit_grant = ImplicitGrant(request_validator)
auth_grant = AuthorizationCodeGrant(request_validator)
refresh_grant = RefreshTokenGrant(request_validator)
resource_owner_password_credentials_grant = ResourceOwnerPasswordCredentialsGrant(request_validator)
bearer = BearerToken(request_validator, token_generator,
token_expires_in, refresh_token_generator)
AuthorizationEndpoint.__init__(self, default_response_type='code',
response_types={
'code': auth_grant,
'token': implicit_grant
},
default_token_type=bearer)
TokenEndpoint.__init__(self, default_grant_type='authorization_code',
grant_types={
'authorization_code': auth_grant,
'refresh_token': refresh_grant,
'password': resource_owner_password_credentials_grant
},
default_token_type=bearer)
ResourceEndpoint.__init__(self, default_token='Bearer',
token_types={'Bearer': bearer})
RevocationEndpoint.__init__(self, request_validator)
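# A minimal wiring sketch (hypothetical; the HTTP endpoints that drive this
# server live elsewhere in the app):
#
#   validator = OAuthWebRequestValidator()
#   server = WebApplicationServer(validator, token_expires_in=3600)
#   # The /authorize and /token handlers can then delegate to
#   # server.create_authorization_response(...) and
#   # server.create_token_response(...) respectively.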
class OAuthWebRequestValidator(RequestValidator):
# Pre- and post-authorization.
def validate_client_id(self, client_id, request, *args, **kwargs):
# Simple validity check, does client exist? Not banned?
cli_id = frappe.db.get_value("OAuth Client",{ "name":client_id })
if cli_id:
request.client = frappe.get_doc("OAuth Client", client_id).as_dict()
return True
else:
return False
def validate_redirect_uri(self, client_id, redirect_uri, request, *args, **kwargs):
# Is the client allowed to use the supplied redirect_uri? i.e. has
# the client previously registered this EXACT redirect uri.
redirect_uris = frappe.db.get_value("OAuth Client", client_id, 'redirect_uris').split(get_url_delimiter())
if redirect_uri in redirect_uris:
return True
else:
return False
def get_default_redirect_uri(self, client_id, request, *args, **kwargs):
# The redirect used if none has been supplied.
# Prefer your clients to pre register a redirect uri rather than
# supplying one on each authorization request.
redirect_uri = frappe.db.get_value("OAuth Client", client_id, 'default_redirect_uri')
return redirect_uri
def validate_scopes(self, client_id, scopes, client, request, *args, **kwargs):
# Is the client allowed to access the requested scopes?
client_scopes = frappe.db.get_value("OAuth Client", client_id, 'scopes').split(get_url_delimiter())
are_scopes_valid = True
for scp in scopes:
            are_scopes_valid = are_scopes_valid and (scp in client_scopes)
return are_scopes_valid
def get_default_scopes(self, client_id, request, *args, **kwargs):
# Scopes a client will authorize for if none are supplied in the
# authorization request.
scopes = frappe.db.get_value("OAuth Client", client_id, 'scopes').split(get_url_delimiter())
request.scopes = scopes #Apparently this is possible.
return scopes
def validate_response_type(self, client_id, response_type, client, request, *args, **kwargs):
# Clients should only be allowed to use one type of response type, the
# one associated with their one allowed grant type.
# In this case it must be "code".
allowed_response_types = [client.response_type.lower(),
"code token", "code id_token", "code token id_token",
"code+token", "code+id_token", "code+token id_token"]
return (response_type in allowed_response_types)
# Post-authorization
def save_authorization_code(self, client_id, code, request, *args, **kwargs):
cookie_dict = get_cookie_dict_from_headers(request)
oac = frappe.new_doc('OAuth Authorization Code')
oac.scopes = get_url_delimiter().join(request.scopes)
oac.redirect_uri_bound_to_authorization_code = request.redirect_uri
oac.client = client_id
oac.user = unquote(cookie_dict['user_id'])
oac.authorization_code = code['code']
oac.save(ignore_permissions=True)
frappe.db.commit()
def authenticate_client(self, request, *args, **kwargs):
cookie_dict = get_cookie_dict_from_headers(request)
#Get ClientID in URL
if request.client_id:
oc = frappe.get_doc("OAuth Client", request.client_id)
else:
#Extract token, instantiate OAuth Bearer Token and use clientid from there.
if "refresh_token" in frappe.form_dict:
oc = frappe.get_doc("OAuth Client", frappe.db.get_value("OAuth Bearer Token", {"refresh_token": frappe.form_dict["refresh_token"]}, 'client'))
elif "token" in frappe.form_dict:
oc = frappe.get_doc("OAuth Client", frappe.db.get_value("OAuth Bearer Token", frappe.form_dict["token"], 'client'))
else:
oc = frappe.get_doc("OAuth Client", frappe.db.get_value("OAuth Bearer Token", frappe.get_request_header("Authorization").split(" ")[1], 'client'))
try:
request.client = request.client or oc.as_dict()
except Exception as e:
print("Failed body authentication: Application %s does not exist".format(cid=request.client_id))
return frappe.session.user == unquote(cookie_dict.get('user_id', "Guest"))
def authenticate_client_id(self, client_id, request, *args, **kwargs):
cli_id = frappe.db.get_value('OAuth Client', client_id, 'name')
if not cli_id:
# Don't allow public (non-authenticated) clients
return False
else:
request["client"] = frappe.get_doc("OAuth Client", cli_id)
return True
def validate_code(self, client_id, code, client, request, *args, **kwargs):
# Validate the code belongs to the client. Add associated scopes,
# state and user to request.scopes and request.user.
validcodes = frappe.get_all("OAuth Authorization Code", filters={"client": client_id, "validity": "Valid"})
checkcodes = []
for vcode in validcodes:
checkcodes.append(vcode["name"])
if code in checkcodes:
request.scopes = frappe.db.get_value("OAuth Authorization Code", code, 'scopes').split(get_url_delimiter())
request.user = frappe.db.get_value("OAuth Authorization Code", code, 'user')
return True
else:
return False
def confirm_redirect_uri(self, client_id, code, redirect_uri, client, *args, **kwargs):
saved_redirect_uri = frappe.db.get_value('OAuth Client', client_id, 'default_redirect_uri')
return saved_redirect_uri == redirect_uri
def validate_grant_type(self, client_id, grant_type, client, request, *args, **kwargs):
# Clients should only be allowed to use one type of grant.
# In this case, it must be "authorization_code" or "refresh_token"
return (grant_type in ["authorization_code", "refresh_token", "password"])
def save_bearer_token(self, token, request, *args, **kwargs):
# Remember to associate it with request.scopes, request.user and
# request.client. The two former will be set when you validate
# the authorization code. Don't forget to save both the
# access_token and the refresh_token and set expiration for the
# access_token to now + expires_in seconds.
otoken = frappe.new_doc("OAuth Bearer Token")
otoken.client = request.client['name']
try:
otoken.user = request.user if request.user else frappe.db.get_value("OAuth Bearer Token", {"refresh_token":request.body.get("refresh_token")}, "user")
except Exception as e:
otoken.user = frappe.session.user
otoken.scopes = get_url_delimiter().join(request.scopes)
otoken.access_token = token['access_token']
otoken.refresh_token = token.get('refresh_token')
otoken.expires_in = token['expires_in']
otoken.save(ignore_permissions=True)
frappe.db.commit()
default_redirect_uri = frappe.db.get_value("OAuth Client", request.client['name'], "default_redirect_uri")
return default_redirect_uri
def invalidate_authorization_code(self, client_id, code, request, *args, **kwargs):
        # Authorization codes are single-use; invalidate the code once a
        # Bearer token has been acquired.
frappe.db.set_value("OAuth Authorization Code", code, "validity", "Invalid")
frappe.db.commit()
# Protected resource request
def validate_bearer_token(self, token, scopes, request):
# Remember to check expiration and scope membership
otoken = frappe.get_doc("OAuth Bearer Token", token)
token_expiration_local = otoken.expiration_time.replace(tzinfo=pytz.timezone(frappe.utils.get_time_zone()))
token_expiration_utc = token_expiration_local.astimezone(pytz.utc)
is_token_valid = (frappe.utils.datetime.datetime.utcnow().replace(tzinfo=pytz.utc) < token_expiration_utc) \
and otoken.status != "Revoked"
client_scopes = frappe.db.get_value("OAuth Client", otoken.client, 'scopes').split(get_url_delimiter())
are_scopes_valid = True
for scp in scopes:
            are_scopes_valid = are_scopes_valid and (scp in client_scopes)
return is_token_valid and are_scopes_valid
# Token refresh request
def get_original_scopes(self, refresh_token, request, *args, **kwargs):
# Obtain the token associated with the given refresh_token and
# return its scopes, these will be passed on to the refreshed
# access token if the client did not specify a scope during the
# request.
obearer_token = frappe.get_doc("OAuth Bearer Token", {"refresh_token": refresh_token})
return obearer_token.scopes
def revoke_token(self, token, token_type_hint, request, *args, **kwargs):
"""Revoke an access or refresh token.
:param token: The token string.
:param token_type_hint: access_token or refresh_token.
:param request: The HTTP Request (oauthlib.common.Request)
Method is used by:
- Revocation Endpoint
"""
otoken = None
if token_type_hint == "access_token":
otoken = frappe.db.set_value("OAuth Bearer Token", token, 'status', 'Revoked')
elif token_type_hint == "refresh_token":
otoken = frappe.db.set_value("OAuth Bearer Token", {"refresh_token": token}, 'status', 'Revoked')
else:
otoken = frappe.db.set_value("OAuth Bearer Token", token, 'status', 'Revoked')
frappe.db.commit()
def validate_refresh_token(self, refresh_token, client, request, *args, **kwargs):
# """Ensure the Bearer token is valid and authorized access to scopes.
# OBS! The request.user attribute should be set to the resource owner
# associated with this refresh token.
# :param refresh_token: Unicode refresh token
# :param client: Client object set by you, see authenticate_client.
# :param request: The HTTP Request (oauthlib.common.Request)
# :rtype: True or False
# Method is used by:
# - Authorization Code Grant (indirectly by issuing refresh tokens)
# - Resource Owner Password Credentials Grant (also indirectly)
# - Refresh Token Grant
# """
otoken = frappe.get_doc("OAuth Bearer Token", {"refresh_token": refresh_token, "status": "Active"})
if not otoken:
return False
else:
return True
# OpenID Connect
def get_id_token(self, token, token_handler, request):
"""
In the OpenID Connect workflows when an ID Token is requested this method is called.
Subclasses should implement the construction, signing and optional encryption of the
ID Token as described in the OpenID Connect spec.
In addition to the standard OAuth2 request properties, the request may also contain
these OIDC specific properties which are useful to this method:
- nonce, if workflow is implicit or hybrid and it was provided
- claims, if provided to the original Authorization Code request
The token parameter is a dict which may contain an ``access_token`` entry, in which
case the resulting ID Token *should* include a calculated ``at_hash`` claim.
Similarly, when the request parameter has a ``code`` property defined, the ID Token
*should* include a calculated ``c_hash`` claim.
http://openid.net/specs/openid-connect-core-1_0.html (sections `3.1.3.6`_, `3.2.2.10`_, `3.3.2.11`_)
.. _`3.1.3.6`: http://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
.. _`3.2.2.10`: http://openid.net/specs/openid-connect-core-1_0.html#ImplicitIDToken
.. _`3.3.2.11`: http://openid.net/specs/openid-connect-core-1_0.html#HybridIDToken
:param token: A Bearer token dict
:param token_handler: the token handler (BearerToken class)
:param request: the HTTP Request (oauthlib.common.Request)
:return: The ID Token (a JWS signed JWT)
"""
# the request.scope should be used by the get_id_token() method to determine which claims to include in the resulting id_token
def validate_silent_authorization(self, request):
"""Ensure the logged in user has authorized silent OpenID authorization.
Silent OpenID authorization allows access tokens and id tokens to be
granted to clients without any user prompt or interaction.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- OpenIDConnectAuthCode
- OpenIDConnectImplicit
- OpenIDConnectHybrid
"""
if request.prompt == "login":
False
else:
True
def validate_silent_login(self, request):
"""Ensure session user has authorized silent OpenID login.
If no user is logged in or has not authorized silent login, this
method should return False.
If the user is logged in but associated with multiple accounts and
not selected which one to link to the token then this method should
raise an oauthlib.oauth2.AccountSelectionRequired error.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- OpenIDConnectAuthCode
- OpenIDConnectImplicit
- OpenIDConnectHybrid
"""
if frappe.session.user == "Guest" or request.prompt.lower() == "login":
return False
else:
return True
def validate_user_match(self, id_token_hint, scopes, claims, request):
"""Ensure client supplied user id hint matches session user.
If the sub claim or id_token_hint is supplied then the session
user must match the given ID.
:param id_token_hint: User identifier string.
:param scopes: List of OAuth 2 scopes and OpenID claims (strings).
:param claims: OpenID Connect claims dict.
:param request: The HTTP Request (oauthlib.common.Request)
:rtype: True or False
Method is used by:
- OpenIDConnectAuthCode
- OpenIDConnectImplicit
- OpenIDConnectHybrid
"""
if id_token_hint and id_token_hint == frappe.db.get_value("User Social Login", {"parent":frappe.session.user, "provider": "frappe"}, "userid"):
return True
else:
return False
def validate_user(self, username, password, client, request, *args, **kwargs):
"""Ensure the username and password is valid.
Method is used by:
- Resource Owner Password Credentials Grant
"""
login_manager = LoginManager()
login_manager.authenticate(username, password)
request.user = login_manager.user
return True
def get_cookie_dict_from_headers(r):
if r.headers.get('Cookie'):
cookie = r.headers.get('Cookie')
cookie = cookie.split("; ")
        cookie_dict = {k: v for k, v in (x.split('=', 1) for x in cookie)}
return cookie_dict
else:
return {}
def calculate_at_hash(access_token, hash_alg):
"""Helper method for calculating an access token
hash, as described in http://openid.net/specs/openid-connect-core-1_0.html#CodeIDToken
Its value is the base64url encoding of the left-most half of the hash of the octets
of the ASCII representation of the access_token value, where the hash algorithm
used is the hash algorithm used in the alg Header Parameter of the ID Token's JOSE
Header. For instance, if the alg is RS256, hash the access_token value with SHA-256,
then take the left-most 128 bits and base64url encode them. The at_hash value is a
case sensitive string.
Args:
access_token (str): An access token string.
hash_alg (callable): A callable returning a hash object, e.g. hashlib.sha256
"""
hash_digest = hash_alg(access_token.encode('utf-8')).digest()
cut_at = int(len(hash_digest) / 2)
truncated = hash_digest[:cut_at]
from jwt.utils import base64url_encode
at_hash = base64url_encode(truncated)
return at_hash.decode('utf-8')
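# Example usage (a sketch): for an RS256-signed ID Token the access token is
# hashed with SHA-256 before truncation, e.g.:
#
#   import hashlib
#   at_hash = calculate_at_hash('some-access-token', hashlib.sha256)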
def delete_oauth2_data():
# Delete Invalid Authorization Code and Revoked Token
commit_code, commit_token = False, False
code_list = frappe.get_all("OAuth Authorization Code", filters={"validity":"Invalid"})
token_list = frappe.get_all("OAuth Bearer Token", filters={"status":"Revoked"})
if len(code_list) > 0:
commit_code = True
if len(token_list) > 0:
commit_token = True
for code in code_list:
frappe.delete_doc("OAuth Authorization Code", code["name"])
for token in token_list:
frappe.delete_doc("OAuth Bearer Token", token["name"])
if commit_code or commit_token:
frappe.db.commit()
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#from __future__ import unicode_literals
from google.appengine.ext.webapp import template
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from google.appengine.runtime import DeadlineExceededError
from google.appengine.api import users
#from webapp2_extras import jinja2
import logging
from os import path
import webapp2 as wa2
from functools import wraps
from jinja2 import Template
from session import SessionVw
import cryptoken
import i18n as i
import utils as u
import models as m
from jinja_boot import Jinja
import json
#import math
#from widget import W
#import utils
#import httplib as http
#import time
#from google.appengine._internal.django.utils.safestring import mark_safe
#from google.appengine.ext import ndb
# handler decorators ------------------------------------
def cookies (fn):
""" checks that the cookie is found
- some failure reasons:
1) there isnt one because: a) its the 1st time this app has been run on the browser
b) the user has deleted our cookie
2) it cant be read because a) the user has disabled cookies on the browser
b) the secure attribute is set but the channel is insecure
3) the user agent does not support cookies eg a webcrawler
redirecting to 'NoCookie' will test again, and for case 1) or 2) it will work this time
"""
def _cookies (h, *pa, **ka): # h is for handler
if h.ssn:
assert isinstance(h.ssn, SessionVw)
if 'rtt' in h.ssn:
return fn (h, *pa, **ka) #ok theres a cookie, so proceed
# no browser cookie so try again with 2 redirects: 1st to no-cookie, 2nd back to original url
h.ssn['lang'] = i.i18n().locale
h.ssn['ts'] = u.msNow()
url = h.request.path_url
qs = h.request.query_string
if qs:
url += u'?' + qs
h.redirect_to('NoCookie', nextUrl=url) # handler will test again
return _cookies
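# Usage sketch (hypothetical handler; real handlers in this module subclass
# H_Base and are named H_*):
#
#   class H_Example (H_Base):
#       @cookies
#       def get (h):
#           h.serve ('example.html')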
#..................................................................
def logSsn(d):
for k,v in d.iteritems():
logging.debug ('ssn: %s = %r', k, v)
def rateLimit (fn):
@cookies
def _rateLimit (h, *pa, **ka): # h is for handler
assert h.__class__.__name__.startswith('H_')
hlr = h.__class__.__name__[2:]
ipa = h.request.remote_addr
ema = h.getEma()
params = {}
rlt = RateLimiter (ema, ipa, hlr)
# logSsn(h.ssn)
if rlt.ready (h.ssn['rtt']):
try:
assert 'user' not in ka
assert ka == {}
ka['user'] = m.User.byEmail (ema, ipa, hlr)
except m.Locked:
h.flash ('%s failed: this account is locked. Please wait ... and try later.' % hlr)
else:
ok, next = fn(h, *pa, **ka) # CALL THE HANDLER
lock = rlt.try_(ok)
if lock:
name, duration = lock
logging.debug('xxxxxxxxxxxxxxxxxxxxxxxxxxx LOCK XXXXXXXXXXXXXXXX')
                    if name == 'ipa': # repeated bad attempts with same ipa but different ema's
kStr,mode,msg = ipa,'Local','you are now locked out'
elif name == 'ema_ipa':# repeated bad attempts with same ema and ipa
kStr,mode,msg = ema,'Local','this account is now locked'
                    elif name == 'ema': # repeated bad attempts with same ema but different ipa's
kStr,mode,msg = ema,'Distributed','this account is now locked'
m.Lock.set (kStr, duration, hlr)
h.flash ('Too many %s failures: %s for %s.' % (hlr, msg, u.hoursMins(duration)))
pwd = h.request.get('password')
logging.warning('%s BruteForceAttack! on %s page: start lock on %s: ema:%s pwd:%s ipa:%s',mode, hlr, name, ema, pwd, ipa)
elif next:
params['nextUrl'] = next
# elif rlt.state =='429':
# pwd = h.request.get('password')
# logging.warning('BruteForceAttack? throttle failure 429 for ema:%s ipa:%s %s pwd:%s', ema, ipa, pwd)
# h.flash('http code: 429 Too Many Requests')
# elif rlt.state =='wait':
else:
params['delay'] = rlt.wait
h.ajaxResponse (**params)
    #todo: instead of auto unlock after n=locktime seconds, after n send the user an email with an unlock link
return _rateLimit
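# Contract note: a handler wrapped with @rateLimit receives the looked-up user
# as ka['user'] and must return an (ok, next) pair, where 'ok' marks the attempt
# good or bad for lockout counting and 'next' is an optional redirect URL.
# Sketch (the handler and its checkPassword method are hypothetical):
#
#   class H_Login (H_Base):
#       @rateLimit
#       def post (h, user=None):
#           ok = bool(user) and user.checkPassword (h.request.get('password'))
#           return ok, ('/home' if ok else None)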
#..................................................................
# NB 'bad' in context of the RateLimiter means a request will count towards the lockout count.
# The handler determines which requests are 'bad'.
# Only failed logins are 'bad' requests to rate-limited login handler
#... but all requests to rate-limited forgot handler and signup handler are 'bad'
# because any rapid sequence of requests to those handlers is suspect
class RateLimiter (object):
def __init__(_s, ema, ipa, hlr):
def _initDelay (minWait):
_s.delay = minWait # ds
for key, diff, cf in _s.monitors.itervalues():
nBad = _s._get (key, diff) [0]
if nBad:
#logging.debug('extra = %d for %d bad %s logins', cf.delayFn(nBad), nBad, cf.name)
_s.delay += cf.delayFn(nBad)
d = _s.delay*100.0 # Convert from int-deciseconds to float-milliseconds
mcka = u.config('MemCacheKeepAlive')# Divide d into a series of equal waits so each wait is the max that is less than MemCacheKeepAlive
n = -(-d//mcka) # number of waits. NB -(-a//b) rounds up and is equivalent to math.ceil (a/b)
_s.wait = int(-(-d//n)) # .. round up to int-millisecs
logging.debug('delay = %d ms, n = %d, wait = %d ms, total = %d', d, n, _s.wait, _s.wait*n)
assert _s.wait <= mcka
assert n * _s.wait >= d
assert (n-1) * _s.wait <= d
def _initMonitors (ema, ipa, hlr):
def _insert (name, key, diff):
assert name in lCfg
#diff is the distinct value
_s.monitors[name] = ('L:'+hlr+':'+key, diff, lCfg[name])
cfg = u.config(hlr)
lCfg = cfg.lockCfg
_s.monitors = {}
# name ,key ,diff
_insert ('ema_ipa',_s.ei,None)
_insert ('ema' ,ema ,ipa )
_insert ('ipa' ,ipa ,ema )
#logging.debug('monitors = %r',_s.monitors)
return cfg
#_s.state = None
_s.ei = ema + ipa
_s.mc = memcache.Client()
cfg = _initMonitors (ema, ipa, hlr)
_initDelay (cfg.minDelay)
def _get (_s, key, diff):
val = _s.mc.get (key)
if val:
if diff:# set of distinct emails or ips
dset, exp = val
nBad = len(dset) # number of bad login attempts under this key
assert nBad > 0
return nBad, dset, exp
return val, None, None # in this case val is nBad
return None, None, None
def ready (_s, rtt):
# _s.delay += minDelay
now = u.dsNow() # deciseconds
key = 'W:'+ _s.ei
expiry = _s.mc.get (key)
logging.debug('expiry = %r key = %s',expiry, key)
if expiry:
if expiry <= now:
_s.mc.delete (key)
#_s.state = 'good'
return True #handler state 'good':-> | 'bad' | 'locked'
#_s.state = '429'
else: # key not found
#_s.state = 'wait'
            # exp = relative expiry = delay + maxLatency. rtt * 100 (say) gives a very
            # rough upper limit for maxLatency in ds, but converting rtt from ms to ds
            # divides by 100 again, so rtt(ms) can be used for maxLatency(ds) as-is.
            exp = _s.delay + rtt
            _s.mc.set (key, now + _s.delay, exp)
return False
def try_ (_s, ok):
'''Updates the monitors which are configured for this RateLimiter.
Return None or if a lock is triggered, the cfg for it.
'''
def update (lockname):
found = False
lock = None
key, diff, cfg = _s.monitors[lockname]
nBad, dset, exp = _s._get (key, diff)
if nBad:
found = True
if ok: # the user result
if cfg.bGoodReset:
_s.mc.delete (key)
else:
if nBad < cfg.maxbad:
logging.debug('same %s count = %d', lockname, nBad)
if diff:
assert diff not in dset
dset.append(diff)
_s.mc.set (key, (dset,exp), exp) # set() needs explicit abs exp to keep to same exp time
logging.debug('diffset: %r', dset)
else: _s.mc.incr (key) # incr() implicitly keeps same exp time
else:
_s.mc.delete (key)
logging.debug('duration = %r secs!', cfg.duration)
logging.debug('same %s count = %d Locked for %r secs!', lockname, nBad, cfg.duration)
lock = lockname, cfg.duration # ok so lock the account in ndb
elif not ok: #not found in mc so create it
#logging.debug('ts: %x', u.dsNow())
#logging.debug('period: %x', cfg.period)
exp = u.sNow() + cfg.period #need use absolute time to keep same exp time when calling mc.set
#logging.debug('exp: %x', exp)
val = ([diff], exp) if diff else 1 # diff set needs a tuple so it knows the expiry
_s.mc.set (key, val, exp)
return found, lock
#assert _s.state == 'good', 'Must call ready() before calling try_()'
found, lock = update('ema_ipa')
if not found:
found, lock = update('ema')
found, lock = update('ipa')
return lock
#------------------------------------
def loggedIn (fn):
""" Checks that there's an auth user. """
@cookies
def _loggedin (h, *pa, **ka):
if h.ssn.isLoggedIn (h.user, h.request.remote_addr):
logging.debug('XXXXXXXXXXXXX ok - logIn proceed ')
return fn (h, *pa, **ka) #ok, proceed
h.redirect_to ('Login') # fail
return _loggedin
#...................................-
def loggedInRecently (fn):
""" Checks if the auth session started recently.
(for handlers of sensitive operations eg change email or reset password)
"""
@loggedIn
def _loggedinRecently (h, *pa, **ka):
if h.ssn.hasLoggedInRecently (u.config('maxAgeRecentLogin')):
return fn (h, *pa, **ka) #ok, proceed
h.redirect_to ('Login') #fail
return _loggedinRecently
#...................................-
def pushQueueMethod (taskhandler):
""" Decorator to check that this is a taskqueue method using request.header
"""
def _taskqueue(h, *pa, **ka):
""" Check, if in Staging or Production, that h is being executed by Taskqueue
Otherwise, allow run in localhost calling the url
"""
if h.request.headers.get('X-AppEngine-TaskName'):
assert h.request.path.startswith('/tq')
elif u.config('Env') == 'Prod':
            if not users.is_current_user_admin(): # we can't use this test in devServer or if logged-in as admin
logging.warning('Someone hacking a task url? pushQueueMethod does not have taskname header')
return h.error(403) #Forbidden
try:
return taskhandler(h, *pa, **ka)
        except (taskqueue.TransientError, DeadlineExceededError):
raise # keep trying! (Exceptions in Push Queue Tasks are caught by the system and retried with exp backoff.)
except:
logging.exception("Task Failed:") #other exceptions - just give up!
return _taskqueue
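# Usage sketch (hypothetical task handler; task URLs must start with /tq):
#
#   class H_TqExample (H_Base):
#       @pushQueueMethod
#       def post (h):
#           logging.info ('doing the task work here')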
#------------------------------------
class ViewClass:
""" ViewClass to insert variables into the template.
ViewClass is used in H_Base to promote variables automatically that can be used in jinja2 templates.
Use case in a H_Base Class:
self.view.dict = dict(a=[1, 2, 3], b="hello")
Can be accessed in the template by just using the variables like {{dict.b}}
"""
pass
#------------------------------------
class H_Base (wa2.RequestHandler):
def __init__(_s, request, response):
_s.initialize(request, response)
_s.view = ViewClass()
_s.localeStrings = i.getLocaleStrings(_s) # getLocaleStrings() must be called before setting path_qs in render_template()
def getEma (_s):
ema = _s.request.get('ema')
# sanity check: email validation is done client side using MailGun API
logging.debug('ema = %s', ema)
if len(ema) < 5:
_s.abort(422) # Unprocessable Entity
if '@' not in ema:
_s.abort(422) # Unprocessable Entity
return ema
# def decodeToken (token, type):
# try:
# return
# except Base64Error:
# logging.warning ('invalid Base64 in %s Token: %r', type, token)
# except:
# logging.exception('unexpected exception decoding %s token : %r', type, token)
def validVerifyToken (_s, token, type):
data, expired = cryptoken.decodeToken (token, type)
if expired:
#if _s.logOut():
_s.flash ('This token has expired. Please try again.')
else:
try:
ema, tok = data
logging.debug('ema found: %s' % ema)
logging.debug('%s token found: %s', type, tok)
if b.tqCompare (ema, tok,'tok'):
return True
except:
                logging.exception('token data has unexpected structure? : %r', data)
_s.flash ('Your token is invalid. Please try again')
return False
def logIn (_s, user):
_s.ssn.logIn (user, _s.request.remote_addr)
def logOut (_s):
return _s.ssn.logOut()
# @webapp2.cached_property
# def jinja2 (_s):
# return jinja2.get_jinja2 (factory=jinja_boot.jinja2_factory, app=_s.app)
@property
def ssn (_s):
"""access to the current session."""
sn = _s.request.registry.get('session')
if not sn:
sn =_s.request.registry['session'] = SessionVw(_s)
if sn.expired:
if sn.logOut():
_s.flash ('This session has expired. Please log in again.')
return sn
#override wa2.RequestHandler.dispatch()
def dispatch (_s):
try:
# try:# csrf protection
if _s.request.method == "POST" \
and not _s.request.path.startswith('/tq'): # tq indicates a TaskQueue handler: they are internal therefore not required to have csrf token
ssnTok = _s.ssn.get('_csrf_token')
postTok = _s.request.get('_csrf_token')
                if (not ssnTok # reject a missing session token (also catches both tokens being the same falsy value)
                        or ssnTok != postTok):
logging.warning('path = %r',_s.request.path)
logging.warning('ssn csrf token = %r',ssnTok)
logging.warning('post csrf token = %r',postTok)
logging.warning('CSRF attack or bad or missing csrf token?')
wa2.abort(403) # 'Forbidden'
#_s.response.set_status(403)
wa2.RequestHandler.dispatch (_s) # Dispatch the request.this is needed for wa2 sessions to work
finally:
usr = _s.user # renamed from 'u' to avoid shadowing the u helper module
if usr and usr.modified:
usr.put() # lazy put() so the user is written at most once per request
_s.ssn.save() # Save ssn after every request
# except: # an exception in TQ handler causes the TQ to try again which loops
# logging.exception('unexpected exception in dispatch')
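# CSRF pairing sketch (assumption: SessionVw supports item assignment; names are illustrative).
# When rendering any form, mint or reuse a token and echo it in a hidden field:
#     tok = _s.ssn.get('_csrf_token') or uuid.uuid4().hex
#     _s.ssn['_csrf_token'] = tok
#     template: <input type="hidden" name="_csrf_token" value="{{csrf_token}}">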
@wa2.cached_property
def user (_s):
uid = _s.ssn.get('_userID')
logging.debug('ssn = %r', _s.ssn)
if uid:
return m.User.byUid (uid)
return None
def flash(_s, msg):
#logging.info('>>>>>>>>>>>>> msg: %r' % msg)
_s.ssn.addFlash (msg)
def get_fmessages (_s):
fmsgs_html = u''
f = _s.ssn.getFlashes()
#logging.info('>>>>>>>>>>>>> ok added fmsgs: %r' % f)
if f:
fmsgsTmpl = Template ( '{%- if fmessages -%}'
'{%- for fmsg in fmessages -%}'
'<li>{{ fmsg.0 }}</li>'
'{%- endfor -%}'
'{%- endif -%}'
)
fmsgs_html = fmsgsTmpl.render (fmessages= f) # _s.ssn.getFlashes())
# logging.info('>>>>>>>>>>>>> ok tmplate fmsgs: %r' % fmsgs_html)
# logging.info('>>>>>>>>>>>>> ok tmplate fmsgs: %r' % str(fmsgs_html))
return u.utf8(fmsgs_html)
def serve (_s, filename, **ka):
ka['user'] = _s.user
ka['locale_strings'] = _s.localeStrings
# if not params.get('wait'): # if there's no 'wait' or its set to False
#fmsgs_html = fmsgs_tmpl.render (fmsgs=_s.ssn.get_flashes())
ka['fmsgs'] = _s.get_fmessages()
# logging.info('>>>>>>>>>>>>> added fmsgs: %r' % f)
# logging.info('>>>>>>>>>>>>> serving %s page ', filename)
# for k,v in params.iteritems():
# logging.info('params: %s = %r', k, v)
# viewpath = path.join (path.dirname (__file__), 'views', view_filename)
#_s.response.out.write (template.render (viewpath, params))
_s.response.write (Jinja().render (filename, ka))
def ajaxResponse (_s, **ka):
'''use this for ajax responses'''
ka['msgs'] = _s.get_fmessages()
resp = json.dumps (ka)
# Note: old browsers may have JSON Vulnerability if the JSON string is an array [...] at top level.
# But we are safe since ka is a python 'dictionary' so json.dumps() converts it to a JSON 'object' {...}.
assert resp.lstrip()[0] == '{', 'JSON Vulnerability'
assert resp.rstrip()[-1]== '}', 'JSON Vulnerability'
_s.response.write (resp)
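# Usage sketch (field names are illustrative):
# _s.ajaxResponse (ok=True, userName=usr.name) # client receives {"ok": true, "userName": ..., "msgs": "..."}
# note: consider also setting _s.response.content_type = 'application/json' for strict clients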
# def sendNewVerifyToken (_s, tokData, route):
# tokenStr = cryptoken.encodeVerifyToken (tokData, tt)
# #logging.info('token = %s', tokenStr)
# if tt == 'signUp': route = 'signup_2'
# elif tt == 'pw1': route = 'newpassword'
# else: assert False
# verify_url = _s.uri_for ( route
# , token=tokenStr
# , _full=True
# )
# logging.info('sent url = %s', verify_url)
# #todo replace with 'an email has been sent' + code sending email
# _s.sendEmail(to=)
# _s.flash ('An email has been sent to you. Please follow the instructions.'
# )
# _s.flash ('Click this link: <a href="{url}">{url}</a>'
# .format (url=verify_url)
# )
#_s.redirect_to('home')
# replace with redirect
#_s.serve ('message.html', {'message': msg})
def verifyMsg (_s, msg, route, ema=None, nonce=None, tt=None): #todo use the same string for tt as for route and simplify code!
assert bool(nonce) == bool(tt), "there's a nonce iff there's a tt"
assert bool(nonce) == bool(ema), "there's a nonce iff there's an ema"
if nonce:
tqSave (ema, nonce,'tok')
tok = cryptoken.encodeVerifyToken ((ema, nonce), tt)
url = _s.uri_for (route, token=tok, _full=True)
else:
url = _s.uri_for (route, _full=True)
return msg % (url,url)
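# Usage sketch (message text is illustrative; note the format string consumes the url twice):
# msg = _s.verifyMsg ('Click this link: <a href="%s">%s</a>', 'signup_2', ema=ema, nonce=nonce, tt='signUp')
# _s.flash (msg)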
def sendVerifyEmail (_s, ema, mod):
taskqueue.add ( url=_s.uri_for('TQSendVerifyEmail')
, params= {'ema':ema
,'mode' :mod
}
, queue_name='mailSender' #todo use a different Q so it can have different config. Possible Disadvantage: might spin up extra instance?
#, countdown=5 # wait at least this (secs) before executing task
)
logging.debug ('sent verify email to taskqueue' )
def sendEmail (_s, **ka):
assert 'to' in ka
assert 'subject'in ka
assert( 'body' in ka
or 'html' in ka )
# mailgun mail can also have these params/headers: attachment inline
# appengine mail can also have these: attachments reply_to
# and also extra headers eg List-Unsubscribe On-Behalf-Of
# both can have these: cc bcc
html = ka.get('html')
if html and not html.endswith('\n'):
ka['html'] = html + '\n' # write back: appending to the local 'html' alone would be discarded
# if not 'sender' in ka:
# ka['sender'] = 'chdb@blueyonder.co.uk'#'sittingmap@gmail.com'
# sender = params.get('sender').strip()
##todo this block is all about checking and setting static data so run it at startup eg in main.py
# if not u.validEmail(sender):
# cs = _s.app.config.get('contact_sender')
# if u.validEmail(cs):
# sender = cs
# else:
# from google.appengine.api import app_identity
# app_id = app_identity.get_application_id()
# sender = "%s <no-reply@%s.appspotmail.com>" % (app_id, app_id)
# params['sender'] = sender
logging.debug ('send email to taskqueue' )
taskqueue.add ( url=_s.uri_for('TQSendEmail')
, params=ka
, queue_name='mailSender'
, countdown=5 # wait at least this (secs) before executing task
)
# def doLocking (_s, rl, ema, pw, ipa):
# if rl.state == 'locked': _s.flash ('log-in failed: this account is locked. Please wait ... and try later.')
# elif rl.state == 'bad' : _s.flash ('log-in failed: either the email or the password is wrong.')
# elif rl.state == '429':
# logging.warning('BruteForceAttack? throttle failure 429 for ema:%s ipa:%s %s pwd:%s', ema, ipa, pw)
# _s.flash('http code: 429 Too Many Requests')
# name, locktime = rl.try_() # try_() is a noop and returns None,None unless rl.state=='good' or 'bad'
# if name:
# if name == 'ipa':
# m.BadIP.lock (ipa, locktime)
# attack,msg = 'Local','you are now locked out'
# elif name == 'ema_ipa':
# m.User.lock (ema, locktime)
# attack,msg = 'Local','this account is now locked'
# elif name == 'ema':
# m.User.lock (ema, locktime)
# attack,msg = 'Distributed','this account is now locked'
# _s.flash ('Too many log-in failures: %s for %s.' % (msg, u.hoursMins(locktime)))
# logging.warning('%s BruteForceAttack! start lock on %s: email:%s pwd:%s ipa:%s',attack ,name, ema, pw, ipa)
#todo: instead of auto-unlock after n=locktime seconds, after n send the user an email with an unlock link
#import datetime
def sendEmailNow (**ka):
ok = u.sendEmail(**ka)
if ok and u.config('recordEmails'):
try:
m.SentEmail.create (**ka)
except: # (apiproxy_errors.OverQuotaError, BadValueError):
logging.exception("Error saving SentEmail in datastore")
def tqSave (tag_, nonce, pname):
q = taskqueue.Queue('pullq')
#eta_ = datetime.datetime.now()
tasks = q.lease_tasks_by_tag(1, 1000, tag=tag_)
if tasks:
q.delete_tasks(tasks)
logging.info('Deleting %d old tasks for tag %s!', len(tasks), tag_)
t = taskqueue.Task(method='PULL', params={pname:nonce}, tag=tag_)#, eta=eta_)
# , countdown=5 wait at least this (secs) before executing task
q.add (t)
logging.debug('added task = %r', t)
def tqCompare (tag_, token, pname):
'''If the tag is found, check that it is unique.
Return whether its stored nonce matches token, and delete the task(s) if so.'''
q = taskqueue.Queue('pullq')
tasks = q.lease_tasks_by_tag(0.1, 1000, tag=tag_)
logging.debug('tasks = %r', tasks)
n = len(tasks)
if n == 0:
logging.warning('Not one single pullq task for %s!', tag_) # todo try again message ?
return False
if n > 1:
logging.warning('Multiple (%d) pullq tasks for %s!', n, tag_)
# GAE bug? What does ETA really mean? For push queues it is clearly the earliest time to start executing,
# but for pull queues it seems to be the lease expiry time, i.e. the latest possible time to finish executing.
# If we find multiple tasks for a tag (there shouldn't be, but just in case) we want to read the most recent one and discard the others.
# From the docs and StackOverflow it seems we should use eta, which we can optionally set (it should default to when the Task was created),
# and not rely on the list ordering (which the docs leave unspecified).
# However, in the devServer at least, the eta seems to be set to the time of lease expiry (current_time + lease_time),
# so all the leased tasks appear to have the same eta.
# todo: test this on env:prod and, if necessary, pass a creation timestamp as a param when creating a Task.
p = tasks[n-1].extract_params() # there should be only one, but however many there are, we choose the last one ...
logging.debug('params found: %r' % p)
nonce = p[pname]
if u.sameStr (token, nonce):
q.delete_tasks(tasks) # .. and then delete them all
return True
logging.warning('url token has: %s', token)
logging.warning('pullq tasks has: %s', nonce)
return False
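# Round-trip sketch for the two pull-queue helpers above (illustrative only; 'ema' and 'nonce' are placeholders):
def _tqNonceRoundTrip (ema, nonce):
'''Illustrative only: save a nonce for ema, then verify-and-consume it.'''
tqSave (ema, nonce, 'tok') # stores the nonce, replacing any older task under this tag
return tqCompare (ema, nonce, 'tok') # True iff the stored nonce matches (the task is then deleted)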
#------------------------------------
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ConnectionMonitorsOperations:
"""ConnectionMonitorsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2021_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _create_or_update_initial(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
parameters: "_models.ConnectionMonitor",
migrate: Optional[str] = None,
**kwargs: Any
) -> "_models.ConnectionMonitorResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if migrate is not None:
query_parameters['migrate'] = self._serialize.query("migrate", migrate, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ConnectionMonitor')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
parameters: "_models.ConnectionMonitor",
migrate: Optional[str] = None,
**kwargs: Any
) -> AsyncLROPoller["_models.ConnectionMonitorResult"]:
"""Create or update a connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param parameters: Parameters that define the operation to create a connection monitor.
:type parameters: ~azure.mgmt.network.v2021_02_01.models.ConnectionMonitor
:param migrate: Value indicating whether connection monitor V1 should be migrated to V2 format.
:type migrate: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass False to skip polling for this operation, or pass your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ConnectionMonitorResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_02_01.models.ConnectionMonitorResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
parameters=parameters,
migrate=migrate,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
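# Usage sketch (not part of the generated client; assumes the azure-identity package and that
# 'parameters' is a _models.ConnectionMonitor built by the caller; resource names are placeholders):
# from azure.identity.aio import DefaultAzureCredential
# from azure.mgmt.network.aio import NetworkManagementClient
# async def example(parameters):
#     async with NetworkManagementClient(DefaultAzureCredential(), "<subscription-id>") as client:
#         poller = await client.connection_monitors.begin_create_or_update(
#             "<resource-group>", "<network-watcher>", "<monitor-name>", parameters)
#         print(await poller.result())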
async def get(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
**kwargs: Any
) -> "_models.ConnectionMonitorResult":
"""Gets a connection monitor by name.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionMonitorResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.ConnectionMonitorResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass False to skip polling for this operation, or pass your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
async def update_tags(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.ConnectionMonitorResult":
"""Update tags of the specified connection monitor.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param network_watcher_name: The name of the network watcher.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:param parameters: Parameters supplied to update connection monitor tags.
:type parameters: ~azure.mgmt.network.v2021_02_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ConnectionMonitorResult, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2021_02_01.models.ConnectionMonitorResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_tags.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('ConnectionMonitorResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_tags.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}'} # type: ignore
async def _stop_initial(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._stop_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
async def begin_stop(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Stops the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass False to skip polling for this operation, or pass your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._stop_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/stop'} # type: ignore
async def _start_initial(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._start_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_start_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
async def begin_start(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Starts the specified connection monitor.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name of the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass False to skip polling for this operation, or pass your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._start_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_start.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/start'} # type: ignore
async def _query_initial(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
**kwargs: Any
) -> "_models.ConnectionMonitorQueryResult":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
# Construct URL
url = self._query_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.post(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if response.status_code == 202:
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_query_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
async def begin_query(
self,
resource_group_name: str,
network_watcher_name: str,
connection_monitor_name: str,
**kwargs: Any
) -> AsyncLROPoller["_models.ConnectionMonitorQueryResult"]:
"""Query a snapshot of the most recent connection states.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:param connection_monitor_name: The name given to the connection monitor.
:type connection_monitor_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass False to skip polling for this operation, or pass your own initialized polling object for a custom polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ConnectionMonitorQueryResult or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2021_02_01.models.ConnectionMonitorQueryResult]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorQueryResult"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._query_initial(
resource_group_name=resource_group_name,
network_watcher_name=network_watcher_name,
connection_monitor_name=connection_monitor_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorQueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'connectionMonitorName': self._serialize.url("connection_monitor_name", connection_monitor_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_query.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors/{connectionMonitorName}/query'} # type: ignore
def list(
self,
resource_group_name: str,
network_watcher_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ConnectionMonitorListResult"]:
"""Lists all connection monitors for the specified Network Watcher.
:param resource_group_name: The name of the resource group containing Network Watcher.
:type resource_group_name: str
:param network_watcher_name: The name of the Network Watcher resource.
:type network_watcher_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ConnectionMonitorListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2021_02_01.models.ConnectionMonitorListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ConnectionMonitorListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2021-02-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ConnectionMonitorListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, response)
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/connectionMonitors'} # type: ignore
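# Usage sketch (not part of the generated client; 'client' is a NetworkManagementClient as in the
# begin_create_or_update sketch above):
# async for monitor in client.connection_monitors.list("<resource-group>", "<network-watcher>"):
#     print(monitor.name)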
from __main__ import settings, botdata, httpgetter
import aiohttp
import asyncio
import async_timeout
import sys
import subprocess
import os
import numpy
import math
from datetime import datetime, timedelta, timezone
from PIL import Image, ImageDraw, ImageFont
from .tabledraw import Table, ImageCell, TextCell, ColorCell, DoubleCell, SlantedTextCell, get_table_font
from io import BytesIO
from .helpers import run_command, get_pretty_time, read_json, UserError, format_duration_simple
from .imagetools import *
from concurrent.futures import ThreadPoolExecutor
from .metastats import get_hero_winrate, get_hero_pickban_percent
import logging
logger = logging.getLogger("mangologger")
radiant_icon = settings.resource("images/radiant.png")
dire_icon = settings.resource("images/dire.png")
discord_color0 = "#6f7377" # much lighter, mostly unused color
discord_color1 = "#2C2F33"
discord_color2 = "#23272A"
discord_color3 = "#202225"
discord_color4 = "#131416" # darker, mostly unused color
faded_yellow_color = "#c6b37c" # similar to the color of the text for displaying level info
# mostly from https://www.dota2.com/public/css/heropedia.css
item_quality_colors = {
"rare": "#1A87F9",
"artifact": "#E29B01",
"secret_shop": "#31d0d0", # this one wasn't updated, so grabbed from in-game screenshot
"consumable": "#1D80E7",
"common": "#2BAB01",
"epic": "#B812F9",
"component": "#FEFEFE"
}
# from vpk/panorama/styles/dotastyles.css
neutral_tier_text_colors = {
"1": "#BEBEBE",
"2": "#92E47E",
"3": "#7F93FC",
"4": "#D57BFF",
"5": "#FFE195",
}
# from in-game screenshot
neutral_tier_colors = {
"1": "#958a97",
"2": "#0ea243",
"3": "#4c6ee8",
"4": "#9b2bf6",
"5": "#e47b17",
}
# from in-game times
neutral_timings = {
"1": "7:00+",
"2": "17:00+",
"3": "27:00+",
"4": "37:00+",
"5": "60:00+",
}
vpkurl = None
hero_infos = {}
item_infos = {}
ability_infos = {}
def get_item_color(item, default=None):
if item is None:
return default
if item.quality in item_quality_colors:
return item_quality_colors[item.quality]
elif item.neutral_tier is not None:
return neutral_tier_colors[item.neutral_tier]
else:
return default
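# Usage sketch (illustrative): pick a border color for an item cell, falling back to the table background:
# border_color = get_item_color(item, default=discord_color2)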
def init_dota_info(hero_info, item_info, ability_info, the_vpkurl):
global hero_infos, item_infos, ability_infos, vpkurl
hero_infos = hero_info
item_infos = item_info
ability_infos = ability_info
vpkurl = the_vpkurl
def get_hero_name(hero_id):
return hero_infos[hero_id]["name"]
async def get_url_image(url):
return Image.open(await httpgetter.get(url, "bytes", cache=True))
async def get_hero_image(hero_id):
try:
return await get_url_image(hero_infos[hero_id]["image"])
except KeyError:
return Image.new('RGBA', (10, 10), (0, 0, 0, 0))
async def get_hero_icon(hero_id):
try:
return await get_url_image(hero_infos[hero_id]["icon"])
except KeyError:
return Image.new('RGBA', (10, 10), (0, 0, 0, 0))
async def get_hero_portrait(hero_id):
try:
return await get_url_image(hero_infos[hero_id]["portrait"])
except KeyError:
return Image.new('RGBA', (10, 10), (0, 0, 0, 0))
async def get_item_image(item_id):
try:
return await get_url_image(item_infos[item_id]["icon"])
except KeyError:
return Image.new('RGBA', (10, 10), (0, 0, 0, 0))
async def get_level_image(level):
rowheight = 48
image = Image.new('RGBA', (rowheight - 4, rowheight), (0, 0, 0, 0))
draw = ImageDraw.Draw(image)
size = image.size
outer_radius = 20
inner_radius = outer_radius - 2
outer_circle = ((size[0] / 2) - outer_radius, (size[1] / 2) - outer_radius,
(size[0] / 2) + outer_radius, (size[1] / 2) + outer_radius)
inner_circle = ((size[0] / 2) - inner_radius, (size[1] / 2) - inner_radius,
(size[0] / 2) + inner_radius, (size[1] / 2) + inner_radius)
draw.ellipse(outer_circle, fill=discord_color3)
draw.ellipse(inner_circle, fill=discord_color4)
font_adjustment_y = -4
level = str(level)
font = get_table_font(24)
font_size = font.getsize(level) # note: getsize() is deprecated/removed in newer Pillow; getbbox() is the modern equivalent
x_loc = (image.size[0] / 2) - (font_size[0] / 2)
y_loc = (image.size[1] / 2) - (font_size[1] / 2)
draw.text((x_loc, y_loc + font_adjustment_y), level, font=font, fill=faded_yellow_color)
return image
async def get_ability_image(ability_id, hero_id=None):
try:
ability = ability_infos[ability_id]["entity"]
if ability.is_talent:
return await get_talents_image(ability_id, hero_id)
return await get_url_image(ability_infos[ability_id]["icon"])
except KeyError:
return Image.new('RGBA', (10, 10), (0, 0, 0, 0))
async def get_talents_image(abilities, hero_id):
if isinstance(abilities, int):
abilities = [ abilities ]
if abilities is None:
abilities = []
talent_slots = []
for ability_id in abilities:
if ability_id not in ability_infos:
continue
ability = ability_infos[ability_id]["entity"]
if not ability.is_talent:
continue
for talent in ability.talent_links:
if talent.hero_id is None or talent.hero_id == hero_id:
talent_slots.append(talent.slot)
talent_slots = sorted(talent_slots, reverse=True)
uri = f"talents_icon:{'_'.join(map(str, talent_slots))}"
filename = httpgetter.cache.get_filename(uri)
if filename and not settings.debug:
return Image.open(filename)
filename = await httpgetter.cache.new(uri, "png")
image = Image.open(settings.resource("images/talents/talent_background.png"))
for slot in talent_slots:
slot_image = Image.open(settings.resource(f"images/talents/talent_{slot}.png"))
image = paste_image(image, slot_image)
image.save(filename, format="PNG")
return image
async def get_neutral_image(item):
background = Image.new("RGBA", (64, 64))
size = background.size
circle_diameter = 48
circle_thickness = 3
circle_color = discord_color4
img_scale = circle_diameter / 64
inner_radius = circle_diameter / 2
inner_circle = ((size[0] / 2) - inner_radius, (size[1] / 2) - inner_radius,
(size[0] / 2) + inner_radius, (size[1] / 2) + inner_radius)
outer_radius = inner_radius + circle_thickness
outer_circle = ((size[0] / 2) - outer_radius, (size[1] / 2) - outer_radius,
(size[0] / 2) + outer_radius, (size[1] / 2) + outer_radius)
if item:
draw = ImageDraw.Draw(background)
draw.ellipse(outer_circle, fill=circle_color)
item_img = await get_item_image(item)
item_img = item_img.resize((int(item_img.size[0] * img_scale), int(item_img.size[1] * img_scale)))
item_img = item_img.crop((
math.floor((item_img.size[0] - background.size[0]) / 2),
math.floor((item_img.size[1] - background.size[1]) / 2),
item_img.size[0] - math.ceil((item_img.size[0] - background.size[0]) / 2),
item_img.size[1] - math.ceil((item_img.size[1] - background.size[1]) / 2))
)
mask_circle = Image.new("RGBA", background.size)
mask_draw = ImageDraw.Draw(mask_circle)
mask_draw.ellipse(inner_circle, fill="#ffffff")
temp_image = Image.new("RGBA", (64, 64))
temp_image.paste(item_img, (0, 0), mask=mask_circle)
return Image.alpha_composite(background, temp_image)
else:
return background
# builds an image for the match table with icons showing which Aghanim's effects (scepter/shard) are active for the given player
async def get_active_aghs_image(player):
global vpkurl
is_shard_active = False
is_scepter_active = False
for i in range(0, 6):
# 108 is aghs scepter's item id
if player.get(f"item_{i}", 0) == 108:
is_scepter_active = True
for buff in (player.get("permanent_buffs") or []):
# for buffs, 2 is scepter, 12 is shard: https://github.com/odota/dotaconstants/blob/master/json/permanent_buffs.json
if buff["permanent_buff"] == 2:
is_scepter_active = True
if buff["permanent_buff"] == 12:
is_shard_active = True
scepter_image = "/panorama/images/hud/reborn/aghsstatus_scepter_psd.png"
if is_scepter_active:
scepter_image = "/panorama/images/hud/reborn/aghsstatus_scepter_on_psd.png"
shard_image = "/panorama/images/hud/reborn/aghsstatus_shard_psd.png"
if is_shard_active:
shard_image = "/panorama/images/hud/reborn/aghsstatus_shard_on_psd.png"
scepter_image = await get_url_image(vpkurl + scepter_image)
shard_image = await get_url_image(vpkurl + shard_image)
image = Image.new("RGBA", (
max(scepter_image.size[0], shard_image.size[0]),
scepter_image.size[1] + shard_image.size[1])
)
image = paste_image(image, scepter_image, 2, 0)
image = paste_image(image, shard_image, 0, scepter_image.size[1])
return image
async def get_item_images(player):
images = []
item_size = (88, 64)
for i in range(0, 6):
item = player.get(f"item_{i}")
if item:
images.append(await get_item_image(item))
else:
images.append(Image.new("RGBA", item_size))
item = player.get("item_neutral")
if item:
images.append(await get_neutral_image(item))
widths, heights = zip(*(i.size if i else item_size for i in images))
result = Image.new("RGBA", (sum(widths), max(heights)))
x = 0
for img in images:
result.paste(img, (x, 0))
x += item_size[0]
return result
async def get_spell_images(spells):
images = []
spell_size = (128, 128)
for spell in spells:
if spell:
images.append(await get_ability_image(spell))
else:
images.append(Image.new("RGBA", spell_size))
widths, heights = zip(*(i.size if i else spell_size for i in images))
result = Image.new("RGBA", (sum(widths), max(heights)))
x = 0
for img in images:
result.paste(img, (x, 0))
x += spell_size[0]
return result
def get_lane(player):
    lane_role_dict = { 1: "Safe", 2: "Mid", 3: "Off", 4: "Jungle", None: "" }
    if player.get('is_roaming'):
        return "Roam"
    return lane_role_dict.get(player.get('lane_role'), "")
async def add_player_row(table, player, is_parsed, is_ability_draft, has_talents):
row = [
ColorCell(width=5, color=("green" if player["isRadiant"] else "red")),
ImageCell(img=await get_hero_image(player["hero_id"]), height=48),
ImageCell(img=await get_level_image(player.get("level", 1))),
TextCell(player.get("personaname", "Anonymous")),
TextCell(player.get("kills")),
TextCell(player.get("deaths")),
TextCell(player.get("assists")),
TextCell(player.get("gold_per_min"), color="yellow")
]
if is_parsed:
row.extend([
TextCell(player.get("actions_per_min")),
TextCell(get_lane(player)),
ImageCell(img=await get_active_aghs_image(player), height=48)
])
if has_talents:
row.append(ImageCell(img=await get_talents_image(player.get("ability_upgrades_arr"), player["hero_id"]), height=48))
row.append(ImageCell(img=await get_item_images(player), height=48))
if is_ability_draft:
def ad_ability_filter(ability_id):
ability = ability_infos[ability_id]["entity"]
return not (ability.is_talent or ("ad_special_bonus" in ability.name))
abilities = filter(ad_ability_filter, player.get("ability_upgrades_arr"))
abilities = list(set(abilities))
abilities = sorted(abilities, key=lambda a: ability_infos[a]["slot"] if ability_infos[a]["slot"] else 20)
if len(abilities) > 4:
abilities = abilities[:4]
row[3:3] = [
ImageCell(img=await get_spell_images(abilities), height=48)
]
table.add_row(row)
async def draw_match_table(match):
is_parsed = match.get("version")
table = Table(background=discord_color2)
has_ability_upgrades = match["players"][0].get("ability_upgrades_arr") is not None
is_ability_draft = match["game_mode"] == 18 and has_ability_upgrades
    has_talents = has_ability_upgrades and match["start_time"] > 1481500800  # talents arrived with patch 7.00 (Dec 2016)
# Header
headers = [
TextCell("", padding=0),
TextCell(""),
TextCell(""),
TextCell(""),
TextCell("K", horizontal_align="center"),
TextCell("D", horizontal_align="center"),
TextCell("A", horizontal_align="center"),
TextCell("GPM", color="yellow")
]
if is_parsed:
headers.extend([
TextCell("APM"),
TextCell("Lane"),
TextCell("")
])
if has_talents:
headers.append(TextCell(""))
headers.append(TextCell("Items"))
if is_ability_draft:
headers[3:3] = [
TextCell("Abilities")
]
table.add_row(headers)
for cell in table.rows[0]:
cell.background = discord_color1
# Do players
for player in match["players"]:
if player['isRadiant']:
await add_player_row(table, player, is_parsed, is_ability_draft, has_talents)
table.add_row([ColorCell(color=discord_color1, height=5) for i in range(len(headers))])
for player in match["players"]:
if not player['isRadiant']:
await add_player_row(table, player, is_parsed, is_ability_draft, has_talents)
return table.render()
async def create_match_image(match):
table_border = 10
table_image = await draw_match_table(match)
image = Image.new('RGBA', (table_image.size[0] + (table_border * 2), table_image.size[1] + table_border + 64))
draw = ImageDraw.Draw(image)
draw.rectangle([0, 0, image.size[0], image.size[1]], fill=discord_color2)
draw.rectangle([0, 64, image.size[0], image.size[1]], fill=discord_color1)
image.paste(table_image, (table_border, 64))
title = TextCell(f"{'Radiant' if match['radiant_win'] else 'Dire'} Victory", font_size=48, color=("green" if match['radiant_win'] else "red"))
title.render(draw, image, 64, 0, image.size[0] - 64, 64)
team_icon = Image.open(radiant_icon if match['radiant_win'] else dire_icon).resize((64, 64))
temp_image = Image.new("RGBA", image.size)
temp_image.paste(team_icon, (0, 0))
image = Image.alpha_composite(image, temp_image)
fp = BytesIO()
image.save(fp, format="PNG")
fp.seek(0)
return fp
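# blends two same-size images along the anti-diagonal: row j takes its last j
# columns from the second image, so the result is the first image in the
# top-left triangle and the second in the bottom-right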
async def combine_image_halves(img_url1, img_url2):
img1 = Image.open(await httpgetter.get(img_url1, "bytes", cache=True)).convert("RGBA")
img2 = Image.open(await httpgetter.get(img_url2, "bytes", cache=True)).convert("RGBA")
pixels1 = img1.load()
pixels2 = img2.load()
width = img1.size[0]
height = img1.size[1]
for j in range(height):
for i in range(abs(width - j), width):
pixels1[i,j] = pixels2[i,j]
fp = BytesIO()
img1.save(fp, format="PNG")
fp.seek(0)
return fp
def optimize_gif(uri, filename):
# if need further, try doing O3 only after colors instead of before
optimization = [
["--colors", "256"],
["-O3"],
["--colors", "128"],
["-O3"],
]
size_limit = 8
logger.info(f"optimizing: {uri}")
file_size = os.path.getsize(filename) / 1000000
logger.info(f"bytes: {file_size} MB")
i = 0
while file_size >= size_limit and i < len(optimization):
output = run_command(["gifsicle", "--conserve-memory", filename, "-o", filename] + optimization[i])
file_size = os.path.getsize(filename) / 1000000
logger.info(f"bytes: {file_size} MB")
i += 1
if file_size >= size_limit:
raise ValueError(f"couldn't optimize {uri} far enough")
# places an icon on the map at the indicated x/y using the dota coordinate system,
# scaling the icon relative to the map size and centering it on the point
def place_icon_on_map(map_image, icon, x, y):
scale = map_image.width / 128
x = (x - 64) * scale
y = (128 - (y - 64)) * scale
return paste_image(map_image, icon, int(x - (icon.width / 2)), int(y - (icon.height / 2)))
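# a minimal worked example of the conversion above (illustrative only; the
# 64..192 world-coordinate range is inferred from the math here, not from any
# official documentation): with a 256px map, scale = 2 and the map center
# (128, 128) lands on the pixel center with the y axis flipped
def _map_coord_to_pixel(map_width, x, y):
    scale = map_width / 128
    return ((x - 64) * scale, (128 - (y - 64)) * scale)
assert _map_coord_to_pixel(256, 128, 128) == (128, 128)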
# wraps the main gif creation code so it doesn't block
async def create_dota_gif(bot, match, stratz_match, start_time, end_time, ms_per_second=100):
uri = f"match_gif:{match['match_id']}:{start_time}:{end_time}:{ms_per_second}"
filename = httpgetter.cache.get_filename(uri)
if filename and not settings.debug:
return filename
filename = await httpgetter.cache.new(uri, "gif")
hero_icons = {}
for player in stratz_match["players"]:
hero_id = player["heroId"]
hero_icons[str(hero_id)] = await get_hero_icon(hero_id)
return await bot.loop.run_in_executor(ThreadPoolExecutor(max_workers=1), create_dota_gif_main, match, stratz_match, start_time, end_time, ms_per_second, filename, uri, hero_icons)
# the main code for creating the dota gif. this should be run in a separate thread because it blocks
def create_dota_gif_main(match, stratz_match, start_time, end_time, ms_per_second, filename, uri, hero_icons):
building_data = read_json(settings.resource("json/building_data.json"))
map_image = Image.open(settings.resource("images/map/dota_map.png"))
map_image = map_image.resize((256, 256), Image.ANTIALIAS)
clock_bg_image = Image.open(settings.resource("images/map/clock_background.png"))
font = ImageFont.truetype(settings.resource("images/arial_unicode_bold.ttf"), 16)
reverse = end_time < start_time
    if reverse:
        start_time, end_time = end_time, start_time
match_start = -89
if start_time < match_start:
start_time = match_start
if end_time > match["duration"]:
end_time = match["duration"]
players = []
for player in stratz_match["players"]:
playbackData = player["playbackData"]
positionEvents = playbackData["playerUpdatePositionEvents"]
deathEvents = playbackData["deathEvents"]
scale = 0.75
icon = hero_icons[str(player["heroId"])]
icon = icon.resize((int(icon.width * scale), int(icon.height * scale)), Image.ANTIALIAS)
# icon = outline_image(icon, 2, (0, 255, 0) if player["isRadiant"] else (255, 0, 0))
x = 0
y = 0
data = {
"icon": icon
}
for t in range(match_start, end_time + 1):
event = next((e for e in positionEvents if e["time"] == t), None)
if event:
x = event["x"]
y = event["y"]
if t >= start_time:
data[t] = { "x": x, "y": y }
death_timer = 0
for t in range(match_start, end_time + 1):
event = next((e for e in deathEvents if e["time"] == t), None)
if event:
death_timer = event["timeDead"]
if t >= start_time:
data[t]["dead"] = death_timer > 0
if death_timer > 0:
death_timer -= 1
players.append(data)
objectiveEvents = match["objectives"]
buildings = []
for b in building_data:
icon = Image.open(settings.resource(f"images/map/{b['icon']}"))
size = {
"tower": int(map_image.width * (16 / 300)),
"barracks": int(map_image.width * (12 / 300)),
"ancient": int(map_image.width * (25 / 300))
}[b["type"]]
icon = icon.resize((size, size), Image.ANTIALIAS)
building = {
"icon": icon,
"x": b["x"],
"y": b["y"]
}
event = next((e for e in objectiveEvents if e.get("key") == b["key"]), None)
if event:
building["death"] = event["time"]
buildings.append(building)
#sort from top right to bottom left for drawing
buildings = sorted(buildings, key=lambda b: b["x"] + b["y"], reverse=True)
# runes
runeEvents = stratz_match["playbackData"]["runeEvents"]
current_runes = {}
runes = {}
for t in range(match_start, end_time + 1):
for e in filter(lambda e: e["time"] == t and e["action"] == 0, runeEvents):
current_runes[e["id"]] = {
"type": e["runeType"],
"x": e["x"],
"y": e["y"]
}
if t >= start_time and current_runes:
runes[t] = current_runes.copy()
for e in filter(lambda e: e["time"] == t and e["action"] == 1, runeEvents):
if e["id"] in current_runes:
del current_runes[e["id"]]
# rune icons
rune_icons = {}
for i in range(0, 9):
scale = 0.5
icon = Image.open(settings.resource(f"images/map/rune_{i}.png"))
rune_icons[i] = icon.resize((int(icon.width * scale), int(icon.height * scale)), Image.ANTIALIAS)
process = subprocess.Popen(["gifsicle", "--multifile", "-d", str(ms_per_second // 10), "--conserve-memory", "-O3", "-", "-o", filename], stdin=subprocess.PIPE, bufsize=-1)
time_range = range(start_time, end_time + 1)
if reverse:
time_range = range(end_time, start_time - 1, -1)
for t in time_range:
image = map_image.copy()
for building in buildings:
if t < building.get("death", t + 1):
image = place_icon_on_map(image, building["icon"], building["x"], building["y"])
for player in players:
icon = player["icon"].convert("LA") if player[t]["dead"] else player["icon"]
image = place_icon_on_map(image, icon, player[t]["x"], player[t]["y"])
        for rune in runes.get(t, {}).values():
            image = place_icon_on_map(image, rune_icons[rune["type"]], rune["x"], rune["y"])
image = paste_image(image, clock_bg_image, (image.width // 2) - (clock_bg_image.width // 2), 0)
draw = ImageDraw.Draw(image)
clock_text = get_pretty_time(abs(t))
clock_pos = ((image.width // 2) - (font.getsize(clock_text)[0] // 2), -1)
draw.text(clock_pos, clock_text, font=font, fill="#ffffff")
image.save(process.stdin, "gif")
image.close()
process.stdin.close()
process.wait()
optimize_gif(uri, filename)
return filename
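# note: frames are streamed straight into gifsicle's stdin one GIF at a time
# ("--multifile") rather than buffered in memory, and the finished file is
# then squeezed under the size limit by optimize_gif above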
async def create_dota_emoticon(emoticon, url):
uri = f"dota_emoticon:{emoticon.name}"
filename = httpgetter.cache.get_filename(uri)
if filename and not settings.debug:
return filename
filetype = "gif" if emoticon.frames > 1 else "png"
filename = await httpgetter.cache.new(uri, filetype)
image = Image.open(await httpgetter.get(url, "bytes", cache=True))
image = remove_semi_transparent(image, (255, 255, 255, 0))
if filetype == "png":
image.save(filename, "png")
return filename
frame_width = image.width / emoticon.frames
try:
process = subprocess.Popen(["gifsicle",
"--multifile",
"-d", str(emoticon.ms_per_frame // 10),
"-U", "--disposal=bg",
"--loopcount=0",
"--transparent", "0",
"-", "-o", filename], stdin=subprocess.PIPE, bufsize=-1)
except OSError as e:
raise UserError("Whoever setup this mangobyte doesn't have gifsicle installed")
for i in range(0, emoticon.frames):
        box = (int(i * frame_width), 0, int((i + 1) * frame_width), image.height)
frame = image.crop(box)
frame.save(process.stdin, "gif")
process.stdin.close()
process.wait()
return filename
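# renders a rank medal; rank_tier packs the medal in the tens digit and the
# stars in the ones digit (e.g. 54 = badge 5 with 4 stars), which is the same
# encoding opendota exposes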
async def dota_rank_icon(rank_tier, leaderboard_rank):
if rank_tier is None:
rank_tier = 0
uri = f"dota_rank:{rank_tier}_{leaderboard_rank}"
logger.info(uri)
filename = httpgetter.cache.get_filename(uri)
if filename and not settings.debug:
return filename
filename = await httpgetter.cache.new(uri, "png")
badge_num = rank_tier // 10
stars_num = min(rank_tier % 10, 7)
modifier = ""
if leaderboard_rank and badge_num == 7:
        badge_num = 8  # this is to be consistent with what opendota shows
if badge_num == 8 and leaderboard_rank:
stars_num = 0
if leaderboard_rank <= 10:
modifier = "c"
elif leaderboard_rank <= 100:
modifier = "b"
image = Image.open(settings.resource(f"images/ranks/rank_{badge_num}{modifier}.png"))
if stars_num > 0:
stars_image = Image.open(settings.resource(f"images/ranks/stars_{stars_num}.png"))
image = paste_image(image, stars_image, 0, 0)
if leaderboard_rank:
draw = ImageDraw.Draw(image)
box_width = 256
box_height = 50
cell = TextCell(leaderboard_rank, color="#feffe5", font_size=50, horizontal_align="center")
cell.render(draw, image, 0, 232 - box_height, box_width, box_height)
image.save(filename, "png")
return filename
def get_datetime_cell(match, region_data):
match_date = datetime.fromtimestamp(match["start_time"], tz=timezone.utc)
region = str(match.get("region"))
if region is None or region == "None":
region = "1" # Default to US West
if region in region_data:
match_date += timedelta(hours=region_data[region]["UTC_offset"])
# character for leading space is different on windows
lead_char = "#" if os.name == "nt" else "-"
str_date = match_date.strftime(f"%b %{lead_char}d %Y")
str_time = match_date.strftime(f"%{lead_char}I:%M %p")
return DoubleCell(
TextCell(str_date, font_size=18, horizontal_align="center"),
TextCell(str_time, font_size=18, horizontal_align="center")
)
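# note on the lead_char trick in get_datetime_cell: suppressing strftime's
# leading zero is a non-standard extension ("%-d" on POSIX, "%#d" on
# Windows), hence the os.name check; e.g. in the C locale both render
# datetime(2020, 3, 5) as "Mar 5 2020"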
async def draw_meta_table(sorted_heroes, heroes):
"""Takes a sorted json and an unsorted json of
heroes and draws a nice little discord-friendly table"""
border_size = 10
table = Table(background=discord_color2)
    # Header
    headers = [
TextCell("Hero", background=discord_color1, padding=6),
TextCell("", background=discord_color1, padding=6),
TextCell("Win %", background=discord_color1, padding=6),
TextCell("Pick/Ban %", background=discord_color1, padding=6)
]
table.add_row(headers)
for hero in sorted_heroes:
table.add_row([
ImageCell(img=await get_hero_image(hero["hero_id"]), height=48),
            TextCell(get_hero_name(hero["hero_id"]), font_size=24),
            TextCell(f"{get_hero_winrate(hero):.0%}", font_size=24),
            TextCell(f"{get_hero_pickban_percent(hero, heroes):.0%}", font_size=24)
])
image = table.render()
border_image = Image.new('RGBA', (image.size[0] + (border_size * 2), image.size[1] + border_size), color=discord_color1)
image = paste_image(border_image, image, border_size, 0)
fp = BytesIO()
image.save(fp, format="PNG")
fp.seek(0)
return fp
async def draw_matches_table(matches, game_strings):
region_data = read_json(settings.resource("json/region_data.json"))
border_size = 10
grey_color = "#BBBBBB"
table = Table(background=discord_color2)
# Header
headers = [
TextCell("Hero", padding=0),
TextCell(""),
TextCell("Result"),
TextCell("K", horizontal_align="center"),
TextCell("D", horizontal_align="center"),
TextCell("A", horizontal_align="center"),
TextCell("Duration"),
TextCell("Type"),
TextCell("Date")
]
table.add_row(headers)
for cell in table.rows[0]:
cell.background = discord_color1
table.add_row([ColorCell(color=discord_color1, height=6) for i in range(len(headers))])
first = True
for match in matches:
won_match = bool(match["radiant_win"]) == bool(match["player_slot"] < 128)
game_mode = game_strings.get(f"game_mode_{match['game_mode']}", "Unknown")
lobby_type = game_strings.get(f"lobby_type_{match['lobby_type']}", "Unknown")
if first:
first = False
else:
table.add_row([ColorCell(color=discord_color2, height=12) for i in range(len(headers))])
table.add_row([
ImageCell(img=await get_hero_image(match["hero_id"]), height=48),
DoubleCell(
TextCell(get_hero_name(match["hero_id"]), font_size=24),
TextCell(match.get("match_id"), font_size=12, horizontal_align="left", color=grey_color)
),
TextCell("Win" if won_match else "Loss", color=("green" if won_match else "red"), horizontal_align="center"),
TextCell(match.get("kills")),
TextCell(match.get("deaths")),
TextCell(match.get("assists")),
TextCell(format_duration_simple(match.get("duration")), horizontal_align="center"),
DoubleCell(
TextCell(game_mode, font_size=18, padding_right=15, color=grey_color),
TextCell(lobby_type, font_size=18, padding_right=15, color=grey_color)
),
get_datetime_cell(match, region_data)
])
image = table.render()
border_image = Image.new('RGBA', (image.size[0] + (border_size * 2), image.size[1] + border_size), color=discord_color1)
image = paste_image(border_image, image, border_size, 0)
fp = BytesIO()
image.save(fp, format="PNG")
fp.seek(0)
return fp
# given talents as they are stored in dotabase
async def draw_hero_talents(hero):
talents = list(map(lambda t: t.localized_name, hero.talents))
talent_rows = [
[ talents[7], talents[6] ],
[ talents[5], talents[4] ],
[ talents[3], talents[2] ],
[ talents[1], talents[0] ]
]
image = Image.open(settings.resource("images/talents.png"))
draw = ImageDraw.Draw(image)
header_x = 19
header_y = 17
header_width = 655
header_height = 51
cell = TextCell(hero.localized_name, color="#dddddd", font_size=28, horizontal_align="center")
cell.render(draw, image, header_x, header_y, header_width, header_height)
box_width = 306
box_height = 73
box_margin_y = 14
start_y = 70
start_x_left = 14
start_x_right = 370
start_x = [ start_x_left, start_x_right ]
for i in range(0, 4):
for j in range(0, 2):
x = start_x[j]
y = start_y + (i * (box_height + box_margin_y))
text = talent_rows[i][j]
cell = TextCell(text, color="#cca770", font_size=20, wrap=True, padding=[ 0, 15, 0, 15 ], horizontal_align="center")
cell.render(draw, image, x, y, box_width, box_height)
fp = BytesIO()
image.save(fp, format="PNG")
fp.seek(0)
return fp
async def fuse_hero_images(hero1, hero2):
file1 = await httpgetter.get(hero_infos[hero1.id]["image"], "filename", cache=True)
file2 = await httpgetter.get(hero_infos[hero2.id]["image"], "filename", cache=True)
fp = BytesIO()
colorize_image(file1, file2, fp)
fp.seek(0)
return fp
async def draw_courage(hero_id, icon_ids):
# scaled to 128 height
hero_image = await get_hero_portrait(hero_id)
hero_image = hero_image.resize((97, 128), Image.ANTIALIAS)
table = Table(background="#000000")
table.add_row([
ColorCell(color="white", width=97, height=64),
ImageCell(img=await get_item_image(icon_ids[0])),
ImageCell(img=await get_item_image(icon_ids[1])),
ImageCell(img=await get_item_image(icon_ids[2]))
])
table.add_row([
ColorCell(color="white", width=97, height=64),
ImageCell(img=await get_item_image(icon_ids[3])),
ImageCell(img=await get_item_image(icon_ids[4])),
ImageCell(img=await get_item_image(icon_ids[5]))
])
image = table.render()
image = paste_image(image, hero_image, 0, 0)
fp = BytesIO()
image.save(fp, format="PNG")
fp.seek(0)
return fp
async def draw_artifact_deck(deck_string, cards, hero_turns, card_counts):
uri = f"artifact_deck:{deck_string}"
filename = httpgetter.cache.get_filename(uri)
if filename and not settings.debug:
return filename
filename = await httpgetter.cache.new(uri, "png")
sorting_info = [
{
"filter": lambda c: c.type == "Hero",
"sort": lambda c: hero_turns[c.id]
},
{
"filter": lambda c: c.type != "Hero" and c.type != "Item",
"sort": lambda c: c.mana_cost
},
{
"filter": lambda c: c.type == "Item",
"sort": lambda c: c.gold_cost
}
]
ordered_cards = []
for info in sorting_info:
for card in sorted(filter(info["filter"], cards), key=info["sort"]):
ordered_cards.append(card)
column_count = 5
border_size = 10
grey_color = "#BBBBBB"
table = Table(background=discord_color2)
table.add_row([ColorCell(color=discord_color1, height=border_size) for i in range(column_count)])
first = True
for card in ordered_cards:
cost = ""
if card.type != "Hero":
if card.type == "Item":
cost = card.gold_cost
else:
cost = card.mana_cost
last_cell = ""
if card.type == "Hero":
last_cell = f"Turn {hero_turns.get(card.id)}"
else:
last_cell = f"x {card_counts.get(card.id)}"
if first:
first = False
else:
table.add_row([ColorCell(color=discord_color2, height=2) for i in range(column_count)])
table.add_row([
ImageCell(img=await get_url_image(card.mini_image), height=48),
ImageCell(img=await get_url_image(card.type_image), height=48),
TextCell(cost),
TextCell(card.name),
TextCell(last_cell, horizontal_align="right")
])
card_color = card.color.blend(Color(discord_color2), 0.5)
for cell in table.rows[len(table.rows) - 1]:
cell.background = card_color.hex
image = table.render()
border_image = Image.new('RGBA', (image.size[0] + (border_size * 2), image.size[1] + border_size), color=discord_color1)
image = paste_image(border_image, image, border_size, 0)
image.save(filename, format="PNG")
return filename
# taken from https://stackoverflow.com/questions/4998427
def grouper(values, N):
return [values[n:n+N] for n in range(0, len(values), N)]
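# tiny usage example: chunks a list into rows of N, with a short final row
# when len(values) isn't a multiple of N
assert grouper([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]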
async def draw_neutralitems_tier(selected_tier, all_neutral_items):
items = list(filter(lambda i: i.neutral_tier == str(selected_tier), all_neutral_items))
table = Table(background=discord_color2)
for item in items:
table.add_row([
ImageCell(img=await get_item_image(item.id)),
TextCell(item.localized_name, font_size=30, padding=10)
])
image = table.render()
fp = BytesIO()
image.save(fp, format="PNG")
fp.seek(0)
return fp
async def draw_neutralitems(selected_tier, all_neutral_items):
if selected_tier is not None:
return await draw_neutralitems_tier(selected_tier, all_neutral_items)
items_per_row = 6
table = Table(background=discord_color1)
for tier in range(1, 6):
header_row = [ColorCell(color=discord_color2) for i in range(items_per_row)]
header_row[0] = TextCell(f"Tier {tier}", color=neutral_tier_text_colors[str(tier)], font_size=25, padding=[10, 0, 10, 10], background=discord_color2)
header_row[items_per_row - 1] = TextCell(neutral_timings[str(tier)], color=discord_color0, font_size=25, padding=[10, 10, 10, 0], horizontal_align="right", background=discord_color2)
table.add_row(header_row)
items = list(filter(lambda i: i.neutral_tier == str(tier), all_neutral_items))
item_img_cells = []
for item in items:
item_img_cells.append(ImageCell(img=await get_item_image(item.id)))
new_rows = grouper(item_img_cells, items_per_row)
for row in new_rows:
table.add_row(row)
footer_row = [ColorCell(color=discord_color1, height=20) for i in range(items_per_row)]
table.add_row(footer_row)
image = table.render()
fp = BytesIO()
image.save(fp, format="PNG")
fp.seek(0)
return fp
def get_poly_points(n, radius, origin=(0, 0), radius_percentages=None):
radii = [radius for i in range(n)]
if radius_percentages:
radii = [radius * radius_percentages[i] for i in range(n)]
    rot_start = -(math.pi / 2)  # start with the first vertex pointing straight up
return [
(math.cos(rot_start + th) * radii[j] + origin[0],
math.sin(rot_start + th) * radii[j] + origin[1])
for j, th in enumerate([i * (2 * math.pi) / n for i in range(n)])
]
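# worked example (illustrative): rot_start = -pi/2 puts the first vertex
# straight up from the origin in screen coordinates (y grows downward), so
# for n=4, radius=10 the first point is (0, -10) up to floating-point error
_px, _py = get_poly_points(4, 10)[0]
assert abs(_px) < 1e-9 and abs(_py + 10) < 1e-9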
def draw_poly_label(draw, point, center, text):
font = ImageFont.truetype(settings.resource("images/arial_unicode_bold.ttf"), 16)
font_size = font.getsize(text)
point = list(point)
if point[0] < center[0]:
point[0] -= font_size[0]
if point[1] < center[1]:
point[1] -= font_size[1]
if point[0] == center[0]:
point[0] -= font_size[0] / 2
if point[1] == center[1]:
point[1] -= font_size[1] / 2
draw.text(tuple(point), text, font=font, fill="#ffffff")
def draw_polygraph(values, labels):
size = (500, 500)
polygon_radius = 175
point_count = len(values)
center = (size[0] / 2, size[1] / 2)
image = Image.new('RGBA', size)
draw = ImageDraw.Draw(image)
draw.rectangle([0, 0, image.size[0], image.size[1]], fill=discord_color2)
points = get_poly_points(point_count, polygon_radius, center)
draw.polygon(points, fill="#2C2F33", outline="#111111")
for point in points:
draw.line((center[0], center[1], point[0], point[1]), fill="#111111")
for i in range(len(points)):
draw_poly_label(draw, points[i], center, labels[i])
image2 = Image.new('RGBA', size)
draw2 = ImageDraw.Draw(image2)
data_points = get_poly_points(point_count, polygon_radius, center, values)
draw2.polygon(data_points, fill="#FFDF0044", outline="#FFDF00")
for p in data_points:
dot_rad = 2
draw2.ellipse([(p[0] - dot_rad, p[1] - dot_rad), (p[0] + dot_rad, p[1] + dot_rad)], fill="#FFDF00")
image = paste_image(image, image2, 0, 0)
fp = BytesIO()
image.save(fp, format="PNG")
fp.seek(0)
return fp
async def draw_herostatstable(table_args, hero_stat_categories, leveled_hero_stats):
category = None
for cat in hero_stat_categories:
if any(stat["stat"] == table_args.stat for stat in cat["stats"]):
category = cat
break
if category is None:
raise UserError("Couldn't find referenced stat")
stats = category["stats"]
# sort / get data
hero_data = leveled_hero_stats[table_args.hero_level]
hero_data = sorted(hero_data, key=lambda hero: hero.get(table_args.stat), reverse=not table_args.reverse)
hero_data = hero_data[0:table_args.hero_count]
table = Table(border_size=10)
stat_highlight_color = discord_color1
table_background = discord_color2
table_border_color = discord_color1
header_row = [ TextCell("") ]
for stat in stats:
header_row.append(SlantedTextCell(
stat["name"],
font_size=20,
background=stat_highlight_color if stat["stat"] == table_args.stat else table_background,
border_color=table_border_color,
border_size=2,
rotation=45))
header_height = max(cell.height for cell in header_row)
padding_right = int(header_height / math.tan(header_row[-1].rotation_rad))
table.border_size[1] = padding_right
table.add_row(header_row)
i = 0
for hero in hero_data:
cell_background = table_background
#cell_background = stat_highlight_color if i % 2 else table_background
new_row = [ ImageCell(
img=await get_hero_icon(hero.get("id")),
padding=3,
border_color=table_border_color,
border_size=2,
background=table_background)
]
for stat in stats:
value = hero.get(stat["stat"])
if stat.get("display") == "resistance_percentage":
value = 100 * (1 - value)
if stat.get("display") == "int":
value = round(value)
value = f"{value:.2f}"
value = re.sub("\.0+$", "", value)
if stat.get("display") == "resistance_percentage":
value += "%"
new_row.append(TextCell(
value,
font_size=16,
padding=10,
border_color=table_border_color,
border_size=2,
background=stat_highlight_color if stat["stat"] == table_args.stat else cell_background))
table.add_row(new_row)
i += 1
image = table.render()
fp = BytesIO()
image.save(fp, format="PNG")
fp.seek(0)
return fp
# draws the recipe image for the given item
async def draw_itemrecipe(main_item, components, products):
item_ids = [ main_item.id ]
item_ids.extend(map(lambda i: i.id, components))
item_ids.extend(map(lambda i: i.id, products))
item_ids = "_".join(map(str, item_ids))
uri = f"dota_recipe:{item_ids}"
filename = httpgetter.cache.get_filename(uri)
if filename and not settings.debug:
return filename
filename = await httpgetter.cache.new(uri, "png")
line_ducking = 5 # how many pixels into the main item to hide the line
inner_padding = 60 # y padding between item rows
outer_padding = 10 # padding around the whole thing
item_size = (88, 64) # the size of an item
max_spacing = 175 # the max spacing in between item centers
max_items_per_row = 5 # helps determine default image width
max_items_per_row = max(max_items_per_row, len(components), len(products))
rows = 1
if components:
rows += 1
if products:
rows += 1
base_size = (max_items_per_row * item_size[0] + outer_padding * 2, (rows * (inner_padding + item_size[1])) - inner_padding + (2 * outer_padding))
base_image = Image.new('RGBA', base_size, (0, 0, 0, 0))
rows = []
if products:
rows.append(products)
rows.append([main_item])
if components:
rows.append(components)
row_points = []
# generate points
for i in range(len(rows)):
row = rows[i]
if len(row) == 1:
spacing = 0
start_x = int((base_size[0] / 2) - (item_size[0] / 2))
else:
spacing = int((base_size[0] - outer_padding * 2 - item_size[0]) / (len(row) - 1))
start_x = int(outer_padding)
if spacing > max_spacing:
start_x += int(((spacing - max_spacing)* (len(row) - 1)) / 2)
spacing = max_spacing
start_y = int(outer_padding + (i * (inner_padding + item_size[1])))
points = []
for j in range(len(row)):
x = start_x + (j * spacing)
y = start_y
points.append((x, y))
row_points.append(points)
# draw lines
draw = ImageDraw.Draw(base_image)
for i in range(len(rows) - 1):
for j in range(len(rows[i])):
is_main_first = len(rows[i]) < len(rows[i + 1])
for k in range(len(rows[i + 1])):
p1 = row_points[i][j]
p2 = row_points[i + 1][k]
p1 = (p1[0] + item_size[0] // 2, p1[1] + item_size[1])
p2 = (p2[0] + item_size[0] // 2, p2[1])
# p1 = (p1[0] + item_size[0] // 2, p1[1] + item_size[1] // 2)
# p2 = (p2[0] + item_size[0] // 2, p2[1] + item_size[1] // 2)
if is_main_first:
p1 = (p1[0], p1[1] - line_ducking)
color_item = rows[i + 1][k]
else:
p2 = (p2[0], p2[1] + line_ducking)
color_item = rows[i][j]
color = get_item_color(color_item, "#111111")
draw.line((p1[0], p1[1], p2[0], p2[1]), fill=color, width=3)
# paste images
for i in range(len(rows)):
row = rows[i]
for j in range(len(row)):
image = await get_item_image(row[j].id)
base_image.paste(image, row_points[i][j])
base_image = base_image.resize((base_size[0] // 2, base_size[1] // 2), Image.ANTIALIAS)
base_image.save(filename, format="PNG")
return filename
async def draw_heroabilities(abilities):
abilities = sorted(abilities, key=lambda a: a.slot)
table = Table(background=discord_color2)
for ability in abilities:
icon = await get_url_image(f"{vpkurl}{ability.icon}")
icon = icon.resize((icon.size[0] // 2, icon.size[1] // 2), Image.ANTIALIAS)
row = [
ImageCell(img=icon),
TextCell(ability.localized_name, font_size=30, padding=10),
TextCell("")
]
if ability.scepter_grants:
aghs_icon = f"{vpkurl}/panorama/images/hud/reborn/aghsstatus_scepter_on_psd.png"
aghs_icon = await get_url_image(aghs_icon)
aghs_icon = aghs_icon.resize(icon.size, Image.ANTIALIAS)
row[2] = ImageCell(img=aghs_icon)
elif ability.shard_grants:
aghs_icon = f"{vpkurl}/panorama/images/hud/reborn/aghsstatus_shard_on_psd.png"
aghs_icon = await get_url_image(aghs_icon)
# aghs_icon = aghs_icon.resize(icon.size, Image.ANTIALIAS)
row[2] = ImageCell(img=aghs_icon, padding_top=20)
table.add_row(row)
image = table.render()
fp = BytesIO()
image.save(fp, format="PNG")
fp.seek(0)
return fp
async def add_player_ability_upgrades_row(table, player):
abilities = player.get("ability_upgrades_arr")
if abilities is None:
abilities = []
row = [
ColorCell(width=5, color=("green" if player["isRadiant"] else "red")),
ImageCell(img=await get_hero_image(player["hero_id"]), height=48),
TextCell(player.get("personaname", "Anonymous"))
]
empty_levels = [ 17, 19, 21, 22, 23, 24 ] # levels at which there are no upgrades
for i in range(1, 26):
if len(abilities) == 0:
continue
if i in empty_levels:
row.append(TextCell(""))
continue
ability = abilities.pop(0)
row.append(ImageCell(img=await get_ability_image(ability, player["hero_id"]), height=48))
table.add_row(row)
# draws a table of the ability upgrades for each hero in the match.
async def draw_match_ability_upgrades(match):
if match["players"][0].get("ability_upgrades_arr") is None:
raise UserError("That match is too old, it doesn't have ability data")
table = Table(background=discord_color2)
# Header
headers = [
TextCell("", padding=0),
TextCell(""),
TextCell("")
]
for i in range(1, 26):
headers.append(TextCell(f"{i}", width=48, horizontal_align="center"))
table.add_row(headers)
for cell in table.rows[0]:
cell.background = discord_color1
# Do players
for player in match["players"]:
if player['isRadiant']:
await add_player_ability_upgrades_row(table, player)
table.add_row([ColorCell(color=discord_color1, height=5) for i in range(len(headers))])
for player in match["players"]:
if not player['isRadiant']:
await add_player_ability_upgrades_row(table, player)
table_image = table.render()
table_border = 10
image = Image.new('RGBA', (table_image.size[0] + (table_border * 2), table_image.size[1] + (table_border * 2)))
draw = ImageDraw.Draw(image)
draw.rectangle([0, 0, image.size[0], image.size[1]], fill=discord_color1)
image.paste(table_image, (table_border, table_border))
fp = BytesIO()
image.save(fp, format="PNG")
fp.seek(0)
return fp
| |
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.extensions import portbindings
from neutron.plugins.ml2 import driver_api as api
from neutron.tests import base
NETWORK_ID = "fake_network"
PORT_ID = "fake_port"
class FakeNetworkContext(api.NetworkContext):
def __init__(self, segments):
self._network_segments = segments
@property
def current(self):
return {'id': NETWORK_ID}
@property
def original(self):
return None
@property
def network_segments(self):
return self._network_segments
class FakePortContext(api.PortContext):
def __init__(self, agent_type, agents, segments,
vnic_type=portbindings.VNIC_NORMAL):
self._agent_type = agent_type
self._agents = agents
self._network_context = FakeNetworkContext(segments)
self._bound_vnic_type = vnic_type
self._bound_segment_id = None
self._bound_vif_type = None
self._bound_vif_details = None
@property
def current(self):
return {'id': PORT_ID,
'binding:vnic_type': self._bound_vnic_type}
@property
def original(self):
return None
@property
def network(self):
return self._network_context
@property
def bound_segment(self):
if self._bound_segment_id:
for segment in self._network_context.network_segments:
if segment[api.ID] == self._bound_segment_id:
return segment
@property
def original_bound_segment(self):
return None
@property
def bound_driver(self):
return None
@property
def original_bound_driver(self):
return None
def host_agents(self, agent_type):
if agent_type == self._agent_type:
return self._agents
else:
return []
def set_binding(self, segment_id, vif_type, vif_details):
self._bound_segment_id = segment_id
self._bound_vif_type = vif_type
self._bound_vif_details = vif_details
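# Illustrative sketch only (not part of the neutron tree): the fakes above let
# a mechanism driver's bind_port() walk context.network.network_segments,
# consult host_agents(), and record its result through set_binding() exactly
# as it would against the real ML2 contexts. A toy driver that binds the
# first 'local' segment:
class _ToyMechanismDriver(object):
    def bind_port(self, context):
        for segment in context.network.network_segments:
            if segment[api.NETWORK_TYPE] == 'local':
                context.set_binding(segment[api.ID], 'toy_vif',
                                    {portbindings.CAP_PORT_FILTER: False})
                return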
class AgentMechanismBaseTestCase(base.BaseTestCase):
    # The following must be overridden for the specific mechanism
    # driver being tested:
VIF_TYPE = None
CAP_PORT_FILTER = None
AGENT_TYPE = None
AGENTS = None
AGENTS_DEAD = None
AGENTS_BAD = None
def _check_unbound(self, context):
self.assertIsNone(context._bound_segment_id)
self.assertIsNone(context._bound_vif_type)
self.assertIsNone(context._bound_vif_details)
def _check_bound(self, context, segment):
self.assertEqual(context._bound_segment_id, segment[api.ID])
self.assertEqual(context._bound_vif_type, self.VIF_TYPE)
vif_details = context._bound_vif_details
self.assertIsNotNone(vif_details)
self.assertEqual(vif_details[portbindings.CAP_PORT_FILTER],
self.CAP_PORT_FILTER)
class AgentMechanismGenericTestCase(AgentMechanismBaseTestCase):
UNKNOWN_TYPE_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'}]
def test_unknown_type(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.UNKNOWN_TYPE_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismLocalTestCase(AgentMechanismBaseTestCase):
LOCAL_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'},
{api.ID: 'local_segment_id',
api.NETWORK_TYPE: 'local'}]
def test_type_local(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.LOCAL_SEGMENTS)
self.driver.bind_port(context)
self._check_bound(context, self.LOCAL_SEGMENTS[1])
self.assertTrue(self.driver.validate_port_binding(context))
self.driver.unbind_port(context)
def test_type_local_dead(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_DEAD,
self.LOCAL_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismFlatTestCase(AgentMechanismBaseTestCase):
FLAT_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'},
{api.ID: 'flat_segment_id',
api.NETWORK_TYPE: 'flat',
api.PHYSICAL_NETWORK: 'fake_physical_network'}]
def test_type_flat(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.FLAT_SEGMENTS)
self.driver.bind_port(context)
self._check_bound(context, self.FLAT_SEGMENTS[1])
self.assertTrue(self.driver.validate_port_binding(context))
self.driver.unbind_port(context)
def test_type_flat_bad(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.FLAT_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismVlanTestCase(AgentMechanismBaseTestCase):
VLAN_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'},
{api.ID: 'vlan_segment_id',
api.NETWORK_TYPE: 'vlan',
api.PHYSICAL_NETWORK: 'fake_physical_network',
api.SEGMENTATION_ID: 1234}]
def test_type_vlan(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.VLAN_SEGMENTS)
self.driver.bind_port(context)
self._check_bound(context, self.VLAN_SEGMENTS[1])
self.assertTrue(self.driver.validate_port_binding(context))
self.driver.unbind_port(context)
def test_type_vlan_bad(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.VLAN_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
class AgentMechanismGreTestCase(AgentMechanismBaseTestCase):
GRE_SEGMENTS = [{api.ID: 'unknown_segment_id',
api.NETWORK_TYPE: 'no_such_type'},
{api.ID: 'gre_segment_id',
api.NETWORK_TYPE: 'gre',
api.SEGMENTATION_ID: 1234}]
def test_type_gre(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS,
self.GRE_SEGMENTS)
self.driver.bind_port(context)
self._check_bound(context, self.GRE_SEGMENTS[1])
self.assertTrue(self.driver.validate_port_binding(context))
self.driver.unbind_port(context)
def test_type_gre_bad(self):
context = FakePortContext(self.AGENT_TYPE,
self.AGENTS_BAD,
self.GRE_SEGMENTS)
self.driver.bind_port(context)
self._check_unbound(context)
| |
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os
import threading
import logging
import sickbeard
from sickbeard import classes
# number of log files to keep
NUM_LOGS = 3
# log size in bytes
LOG_SIZE = 10000000 # 10 megs
ERROR = logging.ERROR
WARNING = logging.WARNING
MESSAGE = logging.INFO
DEBUG = logging.DEBUG
DB = 5
reverseNames = {u'ERROR': ERROR,
u'WARNING': WARNING,
u'INFO': MESSAGE,
u'DEBUG': DEBUG,
u'DB' : DB}
class SBRotatingLogHandler(object):
def __init__(self, log_file, num_files, num_bytes):
self.num_files = num_files
self.num_bytes = num_bytes
self.log_file = log_file
self.cur_handler = None
self.writes_since_check = 0
self.log_lock = threading.Lock()
def initLogging(self, consoleLogging=True):
self.log_file = os.path.join(sickbeard.LOG_DIR, self.log_file)
self.cur_handler = self._config_handler()
logging.addLevelName(5,'DB')
logging.getLogger('sickbeard').addHandler(self.cur_handler)
logging.getLogger('subliminal').addHandler(self.cur_handler)
logging.getLogger('imdbpy').addHandler(self.cur_handler)
        # define a handler which writes INFO messages or higher to sys.stderr
if consoleLogging:
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
console.setFormatter(DispatchingFormatter({'sickbeard' : logging.Formatter('%(asctime)s %(levelname)s::%(message)s', '%H:%M:%S'),
'subliminal' : logging.Formatter('%(asctime)s %(levelname)s::SUBLIMINAL :: %(message)s', '%H:%M:%S'),
'imdbpy' : logging.Formatter('%(asctime)s %(levelname)s::IMDBPY :: %(message)s', '%H:%M:%S')
},
logging.Formatter('%(message)s'),))
# add the handler to the root logger
logging.getLogger('sickbeard').addHandler(console)
logging.getLogger('subliminal').addHandler(console)
logging.getLogger('imdbpy').addHandler(console)
logging.getLogger('sickbeard').setLevel(DB)
logging.getLogger('subliminal').setLevel(logging.WARNING)
logging.getLogger('imdbpy').setLevel(logging.WARNING)
def _config_handler(self):
"""
Configure a file handler to log at file_name and return it.
"""
file_handler = logging.FileHandler(self.log_file)
file_handler.setLevel(DB)
file_handler.setFormatter(DispatchingFormatter({'sickbeard' : logging.Formatter('%(asctime)s %(levelname)-8s %(message)s', '%b-%d %H:%M:%S'),
'subliminal' : logging.Formatter('%(asctime)s %(levelname)-8s SUBLIMINAL :: %(message)s', '%b-%d %H:%M:%S'),
'imdbpy' : logging.Formatter('%(asctime)s %(levelname)-8s IMDBPY :: %(message)s', '%b-%d %H:%M:%S')
},
logging.Formatter('%(message)s'),))
return file_handler
def _log_file_name(self, i):
"""
        Returns a numbered log file name depending on i. If i == 0 it just uses the
        base log file name; otherwise it appends i as an extra extension
        (blah.log.3 for i == 3)
        i: Log number to use
"""
return self.log_file + ('.' + str(i) if i else '')
def _num_logs(self):
"""
Scans the log folder and figures out how many log files there are already on disk
Returns: The number of the last used file (eg. mylog.log.3 would return 3). If there are no logs it returns -1
"""
cur_log = 0
while os.path.isfile(self._log_file_name(cur_log)):
cur_log += 1
return cur_log - 1
def _rotate_logs(self):
sb_logger = logging.getLogger('sickbeard')
subli_logger = logging.getLogger('subliminal')
# delete the old handler
if self.cur_handler:
self.cur_handler.flush()
self.cur_handler.close()
sb_logger.removeHandler(self.cur_handler)
subli_logger.removeHandler(self.cur_handler)
# rename or delete all the old log files
for i in range(self._num_logs(), -1, -1):
cur_file_name = self._log_file_name(i)
try:
if i >= NUM_LOGS:
os.remove(cur_file_name)
else:
os.rename(cur_file_name, self._log_file_name(i+1))
            except OSError:  # WindowsError, which only exists on Windows, subclasses OSError
                pass
# the new log handler will always be on the un-numbered .log file
new_file_handler = self._config_handler()
self.cur_handler = new_file_handler
sb_logger.addHandler(new_file_handler)
subli_logger.addHandler(new_file_handler)
def log(self, toLog, logLevel=MESSAGE):
with self.log_lock:
# check the size and see if we need to rotate
if self.writes_since_check >= 10:
if os.path.isfile(self.log_file) and os.path.getsize(self.log_file) >= LOG_SIZE:
self._rotate_logs()
self.writes_since_check = 0
else:
self.writes_since_check += 1
meThread = threading.currentThread().getName()
message = meThread + u" :: " + toLog
out_line = message.encode('utf-8')
sb_logger = logging.getLogger('sickbeard')
setattr(sb_logger, 'db', lambda *args: sb_logger.log(DB, *args))
try:
if logLevel == DEBUG:
sb_logger.debug(out_line)
elif logLevel == MESSAGE:
sb_logger.info(out_line)
elif logLevel == WARNING:
sb_logger.warning(out_line)
elif logLevel == ERROR:
sb_logger.error(out_line)
# add errors to the UI logger
classes.ErrorViewer.add(classes.UIError(message))
elif logLevel == DB:
sb_logger.db(out_line)
else:
sb_logger.log(logLevel, out_line)
except ValueError:
pass
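# DispatchingFormatter duck-types logging.Formatter: handlers only ever call
# .format(record), so dispatching on record.name (the logger's name, e.g.
# 'sickbeard' or 'subliminal') is enough to give each logger its own line
# layout while everything else falls back to the default formatter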
class DispatchingFormatter:
def __init__(self, formatters, default_formatter):
self._formatters = formatters
self._default_formatter = default_formatter
def format(self, record):
formatter = self._formatters.get(record.name, self._default_formatter)
return formatter.format(record)
sb_log_instance = SBRotatingLogHandler('sickbeard.log', NUM_LOGS, LOG_SIZE)
def log(toLog, logLevel=MESSAGE):
sb_log_instance.log(toLog, logLevel)
| |
# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import filecmp
import os
import random
import tempfile
import time
import sys
import testtools
import mock
import mox
import glanceclient.exc
from oslo.config import cfg
from nova import context
from nova import exception
from nova.image import glance
from nova import test
from nova.tests.api.openstack import fakes
from nova.tests.glance import stubs as glance_stubs
from nova.tests import matchers
from nova import utils
import nova.virt.libvirt.utils as lv_utils
CONF = cfg.CONF
class NullWriter(object):
"""Used to test ImageService.get which takes a writer object."""
def write(self, *arg, **kwargs):
pass
class TestGlanceSerializer(test.NoDBTestCase):
def test_serialize(self):
metadata = {'name': 'image1',
'is_public': True,
'foo': 'bar',
'properties': {
'prop1': 'propvalue1',
'mappings': [
{'virtual': 'aaa',
'device': 'bbb'},
{'virtual': 'xxx',
'device': 'yyy'}],
'block_device_mapping': [
{'virtual_device': 'fake',
'device_name': '/dev/fake'},
{'virtual_device': 'ephemeral0',
'device_name': '/dev/fake0'}]}}
converted_expected = {
'name': 'image1',
'is_public': True,
'foo': 'bar',
'properties': {
'prop1': 'propvalue1',
'mappings':
'[{"device": "bbb", "virtual": "aaa"}, '
'{"device": "yyy", "virtual": "xxx"}]',
'block_device_mapping':
'[{"virtual_device": "fake", "device_name": "/dev/fake"}, '
'{"virtual_device": "ephemeral0", '
'"device_name": "/dev/fake0"}]'}}
converted = glance._convert_to_string(metadata)
self.assertEqual(converted, converted_expected)
self.assertEqual(glance._convert_from_string(converted), metadata)
class TestGlanceImageService(test.NoDBTestCase):
"""Tests the Glance image service.
At a high level, the translations involved are:
    1. Glance -> ImageService - This is needed so we can support
       multiple ImageServices (Glance, Local, etc)
    2. ImageService -> API - This is needed so we can support multiple
       APIs (OpenStack, EC2)
"""
NOW_GLANCE_OLD_FORMAT = "2010-10-11T10:30:22"
NOW_GLANCE_FORMAT = "2010-10-11T10:30:22.000000"
class tzinfo(datetime.tzinfo):
@staticmethod
def utcoffset(*args, **kwargs):
return datetime.timedelta()
NOW_DATETIME = datetime.datetime(2010, 10, 11, 10, 30, 22, tzinfo=tzinfo())
def setUp(self):
super(TestGlanceImageService, self).setUp()
fakes.stub_out_compute_api_snapshot(self.stubs)
self.client = glance_stubs.StubGlanceClient()
self.service = self._create_image_service(self.client)
self.context = context.RequestContext('fake', 'fake', auth_token=True)
self.mox = mox.Mox()
self.files_to_clean = []
def tearDown(self):
super(TestGlanceImageService, self).tearDown()
self.mox.UnsetStubs()
for f in self.files_to_clean:
try:
os.unlink(f)
except os.error:
pass
def _get_tempfile(self):
(outfd, config_filename) = tempfile.mkstemp(prefix='nova_glance_tests')
self.files_to_clean.append(config_filename)
return (outfd, config_filename)
def _create_image_service(self, client):
def _fake_create_glance_client(context, host, port, use_ssl, version):
return client
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client_wrapper = glance.GlanceClientWrapper(
'fake', 'fake_host', 9292)
return glance.GlanceImageService(client=client_wrapper)
@staticmethod
def _make_fixture(**kwargs):
fixture = {'name': None,
'properties': {},
'status': None,
'is_public': None}
fixture.update(kwargs)
return fixture
def _make_datetime_fixture(self):
return self._make_fixture(created_at=self.NOW_GLANCE_FORMAT,
updated_at=self.NOW_GLANCE_FORMAT,
deleted_at=self.NOW_GLANCE_FORMAT)
def test_create_with_instance_id(self):
# Ensure instance_id is persisted as an image-property.
fixture = {'name': 'test image',
'is_public': False,
'properties': {'instance_id': '42', 'user_id': 'fake'}}
image_id = self.service.create(self.context, fixture)['id']
image_meta = self.service.show(self.context, image_id)
expected = {
'id': image_id,
'name': 'test image',
'is_public': False,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {'instance_id': '42', 'user_id': 'fake'},
'owner': None,
}
self.assertThat(image_meta, matchers.DictMatches(expected))
image_metas = self.service.detail(self.context)
self.assertThat(image_metas[0], matchers.DictMatches(expected))
def test_create_without_instance_id(self):
"""Ensure we can create an image without having to specify an
instance_id. Public images are an example of an image not tied to an
instance.
"""
fixture = {'name': 'test image', 'is_public': False}
image_id = self.service.create(self.context, fixture)['id']
expected = {
'id': image_id,
'name': 'test image',
'is_public': False,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {},
'owner': None,
}
actual = self.service.show(self.context, image_id)
self.assertThat(actual, matchers.DictMatches(expected))
def test_create(self):
fixture = self._make_fixture(name='test image')
num_images = len(self.service.detail(self.context))
image_id = self.service.create(self.context, fixture)['id']
self.assertIsNotNone(image_id)
self.assertEqual(num_images + 1,
len(self.service.detail(self.context)))
def test_create_and_show_non_existing_image(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
self.assertIsNotNone(image_id)
self.assertRaises(exception.ImageNotFound,
self.service.show,
self.context,
'bad image id')
def test_show_makes_datetimes(self):
fixture = self._make_datetime_fixture()
image_id = self.service.create(self.context, fixture)['id']
image_meta = self.service.show(self.context, image_id)
self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
def test_detail_makes_datetimes(self):
fixture = self._make_datetime_fixture()
self.service.create(self.context, fixture)
image_meta = self.service.detail(self.context)[0]
self.assertEqual(image_meta['created_at'], self.NOW_DATETIME)
self.assertEqual(image_meta['updated_at'], self.NOW_DATETIME)
def test_page_size(self):
with mock.patch.object(glance.GlanceClientWrapper, 'call') as a_mock:
self.service.detail(self.context, page_size=5)
self.assertEqual(a_mock.called, True)
a_mock.assert_called_with(self.context, 1, 'list',
filters={'is_public': 'none'},
page_size=5)
def test_update(self):
fixture = self._make_fixture(name='test image')
image = self.service.create(self.context, fixture)
image_id = image['id']
fixture['name'] = 'new image name'
self.service.update(self.context, image_id, fixture)
new_image_data = self.service.show(self.context, image_id)
self.assertEqual('new image name', new_image_data['name'])
def test_delete(self):
fixture1 = self._make_fixture(name='test image 1')
fixture2 = self._make_fixture(name='test image 2')
fixtures = [fixture1, fixture2]
num_images = len(self.service.detail(self.context))
self.assertEqual(0, num_images)
ids = []
for fixture in fixtures:
new_id = self.service.create(self.context, fixture)['id']
ids.append(new_id)
num_images = len(self.service.detail(self.context))
self.assertEqual(2, num_images)
self.service.delete(self.context, ids[0])
# When you delete an image from glance, it sets the status to DELETED
# and doesn't actually remove the image.
# Check the image is still there.
num_images = len(self.service.detail(self.context))
self.assertEqual(2, num_images)
# Check the image is marked as deleted.
num_images = reduce(lambda x, y: x + (0 if y['deleted'] else 1),
self.service.detail(self.context), 0)
self.assertEqual(1, num_images)
def test_download_with_retries(self):
tries = [0]
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that fails the first time, then succeeds."""
def get(self, image_id):
if tries[0] == 0:
tries[0] = 1
raise glanceclient.exc.ServiceUnavailable('')
else:
return {}
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
writer = NullWriter()
# When retries are disabled, we should get an exception
self.flags(glance_num_retries=0)
self.assertRaises(exception.GlanceConnectionFailed,
service.download, self.context, image_id, data=writer)
# Now lets enable retries. No exception should happen now.
tries = [0]
self.flags(glance_num_retries=1)
service.download(self.context, image_id, data=writer)
def test_download_file_url(self):
self.flags(allowed_direct_url_schemes=['file'])
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that returns a file url."""
(outfd, s_tmpfname) = tempfile.mkstemp(prefix='directURLsrc')
outf = os.fdopen(outfd, 'w')
inf = open('/dev/urandom', 'r')
for i in range(10):
_data = inf.read(1024)
outf.write(_data)
outf.close()
def get(self, image_id):
return type('GlanceTestDirectUrlMeta', (object,),
                        {'direct_url': 'file://%s' % self.s_tmpfname})
client = MyGlanceStubClient()
(outfd, tmpfname) = tempfile.mkstemp(prefix='directURLdst')
os.close(outfd)
service = self._create_image_service(client)
image_id = 1 # doesn't matter
service.download(self.context, image_id, dst_path=tmpfname)
# compare the two files
rc = filecmp.cmp(tmpfname, client.s_tmpfname)
self.assertTrue(rc, "The file %s and %s should be the same" %
(tmpfname, client.s_tmpfname))
os.remove(client.s_tmpfname)
os.remove(tmpfname)
def test_download_module_filesystem_match(self):
mountpoint = '/'
fs_id = 'someid'
desc = {'id': fs_id, 'mountpoint': mountpoint}
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
outer_test = self
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [
{'url': 'file:///' + os.devnull,
'metadata': desc}]})
def data(self, image_id):
self.outer_test.fail('This should not be called because the '
'transfer module should have intercepted '
'it.')
self.mox.StubOutWithMock(lv_utils, 'copy_image')
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
self.flags(allowed_direct_url_schemes=['file'])
self.flags(group='image_file_url', filesystems=['gluster'])
service = self._create_image_service(client)
#NOTE(Jbresnah) The following options must be added after the module
# has added the specific groups.
self.flags(group='image_file_url:gluster', id=fs_id)
self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
dest_file = os.devnull
lv_utils.copy_image(mox.IgnoreArg(), dest_file)
self.mox.ReplayAll()
service.download(self.context, image_id, dst_path=dest_file)
self.mox.VerifyAll()
def test_download_module_no_filesystem_match(self):
mountpoint = '/'
fs_id = 'someid'
desc = {'id': fs_id, 'mountpoint': mountpoint}
some_data = "sfxvdwjer"
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
outer_test = self
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [
{'url': 'file:///' + os.devnull,
'metadata': desc}]})
def data(self, image_id):
return some_data
def _fake_copyfile(source, dest):
self.fail('This should not be called because a match should not '
'have been found.')
self.stubs.Set(lv_utils, 'copy_image', _fake_copyfile)
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
self.flags(allowed_direct_url_schemes=['file'])
self.flags(group='image_file_url', filesystems=['gluster'])
service = self._create_image_service(client)
#NOTE(Jbresnah) The following options must be added after the module
# has added the specific groups.
self.flags(group='image_file_url:gluster', id='someotherid')
self.flags(group='image_file_url:gluster', mountpoint=mountpoint)
service.download(self.context, image_id,
dst_path=os.devnull,
data=None)
def test_download_module_mountpoints(self):
glance_mount = '/glance/mount/point'
_, data_filename = self._get_tempfile()
nova_mount = os.path.dirname(data_filename)
source_path = os.path.basename(data_filename)
file_url = 'file://%s' % os.path.join(glance_mount, source_path)
file_system_id = 'test_FS_ID'
file_system_desc = {'id': file_system_id, 'mountpoint': glance_mount}
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
outer_test = self
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [{'url': file_url,
'metadata': file_system_desc}]})
def data(self, image_id):
self.outer_test.fail('This should not be called because the '
'transfer module should have intercepted '
'it.')
self.copy_called = False
def _fake_copyfile(source, dest):
self.assertEqual(source, data_filename)
self.copy_called = True
self.stubs.Set(lv_utils, 'copy_image', _fake_copyfile)
self.flags(allowed_direct_url_schemes=['file'])
self.flags(group='image_file_url', filesystems=['gluster'])
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
service = self._create_image_service(client)
self.flags(group='image_file_url:gluster', id=file_system_id)
self.flags(group='image_file_url:gluster', mountpoint=nova_mount)
service.download(self.context, image_id, dst_path=os.devnull)
self.assertTrue(self.copy_called)
def test_download_module_file_bad_module(self):
_, data_filename = self._get_tempfile()
file_url = 'applesauce://%s' % data_filename
data_called = False
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
data_called = False
def get(self, image_id):
return type('GlanceLocations', (object,),
{'locations': [{'url': file_url,
'metadata': {}}]})
def data(self, image_id):
self.data_called = True
return "someData"
self.flags(allowed_direct_url_schemes=['applesauce'])
self.mox.StubOutWithMock(lv_utils, 'copy_image')
self.flags(allowed_direct_url_schemes=['file'])
image_id = 1 # doesn't matter
client = MyGlanceStubClient()
service = self._create_image_service(client)
# by not calling copyfileobj in the file download module we verify
# that the requirements were not met for its use
self.mox.ReplayAll()
service.download(self.context, image_id, dst_path=os.devnull)
self.mox.VerifyAll()
self.assertTrue(client.data_called)
def test_client_forbidden_converts_to_imagenotauthed(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a Forbidden exception."""
def get(self, image_id):
raise glanceclient.exc.Forbidden(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotAuthorized, service.download,
self.context, image_id, dst_path=os.devnull)
def test_client_httpforbidden_converts_to_imagenotauthed(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a HTTPForbidden exception."""
def get(self, image_id):
raise glanceclient.exc.HTTPForbidden(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotAuthorized, service.download,
self.context, image_id, dst_path=os.devnull)
def test_client_notfound_converts_to_imagenotfound(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a NotFound exception."""
def get(self, image_id):
raise glanceclient.exc.NotFound(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotFound, service.download,
self.context, image_id, dst_path=os.devnull)
def test_client_httpnotfound_converts_to_imagenotfound(self):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that raises a HTTPNotFound exception."""
def get(self, image_id):
raise glanceclient.exc.HTTPNotFound(image_id)
client = MyGlanceStubClient()
service = self._create_image_service(client)
image_id = 1 # doesn't matter
self.assertRaises(exception.ImageNotFound, service.download,
self.context, image_id, dst_path=os.devnull)
def test_glance_client_image_id(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
(service, same_id) = glance.get_remote_image_service(
self.context, image_id)
self.assertEqual(same_id, image_id)
def test_glance_client_image_ref(self):
fixture = self._make_fixture(name='test image')
image_id = self.service.create(self.context, fixture)['id']
image_url = 'http://something-less-likely/%s' % image_id
(service, same_id) = glance.get_remote_image_service(
self.context, image_url)
self.assertEqual(same_id, image_id)
self.assertEqual(service._client.host, 'something-less-likely')
def test_extracting_missing_attributes(self):
"""Verify behavior from glance objects that are missing attributes
This fakes the image class and is missing attribute as the client can
return if they're not set in the database.
"""
class MyFakeGlanceImage(glance_stubs.FakeImage):
def __init__(self, metadata):
IMAGE_ATTRIBUTES = ['size', 'owner', 'id', 'created_at',
'updated_at', 'status', 'min_disk',
'min_ram', 'is_public']
raw = dict.fromkeys(IMAGE_ATTRIBUTES)
raw.update(metadata)
self.__dict__['raw'] = raw
metadata = {
'id': 1,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
}
image = MyFakeGlanceImage(metadata)
observed = glance._extract_attributes(image)
expected = {
'id': 1,
'name': None,
'is_public': None,
'size': None,
'min_disk': None,
'min_ram': None,
'disk_format': None,
'container_format': None,
'checksum': None,
'created_at': self.NOW_DATETIME,
'updated_at': self.NOW_DATETIME,
'deleted_at': None,
'deleted': None,
'status': None,
'properties': {},
'owner': None,
}
self.assertEqual(expected, observed)
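# Helper for the retry tests below: returns a stub client whose get()
# raises ServiceUnavailable on the first call, succeeds afterwards, and
# records the attempt count in the caller-supplied info dict.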
def _create_failing_glance_client(info):
class MyGlanceStubClient(glance_stubs.StubGlanceClient):
"""A client that fails the first time, then succeeds."""
def get(self, image_id):
info['num_calls'] += 1
if info['num_calls'] == 1:
raise glanceclient.exc.ServiceUnavailable('')
return {}
return MyGlanceStubClient()
class TestIsImageAvailable(test.NoDBTestCase):
"""Tests the internal _is_image_available function."""
class ImageSpecV2(object):
visibility = None
properties = None
class ImageSpecV1(object):
is_public = None
properties = None
def test_auth_token_override(self):
ctx = mock.MagicMock(auth_token=True)
img = mock.MagicMock()
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
img.assert_not_called()
def test_admin_override(self):
ctx = mock.MagicMock(auth_token=False, is_admin=True)
img = mock.MagicMock()
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
img.assert_not_called()
def test_v2_visibility(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False)
# We emulate warlock validation that throws an AttributeError
# if you try to call is_public on an image model returned by
# a call to V2 image.get(). Here, the ImageSpecV2 does not have
# an is_public attribute and MagicMock will throw an AttributeError.
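# (mock semantics: spec=ImageSpecV2 limits the mock to 'visibility' and
# 'properties', so accessing img.is_public raises AttributeError just
# like a real V2 image model would)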
img = mock.MagicMock(visibility='PUBLIC',
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_v1_is_public(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False)
img = mock.MagicMock(is_public=True,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_project_is_owner(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
'owner_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_project_context_matches_project_prop(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
'project_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
def test_no_user_in_props(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
project_id='123')
props = {
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertFalse(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertFalse(res)
def test_user_matches_context(self):
ctx = mock.MagicMock(auth_token=False, is_admin=False,
user_id='123')
props = {
'user_id': '123'
}
img = mock.MagicMock(visibility='private', properties=props,
spec=TestIsImageAvailable.ImageSpecV2)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
ctx.reset_mock()
img = mock.MagicMock(is_public=False, properties=props,
spec=TestIsImageAvailable.ImageSpecV1)
res = glance._is_image_available(ctx, img)
self.assertTrue(res)
class TestShow(test.NoDBTestCase):
"""Tests the show method of the GlanceImageService."""
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_success(self, is_avail_mock, trans_from_mock):
is_avail_mock.return_value = True
trans_from_mock.return_value = mock.sentinel.trans_from
client = mock.MagicMock()
client.call.return_value = mock.sentinel.images_0
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
info = service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'get',
mock.sentinel.image_id)
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
trans_from_mock.assert_called_once_with(mock.sentinel.images_0)
self.assertEqual(mock.sentinel.trans_from, info)
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_not_available(self, is_avail_mock, trans_from_mock):
is_avail_mock.return_value = False
client = mock.MagicMock()
client.call.return_value = mock.sentinel.images_0
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
with testtools.ExpectedException(exception.ImageNotFound):
service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'get',
mock.sentinel.image_id)
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
trans_from_mock.assert_not_called()
@mock.patch('nova.image.glance._reraise_translated_image_exception')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_show_client_failure(self, is_avail_mock, trans_from_mock,
reraise_mock):
raised = exception.ImageNotAuthorized(image_id=123)
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.Forbidden
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageService(client)
with testtools.ExpectedException(exception.ImageNotAuthorized):
service.show(ctx, mock.sentinel.image_id)
client.call.assert_called_once_with(ctx, 1, 'get',
mock.sentinel.image_id)
is_avail_mock.assert_not_called()
trans_from_mock.assert_not_called()
reraise_mock.assert_called_once_with(mock.sentinel.image_id)
@mock.patch('nova.image.glance._is_image_available')
def test_show_queued_image_without_some_attrs(self, is_avail_mock):
is_avail_mock.return_value = True
client = mock.MagicMock()
# fake image cls without disk_format, container_format, name attributes
class fake_image_cls(object):
id = 'b31aa5dd-f07a-4748-8f15-398346887584'
deleted = False
protected = False
min_disk = 0
created_at = '2014-05-20T08:16:48'
size = 0
status = 'queued'
is_public = False
min_ram = 0
owner = '980ec4870033453ead65c0470a78b8a8'
updated_at = '2014-05-20T08:16:48'
glance_image = fake_image_cls()
client.call.return_value = glance_image
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
image_info = service.show(ctx, glance_image.id)
client.call.assert_called_once_with(ctx, 1, 'get',
glance_image.id)
NOVA_IMAGE_ATTRIBUTES = set(['size', 'disk_format', 'owner',
'container_format', 'status', 'id',
'name', 'created_at', 'updated_at',
'deleted', 'deleted_at', 'checksum',
'min_disk', 'min_ram', 'is_public',
'properties'])
self.assertEqual(NOVA_IMAGE_ATTRIBUTES, set(image_info.keys()))
class TestDetail(test.NoDBTestCase):
"""Tests the show method of the GlanceImageService."""
@mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_success_available(self, is_avail_mock, trans_from_mock,
ext_query_mock):
params = {}
is_avail_mock.return_value = True
ext_query_mock.return_value = params
trans_from_mock.return_value = mock.sentinel.trans_from
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
images = service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 1, 'list')
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
trans_from_mock.assert_called_once_with(mock.sentinel.images_0)
self.assertEqual([mock.sentinel.trans_from], images)
@mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_success_unavailable(self, is_avail_mock, trans_from_mock,
ext_query_mock):
params = {}
is_avail_mock.return_value = False
ext_query_mock.return_value = params
trans_from_mock.return_value = mock.sentinel.trans_from
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
images = service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 1, 'list')
is_avail_mock.assert_called_once_with(ctx, mock.sentinel.images_0)
trans_from_mock.assert_not_called()
self.assertEqual([], images)
@mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_params_passed(self, is_avail_mock, _trans_from_mock,
ext_query_mock):
params = dict(limit=10)
ext_query_mock.return_value = params
client = mock.MagicMock()
client.call.return_value = [mock.sentinel.images_0]
ctx = mock.sentinel.ctx
service = glance.GlanceImageService(client)
images = service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 1, 'list', limit=10)
@mock.patch('nova.image.glance._reraise_translated_exception')
@mock.patch('nova.image.glance._extract_query_params')
@mock.patch('nova.image.glance._translate_from_glance')
@mock.patch('nova.image.glance._is_image_available')
def test_detail_client_failure(self, is_avail_mock, trans_from_mock,
ext_query_mock, reraise_mock):
params = {}
ext_query_mock.return_value = params
raised = exception.NotAuthorized()
client = mock.MagicMock()
client.call.side_effect = glanceclient.exc.Forbidden
ctx = mock.sentinel.ctx
reraise_mock.side_effect = raised
service = glance.GlanceImageService(client)
with testtools.ExpectedException(exception.NotAuthorized):
service.detail(ctx, **params)
client.call.assert_called_once_with(ctx, 1, 'list')
is_avail_mock.assert_not_called()
trans_from_mock.assert_not_called()
reraise_mock.assert_called_once_with()
class TestGlanceClientWrapper(test.NoDBTestCase):
def setUp(self):
super(TestGlanceClientWrapper, self).setUp()
# host1 has no scheme, so it defaults to http
self.flags(glance_api_servers=['host1:9292', 'https://host2:9293',
'http://host3:9294'])
# Make the test run fast
def _fake_sleep(secs):
pass
self.stubs.Set(time, 'sleep', _fake_sleep)
def test_headers_passed_glanceclient(self):
auth_token = 'auth_token'
ctxt = context.RequestContext('fake', 'fake', auth_token=auth_token)
fake_host = 'host4'
fake_port = 9295
fake_use_ssl = False
def _get_fake_glanceclient(version, endpoint, **params):
fake_client = glance_stubs.StubGlanceClient(version,
endpoint, **params)
self.assertIsNotNone(fake_client.auth_token)
self.assertIsNotNone(fake_client.identity_headers)
self.assertEqual(fake_client.identity_headers['X-Auth-Token'],
auth_token)
self.assertEqual(fake_client.identity_headers['X-User-Id'], 'fake')
self.assertIsNone(fake_client.identity_headers['X-Roles'])
self.assertIsNone(fake_client.identity_headers['X-Tenant-Id'])
self.assertIsNone(fake_client.identity_headers['X-Service-Catalog'])
self.assertEqual(fake_client.
identity_headers['X-Identity-Status'],
'Confirmed')
self.stubs.Set(glanceclient.Client, '__init__',
_get_fake_glanceclient)
glance._create_glance_client(ctxt, fake_host, fake_port, fake_use_ssl)
def test_static_client_without_retries(self):
self.flags(glance_num_retries=0)
ctxt = context.RequestContext('fake', 'fake')
fake_host = 'host4'
fake_port = 9295
fake_use_ssl = False
info = {'num_calls': 0}
def _fake_create_glance_client(context, host, port, use_ssl, version):
self.assertEqual(host, fake_host)
self.assertEqual(port, fake_port)
self.assertEqual(use_ssl, fake_use_ssl)
return _create_failing_glance_client(info)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper(context=ctxt,
host=fake_host, port=fake_port, use_ssl=fake_use_ssl)
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 1)
def test_default_client_without_retries(self):
self.flags(glance_num_retries=0)
ctxt = context.RequestContext('fake', 'fake')
info = {'num_calls': 0,
'host': 'host1',
'port': 9292,
'use_ssl': False}
# Leave the list in a known order
def _fake_shuffle(servers):
pass
def _fake_create_glance_client(context, host, port, use_ssl, version):
self.assertEqual(host, info['host'])
self.assertEqual(port, info['port'])
self.assertEqual(use_ssl, info['use_ssl'])
return _create_failing_glance_client(info)
self.stubs.Set(random, 'shuffle', _fake_shuffle)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper()
client2 = glance.GlanceClientWrapper()
self.assertRaises(exception.GlanceConnectionFailed,
client.call, ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 1)
info = {'num_calls': 0,
'host': 'host2',
'port': 9293,
'use_ssl': True}
def _fake_shuffle2(servers):
# fake shuffle in a known manner
servers.append(servers.pop(0))
self.stubs.Set(random, 'shuffle', _fake_shuffle2)
self.assertRaises(exception.GlanceConnectionFailed,
client2.call, ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 1)
def test_static_client_with_retries(self):
self.flags(glance_num_retries=1)
ctxt = context.RequestContext('fake', 'fake')
fake_host = 'host4'
fake_port = 9295
fake_use_ssl = False
info = {'num_calls': 0}
def _fake_create_glance_client(context, host, port, use_ssl, version):
self.assertEqual(host, fake_host)
self.assertEqual(port, fake_port)
self.assertEqual(use_ssl, fake_use_ssl)
return _create_failing_glance_client(info)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper(context=ctxt,
host=fake_host, port=fake_port, use_ssl=fake_use_ssl)
client.call(ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 2)
def test_default_client_with_retries(self):
self.flags(glance_num_retries=1)
ctxt = context.RequestContext('fake', 'fake')
info = {'num_calls': 0,
'host0': 'host1',
'port0': 9292,
'use_ssl0': False,
'host1': 'host2',
'port1': 9293,
'use_ssl1': True}
# Leave the list in a known order
def _fake_shuffle(servers):
pass
def _fake_create_glance_client(context, host, port, use_ssl, version):
attempt = info['num_calls']
self.assertEqual(host, info['host%s' % attempt])
self.assertEqual(port, info['port%s' % attempt])
self.assertEqual(use_ssl, info['use_ssl%s' % attempt])
return _create_failing_glance_client(info)
self.stubs.Set(random, 'shuffle', _fake_shuffle)
self.stubs.Set(glance, '_create_glance_client',
_fake_create_glance_client)
client = glance.GlanceClientWrapper()
client2 = glance.GlanceClientWrapper()
client.call(ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 2)
def _fake_shuffle2(servers):
# fake shuffle in a known manner
servers.append(servers.pop(0))
self.stubs.Set(random, 'shuffle', _fake_shuffle2)
info = {'num_calls': 0,
'host0': 'host2',
'port0': 9293,
'use_ssl0': True,
'host1': 'host3',
'port1': 9294,
'use_ssl1': False}
client2.call(ctxt, 1, 'get', 'meow')
self.assertEqual(info['num_calls'], 2)
class TestGlanceUrl(test.NoDBTestCase):
def test_generate_glance_http_url(self):
generated_url = glance.generate_glance_url()
glance_host = CONF.glance_host
# ipv6 address, need to wrap it with '[]'
if utils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
http_url = "http://%s:%d" % (glance_host, CONF.glance_port)
self.assertEqual(generated_url, http_url)
def test_generate_glance_https_url(self):
self.flags(glance_protocol="https")
generated_url = glance.generate_glance_url()
glance_host = CONF.glance_host
# ipv6 address, need to wrap it with '[]'
if utils.is_valid_ipv6(glance_host):
glance_host = '[%s]' % glance_host
https_url = "https://%s:%d" % (glance_host, CONF.glance_port)
self.assertEqual(generated_url, https_url)
class TestGlanceApiServers(test.TestCase):
def test_get_ipv4_api_servers(self):
self.flags(glance_api_servers=['10.0.1.1:9292',
'https://10.0.0.1:9293',
'http://10.0.2.2:9294'])
glance_host = ['10.0.1.1', '10.0.0.1',
'10.0.2.2']
api_servers = glance.get_api_servers()
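# get_api_servers() yields the configured servers in an endless cycle,
# so the loop below breaks manually after one full pass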
i = 0
for server in api_servers:
i += 1
self.assertIn(server[0], glance_host)
if i > 2:
break
# Python 2.6 cannot parse IPv6 addresses correctly
@testtools.skipIf(sys.version_info < (2, 7), "py27 or greater only")
def test_get_ipv6_api_servers(self):
self.flags(glance_api_servers=['[2001:2012:1:f101::1]:9292',
'https://[2010:2013:1:f122::1]:9293',
'http://[2001:2011:1:f111::1]:9294'])
glance_host = ['2001:2012:1:f101::1', '2010:2013:1:f122::1',
'2001:2011:1:f111::1']
api_servers = glance.get_api_servers()
i = 0
for server in api_servers:
i += 1
self.assertIn(server[0], glance_host)
if i > 2:
break
class TestUpdateGlanceImage(test.NoDBTestCase):
def test_start(self):
consumer = glance.UpdateGlanceImage(
'context', 'id', 'metadata', 'stream')
image_service = self.mox.CreateMock(glance.GlanceImageService)
self.mox.StubOutWithMock(glance, 'get_remote_image_service')
glance.get_remote_image_service(
'context', 'id').AndReturn((image_service, 'image_id'))
image_service.update(
'context', 'image_id', 'metadata', 'stream', purge_props=False)
self.mox.ReplayAll()
consumer.start()
# -*- coding: utf-8 -*-
""" Incident Reporting System - Model
@author: Sahana Taiwan Team
"""
module = "irs"
if deployment_settings.has_module(module):
# ---------------------------------------------------------------------
# List of Incident Categories
# NB It is important that the meaning of these entries is not changed, as otherwise this hurts our ability to do synchronisation
# The keys are based on the Canadian ems.incident hierarchy, with a few extra general versions added
# The 2nd element (the translated label) is meant for end-users
# Entries can be hidden from user view in the controller.
# Additional sets of 'translations' can be added to the tuples.
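# e.g. "fire.forestFire" is the stable key that gets stored & synchronised,
# while T("Forest Fire") is the label presented to end-users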
irs_incident_type_opts = {
"animalHealth.animalDieOff" : T("Animal Die Off"),
"animalHealth.animalFeed" : T("Animal Feed"),
"aviation.aircraftCrash" : T("Aircraft Crash"),
"aviation.aircraftHijacking" : T("Aircraft Hijacking"),
"aviation.airportClosure" : T("Airport Closure"),
"aviation.airspaceClosure" : T("Airspace Closure"),
"aviation.noticeToAirmen" : T("Notice to Airmen"),
"aviation.spaceDebris" : T("Space Debris"),
"civil.demonstrations" : T("Demonstrations"),
"civil.dignitaryVisit" : T("Dignitary Visit"),
"civil.displacedPopulations" : T("Displaced Populations"),
"civil.emergency" : T("Civil Emergency"),
"civil.looting" : T("Looting"),
"civil.publicEvent" : T("Public Event"),
"civil.riot" : T("Riot"),
"civil.volunteerRequest" : T("Volunteer Request"),
"crime" : T("Crime"),
"crime.bomb" : T("Bomb"),
"crime.bombExplosion" : T("Bomb Explosion"),
"crime.bombThreat" : T("Bomb Threat"),
"crime.dangerousPerson" : T("Dangerous Person"),
"crime.drugs" : T("Drugs"),
"crime.homeCrime" : T("Home Crime"),
"crime.illegalImmigrant" : T("Illegal Immigrant"),
"crime.industrialCrime" : T("Industrial Crime"),
"crime.poisoning" : T("Poisoning"),
"crime.retailCrime" : T("Retail Crime"),
"crime.shooting" : T("Shooting"),
"crime.stowaway" : T("Stowaway"),
"crime.terrorism" : T("Terrorism"),
"crime.vehicleCrime" : T("Vehicle Crime"),
"fire" : T("Fire"),
"fire.forestFire" : T("Forest Fire"),
"fire.hotSpot" : T("Hot Spot"),
"fire.industryFire" : T("Industry Fire"),
"fire.smoke" : T("Smoke"),
"fire.urbanFire" : T("Urban Fire"),
"fire.wildFire" : T("Wild Fire"),
"flood" : T("Flood"),
"flood.damOverflow" : T("Dam Overflow"),
"flood.flashFlood" : T("Flash Flood"),
"flood.highWater" : T("High Water"),
"flood.overlandFlowFlood" : T("Overland Flow Flood"),
"flood.tsunami" : T("Tsunami"),
"geophysical.avalanche" : T("Avalanche"),
"geophysical.earthquake" : T("Earthquake"),
"geophysical.lahar" : T("Lahar"),
"geophysical.landslide" : T("Landslide"),
"geophysical.magneticStorm" : T("Magnetic Storm"),
"geophysical.meteorite" : T("Meteorite"),
"geophysical.pyroclasticFlow" : T("Pyroclastic Flow"),
"geophysical.pyroclasticSurge" : T("Pyroclastic Surge"),
"geophysical.volcanicAshCloud" : T("Volcanic Ash Cloud"),
"geophysical.volcanicEvent" : T("Volcanic Event"),
"hazardousMaterial" : T("Hazardous Material"),
"hazardousMaterial.biologicalHazard" : T("Biological Hazard"),
"hazardousMaterial.chemicalHazard" : T("Chemical Hazard"),
"hazardousMaterial.explosiveHazard" : T("Explosive Hazard"),
"hazardousMaterial.fallingObjectHazard" : T("Falling Object Hazard"),
"hazardousMaterial.infectiousDisease" : T("Infectious Disease"),
"hazardousMaterial.poisonousGas" : T("Poisonous Gas"),
"hazardousMaterial.radiologicalHazard" : T("Radiological Hazard"),
"health.infectiousDisease" : T("Infectious Disease"),
"health.infestation" : T("Infestation"),
"ice.iceberg" : T("Iceberg"),
"ice.icePressure" : T("Ice Pressure"),
"ice.rapidCloseLead" : T("Rapid Close Lead"),
"ice.specialIce" : T("Special Ice"),
"marine.marineSecurity" : T("Marine Security"),
"marine.nauticalAccident" : T("Nautical Accident"),
"marine.nauticalHijacking" : T("Nautical Hijacking"),
"marine.portClosure" : T("Port Closure"),
"marine.specialMarine" : T("Special Marine"),
"meteorological.blizzard" : T("Blizzard"),
"meteorological.blowingSnow" : T("Blowing Snow"),
"meteorological.drought" : T("Drought"),
"meteorological.dustStorm" : T("Dust Storm"),
"meteorological.fog" : T("Fog"),
"meteorological.freezingDrizzle" : T("Freezing Drizzle"),
"meteorological.freezingRain" : T("Freezing Rain"),
"meteorological.freezingSpray" : T("Freezing Spray"),
"meteorological.hail" : T("Hail"),
"meteorological.hurricane" : T("Hurricane"),
"meteorological.rainFall" : T("Rain Fall"),
"meteorological.snowFall" : T("Snow Fall"),
"meteorological.snowSquall" : T("Snow Squall"),
"meteorological.squall" : T("Squall"),
"meteorological.stormSurge" : T("Storm Surge"),
"meteorological.thunderstorm" : T("Thunderstorm"),
"meteorological.tornado" : T("Tornado"),
"meteorological.tropicalStorm" : T("Tropical Storm"),
"meteorological.waterspout" : T("Waterspout"),
"meteorological.winterStorm" : T("Winter Storm"),
"missingPerson" : T("Missing Person"),
"missingPerson.amberAlert" : T("Child Abduction Emergency"), # http://en.wikipedia.org/wiki/Amber_Alert
"missingPerson.missingVulnerablePerson" : T("Missing Vulnerable Person"),
"missingPerson.silver" : T("Missing Senior Citizen"), # http://en.wikipedia.org/wiki/Silver_Alert
"publicService.emergencySupportFacility" : T("Emergency Support Facility"),
"publicService.emergencySupportService" : T("Emergency Support Service"),
"publicService.schoolClosure" : T("School Closure"),
"publicService.schoolLockdown" : T("School Lockdown"),
"publicService.serviceOrFacility" : T("Service or Facility"),
"publicService.transit" : T("Transit"),
"railway.railwayAccident" : T("Railway Accident"),
"railway.railwayHijacking" : T("Railway Hijacking"),
"roadway.bridgeClosure" : T("Bridge Closed"),
"roadway.hazardousRoadConditions" : T("Hazardous Road Conditions"),
"roadway.roadwayAccident" : T("Road Accident"),
"roadway.roadwayClosure" : T("Road Closed"),
"roadway.roadwayDelay" : T("Road Delay"),
"roadway.roadwayHijacking" : T("Road Hijacking"),
"roadway.roadwayUsageCondition" : T("Road Usage Condition"),
"roadway.trafficReport" : T("Traffic Report"),
"temperature.arcticOutflow" : T("Arctic Outflow"),
"temperature.coldWave" : T("Cold Wave"),
"temperature.flashFreeze" : T("Flash Freeze"),
"temperature.frost" : T("Frost"),
"temperature.heatAndHumidity" : T("Heat and Humidity"),
"temperature.heatWave" : T("Heat Wave"),
"temperature.windChill" : T("Wind Chill"),
"wind.galeWind" : T("Gale Wind"),
"wind.hurricaneForceWind" : T("Hurricane Force Wind"),
"wind.stormForceWind" : T("Storm Force Wind"),
"wind.strongWind" : T("Strong Wind"),
"other.buildingCollapsed" : T("Building Collapsed"),
"other.peopleTrapped" : T("People Trapped"),
"other.powerFailure" : T("Power Failure"),
}
# This Table defines which Categories are visible to end-users
resourcename = "icategory"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("code"))
table.code.label = T("Category")
table.code.requires = IS_IN_SET(irs_incident_type_opts)
table.code.represent = lambda opt: irs_incident_type_opts.get(opt, opt)
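# IS_IN_SET restricts stored codes to the master list above, while the
# represent maps a stored code back to its human-readable label
# (falling back to the raw code for unknown values)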
# ---------------------------------------------------------------------
# Incidents
# This is the current status of an Incident
# @ToDo Change this so that there is a 'lead' ireport updated in the case of duplicates
resourcename = "incident"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
super_link(db.sit_situation),
Field("name"),
Field("category"),
Field("contact"),
location_id(),
Field("datetime", "datetime"),
#Field("persons_affected", "integer"),
#Field("persons_injured", "integer"),
#Field("persons_deceased", "integer"),
comments(),
migrate=migrate, *s3_meta_fields())
table.name.requires = IS_NOT_EMPTY()
table.datetime.label = T("Date/Time")
table.datetime.requires = [IS_NOT_EMPTY(),
IS_UTC_DATETIME(utc_offset=shn_user_utc_offset(), allow_future=False)]
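# allow_future=False rejects datetimes ahead of the current time, since
# an incident cannot be reported before it has happened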
# The full set available to Admins & Imports/Exports
# (users use the subset by over-riding this in the Controller)
table.category.requires = IS_NULL_OR(IS_IN_SET(irs_incident_type_opts))
table.category.represent = lambda opt: irs_incident_type_opts.get(opt, opt)
# CRUD strings
ADD_INCIDENT = T("Add Incident")
LIST_INCIDENTS = T("List Incidents")
s3.crud_strings[tablename] = Storage(
title_create = ADD_INCIDENT,
title_display = T("Incident Details"),
title_list = LIST_INCIDENTS,
title_update = T("Edit Incident"),
title_search = T("Search Incidents"),
subtitle_create = T("Add New Incident"),
subtitle_list = T("Incidents"),
label_list_button = LIST_INCIDENTS,
label_create_button = ADD_INCIDENT,
label_delete_button = T("Delete Incident"),
msg_record_created = T("Incident added"),
msg_record_modified = T("Incident updated"),
msg_record_deleted = T("Incident deleted"),
msg_list_empty = T("No Incidents currently registered"))
incident_id = S3ReusableField("incident_id", table,
requires = IS_NULL_OR(IS_ONE_OF(db, "irs_incident.id", "%(id)s")),
represent = lambda id: id,
label = T("Incident"),
ondelete = "RESTRICT")
s3xrc.model.configure(table,
super_entity = db.sit_situation,
list_fields = [
"id",
"category",
"datetime",
"location_id"
])
# -----------------------------------------------------------------------------
# Reports
# This is a report of an Incident
# (A single incident may generate many reports)
#def shn_assess_represent(assessments):
# """ Represent assessments in the Incidents List """
# add_assessment = A(T("Add Assessment"), _href=URL(r=request, c="assess", f="assess.html", args="create"), _class="action-btn")
# output = add_assessment
# if assessments:
# _assessments = assessments.split("|")
# for assessment in _assessments:
# output.append(A(T("Open Assessment"), _href=URL(r=request, c="assess", f="assess", args=assessment), _class="action-btn"))
# return output
# else:
# return output
resourcename = "ireport"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
incident_id(), # ToDo: Remove
Field("name"),
Field("message", "text"),
Field("category"),
person_id(),
Field("contact"),
organisation_id(),
Field("datetime", "datetime"),
location_id(),
# To be replaced by flexible Impacts as per Assessments?
#Field("persons_affected", "integer"),
#Field("persons_injured", "integer"),
#Field("persons_deceased", "integer"),
document_id(),
Field("verified", "boolean"),
#Field("assess_id", label=T("Assessments"),
# represent = shn_assess_represent
#),
comments(),
migrate=migrate, *s3_meta_fields())
table.category.label = T("Category")
# The full set available to Admins & Imports/Exports
# (users use the subset by over-riding this in the Controller)
table.category.requires = IS_NULL_OR(IS_IN_SET(irs_incident_type_opts))
table.category.represent = lambda opt: irs_incident_type_opts.get(opt, opt)
table.name.label = T("Short Description")
table.name.requires = IS_NOT_EMPTY()
table.message.label = T("Message")
table.message.represent = lambda message: shn_abbreviate(message)
table.person_id.label = T("Reporter Name")
table.person_id.comment = (T("At/Visited Location (not virtual)"),
shn_person_comment(T("Reporter Name"), T("The person at the location who is reporting this incident (optional)")))
table.contact.label = T("Contact Details")
table.datetime.label = T("Date/Time")
table.datetime.requires = [IS_NOT_EMPTY(),
IS_UTC_DATETIME(utc_offset=shn_user_utc_offset(), allow_future=False)]
table.organisation_id.label = T("Assign to Org.")
#table.persons_affected.label = T("# of People Affected")
#table.persons_injured.label = T("# of People Injured")
#table.persons_deceased.label = T("# of People Deceased")
table.verified.label = T("Verified?")
table.verified.represent = lambda verified: (T("No"), T("Yes"))[verified == True]
# CRUD strings
ADD_INC_REPORT = T("Add Incident Report")
LIST_INC_REPORTS = T("List Incident Reports")
s3.crud_strings[tablename] = Storage(
title_create = ADD_INC_REPORT,
title_display = T("Incident Report Details"),
title_list = LIST_INC_REPORTS,
title_update = T("Edit Incident Report"),
title_search = T("Search Incident Reports"),
subtitle_create = T("Add New Incident Report"),
subtitle_list = T("Incident Reports"),
label_list_button = LIST_INC_REPORTS,
label_create_button = ADD_INC_REPORT,
label_delete_button = T("Delete Incident Report"),
msg_record_created = T("Incident Report added"),
msg_record_modified = T("Incident Report updated"),
msg_record_deleted = T("Incident Report deleted"),
msg_list_empty = T("No Incident Reports currently registered"))
#def ireport_onaccept(form):
# """ Nasty Hack for Resource Linking """
# if "assessments" in form.vars and form.vars.assessments:
# pass
# else:
# # Default it to the record ID so that the represent can create an assessment for this Incident
# form.vars.assessments = form.vars.id
# We don't want these visible in Create forms
# (we override in Update forms in controller)
table.verified.writable = table.verified.readable = False
#table.assess_id.writable = table.assess_id.readable = False
s3xrc.model.configure(table,
#onaccept = lambda form: ireport_onaccept(form),
#onvalidation = ireport_onvalidation,
list_fields = ["id", "category", "location_id", "organisation_id", "verified", "name", "message"]
)
# irs_ireport as component of doc_documents
s3xrc.model.add_component(module, resourcename,
multiple=True,
joinby=dict(doc_document="document_id"),
deletable=True,
editable=True)
ireport_id = S3ReusableField("ireport_id", table,
requires = IS_NULL_OR(IS_ONE_OF(db, "irs_ireport.id", "%(name)s")),
represent = lambda id: id,
label = T("Incident Report"),
ondelete = "RESTRICT")
# -----------------------------------------------------------------------------
irs_assessment_type_opts = {
1:T("initial assessment"),
2:T("follow-up assessment"),
3:T("final report"),
99:T("other")
}
irs_event_type_opts = {
1:T("primary incident"),
2:T("secondary effect"),
3:T("collateral event"),
99:T("other")
}
irs_cause_type_opts = {
1:T("natural hazard"),
2:T("technical failure"),
3:T("human error"),
4:T("criminal intent"),
5:T("operational intent"),
99:T("other")
}
# Assessments
# This is a follow-up assessment of an Incident
# Deprecated by Assessments module?
resourcename = "iassessment"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
incident_id(),
Field("datetime", "datetime"),
Field("itype", "integer",
requires = IS_IN_SET(irs_assessment_type_opts, zero=None),
default = 1,
label = T("Report Type"),
represent = lambda opt: irs_assessment_type_opts.get(opt, UNKNOWN_OPT)),
Field("event_type", "integer",
requires = IS_IN_SET(irs_event_type_opts, zero=None),
default = 1,
label = T("Event type"),
represent = lambda opt: irs_event_type_opts.get(opt, UNKNOWN_OPT)),
Field("cause_type", "integer",
requires = IS_IN_SET(irs_cause_type_opts, zero=None),
default = 1,
label = T("Type of cause"),
represent = lambda opt: irs_cause_type_opts.get(opt, UNKNOWN_OPT)),
Field("report", "text"),
Field("persons_affected", "integer"),
Field("persons_injured", "integer"),
Field("persons_deceased", "integer"),
migrate=migrate, *s3_meta_fields())
table.modified_by.label = T("Reporter")
table.modified_by.readable = True
table.datetime.label = T("Date/Time")
# CRUD strings
ADD_ASSESSMENT = T("Add Assessment")
LIST_ASSESSMENTS = T("List Assessments")
s3.crud_strings[tablename] = Storage(
title_create = ADD_ASSESSMENT,
title_display = T("Assessment Details"),
title_list = LIST_ASSESSMENTS,
title_update = T("Edit Assessment"),
title_search = T("Search Assessments"),
subtitle_create = T("Add New Assessment"),
subtitle_list = T("Assessments"),
label_list_button = LIST_ASSESSMENTS,
label_create_button = ADD_ASSESSMENT,
msg_record_created = T("Assessment added"),
msg_record_modified = T("Assessment updated"),
msg_record_deleted = T("Assessment deleted"),
msg_list_empty = T("No Assessments currently registered"))
s3xrc.model.configure(table,
list_fields = [
"id",
"datetime",
"itype",
"modified_by"
])
# Disabling until we figure out how to link to Assessments module
#s3xrc.model.add_component(module, resourcename,
# multiple = True,
# joinby = dict(irs_incident="incident_id"),
# deletable = True,
# editable = True)
# -----------------------------------------------------------------------------
irs_image_type_opts = {
1:T("Photograph"),
2:T("Map"),
3:T("Document Scan"),
99:T("other")
}
# Replace by image_id
resourcename = "iimage"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
Field("report_id", db.irs_ireport),
incident_id(),
Field("assessment_id", db.irs_iassessment),
Field("type", "integer",
requires = IS_IN_SET(irs_image_type_opts, zero=None),
default = 1,
label = T("Image Type"),
represent = lambda opt: irs_image_type_opts.get(opt, UNKNOWN_OPT)),
Field("image", "upload", autodelete=True),
#Field("url"),
Field("description"),
#Field("tags"),
migrate=migrate, *s3_meta_fields())
# CRUD strings
ADD_IMAGE = T("Add Image")
LIST_IMAGES = T("List Images")
s3.crud_strings[tablename] = Storage(
title_create = ADD_IMAGE,
title_display = T("Image Details"),
title_list = LIST_IMAGES,
title_update = T("Edit Image"),
title_search = T("Search Images"),
subtitle_create = T("Add New Image"),
subtitle_list = T("Images"),
label_list_button = LIST_IMAGES,
label_create_button = ADD_IMAGE,
msg_record_created = T("Image added"),
msg_record_modified = T("Image updated"),
msg_record_deleted = T("Image deleted"),
msg_list_empty = T("No Images currently registered"))
s3xrc.model.add_component(module, resourcename,
multiple = True,
joinby = dict(irs_incident="incident_id",
irs_ireport="report_id",
irs_iassessment="assessment_id"),
deletable = True,
editable = True)
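# NB the joinby dict above registers iimage as a component of all three
# parent resources, so an image can be attached to an incident, a
# report or an assessment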
# -----------------------------------------------------------------------------
irs_response_type_opts = {
1:T("Alert"),
2:T("Intervention"),
3:T("Closure"),
99:T("other")
}
resourcename = "iresponse"
tablename = "%s_%s" % (module, resourcename)
table = db.define_table(tablename,
incident_id(),
Field("datetime", "datetime"),
Field("itype", "integer",
requires = IS_IN_SET(irs_response_type_opts, zero=None),
default = 1,
label = T("Type"),
represent = lambda opt: irs_response_type_opts.get(opt, UNKNOWN_OPT)),
Field("report", "text"),
migrate=migrate, *s3_meta_fields())
# CRUD strings
ADD_RESPONSE = T("Add Response")
LIST_RESPONSES = T("List Responses")
s3.crud_strings[tablename] = Storage(
title_create = ADD_RESPONSE,
title_display = T("Response Details"),
title_list = LIST_RESPONSES,
title_update = T("Edit Response"),
title_search = T("Search Responses"),
subtitle_create = T("Add New Response"),
subtitle_list = T("Responses"),
label_list_button = LIST_RESPONSES,
label_create_button = ADD_RESPONSE,
msg_record_created = T("Response added"),
msg_record_modified = T("Response updated"),
msg_record_deleted = T("Response deleted"),
msg_list_empty = T("No Responses currently registered"))
s3xrc.model.add_component(module, resourcename,
multiple = True,
joinby = dict(irs_incident="incident_id"),
deletable = True,
editable = True)
# -----------------------------------------------------------------------------
@auth.shn_requires_membership(1) # must be Administrator
def shn_irs_ushahidi_import(r, **attr):
if r.representation == "html" and \
r.name == "ireport" and not r.component and not r.id:
url = r.request.get_vars.get("url", "http://")
title = T("Incident Reports")
subtitle = T("Import from Ushahidi Instance")
form = FORM(TABLE(TR(
TH("URL: "),
INPUT(_type="text", _name="url", _size="100", _value=url,
requires=[IS_URL(), IS_NOT_EMPTY()]),
TH(DIV(SPAN("*", _class="req", _style="padding-right: 5px;")))),
TR(TD("Ignore Errors?: "),
TD(INPUT(_type="checkbox", _name="ignore_errors", _id="ignore_errors"))),
TR("", INPUT(_type="submit", _value=T("Import")))))
label_list_btn = shn_get_crud_string(r.tablename, "title_list")
list_btn = A(label_list_btn,
_href=r.other(method="", vars=None),
_class="action-btn")
rheader = DIV(P(T("API is documented here") + ": http://wiki.ushahidi.com/doku.php?id=ushahidi_api"), P(T("Example") + " URL: http://ushahidi.my.domain/api?task=incidents&by=all&resp=xml&limit=1000"))
output = dict(title=title, form=form, subtitle=subtitle, list_btn=list_btn, rheader=rheader)
if form.accepts(request.vars, session):
import_count = [0]
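# NB import_count is a 1-element list so that the nested sync()
# callback can mutate it (Python 2 has no 'nonlocal')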
def sync(vector, import_count = import_count):
if vector.tablename == "irs_ireport":
import_count[0] += 1
s3xrc.sync_resolve = sync
ireports = r.resource
ushahidi = form.vars.url
ignore_errors = form.vars.get("ignore_errors", None)
template = os.path.join(request.folder, "static", "xslt", "import", "ushahidi.xsl")
if os.path.exists(template) and ushahidi:
try:
success = ireports.import_xml(ushahidi, template=template, ignore_errors=ignore_errors)
except:
import sys
e = sys.exc_info()[1]
response.error = e
else:
if success:
count = import_count[0]
if count:
response.flash = "%s %s" % (import_count[0], T("reports successfully imported."))
else:
response.flash = T("No reports available.")
else:
response.error = s3xrc.error
response.view = "create.html"
return output
else:
raise HTTP(501, BADMETHOD)
s3xrc.model.set_method(module, "ireport",
method="ushahidi",
action=shn_irs_ushahidi_import)
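# Registers the importer as a custom REST method on the ireport
# resource (typically exposed as /irs/ireport/ushahidi by S3 routing)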
# -----------------------------------------------------------------------------
# Copyright 2012 NetApp
# Copyright 2014 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Unit tests for the Share driver module."""
import time
import ddt
import mock
from manila import exception
from manila import network
from manila.share import configuration
from manila.share import driver
from manila import test
from manila.tests import utils as test_utils
from manila import utils
def fake_execute_with_raise(*cmd, **kwargs):
raise exception.ProcessExecutionError
def fake_sleep(duration):
pass
class ShareDriverWithExecuteMixin(driver.ShareDriver, driver.ExecuteMixin):
pass
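# Combining ShareDriver with ExecuteMixin gives the tests a concrete
# class on which ExecuteMixin helpers such as _try_execute can be
# exercised (see test__try_execute below).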
@ddt.ddt
class ShareDriverTestCase(test.TestCase):
_SNAPSHOT_METHOD_NAMES = ["create_snapshot", "delete_snapshot"]
def setUp(self):
super(ShareDriverTestCase, self).setUp()
self.utils = utils
self.mock_object(self.utils, 'execute', fake_execute_with_raise)
self.time = time
self.mock_object(self.time, 'sleep', fake_sleep)
driver.CONF.set_default('driver_handles_share_servers', True)
def test__try_execute(self):
execute_mixin = ShareDriverWithExecuteMixin(
True, configuration=configuration.Configuration(None))
self.assertRaises(exception.ProcessExecutionError,
execute_mixin._try_execute)
def test_verify_share_driver_mode_option_type(self):
data = {'DEFAULT': {'driver_handles_share_servers': 'True'}}
with test_utils.create_temp_config_with_opts(data):
share_driver = driver.ShareDriver([True, False])
self.assertTrue(share_driver.driver_handles_share_servers)
def _instantiate_share_driver(self, network_config_group,
driver_handles_share_servers,
admin_network_config_group=None):
self.mock_object(network, 'API')
config = mock.Mock()
config.append_config_values = mock.Mock()
config.config_group = 'fake_config_group'
config.network_config_group = network_config_group
if admin_network_config_group:
config.admin_network_config_group = admin_network_config_group
config.safe_get = mock.Mock(return_value=driver_handles_share_servers)
share_driver = driver.ShareDriver([True, False], configuration=config)
self.assertTrue(hasattr(share_driver, 'configuration'))
config.append_config_values.assert_called_once_with(driver.share_opts)
if driver_handles_share_servers:
calls = []
if network_config_group:
calls.append(mock.call(
config_group_name=config.network_config_group))
else:
calls.append(mock.call(
config_group_name=config.config_group))
if admin_network_config_group:
calls.append(mock.call(
config_group_name=config.admin_network_config_group,
label='admin'))
network.API.assert_has_calls(calls)
self.assertTrue(hasattr(share_driver, 'network_api'))
self.assertTrue(hasattr(share_driver, 'admin_network_api'))
self.assertIsNotNone(share_driver.network_api)
self.assertIsNotNone(share_driver.admin_network_api)
else:
self.assertFalse(hasattr(share_driver, 'network_api'))
self.assertTrue(hasattr(share_driver, 'admin_network_api'))
self.assertIsNone(share_driver.admin_network_api)
self.assertFalse(network.API.called)
return share_driver
def test_instantiate_share_driver(self):
self._instantiate_share_driver(None, True)
def test_instantiate_share_driver_another_config_group(self):
self._instantiate_share_driver("fake_network_config_group", True)
def test_instantiate_share_driver_with_admin_network(self):
self._instantiate_share_driver(
"fake_network_config_group", True,
"fake_admin_network_config_group")
def test_instantiate_share_driver_no_configuration(self):
self.mock_object(network, 'API')
share_driver = driver.ShareDriver(True, configuration=None)
self.assertIsNone(share_driver.configuration)
network.API.assert_called_once_with(config_group_name=None)
def test_get_share_stats_refresh_false(self):
share_driver = driver.ShareDriver(True, configuration=None)
share_driver._stats = {'fake_key': 'fake_value'}
result = share_driver.get_share_stats(False)
self.assertEqual(share_driver._stats, result)
def test_get_share_stats_refresh_true(self):
conf = configuration.Configuration(None)
expected_keys = [
'qos', 'driver_version', 'share_backend_name',
'free_capacity_gb', 'total_capacity_gb',
'driver_handles_share_servers',
'reserved_percentage', 'vendor_name', 'storage_protocol',
'snapshot_support', 'mount_snapshot_support',
]
share_driver = driver.ShareDriver(True, configuration=conf)
fake_stats = {'fake_key': 'fake_value'}
share_driver._stats = fake_stats
result = share_driver.get_share_stats(True)
self.assertNotEqual(fake_stats, result)
for key in expected_keys:
self.assertIn(key, result)
self.assertEqual('Open Source', result['vendor_name'])
@ddt.data(
{'opt': True, 'allowed': True},
{'opt': True, 'allowed': (True, False)},
{'opt': True, 'allowed': [True, False]},
{'opt': True, 'allowed': set([True, False])},
{'opt': False, 'allowed': False},
{'opt': False, 'allowed': (True, False)},
{'opt': False, 'allowed': [True, False]},
{'opt': False, 'allowed': set([True, False])})
@ddt.unpack
def test__verify_share_server_handling_valid_cases(self, opt, allowed):
conf = configuration.Configuration(None)
self.mock_object(conf, 'safe_get', mock.Mock(return_value=opt))
share_driver = driver.ShareDriver(allowed, configuration=conf)
self.assertTrue(conf.safe_get.called)
self.assertEqual(opt, share_driver.driver_handles_share_servers)
@ddt.data(
{'opt': False, 'allowed': True},
{'opt': True, 'allowed': False},
{'opt': None, 'allowed': True},
{'opt': 'True', 'allowed': True},
{'opt': 'False', 'allowed': False},
{'opt': [], 'allowed': True},
{'opt': True, 'allowed': []},
{'opt': True, 'allowed': ['True']},
{'opt': False, 'allowed': ['False']})
@ddt.unpack
def test__verify_share_server_handling_invalid_cases(self, opt, allowed):
conf = configuration.Configuration(None)
self.mock_object(conf, 'safe_get', mock.Mock(return_value=opt))
self.assertRaises(
exception.ManilaException,
driver.ShareDriver, allowed, configuration=conf)
self.assertTrue(conf.safe_get.called)
def test_setup_server_handling_disabled(self):
share_driver = self._instantiate_share_driver(None, False)
# We expect successful execution, nothing to assert
share_driver.setup_server('Nothing is expected to happen.')
def test_setup_server_handling_enabled(self):
share_driver = self._instantiate_share_driver(None, True)
self.assertRaises(
NotImplementedError,
share_driver.setup_server,
'fake_network_info')
def test_teardown_server_handling_disabled(self):
share_driver = self._instantiate_share_driver(None, False)
# We expect successful execution, nothing to assert
share_driver.teardown_server('Nothing is expected to happen.')
def test_teardown_server_handling_enabled(self):
share_driver = self._instantiate_share_driver(None, True)
self.assertRaises(
NotImplementedError,
share_driver.teardown_server,
'fake_share_server_details')
def _assert_is_callable(self, obj, attr):
self.assertTrue(callable(getattr(obj, attr)))
@ddt.data('manage_existing',
'unmanage')
def test_drivers_methods_needed_by_manage_functionality(self, method):
share_driver = self._instantiate_share_driver(None, False)
self._assert_is_callable(share_driver, method)
@ddt.data('manage_existing_snapshot',
'unmanage_snapshot')
def test_drivers_methods_needed_by_manage_snapshot_functionality(
self, method):
share_driver = self._instantiate_share_driver(None, False)
self._assert_is_callable(share_driver, method)
@ddt.data('revert_to_snapshot',
'revert_to_replicated_snapshot')
def test_drivers_methods_needed_by_share_revert_to_snapshot_functionality(
self, method):
share_driver = self._instantiate_share_driver(None, False)
self._assert_is_callable(share_driver, method)
@ddt.data(True, False)
def test_get_share_server_pools(self, value):
driver.CONF.set_default('driver_handles_share_servers', value)
share_driver = driver.ShareDriver(value)
self.assertEqual([],
share_driver.get_share_server_pools('fake_server'))
@ddt.data(0.8, 1.0, 10.5, 20.0, None, '1', '1.1')
def test_check_for_setup_error(self, value):
driver.CONF.set_default('driver_handles_share_servers', False)
share_driver = driver.ShareDriver(False)
share_driver.configuration = configuration.Configuration(None)
self.mock_object(share_driver.configuration, 'safe_get',
mock.Mock(return_value=value))
if value and float(value) >= 1.0:
share_driver.check_for_setup_error()
else:
self.assertRaises(exception.InvalidParameterValue,
share_driver.check_for_setup_error)
def test_snapshot_support_exists(self):
driver.CONF.set_default('driver_handles_share_servers', True)
fake_method = lambda *args, **kwargs: None
child_methods = {
"create_snapshot": fake_method,
"delete_snapshot": fake_method,
}
child_class_instance = type(
"NotRedefined", (driver.ShareDriver, ), child_methods)(True)
self.mock_object(child_class_instance, "configuration")
child_class_instance._update_share_stats()
self.assertTrue(child_class_instance._stats["snapshot_support"])
self.assertTrue(child_class_instance.configuration.safe_get.called)
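# snapshot_support is inferred from whether both create_snapshot and
# delete_snapshot are redefined somewhere in the driver's class
# hierarchy; the parametrised cases below cover those combinations.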
@ddt.data(
([], [], False),
(_SNAPSHOT_METHOD_NAMES, [], True),
(_SNAPSHOT_METHOD_NAMES, _SNAPSHOT_METHOD_NAMES, True),
(_SNAPSHOT_METHOD_NAMES[0:1], _SNAPSHOT_METHOD_NAMES[1:], True),
([], _SNAPSHOT_METHOD_NAMES, True),
)
@ddt.unpack
def test_check_redefined_driver_methods(self, common_drv_meth_names,
child_drv_meth_names,
expected_result):
# This test covers the case of drivers inheriting other drivers or
# common classes.
driver.CONF.set_default('driver_handles_share_servers', True)
common_drv_methods, child_drv_methods = [
{method_name: lambda *args, **kwargs: None
for method_name in method_names}
for method_names in (common_drv_meth_names,
child_drv_meth_names)]
common_drv = type(
"NotRedefinedCommon", (driver.ShareDriver, ), common_drv_methods)
child_drv_instance = type("NotRedefined", (common_drv, ),
child_drv_methods)(True)
has_redefined_methods = (
child_drv_instance._has_redefined_driver_methods(
self._SNAPSHOT_METHOD_NAMES))
self.assertEqual(expected_result, has_redefined_methods)
@ddt.data(
(),
("create_snapshot"),
("delete_snapshot"),
("create_snapshot", "delete_snapshotFOO"),
)
def test_snapshot_support_absent(self, methods):
driver.CONF.set_default('driver_handles_share_servers', True)
fake_method = lambda *args, **kwargs: None
child_methods = {}
for method in methods:
child_methods[method] = fake_method
child_class_instance = type(
"NotRedefined", (driver.ShareDriver, ), child_methods)(True)
self.mock_object(child_class_instance, "configuration")
child_class_instance._update_share_stats()
self.assertFalse(child_class_instance._stats["snapshot_support"])
self.assertTrue(child_class_instance.configuration.safe_get.called)
@ddt.data(True, False)
def test_snapshot_support_not_exists_and_set_explicitly(
self, snapshots_are_supported):
driver.CONF.set_default('driver_handles_share_servers', True)
child_class_instance = type(
"NotRedefined", (driver.ShareDriver, ), {})(True)
self.mock_object(child_class_instance, "configuration")
child_class_instance._update_share_stats(
{"snapshot_support": snapshots_are_supported})
self.assertEqual(
snapshots_are_supported,
child_class_instance._stats["snapshot_support"])
self.assertTrue(child_class_instance.configuration.safe_get.called)
@ddt.data(True, False)
def test_snapshot_support_exists_and_set_explicitly(
self, snapshots_are_supported):
driver.CONF.set_default('driver_handles_share_servers', True)
fake_method = lambda *args, **kwargs: None
child_methods = {
"create_snapshot": fake_method,
"delete_snapshot": fake_method,
}
child_class_instance = type(
"NotRedefined", (driver.ShareDriver, ), child_methods)(True)
self.mock_object(child_class_instance, "configuration")
child_class_instance._update_share_stats(
{"snapshot_support": snapshots_are_supported})
self.assertEqual(
snapshots_are_supported,
child_class_instance._stats["snapshot_support"])
self.assertTrue(child_class_instance.configuration.safe_get.called)
def test_create_share_from_snapshot_support_exists(self):
driver.CONF.set_default('driver_handles_share_servers', True)
fake_method = lambda *args, **kwargs: None
child_methods = {
"create_share_from_snapshot": fake_method,
"create_snapshot": fake_method,
"delete_snapshot": fake_method,
}
child_class_instance = type(
"NotRedefined", (driver.ShareDriver, ), child_methods)(True)
self.mock_object(child_class_instance, "configuration")
child_class_instance._update_share_stats()
self.assertTrue(
child_class_instance._stats["create_share_from_snapshot_support"])
self.assertTrue(child_class_instance.configuration.safe_get.called)
@ddt.data(
(),
("create_snapshot"),
("create_share_from_snapshotFOO"),
)
def test_create_share_from_snapshot_support_absent(self, methods):
driver.CONF.set_default('driver_handles_share_servers', True)
fake_method = lambda *args, **kwargs: None
child_methods = {}
for method in methods:
child_methods[method] = fake_method
child_class_instance = type(
"NotRedefined", (driver.ShareDriver, ), child_methods)(True)
self.mock_object(child_class_instance, "configuration")
child_class_instance._update_share_stats()
self.assertFalse(
child_class_instance._stats["create_share_from_snapshot_support"])
self.assertTrue(child_class_instance.configuration.safe_get.called)
@ddt.data(True, False)
def test_create_share_from_snapshot_not_exists_and_set_explicitly(
self, creating_shares_from_snapshot_is_supported):
driver.CONF.set_default('driver_handles_share_servers', True)
child_class_instance = type(
"NotRedefined", (driver.ShareDriver, ), {})(True)
self.mock_object(child_class_instance, "configuration")
child_class_instance._update_share_stats({
"create_share_from_snapshot_support":
creating_shares_from_snapshot_is_supported,
})
self.assertEqual(
creating_shares_from_snapshot_is_supported,
child_class_instance._stats["create_share_from_snapshot_support"])
self.assertTrue(child_class_instance.configuration.safe_get.called)
@ddt.data(True, False)
def test_create_share_from_snapshot_exists_and_set_explicitly(
self, create_share_from_snapshot_supported):
driver.CONF.set_default('driver_handles_share_servers', True)
fake_method = lambda *args, **kwargs: None
child_methods = {"create_share_from_snapshot": fake_method}
child_class_instance = type(
"NotRedefined", (driver.ShareDriver, ), child_methods)(True)
self.mock_object(child_class_instance, "configuration")
child_class_instance._update_share_stats({
"create_share_from_snapshot_support":
create_share_from_snapshot_supported,
})
self.assertEqual(
create_share_from_snapshot_supported,
child_class_instance._stats["create_share_from_snapshot_support"])
self.assertTrue(child_class_instance.configuration.safe_get.called)
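    # Illustrative summary (drawn from the tests above, not original text):
    # ShareDriver._update_share_stats infers capability flags from method
    # redefinition. ``snapshot_support`` is True only when both
    # create_snapshot and delete_snapshot are overridden, and
    # ``create_share_from_snapshot_support`` only when
    # create_share_from_snapshot is; an explicit value passed in the stats
    # dict always takes precedence over the inferred one.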
def test_get_periodic_hook_data(self):
share_driver = self._instantiate_share_driver(None, False)
share_instances = ["list", "of", "share", "instances"]
result = share_driver.get_periodic_hook_data(
"fake_context", share_instances)
self.assertEqual(share_instances, result)
def test_get_admin_network_allocations_number(self):
share_driver = self._instantiate_share_driver(None, True)
self.assertEqual(
0, share_driver.get_admin_network_allocations_number())
def test_allocate_admin_network_count_None(self):
share_driver = self._instantiate_share_driver(None, True)
ctxt = 'fake_context'
share_server = 'fake_share_server'
mock_get_admin_network_allocations_number = self.mock_object(
share_driver,
'get_admin_network_allocations_number',
mock.Mock(return_value=0))
self.mock_object(
share_driver.admin_network_api,
'allocate_network',
mock.Mock(side_effect=Exception('ShouldNotBeRaised')))
share_driver.allocate_admin_network(ctxt, share_server)
mock_get_admin_network_allocations_number.assert_called_once_with()
self.assertFalse(
share_driver.admin_network_api.allocate_network.called)
def test_allocate_admin_network_count_0(self):
share_driver = self._instantiate_share_driver(None, True)
ctxt = 'fake_context'
share_server = 'fake_share_server'
self.mock_object(
share_driver,
'get_admin_network_allocations_number',
mock.Mock(return_value=0))
self.mock_object(
share_driver.admin_network_api,
'allocate_network',
mock.Mock(side_effect=Exception('ShouldNotBeRaised')))
share_driver.allocate_admin_network(ctxt, share_server, count=0)
self.assertFalse(
share_driver.get_admin_network_allocations_number.called)
self.assertFalse(
share_driver.admin_network_api.allocate_network.called)
def test_allocate_admin_network_count_1_api_initialized(self):
share_driver = self._instantiate_share_driver(None, True)
ctxt = 'fake_context'
share_server = 'fake_share_server'
mock_get_admin_network_allocations_number = self.mock_object(
share_driver,
'get_admin_network_allocations_number',
mock.Mock(return_value=1))
self.mock_object(
share_driver.admin_network_api,
'allocate_network',
mock.Mock())
share_driver.allocate_admin_network(ctxt, share_server)
mock_get_admin_network_allocations_number.assert_called_once_with()
share_driver.admin_network_api.allocate_network.\
assert_called_once_with(ctxt, share_server, count=1)
def test_allocate_admin_network_count_1_api_not_initialized(self):
share_driver = self._instantiate_share_driver(None, True, None)
ctxt = 'fake_context'
share_server = 'fake_share_server'
share_driver._admin_network_api = None
mock_get_admin_network_allocations_number = self.mock_object(
share_driver,
'get_admin_network_allocations_number',
mock.Mock(return_value=1))
self.assertRaises(
exception.NetworkBadConfigurationException,
share_driver.allocate_admin_network,
ctxt, share_server,
)
mock_get_admin_network_allocations_number.assert_called_once_with()
def test_migration_start(self):
driver.CONF.set_default('driver_handles_share_servers', False)
share_driver = driver.ShareDriver(False)
self.assertRaises(NotImplementedError, share_driver.migration_start,
None, None, None, None, None, None, None)
def test_migration_continue(self):
driver.CONF.set_default('driver_handles_share_servers', False)
share_driver = driver.ShareDriver(False)
self.assertRaises(NotImplementedError, share_driver.migration_continue,
None, None, None, None, None, None, None)
def test_migration_complete(self):
driver.CONF.set_default('driver_handles_share_servers', False)
share_driver = driver.ShareDriver(False)
self.assertRaises(NotImplementedError, share_driver.migration_complete,
None, None, None, None, None, None, None)
def test_migration_cancel(self):
driver.CONF.set_default('driver_handles_share_servers', False)
share_driver = driver.ShareDriver(False)
self.assertRaises(NotImplementedError, share_driver.migration_cancel,
None, None, None, None, None, None, None)
def test_migration_get_progress(self):
driver.CONF.set_default('driver_handles_share_servers', False)
share_driver = driver.ShareDriver(False)
self.assertRaises(NotImplementedError,
share_driver.migration_get_progress,
None, None, None, None, None, None, None)
@ddt.data(True, False)
def test_connection_get_info(self, admin):
expected = {
'mount': 'mount -vt nfs %(options)s /fake/fake_id %(path)s',
'unmount': 'umount -v %(path)s',
'access_mapping': {
'ip': ['nfs']
}
}
fake_share = {
'id': 'fake_id',
'share_proto': 'nfs',
'export_locations': [{
'path': '/fake/fake_id',
'is_admin_only': admin
}]
}
driver.CONF.set_default('driver_handles_share_servers', False)
share_driver = driver.ShareDriver(False)
share_driver.configuration = configuration.Configuration(None)
connection_info = share_driver.connection_get_info(
None, fake_share, "fake_server")
self.assertEqual(expected, connection_info)
def test_migration_check_compatibility(self):
driver.CONF.set_default('driver_handles_share_servers', False)
share_driver = driver.ShareDriver(False)
share_driver.configuration = configuration.Configuration(None)
expected = {
'compatible': False,
'writable': False,
'preserve_metadata': False,
'nondisruptive': False,
'preserve_snapshots': False,
}
result = share_driver.migration_check_compatibility(
None, None, None, None, None)
self.assertEqual(expected, result)
def test_update_access(self):
share_driver = driver.ShareDriver(True, configuration=None)
self.assertRaises(
NotImplementedError,
share_driver.update_access,
'ctx',
'fake_share',
'fake_access_rules',
'fake_add_rules',
'fake_delete_rules'
)
def test_create_replica(self):
share_driver = self._instantiate_share_driver(None, True)
self.assertRaises(NotImplementedError,
share_driver.create_replica,
'fake_context', ['r1', 'r2'],
'fake_new_replica', [], [])
def test_delete_replica(self):
share_driver = self._instantiate_share_driver(None, True)
self.assertRaises(NotImplementedError,
share_driver.delete_replica,
'fake_context', ['r1', 'r2'],
'fake_replica', [])
def test_promote_replica(self):
share_driver = self._instantiate_share_driver(None, True)
self.assertRaises(NotImplementedError,
share_driver.promote_replica,
'fake_context', [], 'fake_replica', [])
def test_update_replica_state(self):
share_driver = self._instantiate_share_driver(None, True)
self.assertRaises(NotImplementedError,
share_driver.update_replica_state,
'fake_context', ['r1', 'r2'], 'fake_replica', [], [])
def test_create_replicated_snapshot(self):
share_driver = self._instantiate_share_driver(None, False)
self.assertRaises(NotImplementedError,
share_driver.create_replicated_snapshot,
'fake_context', ['r1', 'r2'], ['s1', 's2'])
def test_delete_replicated_snapshot(self):
share_driver = self._instantiate_share_driver(None, False)
self.assertRaises(NotImplementedError,
share_driver.delete_replicated_snapshot,
'fake_context', ['r1', 'r2'], ['s1', 's2'])
def test_update_replicated_snapshot(self):
share_driver = self._instantiate_share_driver(None, False)
self.assertRaises(NotImplementedError,
share_driver.update_replicated_snapshot,
'fake_context', ['r1', 'r2'], 'r1',
['s1', 's2'], 's1')
@ddt.data(True, False)
def test_share_group_snapshot_support_exists_and_equals_snapshot_support(
self, snapshots_are_supported):
driver.CONF.set_default('driver_handles_share_servers', True)
child_class_instance = driver.ShareDriver(True)
child_class_instance._snapshots_are_supported = snapshots_are_supported
self.mock_object(child_class_instance, "configuration")
child_class_instance._update_share_stats()
self.assertEqual(
snapshots_are_supported,
child_class_instance._stats["snapshot_support"])
self.assertEqual(
snapshots_are_supported,
child_class_instance._stats["share_group_snapshot_support"])
self.assertTrue(child_class_instance.configuration.safe_get.called)
def test_create_share_group_from_share_group_snapshot(self):
share_driver = self._instantiate_share_driver(None, False)
fake_shares = [
{'id': 'fake_share_%d' % i,
'source_share_group_snapshot_member_id': 'fake_member_%d' % i}
for i in (1, 2)]
fake_share_group_dict = {
'source_share_group_snapshot_id': 'some_fake_uuid_abc',
'shares': fake_shares,
'id': 'some_fake_uuid_def',
}
fake_share_group_snapshot_dict = {
'share_group_snapshot_members': [
{'id': 'fake_member_1'}, {'id': 'fake_member_2'}],
'id': 'fake_share_group_snapshot_id',
}
mock_create = self.mock_object(
share_driver, 'create_share_from_snapshot',
mock.Mock(side_effect=['fake_export1', 'fake_export2']))
expected_share_updates = [
{
'id': 'fake_share_1',
'export_locations': 'fake_export1',
},
{
'id': 'fake_share_2',
'export_locations': 'fake_export2',
},
]
share_group_update, share_update = (
share_driver.create_share_group_from_share_group_snapshot(
'fake_context', fake_share_group_dict,
fake_share_group_snapshot_dict))
mock_create.assert_has_calls([
mock.call(
'fake_context',
{'id': 'fake_share_1',
'source_share_group_snapshot_member_id': 'fake_member_1'},
{'id': 'fake_member_1'}),
mock.call(
'fake_context',
{'id': 'fake_share_2',
'source_share_group_snapshot_member_id': 'fake_member_2'},
{'id': 'fake_member_2'})
])
self.assertIsNone(share_group_update)
self.assertEqual(expected_share_updates, share_update)
def test_create_share_group_from_share_group_snapshot_dhss(self):
share_driver = self._instantiate_share_driver(None, True)
mock_share_server = mock.Mock()
fake_shares = [
{'id': 'fake_share_1',
'source_share_group_snapshot_member_id': 'foo_member_1'},
{'id': 'fake_share_2',
'source_share_group_snapshot_member_id': 'foo_member_2'}]
fake_share_group_dict = {
'source_share_group_snapshot_id': 'some_fake_uuid',
'shares': fake_shares,
'id': 'eda52174-0442-476d-9694-a58327466c14',
}
fake_share_group_snapshot_dict = {
'share_group_snapshot_members': [
{'id': 'foo_member_1'}, {'id': 'foo_member_2'}],
'id': 'fake_share_group_snapshot_id'
}
mock_create = self.mock_object(
share_driver, 'create_share_from_snapshot',
mock.Mock(side_effect=['fake_export1', 'fake_export2']))
expected_share_updates = [
{'id': 'fake_share_1', 'export_locations': 'fake_export1'},
{'id': 'fake_share_2', 'export_locations': 'fake_export2'},
]
share_group_update, share_update = (
share_driver.create_share_group_from_share_group_snapshot(
'fake_context',
fake_share_group_dict,
fake_share_group_snapshot_dict, share_server=mock_share_server,
)
)
mock_create.assert_has_calls([
mock.call(
'fake_context',
{'id': 'fake_share_%d' % i,
'source_share_group_snapshot_member_id': 'foo_member_%d' % i},
{'id': 'foo_member_%d' % i},
share_server=mock_share_server)
for i in (1, 2)
])
self.assertIsNone(share_group_update)
self.assertEqual(expected_share_updates, share_update)
def test_create_share_group_from_sg_snapshot_with_no_members(self):
share_driver = self._instantiate_share_driver(None, False)
fake_share_group_dict = {}
fake_share_group_snapshot_dict = {'share_group_snapshot_members': []}
share_group_update, share_update = (
share_driver.create_share_group_from_share_group_snapshot(
'fake_context', fake_share_group_dict,
fake_share_group_snapshot_dict))
self.assertIsNone(share_group_update)
self.assertIsNone(share_update)
def test_create_share_group_snapshot(self):
fake_snap_member_1 = {
'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e',
'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296',
'share_group_snapshot_id': 'fake_share_group_snapshot_id',
'share_instance_id': 'fake_share_instance_id_1',
'provider_location': 'should_not_be_used_1',
'share': {
'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2',
'size': 3,
'share_proto': 'fake_share_proto',
},
}
fake_snap_member_2 = {
'id': '1e010dfe-545b-432d-ab95-4ef03cd82f89',
'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296',
'share_group_snapshot_id': 'fake_share_group_snapshot_id',
'share_instance_id': 'fake_share_instance_id_2',
'provider_location': 'should_not_be_used_2',
'share': {
'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2',
'size': '2',
'share_proto': 'fake_share_proto',
},
}
fake_snap_dict = {
'status': 'available',
'project_id': '13c0be6290934bd98596cfa004650049',
'user_id': 'a0314a441ca842019b0952224aa39192',
'description': None,
'deleted': '0',
'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67',
'share_group_snapshot_members': [
fake_snap_member_1, fake_snap_member_2],
'deleted_at': None,
'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
'name': None
}
share_driver = self._instantiate_share_driver(None, False)
share_driver._stats['share_group_snapshot_support'] = True
mock_create_snap = self.mock_object(
share_driver, 'create_snapshot',
mock.Mock(side_effect=lambda *args, **kwargs: {
'foo_k': 'foo_v', 'bar_k': 'bar_v_%s' % args[1]['id']}))
share_group_snapshot_update, member_update_list = (
share_driver.create_share_group_snapshot(
'fake_context', fake_snap_dict))
mock_create_snap.assert_has_calls([
mock.call(
'fake_context',
{'snapshot_id': member['share_group_snapshot_id'],
'share_id': member['share_id'],
'share_instance_id': member['share']['id'],
'id': member['id'],
'share': member['share'],
'size': member['share']['size'],
'share_size': member['share']['size'],
'share_proto': member['share']['share_proto'],
'provider_location': None},
share_server=None)
for member in (fake_snap_member_1, fake_snap_member_2)
])
self.assertIsNone(share_group_snapshot_update)
self.assertEqual(
[{'id': member['id'], 'foo_k': 'foo_v',
'bar_k': 'bar_v_%s' % member['id']}
for member in (fake_snap_member_1, fake_snap_member_2)],
member_update_list,
)
def test_create_share_group_snapshot_failed_snapshot(self):
fake_snap_member_1 = {
'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e',
'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296',
'share_group_snapshot_id': 'fake_share_group_snapshot_id',
'share_instance_id': 'fake_share_instance_id_1',
'provider_location': 'should_not_be_used_1',
'share': {
'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2',
'size': 3,
'share_proto': 'fake_share_proto',
},
}
fake_snap_member_2 = {
'id': '1e010dfe-545b-432d-ab95-4ef03cd82f89',
'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296',
'share_group_snapshot_id': 'fake_share_group_snapshot_id',
'share_instance_id': 'fake_share_instance_id_2',
'provider_location': 'should_not_be_used_2',
'share': {
'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2',
'size': '2',
'share_proto': 'fake_share_proto',
},
}
fake_snap_dict = {
'status': 'available',
'project_id': '13c0be6290934bd98596cfa004650049',
'user_id': 'a0314a441ca842019b0952224aa39192',
'description': None,
'deleted': '0',
'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67',
'share_group_snapshot_members': [
fake_snap_member_1, fake_snap_member_2],
'deleted_at': None,
'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
'name': None
}
expected_exception = exception.ManilaException
share_driver = self._instantiate_share_driver(None, False)
share_driver._stats['share_group_snapshot_support'] = True
mock_create_snap = self.mock_object(
share_driver, 'create_snapshot',
mock.Mock(side_effect=[None, expected_exception]))
mock_delete_snap = self.mock_object(share_driver, 'delete_snapshot')
self.assertRaises(
expected_exception,
share_driver.create_share_group_snapshot,
'fake_context', fake_snap_dict)
fake_snap_member_1_expected = {
'snapshot_id': fake_snap_member_1['share_group_snapshot_id'],
'share_id': fake_snap_member_1['share_id'],
'share_instance_id': fake_snap_member_1['share']['id'],
'id': fake_snap_member_1['id'],
'share': fake_snap_member_1['share'],
'size': fake_snap_member_1['share']['size'],
'share_size': fake_snap_member_1['share']['size'],
'share_proto': fake_snap_member_1['share']['share_proto'],
'provider_location': None,
}
mock_create_snap.assert_has_calls([
mock.call(
'fake_context',
{'snapshot_id': member['share_group_snapshot_id'],
'share_id': member['share_id'],
'share_instance_id': member['share']['id'],
'id': member['id'],
'share': member['share'],
'size': member['share']['size'],
'share_size': member['share']['size'],
'share_proto': member['share']['share_proto'],
'provider_location': None},
share_server=None)
for member in (fake_snap_member_1, fake_snap_member_2)
])
mock_delete_snap.assert_called_with(
'fake_context', fake_snap_member_1_expected, share_server=None)
def test_create_share_group_snapshot_no_support(self):
fake_snap_dict = {
'status': 'available',
'project_id': '13c0be6290934bd98596cfa004650049',
'user_id': 'a0314a441ca842019b0952224aa39192',
'description': None,
'deleted': '0',
'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67',
'share_group_snapshot_members': [
{
'status': 'available',
'share_type_id': '1a9ed31e-ee70-483d-93ba-89690e028d7f',
'user_id': 'a0314a441ca842019b0952224aa39192',
'deleted': 'False',
'share_proto': 'NFS',
'project_id': '13c0be6290934bd98596cfa004650049',
'share_group_snapshot_id':
'f6aa3b59-57eb-421e-965c-4e182538e36a',
'deleted_at': None,
'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e',
'size': 1
},
],
'deleted_at': None,
'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
'name': None
}
share_driver = self._instantiate_share_driver(None, False)
share_driver._stats['share_group_snapshot_support'] = False
self.assertRaises(
exception.ShareGroupSnapshotNotSupported,
share_driver.create_share_group_snapshot,
'fake_context', fake_snap_dict)
def test_create_share_group_snapshot_no_members(self):
fake_snap_dict = {
'status': 'available',
'project_id': '13c0be6290934bd98596cfa004650049',
'user_id': 'a0314a441ca842019b0952224aa39192',
'description': None,
'deleted': '0',
'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67',
'share_group_snapshot_members': [],
'deleted_at': None,
'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
'name': None
}
share_driver = self._instantiate_share_driver(None, False)
share_driver._stats['share_group_snapshot_support'] = True
share_group_snapshot_update, member_update_list = (
share_driver.create_share_group_snapshot(
'fake_context', fake_snap_dict))
self.assertIsNone(share_group_snapshot_update)
self.assertIsNone(member_update_list)
def test_delete_share_group_snapshot(self):
fake_snap_member_1 = {
'id': '6813e06b-a8f5-4784-b17d-f3e91afa370e',
'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296',
'share_group_snapshot_id': 'fake_share_group_snapshot_id',
'share_instance_id': 'fake_share_instance_id_1',
'provider_location': 'fake_provider_location_2',
'share': {
'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2',
'size': 3,
'share_proto': 'fake_share_proto',
},
}
fake_snap_member_2 = {
'id': '1e010dfe-545b-432d-ab95-4ef03cd82f89',
'share_id': 'a3ebdba5-b4e1-46c8-a0ea-a9ac8daf5296',
'share_group_snapshot_id': 'fake_share_group_snapshot_id',
'share_instance_id': 'fake_share_instance_id_2',
'provider_location': 'fake_provider_location_2',
'share': {
'id': '420f978b-dbf6-4b3c-92fe-f5b17a0bb5e2',
'size': '2',
'share_proto': 'fake_share_proto',
},
}
fake_snap_dict = {
'status': 'available',
'project_id': '13c0be6290934bd98596cfa004650049',
'user_id': 'a0314a441ca842019b0952224aa39192',
'description': None,
'deleted': '0',
'share_group_id': '4b04fdc3-00b9-4909-ba1a-06e9b3f88b67',
'share_group_snapshot_members': [
fake_snap_member_1, fake_snap_member_2],
'deleted_at': None,
'id': 'f6aa3b59-57eb-421e-965c-4e182538e36a',
'name': None
}
share_driver = self._instantiate_share_driver(None, False)
share_driver._stats['share_group_snapshot_support'] = True
mock_delete_snap = self.mock_object(share_driver, 'delete_snapshot')
share_group_snapshot_update, member_update_list = (
share_driver.delete_share_group_snapshot(
'fake_context', fake_snap_dict))
mock_delete_snap.assert_has_calls([
mock.call(
'fake_context',
{'snapshot_id': member['share_group_snapshot_id'],
'share_id': member['share_id'],
'share_instance_id': member['share']['id'],
'id': member['id'],
'share': member['share'],
'size': member['share']['size'],
'share_size': member['share']['size'],
'share_proto': member['share']['share_proto'],
'provider_location': member['provider_location']},
share_server=None)
for member in (fake_snap_member_1, fake_snap_member_2)
])
self.assertIsNone(share_group_snapshot_update)
self.assertIsNone(member_update_list)
def test_snapshot_update_access(self):
share_driver = self._instantiate_share_driver(None, False)
self.assertRaises(NotImplementedError,
share_driver.snapshot_update_access,
'fake_context', 'fake_snapshot', ['r1', 'r2'],
[], [])
| |
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import sys
import pykwalify
import yaml
from pykwalify.core import Core as PyKwalify
from . import default
pykwalify.init_logging(1)
DEFAULT_CONFIG_FILE = "/opt/xos/xos_config.yaml"
DEFAULT_CONFIG_SCHEMA = "xos-config-schema.yaml"
INITIALIZED = False
CONFIG_FILE = None
CONFIG = {}
OVERRIDE_CONFIG = {}
OVERRIDE_CONFIG_FILE = None
OVERRIDE_CONFIG_SCHEMA = None
class Config:
"""
XOS Configuration APIs
"""
@staticmethod
def init(
config_file=DEFAULT_CONFIG_FILE,
config_schema=DEFAULT_CONFIG_SCHEMA,
override_config_file=None,
):
# make schema relative to this directory
# TODO give the possibility to specify an absolute path
config_schema = Config.get_abs_path(config_schema)
global INITIALIZED
global CONFIG
global CONFIG_FILE
global OVERRIDE_CONFIG
global OVERRIDE_CONFIG_FILE
global OVERRIDE_CONFIG_SCHEMA
# Use same schema for both provided and global config by default
OVERRIDE_CONFIG_SCHEMA = config_schema
OVERRIDE_CONFIG_FILE = override_config_file
        # the config module can be initialized only once
if INITIALIZED:
raise Exception("[XOS-Config] Module already initialized")
INITIALIZED = True
        # if XOS_CONFIG_FILE is defined, override the config_file
        # FIXME shouldn't this stay in whatever module calls this one, and just pass the file to the init method?
if os.environ.get("XOS_CONFIG_FILE"):
config_file = os.environ["XOS_CONFIG_FILE"]
        # if XOS_CONFIG_SCHEMA is defined, override the config_schema
        # FIXME shouldn't this stay in whatever module calls this one, and just pass the file to the init method?
if os.environ.get("XOS_CONFIG_SCHEMA"):
config_schema = Config.get_abs_path(os.environ["XOS_CONFIG_SCHEMA"])
# allow OVERRIDE_CONFIG_* to be overridden by env vars
if os.environ.get("XOS_OVERRIDE_CONFIG_FILE"):
OVERRIDE_CONFIG_FILE = os.environ["XOS_OVERRIDE_CONFIG_FILE"]
if os.environ.get("XOS_OVERRIDE_CONFIG_SCHEMA"):
OVERRIDE_CONFIG_SCHEMA = Config.get_abs_path(
os.environ["XOS_OVERRIDE_CONFIG_SCHEMA"]
)
        # if a -C parameter is set on the CLI, override the config_file
        # FIXME shouldn't this stay in whatever module calls this one, and just pass the file to the init method?
        cli_config_file = Config.get_cli_param(sys.argv)
        if cli_config_file:
            config_file = cli_config_file
CONFIG_FILE = config_file
CONFIG = Config.read_config(config_file, config_schema)
# if an override is set
if OVERRIDE_CONFIG_FILE is not None:
OVERRIDE_CONFIG = Config.read_config(
OVERRIDE_CONFIG_FILE, OVERRIDE_CONFIG_SCHEMA, True
)
@staticmethod
def get_config_file():
return CONFIG_FILE
@staticmethod
def clear():
global INITIALIZED
INITIALIZED = False
@staticmethod
def get_abs_path(path):
if os.path.isabs(path):
return path
return os.path.dirname(os.path.realpath(__file__)) + "/" + path
@staticmethod
def validate_config_format(config_file, config_schema):
schema = os.path.abspath(config_schema)
c = PyKwalify(source_file=config_file, schema_files=[schema])
c.validate(raise_exception=True)
@staticmethod
def get_cli_param(args):
last = None
for arg in args:
if last == "-C":
return arg
last = arg
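    # Illustrative usage (hypothetical argv, not from the original module):
    # get_cli_param returns the token that follows a "-C" flag, e.g.
    #     Config.get_cli_param(["xos", "-C", "/opt/xos/xos_config.yaml"])
    # evaluates to "/opt/xos/xos_config.yaml"; without "-C" it returns None.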
@staticmethod
def read_config(config_file, config_schema, ignore_if_not_found=False):
"""
Read the configuration file and return a dictionary
:param config_file: string
:return: dict
"""
if not os.path.exists(config_file) and ignore_if_not_found:
return {}
if not os.path.exists(config_file):
raise Exception("[XOS-Config] Config file not found at: %s" % config_file)
if not os.path.exists(config_schema):
raise Exception(
"[XOS-Config] Config schema not found at: %s" % config_schema
)
try:
Config.validate_config_format(config_file, config_schema)
except Exception as e:
try:
error_msg = e.msg
except AttributeError:
error_msg = str(e)
raise Exception("[XOS-Config] The config format is wrong: %s" % error_msg)
with open(config_file, "r") as stream:
return yaml.safe_load(stream)
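    # Illustrative sketch (hypothetical schema contents, assumed shape):
    # read_config validates the YAML with pykwalify before parsing it, so a
    # schema such as
    #     type: map
    #     mapping:
    #       database:
    #         type: map
    #         mapping:
    #           username: {type: str}
    # rejects a config whose database.username is not a string, surfacing as
    # "[XOS-Config] The config format is wrong: ...".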
@staticmethod
def get(query):
"""
Read a parameter from the config
:param query: a dot separated selector for configuration options (eg: database.username)
:return: the requested parameter in any format the parameter is specified
"""
global INITIALIZED
global CONFIG
global OVERRIDE_CONFIG
global OVERRIDE_CONFIG_FILE
if not INITIALIZED:
raise Exception("[XOS-Config] Module has not been initialized")
val = Config.get_param(query, CONFIG)
if OVERRIDE_CONFIG_FILE or not val:
# if we specified an override configuration, we should override the value
# we also look for the value in case it's missing
over_val = Config.get_param(query, OVERRIDE_CONFIG)
if over_val is not None:
val = over_val
if not val:
val = Config.get_param(query, default.DEFAULT_VALUES)
if not val:
# TODO if no val return none
# raise Exception('[XOS-Config] Config does not have a value (or a default) parameter %s' % query)
return None
return val
@staticmethod
def get_param(query, config):
"""
Search for a parameter in config's first level, other call get_nested_param
:param query: a dot separated selector for configuration options (eg: database.username)
:param config: the config source to read from (can be the config file or the defaults)
:return: the requested parameter in any format the parameter is specified
"""
keys = query.split(".")
if len(keys) == 1:
key = keys[0]
if key not in config:
return None
return config[key]
else:
return Config.get_nested_param(keys, config)
@staticmethod
def get_nested_param(keys, config):
"""
:param keys: a list of descending selector
:param config: the config source to read from (can be the config file or the defaults)
:return: the requested parameter in any format the parameter is specified
"""
param = config
for k in keys:
if k not in param:
return None
param = param[k]
return param
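    # Illustrative example (hypothetical values): with
    #     config = {"database": {"username": "xos"}}
    # Config.get_param("database.username", config) splits the query on "."
    # and hands ["database", "username"] to get_nested_param, which walks the
    # nested dicts and returns "xos"; any missing key short-circuits to None.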
if __name__ == "__main__":
Config.init()
| |
'''tzinfo timezone information for Africa/Windhoek.'''
from pytz.tzinfo import DstTzInfo
from pytz.tzinfo import memorized_datetime as d
from pytz.tzinfo import memorized_ttinfo as i
class Windhoek(DstTzInfo):
'''Africa/Windhoek timezone definition. See datetime.tzinfo for details'''
zone = 'Africa/Windhoek'
_utc_transition_times = [
d(1,1,1,0,0,0),
d(1903,2,28,22,30,0),
d(1942,9,20,0,0,0),
d(1943,3,20,23,0,0),
d(1990,3,20,22,0,0),
d(1994,4,2,22,0,0),
d(1994,9,4,1,0,0),
d(1995,4,2,0,0,0),
d(1995,9,3,1,0,0),
d(1996,4,7,0,0,0),
d(1996,9,1,1,0,0),
d(1997,4,6,0,0,0),
d(1997,9,7,1,0,0),
d(1998,4,5,0,0,0),
d(1998,9,6,1,0,0),
d(1999,4,4,0,0,0),
d(1999,9,5,1,0,0),
d(2000,4,2,0,0,0),
d(2000,9,3,1,0,0),
d(2001,4,1,0,0,0),
d(2001,9,2,1,0,0),
d(2002,4,7,0,0,0),
d(2002,9,1,1,0,0),
d(2003,4,6,0,0,0),
d(2003,9,7,1,0,0),
d(2004,4,4,0,0,0),
d(2004,9,5,1,0,0),
d(2005,4,3,0,0,0),
d(2005,9,4,1,0,0),
d(2006,4,2,0,0,0),
d(2006,9,3,1,0,0),
d(2007,4,1,0,0,0),
d(2007,9,2,1,0,0),
d(2008,4,6,0,0,0),
d(2008,9,7,1,0,0),
d(2009,4,5,0,0,0),
d(2009,9,6,1,0,0),
d(2010,4,4,0,0,0),
d(2010,9,5,1,0,0),
d(2011,4,3,0,0,0),
d(2011,9,4,1,0,0),
d(2012,4,1,0,0,0),
d(2012,9,2,1,0,0),
d(2013,4,7,0,0,0),
d(2013,9,1,1,0,0),
d(2014,4,6,0,0,0),
d(2014,9,7,1,0,0),
d(2015,4,5,0,0,0),
d(2015,9,6,1,0,0),
d(2016,4,3,0,0,0),
d(2016,9,4,1,0,0),
d(2017,4,2,0,0,0),
d(2017,9,3,1,0,0),
d(2018,4,1,0,0,0),
d(2018,9,2,1,0,0),
d(2019,4,7,0,0,0),
d(2019,9,1,1,0,0),
d(2020,4,5,0,0,0),
d(2020,9,6,1,0,0),
d(2021,4,4,0,0,0),
d(2021,9,5,1,0,0),
d(2022,4,3,0,0,0),
d(2022,9,4,1,0,0),
d(2023,4,2,0,0,0),
d(2023,9,3,1,0,0),
d(2024,4,7,0,0,0),
d(2024,9,1,1,0,0),
d(2025,4,6,0,0,0),
d(2025,9,7,1,0,0),
d(2026,4,5,0,0,0),
d(2026,9,6,1,0,0),
d(2027,4,4,0,0,0),
d(2027,9,5,1,0,0),
d(2028,4,2,0,0,0),
d(2028,9,3,1,0,0),
d(2029,4,1,0,0,0),
d(2029,9,2,1,0,0),
d(2030,4,7,0,0,0),
d(2030,9,1,1,0,0),
d(2031,4,6,0,0,0),
d(2031,9,7,1,0,0),
d(2032,4,4,0,0,0),
d(2032,9,5,1,0,0),
d(2033,4,3,0,0,0),
d(2033,9,4,1,0,0),
d(2034,4,2,0,0,0),
d(2034,9,3,1,0,0),
d(2035,4,1,0,0,0),
d(2035,9,2,1,0,0),
d(2036,4,6,0,0,0),
d(2036,9,7,1,0,0),
d(2037,4,5,0,0,0),
d(2037,9,6,1,0,0),
]
_transition_info = [
i(5400,0,'SWAT'),
i(7200,0,'SAST'),
i(10800,3600,'SAST'),
i(7200,0,'SAST'),
i(7200,0,'CAT'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
i(3600,0,'WAT'),
i(7200,3600,'WAST'),
]
Windhoek = Windhoek()
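# Illustrative note (not part of the generated data): each entry of
# _utc_transition_times pairs by index with _transition_info, whose tuples
# are (total utcoffset in seconds, dst offset in seconds, tzname); for
# example i(7200,3600,'WAST') denotes UTC+2 of which one hour is DST, while
# i(3600,0,'WAT') denotes plain UTC+1 standard time.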
| |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file './acq4/analysis/modules/pbm_ImageAnalysis/ctrlTemplatePhysiology.ui'
#
# Created: Tue Dec 24 01:49:14 2013
# by: PyQt4 UI code generator 4.10
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName(_fromUtf8("Form"))
Form.resize(315, 410)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(Form.sizePolicy().hasHeightForWidth())
Form.setSizePolicy(sizePolicy)
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(12)
Form.setFont(font)
self.gridLayout = QtGui.QGridLayout(Form)
self.gridLayout.setMargin(0)
self.gridLayout.setHorizontalSpacing(10)
self.gridLayout.setVerticalSpacing(0)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.groupBox = QtGui.QGroupBox(Form)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Ignored)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.groupBox.sizePolicy().hasHeightForWidth())
self.groupBox.setSizePolicy(sizePolicy)
self.groupBox.setAlignment(QtCore.Qt.AlignCenter)
self.groupBox.setObjectName(_fromUtf8("groupBox"))
self.gridLayout_2 = QtGui.QGridLayout(self.groupBox)
self.gridLayout_2.setSizeConstraint(QtGui.QLayout.SetNoConstraint)
self.gridLayout_2.setMargin(0)
self.gridLayout_2.setHorizontalSpacing(5)
self.gridLayout_2.setVerticalSpacing(0)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.line = QtGui.QFrame(self.groupBox)
self.line.setFrameShape(QtGui.QFrame.VLine)
self.line.setFrameShadow(QtGui.QFrame.Sunken)
self.line.setObjectName(_fromUtf8("line"))
self.gridLayout_2.addWidget(self.line, 1, 1, 2, 1)
self.line_2 = QtGui.QFrame(self.groupBox)
self.line_2.setFrameShape(QtGui.QFrame.HLine)
self.line_2.setFrameShadow(QtGui.QFrame.Sunken)
self.line_2.setObjectName(_fromUtf8("line_2"))
self.gridLayout_2.addWidget(self.line_2, 0, 0, 1, 1)
self.widget_2 = QtGui.QWidget(self.groupBox)
self.widget_2.setObjectName(_fromUtf8("widget_2"))
self.widget = QtGui.QWidget(self.widget_2)
self.widget.setGeometry(QtCore.QRect(145, -20, 146, 396))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget.sizePolicy().hasHeightForWidth())
self.widget.setSizePolicy(sizePolicy)
self.widget.setObjectName(_fromUtf8("widget"))
self.ImagePhys_burstISI = QtGui.QDoubleSpinBox(self.widget)
self.ImagePhys_burstISI.setGeometry(QtCore.QRect(0, 170, 106, 25))
font = QtGui.QFont()
font.setPointSize(12)
self.ImagePhys_burstISI.setFont(font)
self.ImagePhys_burstISI.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ImagePhys_burstISI.setDecimals(1)
self.ImagePhys_burstISI.setMinimum(1.0)
self.ImagePhys_burstISI.setMaximum(1000.0)
self.ImagePhys_burstISI.setSingleStep(10.0)
self.ImagePhys_burstISI.setProperty("value", 100.0)
self.ImagePhys_burstISI.setObjectName(_fromUtf8("ImagePhys_burstISI"))
self.ImagePhys_withinBurstISI = QtGui.QDoubleSpinBox(self.widget)
self.ImagePhys_withinBurstISI.setGeometry(QtCore.QRect(0, 195, 106, 25))
font = QtGui.QFont()
font.setPointSize(12)
self.ImagePhys_withinBurstISI.setFont(font)
self.ImagePhys_withinBurstISI.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ImagePhys_withinBurstISI.setDecimals(1)
self.ImagePhys_withinBurstISI.setMinimum(1.0)
self.ImagePhys_withinBurstISI.setMaximum(1000.0)
self.ImagePhys_withinBurstISI.setSingleStep(2.0)
self.ImagePhys_withinBurstISI.setProperty("value", 40.0)
self.ImagePhys_withinBurstISI.setObjectName(_fromUtf8("ImagePhys_withinBurstISI"))
self.ImagePhys_minBurstSpikes = QtGui.QSpinBox(self.widget)
self.ImagePhys_minBurstSpikes.setGeometry(QtCore.QRect(0, 220, 106, 25))
font = QtGui.QFont()
font.setPointSize(12)
self.ImagePhys_minBurstSpikes.setFont(font)
self.ImagePhys_minBurstSpikes.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ImagePhys_minBurstSpikes.setMinimum(2)
self.ImagePhys_minBurstSpikes.setMaximum(20)
self.ImagePhys_minBurstSpikes.setProperty("value", 3)
self.ImagePhys_minBurstSpikes.setObjectName(_fromUtf8("ImagePhys_minBurstSpikes"))
self.ImagePhys_STA = QtGui.QPushButton(self.widget_2)
self.ImagePhys_STA.setGeometry(QtCore.QRect(0, 255, 141, 32))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(11)
self.ImagePhys_STA.setFont(font)
self.ImagePhys_STA.setObjectName(_fromUtf8("ImagePhys_STA"))
self.ImagePhys_RevSTA = QtGui.QPushButton(self.widget_2)
self.ImagePhys_RevSTA.setEnabled(False)
self.ImagePhys_RevSTA.setGeometry(QtCore.QRect(0, 310, 141, 32))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(11)
self.ImagePhys_RevSTA.setFont(font)
self.ImagePhys_RevSTA.setObjectName(_fromUtf8("ImagePhys_RevSTA"))
self.ImagePhys_BTA = QtGui.QPushButton(self.widget_2)
self.ImagePhys_BTA.setGeometry(QtCore.QRect(0, 280, 141, 32))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(11)
self.ImagePhys_BTA.setFont(font)
self.ImagePhys_BTA.setObjectName(_fromUtf8("ImagePhys_BTA"))
self.line_5 = QtGui.QFrame(self.widget_2)
self.line_5.setGeometry(QtCore.QRect(5, 230, 131, 20))
self.line_5.setLineWidth(2)
self.line_5.setFrameShape(QtGui.QFrame.HLine)
self.line_5.setFrameShadow(QtGui.QFrame.Sunken)
self.line_5.setObjectName(_fromUtf8("line_5"))
self.label_4 = QtGui.QLabel(self.widget_2)
self.label_4.setGeometry(QtCore.QRect(15, 60, 123, 25))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(12)
self.label_4.setFont(font)
self.label_4.setObjectName(_fromUtf8("label_4"))
self.ImagePhys_PhysLPF = QtGui.QDoubleSpinBox(self.widget_2)
self.ImagePhys_PhysLPF.setGeometry(QtCore.QRect(143, 35, 112, 25))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(12)
self.ImagePhys_PhysLPF.setFont(font)
self.ImagePhys_PhysLPF.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ImagePhys_PhysLPF.setMinimum(-5000.0)
self.ImagePhys_PhysLPF.setMaximum(50000.0)
self.ImagePhys_PhysLPF.setProperty("value", 2500.0)
self.ImagePhys_PhysLPF.setObjectName(_fromUtf8("ImagePhys_PhysLPF"))
self.ImagePhys_PhysThresh = QtGui.QDoubleSpinBox(self.widget_2)
self.ImagePhys_PhysThresh.setGeometry(QtCore.QRect(143, 60, 112, 25))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(12)
self.ImagePhys_PhysThresh.setFont(font)
self.ImagePhys_PhysThresh.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.ImagePhys_PhysThresh.setDecimals(1)
self.ImagePhys_PhysThresh.setMinimum(0.0)
self.ImagePhys_PhysThresh.setMaximum(2000.0)
self.ImagePhys_PhysThresh.setSingleStep(5.0)
self.ImagePhys_PhysThresh.setProperty("value", 50.0)
self.ImagePhys_PhysThresh.setObjectName(_fromUtf8("ImagePhys_PhysThresh"))
self.ImagePhys_PhysSign = QtGui.QComboBox(self.widget_2)
self.ImagePhys_PhysSign.setGeometry(QtCore.QRect(140, 87, 118, 26))
font = QtGui.QFont()
font.setPointSize(12)
self.ImagePhys_PhysSign.setFont(font)
self.ImagePhys_PhysSign.setObjectName(_fromUtf8("ImagePhys_PhysSign"))
self.ImagePhys_PhysSign.addItem(_fromUtf8(""))
self.ImagePhys_PhysSign.addItem(_fromUtf8(""))
self.label_7 = QtGui.QLabel(self.widget_2)
self.label_7.setGeometry(QtCore.QRect(15, 35, 123, 25))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(12)
self.label_7.setFont(font)
self.label_7.setObjectName(_fromUtf8("label_7"))
self.label_2 = QtGui.QLabel(self.widget_2)
self.label_2.setGeometry(QtCore.QRect(15, 89, 123, 20))
font = QtGui.QFont()
font.setPointSize(12)
self.label_2.setFont(font)
self.label_2.setObjectName(_fromUtf8("label_2"))
self.label_6 = QtGui.QLabel(self.widget_2)
self.label_6.setGeometry(QtCore.QRect(15, 15, 123, 20))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(12)
font.setBold(True)
font.setWeight(75)
self.label_6.setFont(font)
self.label_6.setObjectName(_fromUtf8("label_6"))
self.label_5 = QtGui.QLabel(self.widget_2)
self.label_5.setGeometry(QtCore.QRect(5, 205, 131, 25))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(12)
self.label_5.setFont(font)
self.label_5.setObjectName(_fromUtf8("label_5"))
self.label_8 = QtGui.QLabel(self.widget_2)
self.label_8.setGeometry(QtCore.QRect(5, 180, 123, 25))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(12)
self.label_8.setFont(font)
self.label_8.setObjectName(_fromUtf8("label_8"))
self.label_9 = QtGui.QLabel(self.widget_2)
self.label_9.setGeometry(QtCore.QRect(5, 155, 123, 25))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(12)
self.label_9.setFont(font)
self.label_9.setObjectName(_fromUtf8("label_9"))
self.ImagePhys_DetectSpikes = QtGui.QPushButton(self.widget_2)
self.ImagePhys_DetectSpikes.setGeometry(QtCore.QRect(0, 120, 137, 32))
self.ImagePhys_DetectSpikes.setMinimumSize(QtCore.QSize(5, 0))
font = QtGui.QFont()
font.setFamily(_fromUtf8("Arial"))
font.setPointSize(11)
self.ImagePhys_DetectSpikes.setFont(font)
self.ImagePhys_DetectSpikes.setObjectName(_fromUtf8("ImagePhys_DetectSpikes"))
self.gridLayout_2.addWidget(self.widget_2, 1, 0, 2, 1)
self.gridLayout.addWidget(self.groupBox, 0, 0, 1, 1)
self.retranslateUi(Form)
self.ImagePhys_PhysSign.setCurrentIndex(1)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
Form.setWindowTitle(_translate("Form", "Form", None))
self.groupBox.setTitle(_translate("Form", "Physiology Analysis Functions", None))
self.ImagePhys_STA.setText(_translate("Form", "Spike-triggered Average", None))
self.ImagePhys_RevSTA.setText(_translate("Form", "Rev STA", None))
self.ImagePhys_BTA.setText(_translate("Form", "Burst-triggered Average", None))
self.label_4.setText(_translate("Form", "Event Thresh", None))
self.ImagePhys_PhysThresh.setSuffix(_translate("Form", " pA", None))
self.ImagePhys_PhysSign.setItemText(0, _translate("Form", "+", None))
self.ImagePhys_PhysSign.setItemText(1, _translate("Form", "-", None))
self.label_7.setText(_translate("Form", "LPF", None))
self.label_2.setText(_translate("Form", "Event Sign", None))
self.label_6.setText(_translate("Form", "Physiology", None))
self.label_5.setText(_translate("Form", "Minimum # spikes/burst", None))
self.label_8.setText(_translate("Form", "Max burst ISI (msec)", None))
self.label_9.setText(_translate("Form", "Min Interburst Interval", None))
self.ImagePhys_DetectSpikes.setText(_translate("Form", "Detect Spikes", None))
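# Illustrative usage sketch (assumed host code, not part of this generated
# module): a generated Ui_* class is composed onto a live widget like so:
#
#     app = QtGui.QApplication([])
#     form = QtGui.QWidget()
#     ui = Ui_Form()
#     ui.setupUi(form)   # instantiates the child widgets onto ``form``
#     form.show()
#     app.exec_()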
| |
#!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
from buildbot_lib import (
BuildContext, BuildStatus, Command, ParseStandardCommandLine,
RemoveSconsBuildDirectories, RunBuild, SetupLinuxEnvironment,
SetupMacEnvironment, SetupWindowsEnvironment, SCons, Step )
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import pynacl.platform
def RunSconsTests(status, context):
# Clean out build directories, unless we have built elsewhere.
if not context['skip_build']:
with Step('clobber scons', status):
RemoveSconsBuildDirectories()
# Run checkdeps script to vet #includes.
with Step('checkdeps', status):
Command(context, cmd=[sys.executable, 'tools/checkdeps/checkdeps.py'])
arch = context['default_scons_platform']
flags_build = ['do_not_run_tests=1']
flags_run = []
  # This file is run 3 different ways for ARM builds. The qemu-only trybot
  # does a normal build-and-run with the emulator, just like the x86 bots.
  # The panda build side runs on an x86 machine with skip_run, and then packs
  # up the result and triggers an ARM hardware tester that runs with
  # skip_build.
if arch != 'arm':
    # Unlike their arm counterparts, we do not run trusted tests on x86 bots.
    # Trusted tests get plenty of coverage by other bots, e.g. nacl-gcc bots.
    # We make the assumption here that there are no "exotic tests" which
    # are trusted in nature but are somehow dependent on the untrusted TC.
flags_build.append('skip_trusted_tests=1')
flags_run.append('skip_trusted_tests=1')
if context['skip_run']:
flags_run.append('do_not_run_tests=1')
if arch == 'arm':
# For ARM hardware bots, force_emulator= disables use of QEMU, which
# enables building tests which don't work under QEMU.
flags_build.append('force_emulator=')
flags_run.append('force_emulator=')
if context['skip_build']:
flags_run.extend(['naclsdk_validate=0', 'built_elsewhere=1'])
if not context['skip_build']:
# For ARM builders which will trigger hardware testers, run the hello world
# test with the emulator as a basic sanity check before doing anything else.
if arch == 'arm' and context['skip_run']:
with Step('hello_world ' + arch, status):
SCons(context, parallel=True, args=['run_hello_world_test'])
with Step('build_all ' + arch, status):
SCons(context, parallel=True, args=flags_build)
smoke_tests = ['small_tests', 'medium_tests']
# Normal pexe-mode tests
with Step('smoke_tests ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True, args=flags_run + smoke_tests)
# Large tests cannot be run in parallel
with Step('large_tests ' + arch, status, halt_on_fail=False):
SCons(context, parallel=False, args=flags_run + ['large_tests'])
with Step('nonpexe_tests ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True,
args=flags_run + ['pnacl_generate_pexe=0', 'nonpexe_tests'])
irt_mode = context['default_scons_mode'] + ['nacl_irt_test']
# Build all the tests with the IRT
if not context['skip_build']:
with Step('build_all_irt ' + arch, status):
SCons(context, parallel=True, mode=irt_mode, args=flags_build)
smoke_tests_irt = ['small_tests_irt', 'medium_tests_irt']
# Run tests with the IRT.
with Step('smoke_tests_irt ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + smoke_tests_irt)
with Step('large_tests_irt ' + arch, status, halt_on_fail=False):
SCons(context, parallel=False, mode=irt_mode,
args=flags_run + ['large_tests_irt'])
# Run some nacl_clang tests. Eventually we will have bots that just run
# buildbot_standard with nacl_clang and this can be split out.
context['pnacl'] = False
context['nacl_clang'] = True
if not context['skip_build']:
with Step('build_nacl_clang ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True, args=flags_build)
with Step('smoke_tests_nacl_clang ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True,
args=flags_run + ['small_tests', 'medium_tests'])
with Step('large_tests_nacl_clang ' + arch, status, halt_on_fail=False):
SCons(context, parallel=False,
args=flags_run + ['large_tests'])
context['pnacl'] = True
context['nacl_clang'] = False
# Test sandboxed translation
# TODO(dschuff): The standalone sandboxed translator driver does not have
# the batch script wrappers, so it can't run on Windows. Either add them to
# the translator package or make SCons use the pnacl_newlib drivers except
# on the ARM bots where we don't have the pnacl_newlib drivers.
# The mac standalone sandboxed translator is flaky.
# https://code.google.com/p/nativeclient/issues/detail?id=3856
if not context.Windows() and not context.Mac():
flags_run_sbtc = ['use_sandboxed_translator=1']
sbtc_tests = ['toolchain_tests_irt']
if arch == 'arm':
# When splitting the build from the run, translate_in_build_step forces
# the translation to run on the run side (it usually runs on the build
# side because that runs with more parallelism)
if context['skip_build'] or context['skip_run']:
flags_run_sbtc.append('translate_in_build_step=0')
else:
# The ARM sandboxed translator is flaky under qemu, so run a very small
# set of tests on the qemu-only trybot.
sbtc_tests = ['run_hello_world_test_irt']
else:
sbtc_tests.append('large_code')
with Step('sandboxed_translator_tests ' + arch, status,
halt_on_fail=False):
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + flags_run_sbtc + sbtc_tests)
with Step('sandboxed_translator_fast_tests ' + arch, status,
halt_on_fail=False):
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + flags_run_sbtc + ['translate_fast=1'] + sbtc_tests)
# Test Non-SFI Mode.
  # The PNaCl toolchain currently supports Non-SFI versions only for
  # x86-32 and ARM.
# The x86-64 toolchain bot currently also runs these tests from
# buildbot_pnacl.sh
if context.Linux() and (arch == 'x86-32' or arch == 'arm'):
with Step('nonsfi_tests ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True, mode=irt_mode,
args=flags_run +
['nonsfi_nacl=1',
'nonsfi_tests',
'nonsfi_tests_irt'])
# Build with pnacl_generate_pexe=0 to allow using pnacl-clang with
# direct-to-native mode. This allows assembly to be used in tests.
with Step('nonsfi_tests_nopnacl_generate_pexe ' + arch,
status, halt_on_fail=False):
extra_args = ['nonsfi_nacl=1',
'pnacl_generate_pexe=0',
'nonsfi_tests',
'nonsfi_tests_irt']
# nonsfi_tests_irt with pnacl_generate_pexe=0 does not pass on x86-32.
# https://code.google.com/p/nativeclient/issues/detail?id=4093
if arch == 'x86-32':
extra_args.remove('nonsfi_tests_irt')
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + extra_args)
# Test nonsfi_loader linked against host's libc.
with Step('nonsfi_tests_host_libc ' + arch, status, halt_on_fail=False):
# Using skip_nonstable_bitcode=1 here disables the tests for
# zero-cost C++ exception handling, which don't pass for Non-SFI
# mode yet because we don't build libgcc_eh for Non-SFI mode.
SCons(context, parallel=True, mode=irt_mode,
args=flags_run +
['nonsfi_nacl=1', 'use_newlib_nonsfi_loader=0',
'nonsfi_tests', 'nonsfi_tests_irt',
'toolchain_tests_irt', 'skip_nonstable_bitcode=1'])
# Test unsandboxed mode.
if (context.Linux() or context.Mac()) and arch == 'x86-32':
if context.Linux():
tests = ['run_' + test + '_test_irt' for test in
['hello_world', 'irt_futex', 'thread', 'float',
'malloc_realloc_calloc_free', 'dup', 'cond_timedwait',
'getpid']]
else:
# TODO(mseaborn): Use the same test list as on Linux when the threading
# tests pass for Mac.
tests = ['run_hello_world_test_irt']
with Step('unsandboxed_tests ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True, mode=irt_mode,
args=flags_run + ['pnacl_unsandboxed=1'] + tests)
# Test MinSFI.
if not context.Windows() and (arch == 'x86-32' or arch == 'x86-64'):
with Step('minsfi_tests ' + arch, status, halt_on_fail=False):
SCons(context, parallel=True,
args=flags_run + ['minsfi=1', 'minsfi_tests'])
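# Illustrative summary of RunSconsTests (flag values inferred from the ARM
# comments above, not stated elsewhere): the three ARM configurations map
# onto the context flags roughly as
#     qemu-only trybot    : skip_build=False, skip_run=False
#     panda build side    : skip_build=False, skip_run=True
#     ARM hardware tester : skip_build=True,  skip_run=False
# which is why most Steps above are guarded by those two flags.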
def Main():
context = BuildContext()
status = BuildStatus(context)
ParseStandardCommandLine(context)
if context.Linux():
SetupLinuxEnvironment(context)
elif context.Windows():
SetupWindowsEnvironment(context)
elif context.Mac():
SetupMacEnvironment(context)
else:
raise Exception('Unsupported platform')
# Panda bots only have 2 cores.
if pynacl.platform.GetArch() == 'arm':
context['max_jobs'] = 2
RunBuild(RunSconsTests, status)
if __name__ == '__main__':
Main()
| |
from __future__ import print_function, absolute_import, division
import warnings
import numpy as np
from numpy.ma.core import nomask
import dask.array as da
from astropy import convolution
from astropy import units as u
from astropy import wcs
#from astropy import log
from astropy.io.fits import Header, HDUList, PrimaryHDU, BinTableHDU, FITS_rec
from radio_beam import Beam, Beams
from astropy.io.registry import UnifiedReadWriteMethod
from . import spectral_axis
from .io.core import LowerDimensionalObjectWrite
from .utils import SliceWarning, BeamWarning, SmoothingWarning, FITSWarning, BeamUnitsError
from . import cube_utils
from . import wcs_utils
from .masks import BooleanArrayMask, MaskBase
from .base_class import (BaseNDClass, SpectralAxisMixinClass,
SpatialCoordMixinClass, MaskableArrayMixinClass,
MultiBeamMixinClass, BeamMixinClass,
HeaderMixinClass
)
__all__ = ['LowerDimensionalObject', 'Projection', 'Slice', 'OneDSpectrum']
class LowerDimensionalObject(u.Quantity, BaseNDClass, HeaderMixinClass):
"""
Generic class for 1D and 2D objects.
"""
@property
def hdu(self):
if self.wcs is None:
hdu = PrimaryHDU(self.value)
else:
hdu = PrimaryHDU(self.value, header=self.header)
hdu.header['BUNIT'] = self.unit.to_string(format='fits')
if 'beam' in self.meta:
hdu.header.update(self.meta['beam'].to_header_keywords())
return hdu
def read(self, *args, **kwargs):
raise NotImplementedError()
write = UnifiedReadWriteMethod(LowerDimensionalObjectWrite)
def __getslice__(self, start, end, increment=None):
# I don't know why this is needed, but apparently one of the inherited
        # classes implements __getslice__, which forces us to override it
# I can't find any examples where __getslice__ is actually implemented,
# though, so this seems like a deep and frightening bug.
#log.debug("Getting a slice from {0} to {1}".format(start,end))
return self.__getitem__(slice(start, end, increment))
def __getitem__(self, key, **kwargs):
"""
Return a new `~spectral_cube.lower_dimensional_structures.LowerDimensionalObject` of the same class while keeping
other properties fixed.
"""
new_qty = super(LowerDimensionalObject, self).__getitem__(key)
if new_qty.ndim < 2:
# do not return a projection
return u.Quantity(new_qty)
if self._wcs is not None:
if ((isinstance(key, tuple) and
any(isinstance(k, slice) for k in key) and
len(key) > self.ndim)):
# Example cases include: indexing tricks like [:,:,None]
warnings.warn("Slice {0} cannot be used on this {1}-dimensional"
" array's WCS. If this is intentional, you "
" should use this {2}'s ``array`` or ``quantity``"
" attribute."
.format(key, self.ndim, type(self)),
SliceWarning
)
return self.quantity[key]
else:
newwcs = self._wcs[key]
else:
newwcs = None
new = self.__class__(value=new_qty.value,
unit=new_qty.unit,
copy=False,
wcs=newwcs,
meta=self._meta,
mask=(self._mask[key] if self._mask is not nomask
else None),
header=self._header,
**kwargs)
new._wcs = newwcs
new._meta = self._meta
        new._mask = (self._mask[key] if self._mask is not nomask else nomask)
new._header = self._header
return new
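    # Illustrative behavior sketch (hypothetical object ``p``): for a 2-d
    # Projection, ``p[0:2, 0:2]`` returns a new Projection with a matching
    # WCS slice; ``p[0, 0]`` drops below two dimensions and comes back as a
    # plain ``u.Quantity``; and an index the WCS cannot follow, such as
    # ``p[:, :, None]``, emits a SliceWarning and returns ``p.quantity[key]``.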
def __array_finalize__(self, obj):
self._wcs = getattr(obj, '_wcs', None)
self._meta = getattr(obj, '_meta', None)
self._mask = getattr(obj, '_mask', None)
self._header = getattr(obj, '_header', None)
self._spectral_unit = getattr(obj, '_spectral_unit', None)
self._fill_value = getattr(obj, '_fill_value', np.nan)
self._wcs_tolerance = getattr(obj, '_wcs_tolerance', 0.0)
if isinstance(obj, VaryingResolutionOneDSpectrum):
self._beams = getattr(obj, '_beams', None)
else:
self._beam = getattr(obj, '_beam', None)
super(LowerDimensionalObject, self).__array_finalize__(obj)
@property
def __array_priority__(self):
return super(LowerDimensionalObject, self).__array_priority__*2
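    # Illustrative note: numpy uses __array_priority__ to pick which operand
    # handles the output of a mixed-type operation, so doubling the inherited
    # Quantity priority makes arithmetic between an LDO and a plain Quantity
    # come back as the LDO subclass rather than a bare Quantity.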
@property
def array(self):
"""
Get a pure array representation of the LDO. Useful when multiplying
and using numpy indexing tricks.
"""
return np.asarray(self)
@property
def _data(self):
# the _data property is required by several other mixins
# (which probably means defining it here is a bad design)
return self.array
@property
def quantity(self):
"""
Get a pure `~astropy.units.Quantity` representation of the LDO.
"""
return u.Quantity(self)
def to(self, unit, equivalencies=[], freq=None):
"""
Return a new `~spectral_cube.lower_dimensional_structures.Projection`
of the same class with the specified unit.
See `astropy.units.Quantity.to` for further details.
"""
if not isinstance(unit, u.Unit):
unit = u.Unit(unit)
if unit == self.unit:
# No copying
return self
if hasattr(self, 'with_spectral_unit'):
freq = self.with_spectral_unit(u.Hz).spectral_axis
if freq is None and 'RESTFRQ' in self.header:
freq = self.header['RESTFRQ'] * u.Hz
# Create the tuple of unit conversions needed.
factor = cube_utils.bunit_converters(self, unit, equivalencies=equivalencies,
freq=freq)
converted_array = (self.quantity * factor).value
# use private versions of variables, not the generated property
# versions
# Not entirely sure the use of __class__ here is kosher, but we do want
# self.__class__, not super()
new = self.__class__(value=converted_array, unit=unit, copy=True,
wcs=self._wcs, meta=self._meta, mask=self._mask,
header=self._header)
return new
@property
def _mask(self):
""" Annoying hack to deal with np.ma.core.is_mask failures (I don't
like using __ but I think it's necessary here)"""
if self.__mask is None:
# need this to be *exactly* the numpy boolean False
return nomask
return self.__mask
@_mask.setter
def _mask(self, value):
self.__mask = value
def shrink_mask(self):
"""
Copy of the numpy masked_array shrink_mask method. This is essentially
a hack needed for matplotlib to show images.
"""
m = self._mask
if m.ndim and not m.any():
self._mask = nomask
return self
def _initial_set_mask(self, mask):
"""
Helper tool to validate mask when originally setting it in __new__
Note that because this is intended to be used in __new__, order
matters: ``self`` must have ``_wcs``, for example.
"""
if mask is None:
mask = BooleanArrayMask(np.ones_like(self.value, dtype=bool),
self._wcs, shape=self.value.shape)
elif isinstance(mask, np.ndarray):
if mask.shape != self.value.shape:
raise ValueError("Mask shape must match the {0} shape."
.format(self.__class__.__name__)
)
mask = BooleanArrayMask(mask, self._wcs, shape=self.value.shape)
elif isinstance(mask, MaskBase):
pass
else:
raise TypeError("mask of type {} is not a supported mask "
"type.".format(type(mask)))
# Validate the mask before setting
mask._validate_wcs(new_data=self.value, new_wcs=self._wcs,
wcs_tolerance=self._wcs_tolerance)
self._mask = mask
class Projection(LowerDimensionalObject, SpatialCoordMixinClass,
MaskableArrayMixinClass, BeamMixinClass):
def __new__(cls, value, unit=None, dtype=None, copy=True, wcs=None,
meta=None, mask=None, header=None, beam=None,
fill_value=np.nan, read_beam=False, wcs_tolerance=0.0):
if np.asarray(value).ndim != 2:
raise ValueError("value should be a 2-d array")
if wcs is not None and wcs.wcs.naxis != 2:
raise ValueError("wcs should have two dimension")
self = u.Quantity.__new__(cls, value, unit=unit, dtype=dtype,
copy=copy).view(cls)
self._wcs = wcs
self._meta = {} if meta is None else meta
self._wcs_tolerance = wcs_tolerance
self._initial_set_mask(mask)
self._fill_value = fill_value
if header is not None:
self._header = header
else:
self._header = Header()
if beam is None:
if "beam" in self.meta:
beam = self.meta['beam']
elif read_beam:
beam = cube_utils.try_load_beam(header)
if beam is None:
warnings.warn("Cannot load beam from header.",
BeamWarning
)
if beam is not None:
self.beam = beam
self.meta['beam'] = beam
# TODO: Enable header updating when non-celestial slices are
# properly handled in the WCS object.
# self._header.update(beam.to_header_keywords())
self._cache = {}
return self
def with_beam(self, beam, raise_error_jybm=True):
'''
Attach a new beam object to the Projection.
Parameters
----------
beam : `~radio_beam.Beam`
A new beam object.
'''
if not isinstance(beam, Beam):
raise TypeError("beam must be a radio_beam.Beam object.")
self.check_jybeam_smoothing(raise_error_jybm=raise_error_jybm)
meta = self.meta.copy()
meta['beam'] = beam
return self._new_projection_with(beam=beam, meta=meta)
def with_fill_value(self, fill_value):
"""
Create a new :class:`Projection` or :class:`Slice` with a different
``fill_value``.
"""
return self._new_projection_with(fill_value=fill_value)
@property
def _new_thing_with(self):
return self._new_projection_with
def _new_projection_with(self, data=None, wcs=None, mask=None, meta=None,
fill_value=None, spectral_unit=None, unit=None,
header=None, wcs_tolerance=None, beam=None,
**kwargs):
data = self._data if data is None else data
if unit is None and hasattr(data, 'unit'):
if data.unit != self.unit:
raise u.UnitsError("New data unit '{0}' does not"
" match unit '{1}'. You can"
" override this by specifying the"
" `unit` keyword."
.format(data.unit, self.unit))
unit = data.unit
elif unit is None:
unit = self.unit
elif unit is not None:
# convert string units to Units
if not isinstance(unit, u.Unit):
unit = u.Unit(unit)
if hasattr(data, 'unit'):
if u.Unit(unit) != data.unit:
raise u.UnitsError("The specified new cube unit '{0}' "
"does not match the input unit '{1}'."
.format(unit, data.unit))
else:
data = u.Quantity(data, unit=unit, copy=False)
wcs = self._wcs if wcs is None else wcs
mask = self._mask if mask is None else mask
if meta is None:
meta = {}
meta.update(self._meta)
if unit is not None:
meta['BUNIT'] = unit.to_string(format='FITS')
fill_value = self._fill_value if fill_value is None else fill_value
if beam is None:
if hasattr(self, 'beam'):
beam = self.beam
newproj = self.__class__(value=data, wcs=wcs, mask=mask, meta=meta,
unit=unit, fill_value=fill_value,
header=header or self._header,
wcs_tolerance=wcs_tolerance or self._wcs_tolerance,
beam=beam,
**kwargs)
return newproj
@staticmethod
def from_hdu(hdu):
'''
Return a projection from a FITS HDU.
'''
if isinstance(hdu, HDUList):
hdul = hdu
hdu = hdul[0]
if not len(hdu.data.shape) == 2:
raise ValueError("HDU must contain two-dimensional data.")
meta = {}
mywcs = wcs.WCS(hdu.header)
if "BUNIT" in hdu.header:
unit = cube_utils.convert_bunit(hdu.header["BUNIT"])
meta["BUNIT"] = hdu.header["BUNIT"]
else:
unit = None
beam = cube_utils.try_load_beam(hdu.header)
self = Projection(hdu.data, unit=unit, wcs=mywcs, meta=meta,
header=hdu.header, beam=beam)
return self
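    # Illustrative usage sketch: constructing a Projection straight from a
    # FITS file (the filename is hypothetical).
    #
    #     from astropy.io import fits
    #     proj = Projection.from_hdu(fits.open('moment0.fits')[0])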
def quicklook(self, filename=None, use_aplpy=True, aplpy_kwargs={}):
"""
Use `APLpy <https://pypi.python.org/pypi/APLpy>`_ to make a quick-look
image of the projection. This will make the ``FITSFigure`` attribute
available.
If there are unmatched celestial axes, this will instead show an image
without axis labels.
Parameters
----------
        filename : str or None
Optional - the filename to save the quicklook to.
"""
if use_aplpy:
try:
if not hasattr(self, 'FITSFigure'):
import aplpy
self.FITSFigure = aplpy.FITSFigure(self.hdu,
**aplpy_kwargs)
self.FITSFigure.show_grayscale()
self.FITSFigure.add_colorbar()
if filename is not None:
self.FITSFigure.save(filename)
except (wcs.InconsistentAxisTypesError, ImportError):
self._quicklook_mpl(filename=filename)
else:
self._quicklook_mpl(filename=filename)
def _quicklook_mpl(self, filename=None):
from matplotlib import pyplot
self.figure = pyplot.gcf()
self.image = pyplot.imshow(self.value)
if filename is not None:
self.figure.savefig(filename)
def convolve_to(self, beam, convolve=convolution.convolve_fft,
**kwargs):
"""
Convolve the image to a specified beam.
Parameters
----------
beam : `radio_beam.Beam`
The beam to convolve to
convolve : function
The astropy convolution function to use, either
`astropy.convolution.convolve` or
`astropy.convolution.convolve_fft`
Returns
-------
proj : `Projection`
A Projection convolved to the given ``beam`` object.
"""
self._raise_wcs_no_celestial()
if not hasattr(self, 'beam'):
raise ValueError("No beam is contained in Projection.meta.")
# Check if the beams are the same.
if beam == self.beam:
warnings.warn("The given beam is identical to the current beam. "
"Skipping convolution.")
return self
pixscale = wcs.utils.proj_plane_pixel_area(self.wcs.celestial)**0.5 * u.deg
convolution_kernel = \
beam.deconvolve(self.beam).as_kernel(pixscale)
newdata = convolve(self.value, convolution_kernel,
normalize_kernel=True,
**kwargs)
self = Projection(newdata, unit=self.unit, wcs=self.wcs,
meta=self.meta, header=self.header,
beam=beam)
return self
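    # Illustrative usage sketch: smoothing to a coarser circular beam.
    # ``proj`` must already have a beam attached; the 10 arcsec target beam
    # is an invented example value.
    #
    #     import astropy.units as u
    #     from radio_beam import Beam
    #     target = Beam(major=10 * u.arcsec, minor=10 * u.arcsec, pa=0 * u.deg)
    #     smoothed = proj.convolve_to(target)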
def reproject(self, header, order='bilinear'):
"""
Reproject the image into a new header.
Parameters
----------
header : `astropy.io.fits.Header`
A header specifying a cube in valid WCS
order : int or str, optional
The order of the interpolation (if ``mode`` is set to
``'interpolation'``). This can be either one of the following
strings:
* 'nearest-neighbor'
* 'bilinear'
* 'biquadratic'
* 'bicubic'
or an integer. A value of ``0`` indicates nearest neighbor
interpolation.
"""
self._raise_wcs_no_celestial()
try:
from reproject.version import version
except ImportError:
raise ImportError("Requires the reproject package to be"
" installed.")
# Need version > 0.2 to work with cubes
from distutils.version import LooseVersion
if LooseVersion(version) < "0.3":
raise Warning("Requires version >=0.3 of reproject. The current "
"version is: {}".format(version))
from reproject import reproject_interp
# TODO: Find the minimal footprint that contains the header and only reproject that
# (see FITS_tools.regrid_cube for a guide on how to do this)
newwcs = wcs.WCS(header)
shape_out = [header['NAXIS{0}'.format(i + 1)] for i in range(header['NAXIS'])][::-1]
newproj, newproj_valid = reproject_interp((self.value,
self.header),
newwcs,
shape_out=shape_out,
order=order)
self = Projection(newproj, unit=self.unit, wcs=newwcs,
meta=self.meta, header=header,
read_beam=True)
return self
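    # Illustrative usage sketch: regridding one projection onto another
    # projection's celestial grid (both objects are assumed to exist and to
    # have valid celestial WCS headers).
    #
    #     reprojected = proj.reproject(other_proj.header)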
def subimage(self, xlo='min', xhi='max', ylo='min', yhi='max'):
"""
Extract a region spatially.
When spatial WCS dimensions are given as an `~astropy.units.Quantity`,
the spatial coordinates of the 'lo' and 'hi' corners are solved together.
This minimizes WCS variations due to the sky curvature when slicing from
a large (>1 deg) image.
Parameters
----------
[xy]lo/[xy]hi : int or `astropy.units.Quantity` or `min`/`max`
The endpoints to extract. If given as a quantity, will be
interpreted as World coordinates. If given as a string or
int, will be interpreted as pixel coordinates.
"""
self._raise_wcs_no_celestial()
# Solve for the spatial pixel indices together
limit_dict = wcs_utils.find_spatial_pixel_index(self, xlo, xhi, ylo, yhi)
slices = [slice(limit_dict[xx + 'lo'], limit_dict[xx + 'hi'])
for xx in 'yx']
return self[tuple(slices)]
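    # Illustrative usage sketch: cutting out a sub-region in either pixel or
    # world coordinates (all numbers are invented).
    #
    #     import astropy.units as u
    #     cutout_pix = proj.subimage(xlo=10, xhi=100, ylo=10, yhi=100)
    #     cutout_sky = proj.subimage(xlo=15.2 * u.deg, xhi=15.0 * u.deg,
    #                                ylo=-1.0 * u.deg, yhi=-0.8 * u.deg)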
def to(self, unit, equivalencies=[], freq=None):
"""
Return a new `~spectral_cube.lower_dimensional_structures.Projection`
of the same class with the specified unit.
See `astropy.units.Quantity.to` for further details.
"""
return super(Projection, self).to(unit, equivalencies, freq)
# A slice is just like a projection in every way
class Slice(Projection):
pass
class BaseOneDSpectrum(LowerDimensionalObject, MaskableArrayMixinClass,
SpectralAxisMixinClass):
"""
Properties shared between OneDSpectrum and VaryingResolutionOneDSpectrum.
"""
def __new__(cls, value, unit=None, dtype=None, copy=True, wcs=None,
meta=None, mask=None, header=None, spectral_unit=None,
fill_value=np.nan, wcs_tolerance=0.0):
#log.debug("Creating a OneDSpectrum with __new__")
if np.asarray(value).ndim != 1:
raise ValueError("value should be a 1-d array")
if wcs is not None and wcs.wcs.naxis != 1:
raise ValueError("wcs should have two dimension")
self = u.Quantity.__new__(cls, value, unit=unit, dtype=dtype,
copy=copy).view(cls)
self._wcs = wcs
self._meta = {} if meta is None else meta
self._wcs_tolerance = wcs_tolerance
self._initial_set_mask(mask)
self._fill_value = fill_value
if header is not None:
self._header = header
else:
self._header = Header()
self._spectral_unit = spectral_unit
if spectral_unit is None:
if 'CUNIT1' in self._header:
self._spectral_unit = u.Unit(self._header['CUNIT1'])
elif self._wcs is not None:
self._spectral_unit = u.Unit(self._wcs.wcs.cunit[0])
return self
def __repr__(self):
prefixstr = '<' + self.__class__.__name__ + ' '
arrstr = np.array2string(self.filled_data[:].value, separator=',',
prefix=prefixstr)
return '{0}{1}{2:s}>'.format(prefixstr, arrstr, self._unitstr)
@staticmethod
def from_hdu(hdu):
'''
Return a OneDSpectrum from a FITS HDU or HDU list.
'''
if isinstance(hdu, HDUList):
hdul = hdu
hdu = hdul[0]
else:
hdul = HDUList([hdu])
if not len(hdu.data.shape) == 1:
raise ValueError("HDU must contain one-dimensional data.")
meta = {}
mywcs = wcs.WCS(hdu.header)
if "BUNIT" in hdu.header:
unit = cube_utils.convert_bunit(hdu.header["BUNIT"])
meta["BUNIT"] = hdu.header["BUNIT"]
else:
unit = None
with warnings.catch_warnings():
warnings.filterwarnings('ignore', category=FITSWarning)
beam = cube_utils.try_load_beams(hdul)
if hasattr(beam, '__len__'):
beams = beam
else:
beams = None
if beams is not None:
self = VaryingResolutionOneDSpectrum(hdu.data, unit=unit,
wcs=mywcs, meta=meta,
header=hdu.header,
beams=beams)
else:
beam = cube_utils.try_load_beam(hdu.header)
self = OneDSpectrum(hdu.data, unit=unit, wcs=mywcs, meta=meta,
header=hdu.header, beam=beam)
return self
@property
def header(self):
header = super(BaseOneDSpectrum, self).header
# Preserve the spectrum's spectral units
if 'CUNIT1' in header and self._spectral_unit != u.Unit(header['CUNIT1']):
spectral_scale = spectral_axis.wcs_unit_scale(self._spectral_unit)
header['CDELT1'] *= spectral_scale
header['CRVAL1'] *= spectral_scale
header['CUNIT1'] = self.spectral_axis.unit.to_string(format='FITS')
return header
@property
def spectral_axis(self):
"""
A `~astropy.units.Quantity` array containing the central values of
each channel along the spectral axis.
"""
if self._wcs is None:
spec_axis = np.arange(self.size) * u.one
else:
spec_axis = self.wcs.wcs_pix2world(np.arange(self.size), 0)[0] * \
u.Unit(self.wcs.wcs.cunit[0])
if self._spectral_unit is not None:
spec_axis = spec_axis.to(self._spectral_unit)
return spec_axis
def quicklook(self, filename=None, drawstyle='steps-mid', **kwargs):
"""
Plot the spectrum with current spectral units in the currently open
figure
kwargs are passed to `matplotlib.pyplot.plot`
Parameters
----------
        filename : str or None
Optional - the filename to save the quicklook to.
"""
from matplotlib import pyplot
ax = pyplot.gca()
ax.plot(self.spectral_axis, self.filled_data[:].value,
drawstyle=drawstyle, **kwargs)
ax.set_xlabel(self.spectral_axis.unit.to_string(format='latex'))
ax.set_ylabel(self.unit)
if filename is not None:
pyplot.gcf().savefig(filename)
def with_spectral_unit(self, unit, velocity_convention=None,
rest_value=None):
newwcs, newmeta = self._new_spectral_wcs(unit,
velocity_convention=velocity_convention,
rest_value=rest_value)
newheader = self._nowcs_header.copy()
newheader.update(newwcs.to_header())
wcs_cunit = u.Unit(newheader['CUNIT1'])
newheader['CUNIT1'] = unit.to_string(format='FITS')
newheader['CDELT1'] *= wcs_cunit.to(unit)
if self._mask is not None:
newmask = self._mask.with_spectral_unit(unit,
velocity_convention=velocity_convention,
rest_value=rest_value)
newmask._wcs = newwcs
else:
newmask = None
return self._new_spectrum_with(wcs=newwcs, spectral_unit=unit,
mask=newmask, meta=newmeta,
header=newheader)
def __getitem__(self, key, **kwargs):
        # Ideally, this could just be in VaryingResolutionOneDSpectrum, but
        # the code ends up about the same length either way, so we keep it
        # here.
try:
kwargs['beams'] = self.beams[key]
except (AttributeError, TypeError):
pass
new_qty = super(BaseOneDSpectrum, self).__getitem__(key)
if isinstance(key, slice):
new = self.__class__(value=new_qty.value,
unit=new_qty.unit,
copy=False,
wcs=wcs_utils.slice_wcs(self._wcs, key,
shape=self.shape),
meta=self._meta,
mask=(self._mask[key]
if self._mask is not nomask
else nomask),
header=self._header,
wcs_tolerance=self._wcs_tolerance,
fill_value=self.fill_value,
**kwargs)
return new
else:
if self._mask is not nomask:
# Kind of a hack; this is probably inefficient
bad = self._mask.exclude()[key]
if isinstance(bad, da.Array):
bad = bad.compute()
new_qty[bad] = np.nan
return new_qty
def __getattribute__(self, attrname):
# This is a hack to handle dimensionality-reducing functions
# We want spectrum.max() to return a Quantity, not a spectrum
# Long-term, we really want `OneDSpectrum` to not inherit from
# `Quantity`, but for now this approach works.... we just have
# to add more functions to this list.
if attrname in ('min', 'max', 'std', 'mean', 'sum', 'cumsum',
'nansum', 'ptp', 'var'):
return getattr(self.quantity, attrname)
else:
return super(BaseOneDSpectrum, self).__getattribute__(attrname)
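    # Illustrative consequence of the hack above (``spec`` is assumed to be
    # an existing OneDSpectrum in Kelvin):
    #
    #     spec.max()   # -> a plain Quantity such as <Quantity 1.2 K>,
    #                  #    not a OneDSpectrum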
def spectral_interpolate(self, spectral_grid,
suppress_smooth_warning=False,
fill_value=None):
"""
Resample the spectrum onto a specific grid
Parameters
----------
spectral_grid : array
An array of the spectral positions to regrid onto
suppress_smooth_warning : bool
If disabled, a warning will be raised when interpolating onto a
grid that does not nyquist sample the existing grid. Disable this
if you have already appropriately smoothed the data.
fill_value : float
Value for extrapolated spectral values that lie outside of
the spectral range defined in the original data. The
default is to use the nearest spectral channel in the
cube.
Returns
-------
spectrum : OneDSpectrum
"""
assert spectral_grid.ndim == 1
inaxis = self.spectral_axis.to(spectral_grid.unit)
indiff = np.mean(np.diff(inaxis))
outdiff = np.mean(np.diff(spectral_grid))
# account for reversed axes
if outdiff < 0:
spectral_grid = spectral_grid[::-1]
outdiff = np.mean(np.diff(spectral_grid))
outslice = slice(None, None, -1)
else:
outslice = slice(None, None, 1)
specslice = slice(None) if indiff >= 0 else slice(None, None, -1)
inaxis = inaxis[specslice]
indiff = np.mean(np.diff(inaxis))
# insanity checks
if indiff < 0 or outdiff < 0:
raise ValueError("impossible.")
assert np.all(np.diff(spectral_grid) > 0)
assert np.all(np.diff(inaxis) > 0)
np.testing.assert_allclose(np.diff(spectral_grid), outdiff,
err_msg="Output grid must be linear")
if outdiff > 2 * indiff and not suppress_smooth_warning:
warnings.warn("Input grid has too small a spacing. The data should "
"be smoothed prior to resampling.",
SmoothingWarning
)
newspec = np.empty([spectral_grid.size], dtype=self.dtype)
newmask = np.empty([spectral_grid.size], dtype='bool')
newspec[outslice] = np.interp(spectral_grid.value, inaxis.value,
self.filled_data[specslice].value,
left=fill_value, right=fill_value)
mask = self.mask.include()
if all(mask):
newmask = np.ones([spectral_grid.size], dtype='bool')
else:
interped = np.interp(spectral_grid.value,
inaxis.value, mask[specslice]) > 0
newmask[outslice] = interped
newwcs = self.wcs.deepcopy()
newwcs.wcs.crpix[0] = 1
newwcs.wcs.crval[0] = spectral_grid[0].value if outslice.step > 0 \
else spectral_grid[-1].value
newwcs.wcs.cunit[0] = spectral_grid.unit.to_string(format='FITS')
newwcs.wcs.cdelt[0] = outdiff.value if outslice.step > 0 \
else -outdiff.value
newwcs.wcs.set()
newheader = self._nowcs_header.copy()
newheader.update(newwcs.to_header())
wcs_cunit = u.Unit(newheader['CUNIT1'])
newheader['CUNIT1'] = spectral_grid.unit.to_string(format='FITS')
newheader['CDELT1'] *= wcs_cunit.to(spectral_grid.unit)
newbmask = BooleanArrayMask(newmask, wcs=newwcs)
return self._new_spectrum_with(data=newspec, wcs=newwcs, mask=newbmask,
header=newheader,
spectral_unit=spectral_grid.unit)
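    # Illustrative usage sketch: resampling onto a coarser, evenly spaced
    # velocity grid. ``spec`` is assumed to be a OneDSpectrum whose spectral
    # axis is convertible to km/s; the grid values are invented, and the grid
    # must be linear.
    #
    #     import numpy as np
    #     import astropy.units as u
    #     new_grid = np.linspace(-50, 50, 101) * u.km / u.s
    #     regridded = spec.spectral_interpolate(new_grid)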
def spectral_smooth(self, kernel,
convolve=convolution.convolve,
**kwargs):
"""
Smooth the spectrum
Parameters
----------
kernel : `~astropy.convolution.Kernel1D`
A 1D kernel from astropy
convolve : function
The astropy convolution function to use, either
`astropy.convolution.convolve` or
`astropy.convolution.convolve_fft`
kwargs : dict
Passed to the convolve function
"""
newspec = convolve(self.value, kernel, normalize_kernel=True, **kwargs)
return self._new_spectrum_with(data=newspec)
def to(self, unit, equivalencies=[]):
"""
Return a new `~spectral_cube.lower_dimensional_structures.OneDSpectrum`
of the same class with the specified unit.
See `astropy.units.Quantity.to` for further details.
"""
return super(BaseOneDSpectrum, self).to(unit, equivalencies, freq=None)
def with_fill_value(self, fill_value):
"""
Create a new :class:`OneDSpectrum` with a different ``fill_value``.
"""
return self._new_spectrum_with(fill_value=fill_value)
@property
def _new_thing_with(self):
return self._new_spectrum_with
def _new_spectrum_with(self, data=None, wcs=None, mask=None, meta=None,
fill_value=None, spectral_unit=None, unit=None,
header=None, wcs_tolerance=None,
**kwargs):
data = self._data if data is None else data
if unit is None and hasattr(data, 'unit'):
if data.unit != self.unit:
raise u.UnitsError("New data unit '{0}' does not"
" match unit '{1}'. You can"
" override this by specifying the"
" `unit` keyword."
.format(data.unit, self.unit))
unit = data.unit
elif unit is None:
unit = self.unit
elif unit is not None:
# convert string units to Units
if not isinstance(unit, u.Unit):
unit = u.Unit(unit)
if hasattr(data, 'unit'):
if u.Unit(unit) != data.unit:
raise u.UnitsError("The specified new cube unit '{0}' "
"does not match the input unit '{1}'."
.format(unit, data.unit))
else:
data = u.Quantity(data, unit=unit, copy=False)
wcs = self._wcs if wcs is None else wcs
mask = self._mask if mask is None else mask
if meta is None:
meta = {}
meta.update(self._meta)
if unit is not None:
meta['BUNIT'] = unit.to_string(format='FITS')
fill_value = self._fill_value if fill_value is None else fill_value
spectral_unit = self._spectral_unit if spectral_unit is None else u.Unit(spectral_unit)
spectrum = self.__class__(value=data, wcs=wcs, mask=mask, meta=meta,
unit=unit, fill_value=fill_value,
header=header or self._header,
wcs_tolerance=wcs_tolerance or self._wcs_tolerance,
**kwargs)
spectrum._spectral_unit = spectral_unit
return spectrum
class OneDSpectrum(BaseOneDSpectrum, BeamMixinClass):
def __new__(cls, value, beam=None, read_beam=False, **kwargs):
self = super(OneDSpectrum, cls).__new__(cls, value, **kwargs)
if beam is None:
if "beam" in self.meta:
beam = self.meta['beam']
elif read_beam:
beam = cube_utils.try_load_beam(self.header)
if beam is None:
warnings.warn("Cannot load beam from header.",
BeamWarning
)
if beam is not None:
self.beam = beam
self.meta['beam'] = beam
self._cache = {}
return self
def _new_spectrum_with(self, **kwargs):
beam = kwargs.pop('beam', None)
if 'beam' in self._meta and beam is None:
beam = self.beam
out = super(OneDSpectrum, self)._new_spectrum_with(beam=beam, **kwargs)
return out
def with_beam(self, beam, raise_error_jybm=True):
'''
Attach a new beam object to the OneDSpectrum.
Parameters
----------
beam : `~radio_beam.Beam`
A new beam object.
'''
if not isinstance(beam, Beam):
raise TypeError("beam must be a radio_beam.Beam object.")
self.check_jybeam_smoothing(raise_error_jybm=raise_error_jybm)
meta = self.meta.copy()
meta['beam'] = beam
return self._new_spectrum_with(beam=beam, meta=meta)
class VaryingResolutionOneDSpectrum(BaseOneDSpectrum, MultiBeamMixinClass):
def __new__(cls, value, beams=None, read_beam=False, goodbeams_mask=None, **kwargs):
self = super(VaryingResolutionOneDSpectrum, cls).__new__(cls, value, **kwargs)
assert hasattr(self, '_fill_value')
if beams is None:
if "beams" in self.meta:
beams = self.meta['beams']
elif read_beam:
beams = cube_utils.try_load_beams(self.header)
if beams is None:
warnings.warn("Cannot load beams table from header.",
BeamWarning
)
if beams is not None:
if isinstance(beams, BinTableHDU):
beam_data_table = beams.data
elif isinstance(beams, FITS_rec):
beam_data_table = beams
else:
beam_data_table = None
if beam_data_table is not None:
beams = Beams(major=u.Quantity(beam_data_table['BMAJ'], u.arcsec),
minor=u.Quantity(beam_data_table['BMIN'], u.arcsec),
pa=u.Quantity(beam_data_table['BPA'], u.deg),
meta=[{key: row[key] for key in beam_data_table.names
if key not in ('BMAJ','BPA', 'BMIN')}
for row in beam_data_table],)
self.beams = beams
self.meta['beams'] = beams
if goodbeams_mask is not None:
self.goodbeams_mask = goodbeams_mask
self._cache = {}
return self
@property
def hdu(self):
warnings.warn("There are multiple beams for this spectrum that "
"are being ignored when creating the HDU.",
BeamWarning
)
return super(VaryingResolutionOneDSpectrum, self).hdu
@property
def hdulist(self):
with warnings.catch_warnings():
warnings.simplefilter("ignore")
hdu = self.hdu
beamhdu = cube_utils.beams_to_bintable(self.beams)
return HDUList([hdu, beamhdu])
def _new_spectrum_with(self, **kwargs):
beams = kwargs.pop('beams', self.beams)
if beams is None:
beams = self.beams
VRODS = VaryingResolutionOneDSpectrum
out = super(VRODS, self)._new_spectrum_with(beams=beams,
**kwargs)
return out
def __array_finalize__(self, obj):
super(VaryingResolutionOneDSpectrum, self).__array_finalize__(obj)
self._beams = getattr(obj, '_beams', None)
if getattr(obj, 'goodbeams_mask', None) is not None:
# do NOT use the setter here, because we sometimes need to write
# intermediate size-mismatch things that later get fixed, e.g., in
# __getitem__ below
self._goodbeams_mask = getattr(obj, 'goodbeams_mask', None)
def __getitem__(self, key):
new_qty = super(VaryingResolutionOneDSpectrum, self).__getitem__(key)
# use the goodbeams_mask setter here because it checks size
new_qty.goodbeams_mask = self.goodbeams_mask[key]
new_qty.beams = self.unmasked_beams[key]
return new_qty
# --------------------------- (file boundary) --------------------------------
"""
This script is part of the pytest release process which is triggered by comments
in issues.
This script is started by the `release-on-comment.yml` workflow, which always executes on
`master` and is triggered by two comment related events:
* https://help.github.com/en/actions/reference/events-that-trigger-workflows#issue-comment-event-issue_comment
* https://help.github.com/en/actions/reference/events-that-trigger-workflows#issues-event-issues
This script receives the payload and a secret on the command line.
The payload must contain a comment with a phrase matching this pseudo-regular expression:
@pytestbot please prepare (major )? release from <branch name>
Then the appropriate version will be obtained based on the given branch name:
* a major release from master if "major" appears in the phrase in that position
* a feature or bug fix release from master (based on whether there are features in the current
  changelog folder)
* a bug fix from a maintenance branch
After that, it will create a release using the `release` tox environment, and push a new PR.
**Secret**: currently the secret is defined in the @pytestbot account, which the core maintainers
have access to. There we created a new secret named `chatops` with write access to the repository.
"""
import argparse
import json
import os
import re
import traceback
from pathlib import Path
from subprocess import CalledProcessError
from subprocess import check_call
from subprocess import check_output
from subprocess import run
from textwrap import dedent
from typing import Dict
from typing import Optional
from typing import Tuple
from colorama import Fore
from colorama import init
from github3.repos import Repository
class InvalidFeatureRelease(Exception):
pass
SLUG = "pytest-dev/pytest"
PR_BODY = """\
Created automatically from {comment_url}.
Once all builds pass and it has been **approved** by one or more maintainers, the build
can be released by pushing a tag `{version}` to this repository.
Closes #{issue_number}.
"""
def login(token: str) -> Repository:
import github3
github = github3.login(token=token)
owner, repo = SLUG.split("/")
return github.repository(owner, repo)
def get_comment_data(payload: Dict) -> str:
if "comment" in payload:
return payload["comment"]
else:
return payload["issue"]
def validate_and_get_issue_comment_payload(
    issue_payload_path: Optional[Path],
) -> Tuple[Dict, Optional[str], bool]:
payload = json.loads(issue_payload_path.read_text(encoding="UTF-8"))
body = get_comment_data(payload)["body"]
m = re.match(r"@pytestbot please prepare (major )?release from ([\w\-_\.]+)", body)
if m:
is_major, base_branch = m.group(1) is not None, m.group(2)
else:
is_major, base_branch = False, None
return payload, base_branch, is_major
def print_and_exit(msg) -> None:
print(msg)
raise SystemExit(1)
def trigger_release(payload_path: Path, token: str) -> None:
payload, base_branch, is_major = validate_and_get_issue_comment_payload(
payload_path
)
if base_branch is None:
url = get_comment_data(payload)["html_url"]
print_and_exit(
f"Comment {Fore.CYAN}{url}{Fore.RESET} did not match the trigger command."
)
print()
print(f"Precessing release for branch {Fore.CYAN}{base_branch}")
repo = login(token)
issue_number = payload["issue"]["number"]
issue = repo.issue(issue_number)
check_call(["git", "checkout", f"origin/{base_branch}"])
try:
version = find_next_version(base_branch, is_major)
except InvalidFeatureRelease as e:
issue.create_comment(str(e))
print_and_exit(f"{Fore.RED}{e}")
error_contents = ""
try:
print(f"Version: {Fore.CYAN}{version}")
release_branch = f"release-{version}"
run(
["git", "config", "user.name", "pytest bot"],
text=True,
check=True,
capture_output=True,
)
run(
["git", "config", "user.email", "pytestbot@gmail.com"],
text=True,
check=True,
capture_output=True,
)
run(
["git", "checkout", "-b", release_branch, f"origin/{base_branch}"],
text=True,
check=True,
capture_output=True,
)
print(f"Branch {Fore.CYAN}{release_branch}{Fore.RESET} created.")
# important to use tox here because we have changed branches, so dependencies
# might have changed as well
cmdline = ["tox", "-e", "release", "--", version, "--skip-check-links"]
print("Running", " ".join(cmdline))
run(
cmdline,
text=True,
check=True,
capture_output=True,
)
oauth_url = f"https://{token}:x-oauth-basic@github.com/{SLUG}.git"
run(
["git", "push", oauth_url, f"HEAD:{release_branch}", "--force"],
text=True,
check=True,
capture_output=True,
)
print(f"Branch {Fore.CYAN}{release_branch}{Fore.RESET} pushed.")
body = PR_BODY.format(
comment_url=get_comment_data(payload)["html_url"],
version=version,
issue_number=issue_number,
)
pr = repo.create_pull(
f"Prepare release {version}",
base=base_branch,
head=release_branch,
body=body,
)
print(f"Pull request {Fore.CYAN}{pr.url}{Fore.RESET} created.")
comment = issue.create_comment(
f"As requested, opened a PR for release `{version}`: #{pr.number}."
)
print(f"Notified in original comment {Fore.CYAN}{comment.url}{Fore.RESET}.")
except CalledProcessError as e:
error_contents = f"CalledProcessError\noutput:\n{e.output}\nstderr:\n{e.stderr}"
except Exception:
error_contents = f"Exception:\n{traceback.format_exc()}"
if error_contents:
link = f"https://github.com/{SLUG}/actions/runs/{os.environ['GITHUB_RUN_ID']}"
msg = ERROR_COMMENT.format(
version=version, base_branch=base_branch, contents=error_contents, link=link
)
issue.create_comment(msg)
print_and_exit(f"{Fore.RED}{error_contents}")
else:
print(f"{Fore.GREEN}Success.")
ERROR_COMMENT = """\
The request to prepare release `{version}` from {base_branch} failed with:
```
{contents}
```
See: {link}.
"""
def find_next_version(base_branch: str, is_major: bool) -> str:
output = check_output(["git", "tag"], encoding="UTF-8")
valid_versions = []
for v in output.splitlines():
m = re.match(r"\d.\d.\d+$", v.strip())
if m:
valid_versions.append(tuple(int(x) for x in v.split(".")))
valid_versions.sort()
last_version = valid_versions[-1]
changelog = Path("changelog")
features = list(changelog.glob("*.feature.rst"))
breaking = list(changelog.glob("*.breaking.rst"))
is_feature_release = features or breaking
if is_feature_release and base_branch != "master":
msg = dedent(
f"""
Found features or breaking changes in `{base_branch}`, and feature releases can only be
created from `master`:
"""
)
msg += "\n".join(f"* `{x.name}`" for x in sorted(features + breaking))
raise InvalidFeatureRelease(msg)
if is_major:
return f"{last_version[0]+1}.0.0"
elif is_feature_release:
return f"{last_version[0]}.{last_version[1] + 1}.0"
else:
return f"{last_version[0]}.{last_version[1]}.{last_version[2] + 1}"
def main() -> None:
init(autoreset=True)
parser = argparse.ArgumentParser()
parser.add_argument("payload")
parser.add_argument("token")
options = parser.parse_args()
trigger_release(Path(options.payload), options.token)
if __name__ == "__main__":
main()
# --------------------------- (file boundary) --------------------------------
"""Config flow for the Huawei LTE platform."""
from collections import OrderedDict
import logging
from typing import Optional
from urllib.parse import urlparse
from huawei_lte_api.AuthorizedConnection import AuthorizedConnection
from huawei_lte_api.Client import Client
from huawei_lte_api.Connection import Connection
from huawei_lte_api.exceptions import (
LoginErrorPasswordWrongException,
LoginErrorUsernamePasswordOverrunException,
LoginErrorUsernamePasswordWrongException,
LoginErrorUsernameWrongException,
ResponseErrorException,
)
from requests.exceptions import Timeout
from url_normalize import url_normalize
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_RECIPIENT,
CONF_URL,
CONF_USERNAME,
)
from homeassistant.core import callback
# see https://github.com/PyCQA/pylint/issues/3202 about the DOMAIN's pylint issue
from .const import CONNECTION_TIMEOUT, DEFAULT_DEVICE_NAME, DEFAULT_NOTIFY_SERVICE_NAME
from .const import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
class ConfigFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle Huawei LTE config flow."""
VERSION = 2
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get options flow."""
return OptionsFlowHandler(config_entry)
async def _async_show_user_form(self, user_input=None, errors=None):
if user_input is None:
user_input = {}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
OrderedDict(
(
(
vol.Required(
CONF_URL,
default=user_input.get(
CONF_URL,
# https://github.com/PyCQA/pylint/issues/3167
self.context.get( # pylint: disable=no-member
CONF_URL, ""
),
),
),
str,
),
(
vol.Optional(
CONF_USERNAME, default=user_input.get(CONF_USERNAME, "")
),
str,
),
(
vol.Optional(
CONF_PASSWORD, default=user_input.get(CONF_PASSWORD, "")
),
str,
),
)
)
),
errors=errors or {},
)
async def async_step_import(self, user_input=None):
"""Handle import initiated config flow."""
return await self.async_step_user(user_input)
def _already_configured(self, user_input):
"""See if we already have a router matching user input configured."""
existing_urls = {
url_normalize(entry.data[CONF_URL], default_scheme="http")
for entry in self._async_current_entries()
}
return user_input[CONF_URL] in existing_urls
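    # Illustrative sketch of the normalization used above (the address is
    # invented; the exact output depends on the url_normalize package):
    #
    #     url_normalize("192.168.8.1", default_scheme="http")
    #     # -> roughly "http://192.168.8.1/"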
async def async_step_user(self, user_input=None):
"""Handle user initiated config flow."""
if user_input is None:
return await self._async_show_user_form()
errors = {}
# Normalize URL
user_input[CONF_URL] = url_normalize(
user_input[CONF_URL], default_scheme="http"
)
if "://" not in user_input[CONF_URL]:
errors[CONF_URL] = "invalid_url"
return await self._async_show_user_form(
user_input=user_input, errors=errors
)
if self._already_configured(user_input):
return self.async_abort(reason="already_configured")
conn = None
def logout():
if hasattr(conn, "user"):
try:
conn.user.logout()
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Could not logout", exc_info=True)
def try_connect(username: Optional[str], password: Optional[str]) -> Connection:
"""Try connecting with given credentials."""
if username or password:
conn = AuthorizedConnection(
user_input[CONF_URL],
username=username,
password=password,
timeout=CONNECTION_TIMEOUT,
)
else:
try:
conn = AuthorizedConnection(
user_input[CONF_URL],
username="",
password="",
timeout=CONNECTION_TIMEOUT,
)
user_input[CONF_USERNAME] = ""
user_input[CONF_PASSWORD] = ""
except ResponseErrorException:
_LOGGER.debug(
"Could not login with empty credentials, proceeding unauthenticated",
exc_info=True,
)
conn = Connection(user_input[CONF_URL], timeout=CONNECTION_TIMEOUT)
del user_input[CONF_USERNAME]
del user_input[CONF_PASSWORD]
return conn
def get_router_title(conn: Connection) -> str:
"""Get title for router."""
title = None
client = Client(conn)
try:
info = client.device.basic_information()
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Could not get device.basic_information", exc_info=True)
else:
title = info.get("devicename")
if not title:
try:
info = client.device.information()
except Exception: # pylint: disable=broad-except
_LOGGER.debug("Could not get device.information", exc_info=True)
else:
title = info.get("DeviceName")
return title or DEFAULT_DEVICE_NAME
username = user_input.get(CONF_USERNAME)
password = user_input.get(CONF_PASSWORD)
try:
conn = await self.hass.async_add_executor_job(
try_connect, username, password
)
except LoginErrorUsernameWrongException:
errors[CONF_USERNAME] = "incorrect_username"
except LoginErrorPasswordWrongException:
errors[CONF_PASSWORD] = "incorrect_password"
except LoginErrorUsernamePasswordWrongException:
errors[CONF_USERNAME] = "incorrect_username_or_password"
except LoginErrorUsernamePasswordOverrunException:
errors["base"] = "login_attempts_exceeded"
except ResponseErrorException:
_LOGGER.warning("Response error", exc_info=True)
errors["base"] = "response_error"
except Timeout:
_LOGGER.warning("Connection timeout", exc_info=True)
errors[CONF_URL] = "connection_timeout"
except Exception: # pylint: disable=broad-except
_LOGGER.warning("Unknown error connecting to device", exc_info=True)
errors[CONF_URL] = "unknown_connection_error"
if errors:
await self.hass.async_add_executor_job(logout)
return await self._async_show_user_form(
user_input=user_input, errors=errors
)
title = await self.hass.async_add_executor_job(get_router_title, conn)
await self.hass.async_add_executor_job(logout)
return self.async_create_entry(title=title, data=user_input)
async def async_step_ssdp(self, discovery_info):
"""Handle SSDP initiated config flow."""
        # Attempt to distinguish from other non-LTE Huawei router devices; at
        # least some of the ones we are interested in have a "Mobile Wi-Fi"
        # friendlyName.
if "mobile" not in discovery_info.get(ssdp.ATTR_UPNP_FRIENDLY_NAME, "").lower():
return self.async_abort(reason="not_huawei_lte")
# https://github.com/PyCQA/pylint/issues/3167
url = self.context[CONF_URL] = url_normalize( # pylint: disable=no-member
discovery_info.get(
ssdp.ATTR_UPNP_PRESENTATION_URL,
f"http://{urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION]).hostname}/",
)
)
if any(
url == flow["context"].get(CONF_URL) for flow in self._async_in_progress()
):
return self.async_abort(reason="already_in_progress")
user_input = {CONF_URL: url}
if self._already_configured(user_input):
return self.async_abort(reason="already_configured")
return await self._async_show_user_form(user_input)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Huawei LTE options flow."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
# Recipients are persisted as a list, but handled as comma separated string in UI
if user_input is not None:
# Preserve existing options, for example *_from_yaml markers
data = {**self.config_entry.options, **user_input}
if not isinstance(data[CONF_RECIPIENT], list):
data[CONF_RECIPIENT] = [
x.strip() for x in data[CONF_RECIPIENT].split(",")
]
return self.async_create_entry(title="", data=data)
data_schema = vol.Schema(
{
vol.Optional(
CONF_NAME,
default=self.config_entry.options.get(
CONF_NAME, DEFAULT_NOTIFY_SERVICE_NAME
),
): str,
vol.Optional(
CONF_RECIPIENT,
default=", ".join(
self.config_entry.options.get(CONF_RECIPIENT, [])
),
): str,
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
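    # Illustrative round trip handled above (the numbers are invented): the
    # UI string "+358500000001, +358500000002" is stored as the list
    # ["+358500000001", "+358500000002"].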
# --------------------------- (file boundary) --------------------------------
#!/usr/bin/env python
'''
@author Luke Campbell <LCampbell@ASAScience.com>
@file
@date 04/17/12 09:07
@description DESCRIPTION
'''
from mock import Mock
from nose.plugins.attrib import attr
from interface.objects import Index, Collection, SearchOptions, ElasticSearchIndex, CouchDBIndex, InformationResource, Resource, Association
from interface.services.dm.iindex_management_service import IndexManagementServiceClient
from interface.services.coi.iresource_registry_service import ResourceRegistryServiceClient
from pyon.core.exception import BadRequest, NotFound
from pyon.util.containers import DotDict
from pyon.util.int_test import IonIntegrationTestCase
from pyon.util.unit_test import PyonTestCase
from ion.services.dm.inventory.index_management_service import IndexManagementService
import unittest
@attr('UNIT',group='dm')
class IndexManagementUnitTest(PyonTestCase):
def setUp(self):
mock_clients = self._create_service_mock('index_management')
self.index_management = IndexManagementService()
self.index_management.clients = mock_clients
self.rr_create = mock_clients.resource_registry.create
self.rr_read = mock_clients.resource_registry.read
self.rr_update = mock_clients.resource_registry.update
self.rr_delete = mock_clients.resource_registry.delete
self.rr_find_resources = mock_clients.resource_registry.find_resources
self.rr_find_assocs = mock_clients.resource_registry.find_associations
self.rr_find_subj = mock_clients.resource_registry.find_subjects
self.rr_find_obj = mock_clients.resource_registry.find_objects
self.rr_delete_assoc = mock_clients.resource_registry.delete_association
self.get_datastore = Mock()
self.db_create = Mock()
self.get_datastore.return_value = DotDict({'datastore_name':'test_datastore'})
self.index_management.container = DotDict({
'datastore_manager':DotDict({
'get_datastore' : self.get_datastore
})
})
self.index_name = 'test_index'
def test_create_index(self):
'''
test_create_index
Unit test for basic creation of an index
'''
# Mocks
self.rr_create.return_value = ('index_id','rev')
self.rr_find_resources.return_value = ([],[])
retval = self.index_management.create_index(name='mock', content_type=IndexManagementService.DATASTORE_INDEX, options='ugh')
self.assertTrue(retval=='index_id','invalid return value: %s' % retval)
self.assertTrue(self.rr_create.called)
with self.assertRaises(BadRequest):
self.index_management.create_index(name='another', content_type='not_listed')
def test_dup_index(self):
# Mocks
self.rr_find_resources.return_value = ([1],[1])
# Execution
with self.assertRaises(BadRequest):
self.index_management.create_index('mock_index_id')
def test_read_index(self):
# mocks
return_obj = dict(mock='mock')
self.rr_read.return_value = return_obj
# execution
retval = self.index_management.read_index('mock_index_id')
# assertions
self.assertEquals(return_obj, retval, 'The resource should be returned.')
def test_update_index(self):
with self.assertRaises(BadRequest):
self.index_management.update_index()
with self.assertRaises(BadRequest):
self.index_management.update_index('hi')
self.index_management.update_index(Index())
def test_delete_index(self):
self.index_management.delete_index('index_id')
self.rr_delete.assert_called_with('index_id')
def test_list_indexes(self):
# Mocks
self.rr_find_resources.return_value = ([
DotDict({'_id':'1','name':'1'}),
DotDict({'_id':'2','name':'2'}),
DotDict({'_id':'3','name':'3'}),
DotDict({'_id':'4','name':'4'})
],[1,2,3,4])
# Execution
retval = self.index_management.list_indexes()
# Assertions
self.assertTrue(retval == {'1':'1','2':'2','3':'3','4':'4'}, 'Index mismatch')
def test_find_indexes(self):
self.index_management.list_indexes=Mock()
self.index_management.list_indexes.return_value = {'index_name':'1'}
retval = self.index_management.find_indexes('index_name')
self.assertTrue(retval=='1')
self.index_management.list_indexes.return_value = {}
retval = self.index_management.find_indexes('index_name')
self.assertTrue(retval==None)
def test_create_collection(self):
self.rr_create.return_value = 'collection_id', 'rev'
self.rr_find_resources.return_value = ([0],[0])
with self.assertRaises(BadRequest):
self.index_management.create_collection('test',[0])
self.rr_find_resources.return_value = ([],[])
with self.assertRaises(BadRequest):
self.index_management.create_collection('test',[])
retval = self.index_management.create_collection('test',[0])
self.assertTrue(retval=='collection_id')
def test_read_collection(self):
self.rr_read.return_value = 'retval'
retval = self.index_management.read_collection('test')
self.assertTrue(retval=='retval')
def test_update_collection(self):
with self.assertRaises(BadRequest):
ind = Index()
self.index_management.update_collection(ind)
self.index_management.update_collection(Collection())
self.assertTrue(self.rr_update.called)
def test_delete_collection(self):
self.rr_find_assocs.return_value = ['assoc']
retval = self.index_management.delete_collection('collection_id')
self.assertTrue(retval)
self.rr_delete.assert_called_once_with('collection_id')
self.rr_delete_assoc.assert_called_once_with('assoc')
def test_list_collection_resources(self):
self.rr_find_obj.return_value = (['test_id'],[''])
result1 = self.index_management.list_collection_resources('collection_id', id_only=True)
self.assertTrue(result1 == ['test_id'])
def test_find_collection(self):
self.rr_find_resources.return_value = (['test'],[])
retval = self.index_management.find_collection(collection_name='hi')
self.assertTrue(retval == ['test'] , '%s' % retval)
fake_collection = Collection(resources=['test_res_id'])
fake_assoc = Association(s='test_id')
self.rr_find_assocs.return_value = [fake_assoc]
retval = self.index_management.find_collection(resource_ids=['test_res_id'])
self.assertTrue(retval == ['test_id'], '%s' % retval)
with self.assertRaises(BadRequest):
self.index_management.find_collection()
@attr('INT',group='dm')
class IndexManagementIntTest(IonIntegrationTestCase):
def setUp(self):
self._start_container()
self.container.start_rel_from_url('res/deploy/r2dm.yml')
self.ims_cli = IndexManagementServiceClient()
self.rr_cli = ResourceRegistryServiceClient()
self.index_name = 'test_index'
def test_create_datastore_index(self):
index_name = self.index_name
ims_cli = self.ims_cli
rr_cli = self.rr_cli
options = SearchOptions()
options.attribute_match = ['test_field']
index_id = ims_cli.create_index(
name=index_name,
content_type=IndexManagementService.DATASTORE_INDEX,
options=options
)
index_result = self.rr_cli.read(index_id)
self.assertIsInstance(index_result,ElasticSearchIndex)
self.assertTrue(index_result.name == index_name)
#======================================
# Clean up
#======================================
rr_cli.delete(index_id)
def test_read_index(self):
ims_cli = self.ims_cli
rr_cli = self.rr_cli
index_name = self.index_name
index_res = Index(name=index_name)
index_id, _ = rr_cli.create(index_res)
index = ims_cli.read_index(index_id)
self.assertIsInstance(index,Index)
self.assertTrue(index.name==index_name)
rr_cli.delete(index_id)
def test_delete_index(self):
ims_cli = self.ims_cli
rr_cli = self.rr_cli
index_name = self.index_name
index_res = Index(name=index_name)
index_id, _ = rr_cli.create(index_res)
ims_cli.delete_index(index_id)
with self.assertRaises(NotFound):
rr_cli.delete(index_id)
def test_update_index(self):
ims_cli = self.ims_cli
rr_cli = self.rr_cli
index_name = self.index_name
index_res = Index(name=index_name)
index_id, _ = rr_cli.create(index_res)
index = ims_cli.read_index(index_id)
index.name = 'another'
ims_cli.update_index(index)
index = rr_cli.read(index_id)
self.assertTrue(index.name == 'another')
def test_find_indexes(self):
ims_cli = self.ims_cli
rr_cli = self.rr_cli
index_name = self.index_name
#======================================
# Index Pool
#======================================
indexes = [
Index(name='first'),
Index(name='second'),
Index(name='third')
]
id_pool = list()
for index in indexes:
id_pool.append(rr_cli.create(index)[0])
index_id = ims_cli.find_indexes(index_name='second')
index = ims_cli.read_index(index_id)
self.assertTrue(index.name=='second')
#======================================
# Clean up
#======================================
for index_id in id_pool:
rr_cli.delete(index_id)
def test_create_collection(self):
ims_cli = self.ims_cli
rr_cli = self.rr_cli
with self.assertRaises(BadRequest):
ims_cli.create_collection('failing_collection')
resources = [ Resource(), Resource(), Resource() ]
resources = [ rr_cli.create(i)[0] for i in resources ]
collection_id = ims_cli.create_collection('working_collection',resources)
collection = rr_cli.read(collection_id)
collection_resources = ims_cli.list_collection_resources(collection_id, id_only=True)
self.assertTrue(set(collection_resources) == set(resources), '%s != %s' % (set(collection_resources) , set(resources)))
def test_read_collection(self):
ims_cli = self.ims_cli
rr_cli = self.rr_cli
collection = Collection(name='working_collection')
collection_id, _ = rr_cli.create(collection)
collection = ims_cli.read_collection(collection_id)
self.assertTrue(collection.name == 'working_collection')
def test_update_collection(self):
ims_cli = self.ims_cli
rr_cli = self.rr_cli
collection = Collection(name='useful_collection')
collection_id, _ = rr_cli.create(collection)
collection = rr_cli.read(collection_id)
collection.name = 'nub'
ims_cli.update_collection(collection)
collection = rr_cli.read(collection_id)
self.assertTrue(collection.name=='nub')
def test_delete_collection(self):
ims_cli = self.ims_cli
rr_cli = self.rr_cli
res = Resource()
res_id, rev = rr_cli.create(res)
collection_id = ims_cli.create_collection(name='test_collection', resources=[res_id])
ims_cli.delete_collection(collection_id)
with self.assertRaises(NotFound):
rr_cli.read(collection_id)
def test_list_collection_resources(self):
ims_cli = self.ims_cli
rr_cli = self.rr_cli
#========================================
# Resource Pool
#========================================
        resources = [ InformationResource(name='bean_counter'), InformationResource(name='lunar_rock'), InformationResource('aperture'), InformationResource('lemons') ]
resources = [ rr_cli.create(i)[0] for i in resources ]
collection = Collection(name='park_bench')
collection_id = ims_cli.create_collection(name='park_bench', resources=resources)
retval = ims_cli.list_collection_resources(collection_id, id_only=True)
retval.sort()
resources.sort()
self.assertTrue(retval == resources, '%s != %s' %(retval , resources))
def test_find_collection(self):
res_id, _ = self.rr_cli.create(Resource(name='test_res'))
collection_id = self.ims_cli.create_collection('test', [res_id])
retval = self.ims_cli.find_collection(collection_name='test')
self.assertTrue(retval[0] == collection_id)
retval = self.ims_cli.find_collection(resource_ids=[res_id])
self.assertTrue(retval[0] == collection_id)
# --------------------------- (file boundary) --------------------------------
import unittest
import os
import shutil
import stat
from mpm import MPMMetadata, mpm_init, mpm_purge, mpm_install, mpm_uninstall, mpm_update, mpm_load, mpm_freeze, mpm_convert, mpm_show
from mpm_helpers import clone_and_checkout_helper, clone_helper, checkout_helper, yaml_to_path_helper, path_to_yaml_helper, onerror_helper, remove_from_gitignore_helper, add_to_gitignore_helper, is_local_commit_helper, with_open_or_create_tinydb_helper, with_open_or_create_file_helper, create_directory_helper
from mpm_yaml_storage import YAMLStorage
from tinydb import TinyDB, Query
from git import Repo, GitCommandError
class HelperObject(object):
pass
class TestHelpers(unittest.TestCase):
def test_is_local_commit_helper_not_local(self):
path = os.path.join('test', 'broker')
url = 'https://github.com/msembinelli/broker.git'
ref = '2dc33423188a7e06fa6e9725a0a74059b009ff6a'
clone_helper(url, path)
repo = Repo(path)
self.assertFalse(is_local_commit_helper(repo, '2dc3342318'))
repo.close()
shutil.rmtree(path, onerror=onerror_helper)
os.rmdir('test')
def test_is_local_commit_helper_is_local(self):
path = os.path.join('test', 'broker')
url = 'https://github.com/msembinelli/broker.git'
ref = '2dc33423188a7e06fa6e9725a0a74059b009ff6a'
clone_helper(url, path)
repo = Repo(path)
branch = repo.create_head('test_branch', '2dc33423188a7e06fa6e9725a0a74059b009ff6a')
self.assertTrue(is_local_commit_helper(repo, 'test_branch'))
repo.git.checkout('test_branch')
new_path = os.path.join(path, 'test_module')
os.mkdir(new_path)
repo.index.add(['test_module'])
repo.index.commit("Added a new folder test")
self.assertTrue(is_local_commit_helper(repo, 'test_branch'))
repo.close()
shutil.rmtree(path, onerror=onerror_helper)
os.rmdir('test')
def test_clone_helper_should_clone(self):
path = os.path.join('test', 'broker')
url = 'https://github.com/msembinelli/broker.git'
clone_helper(url, path)
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.exists(os.path.join(path, '.git')))
# Try cloning again
clone_helper(url, path)
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.exists(os.path.join(path, '.git')))
shutil.rmtree(path, onerror=onerror_helper)
os.rmdir('test')
def test_clone_helper_should_not_clone(self):
path = os.path.join('test', 'test')
# Bad URL
url = 'https://github.com/fake/repo/repo123456789.git'
self.assertRaises(GitCommandError, clone_helper, url, path)
self.assertFalse(os.path.exists(path))
self.assertFalse(os.path.exists(os.path.join(path, '.git')))
self.assertRaises(TypeError, clone_helper, url, None)
self.assertRaises(TypeError, clone_helper, None, path)
def test_checkout_helper_should_checkout(self):
path = os.path.join('test', 'broker')
url = 'https://github.com/msembinelli/broker.git'
ref = '2dc33423188a7e06fa6e9725a0a74059b009ff6a'
clone_helper(url, path)
checkout_helper(path, ref)
repo = Repo(path)
self.assertEqual(repo.head.commit.hexsha, ref)
repo.close()
repo = Repo(path)
branch = repo.create_head('test', '2dc33423188a7e06fa6e9725a0a74059b009ff6a')
checkout_helper(path, 'test')
repo.close()
shutil.rmtree(path, onerror=onerror_helper)
os.rmdir('test')
def test_checkout_helper_should_not_checkout(self):
path = os.path.join('test', 'broker')
url = 'https://github.com/msembinelli/broker.git'
ref = '123456789'
clone_helper(url, path)
self.assertRaises(GitCommandError, checkout_helper, path, ref)
self.assertRaises(TypeError, checkout_helper, None, '2dc3342')
self.assertRaises(TypeError, checkout_helper, path, None)
shutil.rmtree(path, onerror=onerror_helper)
os.rmdir('test')
def test_clone_and_checkout_helper_should_clone_and_checkout(self):
path = os.path.join('test', 'broker')
url = 'https://github.com/msembinelli/broker.git'
ref = '2dc33423188a7e06fa6e9725a0a74059b009ff6a'
clone_and_checkout_helper(url, ref, path)
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.exists(os.path.join(path, '.git')))
repo = Repo(path)
self.assertEqual(repo.head.commit.hexsha, ref)
repo.close()
shutil.rmtree(path, onerror=onerror_helper)
os.rmdir('test')
def test_yaml_to_path_helper(self):
yaml_path = '/test/folder'
expected_path = os.path.sep + 'test' + os.path.sep + 'folder'
self.assertEqual(expected_path, yaml_to_path_helper(yaml_path))
def test_path_to_yaml_helper(self):
path = os.path.sep + 'test' + os.path.sep + 'folder'
expected_yaml_path = '/test/folder'
self.assertEqual(expected_yaml_path, path_to_yaml_helper(path))
def test_add_to_gitignore_helper(self):
self.assertTrue(add_to_gitignore_helper('.gitignore', 'test/path'))
remove_from_gitignore_helper('.gitignore', 'test/path')
def test_add_to_gitignore_helper_already_added(self):
add_to_gitignore_helper('.gitignore', 'test/path')
self.assertFalse(add_to_gitignore_helper('.gitignore', 'test/path'))
def test_remove_from_gitignore_helper(self):
add_to_gitignore_helper('.gitignore', 'test/path')
self.assertTrue(remove_from_gitignore_helper('.gitignore', 'test/path'))
def test_remove_from_gitignore_helper_already_removed(self):
self.assertFalse(remove_from_gitignore_helper('.gitignore', 'test/path'))
def test_with_open_or_create_tinydb_helper_should_create_db(self):
path = os.getcwd()
filepath = os.path.join(path, 'test-db.yaml')
with_open_or_create_tinydb_helper(filepath, YAMLStorage)
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isfile(filepath))
os.remove(filepath)
def test_with_open_or_create_tinydb_helper_should_not_create_db(self):
path = os.getcwd()
filepath = os.path.join(path, 'test-db.yaml')
self.assertRaises(TypeError, with_open_or_create_tinydb_helper, filepath, None)
self.assertRaises(TypeError, with_open_or_create_tinydb_helper, None, YAMLStorage)
self.assertRaises(IOError, with_open_or_create_tinydb_helper, '.', YAMLStorage)
def test_with_open_or_create_file_helper_should_create_file(self):
path = os.getcwd()
filepath = os.path.join(path, 'file.txt')
with_open_or_create_file_helper(filepath, 'a+')
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isfile(filepath))
with_open_or_create_file_helper(filepath, 'r+')
self.assertTrue(os.path.exists(path))
self.assertTrue(os.path.isfile(filepath))
os.remove(filepath)
def test_with_open_or_create_file_helper_should_not_create_file(self):
path = os.getcwd()
filepath = os.path.join(path, 'file.txt')
self.assertRaises(TypeError, with_open_or_create_file_helper, filepath, None)
self.assertRaises(TypeError, with_open_or_create_file_helper, None, 'r+')
self.assertRaises(IOError, with_open_or_create_file_helper, '.', 'r+')
def test_create_directory_helper_should_create_directory(self):
path = os.path.join(os.getcwd(), 'test')
create_directory_helper(path)
self.assertTrue(os.path.exists(path))
os.rmdir(path)
def test_create_directory_helper_should_not_create_directory(self):
self.assertRaises(TypeError, create_directory_helper, None)
def test_onerror_helper_should_delete_file(self):
path = 'tmp'
create_directory_helper(path)
filepath = os.path.join(path, 'file.txt')
with_open_or_create_file_helper(filepath, 'a+')
os.chmod(filepath, stat.S_IREAD)
onerror_helper(os.remove, filepath, 'test')
self.assertFalse(os.path.isfile(filepath))
shutil.rmtree(path, onerror=onerror_helper)
def test_onerror_helper_should_not_delete_file(self):
path = 'tmp'
create_directory_helper(path)
filepath = os.path.join(path, 'file.txt')
with_open_or_create_file_helper(filepath, 'a+')
self.assertRaises(IOError, onerror_helper, os.remove, filepath, 'test exception message')
shutil.rmtree(path, onerror=onerror_helper)
class TestInit(unittest.TestCase):
def setUp(self):
self.context = HelperObject()
self.context.obj = None
self.db_table = 'aaa'
self.db_path = '.aaa/'
self.db_filename = 'aaa-db.yml'
self.db_filepath = os.path.join(self.db_path, self.db_filename)
self.db_storage = YAMLStorage
self.gitignore = '.gitignore-test'
def tearDown(self):
shutil.rmtree(self.db_path)
os.remove(self.gitignore)
def test_init_should_init(self):
expected_metadata_object = MPMMetadata(self.db_filepath, self.db_storage, self.db_table, self.gitignore)
output_metadata_object = mpm_init(self.context, self.db_table, self.db_path, self.db_filename, self.db_storage, self.gitignore)
self.assertTrue(os.path.exists(self.db_path))
self.assertTrue(os.path.exists(self.gitignore))
self.assertTrue(os.path.isfile(self.db_filepath))
self.assertEqual(expected_metadata_object.filepath, output_metadata_object.filepath)
self.assertEqual(expected_metadata_object.storage, output_metadata_object.storage)
self.assertEqual(expected_metadata_object.table_name, output_metadata_object.table_name)
with open(self.gitignore, 'r') as gitignore_file:
self.assertTrue(self.db_path in gitignore_file.read())
with open(self.db_filepath, 'r') as database_file:
self.assertTrue(self.db_table in database_file.read())
class TestInstall(unittest.TestCase):
def setUp(self):
self.context = HelperObject()
self.db = mpm_init(self.context)
def tearDown(self):
shutil.rmtree('.mpm', onerror=onerror_helper)
def test_install_defaults(self):
remote_url = 'https://github.com/msembinelli/broker.git'
reference = 'remotes/origin/master'
directory = 'modules'
name = 'broker'
full_path = os.path.join(directory, name)
expected_db_entry = {'name': name, 'remote_url': remote_url, 'reference': reference, 'path': path_to_yaml_helper(full_path)}
mpm_install(self.db, remote_url, reference, directory, None)
with TinyDB(self.db.filepath, storage=self.db.storage, default_table=self.db.table_name) as mpm_db:
module = Query()
db_entry = mpm_db.get(module.name == name)
self.assertEqual(db_entry, expected_db_entry)
self.assertTrue(os.path.exists(directory))
self.assertTrue(os.path.exists(full_path))
shutil.rmtree(directory, onerror=onerror_helper)
remove_from_gitignore_helper('.gitignore', full_path)
def test_install_custom_name(self):
remote_url = 'https://github.com/msembinelli/broker.git'
reference = 'remotes/origin/master'
directory = 'modules'
name = 'broker-test'
full_path = os.path.join(directory, name)
expected_db_entry = {'name': name, 'remote_url': remote_url, 'reference': reference, 'path': path_to_yaml_helper(full_path)}
mpm_install(self.db, remote_url, reference, directory, name)
with TinyDB(self.db.filepath, storage=self.db.storage, default_table=self.db.table_name) as mpm_db:
module = Query()
db_entry = mpm_db.get(module.name == name)
self.assertEqual(db_entry, expected_db_entry)
self.assertTrue(os.path.exists(directory))
self.assertTrue(os.path.exists(full_path))
shutil.rmtree(directory, onerror=onerror_helper)
remove_from_gitignore_helper('.gitignore', full_path)
def test_install_already_installed(self):
remote_url = 'https://github.com/msembinelli/broker.git'
reference = 'remotes/origin/master'
directory = 'modules'
name = 'broker'
full_path = os.path.join(directory, name)
expected_db_entry = {'name': name, 'remote_url': remote_url, 'reference': reference, 'path': path_to_yaml_helper(full_path)}
mpm_install(self.db, remote_url, reference, directory, None)
mpm_install(self.db, remote_url, reference, directory, None)
with TinyDB(self.db.filepath, storage=self.db.storage, default_table=self.db.table_name) as mpm_db:
module = Query()
db_entry = mpm_db.get(module.name == name)
self.assertEqual(db_entry, expected_db_entry)
self.assertTrue(os.path.exists(directory))
self.assertTrue(os.path.exists(full_path))
shutil.rmtree(directory, onerror=onerror_helper)
remove_from_gitignore_helper('.gitignore', full_path)
def test_install_reinstall(self):
remote_url = 'https://github.com/msembinelli/broker.git'
reference = 'remotes/origin/master'
directory = 'modules'
name = 'broker'
full_path = os.path.join(directory, name)
expected_db_entry = {'name': name, 'remote_url': remote_url, 'reference': reference, 'path': path_to_yaml_helper(full_path)}
mpm_install(self.db, remote_url, reference, directory, None)
shutil.rmtree(directory, onerror=onerror_helper)
self.assertFalse(os.path.exists(full_path))
mpm_install(self.db, remote_url, reference, directory, None)
with TinyDB(self.db.filepath, storage=self.db.storage, default_table=self.db.table_name) as mpm_db:
module = Query()
db_entry = mpm_db.get(module.name == name)
self.assertEqual(db_entry, expected_db_entry)
self.assertTrue(os.path.exists(directory))
self.assertTrue(os.path.exists(full_path))
shutil.rmtree(directory, onerror=onerror_helper)
remove_from_gitignore_helper('.gitignore', full_path)
def test_install_bad_parameters(self):
remote_url = 'https://github.com/msembinelli/broker.git'
reference = 'remotes/origin/master'
directory = 'modules'
name = 'broker'
full_path = os.path.join(directory, name)
expected_db_entry = {'name': name, 'remote_url': remote_url, 'reference': reference, 'path': path_to_yaml_helper(full_path)}
self.assertRaises(Exception, mpm_install, None, remote_url, reference, directory, None)
self.assertRaises(Exception, mpm_install, self.db, None, reference, directory, None)
self.assertRaises(Exception, mpm_install, self.db, remote_url, None, directory, None)
self.assertRaises(Exception, mpm_install, self.db, remote_url, reference, None, None)
class TestUninstall(unittest.TestCase):
def setUp(self):
self.context = HelperObject()
self.db = mpm_init(self.context)
def tearDown(self):
shutil.rmtree('.mpm', onerror=onerror_helper)
def test_uninstall_defaults(self):
remote_url = 'https://github.com/msembinelli/broker.git'
reference = 'remotes/origin/master'
directory = 'modules'
name = 'broker'
full_path = os.path.join(directory, name)
expected_db_entry = {'name': name, 'remote_url': remote_url, 'reference': reference, 'path': path_to_yaml_helper(full_path)}
mpm_install(self.db, remote_url, reference, directory, None)
mpm_uninstall(self.db, name)
self.assertFalse(os.path.exists(full_path))
with TinyDB(self.db.filepath, storage=self.db.storage, default_table=self.db.table_name) as mpm_db:
module = Query()
db_entry = mpm_db.get(module.name == name)
self.assertIsNone(db_entry)
self.assertEqual([], mpm_db.all())
def test_uninstall_nothing_to_uninstall(self):
name = 'broker'
mpm_uninstall(self.db, name)
with TinyDB(self.db.filepath, storage=self.db.storage, default_table=self.db.table_name) as mpm_db:
module = Query()
db_entry = mpm_db.get(module.name == name)
self.assertIsNone(db_entry)
self.assertEqual([], mpm_db.all())
def test_uninstall_bad_parameters(self):
name = 'broker'
self.assertRaises(Exception, mpm_uninstall, None, name)
class TestUpdate(unittest.TestCase):
def setUp(self):
self.context = HelperObject()
self.db = mpm_init(self.context)
self.remote_url = 'https://github.com/msembinelli/broker.git'
self.reference = 'remotes/origin/master'
self.directory = 'modules'
self.name = 'broker'
self.full_path = os.path.join(self.directory, self.name)
mpm_install(self.db, self.remote_url, self.reference, self.directory, None)
def tearDown(self):
mpm_uninstall(self.db, self.name)
shutil.rmtree('.mpm', onerror=onerror_helper)
def test_update_new_reference(self):
new_ref = '2dc33423188a7e06fa6e9725a0a74059b009ff6a'
mpm_update(self.db, self.name, new_ref, None)
repo = Repo(self.full_path)
self.assertEqual(repo.head.commit.hexsha, new_ref)
repo.close()
def test_update_new_directory(self):
new_directory = 'modules-test'
mpm_update(self.db, self.name, None, new_directory)
self.assertTrue(os.path.exists(os.path.join(new_directory, self.name)))
repo = Repo(os.path.join(new_directory, self.name))
self.assertIsNotNone(repo)
repo.close()
def test_update_no_module(self):
self.assertIsNone(mpm_update(self.db, 'broker-test', self.reference, self.directory))
def test_update_bad_parameters(self):
self.assertRaises(Exception, mpm_update, None, self.name, self.reference, self.directory)
class TestConvert(unittest.TestCase):
def setUp(self):
self.context = HelperObject()
self.db = mpm_init(self.context)
self.remote_url = 'https://github.com/msembinelli/test-mpm-with-submodule.git'
self.reference = 'remotes/origin/master'
self.directory = 'modules'
self.name = 'test-mpm-with-submodule'
self.full_path = os.path.join(self.directory, self.name)
mpm_install(self.db, self.remote_url, self.reference, self.directory, None)
def tearDown(self):
mpm_uninstall(self.db, self.name)
def test_convert_hard(self):
filename = 'convert-test.yaml'
product = '_default'
expected_module_name = 'broker'
dir_before = os.getcwd()
os.chdir(self.full_path)
context = HelperObject()
new_db = mpm_init(context)
mpm_convert(new_db, filename, product, True)
self.assertTrue(os.path.isfile(filename))
with TinyDB(filename, storage=YAMLStorage) as db:
module = Query()
db_entry = db.get(module.name == expected_module_name)
self.assertIsNotNone(db_entry)
with open('.gitmodules', 'r') as gitmodules:
self.assertTrue(expected_module_name not in gitmodules.read())
os.chdir(dir_before)
def test_convert_soft(self):
filename = 'convert-test.yaml'
product = '_default'
expected_module_name = 'broker'
dir_before = os.getcwd()
os.chdir(self.full_path)
context = HelperObject()
new_db = mpm_init(context)
mpm_convert(new_db, filename, product, False)
self.assertTrue(os.path.isfile(filename))
with TinyDB(filename, storage=YAMLStorage) as db:
module = Query()
db_entry = db.get(module.name == expected_module_name)
self.assertIsNotNone(db_entry)
with open('.gitmodules', 'r') as gitmodules:
self.assertTrue(expected_module_name in gitmodules.read())
os.chdir(dir_before)
def test_convert_no_gitmodules(self):
filename = 'convert-test.yaml'
product = '_default'
context = HelperObject()
new_db = mpm_init(context)
self.assertIsNone(mpm_convert(new_db, filename, product, False))
def test_convert_no_submodules(self):
filename = 'convert-test.yaml'
product = '_default'
        # create an empty .gitmodules so a file exists but contains no submodules
        with open('.gitmodules', 'a+'):
            pass
context = HelperObject()
new_db = mpm_init(context)
self.assertIsNone(mpm_convert(new_db, filename, product, False))
os.remove('.gitmodules')
class TestLoad(unittest.TestCase):
def setUp(self):
self.context = HelperObject()
self.db = mpm_init(self.context)
self.remote_url = 'https://github.com/msembinelli/q2.git'
self.reference = 'remotes/origin/master'
self.directory = 'modules'
self.name = 'q2'
self.full_path = os.path.join(self.directory, self.name)
mpm_install(self.db, self.remote_url, self.reference, self.directory, None)
def tearDown(self):
mpm_uninstall(self.db, self.name)
def test_load_defaults(self):
dir_before = os.getcwd()
os.chdir(self.full_path)
context = HelperObject()
new_db = mpm_init(context)
filename = 'package.yaml'
product = 'travis'
self.assertIsNone(mpm_load(new_db, filename, product))
os.chdir(dir_before)
def test_load_nothing_to_load(self):
        # create an empty package.yaml so a file exists but there is nothing to load
        with open('package.yaml', 'a+'):
            pass
filename = 'package.yaml'
product = '_default'
self.assertIsNone(mpm_load(self.db, filename, product))
os.remove(filename)
def test_load_file_not_found(self):
filename = 'package.yaml'
product = '_default'
self.assertFalse(os.path.isfile(filename))
self.assertIsNone(mpm_load(self.db, filename, product))
class TestPurge(unittest.TestCase):
def setUp(self):
self.context = HelperObject()
self.db = mpm_init(self.context)
self.remote_url = 'https://github.com/msembinelli/q2.git'
self.reference = 'remotes/origin/master'
self.directory = 'modules'
self.name = 'q2'
self.full_path = os.path.join(self.directory, self.name)
def tearDown(self):
pass
def test_purge(self):
mpm_install(self.db, self.remote_url, self.reference, self.directory, None)
self.assertIsNone(mpm_purge(self.db))
self.assertFalse(os.path.exists(self.full_path))
def test_purge_nothing_to_purge(self):
mpm_uninstall(self.db, self.name)
self.assertIsNone(mpm_purge(self.db))
self.assertFalse(os.path.exists(self.full_path))
class TestFreeze(unittest.TestCase):
def setUp(self):
self.context = HelperObject()
self.db = mpm_init(self.context)
self.remote_url = 'https://github.com/msembinelli/q2.git'
self.reference = 'remotes/origin/master'
self.directory = 'modules'
self.name = 'q2'
self.full_path = os.path.join(self.directory, self.name)
def tearDown(self):
pass
def test_freeze(self):
mpm_install(self.db, self.remote_url, self.reference, self.directory, None)
self.assertIsNone(mpm_freeze(self.db, 'package-test.yaml', 'test'))
self.assertTrue(os.path.isfile('package-test.yaml'))
mpm_uninstall(self.db, self.name)
os.remove('package-test.yaml')
def test_freeze_nothing_to_freeze(self):
self.assertIsNone(mpm_freeze(self.db, 'package-test.yaml', 'test'))
self.assertFalse(os.path.isfile('package-test.yaml'))
class TestShow(unittest.TestCase):
def setUp(self):
self.context = HelperObject()
self.db = mpm_init(self.context)
self.remote_url = 'https://github.com/msembinelli/broker.git'
self.reference = 'remotes/origin/master'
self.directory = 'modules'
self.name = 'broker'
def tearDown(self):
shutil.rmtree('.mpm', onerror=onerror_helper)
def test_show(self):
name = 'broker'
mpm_install(self.db, self.remote_url, self.reference, self.directory, None)
mpm_show(self.db)
mpm_uninstall(self.db, name)
def test_show_no_modules(self):
mpm_show(self.db)
def test_show_bad_parameters(self):
self.assertRaises(AttributeError, mpm_show, None)
if __name__ == '__main__':
unittest.main()
import json
import time
import re
import signal
import scenario_result_manager
import scenario_error
import scenario_tester
class ScenarioPingerBase(scenario_tester.ScenarioTesterBase):
def __init__(self, testdefs_file_name):
super(ScenarioPingerBase, self).__init__(testdefs_file_name)
self._set_ping_test_params()
self._set_test_scenario_data()
def _set_test_scenario_data(self):
try:
params = self.testdef_data["test-scenario-defs"]
result_file_name = params["test-result-file"]
scenario_file_name = params["scenario-file"]
# generate result manager
self.result_mgr = scenario_result_manager.ResultManager(result_file_name)
            # generate the scenario file from its pattern file
gen_scenario_cmd = self._make_command(params, "generate-scenario-command")
self._exec_command(gen_scenario_cmd)
# load generated scenario
            with open(scenario_file_name, 'r') as scenario_file:
                self.scenario_list = json.load(scenario_file)
# optional key
self.runner_class = None # 'this' class (default)
if "class" in params:
self.runner_class = params["class"]
except KeyError as err:
msg = "Cannot find key:%s in test definition 'test-scenario-defs' section." % err.message
raise scenario_error.ScenarioTestDefinitionError(msg)
except IOError as err:
msg = "File operation error in test definition 'test-scenario-defs' section.\n%s" % err
raise scenario_error.ScenarioTestError(msg)
def _set_ping_test_params(self):
try:
params = self.testdef_data["ping-test-params"]
self.ping_cmd = params["ping-command"]
# optional keys
self.ping_max_retry = 3 # (times) default
if "ping-max-retry" in params:
self.ping_max_retry = params["ping-max-retry"]
self.ping_retry_interval = 1 # (sec) default
if "ping-retry-interval" in params:
self.ping_retry_interval = params["ping-retry-interval"]
except KeyError as err:
msg = "Cannot find key:%s in test definition 'ping-test-params' section." % err.message
raise scenario_error.ScenarioTestDefinitionError(msg)
@staticmethod
def _check_arp_table(result_detail):
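        # a live neighbor entry in `ip neigh show` output looks like
        # (illustrative): 192.168.0.1 dev h1-eth0 lladdr 00:00:00:00:00:01 REACHABLE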
result_re = r"(REACHABLE|STALE|DELAY)"
if re.compile(result_re).search(result_detail):
# FAIL to clear arp table if found a REACHABLE entry
return "FAIL"
else:
return "SUCCESS"
    @staticmethod
    def _check_ping_result(result_detail):
        # e.g. "5 packets transmitted, 5 received, 0% packet loss, time 4004ms"
        result_re = r"(\d+) packets transmitted, (\d+) received"
        match = re.search(result_re, result_detail)
        if match is None:
            # no summary line found in the ping output: treat as failure
            return "FAIL"
        transmitted = int(match.group(1))
        received = int(match.group(2))
        # SUCCESS if more than half of the transmitted packets came back
        if received > transmitted / 2:
            return "SUCCESS"
        else:
            return "FAIL"
def _start_scenario(self, description):
self.logger.info("run scenario: %s", description)
self.result_mgr.append_scenario(description)
def _start_sub_scenario(self, description):
self.logger.info("run sub scenario: %s", description)
self.result_mgr.append_sub_scenario(description)
def _run_test_check_arp_table(self, description, host, expected_result):
# check
command = "ip neigh show"
result_detail = self._run_command_at(host, command)
result = self._check_arp_table(result_detail)
self.result_mgr.append_task_result_by(
description, host.name, command, expected_result, result, result_detail
)
def _run_test_pre_task(self):
self._start_sub_scenario("pre-tasks")
# run pre-task for all hosts
for host in self.net.hosts:
description = "clear host %s arp table" % host.name
self.logger.info("run task: %s", description)
# cleaning arp cache at host
self._run_command_at(host, "ip neigh flush dev %s" % host.defaultIntf().name)
self._run_test_check_arp_table(description, host, "SUCCESS")
def _run_test_post_task(self):
self._start_sub_scenario("post-tasks")
for host in self.net.hosts:
description = "check host %s arp table" % host.name
self.logger.info("run task: %s", description)
self._run_test_check_arp_table(description, host, "FAIL")
def _run_test_ping_task(self, task_list):
self._start_sub_scenario("main tasks")
        # lookup table to get a host instance by its name
host_dict = {h.name: h for h in self.net.hosts}
# run test
count = 0
total = len(task_list)
for task in task_list:
description = task['task']
src_host_name = task["source"]
dst_host_name = task["destination"]
expected_result = task["expect"]
count += 1
command = self.ping_cmd
self.logger.info(
"[%-3.1f%%/current:%d/total:%d] run task: %s",
100.0*count/total, count, total, description
)
if re.match(r"\d+\.\d+\.\d+\.\d+", dst_host_name):
# if match IP address
command = " ".join([command, dst_host_name])
else:
command = " ".join([command, host_dict[dst_host_name].IP()])
# run at first
result_detail, result = self._run_ping_at(host_dict[src_host_name], command)
# check need to retry
retry_count = 1
retry_interval = self.ping_retry_interval
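            # linear backoff: with the defaults (3 retries, 1 sec interval)
            # the waits before successive retries are 1, 2 and 3 seconds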
while retry_count <= self.ping_max_retry and result != expected_result:
self.logger.warning(
"task: %s, (retry:%d/%d, after wait %s[sec])",
description, retry_count, self.ping_max_retry, retry_interval
)
time.sleep(retry_interval)
# run retry
result_detail, result = self._run_ping_at(host_dict[src_host_name], command)
retry_count += 1
retry_interval += self.ping_retry_interval
# save result
self.result_mgr.append_task_result_by(
description, src_host_name, command, expected_result, result, result_detail
)
def _run_ping_at(self, host, command):
result_detail = self._run_command_at(host, command)
result = self._check_ping_result(result_detail)
self.logger.info("result: %s", result)
return result_detail, result
def _run_scenario_test(self):
for scenario in self.scenario_list:
self._start_scenario(scenario["scenario"])
self._run_test_pre_task()
self._run_test_ping_task(scenario['task-list'])
self._run_test_post_task()
# save test results to file
self.result_mgr.write_to_file()
def _run_test(self, opt_dic):
# option handling
# usecase selection
opt_manual = opt_dic["manual"]
opt_test_cli = opt_dic["test-cli"]
# layer selection
opt_layer1 = opt_dic["layer1"]
opt_layer2 = opt_dic["layer2"]
opt_all_layers = opt_dic["all-layers"]
if opt_all_layers:
opt_layer1 = True
opt_layer2 = True
# mininet setup
self._build_mininet()
self._start_mininet()
# wire(flow rules) setup
if opt_layer1:
self._put_layer1_flow_rules()
if opt_layer2:
self._put_layer2_flow_rules()
# run test
if opt_manual:
self._run_cli()
else:
if opt_layer2:
# run auto scenario test
self._set_sigint_handler()
self._run_scenario_test()
if opt_test_cli:
self._run_cli()
self._delete_flow_rules()
else:
                self.logger.warning("Only L1 (exclusive) wire flow rules were set up; skipping scenario tests.")
# post test operations
self._stop_mininet()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Fairly basic set of tools for real-time data augmentation on image data.
Can easily be extended to include new transformations,
new preprocessing methods, etc...
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import multiprocessing.pool
import os
import re
import threading
import numpy as np
from six.moves import range # pylint: disable=redefined-builtin
from tensorflow.python.keras._impl.keras import backend as K
from tensorflow.python.keras._impl.keras.utils.data_utils import Sequence
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-import-not-at-top
try:
from PIL import Image as pil_image
except ImportError:
pil_image = None
try:
from scipy import linalg
import scipy.ndimage as ndi
except ImportError:
linalg = None
ndi = None
# pylint: enable=g-import-not-at-top
if pil_image is not None:
_PIL_INTERPOLATION_METHODS = {
'nearest': pil_image.NEAREST,
'bilinear': pil_image.BILINEAR,
'bicubic': pil_image.BICUBIC,
}
# These methods were only introduced in version 3.4.0 (2016).
if hasattr(pil_image, 'HAMMING'):
_PIL_INTERPOLATION_METHODS['hamming'] = pil_image.HAMMING
if hasattr(pil_image, 'BOX'):
_PIL_INTERPOLATION_METHODS['box'] = pil_image.BOX
# This method is new in version 1.1.3 (2013).
if hasattr(pil_image, 'LANCZOS'):
_PIL_INTERPOLATION_METHODS['lanczos'] = pil_image.LANCZOS
def random_rotation(x,
rg,
row_axis=1,
col_axis=2,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Performs a random rotation of a Numpy image tensor.
Arguments:
x: Input tensor. Must be 3D.
rg: Rotation range, in degrees.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
Returns:
Rotated Numpy image tensor.
"""
theta = np.pi / 180 * np.random.uniform(-rg, rg)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0], [0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(rotation_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
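# A minimal usage sketch of `random_rotation` above (illustrative; assumes
# `img` is a 3D numpy array in channels-last layout, i.e. (rows, cols,
# channels)):
#
#   rotated = random_rotation(img, rg=30, row_axis=0, col_axis=1,
#                             channel_axis=2)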
def random_shift(x,
wrg,
hrg,
row_axis=1,
col_axis=2,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Performs a random spatial shift of a Numpy image tensor.
Arguments:
x: Input tensor. Must be 3D.
wrg: Width shift range, as a float fraction of the width.
hrg: Height shift range, as a float fraction of the height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
Returns:
Shifted Numpy image tensor.
"""
h, w = x.shape[row_axis], x.shape[col_axis]
tx = np.random.uniform(-hrg, hrg) * h
ty = np.random.uniform(-wrg, wrg) * w
translation_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
transform_matrix = translation_matrix # no need to do offset
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_shear(x,
intensity,
row_axis=1,
col_axis=2,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Performs a random spatial shear of a Numpy image tensor.
Arguments:
x: Input tensor. Must be 3D.
intensity: Transformation intensity.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
Returns:
Sheared Numpy image tensor.
"""
shear = np.random.uniform(-intensity, intensity)
shear_matrix = np.array([[1, -np.sin(shear), 0], [0, np.cos(shear), 0],
[0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(shear_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
def random_zoom(x,
zoom_range,
row_axis=1,
col_axis=2,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Performs a random spatial zoom of a Numpy image tensor.
Arguments:
x: Input tensor. Must be 3D.
zoom_range: Tuple of floats; zoom range for width and height.
row_axis: Index of axis for rows in the input tensor.
col_axis: Index of axis for columns in the input tensor.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
Returns:
Zoomed Numpy image tensor.
Raises:
ValueError: if `zoom_range` isn't a tuple.
"""
if len(zoom_range) != 2:
raise ValueError('`zoom_range` should be a tuple or list of two floats. '
'Received arg: ', zoom_range)
if zoom_range[0] == 1 and zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(zoom_range[0], zoom_range[1], 2)
zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
h, w = x.shape[row_axis], x.shape[col_axis]
transform_matrix = transform_matrix_offset_center(zoom_matrix, h, w)
x = apply_transform(x, transform_matrix, channel_axis, fill_mode, cval)
return x
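# Illustrative use of `random_zoom` above: zoom_range=(0.8, 1.2) draws
# independent width/height zoom factors uniformly from [0.8, 1.2], e.g.
#
#   zoomed = random_zoom(img, (0.8, 1.2), row_axis=0, col_axis=1,
#                        channel_axis=2)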
def random_channel_shift(x, intensity, channel_axis=0):
  """Shifts each channel by a random offset drawn from
  `[-intensity, intensity]`, clipping to the original value range of `x`."""
  x = np.rollaxis(x, channel_axis, 0)
  min_x, max_x = np.min(x), np.max(x)
  channel_images = [
      np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x,
              max_x) for x_channel in x
  ]
  x = np.stack(channel_images, axis=0)
  x = np.rollaxis(x, 0, channel_axis + 1)
  return x
def transform_matrix_offset_center(matrix, x, y):
  """Re-centers a homogeneous transform on the image center.

  Returns `T(o) . matrix . T(-o)` where `o` is the center offset, so that
  e.g. a rotation matrix rotates about the image center rather than the
  top-left origin.
  """
  o_x = float(x) / 2 + 0.5
  o_y = float(y) / 2 + 0.5
  offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
  reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
  transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
  return transform_matrix
def apply_transform(x,
transform_matrix,
channel_axis=0,
fill_mode='nearest',
cval=0.):
"""Apply the image transformation specified by a matrix.
Arguments:
x: 2D numpy array, single image.
transform_matrix: Numpy array specifying the geometric transformation.
channel_axis: Index of axis for channels in the input tensor.
fill_mode: Points outside the boundaries of the input
are filled according to the given mode
(one of `{'constant', 'nearest', 'reflect', 'wrap'}`).
cval: Value used for points outside the boundaries
of the input if `mode='constant'`.
Returns:
The transformed version of the input.
"""
x = np.rollaxis(x, channel_axis, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [
ndi.interpolation.affine_transform(
x_channel,
final_affine_matrix,
final_offset,
order=0,
mode=fill_mode,
cval=cval) for x_channel in x
]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_axis + 1)
return x
def flip_axis(x, axis):
  """Reverses (mirrors) `x` along the given axis."""
  x = np.asarray(x).swapaxes(axis, 0)
  x = x[::-1, ...]
  x = x.swapaxes(0, axis)
  return x
def array_to_img(x, data_format=None, scale=True):
"""Converts a 3D Numpy array to a PIL Image instance.
Arguments:
x: Input Numpy array.
data_format: Image data format.
scale: Whether to rescale image values
to be within [0, 255].
Returns:
A PIL Image instance.
Raises:
ImportError: if PIL is not available.
ValueError: if invalid `x` or `data_format` is passed.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 3:
raise ValueError('Expected image array to have rank 3 (single image). '
'Got array with shape:', x.shape)
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Invalid data_format:', data_format)
# Original Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but target PIL image has format (width, height, channel)
if data_format == 'channels_first':
x = x.transpose(1, 2, 0)
if scale:
x = x + max(-np.min(x), 0) # pylint: disable=g-no-augmented-assignment
x_max = np.max(x)
if x_max != 0:
x /= x_max
x *= 255
if x.shape[2] == 3:
# RGB
return pil_image.fromarray(x.astype('uint8'), 'RGB')
elif x.shape[2] == 1:
# grayscale
return pil_image.fromarray(x[:, :, 0].astype('uint8'), 'L')
else:
raise ValueError('Unsupported channel number: ', x.shape[2])
def img_to_array(img, data_format=None):
"""Converts a PIL Image instance to a Numpy array.
Arguments:
img: PIL Image instance.
data_format: Image data format.
Returns:
A 3D Numpy array.
Raises:
ValueError: if invalid `img` or `data_format` is passed.
"""
if data_format is None:
data_format = K.image_data_format()
if data_format not in {'channels_first', 'channels_last'}:
raise ValueError('Unknown data_format: ', data_format)
# Numpy array x has format (height, width, channel)
# or (channel, height, width)
# but original PIL image has format (width, height, channel)
x = np.asarray(img, dtype=K.floatx())
if len(x.shape) == 3:
if data_format == 'channels_first':
x = x.transpose(2, 0, 1)
elif len(x.shape) == 2:
if data_format == 'channels_first':
x = x.reshape((1, x.shape[0], x.shape[1]))
else:
x = x.reshape((x.shape[0], x.shape[1], 1))
else:
raise ValueError('Unsupported image shape: ', x.shape)
return x
def load_img(path, grayscale=False, target_size=None, interpolation='nearest'):
"""Loads an image into PIL format.
Arguments:
path: Path to image file
grayscale: Boolean, whether to load the image as grayscale.
target_size: Either `None` (default to original size)
or tuple of ints `(img_height, img_width)`.
interpolation: Interpolation method used to resample the image if the
target size is different from that of the loaded image.
Supported methods are "nearest", "bilinear", and "bicubic".
If PIL version 1.1.3 or newer is installed, "lanczos" is also
supported. If PIL version 3.4.0 or newer is installed, "box" and
"hamming" are also supported. By default, "nearest" is used.
Returns:
A PIL Image instance.
Raises:
ImportError: if PIL is not available.
ValueError: if interpolation method is not supported.
"""
if pil_image is None:
raise ImportError('Could not import PIL.Image. '
'The use of `array_to_img` requires PIL.')
img = pil_image.open(path)
if grayscale:
if img.mode != 'L':
img = img.convert('L')
else:
if img.mode != 'RGB':
img = img.convert('RGB')
if target_size is not None:
width_height_tuple = (target_size[1], target_size[0])
if img.size != width_height_tuple:
if interpolation not in _PIL_INTERPOLATION_METHODS:
raise ValueError(
'Invalid interpolation method {} specified. Supported '
'methods are {}'.format(
interpolation,
', '.join(_PIL_INTERPOLATION_METHODS.keys())))
resample = _PIL_INTERPOLATION_METHODS[interpolation]
img = img.resize(width_height_tuple, resample)
return img
def list_pictures(directory, ext='jpg|jpeg|bmp|png|ppm'):
  """Recursively lists image files under `directory` whose extension matches
  the `ext` regex alternation."""
  return [
      os.path.join(root, f)
      for root, _, files in os.walk(directory) for f in files
      if re.match(r'([\w]+\.(?:' + ext + '))', f)
  ]
class ImageDataGenerator(object):
"""Generate minibatches of image data with real-time data augmentation.
Arguments:
featurewise_center: set input mean to 0 over the dataset.
samplewise_center: set each sample mean to 0.
featurewise_std_normalization: divide inputs by std of the dataset.
samplewise_std_normalization: divide each input by its std.
zca_whitening: apply ZCA whitening.
zca_epsilon: epsilon for ZCA whitening. Default is 1e-6.
rotation_range: degrees (0 to 180).
width_shift_range: fraction of total width.
height_shift_range: fraction of total height.
shear_range: shear intensity (shear angle in radians).
zoom_range: amount of zoom. if scalar z, zoom will be randomly picked
in the range [1-z, 1+z]. A sequence of two can be passed instead
to select this range.
channel_shift_range: shift range for each channel.
fill_mode: points outside the boundaries are filled according to the
given mode ('constant', 'nearest', 'reflect' or 'wrap'). Default
is 'nearest'.
cval: value used for points outside the boundaries when fill_mode is
'constant'. Default is 0.
horizontal_flip: whether to randomly flip images horizontally.
vertical_flip: whether to randomly flip images vertically.
rescale: rescaling factor. If None or 0, no rescaling is applied,
otherwise we multiply the data by the value provided. This is
applied after the `preprocessing_function` (if any provided)
but before any other transformation.
      preprocessing_function: function that will be applied on each input.
The function will run before any other modification on it.
The function should take one argument:
one image (Numpy tensor with rank 3),
and should output a Numpy tensor with the same shape.
data_format: 'channels_first' or 'channels_last'. In 'channels_first'
mode, the channels dimension
(the depth) is at index 1, in 'channels_last' mode it is at index 3.
It defaults to the `image_data_format` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "channels_last".
"""
def __init__(self,
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
zca_epsilon=1e-6,
rotation_range=0.,
width_shift_range=0.,
height_shift_range=0.,
shear_range=0.,
zoom_range=0.,
channel_shift_range=0.,
fill_mode='nearest',
cval=0.,
horizontal_flip=False,
vertical_flip=False,
rescale=None,
preprocessing_function=None,
data_format=None):
if data_format is None:
data_format = K.image_data_format()
self.featurewise_center = featurewise_center
self.samplewise_center = samplewise_center
self.featurewise_std_normalization = featurewise_std_normalization
self.samplewise_std_normalization = samplewise_std_normalization
self.zca_whitening = zca_whitening
self.zca_epsilon = zca_epsilon
self.rotation_range = rotation_range
self.width_shift_range = width_shift_range
self.height_shift_range = height_shift_range
self.shear_range = shear_range
self.zoom_range = zoom_range
self.channel_shift_range = channel_shift_range
self.fill_mode = fill_mode
self.cval = cval
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rescale = rescale
self.preprocessing_function = preprocessing_function
if data_format not in {'channels_last', 'channels_first'}:
raise ValueError(
'`data_format` should be `"channels_last"` (channel after row and '
'column) or `"channels_first"` (channel before row and column). '
'Received arg: ', data_format)
self.data_format = data_format
if data_format == 'channels_first':
self.channel_axis = 1
self.row_axis = 2
self.col_axis = 3
if data_format == 'channels_last':
self.channel_axis = 3
self.row_axis = 1
self.col_axis = 2
self.mean = None
self.std = None
self.principal_components = None
if np.isscalar(zoom_range):
self.zoom_range = [1 - zoom_range, 1 + zoom_range]
elif len(zoom_range) == 2:
self.zoom_range = [zoom_range[0], zoom_range[1]]
else:
raise ValueError('`zoom_range` should be a float or '
'a tuple or list of two floats. '
'Received arg: ', zoom_range)
def flow(self,
x,
y=None,
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png'):
return NumpyArrayIterator(
x,
y,
self,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
data_format=self.data_format,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format)
def flow_from_directory(self,
directory,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False):
return DirectoryIterator(
directory,
self,
target_size=target_size,
color_mode=color_mode,
classes=classes,
class_mode=class_mode,
data_format=self.data_format,
batch_size=batch_size,
shuffle=shuffle,
seed=seed,
save_to_dir=save_to_dir,
save_prefix=save_prefix,
save_format=save_format,
follow_links=follow_links)
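  # Expected on-disk layout for `flow_from_directory` above (illustrative):
  #
  #   data/
  #     cats/
  #       cat0.jpg, cat1.jpg, ...
  #     dogs/
  #       dog0.jpg, dog1.jpg, ...
  #
  #   gen = ImageDataGenerator(rescale=1. / 255)
  #   batches = gen.flow_from_directory('data', target_size=(150, 150))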
def standardize(self, x):
"""Apply the normalization configuration to a batch of inputs.
Arguments:
x: batch of inputs to be normalized.
Returns:
The inputs, normalized.
"""
if self.preprocessing_function:
x = self.preprocessing_function(x)
if self.rescale:
x *= self.rescale
if self.samplewise_center:
x -= np.mean(x, keepdims=True)
if self.samplewise_std_normalization:
x /= np.std(x, keepdims=True) + 1e-7
if self.featurewise_center:
if self.mean is not None:
x -= self.mean
else:
        logging.warning('This ImageDataGenerator specifies '
                        '`featurewise_center`, but it hasn\'t '
                        'been fit on any training data. Fit it '
                        'first by calling `.fit(numpy_data)`.')
if self.featurewise_std_normalization:
if self.std is not None:
x /= (self.std + 1e-7)
else:
        logging.warning('This ImageDataGenerator specifies '
                        '`featurewise_std_normalization`, but it hasn\'t '
                        'been fit on any training data. Fit it '
                        'first by calling `.fit(numpy_data)`.')
if self.zca_whitening:
if self.principal_components is not None:
flatx = np.reshape(x, (-1, np.prod(x.shape[-3:])))
whitex = np.dot(flatx, self.principal_components)
x = np.reshape(whitex, x.shape)
else:
        logging.warning('This ImageDataGenerator specifies '
                        '`zca_whitening`, but it hasn\'t '
                        'been fit on any training data. Fit it '
                        'first by calling `.fit(numpy_data)`.')
return x
def random_transform(self, x, seed=None):
"""Randomly augment a single image tensor.
Arguments:
x: 3D tensor, single image.
seed: random seed.
Returns:
A randomly transformed version of the input (same shape).
Raises:
ImportError: if Scipy is not available.
"""
if ndi is None:
raise ImportError('Scipy is required for image transformations.')
# x is a single image, so it doesn't have image number at index 0
img_row_axis = self.row_axis - 1
img_col_axis = self.col_axis - 1
img_channel_axis = self.channel_axis - 1
if seed is not None:
np.random.seed(seed)
# use composition of homographies
# to generate final transform that needs to be applied
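    # the matrices are chained as rotation . shift . shear . zoom (in that
    # order) and the product is re-centered on the image before being applied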
if self.rotation_range:
theta = np.pi / 180 * np.random.uniform(-self.rotation_range,
self.rotation_range)
else:
theta = 0
if self.height_shift_range:
tx = np.random.uniform(-self.height_shift_range,
self.height_shift_range) * x.shape[img_row_axis]
else:
tx = 0
if self.width_shift_range:
ty = np.random.uniform(-self.width_shift_range,
self.width_shift_range) * x.shape[img_col_axis]
else:
ty = 0
if self.shear_range:
shear = np.random.uniform(-self.shear_range, self.shear_range)
else:
shear = 0
if self.zoom_range[0] == 1 and self.zoom_range[1] == 1:
zx, zy = 1, 1
else:
zx, zy = np.random.uniform(self.zoom_range[0], self.zoom_range[1], 2)
transform_matrix = None
if theta != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta),
np.cos(theta), 0], [0, 0, 1]])
transform_matrix = rotation_matrix
if tx != 0 or ty != 0:
shift_matrix = np.array([[1, 0, tx], [0, 1, ty], [0, 0, 1]])
transform_matrix = shift_matrix if transform_matrix is None else np.dot(
transform_matrix, shift_matrix)
if shear != 0:
shear_matrix = np.array([[1, -np.sin(shear), 0], [0, np.cos(shear), 0],
[0, 0, 1]])
transform_matrix = shear_matrix if transform_matrix is None else np.dot(
transform_matrix, shear_matrix)
if zx != 1 or zy != 1:
zoom_matrix = np.array([[zx, 0, 0], [0, zy, 0], [0, 0, 1]])
transform_matrix = zoom_matrix if transform_matrix is None else np.dot(
transform_matrix, zoom_matrix)
if transform_matrix is not None:
h, w = x.shape[img_row_axis], x.shape[img_col_axis]
transform_matrix = transform_matrix_offset_center(transform_matrix, h, w)
x = apply_transform(
x,
transform_matrix,
img_channel_axis,
fill_mode=self.fill_mode,
cval=self.cval)
if self.channel_shift_range != 0:
x = random_channel_shift(x, self.channel_shift_range, img_channel_axis)
if self.horizontal_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_col_axis)
if self.vertical_flip:
if np.random.random() < 0.5:
x = flip_axis(x, img_row_axis)
return x
def fit(self, x, augment=False, rounds=1, seed=None):
"""Fits internal statistics to some sample data.
Required for featurewise_center, featurewise_std_normalization
and zca_whitening.
Arguments:
x: Numpy array, the data to fit on. Should have rank 4.
In case of grayscale data,
the channels axis should have value 1, and in case
of RGB data, it should have value 3.
augment: Whether to fit on randomly augmented samples
rounds: If `augment`,
how many augmentation passes to do over the data
seed: random seed.
Raises:
ValueError: in case of invalid input `x`.
ImportError: if Scipy is not available.
"""
x = np.asarray(x, dtype=K.floatx())
if x.ndim != 4:
raise ValueError('Input to `.fit()` should have rank 4. '
'Got array with shape: ' + str(x.shape))
    if x.shape[self.channel_axis] not in {1, 3, 4}:
logging.warning(
'Expected input to be images (as Numpy array) '
'following the data format convention "' + self.data_format + '" '
'(channels on axis ' + str(self.channel_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' + str(self.channel_axis) + '. '
'However, it was passed an array with shape ' + str(x.shape) + ' (' +
str(x.shape[self.channel_axis]) + ' channels).')
if seed is not None:
np.random.seed(seed)
x = np.copy(x)
if augment:
ax = np.zeros(
tuple([rounds * x.shape[0]] + list(x.shape)[1:]), dtype=K.floatx())
for r in range(rounds):
for i in range(x.shape[0]):
ax[i + r * x.shape[0]] = self.random_transform(x[i])
x = ax
if self.featurewise_center:
self.mean = np.mean(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.mean = np.reshape(self.mean, broadcast_shape)
x -= self.mean
if self.featurewise_std_normalization:
self.std = np.std(x, axis=(0, self.row_axis, self.col_axis))
broadcast_shape = [1, 1, 1]
broadcast_shape[self.channel_axis - 1] = x.shape[self.channel_axis]
self.std = np.reshape(self.std, broadcast_shape)
x /= (self.std + K.epsilon())
if self.zca_whitening:
if linalg is None:
raise ImportError('Scipy is required for zca_whitening.')
flat_x = np.reshape(x, (x.shape[0], x.shape[1] * x.shape[2] * x.shape[3]))
sigma = np.dot(flat_x.T, flat_x) / flat_x.shape[0]
u, s, _ = linalg.svd(sigma)
self.principal_components = np.dot(
np.dot(u, np.diag(1. / np.sqrt(s + self.zca_epsilon))), u.T)
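      # i.e. the ZCA whitening matrix W = U . diag(1 / sqrt(s + eps)) . U^T,
      # which decorrelates pixel features while staying close to the
      # original basis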
class Iterator(Sequence):
"""Base class for image data iterators.
Every `Iterator` must implement the `_get_batches_of_transformed_samples`
method.
Arguments:
n: Integer, total number of samples in the dataset to loop over.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seeding for data shuffling.
"""
def __init__(self, n, batch_size, shuffle, seed):
self.n = n
self.batch_size = batch_size
self.seed = seed
self.shuffle = shuffle
self.batch_index = 0
self.total_batches_seen = 0
self.lock = threading.Lock()
self.index_array = None
self.index_generator = self._flow_index()
def _set_index_array(self):
self.index_array = np.arange(self.n)
if self.shuffle:
self.index_array = np.random.permutation(self.n)
def __getitem__(self, idx):
if idx >= len(self):
raise ValueError('Asked to retrieve element {idx}, '
'but the Sequence '
'has length {length}'.format(idx=idx,
length=len(self)))
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
self.total_batches_seen += 1
if self.index_array is None:
self._set_index_array()
index_array = self.index_array[self.batch_size * idx:self.batch_size *
(idx + 1)]
return self._get_batches_of_transformed_samples(index_array)
def __len__(self):
length = int(np.ceil(self.n / float(self.batch_size)))
return np.maximum(length, 0)
def on_epoch_end(self):
self._set_index_array()
def reset(self):
self.batch_index = 0
def _flow_index(self):
# Ensure self.batch_index is 0.
self.reset()
while 1:
if self.seed is not None:
np.random.seed(self.seed + self.total_batches_seen)
if self.batch_index == 0:
self._set_index_array()
current_index = (self.batch_index * self.batch_size) % self.n
if self.n > current_index + self.batch_size:
self.batch_index += 1
else:
self.batch_index = 0
self.total_batches_seen += 1
yield self.index_array[current_index:current_index + self.batch_size]
def __iter__(self): # pylint: disable=non-iterator-returned
# Needed if we want to do something like:
# for x, y in data_gen.flow(...):
return self
def __next__(self, *args, **kwargs):
return self.next(*args, **kwargs)
def _get_batches_of_transformed_samples(self, index_array):
"""Gets a batch of transformed samples.
Arguments:
index_array: array of sample indices to include in batch.
Returns:
A batch of transformed samples.
"""
raise NotImplementedError
class NumpyArrayIterator(Iterator):
"""Iterator yielding data from a Numpy array.
Arguments:
x: Numpy array of input data.
y: Numpy array of targets data.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
"""
def __init__(self,
x,
y,
image_data_generator,
batch_size=32,
shuffle=False,
seed=None,
data_format=None,
save_to_dir=None,
save_prefix='',
save_format='png'):
if y is not None and len(x) != len(y):
raise ValueError('X (images tensor) and y (labels) '
'should have the same length. '
'Found: X.shape = %s, y.shape = %s' %
(np.asarray(x).shape, np.asarray(y).shape))
if data_format is None:
data_format = K.image_data_format()
self.x = np.asarray(x, dtype=K.floatx())
if self.x.ndim != 4:
raise ValueError('Input data in `NumpyArrayIterator` '
'should have rank 4. You passed an array '
'with shape', self.x.shape)
channels_axis = 3 if data_format == 'channels_last' else 1
if self.x.shape[channels_axis] not in {1, 3, 4}:
logging.warning(
'NumpyArrayIterator is set to use the '
'data format convention "' + data_format + '" '
'(channels on axis ' + str(channels_axis) + '), i.e. expected '
'either 1, 3 or 4 channels on axis ' + str(channels_axis) + '. '
'However, it was passed an array with shape ' + str(self.x.shape) +
' (' + str(self.x.shape[channels_axis]) + ' channels).')
if y is not None:
self.y = np.asarray(y)
else:
self.y = None
self.image_data_generator = image_data_generator
self.data_format = data_format
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
super(NumpyArrayIterator, self).__init__(x.shape[0], batch_size, shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros(tuple([len(index_array)] + list(self.x.shape)[1:]),
dtype=K.floatx())
for i, j in enumerate(index_array):
x = self.x[j]
x = self.image_data_generator.random_transform(x.astype(K.floatx()))
x = self.image_data_generator.standardize(x)
batch_x[i] = x
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix, index=j, hash=np.random.randint(1e4),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
if self.y is None:
return batch_x
batch_y = self.y[index_array]
return batch_x, batch_y
def next(self):
"""For python 2.x.
Returns:
The next batch.
"""
# Keeps under lock only the mechanism which advances
# the indexing of each batch.
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
def _count_valid_files_in_directory(directory, white_list_formats,
follow_links):
"""Count files with extension in `white_list_formats` in a directory.
Arguments:
directory: absolute path to the directory containing files to be counted
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
follow_links: boolean.
Returns:
the count of files with extension in `white_list_formats` contained in
the directory.
"""
def _recursive_list(subpath):
return sorted(
os.walk(subpath, followlinks=follow_links), key=lambda tpl: tpl[0])
samples = 0
for _, _, files in _recursive_list(directory):
for fname in sorted(files):
is_valid = False
for extension in white_list_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
samples += 1
return samples
def _list_valid_filenames_in_directory(directory, white_list_formats,
class_indices, follow_links):
"""List paths of files in `subdir` with extensions in `white_list_formats`.
Arguments:
directory: absolute path to a directory containing the files to list.
The directory name is used as class label and must be a key of
`class_indices`.
white_list_formats: set of strings containing allowed extensions for
the files to be counted.
class_indices: dictionary mapping a class name to its index.
follow_links: boolean.
Returns:
classes: a list of class indices
filenames: the path of valid files in `directory`, relative from
`directory`'s parent (e.g., if `directory` is "dataset/class1",
the filenames will be ["class1/file1.jpg", "class1/file2.jpg", ...]).
"""
def _recursive_list(subpath):
return sorted(
os.walk(subpath, followlinks=follow_links), key=lambda tpl: tpl[0])
classes = []
filenames = []
subdir = os.path.basename(directory)
basedir = os.path.dirname(directory)
for root, _, files in _recursive_list(directory):
for fname in files:
is_valid = False
for extension in white_list_formats:
if fname.lower().endswith('.' + extension):
is_valid = True
break
if is_valid:
classes.append(class_indices[subdir])
# add filename relative to directory
absolute_path = os.path.join(root, fname)
filenames.append(os.path.relpath(absolute_path, basedir))
return classes, filenames
class DirectoryIterator(Iterator):
"""Iterator capable of reading images from a directory on disk.
Arguments:
directory: Path to the directory to read images from.
Each subdirectory in this directory will be
considered to contain images from one class,
or alternatively you could specify class subdirectories
via the `classes` argument.
image_data_generator: Instance of `ImageDataGenerator`
to use for random transformations and normalization.
target_size: tuple of integers, dimensions to resize input images to.
color_mode: One of `"rgb"`, `"grayscale"`. Color mode to read images.
classes: Optional list of strings, names of subdirectories
containing images from each class (e.g. `["dogs", "cats"]`).
It will be computed automatically if not set.
class_mode: Mode for yielding the targets:
`"binary"`: binary targets (if there are only two classes),
`"categorical"`: categorical targets,
`"sparse"`: integer targets,
`"input"`: targets are images identical to input images (mainly
used to work with autoencoders),
`None`: no targets get yielded (only input images are yielded).
batch_size: Integer, size of a batch.
shuffle: Boolean, whether to shuffle the data between epochs.
seed: Random seed for data shuffling.
data_format: String, one of `channels_first`, `channels_last`.
save_to_dir: Optional directory where to save the pictures
being yielded, in a viewable format. This is useful
for visualizing the random transformations being
applied, for debugging purposes.
save_prefix: String prefix to use for saving sample
images (if `save_to_dir` is set).
save_format: Format to use for saving sample images
(if `save_to_dir` is set).
"""
def __init__(self,
directory,
image_data_generator,
target_size=(256, 256),
color_mode='rgb',
classes=None,
class_mode='categorical',
batch_size=32,
shuffle=True,
seed=None,
data_format=None,
save_to_dir=None,
save_prefix='',
save_format='png',
follow_links=False):
if data_format is None:
data_format = K.image_data_format()
self.directory = directory
self.image_data_generator = image_data_generator
self.target_size = tuple(target_size)
if color_mode not in {'rgb', 'grayscale'}:
raise ValueError('Invalid color mode:', color_mode,
'; expected "rgb" or "grayscale".')
self.color_mode = color_mode
self.data_format = data_format
if self.color_mode == 'rgb':
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (3,)
else:
self.image_shape = (3,) + self.target_size
else:
if self.data_format == 'channels_last':
self.image_shape = self.target_size + (1,)
else:
self.image_shape = (1,) + self.target_size
self.classes = classes
if class_mode not in {'categorical', 'binary', 'sparse', 'input', None}:
raise ValueError('Invalid class_mode:', class_mode,
'; expected one of "categorical", '
'"binary", "sparse", "input"'
' or None.')
self.class_mode = class_mode
self.save_to_dir = save_to_dir
self.save_prefix = save_prefix
self.save_format = save_format
white_list_formats = {'png', 'jpg', 'jpeg', 'bmp', 'ppm'}
# first, count the number of samples and classes
self.samples = 0
if not classes:
classes = []
for subdir in sorted(os.listdir(directory)):
if os.path.isdir(os.path.join(directory, subdir)):
classes.append(subdir)
self.num_classes = len(classes)
self.class_indices = dict(zip(classes, range(len(classes))))
pool = multiprocessing.pool.ThreadPool()
function_partial = partial(
_count_valid_files_in_directory,
white_list_formats=white_list_formats,
follow_links=follow_links)
self.samples = sum(
pool.map(function_partial, (os.path.join(directory, subdir)
for subdir in classes)))
print('Found %d images belonging to %d classes.' % (self.samples,
self.num_classes))
# second, build an index of the images in the different class subfolders
results = []
self.filenames = []
self.classes = np.zeros((self.samples,), dtype='int32')
i = 0
for dirpath in (os.path.join(directory, subdir) for subdir in classes):
results.append(
pool.apply_async(_list_valid_filenames_in_directory, (
dirpath, white_list_formats, self.class_indices, follow_links)))
for res in results:
classes, filenames = res.get()
self.classes[i:i + len(classes)] = classes
self.filenames += filenames
i += len(classes)
pool.close()
pool.join()
super(DirectoryIterator, self).__init__(self.samples, batch_size, shuffle,
seed)
def _get_batches_of_transformed_samples(self, index_array):
batch_x = np.zeros((len(index_array),) + self.image_shape, dtype=K.floatx())
grayscale = self.color_mode == 'grayscale'
# build batch of image data
for i, j in enumerate(index_array):
fname = self.filenames[j]
img = load_img(os.path.join(self.directory, fname),
grayscale=grayscale,
target_size=self.target_size)
x = img_to_array(img, data_format=self.data_format)
x = self.image_data_generator.random_transform(x)
x = self.image_data_generator.standardize(x)
batch_x[i] = x
# optionally save augmented images to disk for debugging purposes
if self.save_to_dir:
for i, j in enumerate(index_array):
img = array_to_img(batch_x[i], self.data_format, scale=True)
fname = '{prefix}_{index}_{hash}.{format}'.format(
prefix=self.save_prefix, index=j, hash=np.random.randint(1e7),
format=self.save_format)
img.save(os.path.join(self.save_to_dir, fname))
# build batch of labels
if self.class_mode == 'input':
batch_y = batch_x.copy()
elif self.class_mode == 'sparse':
batch_y = self.classes[index_array]
elif self.class_mode == 'binary':
batch_y = self.classes[index_array].astype(K.floatx())
elif self.class_mode == 'categorical':
batch_y = np.zeros((len(batch_x), self.num_classes), dtype=K.floatx())
for i, label in enumerate(self.classes[index_array]):
batch_y[i, label] = 1.
else:
return batch_x
return batch_x, batch_y
def next(self):
"""For python 2.x.
Returns:
The next batch.
"""
with self.lock:
index_array = next(self.index_generator)
# The transformation of images is not under thread lock
# so it can be done in parallel
return self._get_batches_of_transformed_samples(index_array)
# coding=utf-8
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf_slim.ops.sparse_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tf_slim.layers import sparse_ops
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
def setUpModule():
tf.disable_eager_execution()
def _assert_sparse_tensor_value(test_case, expected, actual):
test_case.assertEqual(np.int64, np.array(actual.indices).dtype)
test_case.assertAllEqual(expected.indices, actual.indices)
test_case.assertEqual(
np.array(expected.values).dtype, np.array(actual.values).dtype)
test_case.assertAllEqual(expected.values, actual.values)
test_case.assertEqual(np.int64, np.array(actual.dense_shape).dtype)
test_case.assertAllEqual(expected.dense_shape, actual.dense_shape)
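# Hedged reference sketch (added for illustration, not part of the original
# test suite): a pure-NumPy restatement of the dense -> sparse conversion the
# tests below exercise. The default ignore value is assumed to be the dtype's
# zero (empty string for bytes), which is what the tests indicate.
def _dense_to_sparse_reference(dense, ignore_value=0):
  dense = np.asarray(dense)
  indices = np.argwhere(dense != ignore_value)  # (nnz, ndim) int64, row-major
  values = dense[tuple(indices.T)]              # kept entries, in index order
  return indices, values, np.array(dense.shape, dtype=np.int64)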
class DenseToSparseTensorTest(test.TestCase):
def test_dense_to_sparse_tensor_1d(self):
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([1, 0, 2, 0])
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.int32)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([1, 2], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_tensor_1d_float(self):
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([1.5, 0.0, 2.3, 0.0])
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
self.assertEqual(result.values.dtype, np.float32)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllClose([1.5, 2.3], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_tensor_1d_bool(self):
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([True, False, True, False])
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
      self.assertEqual(result.values.dtype, np.bool_)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([True, True], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_tensor_1d_str(self):
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([b'qwe', b'', b'ewq', b''])
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
      self.assertEqual(result.values.dtype, np.object_)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[0], [2]], result.indices)
self.assertAllEqual([b'qwe', b'ewq'], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_tensor_1d_str_special_ignore(self):
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor(
[b'qwe', b'', b'ewq', b''], ignore_value=b'qwe')
result = sess.run(st)
self.assertEqual(result.indices.dtype, np.int64)
      self.assertEqual(result.values.dtype, np.object_)
self.assertEqual(result.dense_shape.dtype, np.int64)
self.assertAllEqual([[1], [2], [3]], result.indices)
self.assertAllEqual([b'', b'ewq', b''], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_tensor_2d(self):
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([[1, 2, 0, 0], [3, 4, 5, 0]])
result = sess.run(st)
self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]],
result.indices)
self.assertAllEqual([1, 2, 3, 4, 5], result.values)
self.assertAllEqual([2, 4], result.dense_shape)
def test_dense_to_sparse_tensor_3d(self):
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor([[[1, 2, 0, 0], [3, 4, 5, 0]],
[[7, 8, 0, 0], [9, 0, 0, 0]]])
result = sess.run(st)
self.assertAllEqual([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
[1, 0, 0], [1, 0, 1], [1, 1, 0]], result.indices)
self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values)
self.assertAllEqual([2, 2, 4], result.dense_shape)
def test_dense_to_sparse_tensor_unknown_1d_shape(self):
with self.cached_session() as sess:
tensor = array_ops.placeholder(shape=[None], dtype=dtypes.int32)
st = sparse_ops.dense_to_sparse_tensor(tensor)
result = sess.run(st, feed_dict={tensor: [0, 100, 0, 3]})
self.assertAllEqual([[1], [3]], result.indices)
self.assertAllEqual([100, 3], result.values)
self.assertAllEqual([4], result.dense_shape)
def test_dense_to_sparse_tensor_unknown_3d_shape(self):
with self.cached_session() as sess:
tensor = array_ops.placeholder(
shape=[None, None, None], dtype=dtypes.int32)
st = sparse_ops.dense_to_sparse_tensor(tensor)
result = sess.run(st,
feed_dict={
tensor: [[[1, 2, 0, 0], [3, 4, 5, 0]],
[[7, 8, 0, 0], [9, 0, 0, 0]]]
})
self.assertAllEqual([[0, 0, 0], [0, 0, 1], [0, 1, 0], [0, 1, 1], [0, 1, 2],
[1, 0, 0], [1, 0, 1], [1, 1, 0]], result.indices)
self.assertAllEqual([1, 2, 3, 4, 5, 7, 8, 9], result.values)
self.assertAllEqual([2, 2, 4], result.dense_shape)
def test_dense_to_sparse_unknown_rank(self):
ph = array_ops.placeholder(dtype=dtypes.int32)
with self.cached_session() as sess:
st = sparse_ops.dense_to_sparse_tensor(ph)
result = sess.run(st, feed_dict={ph: [[1, 2, 0, 0], [3, 4, 5, 0]]})
self.assertAllEqual([[0, 0], [0, 1], [1, 0], [1, 1], [1, 2]],
result.indices)
self.assertAllEqual([1, 2, 3, 4, 5], result.values)
self.assertAllEqual([2, 4], result.dense_shape)
class SparseRowEnvelopeTest(test.TestCase):
def test_sparse_row_envelope(self):
expected_sparse_row_envelope = [1, 0, 3]
with self.cached_session() as sess:
sparse_input = sparse_tensor.SparseTensor(
indices=[[0, 0], [2, 0], [2, 1], [2, 2]],
values=[0, 1, 2, 3],
dense_shape=[3, 3])
sparse_row_envelope = sess.run(
sparse_ops.sparse_row_envelope(sparse_input))
self.assertAllEqual(expected_sparse_row_envelope,
sparse_row_envelope)
def test_sparse_row_envelope_unsorted_indices(self):
expected_sparse_row_envelope = [1, 0, 3]
with self.cached_session() as sess:
sparse_input = sparse_tensor.SparseTensor(
indices=[[2, 0], [2, 2], [2, 1], [0, 0]],
values=[0, 1, 2, 3],
dense_shape=[3, 3])
sparse_row_envelope = sess.run(
sparse_ops.sparse_row_envelope(sparse_input))
self.assertAllEqual(expected_sparse_row_envelope,
sparse_row_envelope)
def test_sparse_row_envelope_empty_in_the_end(self):
expected_sparse_row_envelope = [1, 0, 3, 0, 0]
with self.cached_session() as sess:
sparse_input = sparse_tensor.SparseTensor(
indices=[[0, 0], [2, 0], [2, 1], [2, 2]],
values=[0, 1, 2, 3],
dense_shape=[5, 3])
sparse_row_envelope = sess.run(
sparse_ops.sparse_row_envelope(sparse_input))
self.assertAllEqual(expected_sparse_row_envelope,
sparse_row_envelope)
def test_sparse_row_envelope_empty_3d(self):
expected_sparse_row_envelope = [1, 0, 3, 0, 0]
with self.cached_session() as sess:
sparse_input = sparse_tensor.SparseTensor(
indices=[[0, 0, 0], [0, 2, 0], [0, 2, 1], [0, 2, 2]],
values=[0, 1, 2, 3],
dense_shape=[1, 5, 3])
sparse_row_envelope = sess.run(
sparse_ops.sparse_row_envelope(sparse_input, 1, 2))
self.assertAllEqual(expected_sparse_row_envelope,
sparse_row_envelope)
class IndicatorToSparseIdsTest(test.TestCase):
def test_indicators_to_sparse_ids_1d(self):
indicators = (0, 0, 1, 0)
sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0,),),
values=(2,),
dense_shape=(1,),
), sparse_ids.eval())
def test_indicators_to_sparse_ids_2d(self):
indicators = (
(0, 0, 1, 0),
(1, 0, 0, 1),
)
sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=(2, 0, 3),
dense_shape=(2, 2),
), sparse_ids.eval())
def test_indicators_to_sparse_ids_3d(self):
indicators = (
((0, 0, 1, 0, 0), (0, 0, 0, 0, 0)),
((1, 0, 0, 1, 0), (0, 0, 1, 0, 0)),
((0, 0, 0, 0, 0), (0, 0, 0, 0, 0)),
((1, 0, 0, 1, 1), (0, 0, 1, 0, 0)),
)
sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=(
(0, 0, 0),
(1, 0, 0), (1, 0, 1), (1, 1, 0),
(3, 0, 0), (3, 0, 1), (3, 0, 2), (3, 1, 0)
), values=(
2,
0, 3, 2,
0, 3, 4, 2
), dense_shape=(4, 2, 3),
), sparse_ids.eval())
def test_int16_to_sparse_ids_2d(self):
indicators = (
(0, 0, 1, 0),
(1, 0, 0, 1),
)
sparse_ids = sparse_ops.indicators_to_sparse_ids(
indicators, dtype=dtypes.int16)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0, 0), (1, 0), (1, 1)),
values=np.array((2, 0, 3), dtype=np.int16),
dense_shape=(2, 2),
), sparse_ids.eval())
def test_indicators_to_sparse_ids_ignore_value(self):
indicators = (
((-1, -1, 10, -1), (-1, -1, -1, -1)),
((11, -1, -1, 12), (-1, -1, 13, -1)),
)
sparse_ids = sparse_ops.indicators_to_sparse_ids(
indicators, ignore_value=-1)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
values=(2, 0, 3, 2),
dense_shape=(2, 2, 2),
), sparse_ids.eval())
def test_string_indicators_to_sparse_ids(self):
indicators = (
(('', '', 'A', ''), ('', '', '', '')),
(('B', '', '', 'C'), ('', '', 'D', '')),
)
sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
values=(2, 0, 3, 2),
dense_shape=(2, 2, 2),
), sparse_ids.eval())
def test_string_indicators_to_sparse_ids_ignore_value(self):
indicators = (
(('x', 'x', 'A', 'x'), ('x', 'x', 'x', 'x')),
(('B', 'x', 'x', 'C'), ('x', 'x', 'D', 'x')),
)
sparse_ids = sparse_ops.indicators_to_sparse_ids(
indicators, ignore_value='x')
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
values=(2, 0, 3, 2),
dense_shape=(2, 2, 2),
), sparse_ids.eval())
def test_indicators_to_sparse_ids_unknown_3d_shape(self):
indicators_values = (
((0, 0, 1, 0), (0, 0, 0, 0)),
((1, 0, 0, 1), (0, 0, 1, 0)),
)
indicators = array_ops.placeholder(
dtype=dtypes.int32, shape=(None, None, None))
sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
values=(2, 0, 3, 2),
dense_shape=(2, 2, 2),
), sparse_ids.eval(feed_dict={indicators: indicators_values}))
def test_indicators_to_sparse_ids_unknown_rank(self):
indicators_values = (
((0, 0, 1, 0), (0, 0, 0, 0)),
((1, 0, 0, 1), (0, 0, 1, 0)),
)
indicators = array_ops.placeholder(dtype=dtypes.int32)
sparse_ids = sparse_ops.indicators_to_sparse_ids(indicators)
with self.cached_session():
_assert_sparse_tensor_value(self, sparse_tensor.SparseTensorValue(
indices=((0, 0, 0), (1, 0, 0), (1, 0, 1), (1, 1, 0)),
values=(2, 0, 3, 2),
dense_shape=(2, 2, 2),
), sparse_ids.eval(feed_dict={indicators: indicators_values}))
if __name__ == '__main__':
test.main()
# coding=gbk
from pprint import pprint
import random
import multiprocessing
import settings
import test
import ga
import threading
thread_pool = None
input_file = settings.filename
population = 160
generation = 0
matrix_list = []
top_score = 0
top_matrix = None
top_title = ''
score_list = []
mutate_ratio = 0.2
crossover_ratio = 0.4
population_limit = int((1 + crossover_ratio / 2) * population)
def print_list(name_list):
    for i, name in enumerate(name_list):
        print str(i) + ": " + name
def print_result_from_matrix(m):
row_size, col_size = m.shape
res = []
for i in range(0, row_size):
row_api_list = []
for j in range(0, col_size):
if m[i, j] == 1:
row_api_list.append(settings.api_list[j])
if len(row_api_list) != 0:
res.append(row_api_list)
pprint(res)
print "Category number = " + str(len(res))
def sort_matrix_list():
global score_list
global matrix_list
together = zip(score_list, matrix_list)
    sorted_together = sorted(together, key=lambda pair: pair[0], reverse=True)  # highest score first
score_list = [x[0] for x in sorted_together]
matrix_list = [x[1] for x in sorted_together]
def print_result_from_matrix_list():
global max_score, min_score
valid_score_list = score_list[:population]
max_score = max(valid_score_list)
min_score = min(valid_score_list)
print "matrix list of %d instances result, generation = %d, average = %d, max = %d, min = %d" %\
(len(valid_score_list), generation, sum(valid_score_list) / len(valid_score_list), max_score, min_score)
# print valid_score_list
print ga.get_uncovered_testcases(matrix_list[0])
def do_init():
test.init_from_test()
# Print the test dictionary.
# pprint(test_dict)
test.init_lists()
# Initialize the test matrix.
settings.test_matrix = test.init_test_matrix()
print "\n*****************************************************"
print "cleansed test matrix:"
# Remove the duplicated rows in the test matrix.
settings.test_matrix, settings.case_list = test.cleanse_test_matrix(settings.test_matrix, settings.case_list)
# Remove the shadow-covered rows in the test matrix.
# settings.test_matrix, settings.case_list = test.cleanse_test_matrix2(settings.test_matrix, settings.case_list)
settings.case_count = len(settings.case_list)
ga.print_matrix(settings.test_matrix)
print "\n*****************************************************"
print "case list:"
print_list(settings.case_list)
print "\n*****************************************************"
print "API list:"
print_list(settings.api_list)
print "\n*****************************************************"
print "case number = " + str(settings.case_count)
print "API number = " + str(settings.api_count)
def do_demo():
m = ga.init_random_matrix(settings.category_max_count, settings.api_count)
ga.print_matrix(m)
print "\n*****************************************************"
print "After mutation:"
ga.mutate_matrix(m)
ga.print_matrix(m)
# print "\n*****************************************************"
# evaluate_matrix(m)
# print_result_from_matrix(m)
# a = [1, 32]
# b = [3, 4]
# c = np.array([a, b])
#
# print c[:,1]
# print c
def do_init_generation():
global score_list
for i in range(population_limit):
matrix_list.append(ga.init_random_matrix(settings.category_max_count, settings.api_count))
score_list = [0] * population_limit
# do_evaluate()
# sort_matrix_list()
# print "\n*****************************************************"
# print_result_from_matrix_list()
# init_thread_pool()
def do_mutate(start, end):
for i in range(start, end):
ga.mutate_matrix(matrix_list[i])
# Evaluate the matrix: i
score_list[i] = int(ga.evaluate_matrix(matrix_list[i]))
def do_crossover(start, end):
for i in range(start, end, 2):
new_i = population + int((i - (1 - crossover_ratio) * population) / 2)
matrix_list[new_i] = ga.crossover_matrix(matrix_list[i], matrix_list[i + 1])
# Evaluate the matrix: new_i
score_list[new_i] = int(ga.evaluate_matrix(matrix_list[new_i]))
def do_evaluate(start, end):
global score_list
for i in range(start, end):
score_list[i] = int(ga.evaluate_matrix(matrix_list[i]))
# def do_eliminate():
# eliminate_size = len(matrix_list) - population
# del matrix_list[-eliminate_size:]
# del score_list[-eliminate_size:]
# 20% - mutate
# 40% - crossover
# 40% - no change
def do_evolve_once():
global generation
generation += 1
# random.shuffle(matrix_list)
matrix_list[:population] = random.sample(matrix_list[:population], population)
# matrix_list[int((1 - crossover_ratio - mutate_ratio) * population):population] =\
# random.sample(matrix_list[int((1 - crossover_ratio - mutate_ratio) * population):population], int((crossover_ratio + mutate_ratio) * population))
do_mutate(0, int(mutate_ratio * population))
# do_mutate(int((1 - crossover_ratio - mutate_ratio) * population), int((1 - crossover_ratio) * population))
do_crossover(int((1 - crossover_ratio) * population), population)
# do_evaluate(0, population_limit)
sort_matrix_list()
# do_eliminate()
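# Worked example (added for illustration) of the slice arithmetic above, with
# the defaults population = 160, mutate_ratio = 0.2, crossover_ratio = 0.4:
#   do_mutate(0, 32)          mutates the first 20% of the population
#   indices [32, 96)          are left unchanged (40%)
#   do_crossover(96, 160)     pairs the last 40% as parents and writes the
#                             children to indices [160, 192)
# population_limit = int((1 + 0.4 / 2) * 160) = 192, so matrix_list holds both
# parents and children until sort_matrix_list() ranks them together.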
def init_thread_pool():
global thread_pool
thread_pool = multiprocessing.Pool(processes=10)
def do_evolve_once_multi_thread():
global generation
generation += 1
matrix_list[:population] = random.sample(matrix_list[:population], population)
thread_pool.apply_async(do_mutate, (0, int(mutate_ratio * population)))
thread_pool.apply_async(do_crossover, (int((1 - crossover_ratio) * population), population))
# thread_pool.close()
# thread_pool.join()
# do_mutate(0, int(mutate_ratio * population))
# do_crossover(int((1 - crossover_ratio) * population), population)
sort_matrix_list()
# def do_evolve_generation(set_data_func, set_title_func):
# global input_file, population, generation, matrix_list, top_score, top_matrix, top_title
# global score_list
#
# while True:
# do_evolve_once()
# # do_evolve_once_multi_thread()
# print_result_from_matrix_list()
# if set_data_func:
# if top_score < score_list[0]:
# top_score = score_list[0]
# top_matrix = matrix_list[0]
# reduced_top_matrix = ga.get_reduced_matrix(top_matrix)
# top_title = "top generation: %d, top score: %d/%d, %s" % (generation + 1, top_score, settings.full_score, ga.get_matrix_description(reduced_top_matrix))
# set_data_func(ga.remove_empty_rows_from_matrix(2 * top_matrix - reduced_top_matrix))
# set_title_func("input: %s, population: %d, min/max: (%d, %d), current: %d, %s" % (input_file, population, min_score, max_score, generation + 1, top_title))
# generation += 1
class Data(object):
def __init__(self, _input_file, _population, _generation, _matrix_list, _top_score, _top_matrix, _top_title):
self.input_file = _input_file
self.population = _population
self.generation = _generation
self.matrix_list = _matrix_list
self.top_score = _top_score
self.top_matrix = _top_matrix
self.top_title = _top_title
def save_session():
data = Data(input_file, population, generation, matrix_list, top_score, top_matrix, top_title)
return data
def load_session(data):
global input_file, population, generation, matrix_list, top_score, top_matrix, top_title
global population_limit
input_file = data.input_file
population = data.population
generation = data.generation
matrix_list = data.matrix_list
top_score = data.top_score
top_matrix = data.top_matrix
top_title = data.top_title
population_limit = int((1 + crossover_ratio / 2) * population)
def show_session(set_data_func, set_title_func):
global input_file, population, generation, matrix_list, top_score, top_matrix, top_title
global score_list
print_result_from_matrix_list()
reduced_top_matrix = ga.get_reduced_matrix(top_matrix)
set_data_func(ga.remove_empty_rows_from_matrix(2 * top_matrix - reduced_top_matrix))
set_title_func("input: %s, population: %d, min/max: (%d, %d), current: %d, %s" % (input_file, population, min_score, max_score, generation + 1, top_title))
class MyThread(threading.Thread):
def __init__(self, set_data_func, set_title_func):
super(MyThread, self).__init__()
self.stopped = False
# self.serialized = False
self.set_data_func = set_data_func
self.set_title_func = set_title_func
def stop(self):
self.stopped = True
def init_random_data(self):
do_init_generation()
def run(self):
global input_file, population, generation, matrix_list, top_score, top_matrix, top_title
global score_list
while True:
if self.stopped:
generation -= 1
break
else:
do_evolve_once()
# do_evolve_once_multi_thread()
print_result_from_matrix_list()
if top_score < score_list[0]:
top_score = score_list[0]
top_matrix = matrix_list[0]
reduced_top_matrix = ga.get_reduced_matrix(top_matrix)
top_title = "top generation: %d, top score: %d/%d, %s" % (generation + 1, top_score, settings.full_score, ga.get_matrix_description(reduced_top_matrix))
self.set_data_func(ga.remove_empty_rows_from_matrix(2 * top_matrix - reduced_top_matrix))
self.set_title_func("input: %s, population: %d, min/max: (%d, %d), current: %d, %s" % (input_file, population, min_score, max_score, generation + 1, top_title))
if __name__ == '__main__':
do_init()
do_demo()
do_init_generation()
# do_evolve_generation(None, None)
#!/usr/bin/env python
#------------------------------------------------------------
# Script compares efficiency of automatic derivatives vs
# analytical in mpfit.py
# Vog, 31 okt 2011
#------------------------------------------------------------
import numpy
from matplotlib.pyplot import figure, show, rc
from kapteyn import kmpfit
from matplotlib.patches import Polygon
def confpred_band(x, dfdp, prob, fitobj, f, prediction, abswei=False, err=None):
#----------------------------------------------------------
# Return values for a confidence or a prediction band.
# See documentation for methods confidence_band and
# prediction_band
#----------------------------------------------------------
from scipy.stats import t
# Given the confidence or prediction probability prob = 1-alpha
# we derive alpha = 1 - prob
alpha = 1 - prob
prb = 1.0 - alpha/2
tval = t.ppf(prb, fitobj.dof)
C = fitobj.covar
n = len(fitobj.params) # Number of parameters from covariance matrix
p = fitobj.params
N = len(x)
if abswei:
covscale = 1.0 # Do not apply correction with red. chi^2
else:
covscale = fitobj.rchi2_min
df2 = numpy.zeros(N)
for j in range(n):
for k in range(n):
df2 += dfdp[j]*dfdp[k]*C[j,k]
if prediction:
df = numpy.sqrt(err*err+covscale*df2)
else:
df = numpy.sqrt(covscale*df2)
y = f(p, x)
delta = tval * df
upperband = y + delta
lowerband = y - delta
return y, upperband, lowerband
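def df2_quadratic_form(dfdp, C):
    #----------------------------------------------------------
    # Hedged helper (added for illustration; not called by this script):
    # a vectorized equivalent of the double loop in confpred_band().
    # dfdp is a list of n derivative arrays of length N and C the n x n
    # covariance matrix; the result is, for every x,
    #    df2(x) = sum_j sum_k (df/dp_j)(df/dp_k) C[j,k]
    # i.e. the first-order (delta-method) propagation of the parameter
    # covariance to the model value f(p,x).
    #----------------------------------------------------------
    J = numpy.asarray(dfdp)                     # shape (n, N)
    return numpy.einsum('jn,jk,kn->n', J, C, J)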
def confidence_band(x, dfdp, confprob, fitobj, f, err=None, abswei=False):
#----------------------------------------------------------
# Given a value for x, calculate the error df in y = model(p,x)
# This function returns for each x in a NumPy array, the
# upper and lower value of the confidence interval.
# The arrays with limits are returned and can be used to
# plot confidence bands.
#
#
# Input:
#
# x NumPy array with values for which you want
# the confidence interval.
#
# dfdp A list with derivatives. There are as many entries in
# this list as there are parameters in your model.
#
    #   confprob   Confidence probability as a fraction (e.g. 0.90 or 0.95).
    #              From this number we derive the significance level
    #              alpha = 1 - confprob (e.g. 0.05). The Confidence Band
# is a 100*(1-alpha)% band. This implies
# that for a given value of x the probability that
# the 'true' value of f(p,x) falls within these limits is
# 100*(1-alpha)%.
#
# fitobj The Fitter object from a fit with kmpfit
#
# f A function that returns a value y = f(p,x)
# p are the best-fit parameters and x is a NumPy array
# with values of x for which you want the confidence interval.
#
# abswei Are the weights absolute? For absolute weights we take
# unscaled covariance matrix elements in our calculations.
# For unit weighting (i.e. unweighted) and relative
# weighting, we scale the covariance matrix elements with
# the value of the reduced chi squared.
#
# Returns:
#
# y The model values at x: y = f(p,x)
# upperband The upper confidence limits
# lowerband The lower confidence limits
#
# Note:
#
# If parameters were fixed in the fit, the corresponding
# error is 0 and there is no contribution to the confidence
# interval.
#----------------------------------------------------------
return confpred_band(x, dfdp, confprob, fitobj, f, prediction=False, err=err, abswei=abswei)
def prediction_band(x, dfdp, predprob, fitobj, f, err=None, abswei=False):
#----------------------------------------------------------
# Given a value for x, calculate the error df in y = model(p,x)
# This function returns for each x in a NumPy array, the
# upper and lower value of the prediction interval.
# The arrays with limits are returned and can be used to
# plot confidence bands.
#
#
# Input:
#
# x NumPy array with values for which you want
# the prediction interval.
#
# dfdp A list with derivatives. There are as many entries in
# this list as there are parameters in your model.
#
    #   predprob   Prediction probability as a fraction (e.g. 0.90 or 0.95).
    #              From this number we derive the significance level
    #              alpha = 1 - predprob (e.g. 0.05). The Prediction Band
# is a 100*(1-alpha)% band. This implies
# that values of one or more future observations from
# the same population from which a given data set was sampled,
# will fall in this band with a probability of 100*(1-alpha)%
#
# fitobj The Fitter object from a fit with kmpfit
#
# f A function that returns a value y = f(p,x)
# p are the best-fit parameters and x is a NumPy array
# with values of x for which you want the confidence interval.
#
# abswei Are the weights absolute? For absolute weights we take
# unscaled covariance matrix elements in our calculations.
# For unit weighting (i.e. unweighted) and relative
# weighting, we scale the covariance matrix elements with
# the value of the reduced chi squared.
#
# Returns:
#
# y The model values at x: y = f(p,x)
# upperband The upper prediction limits
# lowerband The lower prediction limits
#
# Note:
#
# If parameters were fixed in the fit, the corresponding
# error is 0 and there is no contribution to the prediction
# interval.
#----------------------------------------------------------
return confpred_band(x, dfdp, predprob, fitobj, f,
prediction=True, err=err, abswei=abswei)
def my_model(p, x):
#-----------------------------------------------------------------------
# This describes the model and its parameters for which we want to find
# the best fit. 'p' is a sequence of parameters (array/list/tuple).
#-----------------------------------------------------------------------
A, mu, sigma, zerolev = p
return( A * numpy.exp(-(x-mu)*(x-mu)/(2.0*sigma*sigma)) + zerolev )
def my_residuals(p, data):
#-----------------------------------------------------------------------
# This function is the function called by the fit routine in kmpfit
    # It returns a weighted residual. The fit routine minimizes the
    # sum of squares of these values.
#-----------------------------------------------------------------------
x, y, err = data
return (y-my_model(p,x)) / err
def my_derivs(p, data, dflags):
#-----------------------------------------------------------------------
# This function is used by the fit routine to find the values for
# the explicit partial derivatives. Argument 'dflags' is a list
# with booleans. If an element is True then an explicit partial
# derivative is required.
#-----------------------------------------------------------------------
x, y, err = data # y is dummy here
A, mu, sigma, zerolev = p
pderiv = numpy.zeros([len(p), len(x)]) # You need to create the required array
sig2 = sigma * sigma
sig3 = sig2 * sigma
xmu = x-mu
xmu2 = xmu**2
expo = numpy.exp(-xmu2/(2.0*sig2))
fx = A * expo
for i, flag in enumerate(dflags):
if flag:
if i == 0:
pderiv[0] = expo
elif i == 1:
pderiv[1] = fx * xmu/(sig2)
elif i == 2:
pderiv[2] = fx * xmu2/(sig3)
elif i == 3:
pderiv[3] = 1.0
return pderiv/-err
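def check_derivs(p, x, err, h=1e-6):
    #-----------------------------------------------------------------------
    # Hedged self-check (added for illustration; not called by this script):
    # verifies the analytic derivatives above against central finite
    # differences. The step 'h' and the tolerance are arbitrary choices.
    #-----------------------------------------------------------------------
    dummy_y = numpy.zeros_like(x)
    analytic = my_derivs(p, (x, dummy_y, err), (True,)*len(p)) * -err
    for i in range(len(p)):
        pp, pm = list(p), list(p)
        pp[i] += h
        pm[i] -= h
        numeric = (my_model(pp, x) - my_model(pm, x)) / (2.0*h)
        assert numpy.allclose(analytic[i], numeric, atol=1e-4)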
# Artificial data
N = 50
x = numpy.linspace(-5, 10, N)
truepars = [10.0, 5.0, 1.0, 0.0]
p0 = [9, 4.5, 0.8, 0]
rms_data = 0.8
rms_err = 0.1
y = my_model(truepars, x) + numpy.random.normal(0.0, rms_data, N)
err = numpy.random.normal(0.6, rms_err, N)
#err = err*0 + 1
# The fit
fitobj = kmpfit.Fitter(residuals=my_residuals, deriv=my_derivs, data=(x, y, err))
try:
fitobj.fit(params0=p0)
except Exception as mes:
print("Something wrong with fit: ", mes)
raise SystemExit
print("\n\n======== Results kmpfit with explicit partial derivatives =========")
print("Params: ", fitobj.params)
print("Errors from covariance matrix : ", fitobj.xerror)
print("Uncertainties assuming reduced Chi^2=1: ", fitobj.stderr)
print("Chi^2 min: ", fitobj.chi2_min)
print("Reduced Chi^2: ", fitobj.rchi2_min)
print("Iterations: ", fitobj.niter)
print("Function ev: ", fitobj.nfev)
print("Status: ", fitobj.status)
print("Status Message:", fitobj.message)
print("Covariance:\n", fitobj.covar)
# Re-use my_derivs() but rescale derivatives back again with -err
dervs = my_derivs(fitobj.params, (x,y,err), (True,True,True,True))*-err
dfdp = [dervs[0], dervs[1], dervs[2], dervs[3]]
confprob = 0.95
ydummy, upperband, lowerband = confidence_band(x, dfdp, confprob, fitobj, my_model)
verts_conf = list(zip(x, lowerband)) + list(zip(x[::-1], upperband[::-1]))
predprob = 0.90
ydummy, upperband, lowerband = prediction_band(x, dfdp, predprob, fitobj, my_model,
err=err, abswei=False)
verts_pred = list(zip(x, lowerband)) + list(zip(x[::-1], upperband[::-1]))
# Plot the result
rc('font', size=9)
rc('legend', fontsize=8)
fig = figure()
frame = fig.add_subplot(1,1,1)
X = numpy.linspace(x.min(), x.max(), 100)
frame.errorbar(x, y, yerr=err, fmt='go', alpha=0.7, label="Noisy data")
frame.plot(X, my_model(truepars,X), 'r', label="True data")
frame.plot(X, my_model(fitobj.params,X), 'b', lw=2, label="Fit with kmpfit")
poly = Polygon(verts_conf, closed=True, fc='g', ec='g', alpha=0.3,
label="CI (%g)"%confprob)
frame.add_patch(poly)
poly = Polygon(verts_pred, closed=True, fc='r', ec='r', alpha=0.3,
label="PI (%g)"%predprob)
frame.add_patch(poly)
frame.set_xlabel("X")
frame.set_ylabel("Measurement data")
frame.set_title("Confidence- and prediction bands for Gaussian model",
fontsize=10)
delta = (x.max()-x.min())/10.0
frame.set_xlim(x.min()-delta, x.max()+delta)
frame.grid(True)
# Check prediction intervals
"""
for i in range(500):
y = my_model(truepars, x) + numpy.random.normal(0.0, rms_data, N)
err = numpy.random.normal(0.0, rms_err, N)
#frame.plot(x,y,'o')
frame.errorbar(x, y, yerr=err, fmt='o')
"""
# A nice background for the entire plot
from matplotlib.cm import copper
frame.imshow([[0, 0],[1,1]], interpolation='bicubic', cmap=copper,
vmin=-0.5, vmax=0.5,
extent=(frame.get_xlim()[0], frame.get_xlim()[1],
frame.get_ylim()[0], frame.get_ylim()[1]),
alpha=1)
leg = frame.legend(loc=2)
show()
from functools import wraps
from werkzeug.exceptions import HTTPException
from werkzeug.routing import Map, Rule
from flask import current_app, request
from flask_socketio import join_room, leave_room
from .exc import InvalidRequestError, InvalidURIError, SocketAPIError
class SocketAPI(object):
def __init__(self, socketio=None, namespace=None):
self.namespace = namespace
self.routes = Map()
self.urls = self.routes.bind('/', '/')
self.patch_handlers = {}
if socketio is not None:
self.init_socketio(socketio)
def init_socketio(self, socketio):
self.socketio = socketio
@socketio.on('create', namespace=self.namespace)
def handle_create(payload):
            # Retrieve request arguments.
if 'uri' not in payload:
raise InvalidRequestError('missing URI')
uri = payload['uri']
attributes = payload.get('attributes', {})
# Search for a matching route.
try:
creator, kwargs = self.urls.match(uri, method='POST')
except HTTPException:
# No registered resource creator for this uri.
raise InvalidRequestError("no registered resource creator for %s'" % uri)
# Create the new resource instance.
kwargs.update(attributes)
resource = creator(**kwargs)
# Send the creation event to all subscribers of the uri.
self.socketio.emit('create', {
'uri': uri,
'resource': resource
}, room=uri)
        @socketio.on('patch', namespace=self.namespace)
        def handle_patch(payload):
            # Retrieve request arguments.
if 'uri' not in payload:
raise InvalidRequestError('missing URI')
uri = payload['uri']
patch = payload.get('patch', {})
# Search for a matching route.
try:
rule, kwargs = self.urls.match(uri, return_rule=True, method='PATCH')
kwargs['patch'] = patch
except HTTPException:
# No registered resource patcher for this uri.
raise InvalidRequestError("no registered resource patcher for %s'" % uri)
# Call all the resource patchers for the given uri.
for patch_handler in self.patch_handlers[rule.rule]:
patch_handler(**kwargs)
# Send the patch event to all subscribers of the resource, and of
# the resource list.
for room_name in (uri, uri[0:len(uri) - len(uri.split('/')[-1])]):
self.socketio.emit('patch', {
'uri': uri,
'patch': patch
}, room=room_name)
@socketio.on('delete', namespace=self.namespace)
def handle_delete(payload):
            # Retrieve request arguments.
if 'uri' not in payload:
raise InvalidRequestError('missing URI')
uri = payload['uri']
# Search for a matching route.
try:
deleter, kwargs = self.urls.match(uri, method='DELETE')
except HTTPException:
# No registered resource deleter for this uri.
raise InvalidRequestError("no registered resource deleter for %s'" % uri)
# Delete the resource.
resource = deleter(**kwargs)
# Send the deletion event to all subscribers of the resource, and
# of the resource list.
for room_name in (uri, uri[0:len(uri) - len(uri.split('/')[-1])]):
self.socketio.emit('delete', {
'uri': uri
}, room=room_name)
@socketio.on('subscribe', namespace=self.namespace)
def handle_subscribe(uri):
# Try to retrieve the subscribed resource, so that we can send its
# current state to the subscriber.
try:
getter, kwargs = self.urls.match(uri, method='GET')
resource = getter(**kwargs)
except HTTPException:
resource = None
if resource is not None:
self.socketio.emit('state', {
'uri': uri,
'resource': resource
}, room=request.sid)
join_room(uri)
@socketio.on('unsubscribe', namespace=self.namespace)
def handle_unsubscribe(uri):
leave_room(uri)
@socketio.on_error(self.namespace)
def handle_error(e):
if isinstance(e, SocketAPIError):
# Instances of SocketAPIError are forwarded to the client.
self.socketio.emit('api_error', {
'error': e.__class__.__name__,
'message': str(e)
}, room=request.sid)
else:
# Other errors are considered server errors and should not be
# forwarded to the client, except in debug mode.
self.socketio.emit('server_error', {
'error': e.__class__.__name__,
'message': str(e) if current_app.debug else None
}, room=request.sid)
# Log the error.
current_app.logger.exception(e)
def resource_creator(self, rule):
# Make sure the given rule corresponds to a list uri.
if not rule.endswith('/'):
raise InvalidURIError('resource creators should be registered on list uri')
def decorate(fn):
@wraps(fn)
def decorated(*args, **kwargs):
return fn(*args, **kwargs)
# Register a new POST route for the given rule.
self.routes.add(Rule(rule, endpoint=decorated, methods=['POST']))
return decorated
return decorate
def resource_getter(self, rule):
def decorate(fn):
@wraps(fn)
def decorated(*args, **kwargs):
return fn(*args, **kwargs)
# Register a new GET route for the given rule.
self.routes.add(Rule(rule, endpoint=decorated, methods=['GET']))
return decorated
return decorate
def resource_patcher(self, rule):
# Make sure the rule doesn't correspond to a list.
if rule.endswith('/'):
raise InvalidURIError('cannot register resource patchers on a list uri')
def decorate(fn):
@wraps(fn)
def decorated(*args, **kwargs):
return fn(*args, **kwargs)
# Check if there already is a route to catch patch requests on the
# given rule.
for route in self.routes.iter_rules():
if (route.rule == rule) and ('PATCH' in route.methods):
break
else:
# Register a new PATCH route for the given rule.
self.routes.add(Rule(rule, methods=['PATCH']))
# Register the given patch handler.
if rule not in self.patch_handlers:
self.patch_handlers[rule] = []
self.patch_handlers[rule].append(decorated)
return decorated
return decorate
def resource_deleter(self, rule):
# Make sure the rule doesn't correspond to a list.
if rule.endswith('/'):
raise InvalidURIError('cannot register resource deleters on a list uri')
def decorate(fn):
@wraps(fn)
def decorated(*args, **kwargs):
return fn(*args, **kwargs)
# Register a new DELETE route for the given rule.
self.routes.add(Rule(rule, endpoint=decorated, methods=['DELETE']))
return decorated
return decorate
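# Hedged usage sketch (added for illustration, not part of the original
# module): shows how the decorators above are meant to be wired together.
# The Flask/SocketIO bootstrap and the in-memory 'users' store are
# assumptions made for this example only.
def make_example_app():
    from flask import Flask
    from flask_socketio import SocketIO

    app = Flask(__name__)
    socketio = SocketIO(app)
    api = SocketAPI(socketio, namespace='/api')
    users = {}

    @api.resource_creator('/users/')
    def create_user(name):
        users[name] = {'name': name}
        return users[name]

    @api.resource_getter('/users/<name>')
    def get_user(name):
        return users[name]

    @api.resource_patcher('/users/<name>')
    def patch_user(name, patch):
        users[name].update(patch)

    return app, socketio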
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_parse_qs,
compat_urllib_parse,
compat_urllib_parse_unquote,
compat_urllib_request,
)
from ..utils import (
determine_ext,
ExtractorError,
int_or_none,
)
class MetacafeIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?metacafe\.com/watch/([^/]+)/([^/]+)/.*'
_DISCLAIMER = 'http://www.metacafe.com/family_filter/'
_FILTER_POST = 'http://www.metacafe.com/f/index.php?inputType=filter&controllerGroup=user'
IE_NAME = 'metacafe'
_TESTS = [
# Youtube video
{
'add_ie': ['Youtube'],
'url': 'http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/',
'info_dict': {
'id': '_aUehQsCQtM',
'ext': 'mp4',
'upload_date': '20090102',
'title': 'The Electric Company | "Short I" | PBS KIDS GO!',
'description': 'md5:2439a8ef6d5a70e380c22f5ad323e5a8',
'uploader': 'PBS',
'uploader_id': 'PBS'
}
},
# Normal metacafe video
{
'url': 'http://www.metacafe.com/watch/11121940/news_stuff_you_wont_do_with_your_playstation_4/',
'md5': '6e0bca200eaad2552e6915ed6fd4d9ad',
'info_dict': {
'id': '11121940',
'ext': 'mp4',
'title': 'News: Stuff You Won\'t Do with Your PlayStation 4',
'uploader': 'ign',
'description': 'Sony released a massive FAQ on the PlayStation Blog detailing the PS4\'s capabilities and limitations.',
},
},
# AnyClip video
{
'url': 'http://www.metacafe.com/watch/an-dVVXnuY7Jh77J/the_andromeda_strain_1971_stop_the_bomb_part_3/',
'info_dict': {
'id': 'an-dVVXnuY7Jh77J',
'ext': 'mp4',
'title': 'The Andromeda Strain (1971): Stop the Bomb Part 3',
'uploader': 'anyclip',
'description': 'md5:38c711dd98f5bb87acf973d573442e67',
},
},
# age-restricted video
{
'url': 'http://www.metacafe.com/watch/5186653/bbc_internal_christmas_tape_79_uncensored_outtakes_etc/',
'md5': '98dde7c1a35d02178e8ab7560fe8bd09',
'info_dict': {
'id': '5186653',
'ext': 'mp4',
'title': 'BBC INTERNAL Christmas Tape \'79 - UNCENSORED Outtakes, Etc.',
'uploader': 'Dwayne Pipe',
'description': 'md5:950bf4c581e2c059911fa3ffbe377e4b',
'age_limit': 18,
},
},
# cbs video
{
'url': 'http://www.metacafe.com/watch/cb-8VD4r_Zws8VP/open_this_is_face_the_nation_february_9/',
'info_dict': {
'id': '8VD4r_Zws8VP',
'ext': 'flv',
'title': 'Open: This is Face the Nation, February 9',
'description': 'md5:8a9ceec26d1f7ed6eab610834cc1a476',
'duration': 96,
},
'params': {
# rtmp download
'skip_download': True,
},
},
# Movieclips.com video
{
'url': 'http://www.metacafe.com/watch/mv-Wy7ZU/my_week_with_marilyn_do_you_love_me/',
'info_dict': {
'id': 'mv-Wy7ZU',
'ext': 'mp4',
'title': 'My Week with Marilyn - Do You Love Me?',
'description': 'From the movie My Week with Marilyn - Colin (Eddie Redmayne) professes his love to Marilyn (Michelle Williams) and gets her to promise to return to set and finish the movie.',
'uploader': 'movie_trailers',
'duration': 176,
},
'params': {
'skip_download': 'requires rtmpdump',
}
}
]
def report_disclaimer(self):
self.to_screen('Retrieving disclaimer')
def _real_initialize(self):
# Retrieve disclaimer
self.report_disclaimer()
self._download_webpage(self._DISCLAIMER, None, False, 'Unable to retrieve disclaimer')
# Confirm age
disclaimer_form = {
'filters': '0',
'submit': "Continue - I'm over 18",
}
request = compat_urllib_request.Request(self._FILTER_POST, compat_urllib_parse.urlencode(disclaimer_form))
request.add_header('Content-Type', 'application/x-www-form-urlencoded')
self.report_age_confirmation()
self._download_webpage(request, None, False, 'Unable to confirm age')
def _real_extract(self, url):
# Extract id and simplified title from URL
mobj = re.match(self._VALID_URL, url)
if mobj is None:
raise ExtractorError('Invalid URL: %s' % url)
video_id = mobj.group(1)
# the video may come from an external site
        m_external = re.match(r'^(\w{2})-(.*)$', video_id)
if m_external is not None:
prefix, ext_id = m_external.groups()
# Check if video comes from YouTube
if prefix == 'yt':
return self.url_result('http://www.youtube.com/watch?v=%s' % ext_id, 'Youtube')
# CBS videos use theplatform.com
if prefix == 'cb':
return self.url_result('theplatform:%s' % ext_id, 'ThePlatform')
# Retrieve video webpage to extract further information
req = compat_urllib_request.Request('http://www.metacafe.com/watch/%s/' % video_id)
# AnyClip videos require the flashversion cookie so that we get the link
# to the mp4 file
mobj_an = re.match(r'^an-(.*?)$', video_id)
if mobj_an:
req.headers['Cookie'] = 'flashVersion=0;'
webpage = self._download_webpage(req, video_id)
# Extract URL, uploader and title from webpage
self.report_extraction(video_id)
video_url = None
mobj = re.search(r'(?m)&mediaURL=([^&]+)', webpage)
if mobj is not None:
mediaURL = compat_urllib_parse_unquote(mobj.group(1))
video_ext = mediaURL[-3:]
# Extract gdaKey if available
mobj = re.search(r'(?m)&gdaKey=(.*?)&', webpage)
if mobj is None:
video_url = mediaURL
else:
gdaKey = mobj.group(1)
video_url = '%s?__gda__=%s' % (mediaURL, gdaKey)
if video_url is None:
mobj = re.search(r'<video src="([^"]+)"', webpage)
if mobj:
video_url = mobj.group(1)
video_ext = 'mp4'
if video_url is None:
flashvars = self._search_regex(
r' name="flashvars" value="(.*?)"', webpage, 'flashvars',
default=None)
if flashvars:
vardict = compat_parse_qs(flashvars)
if 'mediaData' not in vardict:
raise ExtractorError('Unable to extract media URL')
mobj = re.search(
r'"mediaURL":"(?P<mediaURL>http.*?)",(.*?)"key":"(?P<key>.*?)"', vardict['mediaData'][0])
if mobj is None:
raise ExtractorError('Unable to extract media URL')
mediaURL = mobj.group('mediaURL').replace('\\/', '/')
video_url = '%s?__gda__=%s' % (mediaURL, mobj.group('key'))
video_ext = determine_ext(video_url)
if video_url is None:
player_url = self._search_regex(
r"swfobject\.embedSWF\('([^']+)'",
webpage, 'config URL', default=None)
if player_url:
config_url = self._search_regex(
r'config=(.+)$', player_url, 'config URL')
config_doc = self._download_xml(
config_url, video_id,
note='Downloading video config')
smil_url = config_doc.find('.//properties').attrib['smil_file']
smil_doc = self._download_xml(
smil_url, video_id,
note='Downloading SMIL document')
base_url = smil_doc.find('./head/meta').attrib['base']
video_url = []
for vn in smil_doc.findall('.//video'):
br = int(vn.attrib['system-bitrate'])
play_path = vn.attrib['src']
video_url.append({
'format_id': 'smil-%d' % br,
'url': base_url,
'play_path': play_path,
'page_url': url,
'player_url': player_url,
'ext': play_path.partition(':')[0],
})
if video_url is None:
raise ExtractorError('Unsupported video type')
video_title = self._html_search_regex(
r'(?im)<title>(.*) - Video</title>', webpage, 'title')
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
video_uploader = self._html_search_regex(
r'submitter=(.*?);|googletag\.pubads\(\)\.setTargeting\("(?:channel|submiter)","([^"]+)"\);',
webpage, 'uploader nickname', fatal=False)
duration = int_or_none(
self._html_search_meta('video:duration', webpage))
age_limit = (
18
if re.search(r'"contentRating":"restricted"', webpage)
else 0)
if isinstance(video_url, list):
formats = video_url
else:
formats = [{
'url': video_url,
'ext': video_ext,
}]
self._sort_formats(formats)
return {
'id': video_id,
'description': description,
'uploader': video_uploader,
'title': video_title,
'thumbnail': thumbnail,
'age_limit': age_limit,
'formats': formats,
'duration': duration,
}
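# Hedged usage sketch (added for illustration). Extractors are driven through
# YoutubeDL rather than instantiated directly; because this module uses
# relative imports it cannot run standalone, so the example is left as a
# comment. The URL is taken from the _TESTS list above.
#
#   import youtube_dl
#   with youtube_dl.YoutubeDL({'skip_download': True}) as ydl:
#       info = ydl.extract_info(
#           'http://www.metacafe.com/watch/11121940/'
#           'news_stuff_you_wont_do_with_your_playstation_4/',
#           download=False)
#       print(info['id'], info['title'])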
import inspect
import shutil
import socket
import time
import os
import json
import sys
from mtools.mlaunch.mlaunch import MLaunchTool, shutdown_host
from pymongo import MongoClient
from pymongo.errors import AutoReconnect, ConnectionFailure
from bson import SON
from nose.tools import *
from nose.plugins.attrib import attr
from nose.plugins.skip import Skip, SkipTest
class TestMLaunch(object):
""" This class tests functionality around the mlaunch tool. It has some
additional methods that are helpful for the tests, as well as a setup
and teardown method for all tests.
Don't call tests from other tests. This won't work as each test gets
its own data directory (for debugging).
"""
port = 33333
base_dir = 'data_test_mlaunch'
def __init__(self):
""" Constructor. """
self.use_auth = False
self.data_dir = ''
def setup(self):
""" start up method to create mlaunch tool and find free port """
self.tool = MLaunchTool()
# if the test data path exists, remove it
if os.path.exists(self.base_dir):
shutil.rmtree(self.base_dir)
def teardown(self):
""" tear down method after each test, removes data directory """
# kill all running processes
self.tool.discover()
ports = self.tool.get_tagged(['all', 'running'])
processes = self.tool._get_processes().values()
for p in processes:
p.terminate()
p.wait(10)
self.tool.wait_for(ports, to_start=False)
# quick sleep to avoid spurious test failures
time.sleep(0.1)
# if the test data path exists, remove it
if os.path.exists(self.base_dir):
shutil.rmtree(self.base_dir)
def run_tool(self, arg_str):
""" wrapper to call self.tool.run() with or without auth """
# name data directory according to test method name
caller = inspect.stack()[1][3]
self.data_dir = os.path.join(self.base_dir, caller)
# add data directory to arguments for all commands
arg_str += ' --dir %s' % self.data_dir
if arg_str.startswith('init') or arg_str.startswith('--'):
# add --port and --nojournal to init calls
arg_str += ' --port %i --nojournal --smallfiles' % self.port
if self.use_auth:
# add --auth to init calls if flag is set
arg_str += ' --auth'
self.tool.run(arg_str)
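    # Worked example (added for illustration) of the string run_tool()
    # composes for a test method named test_single, given the class
    # defaults above:
    #   run_tool("init --single")
    #   -> self.tool.run("init --single --dir data_test_mlaunch/test_single"
    #                    " --port 33333 --nojournal --smallfiles")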
# -- tests below ---
@raises(ConnectionFailure)
def test_test(self):
""" TestMLaunch setup and teardown test """
# test that data dir does not exist
assert not os.path.exists(self.data_dir)
# start mongo process on free test port
self.run_tool("init --single")
# call teardown method within this test
self.teardown()
# test that data dir does not exist anymore
assert not os.path.exists(self.data_dir)
# test that mongod is not running on this port anymore (raises ConnectionFailure)
mc = MongoClient('localhost:%i' % self.port)
def test_argv_run(self):
""" mlaunch: test true command line arguments, instead of passing into tool.run() """
# make command line arguments through sys.argv
sys.argv = ['mlaunch', 'init', '--single', '--dir', self.base_dir, '--port', str(self.port), '--nojournal']
self.tool.run()
assert self.tool.is_running(self.port)
def test_init_default(self):
""" mlaunch: test that 'init' command can be omitted, is default """
# make command line arguments through sys.argv
sys.argv = ['mlaunch', '--single', '--dir', self.base_dir, '--port', str(self.port), '--nojournal']
self.tool.run()
assert self.tool.is_running(self.port)
def test_init_default_arguments(self):
""" mlaunch: test that 'init' command is default, even when specifying arguments to run() """
self.run_tool("--single")
assert self.tool.is_running(self.port)
def test_single(self):
""" mlaunch: start stand-alone server and tear down again """
# start mongo process on free test port
self.run_tool("init --single")
# make sure node is running
assert self.tool.is_running(self.port)
# check if data directory and logfile exist
assert os.path.exists(os.path.join(self.data_dir, 'db'))
assert os.path.isfile(os.path.join(self.data_dir, 'mongod.log'))
# check that the tags are set correctly: 'single', 'mongod', 'running', <port>
assert set(self.tool.get_tags_of_port(self.port)) == set(['running', 'mongod', 'all', 'single', str(self.port)])
def test_replicaset_conf(self):
""" mlaunch: start replica set of 2 nodes + arbiter and compare rs.conf() """
# start mongo process on free test port
self.run_tool("init --replicaset --nodes 2 --arbiter")
# check if data directories exist
assert os.path.exists(os.path.join(self.data_dir, 'replset'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs1'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs2'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/arb'))
# create mongo client for the next tests
mc = MongoClient('localhost:%i' % self.port)
# get rs.conf() and check for 3 members, exactly one is arbiter
conf = mc['local']['system.replset'].find_one()
assert len(conf['members']) == 3
assert sum(1 for memb in conf['members'] if 'arbiterOnly' in memb and memb['arbiterOnly']) == 1
@timed(60)
@attr('slow')
def test_replicaset_ismaster(self):
""" mlaunch: start replica set and verify that first node becomes primary """
# start mongo process on free test port
self.run_tool("init --replicaset")
# wait for primary
assert self.tool._wait_for_primary()
        # insert a document and wait for it to replicate to 2 secondaries (wtimeout = 10 min)
mc = MongoClient('localhost:%i' % self.port)
mc.test.smokeWait.insert({}, w=2, wtimeout=10*60*1000)
def test_sharded_status(self):
""" mlaunch: start cluster with 2 shards of single nodes, 1 config server """
# start mongo process on free test port
self.run_tool("init --sharded 2 --single")
# check if data directories and logfile exist
assert os.path.exists(os.path.join(self.data_dir, 'shard01/db'))
assert os.path.exists(os.path.join(self.data_dir, 'shard02/db'))
assert os.path.exists(os.path.join(self.data_dir, 'config/db'))
assert os.path.isfile(os.path.join(self.data_dir, 'mongos.log'))
# create mongo client
mc = MongoClient('localhost:%i' % (self.port))
# check for 2 shards and 1 mongos
assert mc['config']['shards'].count() == 2
assert mc['config']['mongos'].count() == 1
def helper_output_has_line_with(self, keywords, output):
""" checks if output contains a line where all keywords are present. """
        return any( all(kw in line for kw in keywords) for line in output )
def test_verbose_sharded(self):
""" mlaunch: test verbose output when creating sharded cluster """
self.run_tool("init --sharded 2 --replicaset --config 3 --mongos 2 --verbose")
# capture stdout
output = sys.stdout.getvalue().splitlines()
keywords = ('rs1', 'rs2', 'rs3', 'shard01', 'shard02', 'config1', 'config2', 'config3')
# creating directory
for keyword in keywords:
# make sure every directory creation was announced to stdout
assert self.helper_output_has_line_with(['creating directory', keyword, 'db'], output)
assert self.helper_output_has_line_with(['creating directory', 'mongos'], output)
# launching nodes
for keyword in keywords:
assert self.helper_output_has_line_with(['launching', keyword, '--port', '--logpath', '--dbpath'], output)
# mongos
assert self.helper_output_has_line_with(['launching', 'mongos', '--port', '--logpath', str(self.port)], output)
assert self.helper_output_has_line_with(['launching', 'mongos', '--port', '--logpath', str(self.port + 1)], output)
# some fixed outputs
assert self.helper_output_has_line_with(['waiting for nodes to start'], output)
assert self.helper_output_has_line_with(['adding shards. can take up to 30 seconds'], output)
assert self.helper_output_has_line_with(['writing .mlaunch_startup file'], output)
assert self.helper_output_has_line_with(['done'], output)
# replica sets initialized, shard added
for keyword in ('shard01', 'shard02'):
assert self.helper_output_has_line_with(['replica set', keyword, 'initialized'], output)
assert self.helper_output_has_line_with(['shard', keyword, 'added successfully'], output)
def test_shard_names(self):
""" mlaunch: test if sharded cluster with explicit shard names works """
# start mongo process on free test port
self.run_tool("init --sharded tic tac toe --replicaset")
# create mongo client
mc = MongoClient('localhost:%i' % (self.port))
# check that shard names match
shard_names = set( doc['_id'] for doc in mc['config']['shards'].find() )
assert shard_names == set(['tic', 'tac', 'toe'])
def test_startup_file(self):
""" mlaunch: create .mlaunch_startup file in data path """
# Also tests utf-8 to byte conversion and json import
self.run_tool("init --single -v")
# check if the startup file exists
startup_file = os.path.join(self.data_dir, '.mlaunch_startup')
assert os.path.isfile(startup_file)
# compare content of startup file with tool.args
file_contents = self.tool._convert_u2b(json.load(open(startup_file, 'r')))
assert file_contents['parsed_args'] == self.tool.args
assert file_contents['unknown_args'] == self.tool.unknown_args
def test_single_mongos_explicit(self):
""" mlaunch: test if single mongos is running on start port and creates <datadir>/mongos.log """
# start 2 shards, 1 config server, 1 mongos
self.run_tool("init --sharded 2 --single --config 1 --mongos 1")
# check if mongos log files exist on correct ports
assert os.path.exists(os.path.join(self.data_dir, 'mongos.log'))
# check for correct port
assert self.tool.get_tagged('mongos') == set([self.port])
def test_single_mongos(self):
""" mlaunch: test if multiple mongos use separate log files in 'mongos' subdir """
# start 2 shards, 1 config server, 2 mongos
self.run_tool("init --sharded 2 --single --config 1 --mongos 1")
# check that 2 mongos are running
assert len( self.tool.get_tagged(['mongos', 'running']) ) == 1
def test_multiple_mongos(self):
""" mlaunch: test if multiple mongos use separate log files in 'mongos' subdir """
# start 2 shards, 1 config server, 2 mongos
self.run_tool("init --sharded 2 --single --config 1 --mongos 2")
# this also tests that mongos are started at the beginning of the port range
assert os.path.exists(os.path.join(self.data_dir, 'mongos', 'mongos_%i.log' % (self.port)))
assert os.path.exists(os.path.join(self.data_dir, 'mongos', 'mongos_%i.log' % (self.port + 1)))
# check that 2 mongos are running
assert len( self.tool.get_tagged(['mongos', 'running']) ) == 2
def test_filter_valid_arguments(self):
""" mlaunch: check arguments unknown to mlaunch against mongos and mongod """
# filter against mongod
result = self.tool._filter_valid_arguments("--slowms 500 -vvv --configdb localhost:27017 --foobar".split(), "mongod")
assert result == "--slowms 500 -vvv"
# filter against mongos
result = self.tool._filter_valid_arguments("--slowms 500 -vvv --configdb localhost:27017 --foobar".split(), "mongos")
assert result == "-vvv --configdb localhost:27017"
def test_large_replicaset_arbiter(self):
""" mlaunch: start large replica set of 12 nodes with arbiter """
# start mongo process on free test port (don't need journal for this test)
self.run_tool("init --replicaset --nodes 11 --arbiter")
# check if data directories exist
assert os.path.exists(os.path.join(self.data_dir, 'replset'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs1'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs2'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs3'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs4'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs5'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs6'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs7'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs8'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs9'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs10'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs11'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/arb'))
# create mongo client for the next tests
mc = MongoClient('localhost:%i' % self.port)
# get rs.conf() and check for 12 members, exactly one arbiter
conf = mc['local']['system.replset'].find_one()
assert len(conf['members']) == 12
assert sum(1 for memb in conf['members'] if 'arbiterOnly' in memb and memb['arbiterOnly']) == 1
# check that 12 nodes are discovered
assert len(self.tool.get_tagged('all')) == 12
def test_large_replicaset_noarbiter(self):
""" mlaunch: start large replica set of 12 nodes without arbiter """
# start mongo process on free test port (don't need journal for this test)
self.run_tool("init --replicaset --nodes 12")
# check if data directories exist
assert os.path.exists(os.path.join(self.data_dir, 'replset'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs1'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs2'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs3'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs4'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs5'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs6'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs7'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs8'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs9'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs10'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs11'))
assert os.path.exists(os.path.join(self.data_dir, 'replset/rs12'))
# create mongo client for the next tests
mc = MongoClient('localhost:%i' % self.port)
# get rs.conf() and check for 12 members, no arbiters
conf = mc['local']['system.replset'].find_one()
assert len(conf['members']) == 12
assert sum(1 for memb in conf['members'] if 'arbiterOnly' in memb and memb['arbiterOnly']) == 0
def test_stop(self):
""" mlaunch: test stopping all nodes """
self.run_tool("init --replicaset")
self.run_tool("stop")
# make sure all nodes are down
nodes = self.tool.get_tagged('all')
assert all( not self.tool.is_running(node) for node in nodes )
def test_kill(self):
""" mlaunch: test killing all nodes """
# start sharded cluster and kill with default signal (15)
self.run_tool("init --sharded 2 --single")
self.run_tool("kill")
# make sure all nodes are down
nodes = self.tool.get_tagged('all')
assert all( not self.tool.is_running(node) for node in nodes )
# start nodes again, this time, kill with string "SIGTERM"
self.run_tool("start")
self.run_tool("kill --signal SIGTERM")
# make sure all nodes are down
nodes = self.tool.get_tagged('all')
assert all( not self.tool.is_running(node) for node in nodes )
# start nodes again, this time, kill with signal 9 (SIGKILL)
self.run_tool("start")
self.run_tool("kill --signal 9")
# make sure all nodes are down
nodes = self.tool.get_tagged('all')
assert all( not self.tool.is_running(node) for node in nodes )
def test_stop_start(self):
""" mlaunch: test stop and then re-starting nodes """
# start mongo process on free test port
self.run_tool("init --replicaset")
self.run_tool("stop")
time.sleep(1)
self.run_tool("start")
# make sure all nodes are running
nodes = self.tool.get_tagged('all')
assert all( self.tool.is_running(node) for node in nodes )
@timed(180)
@attr('slow')
def test_kill_partial(self):
""" mlaunch: test killing and restarting tagged groups on different tags """
# tags to cycle through: shard name, shard number, mongos, config number, and a port
tags = ['shard01', 'shard 1', 'mongos', 'config 1', str(self.port)]
# start large cluster
self.run_tool("init --sharded 2 --replicaset --config 3 --mongos 3")
# make sure all nodes are running
nodes = self.tool.get_tagged('all')
assert all( self.tool.is_running(node) for node in nodes )
# go through all tags, stop nodes for each tag, confirm only the tagged ones are down, start again
for tag in tags:
print "---------", tag
self.run_tool("kill %s" % tag)
assert self.tool.get_tagged('down') == self.tool.get_tagged(tag)
time.sleep(1)
# short sleep, because travis seems to be sensitive and sometimes fails otherwise
self.run_tool("start")
assert len(self.tool.get_tagged('down')) == 0
time.sleep(1)
# make sure primaries are running again (we just failed them over above).
# while True is ok, because test times out after some time
while True:
primaries = self.tool.get_tagged('primary')
if len(primaries) == 2:
break
time.sleep(1)
self.tool.discover()
# test for primary, but as the nodes lose their tags, needs to be manual
self.run_tool("kill primary")
assert len(self.tool.get_tagged('down')) == 2
def test_restart_with_unknown_args(self):
""" mlaunch: test start command with extra unknown arguments """
# init environment (sharded, single shards ok)
self.run_tool("init --single")
# get verbosity of mongod, assert it is 0
mc = MongoClient(port=self.port)
loglevel = mc.admin.command(SON([('getParameter', 1), ('logLevel', 1)]))
assert loglevel[u'logLevel'] == 0
# stop and start nodes but pass in unknown_args
self.run_tool("stop")
# short sleep, because travis seems to be sensitive and sometimes fails otherwise
time.sleep(1)
self.run_tool("start -vv")
# compare that the nodes are restarted with the new unknown_args, assert loglevel is now 2
mc = MongoClient(port=self.port)
loglevel = mc.admin.command(SON([('getParameter', 1), ('logLevel', 1)]))
assert loglevel[u'logLevel'] == 2
# stop and start nodes without unknown args again
self.run_tool("stop")
# short sleep, because travis seems to be sensitive and sometimes fails otherwise
time.sleep(1)
self.run_tool("start")
# compare that the nodes are restarted with the previous loglevel
mc = MongoClient(port=self.port)
loglevel = mc.admin.command(SON([('getParameter', 1), ('logLevel', 1)]))
assert loglevel[u'logLevel'] == 0
def test_start_stop_single_repeatedly(self):
""" mlaunch: test starting and stopping single node in short succession """
# repeatedly start single node
self.run_tool("init --single")
for i in range(10):
self.run_tool("stop")
# short sleep, because travis seems to be sensitive and sometimes fails otherwise
time.sleep(1)
self.run_tool("start")
@raises(SystemExit)
def test_init_init_replicaset(self):
""" mlaunch: test calling init a second time on the replica set """
# init a replica set
self.run_tool("init --replicaset")
# now stop and init again, this should work if everything is stopped and identical environment
self.run_tool("stop")
self.run_tool("init --replicaset")
# but another init should fail with a SystemExit
self.run_tool("init --replicaset")
def test_start_stop_replicaset_repeatedly(self):
""" mlaunch: test starting and stopping replica set in short succession """
# repeatedly start replicaset nodes
self.run_tool("init --replicaset")
for i in range(10):
self.run_tool("stop")
# short sleep, because travis seems to be sensitive and sometimes fails otherwise
time.sleep(1)
self.run_tool("start")
@attr('slow')
@attr('auth')
def test_repeat_all_with_auth(self):
""" this test will repeat all the tests in this class (excluding itself) but with auth. """
tests = [t for t in inspect.getmembers(self, predicate=inspect.ismethod) if t[0].startswith('test_') ]
self.use_auth = True
for name, method in tests:
# don't call any tests that use auth already (tagged with 'auth' attribute), including this method
if hasattr(method, 'auth'):
continue
setattr(method.__func__, 'description', method.__doc__.strip() + ', with auth.')
yield ( method, )
self.use_auth = False
# TODO
# - test functionality of --binarypath, --verbose, --name
# All tests that use auth need to be decorated with @attr('auth')
def helper_adding_default_user(self, environment):
""" This is a helper function for the next test: test_adding_default_user() """
self.run_tool("init %s --auth" % environment)
# connect and authenticate with default credentials: user / password on admin database
mc = MongoClient('localhost:%i' % self.port)
mc.admin.authenticate('user', password='password')
# check if the user roles are correctly set to the default roles
user = mc.admin.system.users.find_one()
assert set(user['roles']) == set(self.tool._default_auth_roles)
@attr('auth')
def test_adding_default_user(self):
envs = (
"--single",
"--replicaset",
"--sharded 2 --single",
"--sharded 2 --replicaset",
"--sharded 2 --single --config 3"
)
for env in envs:
method = self.helper_adding_default_user
setattr(method.__func__, 'description', method.__doc__.strip() + ', with ' + env)
yield (method, env)
@attr('auth')
def test_adding_default_user_no_mongos(self):
""" mlaunch: test that even with --mongos 0 there is a user created """
self.run_tool("init --sharded 2 --single --mongos 0 --auth")
# connect to config server instead to check for credentials (no mongos)
ports = list(self.tool.get_tagged('config'))
mc = MongoClient('localhost:%i' % ports[0])
mc.admin.authenticate('user', password='password')
# check if the user roles are correctly set to the default roles
user = mc.admin.system.users.find_one()
assert set(user['roles']) == set(self.tool._default_auth_roles)
@attr('auth')
def test_adding_custom_user(self):
""" mlaunch: test custom username and password and custom roles. """
self.run_tool("init --single --auth --username corben --password fitzroy --auth-roles dbAdminAnyDatabase readWriteAnyDatabase userAdminAnyDatabase")
# connect and authenticate with default credentials: user / password on admin database
mc = MongoClient('localhost:%i' % self.port)
mc.admin.authenticate('corben', password='fitzroy')
# check if the user roles are correctly set to the specified roles
user = mc.admin.system.users.find_one()
print user
assert set(user['roles']) == set(["dbAdminAnyDatabase", "readWriteAnyDatabase", "userAdminAnyDatabase"])
assert user['user'] == 'corben'
def test_existing_environment(self):
""" mlaunch: test warning for overwriting an existing environment """
self.run_tool("init --single")
self.run_tool("stop")
try:
self.run_tool("init --replicaset")
except SystemExit as e:
assert 'different environment already exists' in e.message
def test_upgrade_v1_to_v2(self):
""" mlaunch: test upgrade from protocol version 1 to 2. """
startup_options = {"name": "replset", "replicaset": True, "dir": "./data", "authentication": False, "single": False, "arbiter": False, "mongos": 1, "binarypath": None, "sharded": None, "nodes": 3, "config": 1, "port": 33333, "restart": False, "verbose": False}
# create directory
self.run_tool("init --replicaset")
self.run_tool("stop")
# replace startup options
with open(os.path.join(self.base_dir, 'test_upgrade_v1_to_v2', '.mlaunch_startup'), 'w') as f:
json.dump(startup_options, f, -1)
# now start with old config and check if upgrade worked
self.run_tool("start")
with open(os.path.join(self.base_dir, 'test_upgrade_v1_to_v2', '.mlaunch_startup'), 'r') as f:
startup_options = json.load(f)
assert startup_options['protocol_version'] == 2
def test_sharded_named_1(self):
""" mlaunch: test --sharded <name> for a single shard """
self.run_tool("init --sharded foo --single")
assert len(self.tool.get_tagged('foo')) == 1
def test_mlaunch_list(self):
""" mlaunch: test list command """
self.run_tool("init --sharded 2 --replicaset --mongos 2")
self.run_tool("list")
# capture stdout and only keep from actual LIST output
output = sys.stdout.getvalue().splitlines()
output = output[output.index( next(o for o in output if o.startswith('PROCESS')) ):]
assert self.helper_output_has_line_with(['PROCESS', 'STATUS', 'PORT'], output) == 1
assert self.helper_output_has_line_with(['mongos', 'running'], output) == 2
assert self.helper_output_has_line_with(['config server', 'running'], output) == 1
assert self.helper_output_has_line_with(['shard01'], output) == 1
assert self.helper_output_has_line_with(['shard02'], output) == 1
assert self.helper_output_has_line_with(['primary', 'running'], output) == 2
assert self.helper_output_has_line_with(['running', 'running'], output) == 9
def helper_which(self, pgm):
""" equivalent of which command """
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
    p = os.path.join(p, pgm)
    if os.path.exists(p) and os.access(p, os.X_OK):
        return p
def test_mlaunch_binary_path_start(self):
""" mlaunch: test if --binarypath is persistent between init and start """
# get true binary path (to test difference to not specifying one)
path = self.helper_which('mongod')
path = path[:path.rfind('/')]
self.run_tool("init --single --binarypath %s" % path)
self.run_tool("stop")
self.run_tool("start")
assert self.tool.loaded_args['binarypath'] == path
assert self.tool.startup_info[str(self.port)].startswith('%s/mongod' % path)
self.run_tool("stop")
failed = False
try:
    self.run_tool("start --binarypath /some/other/path")
except:
    # run_tool is expected to fail here: there is no mongod at that path
    failed = True
assert failed, 'start with a non-existing --binarypath should fail'
assert self.tool.args['binarypath'] == '/some/other/path'
assert self.tool.startup_info[str(self.port)].startswith('/some/other/path/mongod')
@raises(SystemExit)
def test_single_and_arbiter(self):
""" mlaunch: test --single with --arbiter error """
self.run_tool("init --single --arbiter")
def test_oplogsize_config(self):
""" mlaunch: test config server never receives --oplogSize parameter """
self.run_tool("init --sharded 1 --single --oplogSize 19 --verbose")
output = sys.stdout.getvalue().splitlines()
output_launch_config = next(o for o in output if '--configsvr' in o)
assert '--oplogSize' not in output_launch_config
if __name__ == '__main__':
# run individual tests with normal print output
tml = TestMLaunch()
tml.setup()
tml.test_kill_partial()
tml.teardown()
| |
"""
=====================================================
Using the RESTORE algorithm for robust tensor fitting
=====================================================
The diffusion tensor model takes into account certain kinds of noise (thermal),
but not other kinds, such as "physiological" noise. For example, if a subject
moves during the acquisition of one of the diffusion-weighted samples, this
might have a substantial effect on the parameters of the tensor fit calculated
in all voxels in the brain for that subject. One of the pernicious consequences
of this is that it can lead to a wrong interpretation of group differences. For
example, some groups of participants (e.g. young children, patient groups,
etc.) are particularly prone to motion and differences in tensor parameters and
derived statistics (such as FA) due to motion would be confounded with actual
differences in the physical properties of the white matter. An example of this
was shown in a paper by Yendiki et al. [1]_.
One of the strategies to deal with this problem is to apply an automatic method
for detecting outliers in the data, excluding them, and refitting the model
without them. This is often referred to as
"robust model fitting". One of the common algorithms for robust tensor fitting
is called RESTORE, and was first proposed by Chang et al. [2]_.
In the following example, we will demonstrate how to use RESTORE on a simulated
dataset, which we will corrupt by adding intermittent noise.
We start by importing a few of the libraries we will use.
"""
import numpy as np
import nibabel as nib
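"""
Before turning to the real implementation, it may help to see the core idea of
robust fitting in isolation. The following sketch is not part of dipy: a plain
line fit stands in for the tensor model, and the cutoff of 3 standard
deviations is an arbitrary choice for illustration. It performs a single
reject-and-refit round, which is the essence of what RESTORE does per voxel:
"""
def reject_and_refit(x, y, n_std=3.0):
    # fit, flag points with unusually large residuals, refit without them
    coeffs = np.polyfit(x, y, 1)
    residuals = y - np.polyval(coeffs, x)
    keep = np.abs(residuals) < n_std * np.std(residuals)
    return np.polyfit(x[keep], y[keep], 1)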
"""
The module ``dipy.reconst.dti`` contains the implementation of tensor fitting,
including an implementation of the RESTORE algorithm.
"""
import dipy.reconst.dti as dti
"""
``dipy.data`` is used for small datasets that we use in tests and examples.
"""
import dipy.data as dpd
"""
``dipy.viz.fvtk`` is used for 3D visualization and matplotlib for 2D
visualizations:
"""
import dipy.viz.fvtk as fvtk
import matplotlib.pyplot as plt
"""
If needed, the fetch_stanford_hardi function will download the raw dMRI dataset
of a single subject. The size of this dataset is 87 MBytes. You only need to
fetch it once.
"""
dpd.fetch_stanford_hardi()
img, gtab = dpd.read_stanford_hardi()
"""
We initialize a DTI model class instance using the gradient table used in the
measurement. By default, dti.TensorModel will use a weighted least-squares
algorithm (described in [2]_) to fit the parameters of the model. We initialize
this model as a baseline for comparison of noise-corrupted models:
"""
dti_wls = dti.TensorModel(gtab)
"""
For the purpose of this example, we will focus on the data from a region of
interest (ROI) surrounding the Corpus Callosum. We define that ROI as the
following indices:
"""
roi_idx = (slice(20,50), slice(55,85), slice(38,39))
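# (x, y, z) index ranges: a 30x30 in-plane patch on a single axial slice (z=38)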
"""
And use them to index into the data:
"""
data = img.get_data()[roi_idx]
"""
This data-set is not very noisy, so we will artificially corrupt it to simulate
the effects of "physiological" noise, such as subject motion. But first, let's
establish a baseline, using the data as it is:
"""
fit_wls = dti_wls.fit(data)
fa1 = fit_wls.fa
evals1 = fit_wls.evals
evecs1 = fit_wls.evecs
cfa1 = dti.color_fa(fa1, evecs1)
sphere = dpd.get_sphere('symmetric724')
"""
We visualize the tensor ellipsoids in the ROI using fvtk:
"""
ren = fvtk.ren()
fvtk.add(ren, fvtk.tensor(evals1, evecs1, cfa1, sphere))
print('Saving illustration as tensor_ellipsoids_wls.png')
fvtk.record(ren, n_frames=1, out_path='tensor_ellipsoids_wls.png',
size=(600, 600))
"""
.. figure:: tensor_ellipsoids_wls.png
:align: center
**Tensor Ellipsoids**.
"""
fvtk.clear(ren)
"""
Next, we corrupt the data with some noise. To simulate a subject that moves
intermittently, we will replace a few of the volumes with a very low signal:
"""
noisy_data = np.copy(data)
noisy_idx = slice(-10, None) # The last 10 volumes are corrupted
noisy_data[..., noisy_idx] = 1.0
"""
We use the same model to fit this noisy data:
"""
fit_wls_noisy = dti_wls.fit(noisy_data)
fa2 = fit_wls_noisy.fa
evals2 = fit_wls_noisy.evals
evecs2 = fit_wls_noisy.evecs
cfa2 = dti.color_fa(fa2, evecs2)
ren = fvtk.ren()
fvtk.add(ren, fvtk.tensor(evals2, evecs2, cfa2, sphere))
print('Saving illustration as tensor_ellipsoids_wls_noisy.png')
fvtk.record(ren, n_frames=1, out_path='tensor_ellipsoids_wls_noisy.png',
size=(600, 600))
"""
In places where the tensor model is particularly sensitive to noise, the
resulting tensor field will be distorted:
.. figure:: tensor_ellipsoids_wls_noisy.png
:align: center
**Tensor Ellipsoids from noisy data**.
To estimate the parameters from the noisy data using RESTORE, we need to
estimate what would be a reasonable amount of noise to expect in the
measurement. There are two common ways of doing that. The first is to look at
the variance in the signal in parts of the volume outside of the brain, or in
the ventricles, where the signal is expected to be identical regardless of
the direction of diffusion weighting. The variance in these regions is
therefore noise. Another option is available if several non diffusion-weighted
volumes were acquired: in this case, the variance in these volumes is an
estimate of 'reasonable' noise in the data.
"""
mean_std = np.mean(np.std(data[..., gtab.b0s_mask], -1))
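# (For the first option described above, one could instead estimate the noise
# from a region assumed to lie outside the brain, e.g.:
#     background = img.get_data()[:10, :10, 38:39]
#     sigma_bg = np.mean(np.std(background, -1))
# the corner indices here are an arbitrary assumption for illustration.)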
"""
This estimate is usually based on a small sample, and is thus a bit biased (for
a proof of that fact, see the following derivation_).
.. _derivation: http://nbviewer.ipython.org/4287207
Therefore, we apply a small-sample correction. In this case, the bias is rather
small:
"""
from scipy.special import gamma
n = np.sum(gtab.b0s_mask)
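# The factor sqrt(2/(n-1)) * gamma(n/2) / gamma((n-1)/2) below is the
# standard small-sample bias constant (c4) for the sample standard deviation.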
bias = mean_std*(1. - np.sqrt(2. / (n-1)) * (gamma(n / 2.) / gamma((n-1) / 2.)))
sigma = mean_std + bias
"""
This estimate of the standard deviation will be used by the RESTORE algorithm
to identify the outliers in each voxel and is given as an input when
initializing the TensorModel object:
"""
dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
fit_restore_noisy = dti_restore.fit(noisy_data)
fa3 = fit_restore_noisy.fa
evals3 = fit_restore_noisy.evals
evecs3 = fit_restore_noisy.evecs
cfa3 = dti.color_fa(fa3, evecs3)
ren = fvtk.ren()
fvtk.add(ren, fvtk.tensor(evals3, evecs3, cfa3, sphere))
print('Saving illustration as tensor_ellipsoids_restore_noisy.png')
fvtk.record(ren, n_frames=1, out_path='tensor_ellipsoids_restore_noisy.png',
size=(600, 600))
"""
.. figure:: tensor_ellipsoids_restore_noisy.png
:align: center
**Tensor Ellipsoids from noisy data recovered with RESTORE**.
The tensor field looks largely restored to its noiseless state in this
image, but to convince ourselves further that RESTORE did the right thing, we will
compare the distribution of FA in this region relative to the baseline, using
the RESTORE estimate and the WLS estimate.
"""
fig_hist, ax = plt.subplots(1)
ax.hist(np.ravel(fa2), color='b', histtype='step', label='WLS')
ax.hist(np.ravel(fa3), color='r', histtype='step', label='RESTORE')
ax.hist(np.ravel(fa1), color='g', histtype='step', label='Original')
ax.set_xlabel('Fractional Anisotropy')
ax.set_ylabel('Count')
plt.legend()
fig_hist.savefig('dti_fa_distributions.png')
"""
.. figure:: dti_fa_distributions.png
:align: center
This demonstrates that RESTORE can recover a distribution of FA that more
closely resembles the baseline distribution of the noiseless signal, and
demonstrates the utility of the method for data with intermittent
noise. Importantly, this method assumes that the tensor is a good
representation of the diffusion signal in the data. If you have reason to
believe this is not the case (for example, you have data with very high b
values and you are particularly interested in locations in the brain in which
fibers cross), you might want to use a different method to fit your data.
References
----------
.. [1] Yendiki, A, Koldewyn, K, Kakunoori, S, Kanwisher, N, and Fischl,
B. (2013). Spurious group differences due to head motion in a diffusion
MRI study. Neuroimage.
.. [2] Chang, L-C, Jones, DK and Pierpaoli, C (2005). RESTORE: robust estimation
of tensors by outlier rejection. MRM, 53: 1088-95.
.. [3] Chung, SW, Lu, Y, Henry, RG (2006). Comparison of bootstrap
approaches for estimation of uncertainties of DTI parameters.
NeuroImage 33, 531-541.
.. include:: ../links_names.inc
"""
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
from oslo.config import cfg
from heat.openstack.common import importutils
from heat.openstack.common import log as logging
logger = logging.getLogger(__name__)
from heat.common import exception
from heat.common import heat_keystoneclient as hkc
from novaclient import client as novaclient
try:
from swiftclient import client as swiftclient
except ImportError:
swiftclient = None
logger.info('swiftclient not available')
try:
from quantumclient.v2_0 import client as quantumclient
except ImportError:
quantumclient = None
logger.info('quantumclient not available')
try:
from cinderclient.v1 import client as cinderclient
from cinderclient import exceptions as cinder_exceptions
except ImportError:
cinderclient = None
logger.info('cinderclient not available')
cloud_opts = [
cfg.StrOpt('cloud_backend',
default=None,
help="Cloud module to use as a backend. Defaults to OpenStack.")
]
cfg.CONF.register_opts(cloud_opts)
class OpenStackClients(object):
'''
Convenience class to create and cache client instances.
'''
def __init__(self, context):
self.context = context
self._nova = {}
self._keystone = None
self._swift = None
self._quantum = None
self._cinder = None
def keystone(self):
if self._keystone:
return self._keystone
self._keystone = hkc.KeystoneClient(self.context)
return self._keystone
def nova(self, service_type='compute'):
if service_type in self._nova:
return self._nova[service_type]
con = self.context
args = {
'project_id': con.tenant,
'auth_url': con.auth_url,
'service_type': service_type,
}
if con.password is not None:
args['username'] = con.username
args['api_key'] = con.password
elif con.auth_token is not None:
args['username'] = con.service_user
args['api_key'] = con.service_password
args['project_id'] = con.service_tenant
args['proxy_token'] = con.auth_token
args['proxy_tenant_id'] = con.tenant_id
else:
logger.error("Nova connection failed, no password or auth_token!")
return None
client = None
try:
# Workaround for issues with python-keyring, need no_cache=True
# ref https://bugs.launchpad.net/python-novaclient/+bug/1020238
# TODO(shardy): May be able to remove when the bug above is fixed
client = novaclient.Client(1.1, no_cache=True, **args)
client.authenticate()
self._nova[service_type] = client
except TypeError:
# for compatibility with essex, which doesn't have no_cache=True
# TODO(shardy): remove when we no longer support essex
client = novaclient.Client(1.1, **args)
client.authenticate()
self._nova[service_type] = client
return client
def swift(self):
if swiftclient is None:
return None
if self._swift:
return self._swift
con = self.context
args = {
'auth_version': '2'
}
if con.password is not None:
args['user'] = con.username
args['key'] = con.password
args['authurl'] = con.auth_url
args['tenant_name'] = con.tenant
elif con.auth_token is not None:
args['user'] = None
args['key'] = None
args['authurl'] = None
args['preauthtoken'] = con.auth_token
# Lookup endpoint for object-store service type
service_type = 'object-store'
endpoints = self.keystone().service_catalog.get_endpoints(
service_type=service_type)
if len(endpoints[service_type]) == 1:
args['preauthurl'] = endpoints[service_type][0]['publicURL']
else:
logger.error("No endpoint found for %s service type" %
service_type)
return None
else:
logger.error("Swift connection failed, no password or " +
"auth_token!")
return None
self._swift = swiftclient.Connection(**args)
return self._swift
def quantum(self):
if quantumclient is None:
return None
if self._quantum:
logger.debug('using existing _quantum')
return self._quantum
con = self.context
args = {
'auth_url': con.auth_url,
'service_type': 'network',
}
if con.password is not None:
args['username'] = con.username
args['password'] = con.password
args['tenant_name'] = con.tenant
elif con.auth_token is not None:
args['username'] = con.service_user
args['password'] = con.service_password
args['tenant_name'] = con.service_tenant
args['token'] = con.auth_token
else:
logger.error("Quantum connection failed, "
"no password or auth_token!")
return None
logger.debug('quantum args %s', args)
self._quantum = quantumclient.Client(**args)
return self._quantum
def cinder(self):
if cinderclient is None:
return self.nova('volume')
if self._cinder:
return self._cinder
con = self.context
args = {
'project_id': con.tenant,
'auth_url': con.auth_url,
'service_type': 'volume',
}
if con.password is not None:
args['username'] = con.username
args['api_key'] = con.password
elif con.auth_token is not None:
args['username'] = con.service_user
args['api_key'] = con.service_password
args['project_id'] = con.service_tenant
args['proxy_token'] = con.auth_token
args['proxy_tenant_id'] = con.tenant_id
else:
logger.error("Cinder connection failed, "
"no password or auth_token!")
return None
logger.debug('cinder args %s', args)
self._cinder = cinderclient.Client(**args)
return self._cinder
def attach_volume_to_instance(self, server_id, volume_id, device_id):
logger.warn('Attaching InstanceId %s VolumeId %s Device %s' %
(server_id, volume_id, device_id))
va = self.nova().volumes.create_server_volume(
server_id=server_id,
volume_id=volume_id,
device=device_id)
vol = self.cinder().volumes.get(va.id)
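# poll until the attachment completes and the volume leaves the
# transitional 'available'/'attaching' states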
while vol.status == 'available' or vol.status == 'attaching':
eventlet.sleep(1)
vol.get()
if vol.status == 'in-use':
return va.id
else:
raise exception.Error(vol.status)
def detach_volume_from_instance(self, server_id, volume_id):
logger.info('VolumeAttachment un-attaching %s %s' %
(server_id, volume_id))
try:
vol = self.cinder().volumes.get(volume_id)
except cinder_exceptions.NotFound:
logger.warning('Volume %s - not found' %
(volume_id))
return
try:
self.nova().volumes.delete_server_volume(server_id,
volume_id)
except novaclient.exceptions.NotFound:
logger.warning('Deleting VolumeAttachment %s %s - not found' %
(server_id, volume_id))
try:
logger.info('un-attaching %s, status %s' % (volume_id, vol.status))
while vol.status == 'in-use':
logger.info('trying to un-attach %s, but still %s' %
(volume_id, vol.status))
eventlet.sleep(1)
try:
self.nova().volumes.delete_server_volume(
server_id,
volume_id)
except Exception:
pass
vol.get()
logger.info('volume status of %s now %s' % (volume_id, vol.status))
except cinder_exceptions.NotFound:
logger.warning('Volume %s - not found' %
(volume_id))
if cfg.CONF.cloud_backend:
cloud_backend_module = importutils.import_module(cfg.CONF.cloud_backend)
Clients = cloud_backend_module.Clients
else:
Clients = OpenStackClients
logger.debug('Using backend %s' % Clients)
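# Example usage (illustrative only; in practice the RequestContext comes from
# the service, and its construction is simplified here):
#
#   from heat.common import context
#   ctx = context.RequestContext(username='admin', password='secret',
#                                tenant='demo',
#                                auth_url='http://keystone:5000/v2.0')
#   clients = Clients(ctx)
#   server = clients.nova().servers.get(server_id)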
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for checkpoints tools."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver as saver_lib
def _create_checkpoints(sess, checkpoint_dir):
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
checkpoint_state_name = "checkpoint"
v1 = variable_scope.get_variable("var1", [1, 10])
v2 = variable_scope.get_variable("var2", [10, 10])
v3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
v4 = variable_scope.get_variable("var4", [9, 9])
sess.run(variables.global_variables_initializer())
v1_value, v2_value, v3_value, v4_value = sess.run([v1, v2, v3, v4])
saver = saver_lib.Saver()
saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
return v1_value, v2_value, v3_value, v4_value
def _create_partition_checkpoints(sess, checkpoint_dir):
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
checkpoint_state_name = "checkpoint"
with variable_scope.variable_scope("scope"):
v1 = variable_scope.get_variable(
name="var1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
sess.run(variables.global_variables_initializer())
v1_value = sess.run(v1._get_variable_list())
saver = saver_lib.Saver()
saver.save(
sess,
checkpoint_prefix,
global_step=0,
latest_filename=checkpoint_state_name)
return v1_value
class CheckpointsTest(test.TestCase):
def testNoCheckpoints(self):
checkpoint_dir = self.get_temp_dir() + "/no_checkpoints"
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), [])
def testNoTensor(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
with self.assertRaises(errors_impl.OpError):
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var5"), [])
def testGetTensor(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var1"), v1)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var2"), v2)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "var3"), v3)
self.assertAllEqual(
checkpoint_utils.load_variable(checkpoint_dir, "useful_scope/var4"), v4)
def testGetAllVariables(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_create_checkpoints(session, checkpoint_dir)
self.assertEqual(
checkpoint_utils.list_variables(checkpoint_dir),
[("useful_scope/var4", [9, 9]), ("var1", [1, 10]), ("var2", [10, 10]),
("var3", [100, 100])])
def testInitFromCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable("my1", [1, 10])
with variable_scope.variable_scope("some_other_scope"):
my2 = variable_scope.get_variable("my2", [10, 10])
with variable_scope.variable_scope("other_useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
my3 = variable_scope.get_variable("my3", [100, 100])
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"var1": "some_scope/my1",
"useful_scope/": "some_scope/some_other_scope/other_useful_scope/",
})
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"var2": "some_scope/some_other_scope/my2",
"var3": my3,
})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
# Check that tensors are not explicitly in the graph.
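# (init_from_checkpoint should wire up restore ops rather than embed the
# checkpointed values as constants, so the serialized graph stays small.)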
self.assertLess(len(str(session.graph.as_graph_def())), 29000)
def testInitialValueComesFromCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, _, _, _ = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope(
"some_scope", initializer=init_ops.zeros_initializer()):
my1 = variable_scope.get_variable("my1", [1, 10])
before = my1.initialized_value()
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {"var1": my1})
after = my1.initialized_value()
self.assertAllEqual(session.run(before), [[0.0] * 10])
self.assertAllEqual(session.run(after), v1)
session.run(variables.global_variables_initializer())
self.assertAllEqual(session.run(my1), v1)
self.assertAllEqual(session.run(my1.initialized_value()), v1)
self.assertAllClose(session.run(before), v1)
self.assertAllClose(session.run(after), v1)
with self.assertRaises(AssertionError):
self.assertAllClose(v1, [[0.0] * 10])
def testInitWithScopeDoesNotCaptureSuffixes(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, v4 = _create_checkpoints(session, checkpoint_dir)
with ops.Graph().as_default() as g:
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
with variable_scope.variable_scope("useful_scope_1"):
my5_init = [[1.0, 2.0], [3.0, 4.0]]
my5 = variable_scope.get_variable("var5", initializer=my5_init)
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"useful_scope/": "useful_scope/"})
with self.session(graph=g) as session:
session.run(variables.global_variables_initializer())
self.assertAllEqual(my4.eval(session), v4)
self.assertAllEqual(my5.eval(session), my5_init)
def testRestoreRunsOnSameDevice(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_create_checkpoints(session, checkpoint_dir)
with ops.Graph().as_default():
with ops.device("/job:ps"):
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"useful_scope/": "useful_scope/"})
def testInitFromRootCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable("var1", [1, 10])
my2 = variable_scope.get_variable("var2", [10, 10])
my3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"/": "some_scope/",})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
def testInitToRootCheckpoint(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, v2, v3, v4 = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
my1 = variable_scope.get_variable("var1", [1, 10])
my2 = variable_scope.get_variable("var2", [10, 10])
my3 = variable_scope.get_variable("var3", [100, 100])
with variable_scope.variable_scope("useful_scope"):
my4 = variable_scope.get_variable("var4", [9, 9])
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"/": "/",})
session.run(variables.global_variables_initializer())
self.assertAllEqual(my1.eval(session), v1)
self.assertAllEqual(my2.eval(session), v2)
self.assertAllEqual(my3.eval(session), v3)
self.assertAllEqual(my4.eval(session), v4)
def testInitFromPartitionVar(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1 = _create_partition_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable(
name="my1",
shape=[100, 100],
initializer=init_ops.zeros_initializer(),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my1_var_list = my1._get_variable_list()
# Create another variable with different partitions than the variable in
# the checkpoint.
with variable_scope.variable_scope("some_other_scope"):
my2 = variable_scope.get_variable(
name="var1",
shape=[100, 100],
initializer=init_ops.zeros_initializer(),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=16 << 10))
my2_var_list = my2._get_variable_list()
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {
"scope/var1": "some_scope/my1",
"scope/": "some_other_scope/"})
session.run(variables.global_variables_initializer())
my1_values = session.run(my1_var_list)
self.assertAllEqual(my1_values, v1)
my2_values = session.run(my2_var_list)
# Verify we created different number of partitions.
self.assertNotEqual(len(my2_values), len(v1))
# Verify the values were correctly initialized in spite of different
# partitions.
full_my2_values = np.concatenate(my2_values, axis=0)
full_v1_values = np.concatenate(v1, axis=0)
self.assertAllEqual(full_my2_values, full_v1_values)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
my1 = variable_scope.get_variable(
name="my1",
shape=[100, 100],
initializer=init_ops.truncated_normal_initializer(0.5),
partitioner=partitioned_variables.min_max_variable_partitioner(
max_partitions=5, axis=0, min_slice_size=8 << 10))
my1_var_list = my1._get_variable_list()
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"scope/var1": my1_var_list,})
session.run(variables.global_variables_initializer())
my1_values = session.run(my1_var_list)
self.assertAllEqual(my1_values, v1)
def testInitFromCheckpointMissing(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
_, _, _, _ = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
with variable_scope.variable_scope("some_scope"):
_ = variable_scope.get_variable("my1", [10, 10])
_ = variable_scope.get_variable(
"my2", [1, 10],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer())
# No directory.
with self.assertRaises(errors_impl.OpError):
checkpoint_utils.init_from_checkpoint("no_dir",
{"var1": "some_scope/my1"})
# No variable in checkpoint.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"no_var": "some_scope/my1"})
# No variable in the graph.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"var3": "some_scope/no_var"})
# Shape mismatch.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"var1": "some_scope/my1"})
# Variable 'my1' and 'my2' are missing in given checkpoint scope.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(
checkpoint_dir, {"useful_scope/": "some_scope/"})
# Mapping is not to scope name.
with self.assertRaises(ValueError):
checkpoint_utils.init_from_checkpoint(checkpoint_dir,
{"useful_scope": "some_scope/"})
def testNoAdditionalReadOpsForResourceVariables(self):
checkpoint_dir = self.get_temp_dir()
with self.cached_session() as session:
v1, _, _, _ = _create_checkpoints(session, checkpoint_dir)
# New graph and session.
with ops.Graph().as_default() as g:
with self.session(graph=g) as session:
my1 = resource_variable_ops.ResourceVariable([[0.0] * 10], name="my1")
with ops.name_scope("init_from_checkpoint"):
checkpoint_utils.init_from_checkpoint(checkpoint_dir, {"var1": my1})
# Basic sanity checks:
session.run(variables.global_variables_initializer())
self.assertAllEqual(session.run(my1), v1)
ops_in_init_from_checkpoint_scope = [
op for op in g.get_operations()
if (op.name.startswith("init_from_checkpoint/") and
not op.name.startswith("init_from_checkpoint/checkpoint_initializer"
) and
op.type != "AssignVariableOp" and
op.type != "Identity")
]
self.assertEqual(ops_in_init_from_checkpoint_scope, [])
if __name__ == "__main__":
test.main()
| |
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import os
import re
import unittest
from contextlib import contextmanager
from textwrap import dedent
from pants.base.specs import DescendantAddresses, SiblingAddresses, SingleAddress
from pants.build_graph.address import Address
from pants.engine.addressable import Addresses, SubclassesOf, addressable_list
from pants.engine.build_files import UnhydratedStruct, create_graph_tasks
from pants.engine.engine import LocalSerialEngine
from pants.engine.mapper import (AddressFamily, AddressMap, AddressMapper, DifferingFamiliesError,
DuplicateNameError, UnaddressableObjectError)
from pants.engine.nodes import Throw
from pants.engine.parser import SymbolTable
from pants.engine.selectors import SelectDependencies
from pants.engine.struct import HasProducts, Struct
from pants.util.dirutil import safe_open
from pants_test.engine.examples.parsers import JsonParser
from pants_test.engine.scheduler_test_base import SchedulerTestBase
class Target(Struct, HasProducts):
def __init__(self, name=None, configurations=None, **kwargs):
super(Target, self).__init__(name=name, **kwargs)
self.configurations = configurations
@property
def products(self):
return self.configurations
@addressable_list(SubclassesOf(Struct))
def configurations(self):
pass
class Thing(object):
def __init__(self, **kwargs):
self._kwargs = kwargs
def _asdict(self):
return self._kwargs
def _key(self):
return {k: v for k, v in self._kwargs.items() if k != 'type_alias'}
def __eq__(self, other):
return isinstance(other, Thing) and self._key() == other._key()
class ThingTable(SymbolTable):
@classmethod
def table(cls):
return {'thing': Thing}
class AddressMapTest(unittest.TestCase):
_parser_cls = JsonParser
_symbol_table_cls = ThingTable
@contextmanager
def parse_address_map(self, json, exclude_patterns=None):
path = '/dev/null'
address_map = AddressMap.parse(path, json, self._symbol_table_cls, self._parser_cls, exclude_patterns)
self.assertEqual(path, address_map.path)
yield address_map
def test_parse(self):
with self.parse_address_map(dedent("""
{
"type_alias": "thing",
"name": "one",
"age": 42
}
{
"type_alias": "thing",
"name": "two",
"age": 37
}
""")) as address_map:
self.assertEqual({'one': Thing(name='one', age=42), 'two': Thing(name='two', age=37)},
address_map.objects_by_name)
def test_not_serializable(self):
with self.assertRaises(UnaddressableObjectError):
with self.parse_address_map('{}'):
self.fail()
def test_not_named(self):
with self.assertRaises(UnaddressableObjectError):
with self.parse_address_map('{"type_alias": "thing"}'):
self.fail()
def test_duplicate_names(self):
with self.assertRaises(DuplicateNameError):
with self.parse_address_map('{"type_alias": "thing", "name": "one"}'
'{"type_alias": "thing", "name": "one"}'):
self.fail()
def test_exclude_target_regexps_plain_string(self):
with self.parse_address_map(dedent("""
{
"type_alias": "thing",
"name": "one",
"age": 42
}
{
"type_alias": "thing",
"name": "two",
"age": 37
}
"""), [re.compile('two')]) as address_map:
self.assertEqual({'one': Thing(name='one', age=42)}, address_map.objects_by_name)
def test_exclude_target_regexps_exclude_all(self):
with self.parse_address_map(dedent("""
{
"type_alias": "thing",
"name": "one",
"age": 42
}
{
"type_alias": "thing",
"name": "two",
"age": 37
}
"""), [re.compile('o')]) as address_map:
self.assertEqual(dict(), address_map.objects_by_name)
def test_exclude_target_regexps_re_expression(self):
with self.parse_address_map(dedent("""
{
"type_alias": "thing",
"name": "one",
"age": 42
}
{
"type_alias": "thing",
"name": "one_two",
"age": 37
}
{
"type_alias": "thing",
"name": "one_two_three",
"age": 32
}
"""), [re.compile('o.*_two$')]) as address_map:
self.assertEqual({'one': Thing(name='one', age=42), 'one_two_three': Thing(name='one_two_three', age=32)},
address_map.objects_by_name)
def test_exclude_target_regexps_multiple_re(self):
with self.parse_address_map(dedent("""
{
"type_alias": "thing",
"name": "one",
"age": 42
}
{
"type_alias": "thing",
"name": "one_two",
"age": 37
}
{
"type_alias": "thing",
"name": "one_two_three",
"age": 32
}
"""), [re.compile('_.*_'), re.compile('e$')]) as address_map:
self.assertEqual({'one_two': Thing(name='one_two', age=37)}, address_map.objects_by_name)
class AddressFamilyTest(unittest.TestCase):
def test_create_single(self):
address_family = AddressFamily.create('',
[AddressMap('0', {
'one': Thing(name='one', age=42),
'two': Thing(name='two', age=37)
})])
self.assertEqual('', address_family.namespace)
self.assertEqual({Address.parse('//:one'): Thing(name='one', age=42),
Address.parse('//:two'): Thing(name='two', age=37)},
address_family.addressables)
def test_create_multiple(self):
address_family = AddressFamily.create('name/space',
[AddressMap('name/space/0',
{'one': Thing(name='one', age=42)}),
AddressMap('name/space/1',
{'two': Thing(name='two', age=37)})])
self.assertEqual('name/space', address_family.namespace)
self.assertEqual({Address.parse('name/space:one'): Thing(name='one', age=42),
Address.parse('name/space:two'): Thing(name='two', age=37)},
address_family.addressables)
def test_create_empty(self):
# Case where directory exists but is empty.
address_family = AddressFamily.create('name/space', [])
self.assertEqual(dict(), address_family.addressables)
def test_mismatching_paths(self):
with self.assertRaises(DifferingFamiliesError):
AddressFamily.create('one',
[AddressMap('/dev/null/one/0', {}),
AddressMap('/dev/null/two/0', {})])
def test_duplicate_names(self):
with self.assertRaises(DuplicateNameError):
AddressFamily.create('name/space',
[AddressMap('name/space/0',
{'one': Thing(name='one', age=42)}),
AddressMap('name/space/1',
{'one': Thing(name='one', age=37)})])
class TargetTable(SymbolTable):
@classmethod
def table(cls):
return {'struct': Struct, 'target': Target}
class AddressMapperTest(unittest.TestCase, SchedulerTestBase):
def setUp(self):
# Set up a scheduler that supports address mapping.
symbol_table_cls = TargetTable
address_mapper = AddressMapper(symbol_table_cls=symbol_table_cls,
parser_cls=JsonParser,
build_pattern='*.BUILD.json')
tasks = create_graph_tasks(address_mapper, symbol_table_cls)
project_tree = self.mk_fs_tree(os.path.join(os.path.dirname(__file__), 'examples/mapper_test'))
self.build_root = project_tree.build_root
self.scheduler = self.mk_scheduler(tasks=tasks, project_tree=project_tree)
self.a_b = Address.parse('a/b')
self.a_b_target = Target(name='b',
dependencies=['//d:e'],
configurations=['//a', Struct(embedded='yes')],
type_alias='target')
def resolve(self, spec):
select = SelectDependencies(UnhydratedStruct, Addresses, field_types=(Address,))
request = self.scheduler.selection_request([(select, spec)])
result = LocalSerialEngine(self.scheduler).execute(request)
if result.error:
raise result.error
# Expect a single root.
state, = result.root_products.values()
if type(state) is Throw:
raise Exception(state.exc)
return state.value
def resolve_multi(self, spec):
return {uhs.address: uhs.struct for uhs in self.resolve(spec)}
def test_no_address_no_family(self):
spec = SingleAddress('a/c', None)
# Should fail: does not exist.
with self.assertRaises(Exception):
self.resolve(spec)
# Exists on disk, but not yet in memory.
directory = 'a/c'
build_file = os.path.join(self.build_root, directory, 'c.BUILD.json')
with safe_open(build_file, 'w') as fp:
fp.write('{"type_alias": "struct", "name": "c"}')
with self.assertRaises(Exception):
self.resolve(spec)
# Success.
self.scheduler.invalidate_files([directory])
resolved = self.resolve(spec)
self.assertEqual(1, len(resolved))
self.assertEqual(Struct(name='c', type_alias='struct'), resolved[0].struct)
def test_resolve(self):
resolved = self.resolve(SingleAddress('a/b', None))
self.assertEqual(1, len(resolved))
self.assertEqual(self.a_b, resolved[0].address)
@staticmethod
def addr(spec):
return Address.parse(spec)
def test_walk_siblings(self):
self.assertEqual({self.addr('a/b:b'): self.a_b_target},
self.resolve_multi(SiblingAddresses('a/b')))
def test_walk_descendants(self):
self.assertEqual({self.addr('//:root'): Struct(name='root', type_alias='struct'),
self.addr('a/b:b'): self.a_b_target,
self.addr('a/d:d'): Target(name='d', type_alias='target'),
self.addr('a/d/e:e'): Target(name='e', type_alias='target'),
self.addr('a/d/e:e-prime'): Struct(name='e-prime', type_alias='struct')},
self.resolve_multi(DescendantAddresses('')))
def test_walk_descendants_rel_path(self):
self.assertEqual({self.addr('a/d:d'): Target(name='d', type_alias='target'),
self.addr('a/d/e:e'): Target(name='e', type_alias='target'),
self.addr('a/d/e:e-prime'): Struct(name='e-prime', type_alias='struct')},
self.resolve_multi(DescendantAddresses('a/d')))
# Excludes are not implemented: expects excludes=['a/b', 'a/d/e'].
@unittest.expectedFailure
def test_walk_descendants_path_excludes(self):
self.assertEqual({self.addr('//:root'): Struct(name='root'),
self.addr('a/d:d'): Target(name='d')},
self.resolve_multi(DescendantAddresses('')))
| |
"""Tests for flattening module.
"""
import ast
import unittest
import astunparse
from sspam import pre_processing
from sspam.tools.asttools import Comparator
from sspam.tools.flattening import Flattening, Unflattening
class TestFlattening(unittest.TestCase):
"""
Test that flattening produces the expected ast.
"""
def generic_flattening(self, refstring_list, result):
'Test matching of flattened AST and ref AST'
for refstring in refstring_list:
ref = ast.parse(refstring, mode="eval").body
ref = Flattening().visit(ref)
self.assertTrue(Comparator().visit(ref, result))
def test_basics(self):
'Simple tests with matching of AST'
corresp = [(["a + b + c", "a + (b + c)", "b + c + a"],
ast.BoolOp(ast.Add(),
[ast.Name('a', ast.Load()),
ast.Name('b', ast.Load()),
ast.Name('c', ast.Load())])),
(["a + b + c + d", "(a + b) + (c + d)",
"a + (b + c + d)", "a + (b + (c + d))"],
ast.BoolOp(ast.Add(),
[ast.Name('a', ast.Load()),
ast.Name('b', ast.Load()),
ast.Name('c', ast.Load()),
ast.Name('d', ast.Load())])),
(["a + b + c*d", "a + c*d + b"],
ast.BoolOp(ast.Add(),
[ast.Name('a', ast.Load()),
ast.Name('b', ast.Load()),
ast.BinOp(ast.Name('c', ast.Load()),
ast.Mult(),
ast.Name('d', ast.Load()))])),
(["a*b*c"],
ast.BoolOp(ast.Mult(),
[ast.Name('a', ast.Load()),
ast.Name('b', ast.Load()),
ast.Name('c', ast.Load())])),
(["a + c*d*e"],
ast.BinOp(ast.Name('a', ast.Load()), ast.Add(),
ast.BoolOp(ast.Mult(),
[ast.Name('c', ast.Load()),
ast.Name('d', ast.Load()),
ast.Name('e', ast.Load())]))),
(["a + b + c + c*d", "a + c*d + b + c"],
ast.BoolOp(ast.Add(),
[ast.Name('a', ast.Load()),
ast.Name('b', ast.Load()),
ast.Name('c', ast.Load()),
ast.BinOp(ast.Name('c', ast.Load()),
ast.Mult(),
ast.Name('d', ast.Load()))])),
(["a + b + c*d*e", "a + c*d*e + b", "b + e*c*d + a"],
ast.BoolOp(ast.Add(),
[ast.Name('a', ast.Load()),
ast.Name('b', ast.Load()),
ast.BoolOp(ast.Mult(),
[ast.Name('c', ast.Load()),
ast.Name('d', ast.Load()),
ast.Name('e', ast.Load())])])),
(["a + c*d*e + b + c"],
ast.BoolOp(ast.Add(),
[ast.Name('a', ast.Load()),
ast.Name('b', ast.Load()),
ast.Name('c', ast.Load()),
ast.BoolOp(ast.Mult(),
[ast.Name('c', ast.Load()),
ast.Name('d', ast.Load()),
ast.Name('e', ast.Load())])]))]
for refstring, result in corresp:
self.generic_flattening(refstring, result)
def test_astform(self):
'Tests with different types of ast'
t1 = ast.parse("1 + 2 + 3", mode="eval").body
t1_ref = ast.BoolOp(ast.Add(), [ast.Num(1), ast.Num(2), ast.Num(3)])
t2 = ast.parse("1 + 2 + 3", mode="eval")
t3 = ast.parse("1 + 2 + 3").body[0]
tests = [(t1, t1_ref), (t2, ast.Expression(t1_ref)),
(t3, ast.Expr(t1_ref))]
for test, ref in tests:
ltest = Flattening().visit(test)
self.assertTrue(Comparator().visit(ltest, ref))
def test_afterSubMult(self):
'Tests after SubToMult pre-processing'
tests = [("1 + 2 - 3", ast.BoolOp(ast.Add(), [ast.Num(1), ast.Num(2),
ast.BinOp(ast.Num(-1),
ast.Mult(),
ast.Num(3))])),
("1 + 2 - 3 + 4", ast.BoolOp(ast.Add(),
[ast.Num(1),
ast.Num(2),
ast.BinOp(ast.Num(-1),
ast.Mult(),
ast.Num(3)),
ast.Num(4)])),
("(1 + 2) - (3 + 4)",
ast.BoolOp(ast.Add(),
[ast.Num(1), ast.Num(2),
ast.BinOp(ast.Num(-1), ast.Mult(),
ast.BinOp(ast.Num(3), ast.Add(),
ast.Num(4)))]))]
for teststring, ref_ast in tests:
test_ast = ast.parse(teststring, mode="eval").body
test_ast = pre_processing.all_preprocessings(test_ast)
test_ast = Flattening(ast.Add).visit(test_ast)
self.assertTrue(Comparator().visit(test_ast, ref_ast))
def test_differentops(self):
'Test with other types of operators'
tests = [("(3 & 5 & 6)",
ast.BoolOp(ast.BitAnd(),
[ast.Num(3), ast.Num(5), ast.Num(6)])),
("(1 ^ 2 ^ 3) - 4",
ast.BinOp(ast.BoolOp(ast.BitXor(),
[ast.Num(1), ast.Num(2), ast.Num(3)]),
ast.Add(),
ast.BinOp(ast.Num(-1), ast.Mult(), ast.Num(4)))),
("((1 + 2 + 3) & (4 + 5))",
ast.BinOp(ast.BoolOp(ast.Add(),
[ast.Num(1), ast.Num(2), ast.Num(3)]),
ast.BitAnd(),
ast.BinOp(ast.Num(4), ast.Add(), ast.Num(5)))),
("(1 & 2 & 3) - (4 & 5)",
ast.BinOp(ast.BoolOp(ast.BitAnd(),
[ast.Num(1), ast.Num(2), ast.Num(3)]),
ast.Add(),
ast.BinOp(ast.Num(-1), ast.Mult(),
ast.BinOp(ast.Num(4), ast.BitAnd(),
ast.Num(5))))),
("(1 & 2 & 3) << (4 & 5)",
ast.BinOp(ast.BoolOp(ast.BitAnd(),
[ast.Num(1), ast.Num(2), ast.Num(3)]),
ast.LShift(),
ast.BinOp(ast.Num(4), ast.BitAnd(), ast.Num(5))))]
for teststring, ref_ast in tests:
test_ast = ast.parse(teststring, mode="eval").body
test_ast = pre_processing.all_preprocessings(test_ast)
test_ast = Flattening().visit(test_ast)
self.assertTrue(Comparator().visit(test_ast, ref_ast))
def test_withUnaryOp(self):
'Test with UnaryOp involved'
tests = [("5 + (-(6 + 2)) + 3",
ast.BoolOp(ast.Add(),
[ast.Num(5),
ast.UnaryOp(ast.USub(), ast.BinOp(ast.Num(6),
ast.Add(),
ast.Num(2))),
ast.Num(3)]))]
for teststring, ref_ast in tests:
test_ast = ast.parse(teststring, mode="eval").body
test_ast = Flattening(ast.Add).visit(test_ast)
self.assertTrue(Comparator().visit(test_ast, ref_ast))
def test_with_funcs(self):
'Tests with functions'
tests = [
("f(1 + 1 + 1)",
ast.Call(ast.Name('f', ast.Load()),
[ast.BoolOp(ast.Add(),
[ast.Num(n) for n in [1, 1, 1]])],
[],
None,
None)),
("f(1 + 1 + g(2 + 2 + 2))",
ast.Call(ast.Name('f', ast.Load()),
[ast.BoolOp(ast.Add(),
[ast.Num(1),
ast.Num(1),
ast.Call(ast.Name('g', ast.Load()),
[ast.BoolOp(ast.Add(),
[ast.Num(2),
ast.Num(2),
ast.Num(2)])],
[],
None,
None)])],
[],
None,
None)),
("f(8) + (a + f(8)) + f(14)",
ast.BoolOp(ast.Add(),
[ast.Call(ast.Name('f', ast.Load()),
[ast.Num(8)], [], None, None),
ast.Name('a', ast.Load()),
ast.Call(ast.Name('f', ast.Load()),
[ast.Num(8)], [], None, None),
ast.Call(ast.Name('f', ast.Load()),
[ast.Num(14)], [], None, None)]))]
for teststring, ref_ast in tests:
test_ast = ast.parse(teststring, mode="eval").body
test_ast = Flattening(ast.Add).visit(test_ast)
self.assertTrue(Comparator().visit(test_ast, ref_ast))
def test_noflattening(self):
'Tests where nothing should be flattened'
corresp = [(["a + b", "b + a"],
ast.BinOp(ast.Name('a', ast.Load()),
ast.Add(),
ast.Name('b', ast.Load()))),
(["c*d", "d*c"],
ast.BinOp(ast.Name('c', ast.Load()),
ast.Mult(),
ast.Name('d', ast.Load()))),
(["a + c*d", "d*c + a"],
ast.BinOp(ast.Name('a', ast.Load()), ast.Add(),
ast.BinOp(ast.Name('c', ast.Load()), ast.Mult(),
ast.Name('d', ast.Load()))))]
for refstring, result in corresp:
self.generic_flattening(refstring, result)
def test_unflattening(self):
'Tests to see if unflattening is correct'
tests = [("x + (3 + y)", "3 + (y + x)"),
("x*(2*z)", "2*(z*x)"),
("x + (y + (z*(5*var)))", "y + (5*(var*z) + x)")]
for test, ref in tests:
ref_ast = ast.parse(ref)
ast_test = ast.parse(test)
Flattening().visit(ast_test)
Unflattening().visit(ast_test)
self.assertTrue(Comparator().visit(ast_test, ref_ast))
self.assertFalse('BoolOp' in astunparse.unparse(ast_test))
if __name__ == '__main__':
unittest.main()
| |
"""Lattice module.
In this module the lattice of the corresponding accelerator is defined.
"""
import math as _math
from pyaccel import lattice as _pyacc_lat, elements as _pyacc_ele, \
accelerator as _pyacc_acc, optics as _pyacc_opt
from . import segmented_models as _segmented_models
energy = 0.150e9 # [eV]
default_optics_mode = 'M1'
class LatticeError(Exception):
"""LatticeError class."""
def create_lattice(optics_mode=default_optics_mode):
"""Create lattice function."""
strengths, twiss_at_start = get_optics_mode(optics_mode)
# -- shortcut symbols --
marker = _pyacc_ele.marker
drift = _pyacc_ele.drift
quadrupole = _pyacc_ele.quadrupole
rbend_sirius = _pyacc_ele.rbend
sextupole = _pyacc_ele.sextupole
deg_2_rad = _math.pi / 180.0
corr_length = 0.082
# --- drift spaces ---
lp2 = drift('lp2', 0.0002)
lp3 = drift('lp3', 0.0003)
lp4 = drift('lp4', 0.0004)
lp5 = drift('lp5', 0.0005)
lp6 = drift('lp6', 0.0006)
lp7 = drift('lp7', 0.0007)
l1 = drift('l1', 0.001)
l2 = drift('l2', 0.002)
l3 = drift('l3', 0.003)
l4 = drift('l4', 0.004)
l5 = drift('l5', 0.005)
l6 = drift('l6', 0.006)
l7 = drift('l7', 0.007)
l8 = drift('l8', 0.008)
l9 = drift('l9', 0.009)
l10 = drift('l10', 0.010)
l30 = drift('l30', 0.030)
l40 = drift('l40', 0.040)
l60 = drift('l60', 0.060)
l70 = drift('l70', 0.070)
l80 = drift('l80', 0.080)
l90 = drift('l90', 0.090)
l100 = drift('l100', 0.100)
l200 = drift('l200', 0.200)
# --- markers ---
inicio = marker('start')
fim = marker('end')
# --- slits ---
slith = marker('SlitH')
slitv = marker('SlitV')
# --- beam screens ---
scrn = marker('Scrn')
# --- beam current monitors ---
ict = marker('ICT')
fct = marker('FCT')
# --- beam position monitors ---
bpm = marker('BPM')
# --- correctors ---
chv = sextupole('CHV', corr_length, 0.0)
qs = quadrupole('QS', corr_length, 0.0)
# --- quadrupoles ---
qf2L = quadrupole('QF2L', 0.112, strengths['qf2l']) # LINAC TRIPLET
qd2L = quadrupole('QD2L', 0.162, strengths['qd2l']) # LINAC TRIPLET
qf3L = quadrupole('QF3L', 0.112, strengths['qf3l']) # LINAC QUADRUPOLE
# -- spec --
ang = 15.0 # injection mode
dip_nam = 'Spect'
dip_len = 0.45003
dip_ang = -ang * deg_2_rad
dip_K = 0.0
dip_S = 0.00
spech = rbend_sirius(dip_nam, dip_len/2, dip_ang/2,
0, 0,
0, 0, 0, [0, 0, 0], [0, dip_K, dip_S])
spec = [spech, spech]
qd1 = quadrupole('QD1', 0.100, strengths['qd1'])
qf1 = quadrupole('QF1', 0.100, strengths['qf1'])
qd2a = quadrupole('QD2A', 0.100, strengths['qd2a'])
qf2a = quadrupole('QF2A', 0.100, strengths['qf2a'])
qf2b = quadrupole('QF2B', 0.100, strengths['qf2b'])
qd2b = quadrupole('QD2B', 0.100, strengths['qd2b'])
qf3 = quadrupole('QF3', 0.100, strengths['qf3'])
qd3 = quadrupole('QD3', 0.100, strengths['qd3'])
qf4 = quadrupole('QF4', 0.100, strengths['qf4'])
qd4 = quadrupole('QD4', 0.100, strengths['qd4'])
# --- bending magnets ---
bp = _segmented_models.dipole(sign=+1)
bn = _segmented_models.dipole(sign=-1)
septin = _segmented_models.septum(strengths)
# --- lines ---
s00_1 = [l80, l4, qf2L, l30, l8, qd2L, l30, l8, qf2L, l30, l8, qf3L]
s00_2 = [l80, l7, bpm, l200, l40, l6, ict, l200, l100, l90, l5]
s01_1 = [
l200, l200, l200, l80, l4, lp2, scrn, l100, l40, lp2, bpm,
l100, l2, lp4]
s01_2 = [l80, l8, lp4, chv, l200, l90, l1, lp2]
s01_3 = [
l200, l200, l200, l200, l200, l40, l4, slith, l100, l80, scrn,
l100, l40, bpm, l100, l90, l9, chv, l100, l90, l3, lp3, slitv,
l200, l10, lp4]
s02_1 = [l100, l90, l4, lp4, ict, l200, l200, l200, l10, l6]
s02_2 = [l200, l70]
s02_3 = [
l200, scrn, l100, l40, bpm, l60, l9, chv] + [l200]*26 + \
[l100, l70, l3]
s02_4 = [l200, l70]
s02_5 = [
l200, scrn, l100, l40, bpm, l60, l8, lp5, chv, l200, l100,
l10, l9, lp7]
s03_1 = [l200] * 10 + [l100, l90, l9, lp6]
s03_2 = [l200, l6]
s03_3 = [l100, bpm, l100, l40, l4, scrn, l200, l10, lp4]
s04_1 = [
l200, l70, l2, lp4, chv, l200, l200, l100, l80, lp5, fct,
l100, l40, ict, l200, l100, l5, lp7, bpm, l100, l10, l5, lp6]
s04_2 = [l200, l10, l6]
s04_3 = [l100, l70, scrn, l60, l1, lp2, qs, l80, l6, lp6]
sector00 = [s00_1, s00_2, spec]
sector01 = [s01_1, qd1, s01_2, qf1, s01_3, bn]
sector02 = [s02_1, qd2a, s02_2, qf2a, s02_3, qf2b, s02_4, qd2b, s02_5, bp]
sector03 = [s03_1, qf3, s03_2, qd3, s03_3, bp]
sector04 = [s04_1, qf4, s04_2, qd4, s04_3, septin]
# TB beamline
ltlb = [inicio, sector00, sector01, sector02, sector03, sector04, fim]
elist = ltlb
the_line = _pyacc_lat.build(elist)
# --- shifts model to marker 'start' ---
idx = _pyacc_lat.find_indices(the_line, 'fam_name', 'start')
the_line = _pyacc_lat.shift(the_line, idx[0])
lengths = _pyacc_lat.get_attribute(the_line, 'length')
for length in lengths:
if length < 0:
raise LatticeError('Model with negative drift!')
# sets number of integration steps
set_num_integ_steps(the_line)
# -- define vacuum chamber for all elements
the_line = set_vacuum_chamber(the_line)
return the_line, twiss_at_start
def get_optics_mode(optics_mode):
"""Return magnet strengths of a given opics mode."""
# -- selection of optics mode --
if optics_mode == 'M1':
# Initial Conditions from Linac measured parameters on 30/08/2019
# Linac second quadrupole triplet set to same values used during
# measurements
# (No triplet)
# QD4 was left free to become focusing during the fit.
twiss_at_start = _pyacc_opt.Twiss.make_new(
beta=[1.45401, 2.47656], alpha=[-1.57249, 0.527312], etax=[0, 0])
strengths = {
'qf2l': 12.37,
'qd2l': -14.85,
'qf3l': 6.3387,
'qd1': -8.8224,
'qf1': 13.3361,
'qd2a': -10.8698,
'qf2a': 13.8136,
'qf2b': 6.9037,
'qd2b': -6.3496,
'qf3': 13.4901,
'qd3': -10.8577,
'qf4': 8.1889,
'qd4': 0.6693,
'injsept_kxl': -0.39475202,
'injsept_kyl': +0.35823882,
'injsept_ksxl': -0.04944937,
'injsept_ksyl': -0.00393883,
}
elif optics_mode == 'M2':
# Initial Conditions from Linac measured parameters on 30/08/2019
# Linac second quadrupole triplet set to same values used during
# measurements
# (No triplet)
twiss_at_start = _pyacc_opt.Twiss.make_new(
beta=[1.45401, 2.47656], alpha=[-1.57249, 0.527312], etax=[0, 0])
strengths = {
'qf2l': 12.37,
'qd2l': -14.85,
'qf3l': 6.342735948415,
'qd1': -8.822330690694,
'qf1': 13.336079810152,
'qd2a': -11.779088961602,
'qf2a': 14.331275527616,
'qf2b': 8.958478776817,
'qd2b': -8.99233133968,
'qf3': 11.263508962434,
'qd3': -6.891349798498,
'qf4': 9.84840688362,
'qd4': -3.114739958144,
'injsept_kxl': -0.3,
'injsept_kyl': 0.3,
'injsept_ksxl': 0.0,
'injsept_ksyl': 0.0,
}
elif optics_mode == 'M3':
# Initial Conditions from Linac measured parameters on 16/07/2019
# Linac second quadrupole triplet set to same values used during
# measurements
# (No triplet)
twiss_at_start = _pyacc_opt.Twiss.make_new(
beta=[2.71462, 4.69925], alpha=[-2.34174, 1.04009],
etax=[0.0, 0.0])
strengths = {
'qf2l': 12.37,
'qd2l': -14.85,
'qf3l': 5.713160289024,
'qd1': -8.821809143987,
'qf1': 13.335946597802,
'qd2a': -11.859318300947,
'qf2a': 14.532892396682,
'qf2b': 8.647545577362,
'qd2b': -8.836916532517,
'qf3': 10.020651462368,
'qd3': -4.974049498621,
'qf4': 11.168208453391,
'qd4': -6.191738912262,
'injsept_kxl': 0.0,
'injsept_kyl': 0.0,
'injsept_ksxl': 0.0,
'injsept_ksyl': 0.0,
}
elif optics_mode == 'M4':
# Initial Conditions from Linac measured parameters on 16/07/2019
# Linac second quadrupole triplet is used to match the LBT optics
# (No triplet)
twiss_at_start = _pyacc_opt.Twiss.make_new(
beta=[2.71462, 4.69925], alpha=[-2.34174, 1.04009],
etax=[0.0, 0.0])
strengths = {
'qf2l': 11.78860,
'qd2l': -14.298290,
'qf3l': 4.801910,
'qd1': -8.822256368219,
'qf1': 13.336060990905,
'qd2a': -9.382785447106,
'qf2a': 12.670391768958,
'qf2b': 7.994238513566,
'qd2b': -7.118805773505,
'qf3': 10.328752039153,
'qd3': -5.519539215470,
'qf4': 11.635406805193,
'qd4': -6.936225524796,
'injsept_kxl': 0.0,
'injsept_kyl': 0.0,
'injsept_ksxl': 0.0,
'injsept_ksyl': 0.0,
}
else:
    raise _pyacc_acc.AcceleratorException(
        'Invalid TB optics mode: ' + optics_mode)
return strengths, twiss_at_start
def set_num_integ_steps(the_line):
"""Set number of integration steps in each lattice element."""
dl = 0.035
for i, _ in enumerate(the_line):
if the_line[i].angle:
length = the_line[i].length
the_line[i].nr_steps = max(10, int(_math.ceil(length/dl)))
elif the_line[i].polynom_b[1]:
the_line[i].nr_steps = 10
elif the_line[i].polynom_b[2]:
the_line[i].nr_steps = 10
else:
the_line[i].nr_steps = 1
# CHV is a combined horizontal/vertical corrector, so one lookup covers
# both planes (the original code queried 'CHV' twice, duplicating indices).
chv_indices = _pyacc_lat.find_indices(the_line, 'fam_name', 'CHV')
qs_indices = _pyacc_lat.find_indices(the_line, 'fam_name', 'QS')
corr_indices = chv_indices + qs_indices
for idx in corr_indices:
the_line[idx].nr_steps = 5
def set_vacuum_chamber(the_line):
"""Set vacuum chamber for all elements."""
# -- default physical apertures --
for i, _ in enumerate(the_line):
the_line[i].hmin = -0.018
the_line[i].hmax = +0.018
the_line[i].vmin = -0.018
the_line[i].vmax = +0.018
# -- bo injection septum --
beg = _pyacc_lat.find_indices(the_line, 'fam_name', 'bInjS')[0]
end = _pyacc_lat.find_indices(the_line, 'fam_name', 'eInjS')[0]
for i in range(beg, end+1):
the_line[i].hmin = -0.0075
the_line[i].hmax = +0.0075
the_line[i].vmin = -0.0080
the_line[i].vmax = +0.0080
# -- dipoles --
bnd = _pyacc_lat.find_indices(the_line, 'fam_name', 'B')
for i in bnd:
the_line[i].hmin = -0.0117
the_line[i].hmax = +0.0117
the_line[i].vmin = -0.0117
the_line[i].vmax = +0.0117
return the_line
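if __name__ == '__main__':
    # Minimal smoke test (an illustrative sketch, not part of the model):
    # build the default-mode lattice and report element count and length.
    line, twiss0 = create_lattice()
    total_len = sum(_pyacc_lat.get_attribute(line, 'length'))
    print('TB line: {0} elements, {1:.3f} m long'.format(len(line), total_len))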
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""ResNet."""
import math
import numpy as np
import mindspore.nn as nn
from mindspore import Parameter
from mindspore.common.tensor import Tensor
from mindspore.ops import operations as P
from mindspore.common.initializer import initializer
from model.thor_layer import Conv2d_Thor, Dense_Thor, Dense_ThorNoBN
def calculate_gain(nonlinearity, param=None):
"""calculate_gain"""
linear_fns = ['linear', 'conv1d', 'conv2d', 'conv3d', 'conv_transpose1d', 'conv_transpose2d', 'conv_transpose3d']
res = 0
if nonlinearity in linear_fns or nonlinearity == 'sigmoid':
res = 1
elif nonlinearity == 'tanh':
res = 5.0 / 3
elif nonlinearity == 'relu':
res = math.sqrt(2.0)
elif nonlinearity == 'leaky_relu':
if param is None:
negative_slope = 0.01
elif not isinstance(param, bool) and (isinstance(param, int) or isinstance(param, float)):
    # True/False are instances of int, hence the explicit bool check above
negative_slope = param
else:
raise ValueError("negative_slope {} not a valid number".format(param))
res = math.sqrt(2.0 / (1 + negative_slope ** 2))
else:
raise ValueError("Unsupported nonlinearity {}".format(nonlinearity))
return res
def _calculate_fan_in_and_fan_out(tensor):
"""_calculate_fan_in_and_fan_out"""
dimensions = len(tensor)
if dimensions < 2:
raise ValueError("Fan in and fan out can not be computed for tensor with fewer than 2 dimensions")
if dimensions == 2: # Linear
fan_in = tensor[1]
fan_out = tensor[0]
else:
num_input_fmaps = tensor[1]
num_output_fmaps = tensor[0]
receptive_field_size = 1
if dimensions > 2:
receptive_field_size = tensor[2] * tensor[3]
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return fan_in, fan_out
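# Worked example (illustrative): for a 3x3 conv weight of shape
# (out=64, in=16, 3, 3) the receptive field is 3*3 = 9, giving
# fan_in = 16*9 = 144 and fan_out = 64*9 = 576; for a dense weight of
# shape (10, 256), fan_in = 256 and fan_out = 10.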
def _calculate_correct_fan(tensor, mode):
mode = mode.lower()
valid_modes = ['fan_in', 'fan_out']
if mode not in valid_modes:
raise ValueError("Mode {} not supported, please use one of {}".format(mode, valid_modes))
fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
return fan_in if mode == 'fan_in' else fan_out
def kaiming_normal(inputs_shape, a=0, mode='fan_in', nonlinearity='leaky_relu'):
fan = _calculate_correct_fan(inputs_shape, mode)
gain = calculate_gain(nonlinearity, a)
std = gain / math.sqrt(fan)
return np.random.normal(0, std, size=inputs_shape).astype(np.float32)
def kaiming_uniform(inputs_shape, a=0, mode='fan_in', nonlinearity='leaky_relu'):
fan = _calculate_correct_fan(inputs_shape, mode)
gain = calculate_gain(nonlinearity, a)
std = gain / math.sqrt(fan)
bound = math.sqrt(3.0) * std # Calculate uniform bounds from standard deviation
return np.random.uniform(-bound, bound, size=inputs_shape).astype(np.float32)
def _conv3x3(in_channel, out_channel, stride=1, damping=0.03, loss_scale=1, frequency=278, batch_size=32.0):
weight_shape = (out_channel, in_channel, 3, 3)
weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu'))
return Conv2d_Thor(in_channel, out_channel,
kernel_size=3, stride=stride, padding=0, pad_mode='same', weight_init=weight,
damping=damping, loss_scale=loss_scale, frequency=frequency, batch_size=batch_size)
def _conv1x1(in_channel, out_channel, stride=1, damping=0.03, loss_scale=1, frequency=278, batch_size=32.0):
weight_shape = (out_channel, in_channel, 1, 1)
weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu'))
return Conv2d_Thor(in_channel, out_channel,
kernel_size=1, stride=stride, padding=0, pad_mode='same', weight_init=weight,
damping=damping, loss_scale=loss_scale, frequency=frequency, batch_size=batch_size)
def _conv7x7(in_channel, out_channel, stride=1, damping=0.03, loss_scale=1, frequency=278, batch_size=32.0):
weight_shape = (out_channel, in_channel, 7, 7)
weight = Tensor(kaiming_normal(weight_shape, mode="fan_out", nonlinearity='relu'))
return Conv2d_Thor(in_channel, out_channel,
kernel_size=7, stride=stride, padding=0, pad_mode='same', weight_init=weight,
damping=damping, loss_scale=loss_scale, frequency=frequency, batch_size=batch_size)
def _bn(channel):
return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,
gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)
def _bn_last(channel):
return nn.BatchNorm2d(channel, eps=1e-4, momentum=0.9,
gamma_init=1, beta_init=0, moving_mean_init=0, moving_var_init=1)
def _fc(in_channel, out_channel, damping, loss_scale, frequency, batch_size=32.0):
weight_shape = (out_channel, in_channel)
weight = Tensor(kaiming_uniform(weight_shape, a=math.sqrt(5)))
return Dense_ThorNoBN(in_channel, out_channel, has_bias=False, weight_init=weight,
bias_init=0, damping=damping, loss_scale=loss_scale, frequency=frequency,
batch_size=batch_size)
class ResidualBlockNoBN(nn.Cell):
"""
ResNet V1 residual block definition (BatchNorm removed except for bn3).
Args:
in_channel (int): Input channel.
out_channel (int): Output channel.
stride (int): Stride size for the first convolutional layer. Default: 1.
Returns:
Tensor, output tensor.
Examples:
>>> ResidualBlockNoBN(3, 256, stride=2)
"""
expansion = 4
def __init__(self,
in_channel,
out_channel,
stride=1,
damping=0.03,
loss_scale=1,
frequency=278,
batch_size=32.0,
resnet_d=0):
super(ResidualBlockNoBN, self).__init__()
channel = out_channel // self.expansion
self.conv1 = _conv1x1(in_channel, channel, stride=1, damping=damping, loss_scale=loss_scale,
frequency=frequency, batch_size=batch_size)
# self.bn1 = _bn(channel)
self.conv2 = _conv3x3(channel, channel, stride=stride, damping=damping, loss_scale=loss_scale,
frequency=frequency, batch_size=batch_size)
# self.bn2 = _bn(channel)
self.conv3 = _conv1x1(channel, out_channel, stride=1, damping=damping, loss_scale=loss_scale,
frequency=frequency, batch_size=batch_size)
self.bn3 = _bn_last(out_channel)
self.relu = nn.ReLU()
self.down_sample = False
if stride != 1 or in_channel != out_channel:
self.down_sample = True
self.down_sample_layer = None
if self.down_sample:
if resnet_d:
if stride == 1:
self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride,
damping=damping, loss_scale=loss_scale,
frequency=frequency)])
else:
self.down_sample_layer = nn.SequentialCell(
[nn.MaxPool2d(kernel_size=stride, stride=stride, pad_mode='same'),
_conv1x1(in_channel, out_channel, 1, damping=damping, loss_scale=loss_scale,
frequency=frequency)])
else:
self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride,
damping=damping, loss_scale=loss_scale,
frequency=frequency, batch_size=batch_size)])
# _bn(out_channel)])
self.add = P.TensorAdd()
def construct(self, x):
identity = x
out = self.conv1(x)
# out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
# out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.down_sample:
identity = self.down_sample_layer(identity)
out = self.add(out, identity)
out = self.relu(out)
return out
class ResNetNoBN(nn.Cell):
"""
ResNet architecture.
Args:
block (Cell): Block for network.
layer_nums (list): Numbers of block in different layers.
in_channels (list): Input channel in each layer.
out_channels (list): Output channel in each layer.
strides (list): Stride size in each layer.
num_classes (int): The number of classes that the training images are belonging to.
Returns:
Tensor, output tensor.
Examples:
>>> ResNetNoBN(ResidualBlockNoBN,
>>>            [3, 4, 6, 3],
>>>            [64, 256, 512, 1024],
>>>            [256, 512, 1024, 2048],
>>>            [1, 2, 2, 2],
>>>            10,
>>>            damping=0.03, loss_scale=1, frequency=278,
>>>            batch_size=32.0, resnet_d=0, init_new=0)
"""
def __init__(self,
block,
layer_nums,
in_channels,
out_channels,
strides,
num_classes,
damping,
loss_scale,
frequency,
batch_size,
resnet_d,
init_new):
super(ResNetNoBN, self).__init__()
if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
raise ValueError("the length of layer_num, in_channels, out_channels list must be 4!")
self.conv1 = _conv7x7(3, 64, stride=2, damping=damping, loss_scale=loss_scale, frequency=frequency,
batch_size=batch_size)
# self.bn1 = _bn(64)
self.relu = P.ReLU()
self.maxpool = P.MaxPoolWithArgmax(padding="same", ksize=3, strides=2)
self.layer1 = self._make_layer(block,
layer_nums[0],
in_channel=in_channels[0],
out_channel=out_channels[0],
stride=strides[0],
damping=damping,
loss_scale=loss_scale,
frequency=frequency,
batch_size=batch_size,
resnet_d=resnet_d)
self.layer2 = self._make_layer(block,
layer_nums[1],
in_channel=in_channels[1],
out_channel=out_channels[1],
stride=strides[1],
damping=damping,
loss_scale=loss_scale,
frequency=frequency,
batch_size=batch_size,
resnet_d=resnet_d)
self.layer3 = self._make_layer(block,
layer_nums[2],
in_channel=in_channels[2],
out_channel=out_channels[2],
stride=strides[2], damping=damping,
loss_scale=loss_scale,
frequency=frequency,
batch_size=batch_size,
resnet_d=resnet_d)
self.layer4 = self._make_layer(block,
layer_nums[3],
in_channel=in_channels[3],
out_channel=out_channels[3],
stride=strides[3],
damping=damping,
loss_scale=loss_scale,
frequency=frequency,
batch_size=batch_size,
resnet_d=resnet_d)
self.mean = P.ReduceMean(keep_dims=True)
self.flatten = nn.Flatten()
self.end_point = _fc(out_channels[3], num_classes, damping=damping, loss_scale=loss_scale, frequency=frequency,
batch_size=batch_size)
L = sum(layer_nums) # e.g., resnet101 has 33 residual branches
for name, cell in self.cells_and_names():
if isinstance(cell, ResidualBlockNoBN):
# cell.conv3.weight.default_input = initializer('zeros', cell.conv3.weight.default_input.shape()).to_tensor()
fixup_m = 3 # !!!
v = math.pow(L, -1 / (2 * fixup_m - 2))
cell.conv1.weight.default_input = Tensor(cell.conv1.weight.default_input.asnumpy() * v,
cell.conv1.weight.default_input.dtype())
cell.conv2.weight.default_input = Tensor(cell.conv2.weight.default_input.asnumpy() * v,
cell.conv2.weight.default_input.dtype())
if init_new:
cell.conv3.weight.default_input = Tensor(cell.conv3.weight.default_input.asnumpy() * v,
cell.conv3.weight.default_input.dtype())
else:
cell.conv3.weight.default_input = initializer('zeros',
cell.conv3.weight.default_input.shape()).to_tensor()
elif isinstance(cell, ResidualBlockNoBN):
    # NOTE: unreachable -- this condition duplicates the branch above,
    # apparently left over from a variant with a second block type.
cell.conv2.weight.default_input = initializer('zeros',
cell.conv2.weight.default_input.shape()).to_tensor()
fixup_m = 2 # !!!
v = math.pow(L, -1 / (2 * fixup_m - 2))
cell.conv1.weight.default_input = Tensor(cell.conv1.weight.default_input.asnumpy() * v,
cell.conv1.weight.default_input.dtype())
def _make_layer(self, block, layer_num, in_channel, out_channel, stride,
damping, loss_scale, frequency, batch_size, resnet_d):
"""
Make stage network of ResNet.
Args:
block (Cell): Resnet block.
layer_num (int): Layer number.
in_channel (int): Input channel.
out_channel (int): Output channel.
stride (int): Stride size for the first convolutional layer.
Returns:
SequentialCell, the output layer.
Examples:
>>> _make_layer(ResidualBlockNoBN, 3, 128, 256, 2)
"""
layers = []
resnet_block = block(in_channel, out_channel, stride=stride,
damping=damping, loss_scale=loss_scale, frequency=frequency,
batch_size=batch_size, resnet_d=resnet_d)
layers.append(resnet_block)
for _ in range(1, layer_num):
resnet_block = block(out_channel, out_channel, stride=1,
damping=damping, loss_scale=loss_scale, frequency=frequency,
batch_size=batch_size, resnet_d=resnet_d)
layers.append(resnet_block)
return nn.SequentialCell(layers)
def construct(self, x):
x = self.conv1(x)
# x = self.bn1(x)
x = self.relu(x)
c1, _ = self.maxpool(x)
c2 = self.layer1(c1)
c3 = self.layer2(c2)
c4 = self.layer3(c3)
c5 = self.layer4(c4)
out = self.mean(c5, (2, 3))
out = self.flatten(out)
out = self.end_point(out)
return out
def resnet50(class_num=10, damping=0.03, loss_scale=1, frequency=278, batch_size=32.0, resnet_d=0, init_new=0):
"""
Get ResNet50 neural network.
Args:
class_num (int): Class number.
Returns:
Cell, cell instance of ResNet50 neural network.
Examples:
>>> net = resnet50(10)
"""
return ResNetNoBN(ResidualBlockNoBN,
[3, 4, 6, 3],
[64, 256, 512, 1024],
[256, 512, 1024, 2048],
[1, 2, 2, 2],
class_num,
damping,
loss_scale,
frequency,
batch_size,
resnet_d,
init_new)
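if __name__ == '__main__':
    # Illustrative smoke test (a sketch, not from the source): build the
    # THOR ResNet-50 variant with the factory defaults and run one dummy
    # forward pass; the 224x224 input size is an assumption.
    net = resnet50(class_num=10)
    dummy = Tensor(np.zeros((1, 3, 224, 224), np.float32))
    print(net(dummy))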
| |
# Copyright 2013 OpenStack LLC.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# vim: tabstop=4 shiftwidth=4 softtabstop=4
import logging
from neutronclient.neutron import v2_0 as neutronV20
from neutronclient.neutron.v2_0 import network
from neutronclient.neutron.v2_0 import router
from neutronclient.openstack.common.gettextutils import _
PERFECT_TIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f"
class AddNetworkToDhcpAgent(neutronV20.NeutronCommand):
"""Add a network to a DHCP agent."""
log = logging.getLogger(__name__ + '.AddNetworkToDhcpAgent')
def get_parser(self, prog_name):
parser = super(AddNetworkToDhcpAgent, self).get_parser(prog_name)
parser.add_argument(
'dhcp_agent',
help='ID of the DHCP agent')
parser.add_argument(
'network',
help='network to add')
return parser
def run(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
_net_id = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'network', parsed_args.network)
neutron_client.add_network_to_dhcp_agent(parsed_args.dhcp_agent,
{'network_id': _net_id})
print >>self.app.stdout, (
_('Added network %s to DHCP agent') % parsed_args.network)
class RemoveNetworkFromDhcpAgent(neutronV20.NeutronCommand):
"""Remove a network from a DHCP agent."""
log = logging.getLogger(__name__ + '.RemoveNetworkFromDhcpAgent')
def get_parser(self, prog_name):
parser = super(RemoveNetworkFromDhcpAgent, self).get_parser(prog_name)
parser.add_argument(
'dhcp_agent',
help='ID of the DHCP agent')
parser.add_argument(
'network',
help='network to remove')
return parser
def run(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
_net_id = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'network', parsed_args.network)
neutron_client.remove_network_from_dhcp_agent(
parsed_args.dhcp_agent, _net_id)
print >>self.app.stdout, (
_('Removed network %s from DHCP agent') % parsed_args.network)
class ListNetworksOnDhcpAgent(network.ListNetwork):
"""List the networks on a DHCP agent."""
log = logging.getLogger(__name__ + '.ListNetworksOnDhcpAgent')
unknown_parts_flag = False
def get_parser(self, prog_name):
parser = super(ListNetworksOnDhcpAgent,
self).get_parser(prog_name)
parser.add_argument(
'dhcp_agent',
help='ID of the DHCP agent')
return parser
def call_server(self, neutron_client, search_opts, parsed_args):
data = neutron_client.list_networks_on_dhcp_agent(
parsed_args.dhcp_agent, **search_opts)
return data
class ListDhcpAgentsHostingNetwork(neutronV20.ListCommand):
"""List DHCP agents hosting a network."""
resource = 'agent'
_formatters = {}
log = logging.getLogger(__name__ + '.ListDhcpAgentsHostingNetwork')
list_columns = ['id', 'host', 'admin_state_up', 'alive']
unknown_parts_flag = False
def get_parser(self, prog_name):
parser = super(ListDhcpAgentsHostingNetwork,
self).get_parser(prog_name)
parser.add_argument(
'network',
help='network to query')
return parser
def extend_list(self, data, parsed_args):
for agent in data:
agent['alive'] = ":-)" if agent['alive'] else 'xxx'
def call_server(self, neutron_client, search_opts, parsed_args):
_id = neutronV20.find_resourceid_by_name_or_id(neutron_client,
'network',
parsed_args.network)
search_opts['network'] = _id
data = neutron_client.list_dhcp_agent_hosting_networks(**search_opts)
return data
class AddRouterToL3Agent(neutronV20.NeutronCommand):
"""Add a router to a L3 agent."""
log = logging.getLogger(__name__ + '.AddRouterToL3Agent')
def get_parser(self, prog_name):
parser = super(AddRouterToL3Agent, self).get_parser(prog_name)
parser.add_argument(
'l3_agent',
help='ID of the L3 agent')
parser.add_argument(
'router',
help='router to add')
return parser
def run(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
_id = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'router', parsed_args.router)
neutron_client.add_router_to_l3_agent(parsed_args.l3_agent,
{'router_id': _id})
print >>self.app.stdout, (
_('Added router %s to L3 agent') % parsed_args.router)
class RemoveRouterFromL3Agent(neutronV20.NeutronCommand):
"""Remove a router from a L3 agent."""
log = logging.getLogger(__name__ + '.RemoveRouterFromL3Agent')
def get_parser(self, prog_name):
parser = super(RemoveRouterFromL3Agent, self).get_parser(prog_name)
parser.add_argument(
'l3_agent',
help='ID of the L3 agent')
parser.add_argument(
'router',
help='router to remove')
return parser
def run(self, parsed_args):
self.log.debug('run(%s)' % parsed_args)
neutron_client = self.get_client()
neutron_client.format = parsed_args.request_format
_id = neutronV20.find_resourceid_by_name_or_id(
neutron_client, 'router', parsed_args.router)
neutron_client.remove_router_from_l3_agent(
parsed_args.l3_agent, _id)
print >>self.app.stdout, (
_('Removed router %s from L3 agent') % parsed_args.router)
class ListRoutersOnL3Agent(neutronV20.ListCommand):
"""List the routers on a L3 agent."""
log = logging.getLogger(__name__ + '.ListRoutersOnL3Agent')
_formatters = {'external_gateway_info':
router._format_external_gateway_info}
list_columns = ['id', 'name', 'external_gateway_info']
resource = 'router'
unknown_parts_flag = False
def get_parser(self, prog_name):
parser = super(ListRoutersOnL3Agent,
self).get_parser(prog_name)
parser.add_argument(
'l3_agent',
help='ID of the L3 agent to query')
return parser
def call_server(self, neutron_client, search_opts, parsed_args):
data = neutron_client.list_routers_on_l3_agent(
parsed_args.l3_agent, **search_opts)
return data
class ListL3AgentsHostingRouter(neutronV20.ListCommand):
"""List L3 agents hosting a router."""
resource = 'agent'
_formatters = {}
log = logging.getLogger(__name__ + '.ListL3AgentsHostingRouter')
list_columns = ['id', 'host', 'admin_state_up', 'alive', 'default']
unknown_parts_flag = False
def get_parser(self, prog_name):
parser = super(ListL3AgentsHostingRouter,
self).get_parser(prog_name)
parser.add_argument('router',
help='router to query')
return parser
def extend_list(self, data, parsed_args):
for agent in data:
agent['alive'] = ":-)" if agent['alive'] else 'xxx'
def call_server(self, neutron_client, search_opts, parsed_args):
_id = neutronV20.find_resourceid_by_name_or_id(neutron_client,
'router',
parsed_args.router)
search_opts['router'] = _id
data = neutron_client.list_l3_agent_hosting_routers(**search_opts)
return data
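# Illustrative CLI invocations (the command aliases are assumptions based on
# the usual neutronclient naming; check your client's command registry):
#   neutron dhcp-agent-network-add <dhcp-agent-id> <network>
#   neutron dhcp-agent-network-remove <dhcp-agent-id> <network>
#   neutron l3-agent-router-add <l3-agent-id> <router>
#   neutron l3-agent-list-hosting-router <router>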
| |
# Copyright (c) 2013-2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Barbican exception subclasses
"""
import urlparse
from barbican import i18n as u
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class RedirectException(Exception):
def __init__(self, url):
self.url = urlparse.urlparse(url)
class BarbicanException(Exception):
"""Base Barbican Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = u._("An unknown exception occurred")
def __init__(self, message_arg=None, *args, **kwargs):
if not message_arg:
message_arg = self.message
try:
self.message = message_arg % kwargs
except Exception as e:
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise e
else:
# at least get the core message out if something happened
pass
super(BarbicanException, self).__init__(self.message)
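# Illustrative subclass (a sketch, not part of this module): keyword
# arguments passed to the constructor are interpolated into 'message'.
#
#     class WidgetNotFound(BarbicanException):
#         message = u._("Widget %(widget_id)s was not found.")
#
#     raise WidgetNotFound(widget_id='abc-123')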
class BarbicanHTTPException(BarbicanException):
"""Base Barbican Exception to handle HTTP responses
To correctly use this class, inherit from it and define the following
properties:
- message: The message that will be displayed in the server log.
- client_message: The message that will actually be outputted to the
client.
- status_code: The HTTP status code that should be returned.
The default status code is 500.
"""
client_message = u._("failure seen - please contact site administrator.")
status_code = 500
def __init__(self, message_arg=None, client_message=None, *args, **kwargs):
if not client_message:
client_message = self.client_message
try:
self.client_message = client_message % kwargs
except Exception as e:
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise e
else:
# at least get the core message out if something happened
pass
super(BarbicanHTTPException, self).__init__(
message_arg, self.client_message, *args, **kwargs)
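# Illustrative HTTP subclass (a sketch): define the three properties the
# docstring above describes.
#
#     class WidgetTooLarge(BarbicanHTTPException):
#         message = u._("Widget %(widget_id)s exceeds the size limit.")
#         client_message = u._("Provided widget is too large.")
#         status_code = 413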
class MissingArgumentError(BarbicanException):
message = u._("Missing required argument.")
class MissingCredentialError(BarbicanException):
message = u._("Missing required credential: %(required)s")
class MissingMetadataField(BarbicanHTTPException):
message = u._("Missing required metadata field for %(required)s")
client_message = message
status_code = 400
class InvalidSubjectDN(BarbicanHTTPException):
message = u._("Invalid subject DN: %(subject_dn)s")
client_message = message
status_code = 400
class InvalidContainer(BarbicanHTTPException):
message = u._("Invalid container: %(reason)s")
client_message = message
status_code = 400
class InvalidExtensionsData(BarbicanHTTPException):
message = u._("Invalid extensions data.")
client_message = message
status_code = 400
class InvalidCMCData(BarbicanHTTPException):
message = u._("Invalid CMC Data")
client_message = message
status_code = 400
class InvalidPKCS10Data(BarbicanHTTPException):
message = u._("Invalid PKCS10 Data: %(reason)s")
client_message = message
status_code = 400
class InvalidCertificateRequestType(BarbicanHTTPException):
message = u._("Invalid Certificate Request Type")
client_message = message
status_code = 400
class CertificateExtensionsNotSupported(BarbicanHTTPException):
message = u._("Extensions are not yet supported. "
"Specify a valid profile instead.")
client_message = message
status_code = 400
class FullCMCNotSupported(BarbicanHTTPException):
message = u._("Full CMC Requests are not yet supported.")
client_message = message
status_code = 400
class BadAuthStrategy(BarbicanException):
message = u._("Incorrect auth strategy, expected \"%(expected)s\" but "
"received \"%(received)s\"")
class NotFound(BarbicanException):
message = u._("An object with the specified identifier was not found.")
class UnknownScheme(BarbicanException):
message = u._("Unknown scheme '%(scheme)s' found in URI")
class BadStoreUri(BarbicanException):
message = u._("The Store URI was malformed.")
class Duplicate(BarbicanException):
message = u._("An object with the same identifier already exists.")
class StorageFull(BarbicanException):
message = u._("There is not enough disk space on the image storage media.")
class StorageWriteDenied(BarbicanException):
message = u._("Permission to write image storage media denied.")
class AuthBadRequest(BarbicanException):
message = u._("Connect error/bad request to Auth service at URL %(url)s.")
class AuthUrlNotFound(BarbicanException):
message = u._("Auth service at URL %(url)s not found.")
class AuthorizationFailure(BarbicanException):
message = u._("Authorization failed.")
class NotAuthenticated(BarbicanException):
message = u._("You are not authenticated.")
class Forbidden(BarbicanException):
message = u._("You are not authorized to complete this action.")
class NotSupported(BarbicanException):
message = u._("Operation is not supported.")
class ForbiddenPublicImage(Forbidden):
message = u._("You are not authorized to complete this action.")
class ProtectedImageDelete(Forbidden):
message = u._("Image %(image_id)s is protected and cannot be deleted.")
# NOTE(bcwaldon): here for backwards-compatibility, need to deprecate.
class NotAuthorized(Forbidden):
message = u._("You are not authorized to complete this action.")
class Invalid(BarbicanException):
message = u._("Data supplied was not valid.")
class NoDataToProcess(BarbicanHTTPException):
message = u._("No data supplied to process.")
client_message = message
status_code = 400
class InvalidSortKey(Invalid):
message = u._("Sort key supplied was not valid.")
class InvalidFilterRangeValue(Invalid):
message = u._("Unable to filter using the specified range.")
class ReadonlyProperty(Forbidden):
message = u._("Attribute '%(property)s' is read-only.")
class ReservedProperty(Forbidden):
message = u._("Attribute '%(property)s' is reserved.")
class AuthorizationRedirect(BarbicanException):
message = u._("Redirecting to %(uri)s for authorization.")
class DatabaseMigrationError(BarbicanException):
message = u._("There was an error migrating the database.")
class ClientConnectionError(BarbicanException):
message = u._("There was an error connecting to a server")
class ClientConfigurationError(BarbicanException):
message = u._("There was an error configuring the client.")
class MultipleChoices(BarbicanException):
message = u._("The request returned a 302 Multiple Choices. This "
"generally means that you have not included a version "
"indicator in a request URI.\n\nThe body of response "
"returned:\n%(body)s")
class LimitExceeded(BarbicanHTTPException):
message = u._("The request returned a 413 Request Entity Too Large. This "
"generally means that rate limiting or a quota threshold "
"was breached.\n\nThe response body:\n%(body)s")
client_message = u._("Provided information too large to process")
status_code = 413
def __init__(self, *args, **kwargs):
super(LimitExceeded, self).__init__(*args, **kwargs)
self.retry_after = (int(kwargs['retry']) if kwargs.get('retry')
else None)
class ServiceUnavailable(BarbicanException):
message = u._("The request returned 503 Service Unavilable. This "
"generally occurs on service overload or other transient "
"outage.")
def __init__(self, *args, **kwargs):
super(ServiceUnavailable, self).__init__(*args, **kwargs)
self.retry_after = (int(kwargs['retry']) if kwargs.get('retry')
else None)
class ServerError(BarbicanException):
message = u._("The request returned 500 Internal Server Error.")
class UnexpectedStatus(BarbicanException):
message = u._("The request returned an unexpected status: %(status)s."
"\n\nThe response body:\n%(body)s")
class InvalidContentType(BarbicanException):
message = u._("Invalid content type %(content_type)s")
class InvalidContentEncoding(BarbicanException):
message = u._("Invalid content encoding %(content_encoding)s")
class BadRegistryConnectionConfiguration(BarbicanException):
message = u._("Registry was not configured correctly on API server. "
"Reason: %(reason)s")
class BadStoreConfiguration(BarbicanException):
message = u._("Store %(store_name)s could not be configured correctly. "
"Reason: %(reason)s")
class BadDriverConfiguration(BarbicanException):
message = u._("Driver %(driver_name)s could not be configured correctly. "
"Reason: %(reason)s")
class StoreDeleteNotSupported(BarbicanException):
message = u._("Deleting images from this store is not supported.")
class StoreAddDisabled(BarbicanException):
message = u._("Configuration for store failed. Adding images to this "
"store is disabled.")
class InvalidNotifierStrategy(BarbicanException):
message = u._("'%(strategy)s' is not an available notifier strategy.")
class MaxRedirectsExceeded(BarbicanException):
message = u._("Maximum redirects (%(redirects)s) was exceeded.")
class InvalidRedirect(BarbicanException):
message = u._("Received invalid HTTP redirect.")
class NoServiceEndpoint(BarbicanException):
message = u._("Response from Keystone does not contain a "
"Barbican endpoint.")
class RegionAmbiguity(BarbicanException):
message = u._("Multiple 'image' service matches for region %(region)s. "
"This generally means that a region is required and you "
"have not supplied one.")
class WorkerCreationFailure(BarbicanException):
message = u._("Server worker creation failed: %(reason)s.")
class SchemaLoadError(BarbicanException):
message = u._("Unable to load schema: %(reason)s")
class InvalidObject(BarbicanHTTPException):
status_code = 400
def __init__(self, *args, **kwargs):
self.invalid_property = kwargs.get('property')
self.message = u._("Failed to validate JSON information: ")
self.client_message = u._("Provided object does not match "
"schema '{schema}': "
"{reason}").format(*args, **kwargs)
self.message = self.message + self.client_message
super(InvalidObject, self).__init__(*args, **kwargs)
class PayloadDecodingError(BarbicanHTTPException):
status_code = 400
message = u._("Error while attempting to decode payload.")
client_message = u._("Unable to decode request data.")
class UnsupportedField(BarbicanHTTPException):
message = u._("No support for value set on field '%(field)s' on "
"schema '%(schema)s': %(reason)s")
client_message = u._("Provided field value is not supported")
status_code = 400
def __init__(self, *args, **kwargs):
super(UnsupportedField, self).__init__(*args, **kwargs)
self.invalid_field = kwargs.get('field')
class FeatureNotImplemented(BarbicanException):
message = u._("Feature not implemented for value set on field "
"'%(field)s' on " "schema '%(schema)s': %(reason)s")
def __init__(self, *args, **kwargs):
super(FeatureNotImplemented, self).__init__(*args, **kwargs)
self.invalid_field = kwargs.get('field')
class UnsupportedHeaderFeature(BarbicanException):
message = u._("Provided header feature is unsupported: %(feature)s")
class InUseByStore(BarbicanException):
message = u._("The image cannot be deleted because it is in use through "
"the backend store outside of Barbican.")
class ImageSizeLimitExceeded(BarbicanException):
message = u._("The provided image is too large.")
class StoredKeyContainerNotFound(BarbicanException):
message = u._("Container %(container_id)s does not exist for stored "
"key certificate generation.")
class StoredKeyPrivateKeyNotFound(BarbicanException):
message = u._("Container %(container_id)s does not reference a private "
"key needed for stored key certificate generation.")
class InvalidUUIDInURI(BarbicanHTTPException):
message = u._("The provided UUID in the URI (%(uuid_string)s) is "
"malformed.")
client_message = u._("The provided UUID in the URI is malformed.")
status_code = 404
class InvalidCAID(BarbicanHTTPException):
message = u._("Invalid CA_ID: %(ca_id)s")
client_message = u._("The ca_id provided in the request is invalid")
status_code = 400
class CANotDefinedForProject(BarbicanHTTPException):
message = u._("CA specified by ca_id %(ca_id)s not defined for project: "
"%(project_id)s")
client_message = u._("The ca_id provided in the request is not defined "
"for this project")
status_code = 403
| |
#!/usr/bin/env python
import ctypes as ct
import ssw_lib
from Bio import SeqIO
import argparse
import os,sys
def get_parser():
parser = argparse.ArgumentParser(
description="""A simple read demultiplexer for Oxford Nanopore data.""",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("input", action=FileExist,
help="Path to fasta file.")
parser.add_argument("--barcodes",default="NB_barcodes.fasta", type=str,
help="Relative path to fasta file describing barcodes.")
parser.add_argument("--threshold", default=90, type=int,
help="Minimum match score to accept called barcodes.")
return parser
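# Illustrative invocation (the script name is an assumption):
#   python demultiplex.py reads.fasta --barcodes NB_barcodes.fasta --threshold 90
# One fasta file per called barcode is written next to the input file.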
def align_seq(seq,args):
resultdict=dict()
for bc_name in barcode_dict:
match,score = nucl_align(seq,barcode_dict[bc_name],"query",bc_name)
resultdict[match]=dict()
resultdict[match]["score"]=score
#print resultdict  # leftover debug output, disabled
results = sorted([(resultdict[x]["score"],x,resultdict[x]) for x in resultdict.keys()])[::-1]
#for result in results:
# print result
result = results[0]
score,ide,details=result
#print ide.split("_")[0],score,details
if score < args.threshold:
    # original code used a bare 'next' here, which is a no-op expression;
    # reads below the threshold are labelled unclassified
    ide = "unclassified"
return ide.split("_")[0],score
def nucl_align(sQSeq,sRSeq,query,target):
#pathtolibssw=pkg_resources.resource_filename('nanonet', 'libssw.so')
#ospathtolibssw=os.path.dirname(pathtolibssw)
sQId=query
sRId=target
lEle = []
dRc = {}
dEle2Int = {}
dInt2Ele = {}
lEle = ['A', 'C', 'G', 'T', 'N']
dRc = {'A':'T', 'C':'G', 'G':'C', 'T':'A', 'a':'T', 'c':'G', 'g':'C', 't':'A'}  # complement map (fixed A->T; unused in the current code path)
for i,ele in enumerate(lEle):
dEle2Int[ele] = i
dEle2Int[ele.lower()] = i
dInt2Ele[i] = ele
nEleNum = len(lEle)
lScore = [0 for i in xrange(nEleNum**2)]
for i in xrange(nEleNum-1):
for j in xrange(nEleNum-1):
if lEle[i] == lEle[j]:
lScore[i*nEleNum+j] = 3
else:
lScore[i*nEleNum+j] = -1
# translate score matrix to ctypes
mat = (len(lScore) * ct.c_int8) ()
mat[:] = lScore
# set flag
nFlag = 0
# This line should be the path to libssw.so but I can't get it to work.
ssw = ssw_lib.CSsw(".")
# build query profile
qNum = to_int(sQSeq, lEle, dEle2Int)
qProfile = ssw.ssw_init(qNum, ct.c_int32(len(sQSeq)), mat, len(lEle), 2)
# set mask len
if len(sQSeq) > 30:
nMaskLen = len(sQSeq) / 2
else:
nMaskLen = 15
# iter target sequence
rNum = to_int(sRSeq, lEle, dEle2Int)
# format of res: (nScore, nScore2, nRefBeg, nRefEnd, nQryBeg, nQryEnd, nRefEnd2, nCigarLen, lCigar)
res = align_one(ssw, qProfile, rNum, len(sRSeq), 3, 1, nFlag, nMaskLen)
resRc = None
# build cigar and trace back path
strand = 0
if resRc is None or res[0] > resRc[0]:
resPrint = res
strand = 0
sCigar, sQ, sA, sR = buildPath(sQSeq, sRSeq, res[4], res[2], res[8])
else:
resPrint = resRc
strand = 1
sCigar, sQ, sA, sR = buildPath(sQRcSeq, sRSeq, resRc[4], resRc[2], resRc[8])
#print 'target_name: {}\nquery_name: {}\noptimal_alignment_score: {}\t'.format(sRId, sQId, resPrint[0])
#print 'suboptimal_alignment_score: {}\t'.format(resPrint[1])
#print res
ssw.init_destroy(qProfile)
return sRId,resPrint[0]
def to_int(seq, lEle, dEle2Int):
"""
translate a sequence into numbers
@param seq a sequence
"""
num_decl = len(seq) * ct.c_int8
num = num_decl()
for i,ele in enumerate(seq):
try:
n = dEle2Int[ele]
except KeyError:
n = dEle2Int[lEle[-1]]
finally:
num[i] = n
return num
def align_one(ssw, qProfile, rNum, nRLen, nOpen, nExt, nFlag, nMaskLen):
"""
align one pair of sequences
@param qProfile query profile
@param rNum number array for reference
@param nRLen length of reference sequence
@param nFlag alignment flag
@param nMaskLen mask length
"""
res = ssw.ssw_align(qProfile, rNum, ct.c_int32(nRLen), nOpen, nExt, nFlag, 0, 0, nMaskLen)
nScore = res.contents.nScore
nScore2 = res.contents.nScore2
nRefBeg = res.contents.nRefBeg
nRefEnd = res.contents.nRefEnd
nQryBeg = res.contents.nQryBeg
nQryEnd = res.contents.nQryEnd
nRefEnd2 = res.contents.nRefEnd2
lCigar = [res.contents.sCigar[idx] for idx in range(res.contents.nCigarLen)]
nCigarLen = res.contents.nCigarLen
ssw.align_destroy(res)
return (nScore, nScore2, nRefBeg, nRefEnd, nQryBeg, nQryEnd, nRefEnd2, nCigarLen, lCigar)
def buildPath(q, r, nQryBeg, nRefBeg, lCigar):
"""
build cigar string and align path based on cigar array returned by ssw_align
@param q query sequence
@param r reference sequence
@param nQryBeg begin position of query sequence
@param nRefBeg begin position of reference sequence
@param lCigar cigar array
"""
sCigarInfo = 'MIDNSHP=X'
sCigar = ''
sQ = ''
sA = ''
sR = ''
nQOff = nQryBeg
nROff = nRefBeg
for x in lCigar:
n = x >> 4
m = x & 15
if m > 8:
c = 'M'
else:
c = sCigarInfo[m]
sCigar += str(n) + c
if c == 'M':
sQ += q[nQOff : nQOff+n]
sA += ''.join(['|' if q[nQOff+j] == r[nROff+j] else '*' for j in xrange(n)])
sR += r[nROff : nROff+n]
nQOff += n
nROff += n
elif c == 'I':
sQ += q[nQOff : nQOff+n]
sA += ' ' * n
sR += '-' * n
nQOff += n
elif c == 'D':
sQ += '-' * n
sA += ' ' * n
sR += r[nROff : nROff+n]
nROff += n
return sCigar, sQ, sA, sR
class FileExist(argparse.Action):
"""Check if the input file exist."""
def __call__(self, parser, namespace, values, option_string=None):
if not os.path.exists(values):
raise RuntimeError("File/path for '{}' does not exist, {}".format(self.dest, values))
setattr(namespace, self.dest, values)
def parse_barcodes(barcode_file):
#print "parsing barcodes"
barcode_list = list()
barcode_list.append("uncalssified")
barcode_dict = dict()
barcode_sequences = SeqIO.parse(open(barcode_file),'fasta')
for barcode in barcode_sequences:
name, sequence = barcode.id, str(barcode.seq)
barcode_dict[name]=sequence
barcode_list.append(name)
barcode_dict[name+"_rev"]=str(barcode.reverse_complement().seq)
#print barcode_list
#for barcode in barcode_dict:
# print barcode, barcode_dict[barcode]
#sys.exit()
return barcode_dict,barcode_list
def main():
args = get_parser().parse_args()
global barcode_dict
barcode_dict,barcode_list=parse_barcodes(args.barcodes)
"""barcode_dict = {
'NB01': 'GGTGCTGAAGAAAGTTGTCGGTGTCTTTGTGTTAACCTTT',
'NB01_rev': 'AAGGTTAACACAAAGACACCGACAACTTTCTTCAGCACCAGGTTA',
'NB02': 'GGTGCTGTCGATTCCGTTTGTAGTCGTCTGTTTAACCTTT',
'NB02_rev': 'AAGGTTAAACAGACGACTACAAACGGAATCGACAGCACCAGGTTA',
'NB03': 'GGTGCTGGAGTCTTGTGTCCCAGTTACCAGGTTAACCTTT',
'NB03_rev': 'AAGGTTAACCTGGTAACTGGGACACAAGACTCCAGCACCAGGTTA',
'NB04': 'GGTGCTGTTCGGATTCTATCGTGTTTCCCTATTAACCTTT',
'NB04_rev': 'AAGGTTAATAGGGAAACACGATAGAATCCGAACAGCACCAGGTTA',
'NB05': 'GGTGCTGCTTGTCCAGGGTTTGTGTAACCTTTTAACCTTT',
'NB05_rev': 'AAGGTTAAAAGGTTACACAAACCCTGGACAAGCAGCACCAGGTTA',
'NB06': 'GGTGCTGTTCTCGCAAAGGCAGAAAGTAGTCTTAACCTTT',
'NB06_rev': 'AAGGTTAAGACTACTTTCTGCCTTTGCGAGAACAGCACCAGGTTA',
'NB07': 'GGTGCTGGTGTTACCGTGGGAATGAATCCTTTTAACCTTT',
'NB07_rev': 'AAGGTTAAAAGGATTCATTCCCACGGTAACACCAGCACCAGGTTA',
'NB08': 'GGTGCTGTTCAGGGAACAAACCAAGTTACGTTTAACCTTT',
'NB08_rev': 'AAGGTTAAACGTAACTTGGTTTGTTCCCTGAACAGCACCAGGTTA',
'NB09': 'GGTGCTGAACTAGGCACAGCGAGTCTTGGTTTTAACCTTT',
'NB09_rev': 'AAGGTTAAAACCAAGACTCGCTGTGCCTAGTTCAGCACCAGGTTA',
'NB10': 'GGTGCTGAAGCGTTGAAACCTTTGTCCTCTCTTAACCTTT',
'NB10_rev': 'AAGGTTAAGAGAGGACAAAGGTTTCAACGCTTCAGCACCAGGTTA',
'NB11': 'GGTGCTGGTTTCATCTATCGGAGGGAATGGATTAACCTTT',
'NB11_rev': 'AAGGTTAATCCATTCCCTCCGATAGATGAAACCAGCACCAGGTTA',
'NB12': 'GGTGCTGCAGGTAGAAAGAAGCAGAATCGGATTAACCTTT',
'NB12_rev': 'AAGGTTAATCCGATTCTGCTTCTTTCTACCTGCAGCACCAGGTTA'
}
barcode_list = ('NB01','NB02','NB03','NB04','NB05','NB06','NB07','NB08','NB09','NB10','NB11','NB12','unclassified')
"""
resultdict=dict()
input_file = args.input
fasta_sequences = SeqIO.parse(open(input_file),'fasta')
for fasta in fasta_sequences:
name, sequence = fasta.id, str(fasta.seq)
#new_sequence = some_function(sequence)
#print ">"+str(name)
#print sequence
id_,score=align_seq(sequence,args)
print str(name),id_,score
if id_ not in resultdict:
resultdict[id_]=dict()
resultdict[id_]["counter"]=0
resultdict[id_]["score"]=list()
resultdict[id_]["sequences"]=list()
resultdict[id_]["counter"]+=1
resultdict[id_]["score"].append(score)
resultdict[id_]["sequences"].append(fasta)
##print resultdict
print "Score Threshold:",args.threshold
for ids in barcode_list:
if ids in resultdict.keys():
print ids,
print resultdict[ids]["counter"],
print "Mean:", (sum(resultdict[ids]["score"])/resultdict[ids]["counter"])
output_handle=open(os.path.join(os.path.dirname(input_file),ids+"_"+os.path.basename(input_file)),"w")
SeqIO.write(resultdict[ids]["sequences"], output_handle, "fasta")
output_handle.close()
else:
print ids,"0","Mean:N/A"
if __name__ == "__main__":
main()
| |
#!/usr/bin/python
# wogri@google.com
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This is not an official Google product.
"""A script to go through dovecot mailboxes and snooze mails until a given time.
Works like this:
Once you have a predefined set of folders (see below on how to
subscribe your user to these folders), this script will go through the
folders every minute (after setting up a cron-job) and add an IMAP label
to the messages they contain. The label will be called something like
"MoveAt123456789", where the number is the Unix timestamp at which the mail
should be moved back into the user's inbox. So when you drag a mail into one
of these folders, it will stay there until the MoveAt timestamp has been
reached; the script then removes the IMAP label, moves the mail back into
the user's inbox and marks it as unread. Run the script from cron every
minute like this:
./dovecot-snooze.py user1 user2 user3
Add -h for help.
The separator syntax for your 'Snooze' folder can be a dot or a slash depending
on your dovecot configuration. Be sure to update the paths in the 'FOLDERS' array
to match your user's mailbox tree.
To subscribe a new user:
$ user=maryjane
doveadm mailbox create -s -u $user 'Snooze'
doveadm mailbox create -s -u $user 'Snooze.Until Friday 18:00'
doveadm mailbox create -s -u $user 'Snooze.Until Monday 7:00'
doveadm mailbox create -s -u $user 'Snooze.Until 7:00'
doveadm mailbox create -s -u $user 'Snooze.Until 18:00'
doveadm mailbox create -s -u $user 'Snooze.For 1 Hour'
"""
import argparse
import datetime
import re
import subprocess
import sys
FOLDERS = ['Snooze.Until Friday 18:00',
'Snooze.Until Monday 7:00',
'Snooze.Until 7:00',
'Snooze.Until 18:00',
'Snooze.For 1 Hour']
def Debug(msg):
if args.debug:
sys.stdout.write(msg + '\n')
def Error(msg):
sys.stderr.write(msg + '\n')
def UnixTime(mytime):
epoch = datetime.datetime.fromtimestamp(0)
return int((mytime - epoch).total_seconds())
class Mail(object):
"""The class that handles snoozing and un-snoozing for a single e-mail."""
def __init__(self, uid, myfolder):
self.uid = uid
self.labels = []
self.folder = myfolder
def CheckRelease(self):
"""Check if a mail is ready for release, then move it back to the inbox."""
for label in self.labels:
result = re.search('MoveAt(.*)', label, re.IGNORECASE)
if result:
timestamp = result.group(1)
if int(timestamp) < UnixTime(datetime.datetime.now()):
self.MoveBackToInbox(timestamp)
else:
Debug('moving %s at %s' % (self.uid, timestamp))
def MoveBackToInbox(self, timestamp):
"""Moves mail back to the inbox. Removes labels and sets it unread."""
Debug('moving %s back to inbox!' % self.uid)
cmd = [args.doveadm, 'flags', 'remove', '-u', user,
'\Seen MoveAt%s' % timestamp, 'mailbox', self.folder, 'uid',
self.uid]
if 0 != subprocess.call(cmd):
Error('flags remove before move failed!')
Error(' '.join(cmd))
# move back to inbox:
cmd = [args.doveadm, 'move', '-u', user, 'INBOX', 'mailbox', self.folder,
'uid', self.uid]
if 0 != subprocess.call(cmd):
Error('move back to inbox failed!')
Error(' '.join(cmd))
def SetSnooze(self):
"""Sets a label on a mail with a unix timestamp on how long to snooze."""
newflag = self.FindSnooze()
if not newflag or newflag in self.labels:
return None
if 0 != subprocess.call([args.doveadm,
'flags',
'add',
'-u',
user,
newflag,
'mailbox',
self.folder,
'uid',
self.uid]):
Error('flags add failed!')
def FindSnooze(self):
"""Finds out how long to snooze a mail for.
Returns: unix time when to un-snooze.
"""
# find out if the Mail has been marked to move already
for label in self.labels:
if re.search('MoveAt(.*)', label, re.IGNORECASE):
return None
snooze_until = None
now = datetime.datetime.now()
Debug('now is %d' % UnixTime(now))
today = now.replace(hour=0, minute=0, second=0, microsecond=0)
day_of_week = today.weekday()
if self.folder == FOLDERS[4]:
snooze_until = now + datetime.timedelta(hours=1)
elif self.folder == FOLDERS[3]:
snooze_until = today + datetime.timedelta(hours=18)
if snooze_until < now:
snooze_until += datetime.timedelta(days=1)
elif self.folder == FOLDERS[2]:
snooze_until = today + datetime.timedelta(hours=7)
if snooze_until < now:
snooze_until += datetime.timedelta(days=1)
elif self.folder == FOLDERS[1]:
snooze_days = 7 - day_of_week
if snooze_days == 0:
snooze_days = 7
snooze_until = today + datetime.timedelta(days=snooze_days, hours=7)
elif self.folder == FOLDERS[0]:
snooze_days = 4 - day_of_week
if snooze_days < 1:
snooze_days += 7
snooze_until = today + datetime.timedelta(days=snooze_days, hours=18)
else:
return None
unix_time = UnixTime(snooze_until)
Debug('snoozing %s until %s, this is at %d' % (self.uid, snooze_until,
unix_time))
return 'MoveAt%d' % unix_time
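# Worked example (illustrative): if it is Wednesday 10:00 (day_of_week == 2)
# and the mail sits in 'Snooze.Until Friday 18:00' (FOLDERS[0]), then
# snooze_days = 4 - 2 = 2, snooze_until is Friday 18:00, and the label
# becomes 'MoveAt<unix timestamp of Friday 18:00>'.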
parser = argparse.ArgumentParser(description=(
'Marks snoozed mails with a timestamp and moves them back to the inbox. Only '
'works with the dovecot mailserver. The last argument of this command will '
'have to be one or more users (e. g. dovecot-snooze.py '
'--doveadm=/usr/bin/doveadm john mary joe)'))
parser.add_argument('--doveadm', type=str, nargs='?',
default='/usr/bin/doveadm', help=(
'path to doveadm binary, defaults to /usr/bin/doveadm'))
parser.add_argument('--debug', type=bool, nargs='?', default=False,
help=('debug output, set to 1 or true to see more details '
'about what is going on.'))
parser.add_argument('users', nargs=argparse.REMAINDER)
args = parser.parse_args()
if not args.users:
Error('The last argument of this program must be one or more users '
'(separated by spaces)')
exit(1)
for user in args.users:
for folder in FOLDERS:
try:
mails = []
current_mail = None
cmd = [args.doveadm, 'fetch', '-u', user, 'uid flags', 'mailbox', folder,
'UNDELETED']
Debug(' '.join(cmd))
meta = subprocess.check_output(cmd)
lines = meta.split('\n')
for line in lines:
result = re.search('uid: (.*)', line, re.IGNORECASE)
if result:
if current_mail:
mails.append(current_mail)
current_mail = Mail(result.group(1), folder)
result = re.search('flags: (.*)', line, re.IGNORECASE)
if result:
current_mail.labels = result.group(1).split(' ')
if current_mail:
mails.append(current_mail)
for mail in mails:
mail.SetSnooze()
mail.CheckRelease()
except Exception:
  Error('unexpected Error!')
| |
"""Tests for the object inspection functionality.
"""
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from __future__ import print_function
import os
import re
import sys
import nose.tools as nt
from .. import oinspect
from IPython.core.magic import (Magics, magics_class, line_magic,
cell_magic, line_cell_magic,
register_line_magic, register_cell_magic,
register_line_cell_magic)
from decorator import decorator
from IPython.testing.decorators import skipif
from IPython.testing.tools import AssertPrints
from IPython.utils.path import compress_user
from IPython.utils import py3compat
from IPython.utils.signatures import Signature, Parameter
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
inspector = oinspect.Inspector()
ip = get_ipython()
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
# WARNING: since this test checks the line number where a function is
# defined, if any code is inserted above, the following line will need to be
# updated. Do NOT insert any whitespace between the next line and the function
# definition below.
THIS_LINE_NUMBER = 43 # Put here the actual number of this line
def test_find_source_lines():
nt.assert_equal(oinspect.find_source_lines(test_find_source_lines),
THIS_LINE_NUMBER+1)
# A couple of utilities to ensure these tests work the same from a source or a
# binary install
def pyfile(fname):
return os.path.normcase(re.sub('.py[co]$', '.py', fname))
def match_pyfiles(f1, f2):
nt.assert_equal(pyfile(f1), pyfile(f2))
def test_find_file():
match_pyfiles(oinspect.find_file(test_find_file), os.path.abspath(__file__))
def test_find_file_decorated1():
@decorator
def noop1(f):
def wrapper(*a, **kw):
    return f(*a, **kw)
return wrapper
@noop1
def f(x):
"My docstring"
match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__))
nt.assert_equal(f.__doc__, "My docstring")
def test_find_file_decorated2():
@decorator
def noop2(f, *a, **kw):
return f(*a, **kw)
@noop2
@noop2
@noop2
def f(x):
"My docstring 2"
match_pyfiles(oinspect.find_file(f), os.path.abspath(__file__))
nt.assert_equal(f.__doc__, "My docstring 2")
def test_find_file_magic():
run = ip.find_line_magic('run')
nt.assert_not_equal(oinspect.find_file(run), None)
# A few generic objects we can then inspect in the tests below
class Call(object):
"""This is the class docstring."""
def __init__(self, x, y=1):
"""This is the constructor docstring."""
def __call__(self, *a, **kw):
"""This is the call docstring."""
def method(self, x, z=2):
"""Some method's docstring"""
class HasSignature(object):
"""This is the class docstring."""
__signature__ = Signature([Parameter('test', Parameter.POSITIONAL_OR_KEYWORD)])
def __init__(self, *args):
"""This is the init docstring"""
class SimpleClass(object):
def method(self, x, z=2):
"""Some method's docstring"""
class OldStyle:
"""An old-style class for testing."""
pass
def f(x, y=2, *a, **kw):
"""A simple function."""
def g(y, z=3, *a, **kw):
pass # no docstring
@register_line_magic
def lmagic(line):
"A line magic"
@register_cell_magic
def cmagic(line, cell):
"A cell magic"
@register_line_cell_magic
def lcmagic(line, cell=None):
"A line/cell magic"
@magics_class
class SimpleMagics(Magics):
@line_magic
def Clmagic(self, cline):
"A class-based line magic"
@cell_magic
def Ccmagic(self, cline, ccell):
"A class-based cell magic"
@line_cell_magic
def Clcmagic(self, cline, ccell=None):
"A class-based line/cell magic"
class Awkward(object):
def __getattr__(self, name):
raise Exception(name)
class NoBoolCall:
"""
callable with `__bool__` raising should still be inspect-able.
"""
def __call__(self):
"""does nothing"""
pass
def __bool__(self):
"""just raise NotImplemented"""
raise NotImplementedError('Must be implemented')
class SerialLiar(object):
"""Attribute accesses always get another copy of the same class.
unittest.mock.call does something similar, but it's not ideal for testing
as the failure mode is to eat all your RAM. This gives up after 10k levels.
"""
def __init__(self, max_fibbing_twig, lies_told=0):
if lies_told > 10000:
raise RuntimeError('Nose too long, honesty is the best policy')
self.max_fibbing_twig = max_fibbing_twig
self.lies_told = lies_told
max_fibbing_twig[0] = max(max_fibbing_twig[0], lies_told)
def __getattr__(self, item):
return SerialLiar(self.max_fibbing_twig, self.lies_told + 1)
def check_calltip(obj, name, call, docstring):
"""Generic check pattern all calltip tests will use"""
info = inspector.info(obj, name)
call_line, ds = oinspect.call_tip(info)
nt.assert_equal(call_line, call)
nt.assert_equal(ds, docstring)
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
def test_calltip_class():
check_calltip(Call, 'Call', 'Call(x, y=1)', Call.__init__.__doc__)
def test_calltip_instance():
c = Call(1)
check_calltip(c, 'c', 'c(*a, **kw)', c.__call__.__doc__)
def test_calltip_method():
c = Call(1)
check_calltip(c.method, 'c.method', 'c.method(x, z=2)', c.method.__doc__)
def test_calltip_function():
check_calltip(f, 'f', 'f(x, y=2, *a, **kw)', f.__doc__)
def test_calltip_function2():
check_calltip(g, 'g', 'g(y, z=3, *a, **kw)', '<no docstring>')
@skipif(sys.version_info >= (3, 5))
def test_calltip_builtin():
check_calltip(sum, 'sum', None, sum.__doc__)
def test_calltip_line_magic():
check_calltip(lmagic, 'lmagic', 'lmagic(line)', "A line magic")
def test_calltip_cell_magic():
check_calltip(cmagic, 'cmagic', 'cmagic(line, cell)', "A cell magic")
def test_calltip_line_cell_magic():
check_calltip(lcmagic, 'lcmagic', 'lcmagic(line, cell=None)',
"A line/cell magic")
def test_class_magics():
cm = SimpleMagics(ip)
ip.register_magics(cm)
check_calltip(cm.Clmagic, 'Clmagic', 'Clmagic(cline)',
"A class-based line magic")
check_calltip(cm.Ccmagic, 'Ccmagic', 'Ccmagic(cline, ccell)',
"A class-based cell magic")
check_calltip(cm.Clcmagic, 'Clcmagic', 'Clcmagic(cline, ccell=None)',
"A class-based line/cell magic")
def test_info():
"Check that Inspector.info fills out various fields as expected."
i = inspector.info(Call, oname='Call')
nt.assert_equal(i['type_name'], 'type')
expected_class = str(type(type))  # <class 'type'> (Python 3) or <type 'type'>
nt.assert_equal(i['base_class'], expected_class)
if sys.version_info > (3,):
nt.assert_regex(i['string_form'], "<class 'IPython.core.tests.test_oinspect.Call'( at 0x[0-9a-f]{1,9})?>")
fname = __file__
if fname.endswith(".pyc"):
fname = fname[:-1]
# case-insensitive comparison needed on some filesystems
# e.g. Windows:
nt.assert_equal(i['file'].lower(), compress_user(fname).lower())
nt.assert_equal(i['definition'], None)
nt.assert_equal(i['docstring'], Call.__doc__)
nt.assert_equal(i['source'], None)
nt.assert_true(i['isclass'])
_self_py2 = '' if py3compat.PY3 else 'self, '
nt.assert_equal(i['init_definition'], "Call(%sx, y=1)" % _self_py2)
nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
i = inspector.info(Call, detail_level=1)
nt.assert_not_equal(i['source'], None)
nt.assert_equal(i['docstring'], None)
c = Call(1)
c.__doc__ = "Modified instance docstring"
i = inspector.info(c)
nt.assert_equal(i['type_name'], 'Call')
nt.assert_equal(i['docstring'], "Modified instance docstring")
nt.assert_equal(i['class_docstring'], Call.__doc__)
nt.assert_equal(i['init_docstring'], Call.__init__.__doc__)
nt.assert_equal(i['call_docstring'], Call.__call__.__doc__)
# Test old-style classes, which for example may not have an __init__ method.
if not py3compat.PY3:
i = inspector.info(OldStyle)
nt.assert_equal(i['type_name'], 'classobj')
i = inspector.info(OldStyle())
nt.assert_equal(i['type_name'], 'instance')
nt.assert_equal(i['docstring'], OldStyle.__doc__)
def test_class_signature():
info = inspector.info(HasSignature, 'HasSignature')
nt.assert_equal(info['init_definition'], "HasSignature(test)")
nt.assert_equal(info['init_docstring'], HasSignature.__init__.__doc__)
def test_info_awkward():
# Just test that this doesn't throw an error.
inspector.info(Awkward())
def test_bool_raise():
inspector.info(NoBoolCall())
def test_info_serialliar():
fib_tracker = [0]
i = inspector.info(SerialLiar(fib_tracker))
# Nested attribute access should be cut off at 100 levels deep to avoid
# infinite loops: https://github.com/ipython/ipython/issues/9122
nt.assert_less(fib_tracker[0], 9000)
def test_calldef_none():
# We should ignore __call__ for all of these.
for obj in [f, SimpleClass().method, any, str.upper]:
print(obj)
i = inspector.info(obj)
nt.assert_is(i['call_def'], None)
if py3compat.PY3:
exec("def f_kwarg(pos, *, kwonly): pass")
@skipif(not py3compat.PY3)
def test_definition_kwonlyargs():
i = inspector.info(f_kwarg, oname='f_kwarg') # analysis:ignore
nt.assert_equal(i['definition'], "f_kwarg(pos, *, kwonly)")
def test_getdoc():
class A(object):
"""standard docstring"""
pass
class B(object):
"""standard docstring"""
def getdoc(self):
return "custom docstring"
class C(object):
"""standard docstring"""
def getdoc(self):
return None
a = A()
b = B()
c = C()
nt.assert_equal(oinspect.getdoc(a), "standard docstring")
nt.assert_equal(oinspect.getdoc(b), "custom docstring")
nt.assert_equal(oinspect.getdoc(c), "standard docstring")
def test_empty_property_has_no_source():
i = inspector.info(property(), detail_level=1)
nt.assert_is(i['source'], None)
def test_property_sources():
import zlib
class A(object):
@property
def foo(self):
return 'bar'
foo = foo.setter(lambda self, v: setattr(self, 'bar', v))
id = property(id)
compress = property(zlib.compress)
i = inspector.info(A.foo, detail_level=1)
nt.assert_in('def foo(self):', i['source'])
nt.assert_in('lambda self, v:', i['source'])
i = inspector.info(A.id, detail_level=1)
nt.assert_in('fget = <function id>', i['source'])
i = inspector.info(A.compress, detail_level=1)
nt.assert_in('fget = <function zlib.compress>', i['source'])
def test_property_docstring_is_in_info_for_detail_level_0():
class A(object):
@property
def foobar(self):
"""This is `foobar` property."""
pass
ip.user_ns['a_obj'] = A()
nt.assert_equal(
'This is `foobar` property.',
ip.object_inspect('a_obj.foobar', detail_level=0)['docstring'])
ip.user_ns['a_cls'] = A
nt.assert_equal(
'This is `foobar` property.',
ip.object_inspect('a_cls.foobar', detail_level=0)['docstring'])
def test_pdef():
# See gh-1914
def foo(): pass
inspector.pdef(foo, 'foo')
def test_pinfo_nonascii():
# See gh-1177
from . import nonascii2
ip.user_ns['nonascii2'] = nonascii2
ip._inspect('pinfo', 'nonascii2', detail_level=1)
def test_pinfo_magic():
with AssertPrints('Docstring:'):
ip._inspect('pinfo', 'lsmagic', detail_level=0)
with AssertPrints('Source:'):
ip._inspect('pinfo', 'lsmagic', detail_level=1)
def test_init_colors():
# ensure colors are not present in signature info
info = inspector.info(HasSignature)
init_def = info['init_definition']
nt.assert_not_in('[0m', init_def)
def test_builtin_init():
info = inspector.info(list)
init_def = info['init_definition']
# Python < 3.4 can't get init definition from builtins,
# but still exercise the inspection in case of error-raising bugs.
if sys.version_info >= (3,4):
nt.assert_is_not_none(init_def)
| |
# Copyright 2014 Violin Memory, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Violin Memory 6000 Series All-Flash Array Common Driver
"""
import mock
from cinder import exception
from cinder import test
from cinder.tests.unit import fake_vmem_client as vmemclient
from cinder.volume import configuration as conf
from cinder.volume.drivers.violin import v6000_common
VOLUME_ID = "abcdabcd-1234-abcd-1234-abcdeffedcba"
VOLUME = {
"name": "volume-" + VOLUME_ID,
"id": VOLUME_ID,
"display_name": "fake_volume",
"size": 2,
"host": "irrelevant",
"volume_type": None,
"volume_type_id": None,
}
SNAPSHOT_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbb"
SNAPSHOT = {
"name": "snapshot-" + SNAPSHOT_ID,
"id": SNAPSHOT_ID,
"volume_id": VOLUME_ID,
"volume_name": "volume-" + VOLUME_ID,
"volume_size": 2,
"display_name": "fake_snapshot",
"volume": VOLUME,
}
SRC_VOL_ID = "abcdabcd-1234-abcd-1234-abcdeffedcbc"
SRC_VOL = {
"name": "volume-" + SRC_VOL_ID,
"id": SRC_VOL_ID,
"display_name": "fake_src_vol",
"size": 2,
"host": "irrelevant",
"volume_type": None,
"volume_type_id": None,
}
INITIATOR_IQN = "iqn.1111-22.org.debian:11:222"
CONNECTOR = {
"initiator": INITIATOR_IQN,
"host": "irrelevant"
}
class V6000CommonTestCase(test.TestCase):
"""Test cases for VMEM V6000 driver common class."""
def setUp(self):
super(V6000CommonTestCase, self).setUp()
self.conf = self.setup_configuration()
self.driver = v6000_common.V6000Common(self.conf)
self.driver.container = 'myContainer'
self.driver.device_id = 'ata-VIOLIN_MEMORY_ARRAY_23109R00000022'
self.stats = {}
def tearDown(self):
super(V6000CommonTestCase, self).tearDown()
def setup_configuration(self):
config = mock.Mock(spec=conf.Configuration)
config.volume_backend_name = 'v6000_common'
config.san_ip = '1.1.1.1'
config.san_login = 'admin'
config.san_password = ''
config.san_thin_provision = False
config.san_is_local = False
config.gateway_mga = '2.2.2.2'
config.gateway_mgb = '3.3.3.3'
config.use_igroups = False
config.request_timeout = 300
config.container = 'myContainer'
return config
@mock.patch('vmemclient.open')
def setup_mock_client(self, _m_client, m_conf=None):
"""Create a fake backend communication factory.
The vmemclient creates a VShare connection object (for V6000
devices) and returns it for use on a call to vmemclient.open().
"""
# configure the vshare object mock with defaults
_m_vshare = mock.Mock(name='VShare',
version='1.1.1',
spec=vmemclient.mock_client_conf)
# if m_conf, clobber the defaults with it
if m_conf:
_m_vshare.configure_mock(**m_conf)
# set calls to vmemclient.open() to return this mocked vshare object
_m_client.return_value = _m_vshare
return _m_client
def setup_mock_vshare(self, m_conf=None):
"""Create a fake VShare communication object."""
_m_vshare = mock.Mock(name='VShare',
version='1.1.1',
spec=vmemclient.mock_client_conf)
if m_conf:
_m_vshare.configure_mock(**m_conf)
return _m_vshare
def test_check_for_setup_error(self):
"""No setup errors are found."""
bn1 = ("/vshare/state/local/container/%s/threshold/usedspace"
"/threshold_hard_val" % self.driver.container)
bn2 = ("/vshare/state/local/container/%s/threshold/provision"
"/threshold_hard_val" % self.driver.container)
bn_thresholds = {bn1: 0, bn2: 100}
conf = {
'basic.get_node_values.return_value': bn_thresholds,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._is_supported_vmos_version = mock.Mock(return_value=True)
result = self.driver.check_for_setup_error()
self.driver._is_supported_vmos_version.assert_called_with(
self.driver.vip.version)
self.driver.vip.basic.get_node_values.assert_called_with(
[bn1, bn2])
self.assertIsNone(result)
def test_check_for_setup_error_no_container(self):
"""No container was configured."""
self.driver.vip = self.setup_mock_vshare()
self.driver.container = ''
self.assertRaises(exception.ViolinInvalidBackendConfig,
self.driver.check_for_setup_error)
def test_check_for_setup_error_invalid_usedspace_threshold(self):
"""The array's usedspace threshold was altered (not supported)."""
bn1 = ("/vshare/state/local/container/%s/threshold/usedspace"
"/threshold_hard_val" % self.driver.container)
bn2 = ("/vshare/state/local/container/%s/threshold/provision"
"/threshold_hard_val" % self.driver.container)
bn_thresholds = {bn1: 99, bn2: 100}
conf = {
'basic.get_node_values.return_value': bn_thresholds,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._is_supported_vmos_version = mock.Mock(return_value=True)
self.assertRaises(exception.ViolinInvalidBackendConfig,
self.driver.check_for_setup_error)
def test_check_for_setup_error_invalid_provisionedspace_threshold(self):
"""The array's provisioned threshold was altered (not supported)."""
bn1 = ("/vshare/state/local/container/%s/threshold/usedspace"
"/threshold_hard_val" % self.driver.container)
bn2 = ("/vshare/state/local/container/%s/threshold/provision"
"/threshold_hard_val" % self.driver.container)
bn_thresholds = {bn1: 0, bn2: 99}
conf = {
'basic.get_node_values.return_value': bn_thresholds,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._is_supported_vmos_version = mock.Mock(return_value=True)
self.assertRaises(exception.ViolinInvalidBackendConfig,
self.driver.check_for_setup_error)
def test_create_lun(self):
"""Lun is successfully created."""
response = {'code': 0, 'message': 'LUN create: success!'}
conf = {
'lun.create_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(return_value=response)
result = self.driver._create_lun(VOLUME)
self.driver._send_cmd.assert_called_with(
self.driver.vip.lun.create_lun, 'LUN create: success!',
self.driver.container, VOLUME['id'], VOLUME['size'], 1, "0",
"0", "w", 1, 512, False, False, None)
self.assertIsNone(result)
def test_create_lun_lun_already_exists(self):
"""Array returns error that the lun already exists."""
response = {'code': 14005,
'message': 'LUN with name ... already exists'}
conf = {
'lun.create_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(
side_effect=exception.ViolinBackendErrExists(
response['message']))
self.assertIsNone(self.driver._create_lun(VOLUME))
def test_create_lun_create_fails_with_exception(self):
"""Array returns a out of space error."""
response = {'code': 512, 'message': 'Not enough space available'}
failure = exception.ViolinBackendErr
conf = {
'lun.create_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(
side_effect=failure(response['message']))
self.assertRaises(failure, self.driver._create_lun, VOLUME)
def test_delete_lun(self):
"""Lun is deleted successfully."""
response = {'code': 0, 'message': 'lun deletion started'}
success_msgs = ['lun deletion started', '']
conf = {
'lun.delete_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(return_value=response)
result = self.driver._delete_lun(VOLUME)
self.driver._send_cmd.assert_called_with(
self.driver.vip.lun.bulk_delete_luns,
success_msgs, self.driver.container, VOLUME['id'])
self.assertIsNone(result)
def test_delete_lun_empty_response_message(self):
"""Array bug where delete action returns no message."""
response = {'code': 0, 'message': ''}
conf = {
'lun.delete_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(return_value=response)
self.assertIsNone(self.driver._delete_lun(VOLUME))
def test_delete_lun_lun_already_deleted(self):
"""Array fails to delete a lun that doesn't exist."""
response = {'code': 14005, 'message': 'LUN ... does not exist.'}
conf = {
'lun.delete_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(
side_effect=exception.ViolinBackendErrNotFound(
response['message']))
self.assertIsNone(self.driver._delete_lun(VOLUME))
def test_delete_lun_delete_fails_with_exception(self):
"""Array returns a generic error."""
response = {'code': 14000, 'message': 'Generic error'}
failure = exception.ViolinBackendErr
conf = {
'lun.delete_lun.return_value': response
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(
side_effect=failure(response['message']))
self.assertRaises(failure, self.driver._delete_lun, VOLUME)
def test_extend_lun(self):
"""Volume extend completes successfully."""
new_volume_size = 10
response = {'code': 0, 'message': 'Success '}
conf = {
'lun.resize_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(return_value=response)
result = self.driver._extend_lun(VOLUME, new_volume_size)
self.driver._send_cmd.assert_called_with(
self.driver.vip.lun.resize_lun,
'Success', self.driver.container,
VOLUME['id'], new_volume_size)
self.assertIsNone(result)
def test_extend_lun_new_size_is_too_small(self):
"""Volume extend fails when new size would shrink the volume."""
new_volume_size = 0
response = {'code': 14036, 'message': 'Failure'}
conf = {
'lun.resize_lun.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(
side_effect=exception.ViolinBackendErr(message='fail'))
self.assertRaises(exception.ViolinBackendErr,
self.driver._extend_lun, VOLUME, new_volume_size)
def test_create_lun_snapshot(self):
"""Snapshot creation completes successfully."""
response = {'code': 0, 'message': 'success'}
success_msg = 'Snapshot create: success!'
conf = {
'snapshot.create_lun_snapshot.return_value': response
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(return_value=response)
result = self.driver._create_lun_snapshot(SNAPSHOT)
self.driver._send_cmd.assert_called_with(
self.driver.vip.snapshot.create_lun_snapshot, success_msg,
self.driver.container, SNAPSHOT['volume_id'], SNAPSHOT['id'])
self.assertIsNone(result)
def test_delete_lun_snapshot(self):
"""Snapshot deletion completes successfully."""
response = {'code': 0, 'message': 'success'}
success_msg = 'Snapshot delete: success!'
conf = {
'snapshot.delete_lun_snapshot.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.driver._send_cmd = mock.Mock(return_value=response)
result = self.driver._delete_lun_snapshot(SNAPSHOT)
self.driver._send_cmd.assert_called_with(
self.driver.vip.snapshot.delete_lun_snapshot, success_msg,
self.driver.container, SNAPSHOT['volume_id'], SNAPSHOT['id'])
self.assertIsNone(result)
def test_get_lun_id(self):
bn = "/vshare/config/export/container/%s/lun/%s/target/**" \
% (self.conf.container, VOLUME['id'])
response = {("/vshare/config/export/container/%s/lun"
"/%s/target/hba-a1/initiator/openstack/lun_id"
% (self.conf.container, VOLUME['id'])): 1}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
result = self.driver._get_lun_id(VOLUME['id'])
self.driver.vip.basic.get_node_values.assert_called_with(bn)
self.assertEqual(1, result)
def test_get_lun_id_with_no_lun_config(self):
response = {}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.assertRaises(exception.ViolinBackendErrNotFound,
self.driver._get_lun_id, VOLUME['id'])
def test_get_snapshot_id(self):
bn = ("/vshare/config/export/snapshot/container/%s/lun/%s/snap/%s"
"/target/**") % (self.conf.container, VOLUME['id'],
SNAPSHOT['id'])
response = {("/vshare/config/export/snapshot/container/%s/lun"
"/%s/snap/%s/target/hba-a1/initiator/openstack/lun_id"
% (self.conf.container, VOLUME['id'],
SNAPSHOT['id'])): 1}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
result = self.driver._get_snapshot_id(VOLUME['id'], SNAPSHOT['id'])
self.driver.vip.basic.get_node_values.assert_called_with(bn)
self.assertEqual(1, result)
def test_get_snapshot_id_with_no_lun_config(self):
response = {}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.assertRaises(exception.ViolinBackendErrNotFound,
self.driver._get_snapshot_id,
SNAPSHOT['volume_id'], SNAPSHOT['id'])
def test_send_cmd(self):
"""Command callback completes successfully."""
success_msg = 'success'
request_args = ['arg1', 'arg2', 'arg3']
response = {'code': 0, 'message': 'success'}
request_func = mock.Mock(return_value=response)
self.driver._fatal_error_code = mock.Mock(return_value=None)
result = self.driver._send_cmd(request_func, success_msg, request_args)
self.driver._fatal_error_code.assert_called_with(response)
self.assertEqual(response, result)
def test_send_cmd_request_timed_out(self):
"""The callback retry timeout hits immediately."""
success_msg = 'success'
request_args = ['arg1', 'arg2', 'arg3']
self.conf.request_timeout = 0
request_func = mock.Mock()
self.assertRaises(exception.ViolinRequestRetryTimeout,
self.driver._send_cmd,
request_func, success_msg, request_args)
def test_send_cmd_response_has_no_message(self):
"""The callback returns no message on the first call."""
success_msg = 'success'
request_args = ['arg1', 'arg2', 'arg3']
response1 = {'code': 0, 'message': None}
response2 = {'code': 0, 'message': 'success'}
request_func = mock.Mock(side_effect=[response1, response2])
self.driver._fatal_error_code = mock.Mock(return_value=None)
self.assertEqual(response2, self.driver._send_cmd(
    request_func, success_msg, request_args))
def test_send_cmd_response_has_fatal_error(self):
"""The callback response contains a fatal error code."""
success_msg = 'success'
request_args = ['arg1', 'arg2', 'arg3']
response = {'code': 14000, 'message': 'try again later.'}
failure = exception.ViolinBackendErr
request_func = mock.Mock(return_value=response)
self.driver._fatal_error_code = mock.Mock(
side_effect=failure(message='fail'))
self.assertRaises(failure, self.driver._send_cmd,
request_func, success_msg, request_args)
def test_get_igroup(self):
"""The igroup is verified and already exists."""
bn = '/vshare/config/igroup/%s' % CONNECTOR['host']
response = {bn: CONNECTOR['host']}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
result = self.driver._get_igroup(VOLUME, CONNECTOR)
self.driver.vip.basic.get_node_values.assert_called_with(bn)
self.assertEqual(CONNECTOR['host'], result)
def test_get_igroup_with_new_name(self):
"""The igroup is verified but must be created on the backend."""
response = {}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.vip = self.setup_mock_vshare(m_conf=conf)
self.assertEqual(CONNECTOR['host'],
self.driver._get_igroup(VOLUME, CONNECTOR))
def test_wait_for_export_state(self):
"""Queries to cluster nodes verify export state."""
bn = "/vshare/state/local/container/myContainer/lun/%s/usn_id" \
% VOLUME['id']
response = {bn: '012345'}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.mga = self.setup_mock_vshare(m_conf=conf)
self.driver.mgb = self.setup_mock_vshare(m_conf=conf)
result = self.driver._wait_for_export_state(VOLUME['id'], state=True)
self.driver.mga.basic.get_node_values.assert_called_with(bn)
self.driver.mgb.basic.get_node_values.assert_called_with(bn)
self.assertTrue(result)
def test_wait_for_export_state_with_no_state(self):
"""Queries to cluster nodes verify *no* export state."""
bn = "/vshare/state/local/container/myContainer/lun/%s/usn_id" \
% VOLUME['id']
response = {bn: '(not exported)'}
conf = {
'basic.get_node_values.return_value': response,
}
self.driver.mga = self.setup_mock_vshare(m_conf=conf)
self.driver.mgb = self.setup_mock_vshare(m_conf=conf)
self.assertTrue(self.driver._wait_for_export_state(
VOLUME['id'], state=False))
def test_is_supported_vmos_version(self):
"""Currently supported VMOS version."""
version = 'V6.3.1'
self.assertTrue(self.driver._is_supported_vmos_version(version))
def test_is_supported_vmos_version_supported_future_version(self):
"""Potential future supported VMOS version."""
version = 'V6.3.7'
self.assertTrue(self.driver._is_supported_vmos_version(version))
def test_is_supported_vmos_version_unsupported_past_version(self):
"""Currently unsupported VMOS version."""
version = 'G5.5.2'
self.assertFalse(self.driver._is_supported_vmos_version(version))
def test_is_supported_vmos_version_unsupported_future_version(self):
"""Future incompatible VMOS version."""
version = 'V7.0.0'
self.assertFalse(self.driver._is_supported_vmos_version(version))
def test_fatal_error_code(self):
"""Return an exception for a valid fatal error code."""
response = {'code': 14000, 'message': 'fail city'}
self.assertRaises(exception.ViolinBackendErr,
self.driver._fatal_error_code,
response)
def test_fatal_error_code_non_fatal_error(self):
"""Returns no exception for a non-fatal error code."""
response = {'code': 1024, 'message': 'try again!'}
self.assertIsNone(self.driver._fatal_error_code(response))
| |
# coding=utf-8
# Copyright 2022 The TensorFlow GAN Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Spectral normalization ops."""
import contextlib
import tensorflow.compat.v1 as tf
import tensorflow_gan as tfgan
sn_gettr = tfgan.features.spectral_normalization_custom_getter
def snconv2d(input_, output_dim, k_h=3, k_w=3, d_h=2, d_w=2, training=True,
name='snconv2d'):
"""Creates a 2d conv-layer with Spectral Norm applied to the weights.
Args:
input_: 4D input tensor (batch size, height, width, channel).
output_dim: Number of features in the output layer.
k_h: The height of the convolutional kernel.
k_w: The width of the convolutional kernel.
d_h: The height stride of the convolutional kernel.
d_w: The width stride of the convolutional kernel.
training: If `True`, add the spectral norm assign ops.
name: The name of the variable scope.
Returns:
conv: The normalized tensor.
"""
with tf.variable_scope(
name,
custom_getter=sn_gettr(training=training, equality_constrained=False)):
return tf.layers.conv2d(
input_,
filters=output_dim,
kernel_size=(k_h, k_w),
strides=(d_h, d_w),
padding='same',
activation=None,
use_bias=True,
kernel_initializer=tf.keras.initializers.VarianceScaling(
scale=1.0, mode='fan_avg', distribution='uniform'),
bias_initializer=tf.initializers.zeros(),
name=name)
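# Shape sketch (illustrative values): for an NHWC input `x` of shape
# [8, 64, 64, 3],
#   h = snconv2d(x, output_dim=64, k_h=3, k_w=3, d_h=2, d_w=2, name='conv1')
# returns an [8, 32, 32, 64] tensor; the conv kernel is created through the
# spectral-normalization custom getter above, so its weights are rescaled by
# an estimate of their largest singular value.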
def snlinear(x, output_size, bias_start=0.0, training=True, name='snlinear'):
"""Creates a linear layer with Spectral Normalization applied.
Args:
x: 2D input tensor (batch size, features).
output_size: Integer number of features in output of layer.
bias_start: Float to which bias parameters are initialized.
training: If `True`, add the spectral norm assign ops.
name: Optional, variable scope to put the layer's parameters into.
Returns:
The normalized output tensor of the linear layer.
"""
with tf.variable_scope(
name,
custom_getter=sn_gettr(training=training, equality_constrained=False)):
return tf.layers.dense(
x,
output_size,
activation=None,
use_bias=True,
kernel_initializer=tf.keras.initializers.VarianceScaling(
scale=1.0, mode='fan_avg', distribution='uniform'),
bias_initializer=tf.initializers.constant(bias_start))
def sn_embedding(x, number_classes, embedding_size, training=True,
name='snembedding'):
"""Creates an embedding lookup with Spectral Normalization applied.
Args:
x: 1D input tensor (batch size, ).
number_classes: The number of classes.
embedding_size: The length of the embedding vector for each class.
training: If `True`, add the spectral norm assign ops.
name: Optional, variable scope to put the layer's parameters into.
Returns:
The output tensor (batch size, embedding_size).
"""
with tf.variable_scope(name):
embedding_map = tf.get_variable(
name='embedding_map',
shape=[number_classes, embedding_size],
initializer=tf.keras.initializers.VarianceScaling(
scale=1.0, mode='fan_avg', distribution='uniform'))
embedding_map_bar_transpose = tfgan.features.spectral_normalize(
tf.transpose(a=embedding_map),
training=training,
equality_constrained=False)
embedding_map_bar = tf.transpose(a=embedding_map_bar_transpose)
return tf.nn.embedding_lookup(params=embedding_map_bar, ids=x)
class ConditionalBatchNorm(object):
"""Conditional Batch Normalization.
The same as normal Batch Normalization, but there is a different (gamma, beta)
pair for each possible category.
Each class has its own specific gamma and beta as normalization variables.
"""
# TODO(augustusodena) Merge conditional batch norm with batch norm.
# TODO(augustusodena) Use more sophisticated FilM layer here.
# TODO(augustusodena) Why does this need to be a class?
def __init__(self, num_categories, name='conditional_batch_norm'):
"""Inits the object.
This is just a setter.
Args:
num_categories: Integer number of classes (and gamma, beta pairs).
name: String name to be used for scoping.
Returns:
Initialized object.
"""
with tf.variable_scope(name):
self.name = name
self.num_categories = num_categories
def __call__(self, inputs, labels):
"""Adds Conditional Batch norm to the TF Graph.
Args:
inputs: Tensor of inputs (e.g. images).
labels: Tensor of labels - same first dimension as inputs.
Returns:
Output tensor.
"""
inputs = tf.convert_to_tensor(value=inputs)
inputs_shape = inputs.get_shape()
params_shape = inputs_shape[-1:]
axis = [0, 1, 2]
shape = tf.TensorShape([self.num_categories]).concatenate(params_shape)
with tf.variable_scope(self.name):
self.gamma = tf.get_variable(
'gamma', shape, initializer=tf.initializers.ones())
self.beta = tf.get_variable(
'beta', shape, initializer=tf.initializers.zeros())
beta = tf.gather(self.beta, labels)
beta = tf.expand_dims(tf.expand_dims(beta, 1), 1)
gamma = tf.gather(self.gamma, labels)
gamma = tf.expand_dims(tf.expand_dims(gamma, 1), 1)
mean, variance = tf.nn.moments(x=inputs, axes=axis, keepdims=True)
outputs = tf.nn.batch_normalization(
inputs, mean, variance, beta, gamma, variance_epsilon=1e-5)
outputs.set_shape(inputs_shape)
return outputs
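# Minimal usage sketch (shapes are illustrative): with NHWC inputs and
# integer class labels,
#   cbn = ConditionalBatchNorm(num_categories=10)
#   images = tf.random.normal([8, 32, 32, 64])
#   labels = tf.random.uniform([8], maxval=10, dtype=tf.int32)
#   out = cbn(images, labels)  # same shape as `images`
# tf.gather above selects one (gamma, beta) row per example, so every class
# is normalized with its own scale and shift.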
class BatchNorm(object):
"""Batch Normalization.
This is just vanilla batch normalization.
"""
def __init__(self, name='batch_norm'):
"""Inits the object.
This is just a setter.
Args:
name: String name to be used for scoping.
Returns:
Initialized object.
"""
with tf.variable_scope(name):
self.name = name
def __call__(self, inputs):
"""Adds Batch Norm to the TF Graph.
Args:
inputs: Tensor of inputs (e.g. images).
Returns:
Output tensor.
"""
inputs = tf.convert_to_tensor(value=inputs)
inputs_shape = inputs.get_shape().as_list()
params_shape = inputs_shape[-1]
axis = [0, 1, 2]
shape = tf.TensorShape([params_shape])
with tf.variable_scope(self.name):
self.gamma = tf.get_variable(
'gamma', shape, initializer=tf.initializers.ones())
self.beta = tf.get_variable(
'beta', shape, initializer=tf.initializers.zeros())
beta = self.beta
gamma = self.gamma
mean, variance = tf.nn.moments(x=inputs, axes=axis, keepdims=True)
outputs = tf.nn.batch_normalization(
inputs, mean, variance, beta, gamma, variance_epsilon=1e-5)
outputs.set_shape(inputs_shape)
return outputs
def sn_conv1x1(x, output_dim, training=True, name='sn_conv1x1'):
"""Builds graph for a spectrally normalized 1 by 1 convolution.
This is used in the context of non-local networks to reduce channel count for
strictly computational reasons.
Args:
x: A 4-D tensorflow tensor.
output_dim: An integer representing desired channel count in the output.
training: If `True`, add the spectral norm assign ops.
name: String to pass to the variable scope context.
Returns:
A new volume with the same batch, height, and width as the input.
"""
with tf.variable_scope(name, custom_getter=sn_gettr(training=training)):
w = tf.get_variable(
'weights', [1, 1, x.get_shape()[-1], output_dim],
initializer=tf.keras.initializers.VarianceScaling(
scale=1.0, mode='fan_avg', distribution='uniform'))
conv = tf.nn.conv2d(
input=x, filters=w, strides=[1, 1, 1, 1], padding='SAME')
return conv
def sn_non_local_block_sim(x, training=True, name='sn_nonlocal'):
"""Builds graph for the self-attention block.
This is one third of the tricks from the SAGAN paper.
Args:
x: A 4-D tensorflow tensor.
training: If `True`, add the spectral norm assign ops.
name: String to pass to the variable scope context.
Returns:
A new volume with self-attention having been applied.
"""
with tf.variable_scope(name):
_, h, w, num_channels = x.shape.as_list()
location_num = h * w
downsampled_num = location_num // 4
# theta path
theta = sn_conv1x1(x, num_channels // 8, training, 'sn_conv_theta')
theta = tf.reshape(
theta, [-1, location_num, num_channels // 8])
# phi path
phi = sn_conv1x1(x, num_channels // 8, training, 'sn_conv_phi')
phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)
phi = tf.reshape(
phi, [-1, downsampled_num, num_channels // 8])
attn = tf.matmul(theta, phi, transpose_b=True)
attn = tf.nn.softmax(attn)
# g path
g = sn_conv1x1(x, num_channels // 2, training, 'sn_conv_g')
g = tf.layers.max_pooling2d(inputs=g, pool_size=[2, 2], strides=2)
g = tf.reshape(
g, [-1, downsampled_num, num_channels // 2])
attn_g = tf.matmul(attn, g)
attn_g = tf.reshape(attn_g, [-1, h, w, num_channels // 2])
sigma = tf.get_variable(
'sigma_ratio', [], initializer=tf.initializers.constant(0.0))
attn_g = sn_conv1x1(attn_g, num_channels, training, 'sn_conv_attn')
return x + sigma * attn_g
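# Shape walk-through (illustrative): for x of shape [B, 32, 32, 256]:
# theta -> [B, 1024, 32]; phi (after 2x2 max-pool) -> [B, 256, 32];
# attn = softmax(theta @ phi^T) -> [B, 1024, 256]; g -> [B, 256, 128];
# attn_g = attn @ g -> [B, 1024, 128] -> reshape to [B, 32, 32, 128]; the
# final 1x1 conv restores 256 channels before the residual sum with x.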
@contextlib.contextmanager
def variables_on_gpu0():
"""Put variables on GPU."""
old_fn = tf.get_variable
def new_fn(*args, **kwargs):
with tf.device('/gpu:0'):
return old_fn(*args, **kwargs)
tf.get_variable = new_fn
yield
tf.get_variable = old_fn
def avg_grads(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list is
over individual gradients. The inner list is over the gradient calculation
for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(grads, 0)
grad = tf.reduce_mean(input_tensor=grad, axis=0)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
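# Usage sketch for avg_grads (the tower-loss builder is a hypothetical
# helper, not part of this module):
#   tower_grads = []
#   for i in range(num_gpus):
#       with tf.device('/gpu:%d' % i):
#           loss = build_tower_loss(i)  # hypothetical
#           tower_grads.append(optimizer.compute_gradients(loss))
#   train_op = optimizer.apply_gradients(avg_grads(tower_grads))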
| |
"""Parser is a generator function.
Parser receives data with generator's send() method and sends data to
destination DataQueue. Parser receives ParserBuffer and DataQueue objects
as a parameters of the parser call, all subsequent send() calls should
send bytes objects. Parser sends parsed `term` to destination buffer with
DataQueue.feed_data() method. DataQueue object should implement two methods.
feed_data() - parser uses this method to send parsed protocol data.
feed_eof() - parser uses this method for indication of end of parsing stream.
To indicate the end of the incoming data stream, an EofStream exception
should be thrown into the parser. The parser may raise exceptions.
There are three stages:
* Data flow chain:
1. Application creates StreamParser object for storing incoming data.
2. StreamParser creates ParserBuffer as internal data buffer.
3. Application creates a parser and sets it into the stream buffer:
parser = HttpRequestParser()
data_queue = stream.set_parser(parser)
4. At this stage StreamParser creates a DataQueue object and passes it
   and the internal buffer into the parser as arguments.
def set_parser(self, parser):
output = DataQueue()
self.p = parser(output, self._input)
return output
5. Application waits for data on output.read()
while True:
msg = yield from output.read()
...
* Data flow:
1. asyncio's transport reads data from socket and sends data to protocol
with data_received() call.
2. Protocol sends data to StreamParser with feed_data() call.
3. StreamParser sends data into parser with generator's send() method.
4. Parser processes incoming data and sends parsed data
to DataQueue with feed_data()
5. Application received parsed data from DataQueue.read()
* Eof:
1. StreamParser receives eof with feed_eof() call.
2. StreamParser throws EofStream exception into parser.
3. Then it unsets parser.
_SocketSocketTransport ->
-> "protocol" -> StreamParser -> "parser" -> DataQueue <- "application"
"""
__all__ = ['EofStream', 'StreamParser', 'StreamProtocol',
'ParserBuffer', 'DataQueue', 'LinesParser', 'ChunksParser']
import asyncio
import asyncio.streams
import collections
import inspect
from . import errors
BUF_LIMIT = 2**14
DEFAULT_LIMIT = 2**16
class EofStream(Exception):
"""eof stream indication."""
class StreamParser:
"""StreamParser manages incoming bytes stream and protocol parsers.
StreamParser uses ParserBuffer as internal buffer.
set_parser() sets current parser, it creates DataQueue object
and sends ParserBuffer and DataQueue into parser generator.
unset_parser() sends EofStream into parser and then removes it.
"""
def __init__(self, *, loop=None, buf=None,
paused=True, limit=DEFAULT_LIMIT):
self._loop = loop
self._eof = False
self._exception = None
self._parser = None
self._transport = None
self._limit = limit
self._paused = False
self._stream_paused = paused
self._output = None
self._buffer = buf if buf is not None else ParserBuffer()
@property
def output(self):
return self._output
def set_transport(self, transport):
assert self._transport is None, 'Transport already set'
self._transport = transport
def at_eof(self):
return self._eof
def pause_stream(self):
self._stream_paused = True
def resume_stream(self):
if self._paused and self._buffer.size <= self._limit:
self._paused = False
self._transport.resume_reading()
self._stream_paused = False
if self._parser and self._buffer:
self.feed_data(b'')
def exception(self):
return self._exception
def set_exception(self, exc):
self._exception = exc
if self._output is not None:
self._output.set_exception(exc)
self._output = None
self._parser = None
def feed_data(self, data):
"""send data to current parser or store in buffer."""
if data is None:
return
if self._parser and not self._stream_paused:
try:
self._parser.send(data)
except StopIteration:
self._output.feed_eof()
self._output = None
self._parser = None
except Exception as exc:
self._output.set_exception(exc)
self._output = None
self._parser = None
else:
self._buffer.feed_data(data)
if (self._transport is not None and not self._paused and
self._buffer.size > 2*self._limit):
try:
self._transport.pause_reading()
except NotImplementedError:
# The transport can't be paused.
# We'll just have to buffer all data.
# Forget the transport so we don't keep trying.
self._transport = None
else:
self._paused = True
def feed_eof(self):
"""send eof to all parsers, recursively."""
if self._parser:
try:
if self._buffer:
self._parser.send(b'')
self._parser.throw(EofStream())
except StopIteration:
self._output.feed_eof()
except EofStream:
self._output.set_exception(errors.ConnectionError())
except Exception as exc:
self._output.set_exception(exc)
self._parser = None
self._output = None
self._buffer.shrink()
self._eof = True
def set_parser(self, parser):
"""set parser to stream. return parser's DataQueue."""
if self._parser:
self.unset_parser()
output = DataQueue(self, loop=self._loop)
if self._exception:
output.set_exception(self._exception)
return output
# init parser
p = parser(output, self._buffer)
assert inspect.isgenerator(p), 'Generator is required'
try:
# initialize parser with data and parser buffers
next(p)
except StopIteration:
pass
except Exception as exc:
output.set_exception(exc)
else:
# parser still require more data
self._parser = p
self._output = output
if self._eof:
self.unset_parser()
return output
def unset_parser(self):
"""unset parser, send eof to the parser and then remove it."""
if self._buffer:
self._buffer.shrink()
if self._parser is None:
return
try:
self._parser.throw(EofStream())
except StopIteration:
self._output.feed_eof()
except EofStream:
self._output.set_exception(errors.ConnectionError())
except Exception as exc:
self._output.set_exception(exc)
finally:
self._output = None
self._parser = None
class StreamProtocol(asyncio.streams.FlowControlMixin, asyncio.Protocol):
"""Helper class to adapt between Protocol and StreamReader."""
def __init__(self, *, loop=None, **kwargs):
super().__init__(loop=loop)
self.transport = None
self.writer = None
self.reader = StreamParser(loop=loop, **kwargs)
def is_connected(self):
return self.transport is not None
def connection_made(self, transport):
self.transport = transport
self.reader.set_transport(transport)
self.writer = asyncio.streams.StreamWriter(
transport, self, self.reader, self._loop)
def connection_lost(self, exc):
self.transport = None
if exc is None:
self.reader.feed_eof()
else:
self.reader.set_exception(exc)
super().connection_lost(exc)
def data_received(self, data):
self.reader.feed_data(data)
def eof_received(self):
self.reader.feed_eof()
def _make_drain_waiter(self):
if not self._paused:
return ()
waiter = self._drain_waiter
if waiter is None or waiter.cancelled():
waiter = asyncio.Future(loop=self._loop)
self._drain_waiter = waiter
return waiter
class DataQueue:
"""DataQueue is a destination for parsed data."""
def __init__(self, stream, *, loop=None):
self._stream = stream
self._loop = loop
self._buffer = collections.deque()
self._eof = False
self._waiter = None
self._exception = None
def at_eof(self):
return self._eof
def exception(self):
return self._exception
def set_exception(self, exc):
self._exception = exc
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.done():
waiter.set_exception(exc)
def feed_data(self, data):
self._buffer.append(data)
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.cancelled():
waiter.set_result(True)
def feed_eof(self):
self._eof = True
waiter = self._waiter
if waiter is not None:
self._waiter = None
if not waiter.cancelled():
waiter.set_result(False)
@asyncio.coroutine
def read(self):
self._stream.resume_stream()
try:
if not self._buffer and not self._eof:
if self._exception is not None:
raise self._exception
assert not self._waiter
self._waiter = asyncio.Future(loop=self._loop)
yield from self._waiter
if self._buffer:
return self._buffer.popleft()
else:
if self._exception is not None:
raise self._exception
else:
raise EofStream
finally:
self._stream.pause_stream()
class ParserBuffer(bytearray):
"""ParserBuffer is a bytearray extension.
ParserBuffer provides helper methods for parsers.
"""
def __init__(self, *args, limit=BUF_LIMIT):
super().__init__(*args)
self.offset = 0
self.size = 0
self._limit = limit
self._exception = None
self._writer = self._feed_data()
next(self._writer)
def exception(self):
return self._exception
def set_exception(self, exc):
self._exception = exc
def shrink(self):
if self.offset:
del self[:self.offset]
self.offset = 0
self.size = len(self)
def _feed_data(self):
while True:
chunk = yield
if chunk:
chunk_len = len(chunk)
self.size += chunk_len
self.extend(chunk)
# shrink buffer
if (self.offset and len(self) > self._limit):
self.shrink()
if self._exception:
self._writer = self._feed_data()
next(self._writer)
raise self._exception
def feed_data(self, data):
self._writer.send(data)
def read(self, size):
"""read() reads specified amount of bytes."""
while True:
if self.size >= size:
start, end = self.offset, self.offset + size
self.offset = end
self.size = self.size - size
return self[start:end]
self._writer.send((yield))
def readsome(self, size=None):
"""reads size of less amount of bytes."""
while True:
if self.size > 0:
if size is None or self.size < size:
size = self.size
start, end = self.offset, self.offset + size
self.offset = end
self.size = self.size - size
return self[start:end]
self._writer.send((yield))
def readuntil(self, stop, limit=None):
assert isinstance(stop, bytes) and stop, \
'bytes is required: {!r}'.format(stop)
stop_len = len(stop)
while True:
pos = self.find(stop, self.offset)
if pos >= 0:
end = pos + stop_len
size = end - self.offset
if limit is not None and size > limit:
raise errors.LineLimitExceededParserError(
'Line is too long.', limit)
start, self.offset = self.offset, end
self.size = self.size - size
return self[start:end]
else:
if limit is not None and self.size > limit:
raise errors.LineLimitExceededParserError(
'Line is too long.', limit)
self._writer.send((yield))
def wait(self, size):
"""wait() waits for specified amount of bytes
then returns data without changing internal buffer."""
while True:
if self.size >= size:
return self[self.offset:self.offset + size]
self._writer.send((yield))
def waituntil(self, stop, limit=None):
"""waituntil() reads until `stop` bytes sequence."""
assert isinstance(stop, bytes) and stop, \
'bytes is required: {!r}'.format(stop)
stop_len = len(stop)
while True:
pos = self.find(stop, self.offset)
if pos >= 0:
end = pos + stop_len
size = end - self.offset
if limit is not None and size > limit:
raise errors.LineLimitExceededParserError(
'Line is too long. %s' % bytes(self), limit)
return self[self.offset:end]
else:
if limit is not None and self.size > limit:
raise errors.LineLimitExceededParserError(
'Line is too long. %s' % bytes(self), limit)
self._writer.send((yield))
def skip(self, size):
"""skip() skips specified amount of bytes."""
while self.size < size:
self._writer.send((yield))
self.size -= size
self.offset += size
def skipuntil(self, stop):
"""skipuntil() reads until `stop` bytes sequence."""
assert isinstance(stop, bytes) and stop, \
'bytes is required: {!r}'.format(stop)
stop_len = len(stop)
while True:
stop_line = self.find(stop, self.offset)
if stop_line >= 0:
end = stop_line + stop_len
self.size = self.size - (end - self.offset)
self.offset = end
return
else:
self.size = 0
self.offset = len(self) - 1
self._writer.send((yield))
def __bytes__(self):
return bytes(self[self.offset:])
class LinesParser:
"""Lines parser.
Lines parser splits a bytes stream into chunks of data, where each chunk
ends with the \\n symbol."""
def __init__(self, limit=2**16):
self._limit = limit
def __call__(self, out, buf):
try:
while True:
out.feed_data((yield from buf.readuntil(b'\n', self._limit)))
except EofStream:
pass
class ChunksParser:
"""Chunks parser.
Chunks parser splits a bytes stream into chunks of a
specified size."""
def __init__(self, size=8196):
self._size = size
def __call__(self, out, buf):
try:
while True:
out.feed_data((yield from buf.read(self._size)))
except EofStream:
pass
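# Minimal wiring sketch (to be driven from an asyncio coroutine; all names
# are defined above in this module):
#   stream = StreamParser(paused=False)
#   lines = stream.set_parser(LinesParser())
#   stream.feed_data(b'first\nsec')
#   stream.feed_data(b'ond\n')
#   stream.feed_eof()
#   msg = yield from lines.read()  # b'first\n', then b'second\n',
#                                  # then EofStream is raised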
| |
"""
Compute all mixed Nash equilibria of a 2-player normal form game by
vertex enumeration.
References
----------
B. von Stengel, "Equilibrium Computation for Two-Player Games in
Strategic and Extensive Form," Chapter 3, N. Nisan, T. Roughgarden, E.
Tardos, and V. Vazirani eds., Algorithmic Game Theory, 2007.
"""
import numpy as np
import scipy.spatial
from numba import jit, guvectorize
def vertex_enumeration(g, qhull_options=None):
"""
Compute mixed-action Nash equilibria of a 2-player normal form game
by enumeration and matching of vertices of the best response
polytopes. For a non-degenerate game input, these are all the Nash
equilibria.
Internally, `scipy.spatial.ConvexHull` is used to compute vertex
enumeration of the best response polytopes, or equivalently, facet
enumeration of their polar polytopes. Then, for each vertex of the
polytope for player 0, vertices of the polytope for player 1 are
searched to find a completely labeled pair.
Parameters
----------
g : NormalFormGame
NormalFormGame instance with 2 players.
qhull_options : str, optional(default=None)
Options to pass to `scipy.spatial.ConvexHull`. See the `Qhull
manual <http://www.qhull.org>`_ for details.
Returns
-------
list(tuple(ndarray(float, ndim=1)))
List containing tuples of Nash equilibrium mixed actions.
"""
return list(vertex_enumeration_gen(g, qhull_options=qhull_options))
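# Usage sketch (the import path is an assumption; NormalFormGame comes from
# the surrounding game-theory package; payoffs below are Matching Pennies):
#   import numpy as np
#   from quantecon.game_theory import NormalFormGame
#   payoffs = np.array([[(1, -1), (-1, 1)],
#                       [(-1, 1), (1, -1)]])
#   g = NormalFormGame(payoffs)
#   vertex_enumeration(g)
#   # -> [(array([0.5, 0.5]), array([0.5, 0.5]))]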
def vertex_enumeration_gen(g, qhull_options=None):
"""
Generator version of `vertex_enumeration`.
Parameters
----------
g : NormalFormGame
NormalFormGame instance with 2 players.
qhull_options : str, optional(default=None)
Options to pass to `scipy.spatial.ConvexHull`. See the `Qhull
manual <http://www.qhull.org>`_ for details.
Yields
------
tuple(ndarray(float, ndim=1))
Tuple of Nash equilibrium mixed actions.
"""
try:
N = g.N
except AttributeError:
raise TypeError('input must be a 2-player NormalFormGame')
if N != 2:
raise NotImplementedError('Implemented only for 2-player games')
brps = [_BestResponsePolytope(
g.players[1-i], idx=i, qhull_options=qhull_options
) for i in range(N)]
labelings_bits_tup = \
tuple(_ints_arr_to_bits(brps[i].labelings) for i in range(N))
equations_tup = tuple(brps[i].equations for i in range(N))
trans_recips = tuple(brps[i].trans_recip for i in range(N))
return _vertex_enumeration_gen(labelings_bits_tup, equations_tup,
trans_recips)
@jit(nopython=True)
def _vertex_enumeration_gen(labelings_bits_tup, equations_tup, trans_recips):
"""
Main body of `vertex_enumeration_gen`.
Parameters
----------
labelings_bits_tup : tuple(ndarray(np.uint64, ndim=1))
Tuple of ndarrays of integers representing labelings of the
vertices of the best response polytopes.
equations_tup : tuple(ndarray(float, ndim=2))
Tuple of ndarrays containing the hyperplane equations of the
polar polytopes.
trans_recips : tuple(scalar(float))
Tuple of the reciprocals of the translations.
"""
m, n = equations_tup[0].shape[1] - 1, equations_tup[1].shape[1] - 1
num_vertices0, num_vertices1 = \
equations_tup[0].shape[0], equations_tup[1].shape[0]
ZERO_LABELING0_BITS = (np.uint64(1) << np.uint64(m)) - np.uint64(1)
COMPLETE_LABELING_BITS = (np.uint64(1) << np.uint64(m+n)) - np.uint64(1)
for i in range(num_vertices0):
if labelings_bits_tup[0][i] == ZERO_LABELING0_BITS:
continue
for j in range(num_vertices1):
xor = labelings_bits_tup[0][i] ^ labelings_bits_tup[1][j]
if xor == COMPLETE_LABELING_BITS:
yield _get_mixed_actions(
labelings_bits_tup[0][i],
(equations_tup[0][i], equations_tup[1][j]),
trans_recips
)
break
class _BestResponsePolytope:
"""
Class that represents a best response polytope for a player in a
two-player normal form game.
Let :math:`A` and :math:`B` be the m x n and n x m payoff matrices
of players 0 and 1, respectively, where the payoffs are assumed to
have been shifted in such a way that :math:`A` and :math:`B` are
nonnegative and have no zero column. In von Stengel (2007), the best
response polytope for player 0 is defined by
.. math::
P = \{x \in \mathbb{R}^m \mid x \geq 0,\ B x \leq 1\},
and that for player 1 by
.. math::
Q = \{y \in \mathbb{R}^n \mid A y \leq 1,\ y \geq 0\}.
Here, by translation we represent these in the form
.. math::
\hat{P} = \{z \in \mathbb{R}^m \mid D z \leq 1\},
and
.. math::
\hat{Q} = \{w \in \mathbb{R}^n \mid C w \leq 1\},
where :math:`D` and :math:`C` are (m+n) x m and (m+n) x n matrices,
respectively. The 2d array of matrix :math:`D` for player 0 (or
:math:`C` for player 1) is passed as its `points` argument to
`scipy.spatial.ConvexHull`, which then computes, by the Qhull
library, convex hull (or facet enumeration). By polar duality, this
is equivalent to vertex enumeration of the polytope :math:`\hat{P}`,
where its k-th vertex is obtained by `-equations[k, :-1]/
equations[k, -1]`, and the indices of the corresponding binding
inequalities by `labelings[k]`, while the vertex of the original
polytope :math:`P` can be obtained by `-equations[k, :-1]/
equations[k, -1] + 1/trans_recip`.
Parameters
----------
opponent_player : Player
Instance of Player with one opponent.
idx : scalar(int), optional(default=0)
Player index in the normal form game, either 0 or 1.
qhull_options : str, optional(default=None)
Options to pass to `scipy.spatial.ConvexHull`. See the `Qhull
manual <http://www.qhull.org>`_ for details.
Attributes
----------
ndim : scalar(int)
Dimension of the polytope.
hull : scipy.spatial.ConvexHull
`ConvexHull` instance representing the polar polytope.
num_vertices : scalar(int)
Number of the vertices identified by `ConvexHull`.
equations : ndarray(float, ndim=2)
Output of `ConvexHull.equations`. The k-th vertex is obtained
by `-equations[k, :-1]/equations[k, -1]`.
labelings : ndarray(int32, ndim=2)
Output of `ConvexHull.simplices`. `labelings[k]` stores the
indices of the binding inequalities for the k-th vertex.
trans_recip : scalar(float)
Reciprocal of the translation; the k-th vertex of the original
polytope before translation can be computed by
`-equations[k, :-1]/equations[k, -1] + 1/trans_recip`.
"""
def __init__(self, opponent_player, idx=0, qhull_options=None):
try:
num_opponents = opponent_player.num_opponents
except AttributeError:
raise TypeError('input must be a Player instance')
if num_opponents != 1:
raise NotImplementedError(
'Implemented only for Player in a 2-player game'
)
B = opponent_player.payoff_array
n, m = B.shape
self.ndim = m
D = np.empty((m+n, m))
nonneg_cond_start, payoff_cond_start = (0, m) if idx == 0 else (n, 0)
# Shift the payoffs to be nonnegative and have no zero column
col_mins = B.min(axis=0)
col_maxs = B.max(axis=0)
nonpos_const_cols = (col_maxs == col_mins) * (col_mins <= 0)
shifts = np.zeros(m)
shifts[col_mins < 0] = -col_mins[col_mins < 0]
shifts[nonpos_const_cols] += 1
D[payoff_cond_start:payoff_cond_start+n, :] = B + shifts
# Construct matrix D for player 0 (or matrix C for player 1)
# by translation z = x - 1/trans_recip
row_sums = D[payoff_cond_start:payoff_cond_start+n, :].sum(axis=1)
trans_recip = row_sums.max() * 2
D[payoff_cond_start:payoff_cond_start+n, :] *= trans_recip
D[payoff_cond_start:payoff_cond_start+n, :] /= \
(trans_recip - row_sums).reshape(n, 1)
D[nonneg_cond_start:nonneg_cond_start+m, :] = 0
np.fill_diagonal(
D[nonneg_cond_start:nonneg_cond_start+m, :], -trans_recip
)
# Create scipy.spatial.ConvexHull
self.hull = scipy.spatial.ConvexHull(D, qhull_options=qhull_options)
self.equations = self.hull.equations
self.labelings = self.hull.simplices
self.num_vertices = self.hull.equations.shape[0]
self.trans_recip = trans_recip
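# A quick sketch of building a best response polytope directly (an
# assumption: `Player` is quantecon.game_theory.Player, which provides the
# `payoff_array` and `num_opponents` attributes used in __init__ above):
#
#     >>> import numpy as np
#     >>> from quantecon.game_theory import Player
#     >>> opponent = Player(np.array([[3., 2., 3.], [2., 6., 1.]]))  # (n, m)
#     >>> brp = _BestResponsePolytope(opponent, idx=0)
#     >>> brp.ndim   # m, the dimension of the polytope
#     3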
@guvectorize(['(i4[:], u8[:])'], '(m)->()', nopython=True, cache=True)
def _ints_arr_to_bits(ints_arr, out):
"""
Convert an array of integers representing the set bits into the
corresponding integer.
Compiled as a ufunc by Numba's `@guvectorize`: if the input is a
2-dim array with shape[0]=K, the function returns a 1-dim array of
K converted integers.
Parameters
----------
ints_arr : ndarray(int32, ndim=1)
Array of distinct integers from 0, ..., 63.
Returns
-------
np.uint64
Integer with set bits represented by the input integers.
Examples
--------
>>> ints_arr = np.array([0, 1, 2], dtype=np.int32)
>>> _ints_arr_to_bits(ints_arr)
7
>>> ints_arr2d = np.array([[0, 1, 2], [3, 0, 1]], dtype=np.int32)
>>> _ints_arr_to_bits(ints_arr2d)
array([ 7, 11], dtype=uint64)
"""
m = ints_arr.shape[0]
out[0] = 0
for i in range(m):
out[0] |= np.uint64(1) << np.uint64(ints_arr[i])
@jit(nopython=True, cache=True)
def _get_mixed_actions(labeling_bits, equation_tup, trans_recips):
"""
From a labeling for player 0, a tuple of hyperplane equations of the
polar polytopes, and a tuple of the reciprocals of the translations,
return a tuple of the corresponding, normalized mixed actions.
Parameters
----------
labeling_bits : scalar(np.uint64)
Integer with set bits representing a labeling of a mixed action
of player 0.
equation_tup : tuple(ndarray(float, ndim=1))
Tuple of hyperplane equations of the polar polytopes.
trans_recips : tuple(scalar(float))
Tuple of the reciprocals of the translations.
Returns
-------
tuple(ndarray(float, ndim=1))
Tuple of mixed actions.
"""
m, n = equation_tup[0].shape[0] - 1, equation_tup[1].shape[0] - 1
out = np.empty(m+n)
for pl, (start, stop, skip) in enumerate([(0, m, np.uint64(1)),
(m, m+n, np.uint64(0))]):
sum_ = 0.
for i in range(start, stop):
if (labeling_bits & np.uint64(1)) == skip:
out[i] = 0
else:
out[i] = equation_tup[pl][i-start] * trans_recips[pl] - \
equation_tup[pl][-1]
sum_ += out[i]
labeling_bits = labeling_bits >> np.uint64(1)
if sum_ != 0:
out[start:stop] /= sum_
return out[:m], out[m:]
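# A minimal usage sketch (an assumption: this module is used as in
# quantecon.game_theory, whose public `vertex_enumeration(g)` wrapper is
# built from the pieces above). For a small 3x2 bimatrix game:
#
#     >>> import numpy as np
#     >>> from quantecon.game_theory import Player, NormalFormGame
#     >>> A = np.array([[3, 3], [2, 5], [0, 6]])     # player 0, shape (3, 2)
#     >>> B = np.array([[3, 2, 3], [2, 6, 1]])       # player 1, shape (2, 3)
#     >>> g = NormalFormGame((Player(A), Player(B)))
#     >>> for x, y in vertex_enumeration(g):
#     ...     print(x, y)   # each pair (x, y) is a Nash equilibrium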
| |
"""
___ _
/ _ \ ___| |_ ___ _ __ _ _ ___
| | | |/ __| __/ _ \| '_ \| | | / __|
| |_| | (__| || (_) | |_) | |_| \__ \
\___/ \___|\__\___/| .__/ \__,_|___/
|_|
<HSC-Herve Schauer Consultants 2015>
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
"""
from twisted.protocols.basic import LineReceiver
from twisted.internet import protocol, reactor
from collections import deque
from params import api as config
from chunkify import getChunk, saveChunk
from attacks import jobParser, commands, hashtypes
from params import charsets, secondarynode, masternode
import time
import errno
import shutil
import json
import urlparse
import urllib2
import os
import re
import optparse
import logs
import backupdb
#import zlib
import matrices
# API's home directory. Contains tmp directory (used for temporary files such
# as chunks of dictionaries), policy directory (containing attack policies),
# results directory (for storing found passwords) and api.log file.
home = config["vars"]["HOME"]
# Listening interface and port
HOST = config["host"]
PORT = config["port"]
OCTOPUS = """
___ _
/ _ \\ ___| |_ ___ _ __ _ _ ___
| | | |/ __| __/ _ \\| '_ \\| | | / __|
| |_| | (__| || (_) | |_) | |_| \\__ \\
\\___/ \\___|\\__\\___/| .__/ \\__,_|___/
|_|
"""
print OCTOPUS
print "Started Octopus"
def replace_multiple(string, replace):
re_sub = re.compile('|'.join(replace.keys()))
return re_sub.sub(lambda m: replace[m.group(0)], string)
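# A quick sketch: the keys of `replace` are joined into one regex
# alternation, so they are assumed to contain no regex metacharacters.
#
#     >>> replace_multiple("/old/home/file", {"/old": "/new", "file": "chunk"})
#     '/new/home/chunk'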
def checkFile(pPath):
if not os.path.isfile(pPath):
parent = os.path.dirname(pPath)
if not os.path.isdir(parent):
os.makedirs(parent)
open(pPath, "w").close()
# Temporary directory
tmpdir = "%s/tmp" %(home)
# Policies directory
polpath = "%s/policies" %(home)
# Save in an array all policies paths
checkFile("%s/list" %(polpath))
with open("%s/list" %(polpath), "r") as f:
policies = [policy.strip("\n") for policy in f.xreadlines()]
# Log file, used for logging connections and messages sent by and to API
logger = logs.Logger("%s/api.log" %(home), reactor)
def parse_args():
"""
Arguments parser. User can provide a backup database.
"""
usage = """usage: %prog [options] hashfile"""
parser = optparse.OptionParser(usage)
help = "Backup file"
parser.add_option("-b", "--backup", help = help)
help = "Debug mode"
parser.add_option("-d", "--debug", action = "store_true", help = help)
options, args = parser.parse_args()
return options
options = parse_args()
debug = options.debug
# Backup file path
backupath = options.backup
def httprequest(data):
"""
Embeds data into a well-formed HTTP request.
"""
#data = zlib.compress(data)
if debug:
print "httprequest(%s)" %(data)
request = "GET /?cmd=%s HTTP/1.1\r\n" %urllib2.quote(data)
request += "User-Agent: Octopus"
return request
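# For example, httprequest('["OK", "done"]') yields the following request
# (the JSON command is URL-encoded into the query string):
#
#     GET /?cmd=%5B%22OK%22%2C%20%22done%22%5D HTTP/1.1
#     User-Agent: Octopus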
def httpresponse(data):
"""
Embeds data into a well-formed HTTP response to be sent to a client
(typically via the API).
"""
if debug:
print "httpresponse(%s)" %(data)
header = "HTTP/1.1 200 OK\r\n"
header += "Content-Type: application/json\r\n"
header += "Access-Control-Allow-Origin: *\r\n"
header += "Access-Control-Allow-Methods:\"GET,POST,OPTIONS,DELETE,PUT\""
header += "\r\nContent-Length: %d\r\n\r\n" %len(data)
response = header + data
return response
class Job(object):
def __init__(self, type, job, father, hshtypes, id):
if debug:
print "new Job(%s, %s, %s, %s, %s)" %(type, job, father, str(hshtypes), id)
self.type = type
self.job = job
self.father = father
self.hashtypes = hshtypes
self.id = id
def __eq__(self, other):
sameType = self.type == other.type
sameJob = self.job == other.job
sameFather = self.father == other.father
sameId = self.id == other.id
return sameType and sameJob and sameId and sameFather
def __ne__(self, other):
diffType = self.type != other.type
diffJob = self.job != other.job
diffFather = self.father != other.father
diffId = self.id != other.id
return diffType or diffJob or diffId or diffFather
def __str__(self):
dic = {"type": self.type,
"job": self.job,
"father": self.father,
"hashtypes": self.hashtypes,
"id": self.id}
return str(dic)
def getJob(self):
if debug:
print "Job.getJob()"
type, job, hshtypes, id = self.type, self.job, self.hashtypes, self.id
father = self.father
return [type, job, father, hshtypes, id]
def getOneJob(self):
if debug:
print "Job.getOneJob()"
try:
type, job, hashtype = self.type, self.job, self.hashtypes.pop()
id, empty = self.id, len(self.hashtypes)==0
father = self.father
return [type, job, father, hashtype, id], empty
except IndexError:
raise IndexError("no more hashtype to test")
class InfoJob(object):
def __init__(self, nbdone=0, nbjobs=0, nbfound=0, nbpass=0, stime=None,
etime=None, how=None):
self.nbdone = nbdone
self.nbjobs = nbjobs
self.nbfound = nbfound
self.nbpass = nbpass
self.stime = stime
self.etime = etime
self.how = how
def getInfos(self):
infos = dict()
infos["nbdone"] = self.nbdone
infos["nbjobs"] = self.nbjobs
infos["nbfound"] = self.nbfound
infos["nbpass"] = self.nbpass
infos["stime"] = self.stime
infos["etime"] = self.etime
infos["how"] = self.how
return infos
class APIProtocol(object, LineReceiver):
# request is set to True when API is supposed to send HTTP requests, and is
# set to False when it is supposed to send HTTP responses
request = True
# delimiter is used to separate HTTP requests received and sent by API
delimiter = "\r\n\r\n"
# ======================= PROTOCOL FUNCTIONS ======================= #
def connectionMade(self):
"""
Triggered when a connection is made.
"""
self.buffer = ""
peer = self.transport.getPeer()
host, port = peer.host, peer.port
if debug:
print "Connection made with %s:%d" %(host, port)
# Log connection
logger.log("Connection made with %s:%d" %(host, port))
self.proto = {"NODETYPE": self.nodetype,
"LISTENING": self.listening,
"DO": self.do,
"FATHER": self.father,
"POLICY": self.policy,
"RESULT": self.result,
"PUT": self.put,
"GET": self.get,
"REMOVE": self.remove,
"STOP": self.stop,
"RESUME": self.resume,
"PURGE": self.purge,
"KICK": self.kick,
"OK": self.ok,
"ERROR": self.error}
self.nodes = {"api": self.api,
"master": self.master,
"secondary": self.secondary,
"slave": self.slave,
"slavesync": self.slave}
self.putActions = {"file": self.putFile,
"jobs": self.putJobs,
"hashes": self.putHashes,
"infos": self.putInfos,
"nbjobs": self.putNbJobs,
"policy": self.putPolicy}
self.getActions = {"results": self.getResults,
"infos": self.getInfos,
"infosended": self.getInfosEnded,
"infonodes": self.getInfonodes,
"attacks": self.getAttacks,
"hashtypes": self.getHashtypes,
"policy": self.getPolicy,
"policies": self.getPolicies}
self.removeActions = {"job": self.removeJob,
"policy": self.removePolicy}
def connectionLost(self, reason):
peer = self.transport.getPeer()
host, port = peer.host, peer.port
if debug:
print "Connection lost with %s:%d"%(host, port)
# Log connection loss
logger.log("Connection lost with %s:%d" %(host, port))
# If masternode was disconnected
if self == self.factory.master:
self.factory.master = None
# If there is a secondary node, then...
if self.factory.secondary:
jobs = self.factory.jobs
fathers = self.factory.fathers
                # Send all information, hashes and jobs to that secondary node
for id in self.factory.infos.keys():
nbrem = self.factory.infos[id].nbdone
self.factory.secondary.sendPut("nbjobs", [nbrem, id])
if id in self.factory.hashes.keys():
hashes = self.factory.hashes[id]
secondary = self.factory.secondary
while hashes:
secondary.sendPut("hashes", [hashes[:150], id])
hashes = hashes[150:]
for job in jobs[id]:
chunk = getChunk[job.type](job.job)
newjob = [job.type, chunk, job.father, job.hashtypes, id]
self.factory.secondary.sendPut("jobs", [newjob, id])
self.factory.secondary.sendPut("fathers", [fathers[id], id])
# Tell slavenodes to connect to that secondary
host, port = secondarynode["host"], secondarynode["port"]
for s in self.factory.slavenodes:
s.sendListening(host, port)
if self == self.factory.secondary:
self.factory.secondary = None
if self in self.factory.slavenodes:
self.factory.slavenodes.remove(self)
if self == self.factory.api:
self.factory.api = None
def lineReceived(self, data):
peer = self.transport.getPeer()
host, port = peer.host, peer.port
if debug:
print "APIProtocol.lineReceived(%s) from %s:%s" %(data, host, port)
lines = data.split("\r\n")
header = lines[0]
# If it is not an HTTP/1.1 request, lose connection
if "HTTP/1.1" not in header:
self.transport.loseConnection()
return
# Else get all arguments
r = re.compile("^([^:]+): (.*?)$", re.MULTILINE)
        request = dict(r.findall("\n".join(lines[1:])))
# If User-Agent is Octopus, then sender is part of Octopus
# infrastructure, and API is supposed to send HTTP requests. Else,
# sender is a browser, and API is supposed to send an HTTP response.
        self.request = request.get("User-Agent") == "Octopus"
addr = header.split()[1]
p = urlparse.parse_qs(urlparse.urlparse(addr).query)
if "cmd" in p.keys() and p['cmd']:
self.requestReceived(p['cmd'][0])
else:
self.transport.loseConnection()
def requestReceived(self, data):
"""
Maps a command with its corresponding function
"""
#if self.request:
#data = zlib.decompress(data)
if debug:
print "APIProtocol.requestReceived(%s)" %(data)
# Log received request
peer = self.transport.getPeer()
host, port = peer.host, peer.port
logger.log("Recv from %s:%d: %s"%(host, port, data))
# Requests must be sent by JSON. Send back an error if it is not.
        if self.request:
            if data[-1] == "\x03":
                self.buffer += data[:-1]
                try:
                    cmd = json.loads(self.buffer)
                except ValueError:
                    self.sendError("Your request is not a JSON")
                    cmd = None
            else:
                self.buffer += data
                return
        else:
            try:
                cmd = json.loads(data)
            except ValueError:
                self.sendError("Your request is not a JSON")
                cmd = None
        self.buffer = ""
        if cmd:
# If cmd does not match any known function, then an error is sent.
if cmd[0] in self.proto.keys():
# If no masternode is connected, and no masternode wants to
# connect, API sends an error.
if not (self.factory.master or self.factory.secondary):
if not cmd[0] == "NODETYPE":
self.sendError("API is down")
return
                try:
                    self.proto[cmd[0]](*cmd[1:])
                except TypeError:
                    self.sendError("Type error, probably wrong number of args")
                    return
else:
self.sendError("That function does not exist")
def sendRequest(self, data):
"""
        sendRequest is used to send requests to other nodes, or to send
        responses to clients.
"""
# Log sent request
peer = self.transport.getPeer()
host, port = peer.host, peer.port
if debug:
print "APIProtocol.sendRequest(%s) to %s:%d" %(data, host, port)
logger.log("Sent to %s:%d: %s"%(host, port, data))
# Wrap data into an HTTP request or an HTTP response and send it
if self.request:
data += "\x03"
while data:
self.sendLine(self.wrap(data[:1000]))
data = data[1000:]
        else:
            self.sendLine(self.wrap(data))
            # If an HTTP response was sent, then the connection is cut
            self.transport.loseConnection()
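    # Framing note: between Octopus nodes a logical message is a JSON
    # string terminated by "\x03". sendRequest() splits it into
    # 1000-byte chunks, each wrapped in its own HTTP request, and
    # requestReceived() accumulates the chunks in self.buffer until the
    # terminator arrives; a 2500-byte payload therefore travels as three
    # consecutive GET requests.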
# ===================== END PROTOCOL FUNCTIONS ===================== #
# ========================= RECV FUNCTIONS ========================= #
def nodetype(self, type):
"""
Answer to a NODETYPE command.
"""
if debug:
print "APIProtocol.nodetype(%s)" %(type)
try:
self.sendNodetype()
self.nodes[type]()
except KeyError as e:
self.sendError("That nodetype does not exists")
return
def listening(self, host, port):
"""
Answer to a LISTENING command. Should tell clients or slavenodes what
is the address of the current Masternode.
"""
pass
def error(self, msg):
pass
def do(self, type, job, htypes, id):
"""
Answer to a DO command. Used to send a job.
"""
if debug:
print "APIProtocol.do(%s, %s, %s, %s)" %(type, job, str(htypes), id)
# Job name cannot contain any special character
for char in charsets["s"] + " ":
if char in id:
self.sendError("Id cannot contain \"%s\"" %(char))
# TODO: Remove hashes and infos
return
# Hashes must be sent to API before jobs
if not id in self.factory.hashes.keys():
self.sendError("No hash for your id, job cannot be done")
# TODO: Remove hashes and infos
return
# API sends an error if the attack type does not exist
if not type in jobParser.keys():
self.sendError("That attack type does not exist")
self.factory.hashes.pop(id)
# TODO: Remove hashes and infos
return
# Check syntax of attack
if not jobParser[type](job):
if type[0] in ["a", "e", "i", "o"]:
a = "an"
else:
a = "a"
self.sendError("Your job is not %s %s job" %(a, type))
self.factory.hashes.pop(id)
# TODO: Remove hashes and infos
return
# If everything is OK, then do job
self.sendOK("Your job is being processed")
if not id in self.factory.jobs:
self.factory.jobs[id] = deque()
master = self.factory.master or self.factory.secondary
if not self.factory.infos[id].stime:
self.factory.infos[id].stime = time.time()
master.sendDo(type, job, htypes, id)
if not id in self.factory.results:
self.factory.results[id] = set()
def policy(self, policy, hashtypes, id):
"""
Same function as do (but job is replaced by a policy)
"""
if debug:
print "APIProtocol.policy(%s, %s, %s)" %(policy, str(hashtypes), id)
for char in charsets["s"] + " ":
if char in id:
self.sendError("Id cannot contain \"%s\"" %(char))
return
if not id in self.factory.hashes.keys():
self.sendError("No hash for your id, job cannot be done")
return
if not policy in policies:
self.sendError("That policy does not exist")
self.factory.hashes.pop(id)
return
jinfos = [hashtypes, id]
# If everything is OK, read policy and perform DO actions
checkFile("%s/policies/%s.policy" %(home, policy))
with open("%s/policies/%s.policy" %(home, policy), "r") as f:
for line in f:
job = json.loads(line.strip("\n")) + jinfos
self.requestReceived(json.dumps(job))
def result(self, results, id):
"""
Answer to a RESULT command. Used to send the result of a request.
"""
# No answer if id does not exist
if not id in self.factory.results.keys():
return
# All hashes corresponding to results are removed
self.removeHashes(results, id)
# Update results list and nbfound
for result in results:
try:
if not result in self.factory.results[id]:
try:
self.factory.infos[id].nbfound += 1
except KeyError:
self.factory.infosEnded[id].nbfound += 1
self.factory.results[id].add(result)
with open("%s/results/%s" %(home, id), "a") as f:
f.write(result + "\n")
except KeyError:
pass
try:
done = self.factory.infos[id].nbdone
tot = self.factory.infos[id].nbjobs
found = self.factory.infos[id].nbfound
nbpass = self.factory.infos[id].nbpass
if (done == tot) or (found == nbpass):
self.factory.infos[id].etime = time.time()
self.factory.infosEnded[id] = self.factory.infos.pop(id)
self.factory.infosEnded[id].how = "finished"
except KeyError as e:
pass
def father(self, father, nb, id):
if not id in self.factory.times:
self.factory.times[id] = {"times": dict(), "found": 0}
self.factory.times[id]["times"][father] = time.time()
if not id in self.factory.fathers:
self.factory.fathers[id] = dict()
self.factory.fathers[id][father] = nb
def put(self, type, objects):
"""
Answer to a PUT command. Used to upload files such as dictionaries and
hashfiles.
"""
if debug:
print "APIProtocol.put(%s, ...)" %(type)
try:
self.putActions[type](*objects)
except KeyError as e:
self.sendError("Cannot put that kind of objects")
def get(self, type, objects):
"""
Answer to a GET command. Used to get information about slavenodes and
job requests (results, state).
"""
if debug:
print "APIProtocol.get(%s, ...)" %(type)
try:
self.getActions[type](*objects)
except KeyError as e:
self.sendError("Cannot get that kind of objects")
def remove(self, type, objects):
"""
        Answer to a REMOVE command. Used to remove objects such as jobs
        and policies.
"""
if debug:
print "APIProtocol.remove(%s, ...)" %(type)
try:
self.removeActions[type](*objects)
except KeyError as e:
self.sendError("Cannot get that kind of objects")
def removeJob(self, job):
if debug:
print "APIProtocol.removeJob(%s)" %(str(job))
father = job[2]
id = job[4]
if id in self.factory.jobs.keys():
mhome = masternode["vars"]["HOME"]
shome = secondarynode["vars"]["HOME"]
#chunk = job[1].replace(mhome, home).replace(shome, home)
chunk = replace_multiple(job[1], {mhome: home, shome: home})
job = Job(job[0], chunk, job[2], job[3], job[4])
if job in self.factory.jobs[id]:
while job in self.factory.jobs[id]:
self.factory.jobs[id].remove(job)
if id in self.factory.infos.keys():
self.factory.infos[id].nbdone += 1
nbdone = self.factory.infos[id].nbdone
self.factory.fathers[id][father] -= 1
if self.factory.fathers[id][father] == 0:
m = matrices.Matrix(home, father.split(":", 1))
i = self.factory.times[id]["found"]
found = self.factory.infos[id].nbfound
nbpass = self.factory.infos[id].nbpass
j = (found*100)/nbpass
self.factory.times[id]["found"] = j
m.update(i, j)
m.save()
self.factory.fathers[id].pop(father)
if not self.factory.jobs[id]:
self.factory.jobs.pop(id)
self.factory.fathers.pop(id)
try:
done = self.factory.infos[id].nbdone
tot = self.factory.infos[id].nbjobs
found = self.factory.infos[id].nbfound
nbpass = self.factory.infos[id].nbpass
if (done == tot) or (found == nbpass):
self.factory.infos[id].etime = time.time()
etime = self.factory.infos[id].etime
self.factory.infosEnded[id] = self.factory.infos.pop(id)
self.factory.infosEnded[id].how = "finished"
how = self.factory.infosEnded[id].how
except KeyError as e:
pass
def removePolicy(self, policy):
if debug:
print "APIProtocol.removePolicy(%s)" %(policy)
os.remove("%s/policies/%s.policy" %(home, policy))
policies.remove(policy)
with open("%s/policies/list" %(home), "w") as f:
pols = "\n".join(policies)
f.write(pols)
self.sendOK("Policy %s removed" %(policy))
def stop(self, id):
if debug:
print "APIProtocol.stop(%s)" %(id)
nbjobs = self.factory.infos[id].nbjobs
if self.factory.master:
self.factory.master.sendStop(id)
if self.factory.secondary:
self.factory.secondary.sendStop(id)
try:
self.factory.infos[id].etime = time.time()
etime = self.factory.infos[id].etime
self.factory.infosEnded[id] = self.factory.infos.pop(id)
self.factory.infosEnded[id].how = "stopped"
backup.save()
except KeyError as e:
self.sendError("Key error")
def resume(self, id):
if debug:
print "APIProtocol.resume(%s)" %(id)
master = self.factory.master or self.factory.secondary
if not id in self.factory.infosEnded.keys():
return
if not self.factory.infosEnded[id].how == "stopped":
return
if master:
hashes, jobs, results, infos = backup.resume(id)
with open("/app/octopus/zpoerhez", "a") as f:
f.write(str(hashes) + "\n")
f.write(str(jobs) + "\n")
f.write(str(infos))
if hashes and jobs and id in self.factory.infosEnded.keys():
self.factory.jobs[id] = deque([Job(*job) for job in jobs])
self.factory.hashes[id] = hashes
self.factory.results[id] = results
self.factory.infos[id] = InfoJob(**infos)
self.factory.infosEnded.pop(id)
jobs = self.factory.jobs
fathers = self.factory.fathers
nbrem = self.factory.infos[id].nbdone
master.sendPut("nbjobs", [nbrem, id])
if id in self.factory.hashes.keys():
hashes = self.factory.hashes[id]
while hashes:
master.sendPut("hashes", [hashes[:150], id])
hashes = hashes[150:]
for job in jobs[id]:
job = job.getJob()
chunk = getChunk[job[0]](job[1])
newjob = [job[0], chunk, job[2], job[3], job[4]]
master.sendPut("jobs", [newjob, id])
master.sendPut("fathers", [fathers[id], id])
def purge(self, id):
if debug:
print "APIProtocol.purge(%d)" %(id)
self.popAll(id)
if self.factory.master:
self.factory.master.sendPurge(id)
if self.factory.secondary:
self.factory.secondary.sendPurge(id)
def api(self):
"""
        Answer to a NODETYPE api command. Sent by the manager when a
        connection is made.
"""
if debug:
print "APIProtocol.api()"
self.factory.api = self
def master(self):
"""
Answer to a NODETYPE master command. Sent by Masternode when a
connection is made.
"""
if debug:
print "APIProtocol.master()"
self.factory.master = self
jobs = self.factory.jobs
fathers = self.factory.fathers
nbjobs = self.factory.nbjobs
for id in self.factory.infos.keys():
nbrem = self.factory.infos[id].nbdone
self.factory.master.sendPut("nbjobs", [nbrem, id])
if id in self.factory.hashes.keys():
hashes = self.factory.hashes[id]
while hashes:
self.factory.master.sendPut("hashes", [hashes[:150], id])
hashes = hashes[150:]
for job in jobs[id]:
job = job.getJob()
chunk = getChunk[job[0]](job[1])
newjob = (job[0], chunk, job[2], job[3], job[4])
self.factory.master.sendPut("jobs", [newjob, id])
self.factory.master.sendPut("fathers", [fathers[id], id])
host = masternode["host"]
port = masternode["port"]
for s in self.factory.slavenodes:
s.sendListening(host, port)
def secondary(self):
"""
Answer to a NODETYPE secondary command. Sent by Secondarynode when a
connection is made.
"""
if debug:
print "APIProtocol.secondary()"
self.factory.secondary = self
if not self.factory.master:
for id in self.factory.infos.keys():
hashes = self.factory.hashes[id]
secondary = self.factory.secondary
while hashes:
secondary.sendPut("hashes", [hashes[:150], id])
hashes = hashes[150:]
jobs = self.factory.jobs
fathers = self.factory.fathers
nbjobs = self.factory.nbjobs
for id in self.factory.infos.keys():
nbrem = self.factory.infos[id].nbdone
self.factory.secondary.sendPut("nbjobs", [nbrem, id])
for job in jobs[id]:
                job = job.getJob()
chunk = getChunk[job[0]](job[1])
newjob = (job[0], chunk, job[2], job[3], job[4])
self.factory.secondary.sendPut("jobs", [newjob, id])
self.factory.secondary.sendPut("fathers", [fathers[id], id])
host = secondarynode["host"]
port = secondarynode["port"]
for s in self.factory.slavenodes:
s.sendListening(host, port)
def slave(self):
"""
Answer to a NODETYPE slave command. Sent by a slavenode when a
connection is made.
"""
if debug:
print "APIProtocol.slave()"
self.factory.slavenodes.append(self)
if self.factory.master:
host = masternode["host"]
port = masternode["port"]
self.sendListening(host, port)
elif self.factory.secondary:
host = secondarynode["host"]
port = secondarynode["port"]
self.sendListening(host, port)
def putNbJobs(self, nb, id):
if debug:
print "APIProtocol.putNbJobs(%d, %s)" %(nb, id)
if not "nbjobs" in self.factory.infos[id].getInfos().keys():
self.factory.infos[id].nbjobs = nb
self.factory.nbjobs[id] = nb
else:
self.factory.infos[id].nbjobs += nb
self.factory.nbjobs[id] += nb
def putFile(self, chunk, path):
"""
Answer to a PUT dictionary command. Used to store a dictionary.
"""
pass
def putHashes(self, hashes, id):
"""
Answer to a PUT hashes command. Used to store a hashfile.
"""
if debug:
print "APIProtocol.putHashes(%s, %s)" %(str(hashes[:3]), id)
for char in charsets["s"] + " ":
if char in id:
self.sendError("Id cannot contain \"%s\"" %char)
return
if not id.strip():
self.sendError("You must give an id to your attack")
return
if id in self.factory.hashes.keys() and id in self.factory.jobs.keys():
self.sendError("That id already exists")
return
else:
master = self.factory.master or self.factory.secondary
nbhashes = len(hashes)
toSend = list(hashes)
if master:
while toSend:
master.sendPut("hashes", [toSend[:150], id])
toSend = toSend[150:]
if not os.path.isdir("%s/%s" %(tmpdir, id)):
os.mkdir("%s/%s" %(tmpdir, id))
if not id in self.factory.infos.keys():
self.factory.infos[id] = InfoJob()
self.factory.nbjobs[id] = 0
self.factory.infos[id].nbpass += nbhashes
if id in self.factory.hashes.keys():
self.factory.hashes[id] += hashes
else:
self.factory.hashes[id] = hashes
self.sendOK("Hashes uploaded")
def putJobs(self, job, id):
if debug:
print "APIProtocol.putJobs(%s, %s)" %(str(job), id)
if not id in self.factory.jobs.keys():
self.factory.jobs[id] = deque()
nbjobs = self.factory.infos[id].nbjobs
path = "%s/tmp/%s/part%d" %(home, id, nbjobs)
chunk = saveChunk[job[0]](job[1], path)
newjob = Job(job[0], chunk, job[2], job[3], job[4])
self.factory.jobs[id].append(newjob)
self.factory.infos[id].nbjobs += 1
def putInfos(self, infos):
if debug:
print "APIProtocol.putInfos(%s)" %(str(infos))
host = self.transport.getPeer().host
port = self.transport.getPeer().port
addr = "%s:%d" %(host, port)
self.factory.infoslaves[addr] = infos
def putPolicy(self, name, jobs):
if debug:
print "APIProtocol.putPolicy(%s, %s)" %(name, str(jobs))
if not name:
self.sendError("You must give a name to your policy")
return
if name in policies:
self.sendError("Policy already exists")
return
cset = charsets["l"] + charsets["u"]
cset += charsets["d"] + "_-"
for char in name:
if char not in cset:
self.sendError("Policy name cannot contain %s" %char)
return
if not jobs:
self.sendError("You must give at least one attack")
return
for job in jobs:
type = job[0]
attack = job[1]
if not type in jobParser.keys():
self.sendError("One of the attack types does not exist")
return
if not jobParser[type](attack):
self.sendError("Type error")
return
with open("%s/policies/list" %home, "a") as f:
f.write(name + "\n")
with open("%s/policies/%s.policy" %(home, name), "w") as f:
p = "\n".join(json.dumps(["DO", job[0], job[1]]) for job in jobs)
f.write(p)
self.sendOK("Policy added successfully")
policies.append(name)
def getResults(self, id):
"""
Answer to a GET results command. Returns the list of results
corresponding to a given client.
"""
if debug:
print "APIProtocol.getResults(%s)" %(id)
if id in self.factory.results.keys():
self.sendResult(list(self.factory.results[id]))
else:
try:
with open("%s/results/%s" %(home, id)) as f:
results = [r.strip("\n") for r in f.xreadlines()]
self.sendResult(results)
except IOError as e:
if e.errno == errno.ENOENT:
self.sendError("No result for your id")
else:
raise
def getInfonodes(self):
if debug:
print "APIProtocol.getInfonodes()"
self.factory.infoslaves = dict()
nan = {"program": "unknown", "cpu": 0}
for s in self.factory.slavenodes:
host = s.transport.getPeer().host
port = s.transport.getPeer().port
addr = "%s:%d" %(host, port)
self.factory.infoslaves[addr] = nan
s.sendGet("infos", [])
reactor.callLater(0.3, self.sendInfonodes)
def getInfos(self):
if debug:
print "APIProtocol.getInfos()"
self.sendInfos()
def getInfosEnded(self):
if debug:
print "APIProtocol.getInfosEnded()"
self.sendInfosEnded()
def getAttacks(self):
if debug:
print "APIProtocol.getAttacks()"
self.sendAttacks()
def getHashtypes(self):
if debug:
print "APIProtocol.getHashtypes()"
self.sendHashtypes()
def getPolicies(self):
if debug:
print "APIProtocol.getPolicies()"
self.sendResult(policies)
def getPolicy(self, name):
if debug:
print "APIProtocol.getPolicy(%s)" %(name)
checkFile("%s/policies/%s.policy" %(home, name))
        with open("%s/policies/%s.policy" %(home, name), "r") as f:
            policy = f.read()
        policy = [json.loads(a) for a in policy.splitlines() if len(a) > 0]
self.sendResult(policy)
def kick(self, addr):
if debug:
print "APIProtocol.kick(%s)" %(addr)
host, port = addr.split(":")
        for s in self.factory.slavenodes:
            peer = s.transport.getPeer()
            if peer.host == host and peer.port == int(port):
                s.sendKick()
def ok(self, msg):
pass
# ======================= END RECV FUNCTIONS ======================= #
# ========================= SEND FUNCTIONS ========================= #
def sendNodetype(self):
if debug:
print "APIProtocol.sendNodetype"
self.sendRequest(json.dumps(["NODETYPE", "api"]))
def sendOK(self, msg):
if debug:
print "APIProtocol.sendOK"
self.sendRequest(json.dumps(["OK", msg]))
def sendError(self, msg):
if debug:
print "APIProtocol.sendError"
self.sendRequest(json.dumps(["ERROR", msg]))
def sendListening(self, host, port):
if debug:
print "APIProtocol.sendListening"
self.sendRequest(json.dumps(["LISTENING", host, port]))
def sendDo(self, type, job, htypes, id):
if debug:
print "APIProtocol.sendDo"
self.sendRequest(json.dumps(["DO", type, job, htypes, id]))
def sendGet(self, type, object):
if debug:
print "APIProtocol.sendGet"
self.sendRequest(json.dumps(["GET", type, object]))
def sendPut(self, type, object):
if debug:
print "APIProtocol.sendPut"
self.sendRequest(json.dumps(["PUT", type, object]))
def sendStop(self, id):
if debug:
print "APIProtocol.sendStop"
self.sendRequest(json.dumps(["STOP", id]))
def sendPurge(self, id):
if debug:
print "APIProtocol.sendPurge"
self.sendRequest(json.dumps(["PURGE", id]))
def sendKick(self):
if debug:
print "APIProtocol.sendKick"
self.sendRequest(json.dumps(["KICK"]))
def sendAttacks(self):
if debug:
print "APIProtocol.sendAttacks"
self.sendResult(commands.keys())
def sendHashtypes(self):
if debug:
print "APIProtocol.sendHashtypes"
self.sendResult(hashtypes)
def sendInfos(self):
if debug:
print "APIProtocol.sendInfos"
infos = self.factory.infos
infos = dict((id, infos[id].getInfos()) for id in infos)
self.sendResult(infos)
def sendInfosEnded(self):
if debug:
print "APIProtocol.sendInfosEnded"
infos = self.factory.infosEnded
infos = dict((id, infos[id].getInfos()) for id in infos)
self.sendResult(infos)
def sendInfonodes(self):
if debug:
print "APIProtocol.sendInfonodes"
self.sendResult(self.factory.infoslaves)
def sendResult(self, result):
if debug:
print "APIProtocol.sendResult"
self.sendRequest(json.dumps(["RESULT", result]))
    # ======================= END SEND FUNCTIONS ======================= #
# ========================= MISC FUNCTIONS ========================= #
def removeHashes(self, results, id):
if debug:
print "APIProtocol.removeHashes(%s, %s)" %(str(results[:3]), id)
hashes = self.factory.hashes[id]
        for result in results:
            # Iterate over a copy: removing items from `hashes` while
            # iterating over it directly would skip elements.
            for hash in list(hashes):
                if result.split(":")[1] in hash:
                    hashes.remove(hash)
def wrap(self, data):
if debug:
print "APIProtocol.wrap(%s)" %(data)
if self.request:
return httprequest(data)
else:
return httpresponse(data)
def popAll(self, id):
if debug:
print "APIProtocol.popAll(%s)" %(id)
if id in self.factory.results.keys():
self.factory.results.pop(id)
if id in self.factory.jobs.keys():
self.factory.jobs.pop(id)
if id in self.factory.nbjobs.keys():
self.factory.nbjobs.pop(id)
if id in self.factory.infos.keys():
self.factory.infos.pop(id)
if id in self.factory.infosEnded.keys():
self.factory.infosEnded.pop(id)
if os.path.isdir("%s/tmp/%s" %(home, id)):
shutil.rmtree("%s/tmp/%s" %(home, id))
if os.path.isfile("%s/results/%s" %(home, id)):
os.remove("%s/results/%s" %(home, id))
# ======================= END MISC FUNCTIONS ======================= #
class APIFactory(object, protocol.ClientFactory, protocol.ServerFactory):
def __init__(self, protocol=APIProtocol):
self.protocol = protocol
self.infos = dict()
self.infosEnded = dict()
self.results = dict()
self.jobs = dict()
self.fathers = dict()
self.nbjobs = dict()
self.times = dict()
self.hashes = dict()
self.slavenodes = []
self.api = None
self.master = None
self.secondary = None
def resume(self):
pass
factory = APIFactory()
backup = backupdb.BackupDB(factory, reactor, backupath)
backup.init()
reactor.listenTCP(PORT, factory, interface=HOST)
reactor.run()
backup.cur.close()
backup.con.close()
logger.write()
if os.path.isdir("%s/tmp"%home):
shutil.rmtree("%s/tmp"%home)
os.mkdir("%s/tmp"%home)
| |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for module colab_evaluation.py."""
import gzip
import os
import time
import math
from absl import flags
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pandas as pd
import colab_evaluation
import inference
import tensorflow.compat.v1 as tf
FLAGS = flags.FLAGS
class ColabEvaluationTest(parameterized.TestCase):
def _generate_random_inferences(self, n):
serialized_inferences = []
accessions_list = []
activations_list = []
for _ in range(n):
accession = f"ACCESSION_{time.time()}"
activations = np.random.rand(100)
accessions_list.append(accession)
activations_list.append(activations)
serialized_inferences.append(
inference.serialize_inference_result(accession, activations))
return serialized_inferences, accessions_list, activations_list
@parameterized.parameters([{'batch_size': 1}, {'batch_size': 9}])
def test_batched_inferences_from_dir(self, batch_size, num_examples=100):
# Create input inference results.
serialized_inferences, accessions_list, activations_list = self._generate_random_inferences(
num_examples)
shard_1_contents = b"\n".join(serialized_inferences[0:60])
shard_2_contents = b"\n".join(serialized_inferences[60:])
shard_dir = self.create_tempdir()
shard_1_filename = shard_dir.create_file('shard_1').full_path
shard_2_filename = shard_dir.create_file('shard_2').full_path
# Write contents to a gzipped file.
with tf.io.gfile.GFile(shard_1_filename, 'wb') as f:
with gzip.GzipFile(fileobj=f, mode='wb') as f_gz:
f_gz.write(shard_1_contents)
with tf.io.gfile.GFile(shard_2_filename, 'wb') as f:
with gzip.GzipFile(fileobj=f, mode='wb') as f_gz:
f_gz.write(shard_2_contents)
# Read these shards.
iterator = colab_evaluation.batched_inferences_from_dir(
shard_dir.full_path, batch_size=batch_size)
actual = list(iterator)
# Check output.
self.assertEqual(len(actual), math.ceil(num_examples / batch_size))
self.assertEqual(actual[0][0][0], accessions_list[0])
if batch_size > 1:
self.assertEqual(actual[1][0][1], accessions_list[batch_size + 1])
np.testing.assert_equal(actual[0][1][0], activations_list[0])
if batch_size > 1:
np.testing.assert_equal(actual[1][1][1],
activations_list[batch_size + 1])
def test_make_tidy_df_from_seq_names_and_prediction_array(self):
vocab = ["ENTRY0", "ENTRY1", "ENTRY2"]
sequence_names = ['SEQ0', 'SEQ1']
predictions_array = np.array([[0.1, 0.9, 0.5], [1, 1, 1]])
min_decision_threshold = 0.4
actual_df = colab_evaluation._make_tidy_df_from_seq_names_and_prediction_array(
sequence_names,
predictions_array,
vocab,
min_decision_threshold=min_decision_threshold)
expected_df = pd.DataFrame({
'up_id': ['SEQ0', 'SEQ0', 'SEQ1', 'SEQ1', 'SEQ1'],
'label': ['ENTRY1', 'ENTRY2', 'ENTRY0', 'ENTRY1', 'ENTRY2'],
'value': [0.9, 0.5, 1.0, 1.0, 1.0]
})
pd.testing.assert_frame_equal(actual_df, expected_df)
def test_make_tidy_df_from_ground_truth(self):
input_df = pd.DataFrame({
'sequence_name': ['SEQ0', 'SEQ1', 'SEQ2', 'SEQ3'],
'true_label': [['ENTRY1'], ['ENTRY1', 'ENTRY2'], [], ['ENTRY6']]
})
actual_df = colab_evaluation.make_tidy_df_from_ground_truth(input_df)
expected_df = pd.DataFrame({
'up_id': ['SEQ0', 'SEQ1', 'SEQ1', 'SEQ3'],
'label': ['ENTRY1', 'ENTRY1', 'ENTRY2', 'ENTRY6'],
'gt': [True, True, True, True]
})
pd.testing.assert_frame_equal(actual_df, expected_df)
def test_merge_predictions_and_ground_truth(self):
pred = pd.DataFrame({
'up_id': ['SEQ0', 'SEQ0', 'SEQ1', 'SEQ1', 'SEQ1'],
'label': ['ENTRY1', 'ENTRY2', 'ENTRY0', 'ENTRY1', 'ENTRY2'],
'value': [0.9, 0.5, 1.0, 1.0, 1.0]
})
gt = pd.DataFrame({
'up_id': ['SEQ0', 'SEQ1', 'SEQ1', 'SEQ3'],
'label': ['ENTRY1', 'ENTRY1', 'ENTRY2', 'ENTRY6'],
'gt': [True, True, True, True]
})
actual_df = colab_evaluation.merge_predictions_and_ground_truth(
pred, gt)
expected_df = pd.DataFrame({
'up_id': ['SEQ0', 'SEQ0', 'SEQ1', 'SEQ1', 'SEQ1', 'SEQ3'],
'label':
['ENTRY1', 'ENTRY2', 'ENTRY0', 'ENTRY1', 'ENTRY2', 'ENTRY6'],
'value': [0.9, 0.5, 1.0, 1.0, 1.0, False],
'gt': [True, False, False, True, True, True]
})
pd.testing.assert_frame_equal(actual_df, expected_df)
def test_get_pr_curve_df(self):
pred = pd.DataFrame({
'up_id': ['SEQ0', 'SEQ0', 'SEQ1', 'SEQ1', 'SEQ1'],
'label': ['ENTRY1', 'ENTRY2', 'ENTRY0', 'ENTRY1', 'ENTRY2'],
'value': [0.9, 0.5, 1.0, 1.0, 1.0]
})
gt = pd.DataFrame({
'up_id': ['SEQ0', 'SEQ1', 'SEQ1', 'SEQ3'],
'label': ['ENTRY1', 'ENTRY1', 'ENTRY2', 'ENTRY6'],
'gt': [True, True, True, True]
})
pr_curve = colab_evaluation.get_pr_curve_df(pred, gt, filtered=False)
np.testing.assert_almost_equal(pr_curve['recall'],
np.array([1, 0.75, 0.75, .5]))
np.testing.assert_almost_equal(
pr_curve['precision'], np.array([0.6666667, 0.6, 0.75, 0.6666667]))
np.testing.assert_almost_equal(
pr_curve['f1'], np.array([0.8, 0.6666667, 0.75, 0.5714286]))
def test_assign_tp_fp_fn(self):
pred = pd.DataFrame({
'up_id': ['SEQ0', 'SEQ0', 'SEQ1', 'SEQ1', 'SEQ1'],
'label': ['ENTRY1', 'ENTRY2', 'ENTRY0', 'ENTRY1', 'ENTRY2'],
'value': [0.9, 0.5, 1.0, 1.0, 1.0]
})
gt = pd.DataFrame({
'up_id': ['SEQ0', 'SEQ1', 'SEQ1', 'SEQ3'],
'label': ['ENTRY1', 'ENTRY1', 'ENTRY2', 'ENTRY6'],
'gt': [True, True, True, True]
})
tp_fp_fn = colab_evaluation.assign_tp_fp_fn(pred, gt, threshold=0.5)
expected = pd.DataFrame({
'tp': [True, False, False, True, True, False],
'fp': [False, False, True, False, False, False],
'fn': [False, False, False, False, False, True]
})
actual = tp_fp_fn.loc[:, ["tp", "fp", "fn"]]
pd.testing.assert_frame_equal(expected, actual)
def test_apply_threshold_and_return_stats(self):
pred = pd.DataFrame({
'up_id': ['SEQ0', 'SEQ0', 'SEQ1', 'SEQ1', 'SEQ1'],
'label': ['ENTRY1', 'ENTRY2', 'ENTRY0', 'ENTRY1', 'ENTRY2'],
'value': [0.9, 0.5, 1.0, 1.0, 1.0]
})
gt = pd.DataFrame({
'up_id': ['SEQ0', 'SEQ1', 'SEQ1', 'SEQ3'],
'label': ['ENTRY1', 'ENTRY1', 'ENTRY2', 'ENTRY6'],
'gt': [True, True, True, True]
})
    actual = colab_evaluation.apply_threshold_and_return_stats(
        pred, gt,
        grouping={'ENTRY0': 'A', 'ENTRY1': 'A', 'ENTRY2': 'A', 'ENTRY6': 'A'})
expected = pd.DataFrame({
'group': ['A'],
'tp': [3.0],
'fp': [1.0],
'fn': [1.0],
'precision': [0.75],
'recall': [0.75],
'f1': [0.75],
'count': [4.0],
'proportion': [1.0],
'proportion_text': ['100.0%'],
'threshold': [0.5]
})
    pd.testing.assert_frame_equal(actual, expected, check_dtype=False)
def test_read_blast_table(self):
actual = colab_evaluation.read_blast_table("testdata/blast.tsv")
    expected = pd.DataFrame({
        'up_id': ['ABC'],
        'target': ['DEF'],
        'pc_identity': [50],
        'alignment_length': [100],
        'bit_score': [500]
    })
pd.testing.assert_frame_equal(actual, expected)
if __name__ == '__main__':
absltest.main()
| |
""" Tools for reading 1D spectra extracted from the CalCOS pipeline,
for use in co-addition, and for reading and writing the line spread functions.
"""
from __future__ import division
from .spectrum import COSx1dSpectrum
from collections import OrderedDict
from astropy.units import angstrom, km, s, Quantity
from astropy.constants import c
from astropy.io import fits, ascii
from astropy.table import Table
from scipy.interpolate import interp1d
import numpy as np
import os
__all__ = ['read_x1d', 'read_lsf', 'limiting_equivalent_width',
'significance_level']
datapath = os.path.split(os.path.abspath(__file__))[0] + '/'
dw_orig = dict(G130M=0.00997, G160M=0.01223, G140L=0.083, G185M=0.034,
G225M=0.034, G285M=0.04, G230L=0.39)
c_kms = c.to(km / s)
# Cache for the LSF values:
cache_lsf = {}
def read_x1d(filename):
""" Reads an x1d format spectrum from CalCOS.
Parameters
----------
filename : str
x1d filename.
Returns
-------
x1dspec : dict
        Dictionary of `COS.spectrum.COSx1dSpectrum` objects with keys
corresponding to the detector segments (e.g. `FUVA`, `FUVB`).
"""
hdulist = fits.open(filename)
data = hdulist[1].data
header, meta = OrderedDict(), OrderedDict()
for key, val in hdulist[0].header.iteritems():
if key != '':
header[key] = val
for key, val in hdulist[1].header.iteritems():
if key != '':
meta[key] = val
x1dspec = dict()
for i, segment in enumerate(data['SEGMENT']):
this_header = header.copy()
this_header['SEGMENT'] = segment
x1dspec[segment] = COSx1dSpectrum(
data[i]['WAVELENGTH'], data[i]['FLUX'], data[i]['ERROR'],
data[i]['DQ'], data[i]['GROSS'], data[i]['BACKGROUND'],
data[i]['NET'], this_header, meta)
return x1dspec
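# Typical use (with a hypothetical CalCOS output filename):
#
#     >>> spectra = read_x1d('rootname_x1d.fits')
#     >>> fuva = spectra['FUVA']   # COSx1dSpectrum for the FUVA segment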
def cross_correlate(s1, s2, ishift=0, width=15, s1_start=None, s1_end=None):
"""
Normalised mean and covariance cross correlation offset between
two input vectors of the same length.
Parameters
----------
s1 : array, shape (N,)
Reference spectrum.
s2 : array, shape (N,)
Comparison spectrum.
ishift : float, optional
Approximate offset in pixels (default = 0).
width : float, optional
Search width in pixels (default = 15).
s1_start, s1_end : float, optional
Start and end index for region in s1 containing features.
Returns
-------
offset : array, shape(N,)
Offset of s2 from s1 in pixels.
corr : array, shape(N,)
Output correlation.
"""
    approx = int(np.round(ishift))  # Nearest integer
ns = len(s1)
if s1_start is None:
s1_start = 0 # s1 start index
if s1_end is None:
s1_end = ns - 1 # s1 end index
# Get start and end index for s2 template:
    s2_start = ((s1_start - approx + width // 2)
                if (s1_start - approx + width // 2) > 0 else 0)
    s2_end = ((s1_end - approx - width // 2)
              if (s1_end - approx - width // 2) < (ns - 1) else (ns - 1))
# Check the length of the template:
nt = s2_end - s2_start + 1
    if nt < 1:
raise ValueError('cross correlation region too small, '
'or width/ishift too large')
template2 = s2[s2_start:(s2_end + 1)] # template for s2
corr = np.zeros(width) # correlation matrix
# Get statistics on s2 template:
mean2 = np.mean(template2)
std2 = np.std(template2)
diff2 = template2 - mean2
# Cross correlate:
for i in range(width):
# Extract s1 template:
        s1_start = s2_start - width // 2 + approx + i
s1_end = s1_start + nt - 1
template1 = s1[s1_start:(s1_end + 1)]
# Statistics on the template:
mean1 = np.mean(template1)
std1 = np.std(template1)
diff1 = template1 - mean1
# Check variance for zeros:
if (std1 == 0) or (std2 == 0):
raise ValueError('zero variance computed in cross correlation')
# Compute the cross-correlation:
corr[i] = np.sum(diff1 * diff2) / (std1 * std2)
# Find index for the correlation maximum:
k = np.argmax(corr)
# Return zero offset if the correlation maximum is on the edge of the
# search area (we have failed to find a correlation peak in this case):
    if (k == 0) or (k == (width - 1)):
print('WARNING: correlation maximum on edge of search area, returning '
'zero offset')
offset = 0
return offset, corr
# Use quadratic refinement to pin down the offset:
kmin = ((corr[k - 1] - corr[k]) /
(corr[k - 1] + corr[k + 1] - 2.0 * corr[k]) - 0.5)
    offset = k + kmin - width // 2 + approx
return offset, corr
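# A minimal sketch on synthetic data (an assumed example, not pipeline
# output): recover the pixel offset between a spectrum and a rolled copy.
#
#     >>> x = np.linspace(0., 10., 200)
#     >>> s1 = np.exp(-(x - 5.) ** 2 / 0.1)
#     >>> s2 = np.roll(s1, 3)
#     >>> offset, corr = cross_correlate(s1, s2)   # |offset| ~ 3 pixels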
def scale_factor(wavelength1, flux1, error1, wavelength2, flux2, error2):
""" Find the multiplicative factor to rescale flux2, so its median will
match flux1 where they overlap in wavelength.
Parameters
----------
wavelength1, wavelength2 : arrays, shape (N,), (M,)
Dispersion for spectrum 1 and 2.
flux1, flux2 : arrays, shape (N,), (M,)
Flux for spectrum 1 and 2.
error1, error2 : arrays, shape (N,), (M,)
Flux error for spectrum 1 and 2.
Returns
-------
scaling : float
Scale factor.
Notes
-----
Errors are used to identify bad pixels.
"""
wmin = max(wavelength1.min(), wavelength2.min())
wmax = min(wavelength1.max(), wavelength2.max())
good1 = ((error1 > 0) & ~np.isnan(flux1) &
(wmin < wavelength1) & (wavelength1 < wmax))
good2 = ((error2 > 0) & ~np.isnan(flux2) &
(wmin < wavelength2) & (wavelength2 < wmax))
if good1.sum() < 3 or good2.sum() < 3:
raise ValueError('Too few good pixels to use for scaling')
median1 = np.median(flux1[good1])
median2 = np.median(flux2[good2])
if not (median1 > 0) or not (median2 > 0):
print('Bad medians: ', str(median1), str(median2),
' returning unit scaling')
scaling = 1
return scaling
scaling = median1 / median2
return scaling
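# A quick sanity sketch: if the second spectrum is uniformly half as bright
# over the wavelength overlap, the returned scaling is 2.
#
#     >>> w = np.linspace(1200., 1210., 50)
#     >>> f1, e1 = np.ones(50), 0.1 * np.ones(50)
#     >>> f2, e2 = 0.5 * np.ones(50), 0.1 * np.ones(50)
#     >>> scale_factor(w, f1, e1, w, f2, e2)
#     2.0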
class LSF(object):
""" Represents a COS line-spread function.
Parameters
----------
grating : {`G130M`, `G160M`, `G140L`, `G185M`, `G225M`, `G285M`, `G230L`}
The name of the grating.
Attributes
----------
lsf : `astropy.table.Table`
The COS line-spread function as a function of pixel offset from the
centre.
dispersion : ndarray, shape (N,)
Dispersion (Angstrom).
pixel_width : float
Pixel width (Angstrom).
grating : str
The name of the grating.
"""
def __init__(self, grating):
name = ('NUV' if grating in ('G185M', 'G225M', 'G285M', 'G230L')
else grating)
self.lsf = ascii.read('{0}/LSF/{1}.txt'.format(datapath, name))
self.dispersion = self.lsf['relpix'].data * dw_orig[grating]
self.pixel_width = dw_orig[grating]
self.grating = grating
def interpolate(self, dw):
""" Interpolate LSF to a new pixel width.
Parameters
----------
dw : float
The new pixel width (Angstrom).
"""
key = '{0}_{1}'.format(self.grating, dw)
if key in cache_lsf.keys():
self.lsf = cache_lsf[key].lsf
self.dispersion = cache_lsf[key].dispersion
self.pixel_width = cache_lsf[key].pixel_width
else:
t = np.arange(0, self.dispersion[-2], dw)
new_disp = np.concatenate([-t[::-1][:-1], t])
wavs = self.lsf.colnames[1:]
new_lsf = []
for w in wavs:
new_lsf.append(
interp1d(self.dispersion, self.lsf[w],
kind='cubic')(new_disp))
t = np.arange(len(new_disp) // 2 + 1)
new_pix = np.concatenate([-t[::-1][:-1], t])
lsf = Table([new_pix] + new_lsf, names=self.lsf.colnames)
self.lsf = lsf
self.dispersion = new_disp
self.pixel_width = dw
cache_lsf['{0}_{1}'.format(self.grating, dw)] = self
def write(self, wavelength, dw):
""" Write a file giving the COS line spread function at a given
wavelength for constant pixel width dw (both in Angstrom), suitable
for input to VPFIT (http://www.ast.cam.ac.uk/rfc/vpfit.html).
Parameters
----------
wavelength : float
Wavelength at which to compute the LSF (Angstrom).
dw : float
Pixel width (Angstrom).
"""
outname = 'LSF/LSF_{0:.1f}.txt'.format(wavelength)
if self.pixel_width != dw:
self.interpolate(dw)
if not os.path.lexists('./LSF'):
os.mkdir('./LSF')
wavs = [float(n[1:]) for n in self.lsf.colnames[1:]]
lsf1 = np.array([self.lsf[n] for n in self.lsf.colnames[1:]])
new_lsf = []
for ipix in range(lsf1.shape[1]):
new_lsf.append(np.interp(wavelength, wavs, lsf1[:, ipix]))
lsf = Table([self.lsf['relpix'].tolist(), new_lsf])
lsf.write(outname, format='ascii.fixed_width_no_header', delimiter='')
def read_lsf(grating, dw_new=None):
""" Read the COS line spread function, optionally interpolated to
a new pixel width.
Parameters
----------
grating : {`G130M`, `G160M`, `G140L`, `G185M`, `G225M`, `G285M`, `G230L`}
The name of the grating.
dw_new : float
The new pixel width in Angstroms. Default is `None`, which
returns the original LSF.
Returns
-------
LSF : `COS.utils.LSF` instance
The line spread function.
"""
if grating not in dw_orig.keys():
raise KeyError('grating name not recognised')
lsf = LSF(grating)
if dw_new is not None:
lsf.interpolate(dw_new)
return lsf
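# Typical use (assumes the packaged LSF data files are present):
#
#     >>> lsf = read_lsf('G130M')                  # original tabulation
#     >>> lsf = read_lsf('G130M', dw_new=0.02)     # resampled to 0.02 A pixels
#     >>> lsf.pixel_width
#     0.02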
def limiting_equivalent_width(significance, wavelength, b, snpix,
dispersion=None, smoothing=1):
""" Determines the limiting equivalent width for an absorption feature
in either of the G130M or G160M gratings, taking into account the
non-poissonian noise properties of the data, using Equations 4-5,
7 and 9-10 of Keeney et al. (2012), PASP, 124, 918.
Parameters
----------
significance : int
Significance level of limit (number of standard deviations).
wavelength : float or array of floats, shape (N,)
Observed wavelength of the line (Angstrom).
b : float
An estimate of the Doppler broadening parameter (km/s).
snpix : float or array of floats, shape (N,)
Signal-to-noise ratio per spectral pixel.
dispersion : float, optional
Dispersion in Angstrom/pixel (defaults to the G130M value for
wavelengths <= 1425 A and the G160M value otherwise).
smoothing : float, optional
The number of pixels the spectrum has been re-binned by
(default = 1 (no re-binning)).
Returns
-------
lim_eqw : `astropy.units.Quantity`
Limiting equivalent width (Angstrom).
"""
if isinstance(wavelength, Quantity):
wavelength = wavelength.to(angstrom)
else:
wavelength = wavelength * angstrom
if isinstance(b, Quantity):
b = b.to(km / s)
else:
b = b * km / s
if isinstance(dispersion, Quantity):
dispersion = dispersion.to(angstrom)
elif dispersion is not None:
dispersion = dispersion * angstrom
    elif not wavelength.isscalar:
        dispersion = np.empty_like(wavelength)
        dispersion[wavelength.value <= 1425] = 0.00997 * angstrom
        dispersion[wavelength.value > 1425] = 0.01223 * angstrom
    elif wavelength.value <= 1425:
dispersion = 0.00997 * angstrom
else:
dispersion = 0.01223 * angstrom
dx = b * wavelength / (c_kms * dispersion)
xopt = 1.605 * dx + 5.1 * dx ** -0.25
eta = 0.15 + xopt ** 0.37
fcx = 0.743 - 0.185 * np.exp(-dx / 11.6)
sn1 = snpix / (0.15 + smoothing ** 0.37) if smoothing != 1 else snpix
lim_eqw = (significance * dispersion / sn1) * xopt / (eta * fcx)
return lim_eqw
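# A worked sketch: the 3-sigma limit for a b = 25 km/s line at 1250 A with
# S/N = 10 per pixel and the G130M dispersion. The relations above give
# dx ~ 10.5, x_opt ~ 19.6, and a limit of roughly 0.028 Angstrom:
#
#     >>> limiting_equivalent_width(3, 1250., 25., 10., dispersion=0.00997)
#     <Quantity ~0.028 Angstrom>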
def significance_level(eqw, wavelength, b, snpix, dispersion=None, smoothing=1):
""" Determines the significance level of a line using Equations 4,
7 and 9-11 of Keeney et al. (2012), PASP, 124, 918.
Parameters
----------
eqw : float
Observed equivalent width of the line (Angstrom).
wavelength : float or array of floats, shape (N,)
Observed wavelength (Angstrom).
b : float
Doppler broadening parameter (km/s).
snpix : float or array of floats, shape (N,)
Signal-to-noise ratio per spectral pixel.
dispersion : float, optional
Dispersion in Angstrom/pixel (defaults to the G130M value for
wavelengths <= 1425 A and the G160M value otherwise).
smoothing : float, optional
The number of pixels the spectrum has been re-binned by
(default = 1 (no re-binning)).
Returns
-------
sig_level : float
Significance level in number of standard deviations.
"""
if isinstance(eqw, Quantity):
eqw = eqw.to(angstrom)
else:
eqw = eqw * angstrom
if isinstance(wavelength, Quantity):
wavelength = wavelength.to(angstrom)
else:
wavelength = wavelength * angstrom
if isinstance(b, Quantity):
b = b.to(km / s)
else:
b = b * km / s
if isinstance(dispersion, Quantity):
dispersion = dispersion.to(angstrom)
elif dispersion is not None:
dispersion = dispersion * angstrom
    elif not wavelength.isscalar:
        dispersion = np.empty_like(wavelength)
        dispersion[wavelength.value <= 1425] = 0.00997 * angstrom
        dispersion[wavelength.value > 1425] = 0.01223 * angstrom
    elif wavelength.value <= 1425:
dispersion = 0.00997 * angstrom
else:
dispersion = 0.01223 * angstrom
dx = b * wavelength / (c_kms * dispersion)
xopt = 1.605 * dx + 5.1 * dx ** -0.25
eta = 0.15 + xopt ** 0.37
fcx = 0.743 - 0.185 * np.exp(-dx / 11.6)
sn1 = snpix / (0.15 + smoothing ** 0.37) if smoothing != 1 else snpix
sig_level = sn1 * (eqw / dispersion) * eta * (fcx / xopt)
return sig_level.value
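# Consistency note: significance_level() is the algebraic inverse of
# limiting_equivalent_width() above, so feeding a limiting equivalent width
# back in recovers the significance level:
#
#     >>> eqw = limiting_equivalent_width(3, 1250., 25., 10., dispersion=0.00997)
#     >>> significance_level(eqw, 1250., 25., 10., dispersion=0.00997)   # ~3.0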
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparsemaxOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.sparsemax import sparsemax
from tensorflow.python.ops import gradient_checker
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.framework import constant_op
from tensorflow.python.platform import test
test_obs = 10
class SparsemaxTest(test.TestCase):
def _np_sparsemax(self, z):
z = z - np.mean(z, axis=1)[:, np.newaxis]
# sort z
z_sorted = np.sort(z, axis=1)[:, ::-1]
# calculate k(z)
z_cumsum = np.cumsum(z_sorted, axis=1)
k = np.arange(1, z.shape[1] + 1)
z_check = 1 + k * z_sorted > z_cumsum
    # use argmax to get the index by row as .nonzero() doesn't
    # take an axis argument. np.argmax returns the first index, but the
    # last index is required here, so reverse the rows and use
    # `z.shape[axis]` to compensate for the reversal afterwards.
k_z = z.shape[1] - np.argmax(z_check[:, ::-1], axis=1)
# calculate tau(z)
tau_sum = z_cumsum[np.arange(0, z.shape[0]), k_z - 1]
tau_z = ((tau_sum - 1) / k_z).reshape(-1, 1)
# calculate p
return np.maximum(0, z - tau_z)
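  # Worked example for the reference implementation above: for
  # z = [[0.5, 0.0]], the row-centred values are [0.25, -0.25], the
  # support condition 1 + k * z_(k) > cumsum holds for k = 1 and k = 2,
  # so k(z) = 2, tau(z) = ((0.25 - 0.25) - 1) / 2 = -0.5, and the result
  # is max(0, z - tau) = [[0.75, 0.25]].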
def _np_sparsemax_grad(self, z):
# chain rule
grad = np.ones_like(z)
# Construct S(z)
probability = self._np_sparsemax(z)
support = probability > 0
# Calculate \hat{v}, which will be a vector (scalar for each z)
v_hat = np.sum(grad * support, axis=1) / np.sum(support, axis=1)
# Calculates J(z) * v
return support * (grad - v_hat[:, np.newaxis])
def _tf_sparsemax(self, z, dtype, use_gpu):
with self.test_session(use_gpu=use_gpu):
tf_sparsemax_op = sparsemax(z.astype(dtype))
tf_sparsemax_out = tf_sparsemax_op.eval()
return tf_sparsemax_op, tf_sparsemax_out
def _test_sparsemax_against_numpy(self, dtype, random, use_gpu):
"""check sparsemax kernel against numpy"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
p_sparsemax = self._np_sparsemax(z).astype(dtype)
self.assertAllCloseAccordingToType(
p_sparsemax, tf_sparsemax_out, half_atol=5e-3)
self.assertShapeEqual(p_sparsemax, tf_sparsemax_op)
def _test_sparsemax_of_zero(self, dtype, random, use_gpu):
"""check sparsemax proposition 1, part 1"""
z = np.zeros((1, 10))
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
p_sparsemax = np.ones_like(z, dtype=dtype) / z.size
self.assertAllCloseAccordingToType(p_sparsemax, tf_sparsemax_out)
self.assertShapeEqual(p_sparsemax, tf_sparsemax_op)
def _test_sparsemax_of_inf(self, dtype, random, use_gpu):
"""check sparsemax proposition 1, part 2"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
# assume |A(z)| = 1, as z is a continuous random draw
z_sort_arg = np.argsort(z, axis=1)[:, ::-1]
z_sort = np.sort(z, axis=-1)[:, ::-1]
gamma_z = z_sort[:, 0] - z_sort[:, 1]
epsilon = (0.99 * gamma_z * 1).reshape(-1, 1)
# construct the expected 1_A(z) array
p_expected = np.zeros((test_obs, 10), dtype=dtype)
p_expected[np.arange(0, test_obs), z_sort_arg[:, 0]] = 1
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax((1 / epsilon) * z,
dtype, use_gpu)
self.assertAllCloseAccordingToType(p_expected, tf_sparsemax_out)
self.assertShapeEqual(p_expected, tf_sparsemax_op)
def _test_constant_add(self, dtype, random, use_gpu):
"""check sparsemax proposition 2"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
c = random.uniform(low=-3, high=3, size=(test_obs, 1)).astype(dtype)
_, tf_sparsemax_zpc = self._tf_sparsemax(z + c, dtype, use_gpu)
_, tf_sparsemax_z = self._tf_sparsemax(z, dtype, use_gpu)
self.assertAllCloseAccordingToType(
tf_sparsemax_zpc, tf_sparsemax_z, half_atol=5e-3)
def _test_permutation(self, dtype, random, use_gpu):
"""check sparsemax proposition 3"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
_, p = self._tf_sparsemax(z, dtype, use_gpu)
for i in range(test_obs):
per = random.permutation(10)
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(
z[i, per].reshape(1, -1), dtype, use_gpu)
p_expected = p[i, per].reshape(1, -1)
self.assertAllCloseAccordingToType(
p_expected, tf_sparsemax_out, half_atol=5e-3)
self.assertShapeEqual(p_expected, tf_sparsemax_op)
def _test_difference(self, dtype, random, use_gpu):
"""check sparsemax proposition 4"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10))
_, p = self._tf_sparsemax(z, dtype, use_gpu)
etol = {'float16': 1e-2, 'float32': 1e-6, 'float64': 1e-9}[dtype]
for val in range(0, test_obs):
for i in range(0, 10):
for j in range(0, 10):
# check the condition; the opposite pair will be checked anyway
if z[val, i] > z[val, j]:
continue
self.assertTrue(
0 <= p[val, j] - p[val, i] <= z[val, j] - z[val, i] + etol,
'0 <= %.10f <= %.10f' % (p[val, j] - p[val, i],
z[val, j] - z[val, i] + etol))
def _test_two_dimensional(self, dtype, random, use_gpu):
"""check the two-dimensional sparsemax case"""
t = np.linspace(-2, 2, test_obs, dtype=dtype)
z = np.vstack([t, np.zeros(test_obs, dtype=dtype)]).T
tf_sparsemax_op, tf_sparsemax_out = self._tf_sparsemax(z, dtype, use_gpu)
p0_expected = np.select([t < -1, t <= 1, t > 1], [0, (t + 1) / 2, 1])
self.assertAllCloseAccordingToType(p0_expected, tf_sparsemax_out[:, 0])
self.assertAllCloseAccordingToType(1 - p0_expected, tf_sparsemax_out[:, 1])
self.assertShapeEqual(z, tf_sparsemax_op)
def _test_gradient_against_estimate(self, dtype, random, use_gpu):
"""check sparsemax Rop, against estimated Rop"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
logits = array_ops.placeholder(dtype, name='z')
sparsemax_op = sparsemax(logits)
with self.test_session(use_gpu=use_gpu):
err = gradient_checker.compute_gradient_error(
logits, z.shape, sparsemax_op, z.shape, x_init_value=z, delta=1e-9)
self.assertLess(err, 1e-4)
def _test_gradient_against_numpy(self, dtype, random, use_gpu):
"""check sparsemax Rop, against numpy Rop"""
z = random.uniform(low=-3, high=3, size=(test_obs, 10)).astype(dtype)
logits = constant_op.constant(z, name='z')
sparsemax_op = sparsemax(logits)
sparsemax_grad_op = gradients_impl.gradients(sparsemax_op, [logits])[0]
with self.test_session(use_gpu=use_gpu):
tf_grad = sparsemax_grad_op.eval()
np_grad = self._np_sparsemax_grad(z)
self.assertAllCloseAccordingToType(np_grad, tf_grad)
self.assertShapeEqual(np_grad, sparsemax_grad_op)
def _test_dtype(self, dtype):
random = np.random.RandomState(1)
self._test_sparsemax_against_numpy(dtype, random, use_gpu=False)
self._test_sparsemax_of_zero(dtype, random, use_gpu=False)
self._test_sparsemax_of_inf(dtype, random, use_gpu=False)
self._test_constant_add(dtype, random, use_gpu=False)
self._test_permutation(dtype, random, use_gpu=False)
self._test_difference(dtype, random, use_gpu=False)
self._test_two_dimensional(dtype, random, use_gpu=False)
# sparsemax is not a smooth function, so gradient estimation is only
# possible for float64.
if dtype == 'float64':
self._test_gradient_against_estimate(dtype, random, use_gpu=False)
self._test_gradient_against_numpy(dtype, random, use_gpu=False)
def testFloat(self):
self._test_dtype('float32')
def testDouble(self):
self._test_dtype('float64')
if __name__ == '__main__':
test.main()
| |
import warnings
import numpy
from chainer.backends import cuda
from chainer import function_node
from chainer import utils
from chainer.utils import type_check
def _mat_ptrs(a):
"""Creates an array of pointers to matrices
Args:
a: A batch of matrices on GPU.
Returns:
GPU array of pointers to matrices.
"""
if len(a) == 1:
return cuda.cupy.full((1,), a.data.ptr, dtype=numpy.uintp)
else:
stride = a.strides[0]
ptr = a.data.ptr
return cuda.cupy.arange(ptr, ptr + stride * len(a), stride,
dtype=numpy.uintp)
def _as_batch_mat(x):
return x.reshape(len(x), x.shape[1], -1)
def _get_ld(a):
strides = a.strides[-2:]
trans = numpy.argmin(strides)
return trans, int(max(a.shape[trans - 2], max(strides) // a.itemsize))
def _matmul(a, b, transa=False, transb=False, transout=False):
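    # (added comment) transout relies on the identity (A @ B)^T == B^T @ A^T:
    # a transposed output is obtained by swapping the operands and flipping
    # their individual transpose flags.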
if transout:
transa, transb = not transb, not transa
a, b = b, a
if transa and a.ndim != 1:
a = a.swapaxes(-1, -2)
if transb and b.ndim != 1:
b = b.swapaxes(-1, -2)
xp = cuda.get_array_module(a)
if hasattr(xp, 'matmul'): # numpy.matmul is supported from version 1.10.0
return xp.matmul(a, b)
if a.ndim <= 2:
return numpy.dot(a, b)
else:
return numpy.einsum('...ij,...jk->...ik', a, b)
def _check_ndim(in_type, lower=1, upper=2):
type_check.expect(
in_type.ndim >= lower,
in_type.ndim <= upper
)
def _get_check_index(trans, right, row_idx=0, col_idx=1):
if trans ^ right:
return row_idx
else:
return col_idx
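# (added comment) _get_check_index picks the contracted axis of one matmul
# operand: for the left operand (right=False) it is the column index unless
# that operand is transposed; for the right operand (right=True) it is the
# row index unless transposed -- hence the XOR of `trans` and `right`.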
class MatMul(function_node.FunctionNode):
def __init__(self, transa=False, transb=False, transc=False, dtype=None):
self.transa = transa
self.transb = transb
self.transc = transc
self.dtype = dtype
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
a_type, b_type = in_types
type_check.expect(
a_type.dtype.kind == 'f',
b_type.dtype.kind == 'f',
)
a_ndim = type_check.eval(a_type.ndim)
b_ndim = type_check.eval(b_type.ndim)
if a_ndim == 0 or b_ndim == 0:
pass
elif a_ndim == 1 or b_ndim == 1:
type_check.expect(
a_type.ndim == b_type.ndim,
a_type.shape == b_type.shape,
)
else:
a_idx = _get_check_index(self.transa, False,
row_idx=-2, col_idx=-1)
b_idx = _get_check_index(self.transb, True,
row_idx=-2, col_idx=-1)
type_check.expect(
a_type.ndim == b_type.ndim,
a_type.shape[:-2] == b_type.shape[:-2],
a_type.shape[a_idx] == b_type.shape[b_idx],
)
def forward(self, x):
self.retain_inputs((0, 1))
a, b = x
if a.ndim == 0 or b.ndim == 0:
y = a * b
else:
y = _matmul(a, b, self.transa, self.transb, self.transc)
if self.dtype is not None and y.dtype != self.dtype:
y = y.astype(self.dtype)
return utils.force_array(y),
def backward(self, indexes, grad_outputs):
a, b = self.get_retained_inputs()
gy, = grad_outputs
ga = None
if 0 in indexes:
ga, = MatMul(self.transc, not self.transb, self.transa,
a.dtype).apply((gy, b))
gb = None
if 1 in indexes:
gb, = MatMul(not self.transa, self.transc, self.transb,
b.dtype).apply((a, gy))
return ga, gb
def matmul(a, b, transa=False, transb=False):
"""Computes the matrix multiplication of two arrays.
Args:
a (Variable): The left operand of the matrix multiplication.
If ``a`` and ``b`` are both 1-D arrays, ``matmul`` returns the dot
product of vector ``a`` and vector ``b``. If both are 2-D arrays,
``matmul`` returns the matrix product of ``a`` and ``b``. If their
dimension is larger than 2, they are treated as stacks of matrices
residing in the last two indexes, and ``matmul`` returns the stack
of products of the corresponding matrices. ``a`` and ``b`` must
have the same number of dimensions.
b (Variable): The right operand of the matrix multiplication.
Its array is treated as a matrix in the same way as ``a``'s array.
transa (bool): If ``True``, each matrix in ``a`` will be transposed.
If ``a.ndim == 1``, do nothing.
transb (bool): If ``True``, each matrix in ``b`` will be transposed.
If ``b.ndim == 1``, do nothing.
Returns:
~chainer.Variable: The result of the matrix multiplication.
.. admonition:: Example
>>> a = np.array([[1, 0], [0, 1]], np.float32)
>>> b = np.array([[4, 1], [2, 2]], np.float32)
>>> F.matmul(a, b).data
array([[4., 1.],
[2., 2.]], dtype=float32)
"""
return MatMul(transa=transa, transb=transb).apply((a, b))[0]
def _get_size(typ, index):
if index == 2 and type_check.eval(typ.ndim) == 2:
return 1
else:
return typ.shape[index]
def _batch_matmul(a, b, transa, transb, transout):
a = a.reshape(a.shape[:2] + (-1,))
b = b.reshape(b.shape[:2] + (-1,))
return _matmul(a, b, transa, transb, transout)
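# (added comment) The reshapes above promote 2-D (B, N) inputs to (B, N, 1)
# so that every input is a proper batch of matrices before delegating to
# _matmul.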
class BatchMatMul(function_node.FunctionNode):
def __init__(self, transa=False, transb=False):
self.transa = transa
self.transb = transb
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
a_type, b_type = in_types
type_check.expect(
a_type.dtype == numpy.float32,
b_type.dtype == numpy.float32
)
_check_ndim(a_type, lower=2, upper=3)
_check_ndim(b_type, lower=2, upper=3)
a_idx = _get_check_index(self.transa, False, row_idx=1, col_idx=2)
b_idx = _get_check_index(self.transb, True, row_idx=1, col_idx=2)
a_size = _get_size(a_type, a_idx)
b_size = _get_size(b_type, b_idx)
type_check.expect(
a_size == b_size
)
def forward(self, x):
self.retain_inputs((0, 1))
a, b = x
return _batch_matmul(a, b, self.transa, self.transb, False),
def backward(self, indexes, grad_outputs):
a, b = self.get_retained_inputs()
return BatchMatMulGrad(self.transa, self.transb).apply(
(a, b, grad_outputs[0]))
class BatchMatMulGrad(function_node.FunctionNode):
def __init__(self, transa=False, transb=False):
self.transa = transa
self.transb = transb
def forward(self, inputs):
self.retain_inputs((0, 1, 2))
a, b, gy = inputs
ga = _batch_matmul(gy, b, False, not self.transb,
self.transa).reshape(a.shape)
gb = _batch_matmul(a, gy, not self.transa, False,
self.transb).reshape(b.shape)
return ga, gb
def backward(self, indexes, grad_outputs):
a, b, gy = self.get_retained_inputs()
gga, ggb = grad_outputs
ret = []
if 0 in indexes or 1 in indexes:
ga, gb = BatchMatMulGrad(self.transa, self.transb).apply(
(gga, ggb, gy))
if 0 in indexes:
ret.append(ga)
if 1 in indexes:
ret.append(gb)
if 2 in indexes:
ret.append(
BatchMatMul(self.transa, self.transb).apply((gga, b))[0] +
BatchMatMul(self.transa, self.transb).apply((a, ggb))[0])
return ret
def batch_matmul(a, b, transa=False, transb=False):
"""Computes the batch matrix multiplications of two sets of arrays.
Args:
a (Variable): The left operand of the batch matrix multiplications.
A 2-D array of shape ``(B, N)`` is considered as B
:math:`N \\times 1` matrices.
A 3-D array of shape ``(B, M, N)`` is considered as B
:math:`M \\times N` matrices.
b (Variable): The right operand of the batch matrix multiplications.
Its array is treated as matrices in the same way as ``a``'s array.
transa (bool): If ``True``, transpose each matrix in ``a``.
transb (bool): If ``True``, transpose each matrix in ``b``.
Returns:
~chainer.Variable: The result of the batch matrix multiplications as a
3-D array.
.. deprecated:: v3.0.0
batch_matmul is deprecated. Use ``matmul`` instead.
"""
warnings.warn('batch_matmul is deprecated. Use matmul instead.',
DeprecationWarning)
return BatchMatMul(transa=transa, transb=transb).apply((a, b))[0]
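# Illustrative usage sketch (added; not part of the original module):
# multiply a batch of two 2x2 matrices. Requires chainer to be installed.
if __name__ == '__main__':
    a = numpy.arange(8, dtype=numpy.float32).reshape(2, 2, 2)
    b = numpy.ones((2, 2, 2), dtype=numpy.float32)
    y = batch_matmul(a, b)  # emits a DeprecationWarning; matmul is preferred
    print(y.data.shape)     # -> (2, 2, 2)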
| |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
from fancypages.utils import FP_NODE_MODEL
from fancypages.compat import AUTH_USER_MODEL, AUTH_USER_MODEL_NAME
class Migration(SchemaMigration):
def forwards(self, orm):
# Removing unique constraint on 'Container', fields ['name', 'content_type', 'object_id']
db.delete_unique(u'fancypages_container', ['name', 'content_type_id', 'object_id'])
# Adding field 'Container.language_code'
db.add_column(u'fancypages_container', 'language_code',
self.gf('django.db.models.fields.CharField')(default='en-us', max_length=7),
keep_default=False)
# Adding unique constraint on 'Container', fields ['name', 'content_type', 'object_id', 'language_code']
db.create_unique(u'fancypages_container', ['name', 'content_type_id', 'object_id', 'language_code'])
def backwards(self, orm):
# Removing unique constraint on 'Container', fields ['name', 'content_type', 'object_id', 'language_code']
db.delete_unique(u'fancypages_container', ['name', 'content_type_id', 'object_id', 'language_code'])
# Deleting field 'Container.language_code'
db.delete_column(u'fancypages_container', 'language_code')
# Adding unique constraint on 'Container', fields ['name', 'content_type', 'object_id']
db.create_unique(u'fancypages_container', ['name', 'content_type_id', 'object_id'])
models = {
u'assets.imageasset': {
'Meta': {'object_name': 'ImageAsset'},
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['{}']".format(AUTH_USER_MODEL)}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'default': "''"}),
'height': ('django.db.models.fields.IntegerField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'uuid': ('shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'}),
'width': ('django.db.models.fields.IntegerField', [], {'blank': 'True'})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
AUTH_USER_MODEL: {
'Meta': {'object_name': AUTH_USER_MODEL_NAME},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'catalogue.category': {
'Meta': {'ordering': "['full_name']", 'object_name': 'Category'},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'full_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'fancypages.carouselblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'CarouselBlock', '_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'image_1': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_10': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_2': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_3': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_4': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_5': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_6': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_7': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_8': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'image_9': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'+'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'link_url_1': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_10': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_2': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_3': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_4': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_5': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_6': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_7': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_8': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'link_url_9': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'})
},
'fancypages.container': {
'Meta': {'unique_together': "(('name', 'content_type', 'object_id', 'language_code'),)", 'object_name': 'Container'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language_code': ('django.db.models.fields.CharField', [], {'default': "'en-us'", 'max_length': '7'}),
'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'uuid': ('shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
},
'fancypages.contentblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'ContentBlock'},
'container': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'blocks'", 'to': "orm['fancypages.Container']"}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'uuid': ('shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
},
'fancypages.fancypage': {
'Meta': {'object_name': 'FancyPage'},
'date_visible_end': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_visible_start': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'pages'", 'symmetrical': 'False', 'to': "orm['fancypages.PageGroup']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywords': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'node': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'page'", 'unique': 'True', 'null': 'True', 'to': "orm['{}']".format(FP_NODE_MODEL)}),
'page_type': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'pages'", 'null': 'True', 'to': "orm['fancypages.PageType']"}),
'status': ('django.db.models.fields.CharField', [], {'max_length': '15', 'blank': 'True'}),
'uuid': ('shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
},
'fancypages.fourcolumnlayoutblock': {
'Meta': {'object_name': 'FourColumnLayoutBlock'},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.horizontalseparatorblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'HorizontalSeparatorBlock', '_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.imageandtextblock': {
'Meta': {'object_name': 'ImageAndTextBlock', '_ormbases': ['fancypages.ContentBlock']},
'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'image_asset': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'image_text_blocks'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "u'Your text goes here.'"}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'fancypages.imageblock': {
'Meta': {'object_name': 'ImageBlock', '_ormbases': ['fancypages.ContentBlock']},
'alt_text': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'image_asset': ('fancypages.assets.fields.AssetKey', [], {'blank': 'True', 'related_name': "'image_blocks'", 'null': 'True', 'to': u"orm['assets.ImageAsset']"}),
'link': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
'fancypages.orderedcontainer': {
'Meta': {'object_name': 'OrderedContainer', '_ormbases': ['fancypages.Container']},
u'container_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.Container']", 'unique': 'True', 'primary_key': 'True'}),
'display_order': ('django.db.models.fields.PositiveIntegerField', [], {})
},
'fancypages.pagegroup': {
'Meta': {'object_name': 'PageGroup'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'uuid': ('shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
},
'fancypages.pagenavigationblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'PageNavigationBlock', '_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'depth': ('django.db.models.fields.PositiveIntegerField', [], {'default': '2'}),
'is_relative': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
FP_NODE_MODEL: {
'Meta': {'object_name': FP_NODE_MODEL.split('.')[1]},
'depth': ('django.db.models.fields.PositiveIntegerField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'numchild': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'path': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'})
},
'fancypages.pagetype': {
'Meta': {'object_name': 'PageType'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '128'}),
'template_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uuid': ('shortuuidfield.fields.ShortUUIDField', [], {'db_index': 'True', 'max_length': '22', 'blank': 'True'})
},
'fancypages.tabblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TabBlock', '_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.textblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TextBlock', '_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "'Your text goes here.'"})
},
'fancypages.threecolumnlayoutblock': {
'Meta': {'object_name': 'ThreeColumnLayoutBlock'},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'})
},
'fancypages.titletextblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TitleTextBlock', '_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "'Your text goes here.'"}),
'title': ('django.db.models.fields.CharField', [], {'default': "'Your title goes here.'", 'max_length': '100'})
},
'fancypages.twitterblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'TwitterBlock', '_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'max_tweets': ('django.db.models.fields.PositiveIntegerField', [], {'default': '5'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'fancypages.twocolumnlayoutblock': {
'Meta': {'object_name': 'TwoColumnLayoutBlock'},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'left_width': ('django.db.models.fields.PositiveIntegerField', [], {'default': '6', 'max_length': '3'})
},
'fancypages.videoblock': {
'Meta': {'ordering': "['display_order']", 'object_name': 'VideoBlock', '_ormbases': ['fancypages.ContentBlock']},
u'contentblock_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['fancypages.ContentBlock']", 'unique': 'True', 'primary_key': 'True'}),
'source': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'video_code': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['fancypages']
| |
#
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
import subprocess
import tempfile
from signal import SIGTERM
from error import GitError
from trace import REPO_TRACE, IsTrace, Trace
GIT = 'git'
MIN_GIT_VERSION = (1, 5, 4)
GIT_DIR = 'GIT_DIR'
LAST_GITDIR = None
LAST_CWD = None
_ssh_proxy_path = None
_ssh_sock_path = None
_ssh_clients = []
def ssh_sock(create=True):
global _ssh_sock_path
if _ssh_sock_path is None:
if not create:
return None
dir = '/tmp'
if not os.path.exists(dir):
dir = tempfile.gettempdir()
_ssh_sock_path = os.path.join(
tempfile.mkdtemp('', 'ssh-', dir),
'master-%r@%h:%p')
return _ssh_sock_path
def _ssh_proxy():
global _ssh_proxy_path
if _ssh_proxy_path is None:
_ssh_proxy_path = os.path.join(
os.path.dirname(__file__),
'git_ssh')
return _ssh_proxy_path
def _add_ssh_client(p):
_ssh_clients.append(p)
def _remove_ssh_client(p):
try:
_ssh_clients.remove(p)
except ValueError:
pass
def terminate_ssh_clients():
global _ssh_clients
for p in _ssh_clients:
try:
os.kill(p.pid, SIGTERM)
p.wait()
except OSError:
pass
_ssh_clients = []
class _GitCall(object):
def version(self):
p = GitCommand(None, ['--version'], capture_stdout=True)
if p.Wait() == 0:
return p.stdout
return None
def __getattr__(self, name):
name = name.replace('_','-')
def fun(*cmdv):
command = [name]
command.extend(cmdv)
return GitCommand(None, command).Wait() == 0
return fun
git = _GitCall()
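# (added comment) _GitCall turns attribute access into a git subcommand:
# e.g. `git.rev_parse('HEAD')` runs `git rev-parse HEAD` and returns True
# when the command exits 0.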
_git_version = None
def git_require(min_version, fail=False):
global _git_version
if _git_version is None:
ver_str = git.version()
if ver_str.startswith('git version '):
_git_version = tuple(
map(lambda x: int(x),
ver_str[len('git version '):].strip().split('.')[0:3]
))
else:
print >>sys.stderr, 'fatal: "%s" unsupported' % ver_str
sys.exit(1)
if min_version <= _git_version:
return True
if fail:
need = '.'.join(map(lambda x: str(x), min_version))
print >>sys.stderr, 'fatal: git %s or later required' % need
sys.exit(1)
return False
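# (added comment) Typical use: call git_require((1, 5, 4), fail=True) early
# during startup to abort with a fatal error unless the installed git is at
# least MIN_GIT_VERSION.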
def _setenv(env, name, value):
env[name] = value.encode()
class GitCommand(object):
def __init__(self,
project,
cmdv,
bare = False,
provide_stdin = False,
capture_stdout = False,
capture_stderr = False,
disable_editor = False,
ssh_proxy = False,
cwd = None,
gitdir = None):
env = os.environ.copy()
for e in [REPO_TRACE,
GIT_DIR,
'GIT_ALTERNATE_OBJECT_DIRECTORIES',
'GIT_OBJECT_DIRECTORY',
'GIT_WORK_TREE',
'GIT_GRAFT_FILE',
'GIT_INDEX_FILE']:
if e in env:
del env[e]
if disable_editor:
_setenv(env, 'GIT_EDITOR', ':')
if ssh_proxy:
_setenv(env, 'REPO_SSH_SOCK', ssh_sock())
_setenv(env, 'GIT_SSH', _ssh_proxy())
if project:
if not cwd:
cwd = project.worktree
if not gitdir:
gitdir = project.gitdir
command = [GIT]
if bare:
if gitdir:
_setenv(env, GIT_DIR, gitdir)
cwd = None
command.extend(cmdv)
if provide_stdin:
stdin = subprocess.PIPE
else:
stdin = None
if capture_stdout:
stdout = subprocess.PIPE
else:
stdout = None
if capture_stderr:
stderr = subprocess.PIPE
else:
stderr = None
if IsTrace():
global LAST_CWD
global LAST_GITDIR
dbg = ''
if cwd and LAST_CWD != cwd:
if LAST_GITDIR or LAST_CWD:
dbg += '\n'
dbg += ': cd %s\n' % cwd
LAST_CWD = cwd
if GIT_DIR in env and LAST_GITDIR != env[GIT_DIR]:
if LAST_GITDIR or LAST_CWD:
dbg += '\n'
dbg += ': export GIT_DIR=%s\n' % env[GIT_DIR]
LAST_GITDIR = env[GIT_DIR]
dbg += ': '
dbg += ' '.join(command)
if stdin == subprocess.PIPE:
dbg += ' 0<|'
if stdout == subprocess.PIPE:
dbg += ' 1>|'
if stderr == subprocess.PIPE:
dbg += ' 2>|'
Trace('%s', dbg)
try:
p = subprocess.Popen(command,
cwd = cwd,
env = env,
stdin = stdin,
stdout = stdout,
stderr = stderr)
except Exception, e:
raise GitError('%s: %s' % (command[1], e))
if ssh_proxy:
_add_ssh_client(p)
self.process = p
self.stdin = p.stdin
def Wait(self):
p = self.process
if p.stdin:
p.stdin.close()
self.stdin = None
if p.stdout:
self.stdout = p.stdout.read()
p.stdout.close()
else:
p.stdout = None
if p.stderr:
self.stderr = p.stderr.read()
p.stderr.close()
else:
p.stderr = None
try:
rc = p.wait()
finally:
_remove_ssh_client(p)
return rc
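# Illustrative usage sketch (added; not part of the original file): run a
# git command directly and capture its output. Requires git on PATH. This
# module is Python 2 code, hence the print statement.
if __name__ == '__main__':
    cmd = GitCommand(None, ['--version'], capture_stdout=True)
    if cmd.Wait() == 0:
        print cmd.stdout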
| |
from __future__ import absolute_import
from __future__ import print_function
import mock
from mock import call
import time
from typing import Any, Dict, Union, SupportsInt, Text
import gcm
import ujson
from django.test import TestCase, override_settings
from django.conf import settings
from django.http import HttpResponse
from zerver.models import (
PushDeviceToken,
UserProfile,
Message,
UserMessage,
receives_offline_notifications,
receives_online_notifications,
get_client,
Recipient,
get_user_profile_by_email,
Stream,
)
from zerver.lib import push_notifications as apn
from zerver.lib.response import json_success
from zerver.lib.test_classes import (
ZulipTestCase,
)
from zilencer.models import RemoteZulipServer, RemotePushDeviceToken
from django.utils.timezone import now
class MockRedis(object):
data = {} # type: Dict[str, Any]
def hgetall(self, key):
# type: (str) -> Any
return self.data.get(key)
def exists(self, key):
# type: (str) -> bool
return key in self.data
def hmset(self, key, data):
# type: (str, Dict[Any, Any]) -> None
self.data[key] = data
def delete(self, key):
# type: (str) -> None
if self.exists(key):
del self.data[key]
def expire(self, *args, **kwargs):
# type: (*Any, **Any) -> None
pass
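# (added comment) MockRedis emulates only the subset of the redis client API
# exercised by the APNs cache in these tests (hash get/set, existence checks,
# deletes) and treats key expiry as a no-op.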
class BouncerTestCase(ZulipTestCase):
def setUp(self):
# type: () -> None
self.server_uuid = "1234-abcd"
server = RemoteZulipServer(uuid=self.server_uuid,
api_key="magic_secret_api_key",
hostname="demo.example.com",
last_updated=now())
server.save()
super(BouncerTestCase, self).setUp()
def tearDown(self):
# type: () -> None
RemoteZulipServer.objects.filter(uuid=self.server_uuid).delete()
super(BouncerTestCase, self).tearDown()
def bounce_request(self, *args, **kwargs):
# type: (*Any, **Any) -> HttpResponse
"""This method is used to carry out the push notification bouncer
requests using the Django test browser, rather than python-requests.
"""
# args[0] is method, args[1] is URL.
local_url = args[1].replace(settings.PUSH_NOTIFICATION_BOUNCER_URL, "")
if args[0] == "POST":
result = self.client_post(local_url,
kwargs['data'],
**self.get_auth())
else:
raise AssertionError("Unsupported method for bounce_request")
return result
def get_generic_payload(self, method='register'):
# type: (Text) -> Dict[str, Any]
user_id = 10
token = "111222"
token_kind = PushDeviceToken.GCM
return {'user_id': user_id,
'token': token,
'token_kind': token_kind}
def get_auth(self):
# type: () -> Dict[str, Text]
# Auth on this user
return self.api_auth(self.server_uuid)
class PushBouncerNotificationTest(BouncerTestCase):
def test_unregister_remote_push_user_params(self):
# type: () -> None
token = "111222"
token_kind = PushDeviceToken.GCM
endpoint = '/api/v1/remotes/push/unregister'
result = self.client_post(endpoint, {'token_kind': token_kind},
**self.get_auth())
self.assert_json_error(result, "Missing 'token' argument")
result = self.client_post(endpoint, {'token': token},
**self.get_auth())
self.assert_json_error(result, "Missing 'token_kind' argument")
result = self.client_post(endpoint, {'token': token, 'token_kind': token_kind},
**self.api_auth("hamlet@zulip.com"))
self.assert_json_error(result, "Must validate with valid Zulip server API key")
def test_register_remote_push_user_params(self):
# type: () -> None
token = "111222"
user_id = 11
token_kind = PushDeviceToken.GCM
endpoint = '/api/v1/remotes/push/register'
result = self.client_post(endpoint, {'user_id': user_id, 'token_kind': token_kind},
**self.get_auth())
self.assert_json_error(result, "Missing 'token' argument")
result = self.client_post(endpoint, {'user_id': user_id, 'token': token},
**self.get_auth())
self.assert_json_error(result, "Missing 'token_kind' argument")
result = self.client_post(endpoint, {'token': token, 'token_kind': token_kind},
**self.get_auth())
self.assert_json_error(result, "Missing 'user_id' argument")
result = self.client_post(endpoint, {'user_id': user_id, 'token_kind': token_kind,
'token': token},
**self.api_auth("hamlet@zulip.com"))
self.assert_json_error(result, "Must validate with valid Zulip server API key")
def test_remote_push_user_endpoints(self):
# type: () -> None
endpoints = [
('/api/v1/remotes/push/register', 'register'),
('/api/v1/remotes/push/unregister', 'unregister'),
]
for endpoint, method in endpoints:
payload = self.get_generic_payload(method)
# Verify correct results are success
result = self.client_post(endpoint, payload, **self.get_auth())
self.assert_json_success(result)
remote_tokens = RemotePushDeviceToken.objects.filter(token=payload['token'])
token_count = 1 if method == 'register' else 0
self.assertEqual(len(remote_tokens), token_count)
# Try adding/removing tokens that are too big...
broken_token = "x" * 5000 # too big
payload['token'] = broken_token
result = self.client_post(endpoint, payload, **self.get_auth())
self.assert_json_error(result, 'Empty or invalid length token')
@override_settings(PUSH_NOTIFICATION_BOUNCER_URL='https://push.zulip.org.example.com')
@mock.patch('zerver.lib.push_notifications.requests.request')
def test_push_bouncer_api(self, mock):
# type: (Any) -> None
"""This is a variant of the below test_push_api, but using the full
push notification bouncer flow
"""
mock.side_effect = self.bounce_request
user = self.example_user('cordelia')
email = user.email
self.login(email)
server = RemoteZulipServer.objects.get(uuid=self.server_uuid)
endpoints = [
('/json/users/me/apns_device_token', 'apple-token'),
('/json/users/me/android_gcm_reg_id', 'android-token'),
]
# Test error handling
for endpoint, _ in endpoints:
# Try adding/removing tokens that are too big...
broken_token = "x" * 5000 # too big
result = self.client_post(endpoint, {'token': broken_token})
self.assert_json_error(result, 'Empty or invalid length token')
result = self.client_delete(endpoint, {'token': broken_token})
self.assert_json_error(result, 'Empty or invalid length token')
# Try to remove a non-existent token...
result = self.client_delete(endpoint, {'token': 'non-existent token'})
self.assert_json_error(result, 'Token does not exist')
# Add tokens
for endpoint, token in endpoints:
# Test that we can push twice
result = self.client_post(endpoint, {'token': token})
self.assert_json_success(result)
result = self.client_post(endpoint, {'token': token})
self.assert_json_success(result)
tokens = list(RemotePushDeviceToken.objects.filter(user_id=user.id, token=token,
server=server))
self.assertEqual(len(tokens), 1)
self.assertEqual(tokens[0].token, token)
# User should have tokens for both devices now.
tokens = list(RemotePushDeviceToken.objects.filter(user_id=user.id,
server=server))
self.assertEqual(len(tokens), 2)
# Remove tokens
for endpoint, token in endpoints:
result = self.client_delete(endpoint, {'token': token})
self.assert_json_success(result)
tokens = list(RemotePushDeviceToken.objects.filter(user_id=user.id, token=token,
server=server))
self.assertEqual(len(tokens), 0)
class PushNotificationTest(BouncerTestCase):
def setUp(self):
# type: () -> None
super(PushNotificationTest, self).setUp()
self.user_profile = self.example_user('hamlet')
apn.connection = apn.get_connection('fake-cert', 'fake-key')
self.redis_client = apn.redis_client = MockRedis() # type: ignore
self.tokens = [u'aaaa', u'bbbb']
for token in self.tokens:
PushDeviceToken.objects.create(
kind=PushDeviceToken.APNS,
token=apn.hex_to_b64(token),
user=self.user_profile,
ios_app_id=settings.ZULIP_IOS_APP_ID)
self.remote_tokens = [u'cccc']
for token in self.remote_tokens:
RemotePushDeviceToken.objects.create(
kind=RemotePushDeviceToken.APNS,
token=apn.hex_to_b64(token),
user_id=self.user_profile.id,
server=RemoteZulipServer.objects.get(uuid=self.server_uuid),
)
self.sending_client = get_client('test')
self.sender = get_user_profile_by_email('hamlet@zulip.com')
def tearDown(self):
# type: () -> None
super(PushNotificationTest, self).tearDown()
for i in [100, 200]:
self.redis_client.delete(apn.get_apns_key(i))
def get_message(self, type, type_id=100):
# type: (int, int) -> Message
recipient, _ = Recipient.objects.get_or_create(
type_id=type_id,
type=type,
)
return Message.objects.create(
sender=self.sender,
recipient=recipient,
subject='Test Message',
content='This is test content',
pub_date=now(),
sending_client=self.sending_client,
)
class HandlePushNotificationTest(PushNotificationTest):
def bounce_request(self, *args, **kwargs):
# type: (*Any, **Any) -> HttpResponse
"""This method is used to carry out the push notification bouncer
requests using the Django test browser, rather than python-requests.
"""
# args[0] is method, args[1] is URL.
local_url = args[1].replace(settings.PUSH_NOTIFICATION_BOUNCER_URL, "")
if args[0] == "POST":
result = self.client_post(local_url,
kwargs['data'],
content_type="application/json",
**self.get_auth())
else:
raise AssertionError("Unsupported method for bounce_request")
return result
def test_end_to_end(self):
# type: () -> None
remote_gcm_tokens = [u'dddd']
for token in remote_gcm_tokens:
RemotePushDeviceToken.objects.create(
kind=RemotePushDeviceToken.GCM,
token=apn.hex_to_b64(token),
user_id=self.user_profile.id,
server=RemoteZulipServer.objects.get(uuid=self.server_uuid),
)
message = self.get_message(Recipient.PERSONAL, type_id=1)
UserMessage.objects.create(
user_profile=self.user_profile,
message=message
)
missed_message = {'message_id': message.id}
with self.settings(PUSH_NOTIFICATION_BOUNCER_URL=''), \
mock.patch('zerver.lib.push_notifications.requests.request',
side_effect=self.bounce_request), \
mock.patch('zerver.lib.push_notifications._do_push_to_apns_service'), \
mock.patch('zerver.lib.push_notifications.gcm') as mock_gcm, \
mock.patch('logging.info') as mock_info:
apns_devices = [
(apn.b64_to_hex(device.token), device.ios_app_id, device.token)
for device in RemotePushDeviceToken.objects.filter(
kind=PushDeviceToken.APNS)
]
gcm_devices = [
(apn.b64_to_hex(device.token), device.ios_app_id, device.token)
for device in RemotePushDeviceToken.objects.filter(
kind=PushDeviceToken.GCM)
]
mock_gcm.json_request.return_value = {
'success': {gcm_devices[0][2]: message.id}}
apn.handle_push_notification(self.user_profile.id, missed_message)
mock_info.assert_called_with(
"APNS: Sending apple push "
"notification to devices: %s" % (apns_devices,))
for _, _, token in gcm_devices:
mock_info.assert_any_call(
"GCM: Sent %s as %s" % (token, message.id))
def test_disabled_notifications(self):
# type: () -> None
user_profile = get_user_profile_by_email('hamlet@zulip.com')
user_profile.enable_online_email_notifications = False
user_profile.enable_online_push_notifications = False
user_profile.enable_offline_email_notifications = False
user_profile.enable_offline_push_notifications = False
user_profile.save()
apn.handle_push_notification(user_profile.id, {})
def test_read_message(self):
# type: () -> None
user_profile = get_user_profile_by_email('hamlet@zulip.com')
message = self.get_message(Recipient.PERSONAL, type_id=1)
UserMessage.objects.create(
user_profile=user_profile,
flags=UserMessage.flags.read,
message=message
)
missed_message = {'message_id': message.id}
apn.handle_push_notification(user_profile.id, missed_message)
def test_send_notifications_to_bouncer(self):
# type: () -> None
user_profile = get_user_profile_by_email('hamlet@zulip.com')
message = self.get_message(Recipient.PERSONAL, type_id=1)
UserMessage.objects.create(
user_profile=user_profile,
message=message
)
missed_message = {'message_id': message.id}
with self.settings(PUSH_NOTIFICATION_BOUNCER_URL=True), \
mock.patch('zerver.lib.push_notifications.get_apns_payload',
return_value={'apns': True}), \
mock.patch('zerver.lib.push_notifications.get_gcm_payload',
return_value={'gcm': True}), \
mock.patch('zerver.lib.push_notifications'
'.send_notifications_to_bouncer') as mock_send:
apn.handle_push_notification(user_profile.id, missed_message)
mock_send.assert_called_with(user_profile.id,
{'apns': True},
{'gcm': True},
)
def test_non_bouncer_push(self):
# type: () -> None
message = self.get_message(Recipient.PERSONAL, type_id=1)
UserMessage.objects.create(
user_profile=self.user_profile,
message=message
)
for token in [u'dddd']:
PushDeviceToken.objects.create(
kind=PushDeviceToken.GCM,
token=apn.hex_to_b64(token),
user=self.user_profile)
android_devices = list(
PushDeviceToken.objects.filter(user=self.user_profile,
kind=PushDeviceToken.GCM))
apple_devices = list(
PushDeviceToken.objects.filter(user=self.user_profile,
kind=PushDeviceToken.APNS))
missed_message = {'message_id': message.id}
with mock.patch('zerver.lib.push_notifications.get_apns_payload',
return_value={'apns': True}), \
mock.patch('zerver.lib.push_notifications.get_gcm_payload',
return_value={'gcm': True}), \
mock.patch('zerver.lib.push_notifications'
'.send_apple_push_notification') as mock_send_apple, \
mock.patch('zerver.lib.push_notifications'
'.send_android_push_notification') as mock_send_android:
apn.handle_push_notification(self.user_profile.id, missed_message)
mock_send_apple.assert_called_with(self.user_profile.id,
apple_devices,
badge=1,
zulip={'apns': True})
mock_send_android.assert_called_with(android_devices,
{'gcm': True})
def test_user_message_does_not_exist(self):
# type: () -> None
missed_message = {'message_id': 100}
with mock.patch('logging.error') as mock_logger:
apn.handle_push_notification(self.user_profile.id, missed_message)
mock_logger.assert_called_with("Could not find UserMessage with "
"message_id 100")
class APNsMessageTest(PushNotificationTest):
@mock.patch('random.getrandbits', side_effect=[100, 200])
def test_apns_message(self, mock_getrandbits):
# type: (mock.MagicMock) -> None
apn.APNsMessage(self.user_profile.id, self.tokens, alert="test")
data = self.redis_client.hgetall(apn.get_apns_key(100))
self.assertEqual(data['token'], 'aaaa')
self.assertEqual(int(data['user_id']), self.user_profile.id)
data = self.redis_client.hgetall(apn.get_apns_key(200))
self.assertEqual(data['token'], 'bbbb')
self.assertEqual(int(data['user_id']), self.user_profile.id)
class ResponseListenerTest(PushNotificationTest):
def get_error_response(self, **kwargs):
# type: (**Any) -> Dict[str, SupportsInt]
er = {'identifier': 0, 'status': 0} # type: Dict[str, SupportsInt]
er.update({k: v for k, v in kwargs.items() if k in er})
return er
def get_cache_value(self):
# type: () -> Dict[str, Union[str, int]]
return {'token': 'aaaa', 'user_id': self.user_profile.id}
@mock.patch('logging.warn')
def test_cache_does_not_exist(self, mock_warn):
# type: (mock.MagicMock) -> None
err_rsp = self.get_error_response(identifier=100, status=1)
apn.response_listener(err_rsp)
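# (added note) The doubled negative in the expected message below matches
# the implementation's log wording verbatim; do not "fix" it here without
# also changing the implementation.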
msg = "APNs key, apns:100, doesn't not exist."
mock_warn.assert_called_once_with(msg)
@mock.patch('logging.warn')
def test_cache_exists(self, mock_warn):
# type: (mock.MagicMock) -> None
self.redis_client.hmset(apn.get_apns_key(100), self.get_cache_value())
err_rsp = self.get_error_response(identifier=100, status=1)
apn.response_listener(err_rsp)
b64_token = apn.hex_to_b64('aaaa')
errmsg = apn.ERROR_CODES[int(err_rsp['status'])]
msg = ("APNS: Failed to deliver APNS notification to %s, "
"reason: %s" % (b64_token, errmsg))
mock_warn.assert_called_once_with(msg)
@mock.patch('logging.warn')
def test_error_code_eight(self, mock_warn):
# type: (mock.MagicMock) -> None
self.redis_client.hmset(apn.get_apns_key(100), self.get_cache_value())
err_rsp = self.get_error_response(identifier=100, status=8)
b64_token = apn.hex_to_b64('aaaa')
self.assertEqual(PushDeviceToken.objects.filter(
user=self.user_profile, token=b64_token).count(), 1)
apn.response_listener(err_rsp)
self.assertEqual(mock_warn.call_count, 2)
self.assertEqual(PushDeviceToken.objects.filter(
user=self.user_profile, token=b64_token).count(), 0)
@mock.patch('logging.warn')
def test_error_code_eight_when_token_doesnt_exist(self, mock_warn):
# type: (mock.MagicMock) -> None
cache_value = self.get_cache_value()
cache_value['token'] = 'cccc'
self.redis_client.hmset(apn.get_apns_key(100), cache_value)
err_rsp = self.get_error_response(identifier=100, status=8)
apn.response_listener(err_rsp)
self.assertEqual(PushDeviceToken.objects.all().count(), 2)
@mock.patch('logging.warn')
def test_error_code_eight_with_zilencer(self, mock_warn):
# type: (mock.MagicMock) -> None
cache_value = self.get_cache_value()
cache_value['token'] = 'cccc'
self.redis_client.hmset(apn.get_apns_key(100), cache_value)
err_rsp = self.get_error_response(identifier=100, status=8)
self.assertEqual(RemotePushDeviceToken.objects.all().count(), 1)
with self.settings(ZILENCER_ENABLED=True):
apn.response_listener(err_rsp)
self.assertEqual(RemotePushDeviceToken.objects.all().count(), 0)
@mock.patch('logging.warn')
def test_error_code_eight_with_zilencer_when_token_doesnt_exist(self, mock_warn):
# type: (mock.MagicMock) -> None
cache_value = self.get_cache_value()
cache_value['token'] = 'dddd'
self.redis_client.hmset(apn.get_apns_key(100), cache_value)
err_rsp = self.get_error_response(identifier=100, status=8)
self.assertEqual(RemotePushDeviceToken.objects.all().count(), 1)
with self.settings(ZILENCER_ENABLED=True):
apn.response_listener(err_rsp)
self.assertEqual(RemotePushDeviceToken.objects.all().count(), 1)
class TestGetAlertFromMessage(PushNotificationTest):
def test_get_alert_from_message(self):
# type: () -> None
alert = apn.get_alert_from_message(self.get_message(Recipient.HUDDLE))
self.assertEqual(alert, "New private group message from King Hamlet")
alert = apn.get_alert_from_message(self.get_message(Recipient.PERSONAL))
self.assertEqual(alert, "New private message from King Hamlet")
alert = apn.get_alert_from_message(self.get_message(Recipient.STREAM))
self.assertEqual(alert, "New mention from King Hamlet")
alert = apn.get_alert_from_message(self.get_message(0))
self.assertEqual(alert,
"New Zulip mentions and private messages from King "
"Hamlet")
class TestGetAPNsPayload(PushNotificationTest):
def test_get_apns_payload(self):
# type: () -> None
message = self.get_message(Recipient.HUDDLE)
payload = apn.get_apns_payload(message)
expected = {
"alert": "New private group message from King Hamlet",
"message_ids": [message.id],
}
self.assertDictEqual(payload, expected)
class TestGetGCMPayload(PushNotificationTest):
def test_get_gcm_payload(self):
# type: () -> None
email = "hamlet@zulip.com"
stream = Stream.objects.filter(name='Verona').get()
message = self.get_message(Recipient.STREAM, stream.id)
message.content = 'a' * 210
message.save()
user_profile = get_user_profile_by_email(email)
payload = apn.get_gcm_payload(user_profile, message)
expected = {
"user": email,
"event": "message",
"alert": "New mention from King Hamlet",
"zulip_message_id": message.id,
"time": apn.datetime_to_timestamp(message.pub_date),
"content": 'a' * 200 + '...',
"content_truncated": True,
"sender_email": "hamlet@zulip.com",
"sender_full_name": "King Hamlet",
"sender_avatar_url": apn.avatar_url(message.sender),
"recipient_type": "stream",
"stream": apn.get_display_recipient(message.recipient),
"topic": message.subject,
}
self.assertDictEqual(payload, expected)
def test_get_gcm_payload_personal(self):
# type: () -> None
email = "hamlet@zulip.com"
message = self.get_message(Recipient.PERSONAL, 1)
user_profile = get_user_profile_by_email(email)
payload = apn.get_gcm_payload(user_profile, message)
expected = {
"user": email,
"event": "message",
"alert": "New private message from King Hamlet",
"zulip_message_id": message.id,
"time": apn.datetime_to_timestamp(message.pub_date),
"content": message.content,
"content_truncated": False,
"sender_email": "hamlet@zulip.com",
"sender_full_name": "King Hamlet",
"sender_avatar_url": apn.avatar_url(message.sender),
"recipient_type": "private",
}
self.assertDictEqual(payload, expected)
class TestSendNotificationsToBouncer(ZulipTestCase):
@mock.patch('zerver.lib.push_notifications.send_to_push_bouncer')
def test_send_notifications_to_bouncer(self, mock_send):
# type: (mock.MagicMock) -> None
apn.send_notifications_to_bouncer(1, {'apns': True}, {'gcm': True})
post_data = {
'user_id': 1,
'apns_payload': {'apns': True},
'gcm_payload': {'gcm': True},
}
mock_send.assert_called_with('POST',
'notify',
ujson.dumps(post_data),
extra_headers={'Content-type':
'application/json'})
class TestSendToPushBouncer(PushNotificationTest):
class Result(object):
def __init__(self, status=200, content=ujson.dumps({'msg': 'error'})):
# type: (int, str) -> None
self.status_code = status
self.content = content
@mock.patch('requests.request', return_value=Result(status=500))
def test_500_error(self, mock_request):
# type: (mock.MagicMock) -> None
with self.assertRaises(apn.JsonableError) as exc:
apn.send_to_push_bouncer('register', 'register', {'data': True})
self.assertEqual(exc.exception.error,
'Error received from push notification bouncer')
@mock.patch('requests.request', return_value=Result(status=400))
def test_400_error(self, mock_request):
# type: (mock.MagicMock) -> None
with self.assertRaises(apn.JsonableError) as exc:
apn.send_to_push_bouncer('register', 'register', {'msg': True})
self.assertEqual(exc.exception.error, 'error')
@mock.patch('requests.request', return_value=Result(status=400, content='/'))
def test_400_error_when_content_is_not_serializable(self, mock_request):
# type: (mock.MagicMock) -> None
with self.assertRaises(apn.JsonableError) as exc:
apn.send_to_push_bouncer('register', 'register', {'msg': True})
self.assertEqual(exc.exception.error,
'Error received from push notification bouncer')
@mock.patch('requests.request', return_value=Result(status=300, content='/'))
def test_300_error(self, mock_request):
# type: (mock.MagicMock) -> None
with self.assertRaises(apn.JsonableError) as exc:
apn.send_to_push_bouncer('register', 'register', {'msg': True})
self.assertEqual(exc.exception.error,
'Error received from push notification bouncer')
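    # Taken together, these tests pin down the error contract of
    # send_to_push_bouncer(): a 400 whose JSON body carries a 'msg' key
    # re-raises that message verbatim, while any other non-2xx response
    # (500, a 400 with an unparseable body, 3xx) surfaces as the generic
    # 'Error received from push notification bouncer'.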
class TestNumPushDevicesForUser(PushNotificationTest):
def test_when_kind_is_none(self):
# type: () -> None
self.assertEqual(apn.num_push_devices_for_user(self.user_profile), 2)
def test_when_kind_is_not_none(self):
# type: () -> None
count = apn.num_push_devices_for_user(self.user_profile,
kind=PushDeviceToken.APNS)
self.assertEqual(count, 2)
class TestPushApi(ZulipTestCase):
def test_push_api(self):
# type: () -> None
user = self.example_user('cordelia')
email = user.email
self.login(email)
endpoints = [
('/json/users/me/apns_device_token', 'apple-token'),
('/json/users/me/android_gcm_reg_id', 'android-token'),
]
# Test error handling
for endpoint, _ in endpoints:
# Try adding/removing tokens that are too big...
broken_token = "x" * 5000 # too big
result = self.client_post(endpoint, {'token': broken_token})
self.assert_json_error(result, 'Empty or invalid length token')
result = self.client_delete(endpoint, {'token': broken_token})
self.assert_json_error(result, 'Empty or invalid length token')
# Try to remove a non-existent token...
result = self.client_delete(endpoint, {'token': 'non-existent token'})
self.assert_json_error(result, 'Token does not exist')
# Add tokens
for endpoint, token in endpoints:
# Test that we can push twice
result = self.client_post(endpoint, {'token': token})
self.assert_json_success(result)
result = self.client_post(endpoint, {'token': token})
self.assert_json_success(result)
tokens = list(PushDeviceToken.objects.filter(user=user, token=token))
self.assertEqual(len(tokens), 1)
self.assertEqual(tokens[0].token, token)
# User should have tokens for both devices now.
tokens = list(PushDeviceToken.objects.filter(user=user))
self.assertEqual(len(tokens), 2)
# Remove tokens
for endpoint, token in endpoints:
result = self.client_delete(endpoint, {'token': token})
self.assert_json_success(result)
tokens = list(PushDeviceToken.objects.filter(user=user, token=token))
self.assertEqual(len(tokens), 0)
class SendNotificationTest(PushNotificationTest):
@mock.patch('logging.warn')
@mock.patch('logging.info')
@mock.patch('zerver.lib.push_notifications._do_push_to_apns_service')
    def test_send_apple_push_notification(self, mock_send, mock_info, mock_warn):
# type: (mock.MagicMock, mock.MagicMock, mock.MagicMock) -> None
def test_send(user_id, message, alert):
# type: (int, Message, str) -> None
self.assertEqual(user_id, self.user_profile.id)
self.assertEqual(set(message.tokens), set(self.tokens))
mock_send.side_effect = test_send
apn.send_apple_push_notification_to_user(self.user_profile, "test alert")
self.assertEqual(mock_send.call_count, 1)
@mock.patch('apns.GatewayConnection.send_notification_multiple')
def test_do_push_to_apns_service(self, mock_push):
# type: (mock.MagicMock) -> None
msg = apn.APNsMessage(self.user_profile.id, self.tokens, alert="test")
def test_push(message):
# type: (Message) -> None
self.assertIs(message, msg.get_frame())
mock_push.side_effect = test_push
apn._do_push_to_apns_service(self.user_profile.id, msg, apn.connection)
@mock.patch('logging.warn')
@mock.patch('logging.info')
@mock.patch('apns.GatewayConnection.send_notification_multiple')
def test_connection_none(self, mock_push, mock_info, mock_warn):
# type: (mock.MagicMock, mock.MagicMock, mock.MagicMock) -> None
apn.connection = None
apn.send_apple_push_notification_to_user(self.user_profile, "test alert")
class APNsFeedbackTest(PushNotificationTest):
@mock.patch('logging.info')
@mock.patch('apns.FeedbackConnection.items')
def test_feedback(self, mock_items, mock_info):
# type: (mock.MagicMock, mock.MagicMock) -> None
update_time = apn.timestamp_to_datetime(int(time.time()) - 10000)
PushDeviceToken.objects.all().update(last_updated=update_time)
mock_items.return_value = [
('aaaa', int(time.time())),
]
self.assertEqual(PushDeviceToken.objects.all().count(), 2)
apn.check_apns_feedback()
self.assertEqual(PushDeviceToken.objects.all().count(), 1)
class GCMTest(PushNotificationTest):
def setUp(self):
# type: () -> None
super(GCMTest, self).setUp()
apn.gcm = gcm.GCM('fake key')
self.gcm_tokens = [u'1111', u'2222']
for token in self.gcm_tokens:
PushDeviceToken.objects.create(
kind=PushDeviceToken.GCM,
token=apn.hex_to_b64(token),
user=self.user_profile,
ios_app_id=None)
def get_gcm_data(self, **kwargs):
# type: (**Any) -> Dict[str, Any]
data = {
'key 1': 'Data 1',
'key 2': 'Data 2',
}
data.update(kwargs)
return data
class GCMNotSetTest(GCMTest):
@mock.patch('logging.warning')
def test_gcm_is_none(self, mock_warning):
# type: (mock.MagicMock) -> None
apn.gcm = None
apn.send_android_push_notification_to_user(self.user_profile, {})
mock_warning.assert_called_with("Attempting to send a GCM push "
"notification, but no API key was "
"configured")
class GCMIOErrorTest(GCMTest):
@mock.patch('zerver.lib.push_notifications.gcm.json_request')
@mock.patch('logging.warning')
def test_json_request_raises_ioerror(self, mock_warn, mock_json_request):
# type: (mock.MagicMock, mock.MagicMock) -> None
mock_json_request.side_effect = IOError('error')
apn.send_android_push_notification_to_user(self.user_profile, {})
mock_warn.assert_called_with('error')
class GCMSuccessTest(GCMTest):
@mock.patch('logging.warning')
@mock.patch('logging.info')
@mock.patch('gcm.GCM.json_request')
def test_success(self, mock_send, mock_info, mock_warning):
# type: (mock.MagicMock, mock.MagicMock, mock.MagicMock) -> None
res = {}
res['success'] = {token: ind for ind, token in enumerate(self.gcm_tokens)}
mock_send.return_value = res
data = self.get_gcm_data()
apn.send_android_push_notification_to_user(self.user_profile, data)
self.assertEqual(mock_info.call_count, 2)
c1 = call("GCM: Sent 1111 as 0")
c2 = call("GCM: Sent 2222 as 1")
mock_info.assert_has_calls([c1, c2], any_order=True)
mock_warning.assert_not_called()
class GCMCanonicalTest(GCMTest):
@mock.patch('logging.warning')
@mock.patch('gcm.GCM.json_request')
def test_equal(self, mock_send, mock_warning):
# type: (mock.MagicMock, mock.MagicMock) -> None
res = {}
res['canonical'] = {1: 1}
mock_send.return_value = res
data = self.get_gcm_data()
apn.send_android_push_notification_to_user(self.user_profile, data)
mock_warning.assert_called_once_with("GCM: Got canonical ref but it "
"already matches our ID 1!")
@mock.patch('logging.warning')
@mock.patch('gcm.GCM.json_request')
def test_pushdevice_not_present(self, mock_send, mock_warning):
# type: (mock.MagicMock, mock.MagicMock) -> None
res = {}
t1 = apn.hex_to_b64(u'1111')
t2 = apn.hex_to_b64(u'3333')
res['canonical'] = {t1: t2}
mock_send.return_value = res
def get_count(hex_token):
# type: (Text) -> int
token = apn.hex_to_b64(hex_token)
return PushDeviceToken.objects.filter(
token=token, kind=PushDeviceToken.GCM).count()
self.assertEqual(get_count(u'1111'), 1)
self.assertEqual(get_count(u'3333'), 0)
data = self.get_gcm_data()
apn.send_android_push_notification_to_user(self.user_profile, data)
msg = ("GCM: Got canonical ref %s "
"replacing %s but new ID not "
"registered! Updating.")
mock_warning.assert_called_once_with(msg % (t2, t1))
self.assertEqual(get_count(u'1111'), 0)
self.assertEqual(get_count(u'3333'), 1)
@mock.patch('logging.info')
@mock.patch('gcm.GCM.json_request')
def test_pushdevice_different(self, mock_send, mock_info):
# type: (mock.MagicMock, mock.MagicMock) -> None
res = {}
old_token = apn.hex_to_b64(u'1111')
new_token = apn.hex_to_b64(u'2222')
res['canonical'] = {old_token: new_token}
mock_send.return_value = res
def get_count(hex_token):
# type: (Text) -> int
token = apn.hex_to_b64(hex_token)
return PushDeviceToken.objects.filter(
token=token, kind=PushDeviceToken.GCM).count()
self.assertEqual(get_count(u'1111'), 1)
self.assertEqual(get_count(u'2222'), 1)
data = self.get_gcm_data()
apn.send_android_push_notification_to_user(self.user_profile, data)
mock_info.assert_called_once_with(
"GCM: Got canonical ref %s, dropping %s" % (new_token, old_token))
self.assertEqual(get_count(u'1111'), 0)
self.assertEqual(get_count(u'2222'), 1)
class GCMNotRegisteredTest(GCMTest):
@mock.patch('logging.info')
@mock.patch('gcm.GCM.json_request')
def test_not_registered(self, mock_send, mock_info):
# type: (mock.MagicMock, mock.MagicMock) -> None
res = {}
token = apn.hex_to_b64(u'1111')
res['errors'] = {'NotRegistered': [token]}
mock_send.return_value = res
def get_count(hex_token):
# type: (Text) -> int
token = apn.hex_to_b64(hex_token)
return PushDeviceToken.objects.filter(
token=token, kind=PushDeviceToken.GCM).count()
self.assertEqual(get_count(u'1111'), 1)
data = self.get_gcm_data()
apn.send_android_push_notification_to_user(self.user_profile, data)
mock_info.assert_called_once_with("GCM: Removing %s" % (token,))
self.assertEqual(get_count(u'1111'), 0)
class GCMFailureTest(GCMTest):
@mock.patch('logging.warning')
@mock.patch('gcm.GCM.json_request')
def test_failure(self, mock_send, mock_warn):
# type: (mock.MagicMock, mock.MagicMock) -> None
res = {}
token = apn.hex_to_b64(u'1111')
res['errors'] = {'Failed': [token]}
mock_send.return_value = res
data = self.get_gcm_data()
apn.send_android_push_notification_to_user(self.user_profile, data)
c1 = call("GCM: Delivery to %s failed: Failed" % (token,))
mock_warn.assert_has_calls([c1], any_order=True)
class TestReceivesNotificationsFunctions(ZulipTestCase):
def setUp(self):
# type: () -> None
self.user = self.example_user('cordelia')
def test_receivers_online_notifications_when_user_is_a_bot(self):
# type: () -> None
self.user.is_bot = True
self.user.enable_online_push_notifications = True
self.assertFalse(receives_online_notifications(self.user))
self.user.enable_online_push_notifications = False
self.assertFalse(receives_online_notifications(self.user))
def test_receivers_online_notifications_when_user_is_not_a_bot(self):
# type: () -> None
self.user.is_bot = False
self.user.enable_online_push_notifications = True
self.assertTrue(receives_online_notifications(self.user))
self.user.enable_online_push_notifications = False
self.assertFalse(receives_online_notifications(self.user))
def test_receivers_offline_notifications_when_user_is_a_bot(self):
# type: () -> None
self.user.is_bot = True
self.user.enable_offline_email_notifications = True
self.user.enable_offline_push_notifications = True
self.assertFalse(receives_offline_notifications(self.user))
self.user.enable_offline_email_notifications = False
self.user.enable_offline_push_notifications = False
self.assertFalse(receives_offline_notifications(self.user))
self.user.enable_offline_email_notifications = True
self.user.enable_offline_push_notifications = False
self.assertFalse(receives_offline_notifications(self.user))
self.user.enable_offline_email_notifications = False
self.user.enable_offline_push_notifications = True
self.assertFalse(receives_offline_notifications(self.user))
def test_receivers_offline_notifications_when_user_is_not_a_bot(self):
# type: () -> None
self.user.is_bot = False
self.user.enable_offline_email_notifications = True
self.user.enable_offline_push_notifications = True
self.assertTrue(receives_offline_notifications(self.user))
self.user.enable_offline_email_notifications = False
self.user.enable_offline_push_notifications = False
self.assertFalse(receives_offline_notifications(self.user))
self.user.enable_offline_email_notifications = True
self.user.enable_offline_push_notifications = False
self.assertTrue(receives_offline_notifications(self.user))
self.user.enable_offline_email_notifications = False
self.user.enable_offline_push_notifications = True
self.assertTrue(receives_offline_notifications(self.user))
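# A minimal sketch of the helpers the tests above exercise (assumed shape,
# inferred only from the assertions; the real implementations live elsewhere
# in the codebase):
#
#   def receives_online_notifications(user_profile):
#       return (user_profile.enable_online_push_notifications and
#               not user_profile.is_bot)
#
#   def receives_offline_notifications(user_profile):
#       return ((user_profile.enable_offline_email_notifications or
#                user_profile.enable_offline_push_notifications) and
#               not user_profile.is_bot)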
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cPickle as pickle
import os
import signal
import sys
import time
from swift import gettext_ as _
from random import random
from eventlet import spawn, patcher, Timeout
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.ring import Ring
from swift.common.utils import get_logger, renamer, write_pickle, \
dump_recon_cache, config_true_value, ismount
from swift.common.daemon import Daemon
from swift.obj.diskfile import get_tmp_dir, get_async_dir, ASYNCDIR_BASE
from swift.common.http import is_success, HTTP_NOT_FOUND, \
HTTP_INTERNAL_SERVER_ERROR
class ObjectUpdater(Daemon):
"""Update object information in container listings."""
def __init__(self, conf, logger=None):
self.conf = conf
self.logger = logger or get_logger(conf, log_route='object-updater')
self.devices = conf.get('devices', '/srv/node')
self.mount_check = config_true_value(conf.get('mount_check', 'true'))
self.swift_dir = conf.get('swift_dir', '/etc/swift')
self.interval = int(conf.get('interval', 300))
self.container_ring = None
self.concurrency = int(conf.get('concurrency', 1))
self.slowdown = float(conf.get('slowdown', 0.01))
self.node_timeout = int(conf.get('node_timeout', 10))
self.conn_timeout = float(conf.get('conn_timeout', 0.5))
self.successes = 0
self.failures = 0
self.recon_cache_path = conf.get('recon_cache_path',
'/var/cache/swift')
self.rcache = os.path.join(self.recon_cache_path, 'object.recon')
def _listdir(self, path):
try:
return os.listdir(path)
except OSError as e:
self.logger.error(_('ERROR: Unable to access %(path)s: '
'%(error)s') %
{'path': path, 'error': e})
return []
def get_container_ring(self):
"""Get the container ring. Load it, if it hasn't been yet."""
if not self.container_ring:
self.container_ring = Ring(self.swift_dir, ring_name='container')
return self.container_ring
def run_forever(self, *args, **kwargs):
"""Run the updater continuously."""
time.sleep(random() * self.interval)
while True:
self.logger.info(_('Begin object update sweep'))
begin = time.time()
pids = []
# read from container ring to ensure it's fresh
self.get_container_ring().get_nodes('')
for device in self._listdir(self.devices):
if self.mount_check and \
not ismount(os.path.join(self.devices, device)):
self.logger.increment('errors')
self.logger.warn(
_('Skipping %s as it is not mounted'), device)
continue
while len(pids) >= self.concurrency:
pids.remove(os.wait()[0])
pid = os.fork()
if pid:
pids.append(pid)
else:
signal.signal(signal.SIGTERM, signal.SIG_DFL)
patcher.monkey_patch(all=False, socket=True)
self.successes = 0
self.failures = 0
forkbegin = time.time()
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - forkbegin
self.logger.info(
_('Object update sweep of %(device)s'
' completed: %(elapsed).02fs, %(success)s successes'
', %(fail)s failures'),
{'device': device, 'elapsed': elapsed,
'success': self.successes, 'fail': self.failures})
sys.exit()
while pids:
pids.remove(os.wait()[0])
elapsed = time.time() - begin
self.logger.info(_('Object update sweep completed: %.02fs'),
elapsed)
dump_recon_cache({'object_updater_sweep': elapsed},
self.rcache, self.logger)
if elapsed < self.interval:
time.sleep(self.interval - elapsed)
def run_once(self, *args, **kwargs):
"""Run the updater once."""
self.logger.info(_('Begin object update single threaded sweep'))
begin = time.time()
self.successes = 0
self.failures = 0
for device in self._listdir(self.devices):
if self.mount_check and \
not ismount(os.path.join(self.devices, device)):
self.logger.increment('errors')
self.logger.warn(
_('Skipping %s as it is not mounted'), device)
continue
self.object_sweep(os.path.join(self.devices, device))
elapsed = time.time() - begin
self.logger.info(
_('Object update single threaded sweep completed: '
'%(elapsed).02fs, %(success)s successes, %(fail)s failures'),
{'elapsed': elapsed, 'success': self.successes,
'fail': self.failures})
dump_recon_cache({'object_updater_sweep': elapsed},
self.rcache, self.logger)
def object_sweep(self, device):
"""
If there are async pendings on the device, walk each one and update.
:param device: path to device
"""
start_time = time.time()
# loop through async pending dirs for all policies
for asyncdir in self._listdir(device):
# skip stuff like "accounts", "containers", etc.
if not (asyncdir == ASYNCDIR_BASE or
asyncdir.startswith(ASYNCDIR_BASE + '-')):
continue
# we only care about directories
async_pending = os.path.join(device, asyncdir)
if not os.path.isdir(async_pending):
continue
if asyncdir == ASYNCDIR_BASE:
policy_idx = 0
else:
_junk, policy_idx = asyncdir.split('-', 1)
try:
policy_idx = int(policy_idx)
get_async_dir(policy_idx)
except ValueError:
self.logger.warn(_('Directory %s does not map to a '
'valid policy') % asyncdir)
continue
for prefix in self._listdir(async_pending):
prefix_path = os.path.join(async_pending, prefix)
if not os.path.isdir(prefix_path):
continue
last_obj_hash = None
for update in sorted(self._listdir(prefix_path), reverse=True):
update_path = os.path.join(prefix_path, update)
if not os.path.isfile(update_path):
continue
try:
obj_hash, timestamp = update.split('-')
except ValueError:
self.logger.increment('errors')
self.logger.error(
_('ERROR async pending file with unexpected '
'name %s')
% (update_path))
continue
if obj_hash == last_obj_hash:
self.logger.increment("unlinks")
os.unlink(update_path)
else:
self.process_object_update(update_path, device,
policy_idx)
last_obj_hash = obj_hash
time.sleep(self.slowdown)
try:
os.rmdir(prefix_path)
except OSError:
pass
self.logger.timing_since('timing', start_time)
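    # Worked example of the dedup rule above: async pending files are named
    # "<obj_hash>-<timestamp>" and walked in reverse-sorted order, so only
    # the newest update per object hash is processed. Given
    #   ['abc-0000000001.00000', 'abc-0000000002.00000', 'def-0000000003.00000']
    # the sweep processes 'def-...3' and 'abc-...2', then unlinks
    # 'abc-...1' because its hash matches the update just handled.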
def process_object_update(self, update_path, device, policy_idx):
"""
Process the object information to be updated and update.
:param update_path: path to pickled object update file
:param device: path to device
:param policy_idx: storage policy index of object update
"""
try:
update = pickle.load(open(update_path, 'rb'))
except Exception:
self.logger.exception(
_('ERROR Pickle problem, quarantining %s'), update_path)
self.logger.increment('quarantines')
target_path = os.path.join(device, 'quarantined', 'objects',
os.path.basename(update_path))
renamer(update_path, target_path, fsync=False)
return
successes = update.get('successes', [])
part, nodes = self.get_container_ring().get_nodes(
update['account'], update['container'])
obj = '/%s/%s/%s' % \
(update['account'], update['container'], update['obj'])
headers_out = update['headers'].copy()
headers_out['user-agent'] = 'object-updater %s' % os.getpid()
headers_out.setdefault('X-Backend-Storage-Policy-Index',
str(policy_idx))
events = [spawn(self.object_update,
node, part, update['op'], obj, headers_out)
for node in nodes if node['id'] not in successes]
success = True
new_successes = False
for event in events:
event_success, node_id = event.wait()
if event_success is True:
successes.append(node_id)
new_successes = True
else:
success = False
if success:
self.successes += 1
self.logger.increment('successes')
self.logger.debug('Update sent for %(obj)s %(path)s',
{'obj': obj, 'path': update_path})
self.logger.increment("unlinks")
os.unlink(update_path)
else:
self.failures += 1
self.logger.increment('failures')
self.logger.debug('Update failed for %(obj)s %(path)s',
{'obj': obj, 'path': update_path})
if new_successes:
update['successes'] = successes
write_pickle(update, update_path, os.path.join(
device, get_tmp_dir(policy_idx)))
def object_update(self, node, part, op, obj, headers_out):
"""
Perform the object update to the container
:param node: node dictionary from the container ring
:param part: partition that holds the container
:param op: operation performed (ex: 'POST' or 'DELETE')
:param obj: object name being updated
:param headers_out: headers to send with the update
"""
try:
with ConnectionTimeout(self.conn_timeout):
conn = http_connect(node['ip'], node['port'], node['device'],
part, op, obj, headers_out)
with Timeout(self.node_timeout):
resp = conn.getresponse()
resp.read()
success = (is_success(resp.status) or
resp.status == HTTP_NOT_FOUND)
return (success, node['id'])
except (Exception, Timeout):
self.logger.exception(_('ERROR with remote server '
'%(ip)s:%(port)s/%(device)s'), node)
return HTTP_INTERNAL_SERVER_ERROR, node['id']
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""fuse_gtest_files.py v0.2.0
Fuses Google Test source code into a .h file and a .cc file.
SYNOPSIS
fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR
Scans GTEST_ROOT_DIR for Google Test source code, and generates
two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc.
Then you can build your tests by adding OUTPUT_DIR to the include
search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These
two files contain everything you need to use Google Test. Hence
you can "install" Google Test by copying them to wherever you want.
GTEST_ROOT_DIR can be omitted and defaults to the parent
directory of the directory holding this script.
EXAMPLES
./fuse_gtest_files.py fused_gtest
./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest
This tool is experimental. In particular, it assumes that there is no
conditional inclusion of Google Test headers. Please report any
problems to googletestframework@googlegroups.com. You can read
https://github.com/google/googletest/blob/master/googletest/docs/advanced.md for
more information.
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import re
try:
from sets import Set as set # For Python 2.3 compatibility
except ImportError:
pass
import sys
# We assume that this file is in the scripts/ directory in the Google
# Test root directory.
DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..')
# Regex for matching '#include "gtest/..."'.
INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"')
# Regex for matching '#include "src/..."'.
INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"')
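# For reference, with illustrative inputs the regexes classify lines as:
#   INCLUDE_GTEST_FILE_REGEX.match('#include "gtest/gtest-spi.h"').group(1)
#     -> 'gtest/gtest-spi.h'
#   INCLUDE_SRC_FILE_REGEX.match('#include "src/gtest-death-test.cc"').group(1)
#     -> 'src/gtest-death-test.cc'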
# Where to find the source seed files.
GTEST_H_SEED = 'include/gtest/gtest.h'
GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h'
GTEST_ALL_CC_SEED = 'src/gtest-all.cc'
# Where to put the generated files.
GTEST_H_OUTPUT = 'gtest/gtest.h'
GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc'
def VerifyFileExists(directory, relative_path):
"""Verifies that the given file exists; aborts on failure.
relative_path is the file path relative to the given directory.
"""
if not os.path.isfile(os.path.join(directory, relative_path)):
print('ERROR: Cannot find %s in directory %s.' % (relative_path,
directory))
print('Please either specify a valid project root directory '
'or omit it on the command line.')
sys.exit(1)
def ValidateGTestRootDir(gtest_root):
"""Makes sure gtest_root points to a valid gtest root directory.
The function aborts the program on failure.
"""
VerifyFileExists(gtest_root, GTEST_H_SEED)
VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED)
def VerifyOutputFile(output_dir, relative_path):
"""Verifies that the given output file path is valid.
relative_path is relative to the output_dir directory.
"""
# Makes sure the output file either doesn't exist or can be overwritten.
output_file = os.path.join(output_dir, relative_path)
if os.path.exists(output_file):
# TODO(wan@google.com): The following user-interaction doesn't
# work with automated processes. We should provide a way for the
# Makefile to force overwriting the files.
print('%s already exists in directory %s - overwrite it? (y/N) ' %
(relative_path, output_dir))
answer = sys.stdin.readline().strip()
if answer not in ['y', 'Y']:
print('ABORTED.')
sys.exit(1)
# Makes sure the directory holding the output file exists; creates
# it and all its ancestors if necessary.
parent_directory = os.path.dirname(output_file)
if not os.path.isdir(parent_directory):
os.makedirs(parent_directory)
def ValidateOutputDir(output_dir):
"""Makes sure output_dir points to a valid output directory.
The function aborts the program on failure.
"""
VerifyOutputFile(output_dir, GTEST_H_OUTPUT)
VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT)
def FuseGTestH(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest.h in output_dir."""
output_file = open(os.path.join(output_dir, GTEST_H_OUTPUT), 'w')
processed_files = set() # Holds all gtest headers we've processed.
def ProcessFile(gtest_header_path):
"""Processes the given gtest header file."""
# We don't process the same header twice.
if gtest_header_path in processed_files:
return
processed_files.add(gtest_header_path)
# Reads each line in the given gtest header.
for line in open(os.path.join(gtest_root, gtest_header_path), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/..."' - let's process it recursively.
ProcessFile('include/' + m.group(1))
else:
# Otherwise we copy the line unchanged to the output file.
output_file.write(line)
ProcessFile(GTEST_H_SEED)
ProcessFile(GTEST_SPI_H_SEED)
output_file.close()
def FuseGTestAllCcToFile(gtest_root, output_file):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_file."""
processed_files = set()
def ProcessFile(gtest_source_file):
"""Processes the given gtest source file."""
# We don't process the same #included file twice.
if gtest_source_file in processed_files:
return
processed_files.add(gtest_source_file)
# Reads each line in the given gtest source file.
for line in open(os.path.join(gtest_root, gtest_source_file), 'r'):
m = INCLUDE_GTEST_FILE_REGEX.match(line)
if m:
# It's '#include "gtest/foo.h"'.
# We treat it as '#include "gtest/gtest.h"', as all other
# gtest headers are being fused into gtest.h and cannot be
# #included directly.
# There is no need to #include "gtest/gtest.h" more than once.
if not GTEST_H_SEED in processed_files:
processed_files.add(GTEST_H_SEED)
output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,))
else:
m = INCLUDE_SRC_FILE_REGEX.match(line)
if m:
# It's '#include "src/foo"' - let's process it recursively.
ProcessFile(m.group(1))
else:
output_file.write(line)
ProcessFile(GTEST_ALL_CC_SEED)
def FuseGTestAllCc(gtest_root, output_dir):
"""Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir."""
output_file = open(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w')
FuseGTestAllCcToFile(gtest_root, output_file)
output_file.close()
def FuseGTest(gtest_root, output_dir):
"""Fuses gtest.h and gtest-all.cc."""
ValidateGTestRootDir(gtest_root)
ValidateOutputDir(output_dir)
FuseGTestH(gtest_root, output_dir)
FuseGTestAllCc(gtest_root, output_dir)
def main():
argc = len(sys.argv)
if argc == 2:
# fuse_gtest_files.py OUTPUT_DIR
FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1])
elif argc == 3:
# fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR
FuseGTest(sys.argv[1], sys.argv[2])
else:
print(__doc__)
sys.exit(1)
if __name__ == '__main__':
main()
from collections import defaultdict
from itertools import ifilter
from datetime import datetime
from django.db import models, connection
from django.utils.translation import ugettext, ugettext_lazy as _
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db.models import Count
from django_extensions.db import fields as django_extensions_fields
from django_countries import CountryField
import forms_builder
import forms_builder.forms.fields
import forms_builder.forms.models
from crowdataapp.middleware import get_current_user
DEFAULT_TEMPLATE_JS = """// Javascript function to insert the document into the DOM.
// Receives the URL of the document as its only parameter.
// Must be called insertDocument
// JQuery is available
// resulting element should be inserted into div#document-viewer-container
function insertDocument(document_url) {
}
"""
# Some monkeypatching: we don't want every field type to be available in forms
#from forms_builder.forms import fields
ALLOWED_FIELD_TYPES = (
forms_builder.forms.fields.TEXT,
forms_builder.forms.fields.TEXTAREA,
forms_builder.forms.fields.CHECKBOX,
forms_builder.forms.fields.CHECKBOX_MULTIPLE,
forms_builder.forms.fields.SELECT,
forms_builder.forms.fields.SELECT_MULTIPLE,
forms_builder.forms.fields.DATE,
forms_builder.forms.fields.DATE_TIME,
forms_builder.forms.fields.HIDDEN,
forms_builder.forms.fields.NUMBER,
forms_builder.forms.fields.URL,
)
forms_builder.forms.models.Field._meta.local_fields[3]._choices \
= filter(lambda i: i[0] in ALLOWED_FIELD_TYPES,
forms_builder.forms.fields.NAMES)
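# Net effect of the patch above: the form builder's field-type dropdown only
# offers the types in ALLOWED_FIELD_TYPES; local_fields[3] is presumably the
# field_type column of forms_builder's Field model, whose choices are
# filtered in place here.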
class UserProfile(models.Model):
user = models.ForeignKey(User, unique=True)
    name = models.CharField(_('Your Name'), max_length=128, null=False, blank=False)
country = CountryField(_('Your country'), null=True)
show_in_leaderboard = models.BooleanField(_("Appear in the leaderboards"),
default=True,
help_text=_("If checked, you will appear in CrowData's leaderboards"))
class DocumentSetManager(models.Manager):
def get_query_set(self):
u = get_current_user() # from LocalUserMiddleware
rv = super(DocumentSetManager, self).get_query_set()
# only get published if we got a User and is not staff/superuser
if (u is not None) and (not u.is_staff) and (not u.is_superuser):
rv = rv.filter(published=True)
return rv
class DocumentSet(models.Model):
objects = DocumentSetManager()
    name = models.CharField(_('Document set name'), max_length=128,)
description = models.TextField(null=True,
blank=True,
help_text=_('Description for this Document Set'))
header_image = models.URLField(null=True,
blank=True,
help_text=_('Header Image URL'))
    slug = django_extensions_fields.AutoSlugField(populate_from='name')
tosum_field = models.ForeignKey("DocumentSetFormField",
related_name='tosum_fields',
null=True, blank=True,
verbose_name='Field to sum on',
help_text=_("Field from the form to sum total on. This will be displayed in the document set's homepage."))
entries_threshold = models.IntegerField(default=3,
null=False,
help_text=_('Minimum number of coincidental answers for a field before marking it as valid'))
head_html = models.TextField(default='<!-- <script> or <link rel="stylesheet"> tags go here -->',
null=True,
help_text=_('HTML to be inserted in the <head> element in this page'))
published = models.BooleanField(_('Published'),
default=True,
help_text=_('Is this Document Set published to non-admins?'))
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class Meta:
verbose_name = _('Document Set')
verbose_name_plural = _('Document Sets')
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse('crowdataapp.views.document_set_view',
args=[self.slug])
def admin_links(self):
kw = {"args": (self.id,)}
links = [
(_("Export all answers to CSV"), reverse("admin:document_set_answers_csv", **kw)),
(_("Add Documents to this document set"), reverse("admin:document_set_add_documents", **kw)),
(_("Update Canons to this document set"), reverse("admin:document_set_update_canons", **kw))
]
for i, (text, url) in enumerate(links):
links[i] = "<a href='%s'>%s</a>" % (url, ugettext(text))
return "<br>".join(links)
admin_links.allow_tags = True
admin_links.short_description = ""
def field_names(self):
"""Used for column names in CSV export of
:class:`DocumentUserFormEntry`
"""
entry_time_name = forms_builder.forms.models.FormEntry._meta.get_field('entry_time').verbose_name.title()
document_title_name = Document._meta.get_field('name').verbose_name.title()
document_url_name = Document._meta.get_field('url').verbose_name.title()
form = self.form.all()[0]
return ['user'] \
+ [document_title_name, document_url_name] \
+ [f.label
for f in form.fields.all()] \
+ [entry_time_name]
def get_pending_documents(self):
return self.documents.filter(verified=False)
def get_pending_documents_by_category(self, category):
return self.documents.filter(verified=False, category=category)
def get_pending_documents_with_entries(self):
return self.documents.filter(verified=False)
def get_pending_documents_count_for_user(self, user):
return self.get_pending_documents().exclude(form_entries__user=user).count()
def get_pending_documents_for_user(self, user):
return self.get_pending_documents().exclude(form_entries__user=user)
def get_verified_documents_count_for_user(self, user):
return self.documents.filter(verified=True, form_entries__user=user).count()
def get_verified_documents_for_user(self, user):
return self.documents.filter(verified=True, form_entries__user=user).distinct()
def get_verified_documents(self):
return self.documents.filter(verified=True)
def get_reviewed_documents_count_for_user(self, user):
return self.documents.filter(form_entries__user=user).count()
def leaderboard(self):
""" Returns a queryset of the biggest contributors (User) to this DocumentSet """
return User.objects.filter(documentsetformentry__form__document_set=self).annotate(num_entries=Count('documentsetformentry')).order_by('-num_entries')
def userboard(self, user):
""" Returns a queryset of the contributors (User) around user to this DocumentSet """
reviewed_documents = self.get_reviewed_documents_count_for_user(user)
        count = self.leaderboard().filter(num_entries__gte=reviewed_documents).count()
        count_top = count - 3 if count - 3 >= 0 else 0
        user_top = self.leaderboard().filter(num_entries__gte=reviewed_documents)[count_top:]
        user_down = self.leaderboard().filter(num_entries__lte=reviewed_documents).exclude(pk=user)[:3]
result = [user_top, user_down]
return [item for sublist in result for item in sublist]
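    # Example: with leaderboard entry counts [9, 7, 5, 4, 2] and a user who
    # has 5 entries, user_top is the tail of the >=5 slice ([9, 7, 5], i.e.
    # the user plus up to two contributors above) and user_down holds up to
    # three contributors at or below ([4, 2]); the two are flattened into a
    # single list.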
def amount_on_field(self):
""" Sums the total on verified field on the amount """
query = """ SELECT SUM(field_entry.value::DOUBLE PRECISION)
FROM crowdataapp_documentsetfieldentry field_entry
INNER JOIN crowdataapp_documentsetformentry form_entry ON form_entry.id = field_entry.entry_id
INNER JOIN crowdataapp_document document ON document.id = form_entry.document_id
WHERE document.document_set_id = %d
AND field_entry.verified = TRUE
AND field_entry.field_id = %d""" % ( self.id, self.tosum_field.id)
cursor = connection.cursor()
cursor.execute(query)
amount = cursor.fetchall()[0][0]
return amount
class DocumentSetForm(forms_builder.forms.models.AbstractForm):
document_set = models.ForeignKey(DocumentSet, unique=True, related_name='form')
#document_set = models.OneToOneField(DocumentSet, parent_link=True)
@models.permalink
def get_absolute_url(self):
return ('crowdata_form_detail', (), { 'slug': self.slug })
class DocumentSetFormFieldManager(models.Manager):
"""
    Only show visible fields when displaying the actual form.
"""
def visible(self):
return self.filter(visible=True).order_by('order')
class DocumentSetFormField(forms_builder.forms.models.AbstractField):
autocomplete = models.BooleanField(_("Autocomplete"),
help_text=_("If checked, this text field will have autocompletion"))
form = models.ForeignKey(DocumentSetForm, related_name="fields")
order = models.IntegerField(_("Order"), null=True, blank=True)
group = models.CharField(_("Group"),max_length= 200 ,
help_text=_("If checked, this text field will have autocompletion"))
verify = models.BooleanField(_("Verify"), default=True)
objects = DocumentSetFormFieldManager()
def save(self, *args, **kwargs):
if self.order is None:
self.order = self.form.fields.count()
super(DocumentSetFormField, self).save(*args, **kwargs)
def delete(self, *args, **kwargs):
fields_after = self.form.fields.filter(order__gte=self.order)
fields_after.update(order=models.F("order") - 1)
DocumentSetFieldEntry.objects.filter(field_id=self.id).delete()
super(DocumentSetFormField, self).delete(*args, **kwargs)
class DocumentSetFormEntry(forms_builder.forms.models.AbstractFormEntry):
""" A :class:`forms_builder.forms.models.AbstractFormEntry` plus
foreign keys to the :class:`User` and filled the form and the
:class:`Document` it belongs to
"""
form = models.ForeignKey("DocumentSetForm", related_name='entries')
document = models.ForeignKey('Document', related_name='form_entries', blank=True, null=True)
user = models.ForeignKey(User, blank=True, null=True)
def to_dict(self):
form_fields = dict([(f.id, f.label)
for f in self.form.fields.all()])
entry_time_name = forms_builder.forms.models.FormEntry._meta.get_field('entry_time').verbose_name.title()
rv = dict()
rv['user'] = str(self.user.pk)
rv['username'] = self.user.get_username()
rv[Document._meta.get_field('name').verbose_name.title()] = self.document.name
rv[Document._meta.get_field('url').verbose_name.title()] = self.document.url
for field_entry in self.fields.all():
rv['answer_' + form_fields[field_entry.field_id] + '_verified'] = field_entry.verified
rv['answer_' + form_fields[field_entry.field_id]] = field_entry.value
rv[entry_time_name] = self.entry_time
return rv
def get_answer_for_field(self, field):
answer_for_field = self.fields.filter(field_id=field.pk)[0]
if answer_for_field.canonical_label is None:
return answer_for_field.value
else:
return answer_for_field.canonical_label.value
def force_verify(self):
""" set this entire entry as verified, overriding the normal rules
(intended for marking admin' entries as verified) """
for field in self.fields.all():
if DocumentSetFormField.objects.get(pk=field.field_id).verify:
# mark field entry as verified
field.verified = True
for other_field in self.fields.filter(field_id=field.pk).exclude(pk=field.pk):
self.verified = (field.value == other_field.value)
other_field.save()
field.save()
self.document.verified = True
self.document.save()
class DocumentSetFieldEntry(forms_builder.forms.models.AbstractFieldEntry):
entry = models.ForeignKey("DocumentSetFormEntry", related_name="fields")
verified = models.BooleanField(default=False, null=False)
canonical_label = models.ForeignKey("CanonicalFieldEntryLabel", related_name="fields", null=True)
group = models.CharField(max_length=200)
    created_at = models.DateTimeField(auto_now_add=True, null=True)
    updated_at = models.DateTimeField(auto_now=True, null=True)
def assigned_canonical_value(self):
if self.canonical_label is None:
return ''
else:
return self.canonical_label.value
def get_canonical_value(self):
order_similarity = "similarity(unaccent(lower(value)), unaccent(lower('%s')))" % self.value.replace("'","''")
similarity = "similarity(unaccent(lower(value)), unaccent(lower('%s'))) > 0.34" % self.value.replace("'","''")
# Get all the canons
best_canons = CanonicalFieldEntryLabel \
.objects.filter(value__similar=self.value,
form_field_id=self.field_id) \
.extra(select={
'distance': similarity,
'order_distance': order_similarity
}) \
.order_by('-order_distance')
if len(best_canons) == 0:
            best_canonical_label = CanonicalFieldEntryLabel(value=self.value, document_set=self.entry.document.document_set, form_field_id=self.field_id)
best_canonical_label.save()
else:
best_canonical_label = best_canons[0]
return best_canonical_label
    # Used by reassign_entries_to() to save an entry without recomputing its
    # canonical label; the regular save() below recomputes it for
    # autocomplete fields.
    def save_without_setting_canon(self, *args, **kwargs):
        """ Save the field entry without looking up its canonical form """
        super(DocumentSetFieldEntry, self).save(*args, **kwargs)
def save(self, *args, **kwargs):
""" when the field is saved we need to look for the canonical form
"""
if DocumentSetFormField.objects.get(pk=self.field_id).autocomplete:
self.canonical_label = self.get_canonical_value()
super(DocumentSetFieldEntry, self).save(*args, **kwargs)
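    # E.g. a newly saved value u'honorable senado' on an autocomplete field
    # is linked to an existing canon u'Honorable Senado de la Nacion' when
    # their pg_trgm similarity (lowercased and unaccented) exceeds 0.34;
    # otherwise a fresh CanonicalFieldEntryLabel is created from the raw
    # value (see get_canonical_value above).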
class DocumentSetRankingDefinition(models.Model):
""" the definition of a ranking (leaderboard of sorts) for a DocumentSet """
GROUPING_FUNCTIONS = (
('AVG', _('Average')),
('COUNT', _('Count')),
('SUM', _('Sum')),
)
SUBQUERY_LABEL = """
SELECT distinct(document.id) AS document_id,
coalesce(canonical_label.value, field_entry.value) AS value,
canonical_label.id as canonical_label_id
FROM crowdataapp_documentsetfieldentry field_entry
LEFT OUTER JOIN crowdataapp_canonicalfieldentrylabel canonical_label ON canonical_label.id = field_entry.canonical_label_id
INNER JOIN crowdataapp_documentsetformentry form_entry ON form_entry.id = field_entry.entry_id
INNER JOIN crowdataapp_document document ON document.id = form_entry.document_id
WHERE document.document_set_id = %(document_set_id)d
AND field_entry.verified = TRUE
AND field_entry.field_id = %(label_field_id)d
"""
SUBQUERY_MAGNITUDE = """
SELECT distinct(document.id) AS document_id,
cast(field_entry.value AS double PRECISION) AS value
FROM crowdataapp_documentsetfieldentry field_entry
INNER JOIN crowdataapp_documentsetformentry form_entry ON form_entry.id = field_entry.entry_id
INNER JOIN crowdataapp_document document ON document.id = form_entry.document_id
WHERE document.document_set_id = %(document_set_id)d
AND field_entry.verified = TRUE
AND field_entry.field_id = %(magnitude_field_id)d
"""
name = models.CharField(_('Ranking title'), max_length=256, editable=True, null=False)
document_set = models.ForeignKey(DocumentSet, related_name='rankings')
label_field = models.ForeignKey(DocumentSetFormField, related_name='label_fields')
magnitude_field = models.ForeignKey(DocumentSetFormField,
related_name='magnitude_fields',
null=True, blank=True)
amount_rows_on_home = models.IntegerField(default=10,
null=True,
                                              help_text=_('Number of rows to show on the home page.'))
grouping_function = models.CharField(_('Grouping Function'),
max_length=10,
choices=GROUPING_FUNCTIONS,
default='SUM')
sort_order = models.BooleanField(_('Sort order'),
default=False,
help_text=_('Ascending if checked, descending otherwise'))
def _ranking_query(self, search_term=None, limit=None, offset=0):
if limit is None:
limit = 10000000
# ToDo : WHERE label.value LIKE %(search_term)s
q = None
if self.grouping_function == 'COUNT':
q = """ SELECT label.value, COUNT(label.value), label.canonical_label_id
FROM (%s) label
GROUP BY label.value, label.canonical_label_id
ORDER BY COUNT(label.value) %s
LIMIT %d OFFSET %d """ % (self.SUBQUERY_LABEL,
'ASC' if self.sort_order else 'DESC',
limit,
offset)
q = q % { 'document_set_id': self.document_set.id, 'label_field_id': self.label_field.id }
elif self.grouping_function == 'SUM':
q = """ SELECT label.value, SUM(magnitude.value), label.canonical_label_id
FROM (%s) label
INNER JOIN (%s) magnitude
ON magnitude.document_id = label.document_id
GROUP BY label.value, label.canonical_label_id
ORDER BY SUM(magnitude.value) %s
LIMIT %d OFFSET %d """ % (self.SUBQUERY_LABEL,
self.SUBQUERY_MAGNITUDE,
'ASC' if self.sort_order else 'DESC',
limit,
offset)
q = q % { 'document_set_id': self.document_set.id,
'label_field_id': self.label_field.id,
'magnitude_field_id': self.magnitude_field_id }
elif self.grouping_function == 'AVG':
q = """ SELECT label.value, AVG(magnitude.value), label.canonical_label_id
FROM (%s) label
INNER JOIN (%s) magnitude
ON magnitude.document_id = label.document_id
GROUP BY label.value, label.canonical_label_id
ORDER BY AVG(magnitude.value) %s
LIMIT %d OFFSET %d """ % (self.SUBQUERY_LABEL,
self.SUBQUERY_MAGNITUDE,
'ASC' if self.sort_order else 'DESC',
limit,
offset)
q = q % { 'document_set_id': self.document_set.id,
'label_field_id': self.label_field.id,
'magnitude_field_id': self.magnitude_field_id}
#ToDo, filter on 'search_term': "'%" + search_term.encode('utf-8') + "%'"
return q
def calculate(self):
cursor = connection.cursor()
cursor.execute(self._ranking_query(limit=self.amount_rows_on_home))
return cursor.fetchall()
def calculate_all(self, search_term):
cursor = connection.cursor()
cursor.execute(self._ranking_query(search_term))
return cursor.fetchall()
class Document(models.Model):
name = models.CharField(_('Document title'), max_length=256, editable=True, null=True)
category = models.CharField(_('Document category'), max_length=256, editable=True, null=True)
    url = models.URLField(_('Document URL'), max_length=512, editable=True)
document_set = models.ForeignKey(DocumentSet, related_name='documents')
    verified = models.BooleanField(_('Verified'),
                                   help_text=_('Is this document verified?'))
    updated_at = models.DateTimeField(_('Modified'), auto_now=True,
                                      help_text=_('Time of last modification'))
entries_threshold_override = models.IntegerField(null=True,
blank=True,
help_text=_('Minimum number of coincidental answers for a field before marking it as valid. Overrides the default value set in the Document Set this Document belongs to'))
_form_field_cache = {}
def __unicode__(self):
return "%s (%s)" % (self.name, self.url) if self.name else self.url
def get_absolute_url(self):
return "%s?document_id=%s" % (reverse('crowdataapp.views.transcription_new',
kwargs={'document_set': self.document_set.slug}),
self.pk)
def entries_threshold(self):
if self.entries_threshold_override is None:
return self.document_set.entries_threshold
else:
return self.entries_threshold_override
def is_verified_by_staff(self):
""" The document is verified because there is a staff/superuser that revised."""
form_entries = self.form_entries.all().distinct('user')
for f in form_entries:
if f.user.is_staff or f.user.is_superuser:
return self.verified
return False
def is_revised_by_staff(self):
""" The document has somebody from staff that revised it. """
form_entries = self.form_entries.all().distinct('user')
for f in form_entries:
if f.user.is_staff or f.user.is_superuser:
return True
return False
def verified_answers(self):
""" get a dict of verified answers (entries) for this Document
{ <DocumentSetFormField>: <value>, ... }
"""
if not self.verified:
return {}
verified_answers = {}
for form_entry in self.form_entries.all():
for entry in form_entry.fields \
.filter(verified=True) \
.prefetch_related('canonical_label'):
if entry.canonical_label is None:
value = entry.value
else:
value = entry.canonical_label.value
form_field = None
if entry.field_id not in self._form_field_cache:
self._form_field_cache[entry.field_id] = DocumentSetFormField.objects.get(id=entry.field_id)
verified_answers.update({self._form_field_cache[entry.field_id]: value})
return verified_answers
def force_verify(self):
form_entries = self.form_entries.all().distinct('user')
for f in form_entries:
if f.user.is_staff or f.user.is_superuser:
f.force_verify()
return
def verify(self):
# almost direct port from ProPublica's Transcribable.
# Thanks @ashaw! :)
form_entries = self.form_entries.all().distinct('user')
form_fields = self.document_set.form.all()[0].fields.filter(verify=True)
aggregate = defaultdict(dict)
for field in form_fields:
aggregate[field] = defaultdict(lambda: 0)
for fe in form_entries:
for field in form_fields:
aggregate[field][fe.get_answer_for_field(field)] += 1
# aggregate
# defaultdict(<type 'dict'>, {<DocumentSetFormField: Tipo de gasto>:
# defaultdict(<function <lambda> at 0x10f97dd70>,
# {u'Gastos': 1, u'Pasajes a\xe9reos, terrestres y otros': 2}),
# <DocumentSetFormField: Adjudicatario>: defaultdict(<function <lambda> at 0x10f97dcf8>, {u'V\xeda Bariloche S.A.': 3}),
# <DocumentSetFormField: Importe total>: defaultdict(<function <lambda> at 0x10f97dc80>, {u'14528.8': 3})})
        chosen = {}
        for field, answers in aggregate.items():
            for answer, answer_ct in answers.items():
                if answer_ct >= self.entries_threshold():
                    chosen[field] = answer
        # chosen maps each field to its winning answer, e.g.
        # { <DocumentSetFormField: Tipo de gasto>: u'viaticos por viaje',
        #   <DocumentSetFormField: Adjudicatario>: u'Honorable Senado de la Naci\xf3n',
        #   <DocumentSetFormField: Importe total>: u'10854.48' }
        if len(chosen.keys()) == len(form_fields):
            # every verifiable field reached the threshold; find an entry
            # matching all of the winning answers and force-verify it
            the_chosen_one = {}
            for entry in self.form_entries.all():
                the_chosen_one[entry] = 0
                for field, verified_answer in chosen.items():
                    canons = CanonicalFieldEntryLabel.objects.filter(value=verified_answer)
                    if canons:
                        if entry.fields.filter(canonical_label_id=canons[0].id):
                            the_chosen_one[entry] += 1
                    else:
                        if entry.fields.filter(value=verified_answer):
                            the_chosen_one[entry] += 1
                if the_chosen_one[entry] == len(form_fields):
                    entry.force_verify()
                    break
self.updated_at = datetime.today()
else:
self.verified = False
self.save()
def unverify(self):
DocumentSetFieldEntry.objects.filter(entry__in=self.form_entries.all()) \
.update(verified=False)
self.verified = False
self.save()
def has_entry_for_user(self, user):
form_entries_for_user = self.form_entries.filter(user=user)
return form_entries_for_user
class Meta:
verbose_name = _('Document')
verbose_name_plural = _('Documents')
# This is the model that will save the canonical value of a string.
# For example for a DocumentSetFormField 'Adjudicatario' will save the canonical
# form ('Honorable Senado de la nacion') of the value ('honorable senado').
class CanonicalFieldEntryLabel(models.Model):
value = models.CharField(_('Canonical value'), max_length=2000, editable=True, null=True)
form_field = models.ForeignKey(DocumentSetFormField, related_name='form_field')
document_set = models.ForeignKey(DocumentSet, related_name='canonical_values', null=True)
def get_verified_documents(self, document_set):
""" Get all documents that have an entry with canon """
q = """
SELECT distinct(document.id) AS document_id, document.name as document_name
FROM crowdataapp_documentsetfieldentry field_entry
LEFT OUTER JOIN crowdataapp_canonicalfieldentrylabel canonical_label ON canonical_label.id = field_entry.canonical_label_id
INNER JOIN crowdataapp_documentsetformentry form_entry ON form_entry.id = field_entry.entry_id
INNER JOIN crowdataapp_document document ON document.id = form_entry.document_id
WHERE document.document_set_id = %d
AND field_entry.verified = TRUE
AND canonical_label.id = %d
""" % (document_set.id, self.id)
cursor = connection.cursor()
cursor.execute(q)
return cursor.fetchall()
def has_entries(self):
return (len(self.fields.all()) != 0)
def reassign_entries_to(self, new_canon):
for entry in self.fields.all():
entry.canonical_label = new_canon
entry.save_without_setting_canon()
class Feedback(models.Model):
feedback_text = models.CharField(max_length=10000)
# document_id = models.IntegerField(max_length=20)
timestamp = models.DateTimeField(auto_now_add=True)
document = models.ForeignKey(Document)
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from eventlet import greenthread
from oslo.config import cfg
from oslo.db import exception as os_db_exception
from sqlalchemy import exc as sql_exc
from sqlalchemy.orm import exc as sa_exc
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.agentnotifiers import dhcp_rpc_agent_api
from neutron.api.rpc.handlers import dhcp_rpc
from neutron.api.rpc.handlers import dvr_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.api.v2 import attributes
from neutron.common import constants as const
from neutron.common import exceptions as exc
from neutron.common import rpc as n_rpc
from neutron.common import topics
from neutron.common import uos_utils
from neutron.common import utils
from neutron import context as n_context
from neutron.db import agents_db
from neutron.db import tenants_db
from neutron.db import agentschedulers_db
from neutron.db import allowedaddresspairs_db as addr_pair_db
from neutron.db import api as db_api
from neutron.db import db_base_plugin_v2
from neutron.db import dvr_mac_db
from neutron.db import external_net_db
from neutron.db import extradhcpopt_db
from neutron.db import models_v2
from neutron.db import quota_db # noqa
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.db import uos_db # noqa
from neutron.db import uos_net_ratelimit_db
from neutron.db import uos_service_provider_db
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import extra_dhcp_opt as edo_ext
from neutron.extensions import portbindings
from neutron.extensions import providernet as provider
from neutron.extensions import securitygroup as ext_sg
from neutron import manager
from neutron.openstack.common import excutils
from neutron.openstack.common import importutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import lockutils
from neutron.openstack.common import log
from neutron.openstack.common import uuidutils
from neutron.plugins.common import constants as service_constants
from neutron.plugins.ml2.common import exceptions as ml2_exc
from neutron.plugins.ml2 import config # noqa
from neutron.plugins.ml2 import db
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2 import driver_context
from neutron.plugins.ml2 import managers
from neutron.plugins.ml2 import models
from neutron.plugins.ml2 import rpc
LOG = log.getLogger(__name__)
MAX_BIND_TRIES = 10
# REVISIT(rkukura): Move this and other network_type constants to
# providernet.py?
TYPE_MULTI_SEGMENT = 'multi-segment'
TAP_DEVICE_PREFIX = 'tap'
TAP_DEVICE_PREFIX_LENGTH = 3
class Ml2Plugin(db_base_plugin_v2.NeutronDbPluginV2,
dvr_mac_db.DVRDbMixin,
external_net_db.External_net_db_mixin,
sg_db_rpc.SecurityGroupServerRpcMixin,
agentschedulers_db.DhcpAgentSchedulerDbMixin,
addr_pair_db.AllowedAddressPairsMixin,
extradhcpopt_db.ExtraDhcpOptMixin,
uos_net_ratelimit_db.Uos_net_ratelimit_db_mixin,
uos_service_provider_db.Uos_subnet_service_provider_db_mixin,
tenants_db.TenantDbMixin,
):
"""Implement the Neutron L2 abstractions using modules.
Ml2Plugin is a Neutron plugin based on separately extensible sets
of network types and mechanisms for connecting to networks of
those types. The network types and mechanisms are implemented as
drivers loaded via Python entry points. Networks can be made up of
multiple segments (not yet fully implemented).
"""
# This attribute specifies whether the plugin supports or not
# bulk/pagination/sorting operations. Name mangling is used in
# order to ensure it is qualified by class
__native_bulk_support = True
__native_pagination_support = True
__native_sorting_support = True
# List of supported extensions
_supported_extension_aliases = ["provider", "external-net", "binding",
"quotas", "security-group", "agent",
"dhcp_agent_scheduler",
"multi-provider", "allowed-address-pairs",
"extra_dhcp_opt", "uos","tenant",
"uos-net-ratelimit","uos-service-provider"]
@property
def supported_extension_aliases(self):
if not hasattr(self, '_aliases'):
aliases = self._supported_extension_aliases[:]
aliases += self.extension_manager.extension_aliases()
sg_rpc.disable_security_group_extension_by_config(aliases)
self._aliases = aliases
return self._aliases
def __init__(self):
# First load drivers, then initialize DB, then initialize drivers
self.type_manager = managers.TypeManager()
self.extension_manager = managers.ExtensionManager()
self.mechanism_manager = managers.MechanismManager()
super(Ml2Plugin, self).__init__()
super(Ml2Plugin, self).__init_sg_db_mixin__()
self.type_manager.initialize()
self.extension_manager.initialize()
self.mechanism_manager.initialize()
# bulk support depends on the underlying drivers
self.__native_bulk_support = self.mechanism_manager.native_bulk_support
self._setup_rpc()
# REVISIT(rkukura): Use stevedore for these?
self.network_scheduler = importutils.import_object(
cfg.CONF.network_scheduler_driver
)
LOG.info(_("Modular L2 Plugin initialization complete"))
def _setup_rpc(self):
self.notifier = rpc.AgentNotifierApi(topics.AGENT)
self.agent_notifiers[const.AGENT_TYPE_DHCP] = (
dhcp_rpc_agent_api.DhcpAgentNotifyAPI()
)
def start_rpc_listeners(self):
self.endpoints = [rpc.RpcCallbacks(self.notifier, self.type_manager),
securitygroups_rpc.SecurityGroupServerRpcCallback(),
dvr_rpc.DVRServerRpcCallback(),
dhcp_rpc.DhcpRpcCallback(),
agents_db.AgentExtRpcCallback()]
self.topic = topics.PLUGIN
self.conn = n_rpc.create_connection(new=True)
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
return self.conn.consume_in_threads()
def _filter_nets_provider(self, context, nets, filters):
# TODO(rkukura): Implement filtering.
return nets
def _process_port_binding(self, mech_context, context, attrs):
binding = mech_context._binding
port = mech_context.current
changes = False
host = attrs and attrs.get(portbindings.HOST_ID)
if (attributes.is_attr_set(host) and
binding.host != host):
binding.host = host
changes = True
# Whenever a DVR serviceable port comes up on a
# node, it has to be communicated to the L3 Plugin
# and agent for creating the respective namespaces.
if (utils.is_dvr_serviced(port['device_owner'])):
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
if (utils.is_extension_supported(
l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS)):
l3plugin.dvr_update_router_addvm(context, port)
vnic_type = attrs and attrs.get(portbindings.VNIC_TYPE)
if (attributes.is_attr_set(vnic_type) and
binding.vnic_type != vnic_type):
binding.vnic_type = vnic_type
changes = True
# treat None as clear of profile.
profile = None
if attrs and portbindings.PROFILE in attrs:
profile = attrs.get(portbindings.PROFILE) or {}
if profile not in (None, attributes.ATTR_NOT_SPECIFIED,
self._get_profile(binding)):
binding.profile = jsonutils.dumps(profile)
if len(binding.profile) > models.BINDING_PROFILE_LEN:
msg = _("binding:profile value too large")
raise exc.InvalidInput(error_message=msg)
changes = True
# Unbind the port if needed.
if changes:
binding.vif_type = portbindings.VIF_TYPE_UNBOUND
binding.vif_details = ''
binding.driver = None
binding.segment = None
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding.vif_type = portbindings.VIF_TYPE_DISTRIBUTED
binding.vif_details = ''
binding.driver = None
binding.segment = None
binding.host = ''
self._update_port_dict_binding(port, binding)
return changes
def _bind_port_if_needed(self, context, allow_notify=False,
need_notify=False):
plugin_context = context._plugin_context
port_id = context._port['id']
# Since the mechanism driver bind_port() calls must be made
# outside a DB transaction locking the port state, it is
# possible (but unlikely) that the port's state could change
# concurrently while these calls are being made. If another
# thread or process succeeds in binding the port before this
# thread commits its results, the already committed results are
# used. If attributes such as binding:host_id,
# binding:profile, or binding:vnic_type are updated
# concurrently, this loop retries binding using the new
# values.
count = 0
while True:
# First, determine whether it is necessary and possible to
# bind the port.
binding = context._binding
if (binding.vif_type != portbindings.VIF_TYPE_UNBOUND
or not binding.host):
# We either don't need to bind the port, or can't, so
# notify if needed and return.
if allow_notify and need_notify:
self._notify_port_updated(context)
return context
# Limit binding attempts to avoid any possibility of
# infinite looping and to ensure an error is logged
# instead. This does not need to be tunable because no
# more than a couple attempts should ever be required in
# normal operation. Log at info level if not 1st attempt.
count += 1
if count > MAX_BIND_TRIES:
LOG.error(_("Failed to commit binding results for %(port)s "
"after %(max)s tries"),
{'port': port_id, 'max': MAX_BIND_TRIES})
return context
if count > 1:
greenthread.sleep(0) # yield
LOG.info(_("Attempt %(count)s to bind port %(port)s"),
{'count': count, 'port': port_id})
# The port isn't already bound and the necessary
# information is available, so attempt to bind the port.
bind_context = self._bind_port(context)
# Now try to commit result of attempting to bind the port.
new_context, did_commit = self._commit_port_binding(
plugin_context, port_id, binding, bind_context)
if not new_context:
# The port has been deleted concurrently, so just
# return the unbound result from the initial
# transaction that completed before the deletion.
LOG.debug("Port %s has been deleted concurrently",
port_id)
return context
# Need to notify if we succeed and our results were
# committed.
if did_commit and (new_context._binding.vif_type !=
portbindings.VIF_TYPE_BINDING_FAILED):
need_notify = True
context = new_context
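    # The loop above follows an optimistic-concurrency pattern: the expensive
    # bind happens outside any DB transaction, and its result is committed
    # only if the port state is unchanged. A minimal sketch of the pattern in
    # isolation (all names hypothetical):
    #
    #     def retry_optimistically(attempt, commit, max_tries=MAX_BIND_TRIES):
    #         for _ in range(max_tries):
    #             result = attempt()      # done without holding DB locks
    #             if commit(result):      # applied only if state unchanged
    #                 return result
    #         raise RuntimeError("gave up after too many concurrent changes")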
def _bind_port(self, orig_context):
# Construct a new PortContext from the one from the previous
# transaction.
port = orig_context._port
orig_binding = orig_context._binding
new_binding = models.PortBinding(
host=orig_binding.host,
vnic_type=orig_binding.vnic_type,
profile=orig_binding.profile,
vif_type=portbindings.VIF_TYPE_UNBOUND,
vif_details=''
)
self._update_port_dict_binding(port, new_binding)
new_context = driver_context.PortContext(
self, orig_context._plugin_context, port,
orig_context._network_context._network, new_binding)
# Attempt to bind the port and return the context with the
# result.
self.mechanism_manager.bind_port(new_context)
return new_context
def _commit_port_binding(self, plugin_context, port_id, orig_binding,
new_context):
session = plugin_context.session
new_binding = new_context._binding
# After we've attempted to bind the port, we begin a
# transaction, get the current port state, and decide whether
# to commit the binding results.
#
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
# Get the current port state and build a new PortContext
# reflecting this state as original state for subsequent
# mechanism driver update_port_*commit() calls.
port_db, cur_binding = db.get_locked_port_and_binding(session,
port_id)
if not port_db:
# The port has been deleted concurrently.
return (None, None)
oport = self._make_port_dict(port_db)
port = self._make_port_dict(port_db)
network = self.get_network(plugin_context, port['network_id'])
cur_context = driver_context.PortContext(
self, plugin_context, port, network, cur_binding,
original_port=oport)
# Commit our binding results only if port has not been
# successfully bound concurrently by another thread or
# process and no binding inputs have been changed.
commit = ((cur_binding.vif_type in
[portbindings.VIF_TYPE_UNBOUND,
portbindings.VIF_TYPE_BINDING_FAILED]) and
orig_binding.host == cur_binding.host and
orig_binding.vnic_type == cur_binding.vnic_type and
orig_binding.profile == cur_binding.profile)
if commit:
# Update the port's binding state with our binding
# results.
cur_binding.vif_type = new_binding.vif_type
cur_binding.vif_details = new_binding.vif_details
cur_binding.driver = new_binding.driver
cur_binding.segment = new_binding.segment
# REVISIT(rkukura): The binding:profile attribute is
# supposed to be input-only, but the Mellanox driver
# currently modifies it while binding. Remove this
# code when the Mellanox driver has been updated to
# use binding:vif_details instead.
if cur_binding.profile != new_binding.profile:
cur_binding.profile = new_binding.profile
# Update PortContext's port dictionary to reflect the
# updated binding state.
self._update_port_dict_binding(port, cur_binding)
# Update the port status if requested by the bound driver.
if new_binding.segment and new_context._new_port_status:
port_db.status = new_context._new_port_status
port['status'] = new_context._new_port_status
# Call the mechanism driver precommit methods, commit
# the results, and call the postcommit methods.
self.mechanism_manager.update_port_precommit(cur_context)
if commit:
self.mechanism_manager.update_port_postcommit(cur_context)
# Continue, using the port state as of the transaction that
# just finished, whether that transaction committed new
# results or discovered concurrent port state changes.
return (cur_context, commit)
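    # Note that the commit predicate above is effectively a compare-and-swap:
    # the results of the unlocked bind attempt are applied only if the binding
    # row still matches the inputs (host, vnic_type, profile) the attempt was
    # based on; otherwise they are discarded and _bind_port_if_needed retries.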
def _update_port_dict_binding(self, port, binding):
port[portbindings.HOST_ID] = binding.host
port[portbindings.VNIC_TYPE] = binding.vnic_type
port[portbindings.PROFILE] = self._get_profile(binding)
port[portbindings.VIF_TYPE] = binding.vif_type
port[portbindings.VIF_DETAILS] = self._get_vif_details(binding)
port[portbindings.DISABLE_ANTI_SPOOFING] = port['disable_anti_spoofing']
def _get_vif_details(self, binding):
if binding.vif_details:
try:
return jsonutils.loads(binding.vif_details)
except Exception:
LOG.error(_("Serialized vif_details DB value '%(value)s' "
"for port %(port)s is invalid"),
{'value': binding.vif_details,
'port': binding.port_id})
return {}
def _get_profile(self, binding):
if binding.profile:
try:
return jsonutils.loads(binding.profile)
except Exception:
LOG.error(_("Serialized profile DB value '%(value)s' for "
"port %(port)s is invalid"),
{'value': binding.profile,
'port': binding.port_id})
return {}
def _ml2_extend_port_dict_binding(self, port_res, port_db):
# None when called during unit tests for other plugins.
if port_db.port_binding:
self._update_port_dict_binding(port_res, port_db.port_binding)
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_ml2_extend_port_dict_binding'])
# Register extend dict methods for network and port resources.
# Each mechanism driver that supports extend attribute for the resources
# can add those attribute to the result.
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.NETWORKS, ['_ml2_md_extend_network_dict'])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.PORTS, ['_ml2_md_extend_port_dict'])
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.SUBNETS, ['_ml2_md_extend_subnet_dict'])
def _ml2_md_extend_network_dict(self, result, netdb):
session = db_api.get_session()
with session.begin(subtransactions=True):
self.extension_manager.extend_network_dict(session, result)
def _ml2_md_extend_port_dict(self, result, portdb):
session = db_api.get_session()
with session.begin(subtransactions=True):
self.extension_manager.extend_port_dict(session, result)
def _ml2_md_extend_subnet_dict(self, result, subnetdb):
session = db_api.get_session()
with session.begin(subtransactions=True):
self.extension_manager.extend_subnet_dict(session, result)
# Note - The following hook methods have "ml2" in their names so
# that they are not called twice during unit tests due to global
# registration of hooks in portbindings_db.py used by other
# plugins.
def _ml2_port_model_hook(self, context, original_model, query):
query = query.outerjoin(models.PortBinding,
(original_model.id ==
models.PortBinding.port_id))
return query
def _ml2_port_result_filter_hook(self, query, filters):
values = filters and filters.get(portbindings.HOST_ID, [])
if not values:
return query
return query.filter(models.PortBinding.host.in_(values))
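    # Illustrative effect of the two hooks above (values hypothetical): a call
    # such as get_ports(context, filters={portbindings.HOST_ID: ['compute-1']})
    # is outer-joined to the ml2 PortBinding table and narrowed to
    # PortBinding.host IN ('compute-1').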
db_base_plugin_v2.NeutronDbPluginV2.register_model_query_hook(
models_v2.Port,
"ml2_port_bindings",
'_ml2_port_model_hook',
None,
'_ml2_port_result_filter_hook')
def _notify_port_updated(self, mech_context):
port = mech_context._port
segment = mech_context.bound_segment
if not segment:
# REVISIT(rkukura): This should notify agent to unplug port
network = mech_context.network.current
LOG.warning(_("In _notify_port_updated(), no bound segment for "
"port %(port_id)s on network %(network_id)s"),
{'port_id': port['id'],
'network_id': network['id']})
return
self.notifier.port_update(mech_context._plugin_context, port,
segment[api.NETWORK_TYPE],
segment[api.SEGMENTATION_ID],
segment[api.PHYSICAL_NETWORK])
# TODO(apech): Need to override bulk operations
def update_extra_net_data(self, context, network, netdb):
self._process_uos_ratelimit_update(context, network, netdb)
def add_extra_net_data(self, context, network, netdb):
self._process_uos_ratelimit_create(context, network, netdb)
def update_extra_subnet_data(self, context, subnet, subnetdb):
self._process_uos_service_provider_update(context, subnet, subnetdb)
def add_extra_subnet_data(self, context, subnet, subnetdb):
self._process_uos_service_provider_create(context, subnet, subnetdb)
def create_network(self, context, network):
net_data = network['network']
tenant_id = self._get_tenant_id_for_create(context, net_data)
session = context.session
with session.begin(subtransactions=True):
self._ensure_default_security_group(context, tenant_id)
result = super(Ml2Plugin, self).create_network(context, network)
self.extension_manager.process_create_network(session, net_data,
result)
self._process_l3_create(context, result, net_data)
net_data['id'] = result['id']
self.type_manager.create_network_segments(context, net_data,
tenant_id)
self.type_manager._extend_network_dict_provider(context, result)
mech_context = driver_context.NetworkContext(self, context,
result)
self.mechanism_manager.create_network_precommit(mech_context)
try:
self.mechanism_manager.create_network_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_("mechanism_manager.create_network_postcommit "
"failed, deleting network '%s'"), result['id'])
self.delete_network(context, result['id'])
return result
def update_network(self, context, id, network):
provider._raise_if_updates_provider_attributes(network['network'])
session = context.session
with session.begin(subtransactions=True):
original_network = super(Ml2Plugin, self).get_network(context, id)
updated_network = super(Ml2Plugin, self).update_network(context,
id,
network)
self.extension_manager.process_update_network(session, network,
original_network)
self._process_l3_update(context, updated_network,
network['network'])
self.type_manager._extend_network_dict_provider(context,
updated_network)
mech_context = driver_context.NetworkContext(
self, context, updated_network,
original_network=original_network)
self.mechanism_manager.update_network_precommit(mech_context)
# TODO(apech) - handle errors raised by update_network, potentially
# by re-calling update_network with the previous attributes. For
            # now the error is propagated to the caller, which is expected to
# either undo/retry the operation or delete the resource.
self.mechanism_manager.update_network_postcommit(mech_context)
return updated_network
def get_network(self, context, id, fields=None):
session = context.session
with session.begin(subtransactions=True):
result = super(Ml2Plugin, self).get_network(context, id, None)
self.type_manager._extend_network_dict_provider(context, result)
return self._fields(result, fields)
def get_networks(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None, page_reverse=False):
session = context.session
with session.begin(subtransactions=True):
nets = super(Ml2Plugin,
self).get_networks(context, filters, None, sorts,
limit, marker, page_reverse)
for net in nets:
self.type_manager._extend_network_dict_provider(context, net)
nets = self._filter_nets_provider(context, nets, filters)
nets = self._filter_nets_l3(context, nets, filters)
return [self._fields(net, fields) for net in nets]
def delete_network(self, context, id):
# REVISIT(rkukura) The super(Ml2Plugin, self).delete_network()
# function is not used because it auto-deletes ports and
# subnets from the DB without invoking the derived class's
# delete_port() or delete_subnet(), preventing mechanism
# drivers from being called. This approach should be revisited
# when the API layer is reworked during icehouse.
LOG.debug(_("Deleting network %s"), id)
session = context.session
while True:
try:
                # REVISIT(rkukura): It's not clear that
# with_lockmode('update') is really needed in this
# transaction, and if not, the semaphore can also be
# removed.
#
# REVISIT: Serialize this operation with a semaphore
# to prevent deadlock waiting to acquire a DB lock
# held by another thread in the same process, leading
# to 'lock wait timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
self._process_l3_delete(context, id)
# Get ports to auto-delete.
ports = (session.query(models_v2.Port).
enable_eagerloads(False).
filter_by(network_id=id).
with_for_update().all())
LOG.debug(_("Ports to auto-delete: %s"), ports)
only_auto_del = all(p.device_owner
in db_base_plugin_v2.
AUTO_DELETE_PORT_OWNERS
for p in ports)
if not only_auto_del:
LOG.debug(_("Tenant-owned ports exist"))
raise exc.NetworkInUse(net_id=id)
# Get subnets to auto-delete.
subnets = (session.query(models_v2.Subnet).
enable_eagerloads(False).
filter_by(network_id=id).
with_lockmode('update').all())
LOG.debug(_("Subnets to auto-delete: %s"), subnets)
if not (ports or subnets):
network = self.get_network(context, id)
mech_context = driver_context.NetworkContext(self,
context,
network)
self.mechanism_manager.delete_network_precommit(
mech_context)
self.type_manager.release_network_segments(session, id)
record = self._get_network(context, id)
LOG.debug(_("Deleting network record %s"), record)
session.delete(record)
# The segment records are deleted via cascade from the
# network record, so explicit removal is not necessary.
LOG.debug(_("Committing transaction"))
break
except os_db_exception.DBError as e:
with excutils.save_and_reraise_exception() as ctxt:
if isinstance(e.inner_exception, sql_exc.IntegrityError):
ctxt.reraise = False
msg = _("A concurrent port creation has occurred")
LOG.warning(msg)
continue
for port in ports:
try:
self.delete_port(context, port.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Exception auto-deleting port %s"),
port.id)
for subnet in subnets:
try:
self.delete_subnet(context, subnet.id)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Exception auto-deleting subnet %s"),
subnet.id)
try:
self.mechanism_manager.delete_network_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the network. Ideally we'd notify the caller of
# the fact that an error occurred.
LOG.error(_("mechanism_manager.delete_network_postcommit failed"))
self.notifier.network_delete(context, id)
def create_subnet(self, context, subnet):
session = context.session
with session.begin(subtransactions=True):
result = super(Ml2Plugin, self).create_subnet(context, subnet)
self.extension_manager.process_create_subnet(session, subnet,
result)
mech_context = driver_context.SubnetContext(self, context, result)
self.mechanism_manager.create_subnet_precommit(mech_context)
try:
self.mechanism_manager.create_subnet_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_("mechanism_manager.create_subnet_postcommit "
"failed, deleting subnet '%s'"), result['id'])
self.delete_subnet(context, result['id'])
return result
def update_subnet(self, context, id, subnet):
session = context.session
with session.begin(subtransactions=True):
original_subnet = super(Ml2Plugin, self).get_subnet(context, id)
updated_subnet = super(Ml2Plugin, self).update_subnet(
context, id, subnet)
self.extension_manager.process_update_subnet(session, subnet,
original_subnet)
mech_context = driver_context.SubnetContext(
self, context, updated_subnet, original_subnet=original_subnet)
self.mechanism_manager.update_subnet_precommit(mech_context)
# TODO(apech) - handle errors raised by update_subnet, potentially
# by re-calling update_subnet with the previous attributes. For
            # now the error is propagated to the caller, which is expected to
# either undo/retry the operation or delete the resource.
self.mechanism_manager.update_subnet_postcommit(mech_context)
return updated_subnet
def delete_subnet(self, context, id):
# REVISIT(rkukura) The super(Ml2Plugin, self).delete_subnet()
# function is not used because it deallocates the subnet's addresses
# from ports in the DB without invoking the derived class's
# update_port(), preventing mechanism drivers from being called.
# This approach should be revisited when the API layer is reworked
# during icehouse.
LOG.debug(_("Deleting subnet %s"), id)
session = context.session
while True:
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock
# wait timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
subnet = self.get_subnet(context, id)
# Get ports to auto-deallocate
allocated = (session.query(models_v2.IPAllocation).
filter_by(subnet_id=id).
join(models_v2.Port).
filter_by(network_id=subnet['network_id']).
with_lockmode('update').all())
LOG.debug(_("Ports to auto-deallocate: %s"), allocated)
only_auto_del = all(not a.port_id or
a.ports.device_owner in db_base_plugin_v2.
AUTO_DELETE_PORT_OWNERS
for a in allocated)
if not only_auto_del:
LOG.debug(_("Tenant-owned ports exist"))
raise exc.SubnetInUse(subnet_id=id)
if not allocated:
mech_context = driver_context.SubnetContext(self, context,
subnet)
self.mechanism_manager.delete_subnet_precommit(
mech_context)
LOG.debug(_("Deleting subnet record"))
record = self._get_subnet(context, id)
session.delete(record)
LOG.debug(_("Committing transaction"))
break
for a in allocated:
if a.port_id:
# calling update_port() for each allocation to remove the
# IP from the port and call the MechanismDrivers
data = {'port':
{'fixed_ips': [{'subnet_id': ip.subnet_id,
'ip_address': ip.ip_address}
for ip in a.ports.fixed_ips
if ip.subnet_id != id]}}
try:
self.update_port(context, a.port_id, data)
except Exception:
with excutils.save_and_reraise_exception():
LOG.exception(_("Exception deleting fixed_ip from "
"port %s"), a.port_id)
session.delete(a)
try:
self.mechanism_manager.delete_subnet_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the subnet. Ideally we'd notify the caller of
# the fact that an error occurred.
LOG.error(_("mechanism_manager.delete_subnet_postcommit failed"))
def create_port(self, context, port):
attrs = port['port']
attrs['status'] = const.PORT_STATUS_DOWN
uos_limits = cfg.CONF.unitedstack.uos_pps_limits
if cfg.CONF.unitedstack.uos_pps_limits_enable and uos_limits:
attrs[portbindings.PROFILE] = {}
attrs[portbindings.PROFILE]['uos_pps_limits'] = uos_limits
        # mark must be greater than 2; mark: ratelimit in kbps
uos_marks = cfg.CONF.unitedstack.uos_marks
uos_mark_actions = cfg.CONF.unitedstack.uos_mark_actions
uos_marks_dict = {}
for uos_mark in uos_marks:
marks = uos_mark.split(":")
uos_marks_dict[marks[0]] = marks[1]
if uos_marks_dict:
attrs[portbindings.PROFILE] = {
'uos_marks': uos_marks_dict,
'uos_mark_actions': uos_mark_actions}
session = context.session
with session.begin(subtransactions=True):
self._ensure_default_security_group_on_port(context, port)
sgids = self._get_security_groups_on_port(context, port)
dhcp_opts = port['port'].get(edo_ext.EXTRADHCPOPTS, [])
result = super(Ml2Plugin, self).create_port(context, port)
self.extension_manager.process_create_port(session, attrs, result)
self._process_port_create_security_group(context, result, sgids)
network = self.get_network(context, result['network_id'])
binding = db.add_port_binding(session, result['id'])
mech_context = driver_context.PortContext(self, context, result,
network, binding)
self._process_port_binding(mech_context, context, attrs)
result[addr_pair.ADDRESS_PAIRS] = (
self._process_create_allowed_address_pairs(
context, result,
attrs.get(addr_pair.ADDRESS_PAIRS)))
self._process_port_create_extra_dhcp_opts(context, result,
dhcp_opts)
self.mechanism_manager.create_port_precommit(mech_context)
try:
self.mechanism_manager.create_port_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_("mechanism_manager.create_port_postcommit "
"failed, deleting port '%s'"), result['id'])
self.delete_port(context, result['id'])
# REVISIT(rkukura): Is there any point in calling this before
# a binding has been successfully established?
self.notify_security_groups_member_updated(context, result)
try:
bound_context = self._bind_port_if_needed(mech_context)
except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception():
LOG.error(_("_bind_port_if_needed "
"failed, deleting port '%s'"), result['id'])
self.delete_port(context, result['id'])
return bound_context._port
def update_port(self, context, id, port):
attrs = port['port']
need_port_update_notify = False
if portbindings.PROFILE in attrs:
if 'uos_marks' in attrs[portbindings.PROFILE]:
uos_mark_dict = {}
uos_marks = attrs[portbindings.PROFILE]['uos_marks']
#"1->10;20>20"
if uos_marks:
for uos_mark in uos_marks.split(";"):
if uos_mark:
marks = uos_mark.split("->")
uos_mark_dict[marks[0]] = marks[1]
mark_len = len(uos_mark_dict.keys())
attrs[portbindings.PROFILE]['uos_marks'] = uos_mark_dict
mark_actions = (attrs[portbindings.PROFILE][
'uos_mark_actions'])
action_len = 0
if mark_actions:
                    actions = mark_actions.split(";")
                    attrs[portbindings.PROFILE]['uos_mark_actions'] = (
                        actions)
                    action_len = len(actions)
if action_len != mark_len:
msg = _("in valid marks")
raise exc.InvalidInput(error_message=msg)
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port_db, binding = db.get_locked_port_and_binding(session, id)
if not port_db:
raise exc.PortNotFound(port_id=id)
original_port = self._make_port_dict(port_db)
updated_port = super(Ml2Plugin, self).update_port(context, id,
port)
self.extension_manager.process_update_port(session, attrs,
original_port)
if addr_pair.ADDRESS_PAIRS in port['port']:
need_port_update_notify |= (
self.update_address_pairs_on_port(context, id, port,
original_port,
updated_port))
need_port_update_notify |= self.update_security_group_on_port(
context, id, port, original_port, updated_port)
network = self.get_network(context, original_port['network_id'])
need_port_update_notify |= self._update_extra_dhcp_opts_on_port(
context, id, port, updated_port)
mech_context = driver_context.PortContext(
self, context, updated_port, network, binding,
original_port=original_port)
need_port_update_notify |= self._process_port_binding(
mech_context, context, attrs)
self.mechanism_manager.update_port_precommit(mech_context)
# TODO(apech) - handle errors raised by update_port, potentially
# by re-calling update_port with the previous attributes. For
            # now the error is propagated to the caller, which is expected to
# either undo/retry the operation or delete the resource.
self.mechanism_manager.update_port_postcommit(mech_context)
need_port_update_notify |= self.is_security_group_member_updated(
context, original_port, updated_port)
if need_port_update_notify:
LOG.info(_("notify old security group member"
" change for port %s"), id)
self.notifier.security_groups_member_updated(
context, original_port.get(ext_sg.SECURITYGROUPS, []))
if original_port['admin_state_up'] != updated_port['admin_state_up']:
need_port_update_notify = True
# NOTE(changzhi) Notify agent when disable_anti_spoofing is changed.
if original_port['disable_anti_spoofing'] != updated_port['disable_anti_spoofing']:
need_port_update_notify = True
bound_port = self._bind_port_if_needed(
mech_context,
allow_notify=True,
need_notify=need_port_update_notify)
return bound_port._port
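    # Illustrative parse of the uos_marks wire format handled above (values
    # hypothetical): "1->10;20->20" becomes {'1': '10', '20': '20'}, and a
    # matching uos_mark_actions string such as "act1;act2" becomes
    # ['act1', 'act2']; the two must have the same length or InvalidInput
    # is raised.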
def _process_dvr_port_binding(self, mech_context, context, attrs):
binding = mech_context._binding
port = mech_context.current
if binding.vif_type != portbindings.VIF_TYPE_UNBOUND:
binding.vif_details = ''
binding.vif_type = portbindings.VIF_TYPE_UNBOUND
binding.driver = None
binding.segment = None
binding.host = ''
self._update_port_dict_binding(port, binding)
binding.host = attrs and attrs.get(portbindings.HOST_ID)
def update_dvr_port_binding(self, context, id, port):
attrs = port['port']
host = attrs and attrs.get(portbindings.HOST_ID)
host_set = attributes.is_attr_set(host)
if not host_set:
LOG.error(_("No Host supplied to bind DVR Port %s"), id)
return
session = context.session
binding = db.get_dvr_port_binding_by_host(session, id, host)
if (not binding or
binding.vif_type == portbindings.VIF_TYPE_BINDING_FAILED):
with session.begin(subtransactions=True):
if not binding:
binding = db.ensure_dvr_port_binding(
session, id, host, router_id=attrs['device_id'])
orig_port = super(Ml2Plugin, self).get_port(context, id)
network = self.get_network(context, orig_port['network_id'])
mech_context = driver_context.DvrPortContext(self,
context, orig_port, network,
binding, original_port=orig_port)
self._process_dvr_port_binding(mech_context, context, attrs)
self.mechanism_manager.bind_port(mech_context)
# Now try to commit result of attempting to bind the port.
self._commit_dvr_port_binding(mech_context._plugin_context,
orig_port['id'],
host,
mech_context)
def _commit_dvr_port_binding(self, plugin_context,
port_id, host,
mech_context):
session = plugin_context.session
new_binding = mech_context._binding
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
# Get the current port state and build a new PortContext
# reflecting this state as original state for subsequent
# mechanism driver update_port_*commit() calls.
cur_binding = db.get_dvr_port_binding_by_host(session,
port_id,
host)
# Commit our binding results only if port has not been
# successfully bound concurrently by another thread or
# process and no binding inputs have been changed.
commit = ((cur_binding.vif_type in
[portbindings.VIF_TYPE_UNBOUND,
portbindings.VIF_TYPE_BINDING_FAILED]) and
new_binding.host == cur_binding.host and
new_binding.vnic_type == cur_binding.vnic_type and
new_binding.profile == cur_binding.profile)
if commit:
# Update the port's binding state with our binding
# results.
cur_binding.vif_type = new_binding.vif_type
cur_binding.vif_details = new_binding.vif_details
cur_binding.driver = new_binding.driver
cur_binding.segment = new_binding.segment
if cur_binding.profile != new_binding.profile:
cur_binding.profile = new_binding.profile
def delete_port(self, context, id, l3_port_check=True, **kwargs):
LOG.debug(_("Deleting port %s"), id)
removed_routers = []
l3plugin = manager.NeutronManager.get_service_plugins().get(
service_constants.L3_ROUTER_NAT)
is_dvr_enabled = utils.is_extension_supported(
l3plugin, const.L3_DISTRIBUTED_EXT_ALIAS)
if l3plugin and l3_port_check:
l3plugin.prevent_l3_port_deletion(context, id)
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port_db, binding = db.get_locked_port_and_binding(session, id)
if not port_db:
# the port existed when l3plugin.prevent_l3_port_deletion
# was called but now is already gone
LOG.debug(_("The port '%s' was deleted"), id)
return
port = self._make_port_dict(port_db)
from_nova = (kwargs and 'filters' in kwargs and
'_x_nova' in kwargs['filters'])
if not from_nova:
device_owner = port['device_owner']
device_id = port['device_id']
if (device_id and
device_owner.startswith(const.DEVICE_OWNER_COMPUTE_PRE)):
raise exc.PortInUseByNova(port_id=port['id'],
device_id=device_id)
network = self.get_network(context, port['network_id'])
mech_context = None
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
bindings = db.get_dvr_port_bindings(context.session, id)
for bind in bindings:
mech_context = driver_context.DvrPortContext(
self, context, port, network, bind)
self.mechanism_manager.delete_port_precommit(mech_context)
else:
mech_context = driver_context.PortContext(self, context, port,
network, binding)
if "compute:" in port['device_owner'] and is_dvr_enabled:
router_info = l3plugin.dvr_deletens_if_no_vm(context, id)
removed_routers += router_info
self.mechanism_manager.delete_port_precommit(mech_context)
self._delete_port_security_group_bindings(context, id)
if l3plugin:
router_ids = l3plugin.disassociate_floatingips(
context, id, do_notify=False)
if is_dvr_enabled:
l3plugin.dvr_vmarp_table_update(context, id, "del")
LOG.debug("Calling delete_port for %(port_id)s owned by %(owner)s"
% {"port_id": id, "owner": port['device_owner']})
super(Ml2Plugin, self).delete_port(context, id)
# now that we've left db transaction, we are safe to notify
if l3plugin:
l3plugin.notify_routers_updated(context, router_ids)
for router in removed_routers:
l3plugin.remove_router_from_l3_agent(
context, router['agent_id'], router['router_id'])
try:
# for both normal and DVR Interface ports, only one invocation of
# delete_port_postcommit. We use gather/scatter technique for DVR
# interface ports, where the bindings are gathered in
# delete_port_precommit() call earlier and scattered as l2pop
# rules to cloud nodes in delete_port_postcommit() here
if mech_context:
self.mechanism_manager.delete_port_postcommit(mech_context)
except ml2_exc.MechanismDriverError:
# TODO(apech) - One or more mechanism driver failed to
# delete the port. Ideally we'd notify the caller of the
# fact that an error occurred.
LOG.error(_("mechanism_manager.delete_port_postcommit failed for "
"port %s"), id)
self.notify_security_groups_member_updated(context, port)
def get_bound_port_context(self, plugin_context, port_id, host=None):
session = plugin_context.session
with session.begin(subtransactions=True):
try:
port_db = (session.query(models_v2.Port).
enable_eagerloads(False).
filter(models_v2.Port.id.startswith(port_id)).
one())
except sa_exc.NoResultFound:
return
            except sa_exc.MultipleResultsFound:
LOG.error(_("Multiple ports have port_id starting with %s"),
port_id)
return
port = self._make_port_dict(port_db)
network = self.get_network(plugin_context, port['network_id'])
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding = db.get_dvr_port_binding_by_host(
session, port['id'], host)
if not binding:
LOG.error(_("Binding info for DVR port %s not found"),
port_id)
return None
port_context = driver_context.DvrPortContext(
self, plugin_context, port, network, binding)
else:
port_context = driver_context.PortContext(
self, plugin_context, port, network, port_db.port_binding)
return self._bind_port_if_needed(port_context)
def update_port_status(self, context, port_id, status, host=None):
"""
Returns port_id (non-truncated uuid) if the port exists.
Otherwise returns None.
"""
updated = False
session = context.session
# REVISIT: Serialize this operation with a semaphore to
# prevent deadlock waiting to acquire a DB lock held by
# another thread in the same process, leading to 'lock wait
# timeout' errors.
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
            port = db.get_port(session, port_id)
            if not port:
                LOG.warning(_("Port %(port)s updated by agent not found"),
                            {'port': port_id})
                return None
            tenant_id = port['tenant_id']
if (port.status != status and
port['device_owner'] != const.DEVICE_OWNER_DVR_INTERFACE):
original_port = self._make_port_dict(port)
port.status = status
updated_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
mech_context = driver_context.PortContext(
self, context, updated_port, network, port.port_binding,
original_port=original_port)
self.mechanism_manager.update_port_precommit(mech_context)
updated = True
elif port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
binding = db.get_dvr_port_binding_by_host(
session, port['id'], host)
if not binding:
return
binding['status'] = status
binding.update(binding)
updated = True
if (updated and
port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE):
with contextlib.nested(lockutils.lock('db-access'),
session.begin(subtransactions=True)):
port = db.get_port(session, port_id)
if not port:
LOG.warning(_("Port %s not found during update"), port_id)
return
original_port = self._make_port_dict(port)
network = self.get_network(context,
original_port['network_id'])
port.status = db.generate_dvr_port_status(session, port['id'])
updated_port = self._make_port_dict(port)
mech_context = (driver_context.DvrPortContext(
self, context, updated_port, network,
binding, original_port=original_port))
self.mechanism_manager.update_port_precommit(mech_context)
if updated:
self.mechanism_manager.update_port_postcommit(mech_context)
_ctx = n_context.Context('', tenant_id)
payload = {'id': port_id, 'status': status}
uos_utils.send_notification(_ctx,
'port.update_status.end', payload)
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
db.delete_dvr_port_binding_if_stale(session, binding)
return port['id']
def port_bound_to_host(self, context, port_id, host):
port = db.get_port(context.session, port_id)
if not port:
LOG.debug("No Port match for: %s", port_id)
return False
if port['device_owner'] == const.DEVICE_OWNER_DVR_INTERFACE:
bindings = db.get_dvr_port_bindings(context.session, port_id)
for b in bindings:
if b.host == host:
return True
LOG.debug("No binding found for DVR port %s", port['id'])
return False
else:
port_host = db.get_port_binding_host(port_id)
return (port_host == host)
def get_port_from_device(self, device):
port_id = self._device_to_port_id(device)
port = db.get_port_and_sgs(port_id)
if port:
port['device'] = device
return port
def _device_to_port_id(self, device):
# REVISIT(rkukura): Consider calling into MechanismDrivers to
# process device names, or having MechanismDrivers supply list
# of device prefixes to strip.
if device.startswith(TAP_DEVICE_PREFIX):
return device[TAP_DEVICE_PREFIX_LENGTH:]
else:
# REVISIT(irenab): Consider calling into bound MD to
# handle the get_device_details RPC, then remove the 'else' clause
if not uuidutils.is_uuid_like(device):
port = db.get_port_from_device_mac(device)
if port:
return port.id
return device
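    # Illustrative behaviour of _device_to_port_id() (values hypothetical):
    #
    #     plugin._device_to_port_id('tap3f8c6a21-9d')    # -> '3f8c6a21-9d'
    #     plugin._device_to_port_id('fa:16:3e:aa:bb:cc') # -> id of the port
    #                                                    #    with that MAC
    #     plugin._device_to_port_id(port_uuid)           # -> port_uuid as-is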
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A binary to train CIFAR-10 using multiple GPU's with synchronous updates.
Accuracy:
cifar10_multi_gpu_train.py achieves ~86% accuracy after 100K steps (256
epochs of data) as judged by cifar10_eval.py.
Speed: With batch_size 128.
System | Step Time (sec/batch) | Accuracy
--------------------------------------------------------------------
1 Tesla K20m | 0.35-0.60 | ~86% at 60K steps (5 hours)
1 Tesla K40m | 0.25-0.35 | ~86% at 100K steps (4 hours)
2 Tesla K20m | 0.13-0.20 | ~84% at 30K steps (2.5 hours)
3 Tesla K20m | 0.13-0.18 | ~84% at 30K steps
4 Tesla K20m | ~0.10 | ~84% at 30K steps
Usage:
Please see the tutorial and website for how to download the CIFAR-10
data set, compile the program and train the model.
http://tensorflow.org/tutorials/deep_cnn/
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os.path
import re
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
import sys
from pathlib import Path
sys.path.append(str(Path(__file__).resolve().parents[1]))
import tensorflow_cifar10
__package__ = 'tensorflow_cifar10'
from . import cifar10
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('train_dir', '/tmp/cifar10_train',
"""Directory where to write event logs """
"""and checkpoint.""")
tf.app.flags.DEFINE_integer('max_steps', 1000000,
"""Number of batches to run.""")
tf.app.flags.DEFINE_integer('num_gpus', 1,
"""How many GPUs to use.""")
tf.app.flags.DEFINE_boolean('log_device_placement', False,
"""Whether to log device placement.""")
def tower_loss(scope):
"""Calculate the total loss on a single tower running the CIFAR model.
Args:
scope: unique prefix string identifying the CIFAR tower, e.g. 'tower_0'
Returns:
Tensor of shape [] containing the total loss for a batch of data
"""
# Get images and labels for CIFAR-10.
images, labels = cifar10.distorted_inputs()
# Build inference Graph.
logits = cifar10.inference(images)
# Build the portion of the Graph calculating the losses. Note that we will
# assemble the total_loss using a custom function below.
_ = cifar10.loss(logits, labels)
# Assemble all of the losses for the current tower only.
losses = tf.get_collection('losses', scope)
# Calculate the total loss for the current tower.
total_loss = tf.add_n(losses, name='total_loss')
# Compute the moving average of all individual losses and the total loss.
loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
loss_averages_op = loss_averages.apply(losses + [total_loss])
# Attach a scalar summary to all individual losses and the total loss; do the
# same for the averaged version of the losses.
for l in losses + [total_loss]:
# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training
# session. This helps the clarity of presentation on tensorboard.
loss_name = re.sub('%s_[0-9]*/' % cifar10.TOWER_NAME, '', l.op.name)
# Name each loss as '(raw)' and name the moving average version of the loss
# as the original loss name.
tf.scalar_summary(loss_name + ' (raw)', l)
tf.scalar_summary(loss_name, loss_averages.average(l))
with tf.control_dependencies([loss_averages_op]):
total_loss = tf.identity(total_loss)
return total_loss
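# A minimal, self-contained sketch (not called by this script) of the
# ExponentialMovingAverage pattern used in tower_loss() above: apply() creates
# the update op, average() reads the smoothed value, and control_dependencies
# forces the update to run whenever the raw value is consumed.
def _ema_sketch(raw_loss):
  ema = tf.train.ExponentialMovingAverage(0.9, name='avg_sketch')
  ema_op = ema.apply([raw_loss])
  with tf.control_dependencies([ema_op]):
    # Returning tf.identity ensures ema_op runs before raw_loss is read.
    return tf.identity(raw_loss), ema.average(raw_loss)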
def average_gradients(tower_grads):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been averaged
across all towers.
"""
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = []
for g, _ in grad_and_vars:
# Add 0 dimension to the gradients to represent the tower.
expanded_g = tf.expand_dims(g, 0)
# Append on a 'tower' dimension which we will average over below.
grads.append(expanded_g)
# Average over the 'tower' dimension.
grad = tf.concat(0, grads)
grad = tf.reduce_mean(grad, 0)
# Keep in mind that the Variables are redundant because they are shared
    # across towers. So we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads
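# A self-contained shape walk-through of average_gradients() (hypothetical
# values, not called by train()): with two towers and one shared variable,
# expand_dims/concat build a [num_towers, ...] tensor whose mean over axis 0
# is the averaged gradient.
def _average_gradients_sketch():
  g_tower0 = tf.constant([1.0, 2.0, 3.0])  # gradient from tower 0
  g_tower1 = tf.constant([3.0, 4.0, 5.0])  # gradient from tower 1
  stacked = tf.concat(0, [tf.expand_dims(g_tower0, 0),
                          tf.expand_dims(g_tower1, 0)])  # shape [2, 3]
  return tf.reduce_mean(stacked, 0)  # -> [2.0, 3.0, 4.0]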
def train():
"""Train CIFAR-10 for a number of steps."""
with tf.Graph().as_default(), tf.device('/cpu:0'):
# Create a variable to count the number of train() calls. This equals the
# number of batches processed * FLAGS.num_gpus.
global_step = tf.get_variable(
'global_step', [],
initializer=tf.constant_initializer(0), trainable=False)
# Calculate the learning rate schedule.
num_batches_per_epoch = (cifar10.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN /
FLAGS.batch_size)
decay_steps = int(num_batches_per_epoch * cifar10.NUM_EPOCHS_PER_DECAY)
# Decay the learning rate exponentially based on the number of steps.
lr = tf.train.exponential_decay(cifar10.INITIAL_LEARNING_RATE,
global_step,
decay_steps,
cifar10.LEARNING_RATE_DECAY_FACTOR,
staircase=True)
# Create an optimizer that performs gradient descent.
opt = tf.train.GradientDescentOptimizer(lr)
# Calculate the gradients for each model tower.
tower_grads = []
for i in xrange(FLAGS.num_gpus):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % (cifar10.TOWER_NAME, i)) as scope:
# Calculate the loss for one tower of the CIFAR model. This function
# constructs the entire CIFAR model but shares the variables across
# all towers.
loss = tower_loss(scope)
# Reuse variables for the next tower.
tf.get_variable_scope().reuse_variables()
# Retain the summaries from the final tower.
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)
# Calculate the gradients for the batch of data on this CIFAR tower.
grads = opt.compute_gradients(loss)
# Keep track of the gradients across all towers.
tower_grads.append(grads)
# We must calculate the mean of each gradient. Note that this is the
# synchronization point across all towers.
grads = average_gradients(tower_grads)
# Add a summary to track the learning rate.
summaries.append(tf.scalar_summary('learning_rate', lr))
# Add histograms for gradients.
for grad, var in grads:
if grad is not None:
summaries.append(
tf.histogram_summary(var.op.name + '/gradients', grad))
# Apply the gradients to adjust the shared variables.
apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)
# Add histograms for trainable variables.
for var in tf.trainable_variables():
summaries.append(tf.histogram_summary(var.op.name, var))
# Track the moving averages of all trainable variables.
variable_averages = tf.train.ExponentialMovingAverage(
cifar10.MOVING_AVERAGE_DECAY, global_step)
variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # Group all updates into a single train op.
train_op = tf.group(apply_gradient_op, variables_averages_op)
# Create a saver.
saver = tf.train.Saver(tf.all_variables())
# Build the summary operation from the last tower summaries.
summary_op = tf.merge_summary(summaries)
# Build an initialization operation to run below.
init = tf.initialize_all_variables()
# Start running operations on the Graph. allow_soft_placement must be set to
# True to build towers on GPU, as some of the ops do not have GPU
# implementations.
sess = tf.Session(config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=FLAGS.log_device_placement))
sess.run(init)
# Start the queue runners.
tf.train.start_queue_runners(sess=sess)
summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
for step in xrange(FLAGS.max_steps):
start_time = time.time()
_, loss_value = sess.run([train_op, loss])
duration = time.time() - start_time
assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
if step % 10 == 0:
num_examples_per_step = FLAGS.batch_size * FLAGS.num_gpus
examples_per_sec = num_examples_per_step / duration
sec_per_batch = duration / FLAGS.num_gpus
format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
'sec/batch)')
print(format_str % (datetime.now(), step, loss_value,
examples_per_sec, sec_per_batch))
if step % 100 == 0:
summary_str = sess.run(summary_op)
summary_writer.add_summary(summary_str, step)
# Save the model checkpoint periodically.
if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
saver.save(sess, checkpoint_path, global_step=step)
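# Worked example of the exponential decay schedule configured in train()
# (constants hypothetical): with an initial rate of 0.1, a decay factor of
# 0.1 and decay_steps of 10000, staircase=True yields
#     lr(step) = 0.1 * 0.1 ** (step // 10000)
# i.e. 0.1 for steps 0-9999, 0.01 for steps 10000-19999, and so on.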
def main(argv=None): # pylint: disable=unused-argument
cifar10.maybe_download_and_extract()
if tf.gfile.Exists(FLAGS.train_dir):
tf.gfile.DeleteRecursively(FLAGS.train_dir)
tf.gfile.MakeDirs(FLAGS.train_dir)
train()
if __name__ == '__main__':
tf.app.run()
# -*- coding: utf-8 -*-
"""
Person Registry, Controllers
@see: U{http://eden.sahanafoundation.org/wiki/BluePrintVITA}
"""
module = request.controller
resourcename = request.function
# -----------------------------------------------------------------------------
# Options Menu (available in all Functions' Views)
def s3_menu_postp():
# @todo: rewrite this for new framework
menu_selected = []
group_id = s3mgr.get_session("pr", "group")
if group_id:
group = s3db.pr_group
query = (group.id == group_id)
record = db(query).select(group.id, group.name, limitby=(0, 1)).first()
if record:
name = record.name
menu_selected.append(["%s: %s" % (T("Group"), name), False,
URL(f="group",
args=[record.id])])
person_id = s3mgr.get_session("pr", "person")
if person_id:
person = s3db.pr_person
query = (person.id == person_id)
record = db(query).select(person.id, limitby=(0, 1)).first()
if record:
person_represent = s3db.pr_person_represent
name = person_represent(record.id)
menu_selected.append(["%s: %s" % (T("Person"), name), False,
URL(f="person",
args=[record.id])])
if menu_selected:
menu_selected = [T("Open recent"), True, None, menu_selected]
response.menu_options.append(menu_selected)
# -----------------------------------------------------------------------------
def index():
""" Module's Home Page """
try:
module_name = deployment_settings.modules[module].name_nice
    except Exception:
module_name = T("Person Registry")
# Load Model
s3mgr.load("pr_address")
def prep(r):
if r.representation == "html":
if not r.id and not r.method:
r.method = "search"
else:
redirect(URL(f="person", args=request.args))
return True
response.s3.prep = prep
def postp(r, output):
if isinstance(output, dict):
# Add information for Dashboard
pr_gender_opts = s3db.pr_gender_opts
pr_age_group_opts = s3db.pr_age_group_opts
table = db.pr_person
gender = []
for g_opt in pr_gender_opts:
query = (table.deleted == False) & \
(table.gender == g_opt)
count = db(query).count()
gender.append([str(pr_gender_opts[g_opt]), int(count)])
age = []
for a_opt in pr_age_group_opts:
query = (table.deleted == False) & \
(table.age_group == a_opt)
count = db(query).count()
age.append([str(pr_age_group_opts[a_opt]), int(count)])
total = int(db(table.deleted == False).count())
output.update(module_name=module_name,
gender=json.dumps(gender),
age=json.dumps(age),
total=total)
if r.interactive:
if not r.component:
label = READ
else:
label = UPDATE
linkto = r.resource.crud._linkto(r)("[id]")
response.s3.actions = [
dict(label=str(label), _class="action-btn", url=str(linkto))
]
r.next = None
return output
response.s3.postp = postp
output = s3_rest_controller("pr", "person")
response.view = "pr/index.html"
response.title = module_name
return output
# -----------------------------------------------------------------------------
def person():
""" RESTful CRUD controller """
# Enable this to allow migration of users between instances
#response.s3.filter = (s3db.pr_person.pe_id == s3db.pr_person_user.pe_id) & \
#(s3db.auth_user.id == s3db.pr_person_user.user_id) & \
#(s3db.auth_user.registration_key != "disabled")
# Custom Method for Contacts
s3mgr.model.set_method(module, resourcename,
method="contacts",
action=s3db.pr_contacts)
def prep(r):
if r.representation == "json" and \
not r.component and session.s3.filter_staff:
person_ids = session.s3.filter_staff
session.s3.filter_staff = None
r.resource.add_filter = (~(db.pr_person.id.belongs(person_ids)))
elif r.interactive:
if r.representation == "popup":
# Hide "pe_label" and "missing" fields in person popups
r.table.pe_label.readable = False
r.table.pe_label.writable = False
r.table.missing.readable = False
r.table.missing.writable = False
if r.component_name == "config":
_config = s3db.gis_config
response.s3.gis_config_form_setup()
# Name will be generated from person's name.
_config.name.readable = _config.name.writable = False
# Hide region fields
_config.region_location_id.readable = _config.region_location_id.writable = False
elif r.component_name == "competency":
ctable = s3db.hrm_competency
ctable.organisation_id.writable = False
ctable.skill_id.comment = None
#elif r.component_name == "pe_subscription":
# # Load all Tables
# s3mgr.model.load_all_models()
# db.pr_pe_subscription.resource.requires = IS_IN_SET(db.tables)
elif r.id:
r.table.volunteer.readable = True
r.table.volunteer.writable = True
return True
response.s3.prep = prep
def postp(r, output):
if r.component_name == "save_search":
stable = s3db.pr_save_search
# Handle Subscribe/Unsubscribe requests
if "subscribe" in r.get_vars:
save_search_id = r.get_vars.get("subscribe", None)
stable[save_search_id] = dict(subscribed = True)
if "unsubscribe" in r.get_vars:
save_search_id = r.get_vars.get("unsubscribe", None)
stable[save_search_id] = dict(subscribed = False)
s3_action_buttons(r)
rows = db(stable.subscribed == False).select(stable.id)
restrict_s = [str(row.id) for row in rows]
rows = db(stable.subscribed == True).select(stable.id)
restrict_u = [str(row.id) for row in rows]
response.s3.actions = \
response.s3.actions + [
dict(label=str(T("Load Search")),
_class="action-btn",
url=URL(f="load_search",
args=["[id]"]))
]
vars = {}
#vars["person.uid"] = r.uid
vars["subscribe"] = "[id]"
response.s3.actions.append(dict(label=str(T("Subscribe")),
_class="action-btn",
url = URL(f = "person",
args = [s3_logged_in_person(),
"save_search"],
vars = vars),
restrict = restrict_s)
)
var = {}
#var["person.uid"] = r.uid
var["unsubscribe"] = "[id]"
response.s3.actions.append(dict(label=str(T("Unsubscribe")),
_class="action-btn",
url = URL(f = "person",
args = [s3_logged_in_person(),
"save_search",],
vars = var),
restrict = restrict_u)
)
return output
response.s3.postp = postp
s3mgr.configure("pr_group_membership",
list_fields=["id",
"group_id",
"group_head",
"description"
])
# Basic tabs
tabs = [(T("Basic Details"), None),
#(T("Address"), "address"),
#(T("Contacts"), "contact"),
(T("Contact Details"), "contacts"),
(T("Images"), "image"),
(T("Identity"), "identity"),
(T("Groups"), "group_membership"),
(T("Journal"), "note"),
(T("Skills"), "competency"),
(T("Training"), "training"),
]
# Configuration tabs
if deployment_settings.get_save_search_widget():
tabs = tabs + [(T("Saved Searches"), "save_search"),
(T("Subscription Details"), "subscription")]
tabs.append((T("Map Settings"), "config"))
s3mgr.configure("pr_person", listadd=False, insertable=True)
output = s3_rest_controller(main="first_name",
extra="last_name",
rheader=lambda r: \
s3db.pr_rheader(r, tabs=tabs))
return output
# -----------------------------------------------------------------------------
def address():
"""
RESTful controller to allow creating/editing of address records within
contacts()
"""
# CRUD pre-process
def prep(r):
person_id = request.get_vars.get("person", None)
if person_id:
s3mgr.configure("pr_address",
create_next=URL(f="person",
args=[person_id, "contacts"]),
update_next=URL(f="person",
args=[person_id, "contacts"])
)
if r.method == "create":
table = s3db.pr_person
query = (table.id == person_id)
pe_id = db(query).select(table.pe_id,
limitby=(0, 1)).first().pe_id
s3db.pr_address.pe_id.default = pe_id
return True
response.s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def contact():
"""
RESTful controller to allow creating/editing of contact records within
contacts()
"""
# CRUD pre-process
def prep(r):
person_id = request.get_vars.get("person", None)
if person_id:
s3mgr.configure("pr_contact",
create_next=URL(f="person",
args=[person_id, "contacts"]),
update_next=URL(f="person",
args=[person_id, "contacts"])
)
if r.method == "create":
table = s3db.pr_person
query = (table.id == person_id)
pe_id = db(query).select(table.pe_id,
limitby=(0, 1)).first().pe_id
s3db.pr_contact.pe_id.default = pe_id
return True
response.s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def contact_emergency():
"""
RESTful controller to allow creating/editing of emergency contact
records within contacts()
"""
# CRUD pre-process
def prep(r):
person_id = request.get_vars.get("person", None)
if person_id:
s3mgr.configure("pr_contact_emergency",
create_next=URL(f="person",
args=[person_id, "contacts"]),
update_next=URL(f="person",
args=[person_id, "contacts"])
)
if r.method == "create":
table = s3db.pr_person
query = (table.id == person_id)
pe_id = db(query).select(table.pe_id,
limitby=(0, 1)).first().pe_id
s3db.pr_contact_emergency.pe_id.default = pe_id
return True
response.s3.prep = prep
output = s3_rest_controller()
return output
# -----------------------------------------------------------------------------
def person_search():
"""
Person REST controller
- limited to just search.json for use in Autocompletes
- allows differential access permissions
"""
response.s3.prep = lambda r: r.representation == "json" and \
r.method == "search"
return s3_rest_controller(module, "person")
# -----------------------------------------------------------------------------
def group():
""" RESTful CRUD controller """
tablename = "pr_group"
table = s3db[tablename]
response.s3.filter = (table.system == False) # do not show system groups
s3mgr.configure("pr_group_membership",
list_fields=["id",
"person_id",
"group_head",
"description"
])
rheader = lambda r: s3db.pr_rheader(r, tabs = [(T("Group Details"), None),
(T("Address"), "address"),
(T("Contact Data"), "contact"),
(T("Members"), "group_membership")
])
output = s3_rest_controller(rheader=rheader)
return output
# -----------------------------------------------------------------------------
def image():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
#def contact():
# """ RESTful CRUD controller """
#
# table = s3db.pr_contact
#
# table.pe_id.label = T("Person/Group")
# table.pe_id.readable = True
# table.pe_id.writable = True
#
# return s3_rest_controller()
# -----------------------------------------------------------------------------
def presence():
"""
RESTful CRUD controller
- needed for Map Popups (no Menu entry for direct access)
@deprecated - People now use Base Location pr_person.location_id
"""
table = s3db.pr_presence
# Settings suitable for use in Map Popups
table.pe_id.readable = True
table.pe_id.label = "Name"
table.pe_id.represent = s3db.pr_person_represent
table.observer.readable = False
table.presence_condition.readable = False
# @ToDo: Add Skills
return s3_rest_controller()
# -----------------------------------------------------------------------------
def pentity():
"""
RESTful CRUD controller
- limited to just search.json for use in Autocompletes
"""
response.s3.prep = lambda r: r.representation == "json" and \
r.method == "search"
return s3_rest_controller()
# -----------------------------------------------------------------------------
def affiliation():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def role():
""" RESTful CRUD controller """
return s3_rest_controller()
# -----------------------------------------------------------------------------
def tooltip():
""" Ajax tooltips """
if "formfield" in request.vars:
response.view = "pr/ajaxtips/%s.html" % request.vars.formfield
return dict()
# -----------------------------------------------------------------------------
def person_duplicates():
""" Handle De-duplication of People
@todo: permissions, audit, update super entity, PEP8, optimization?
@todo: check for component data!
@todo: user accounts, subscriptions?
"""
# Shortcut
persons = s3db.pr_person
table_header = THEAD(TR(TH(T("Person 1")),
TH(T("Person 2")),
TH(T("Match Percentage")),
TH(T("Resolve"))))
    # Calculate the max possible combinations of records
    # to handle the AJAX requests from the dataTables jQuery plugin.
totalRecords = db(persons.id > 0).count()
item_list = []
if request.vars.iDisplayStart:
end = int(request.vars.iDisplayLength) + int(request.vars.iDisplayStart)
records = db((persons.id > 0) & \
(persons.deleted == False) & \
(persons.first_name != None)).select(persons.id, # Should this be persons.ALL?
persons.pe_label,
persons.missing,
persons.first_name,
persons.middle_name,
persons.last_name,
persons.preferred_name,
persons.local_name,
persons.age_group,
persons.gender,
persons.date_of_birth,
persons.nationality,
persons.country,
persons.religion,
persons.marital_status,
persons.occupation,
persons.tags,
persons.comments)
        # Calculate the match percentage using the Jaro-Winkler algorithm
count = 1
i = 0
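        # Compare each unordered pair of records exactly once: the inner
        # loop below only scores pairs with j >= i and skips identical ids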
for onePerson in records: #[:len(records)/2]:
soundex1= soundex(onePerson.first_name)
array1 = []
array1.append(onePerson.pe_label)
array1.append(str(onePerson.missing))
array1.append(onePerson.first_name)
array1.append(onePerson.middle_name)
array1.append(onePerson.last_name)
array1.append(onePerson.preferred_name)
array1.append(onePerson.local_name)
array1.append(pr_age_group_opts.get(onePerson.age_group, T("None")))
array1.append(pr_gender_opts.get(onePerson.gender, T("None")))
array1.append(str(onePerson.date_of_birth))
array1.append(pr_nations.get(onePerson.nationality, T("None")))
array1.append(pr_nations.get(onePerson.country, T("None")))
array1.append(pr_religion_opts.get(onePerson.religion, T("None")))
array1.append(pr_marital_status_opts.get(onePerson.marital_status, T("None")))
array1.append(onePerson.occupation)
# Format tags into an array
if onePerson.tags != None:
tagname = []
for item in onePerson.tags:
tagname.append(pr_impact_tags.get(item, T("None")))
array1.append(tagname)
else:
array1.append(onePerson.tags)
array1.append(onePerson.comments)
i = i + 1
j = 0
for anotherPerson in records: #[len(records)/2:]:
soundex2 = soundex(anotherPerson.first_name)
if j >= i:
array2 =[]
array2.append(anotherPerson.pe_label)
array2.append(str(anotherPerson.missing))
array2.append(anotherPerson.first_name)
array2.append(anotherPerson.middle_name)
array2.append(anotherPerson.last_name)
array2.append(anotherPerson.preferred_name)
array2.append(anotherPerson.local_name)
array2.append(pr_age_group_opts.get(anotherPerson.age_group, T("None")))
array2.append(pr_gender_opts.get(anotherPerson.gender, T("None")))
array2.append(str(anotherPerson.date_of_birth))
array2.append(pr_nations.get(anotherPerson.nationality, T("None")))
array2.append(pr_nations.get(anotherPerson.country, T("None")))
array2.append(pr_religion_opts.get(anotherPerson.religion, T("None")))
array2.append(pr_marital_status_opts.get(anotherPerson.marital_status, T("None")))
array2.append(anotherPerson.occupation)
# Format tags into an array
if anotherPerson.tags != None:
tagname = []
for item in anotherPerson.tags:
tagname.append(pr_impact_tags.get(item, T("None")))
array2.append(tagname)
else:
array2.append(anotherPerson.tags)
array2.append(anotherPerson.comments)
if count > end and request.vars.max != "undefined":
count = int(request.vars.max)
                        break
if onePerson.id == anotherPerson.id:
continue
else:
mpercent = jaro_winkler_distance_row(array1, array2)
                        # Pick records whose match percentage is >50 or whose first-name soundex values are equal
if int(mpercent) > 50 or (soundex1 == soundex2):
count = count + 1
item_list.append([onePerson.first_name,
anotherPerson.first_name,
mpercent,
"<a href=\"../pr/person_resolve?perID1=%i&perID2=%i\", class=\"action-btn\">Resolve</a>" % (onePerson.id, anotherPerson.id)
])
else:
continue
j = j + 1
item_list = item_list[int(request.vars.iDisplayStart):end]
# Convert data to JSON
result = []
result.append({
"sEcho" : request.vars.sEcho,
"iTotalRecords" : count,
"iTotalDisplayRecords" : count,
"aaData" : item_list
})
output = json.dumps(result)
# Remove unwanted brackets
output = output[1:]
output = output[:-1]
return output
else:
        # Don't load records except via dataTables (saves duplicate loading & is less confusing for the user)
items = DIV((TABLE(table_header, TBODY(), _id="list", _class="dataTable display")))
return(dict(items=items))
# -----------------------------------------------------------------------------
def delete_person():
"""
        Update all references from the old record to point to the new
        record, then mark the old record as deleted.
@todo: components??? cannot simply be re-linked!
@todo: user accounts?
@todo: super entity not updated!
"""
# @ToDo: Error gracefully if conditions not satisfied
old = request.vars.old
new = request.vars.new
# Find all tables which link to the pr_person table
tables = s3_table_links("pr_person")
for table in tables:
for count in range(len(tables[table])):
field = tables[str(db[table])][count]
query = db[table][field] == old
db(query).update(**{field:new})
# Remove the record
db(db.pr_person.id == old).update(deleted=True)
return "Other Record Deleted, Linked Records Updated Successfully"
# -----------------------------------------------------------------------------
def person_resolve():
"""
This opens a popup screen where the de-duplication process takes place.
@todo: components??? cannot simply re-link!
@todo: user accounts linked to these records?
@todo: update the super entity!
@todo: use S3Resources, implement this as a method handler
"""
# @ToDo: Error gracefully if conditions not satisfied
perID1 = request.vars.perID1
perID2 = request.vars.perID2
# Shortcut
persons = s3db.pr_person
count = 0
for field in persons:
id1 = str(count) + "Right" # Gives a unique number to each of the arrow keys
id2 = str(count) + "Left"
        count = count + 1
# Comment field filled with buttons
field.comment = DIV(TABLE(TR(TD(INPUT(_type="button", _id=id1, _class="rightArrows", _value="-->")),
TD(INPUT(_type="button", _id=id2, _class="leftArrows", _value="<--")))))
record = persons[perID1]
myUrl = URL(c="pr", f="person")
form1 = SQLFORM(persons, record, _id="form1", _action=("%s/%s" % (myUrl, perID1)))
# For the second record remove all the comments to save space.
for field in persons:
field.comment = None
record = persons[perID2]
form2 = SQLFORM(persons, record, _id="form2", _action=("%s/%s" % (myUrl, perID2)))
return dict(form1=form1, form2=form2, perID1=perID1, perID2=perID2)
#------------------------------------------------------------------------------
# Function to redirect for loading the search
#
def load_search():
var = {}
var["load"] = request.args[0]
table = s3db.pr_save_search
rows = db(table.id == request.args[0]).select(table.ALL)
import cPickle
for row in rows:
search_vars = cPickle.loads(row.search_vars)
prefix = str(search_vars["prefix"])
function = str(search_vars["function"])
break
redirect(URL(r=request, c=prefix, f=function, args=["search"],vars=var))
return
# END =========================================================================
from plumbum import cli, local
from plumbum.cli.terminal import get_terminal_size
class SimpleApp(cli.Application):
@cli.switch(["a"])
def spam(self):
print("!!a")
@cli.switch(
["b", "bacon"], argtype=int, mandatory=True, envname="PLUMBUM_TEST_BACON"
)
def bacon(self, param):
"""give me some bacon"""
print("!!b", param)
eggs = cli.SwitchAttr(
["e"], str, help="sets the eggs attribute", envname="PLUMBUM_TEST_EGGS"
)
cheese = cli.Flag(["--cheese"], help="cheese, please")
chives = cli.Flag(["--chives"], help="chives, instead")
verbose = cli.CountOf(["v"], help="increases the verbosity level")
benedict = cli.CountOf(
["--benedict"],
help="""a very long help message with lots of
useless information that nobody would ever want to read, but heck, we need to test
text wrapping in help messages as well""",
)
csv = cli.SwitchAttr(["--csv"], cli.Set("MIN", "MAX", int, csv=True))
num = cli.SwitchAttr(["--num"], cli.Set("MIN", "MAX", int))
def main(self, *args):
old = self.eggs
self.eggs = "lalala"
self.eggs = old
self.tailargs = args
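# A minimal sketch of driving SimpleApp programmatically (mirrors the
# tests further below):
#   inst, rc = SimpleApp.run(["foo", "--bacon=81", "-e", "7"], exit=False)
#   assert rc == 0 and inst.eggs == "7"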
class PositionalApp(cli.Application):
def main(self, one):
print("Got", one)
class Geet(cli.Application):
debug = cli.Flag("--debug")
cleanups = []
def main(self):
del self.cleanups[:]
print("hi this is geet main")
def cleanup(self, retcode):
self.cleanups.append(1)
print(f"geet cleaning up with rc = {retcode}")
@Geet.subcommand("add")
class GeetAdd(cli.Application):
def main(self, *files):
return "adding", files
@Geet.subcommand("commit")
class GeetCommit(cli.Application):
message = cli.Flag("-m", str)
def main(self):
if self.parent.debug:
return "committing in debug"
else:
return "committing"
def cleanup(self, retcode):
self.parent.cleanups.append(2)
print(f"geet commit cleaning up with rc = {retcode}")
class Sample(cli.Application):
DESCRIPTION = "A sample cli application"
DESCRIPTION_MORE = """
ABC This is just a sample help text typed with a Dvorak keyboard.
Although this paragraph is not left or right justified
in source, we expect it to appear
formatted nicely on the output, maintaining the indentation of the first line.
DEF this one has a different indentation.
Let's test that list items are not combined as paragraphs.
- Item 1
GHI more text for item 1, which may be very very very very very very long and even more long and long and long to
prove that we can actually wrap list items as well.
- Item 2 and this is
some text for item 2
- Item 3
List items with invisible bullets should be printed without the bullet.
/XYZ Invisible 1
/Invisible 2
* Star 1
* Star 2
Last paragraph can fill more than one line on the output as well. So many features is bound to cause lots of bugs.
Oh well...
"""
foo = cli.SwitchAttr("--foo")
Sample.unbind_switches("--version")
class Mumble(cli.Application):
pass
Sample.subcommand("mumble", Mumble)
class LazyLoaded(cli.Application):
def main(self):
print("hello world")
class AppA(cli.Application):
@cli.switch(["--one"])
def one(self):
pass
two = cli.SwitchAttr(["--two"])
class AppB(AppA):
@cli.switch(["--three"])
def three(self):
pass
four = cli.SwitchAttr(["--four"])
def main(self):
pass
# Testing #363
class TestInheritedApp:
def test_help(self, capsys):
_, rc = AppB.run(["AppB", "-h"], exit=False)
assert rc == 0
stdout, stderr = capsys.readouterr()
assert "--one" in stdout
assert "--two" in stdout
assert "--three" in stdout
assert "--four" in stdout
class TestCLI:
def test_meta_switches(self):
_, rc = SimpleApp.run(["foo", "-h"], exit=False)
assert rc == 0
_, rc = SimpleApp.run(["foo", "--version"], exit=False)
assert rc == 0
def test_okay(self):
_, rc = SimpleApp.run(["foo", "--bacon=81"], exit=False)
assert rc == 0
inst, rc = SimpleApp.run(
[
"foo",
"--bacon=81",
"-a",
"-v",
"-e",
"7",
"-vv",
"--",
"lala",
"-e",
"7",
],
exit=False,
)
assert rc == 0
assert inst.eggs == "7"
_, rc = SimpleApp.run(["foo", "--bacon=81", "--csv=100"], exit=False)
assert rc == 0
_, rc = SimpleApp.run(["foo", "--bacon=81", "--csv=MAX,MIN,100"], exit=False)
assert rc == 0
_, rc = SimpleApp.run(["foo", "--bacon=81", "--num=100"], exit=False)
assert rc == 0
_, rc = SimpleApp.run(["foo", "--bacon=81", "--num=MAX"], exit=False)
assert rc == 0
_, rc = SimpleApp.run(["foo", "--bacon=81", "--num=MIN"], exit=False)
assert rc == 0
def test_failures(self):
_, rc = SimpleApp.run(["foo"], exit=False)
assert rc == 2
_, rc = SimpleApp.run(["foo", "--bacon=81", "--csv=xx"], exit=False)
assert rc == 2
_, rc = SimpleApp.run(["foo", "--bacon=81", "--csv=xx"], exit=False)
assert rc == 2
_, rc = SimpleApp.run(["foo", "--bacon=81", "--num=MOO"], exit=False)
assert rc == 2
_, rc = SimpleApp.run(["foo", "--bacon=81", "--num=MIN,MAX"], exit=False)
assert rc == 2
_, rc = SimpleApp.run(["foo", "--bacon=81", "--num=10.5"], exit=False)
assert rc == 2
_, rc = SimpleApp.run(["foo", "--bacon=hello"], exit=False)
assert rc == 2
# Testing #371
def test_extra_args(self, capsys):
_, rc = PositionalApp.run(["positionalapp"], exit=False)
assert rc != 0
stdout, stderr = capsys.readouterr()
assert "Expected at least" in stdout
_, rc = PositionalApp.run(["positionalapp", "one"], exit=False)
assert rc == 0
stdout, stderr = capsys.readouterr()
_, rc = PositionalApp.run(["positionalapp", "one", "two"], exit=False)
assert rc != 0
stdout, stderr = capsys.readouterr()
assert "Expected at most" in stdout
def test_subcommands(self):
_, rc = Geet.run(["geet", "--debug"], exit=False)
assert rc == 0
assert Geet.cleanups == [1]
_, rc = Geet.run(["geet", "--debug", "add", "foo.txt", "bar.txt"], exit=False)
assert rc == ("adding", ("foo.txt", "bar.txt"))
assert Geet.cleanups == [1]
_, rc = Geet.run(["geet", "--debug", "commit"], exit=False)
assert rc == "committing in debug"
assert Geet.cleanups == [2, 1]
_, rc = Geet.run(["geet", "--help"], exit=False)
assert rc == 0
_, rc = Geet.run(["geet", "commit", "--help"], exit=False)
assert rc == 0
assert Geet.cleanups == [1]
def test_help_all(self, capsys):
_, rc = Geet.run(["geet", "--help-all"], exit=False)
assert rc == 0
stdout, stderr = capsys.readouterr()
assert "--help-all" in stdout
assert "geet add" in stdout
assert "geet commit" in stdout
def test_unbind(self, capsys):
_, rc = Sample.run(["sample", "--help"], exit=False)
assert rc == 0
stdout, stderr = capsys.readouterr()
assert "--foo" in stdout
assert "--version" not in stdout
def test_description(self, capsys):
_, rc = Sample.run(["sample", "--help"], exit=False)
assert rc == 0
stdout, stderr = capsys.readouterr()
cols, _ = get_terminal_size()
if cols < 9:
# Terminal is too narrow to test
pass
else:
# Paragraph indentation should be preserved
assert " ABC" in stdout
assert " DEF" in stdout
assert " - Item" in stdout
# List items should not be combined into paragraphs
assert " * Star 2"
# Lines of the same list item should be combined. (The right-hand expression of the 'or' operator
# below is for when the terminal is too narrow, causing "GHI" to be wrapped to the next line.)
assert " GHI" not in stdout or " GHI" in stdout
# List item with invisible bullet should be indented without the bullet
assert " XYZ" in stdout
def test_default_main(self, capsys):
_, rc = Sample.run(["sample"], exit=False)
assert rc == 1
stdout, stderr = capsys.readouterr()
assert "No sub-command given" in stdout
_, rc = Sample.run(["sample", "pimple"], exit=False)
assert rc == 1
stdout, stderr = capsys.readouterr()
assert "Unknown sub-command 'pimple'" in stdout
_, rc = Sample.run(["sample", "mumble"], exit=False)
assert rc == 1
stdout, stderr = capsys.readouterr()
assert "main() not implemented" in stdout
def test_lazy_subcommand(self, capsys):
class Foo(cli.Application):
pass
Foo.subcommand("lazy", "test_cli.LazyLoaded")
_, rc = Foo.run(["foo", "lazy"], exit=False)
assert rc == 0
stdout, stderr = capsys.readouterr()
assert "hello world" in stdout
def test_reset_switchattr(self):
inst, rc = SimpleApp.run(["foo", "--bacon=81", "-e", "bar"], exit=False)
assert rc == 0
assert inst.eggs == "bar"
inst, rc = SimpleApp.run(["foo", "--bacon=81"], exit=False)
assert rc == 0
assert inst.eggs is None
def test_invoke(self):
inst, rc = SimpleApp.invoke("arg1", "arg2", eggs="sunny", bacon=10, verbose=2)
assert (inst.eggs, inst.verbose, inst.tailargs) == (
"sunny",
2,
("arg1", "arg2"),
)
def test_env_var(self, capsys):
_, rc = SimpleApp.run(["arg", "--bacon=10"], exit=False)
assert rc == 0
stdout, stderr = capsys.readouterr()
assert "10" in stdout
with local.env(
PLUMBUM_TEST_BACON="20",
PLUMBUM_TEST_EGGS="raw",
):
inst, rc = SimpleApp.run(["arg"], exit=False)
assert rc == 0
stdout, stderr = capsys.readouterr()
assert "20" in stdout
assert inst.eggs == "raw"
def test_mandatory_env_var(self, capsys):
_, rc = SimpleApp.run(["arg"], exit=False)
assert rc == 2
stdout, stderr = capsys.readouterr()
assert "bacon is mandatory" in stdout
def test_partial_switches(self, capsys):
app = SimpleApp
app.ALLOW_ABBREV = True
inst, rc = app.run(["foo", "--bacon=2", "--ch"], exit=False)
stdout, stderr = capsys.readouterr()
assert "Ambiguous partial switch" in stdout
assert rc == 2
inst, rc = app.run(["foo", "--bacon=2", "--chee"], exit=False)
assert rc == 0
assert inst.cheese is True
assert inst.chives is False
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from zhusuan.utils import log_mean_exp
from zhusuan.variational.base import VariationalObjective
__all__ = [
'importance_weighted_objective',
'iw_objective',
'ImportanceWeightedObjective',
]
class ImportanceWeightedObjective(VariationalObjective):
"""
The class that represents the importance weighted objective for
variational inference (Burda, 2015). An instance of the class can be
constructed by calling :func:`importance_weighted_objective`::
# lower_bound is an ImportanceWeightedObjective instance
lower_bound = zs.variational.importance_weighted_objective(
meta_bn, observed, variational=variational, axis=axis)
Here ``meta_bn`` is a :class:`~zhusuan.framework.meta_bn.MetaBayesianNet`
instance representing the model to be inferred. ``variational`` is
a :class:`~zhusuan.framework.bn.BayesianNet` instance that defines the
variational family. ``axis`` is the index of the sample dimension used
to estimate the expectation when computing the objective.
    Instances of :class:`ImportanceWeightedObjective` are Tensor-like. They
    can be automatically or manually cast into Tensors when fed into
    Tensorflow operations or used in computation with other Tensors, or
    when the :attr:`tensor` property is accessed. They can also be
    evaluated like a Tensor::
# evaluate the objective
with tf.Session() as sess:
            print(sess.run(lower_bound, feed_dict=...))
The objective computes the same importance-sampling based estimate
of the marginal log likelihood of observed variables as
:meth:`~zhusuan.evaluation.is_loglikelihood`. The difference is that the
estimate now serves as a variational objective, since it is also a lower
bound of the marginal log likelihood (as long as the number of samples is
finite). The variational posterior here is in fact the proposal. As a
variational objective, :class:`ImportanceWeightedObjective` provides two
gradient estimators for the variational (proposal) parameters:
* :meth:`sgvb`: The Stochastic Gradient Variational Bayes (SGVB) estimator,
also known as "the reparameterization trick", or "path derivative
estimator".
* :meth:`vimco`: The multi-sample score function estimator with variance
reduction, also known as "VIMCO".
The typical code for joint inference and learning is like::
# choose a gradient estimator to return the surrogate cost
cost = lower_bound.sgvb()
# or
# cost = lower_bound.vimco()
# optimize the surrogate cost wrt. model and variational
# parameters
optimizer = tf.train.AdamOptimizer(learning_rate)
infer_and_learn_op = optimizer.minimize(
cost, var_list=model_and_variational_parameters)
with tf.Session() as sess:
for _ in range(n_iters):
                _, lb = sess.run([infer_and_learn_op, lower_bound],
                                 feed_dict=...)
.. note::
Don't directly optimize the :class:`ImportanceWeightedObjective`
instance wrt. to variational parameters, i.e., parameters in
:math:`q`. Instead a proper gradient estimator should be chosen to
produce the correct surrogate cost to minimize, as shown in the above
code snippet.
Because the outer expectation in the objective is not related to model
parameters, it's fine to directly optimize the class instance wrt. model
parameters::
# optimize wrt. model parameters
learn_op = optimizer.minimize(-lower_bound,
var_list=model_parameters)
# or
# learn_op = optimizer.minimize(cost, var_list=model_parameters)
# both ways are correct
The above provides a way for users to combine the importance weighted
objective with different methods of adapting proposals (:math:`q`). In
    this situation the true posterior is a good choice, which indicates that
    any variational objective can be used for the adaptation. Specifically,
when the :func:`~zhusuan.variational.inclusive_kl.klpq` objective is
chosen, this reproduces the Reweighted Wake-Sleep algorithm
(Bornschein, 2015) for learning deep generative models.
:param meta_bn: A :class:`~zhusuan.framework.meta_bn.MetaBayesianNet`
instance or a log joint probability function.
        For the latter, it must accept a dictionary argument of
``(string, Tensor)`` pairs, which are mappings from all
node names in the model to their observed values. The
function should return a Tensor, representing the log joint likelihood
of the model.
:param observed: A dictionary of ``(string, Tensor)`` pairs. Mapping from
names of observed stochastic nodes to their values.
:param latent: A dictionary of ``(string, (Tensor, Tensor))`` pairs.
Mapping from names of latent stochastic nodes to their samples and
log probabilities. `latent` and `variational` are mutually exclusive.
:param axis: The sample dimension(s) to reduce when computing the
outer expectation in the objective. If ``None``, no dimension is
reduced.
:param variational: A :class:`~zhusuan.framework.bn.BayesianNet` instance
that defines the variational family.
`variational` and `latent` are mutually exclusive.
"""
def __init__(self, meta_bn, observed, latent=None, axis=None,
variational=None):
if axis is None:
raise ValueError(
"ImportanceWeightedObjective is a multi-sample objective, "
"the `axis` argument must be specified.")
self._axis = axis
super(ImportanceWeightedObjective, self).__init__(
meta_bn,
observed,
latent=latent,
variational=variational)
def _objective(self):
log_w = self._log_joint_term() + self._entropy_term()
if self._axis is not None:
return log_mean_exp(log_w, self._axis)
return log_w
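    # With K samples along `axis`, `_objective` estimates the multi-sample
    # lower bound of Burda (2015):
    #     L_K = E_{z_1..z_K ~ q}[log (1/K) sum_k p(x, z_k) / q(z_k|x)],
    # where each log weight log w_k = log p(x, z_k) - log q(z_k|x) is
    # exactly `_log_joint_term() + _entropy_term()` above.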
def sgvb(self):
"""
Implements the stochastic gradient variational bayes (SGVB) gradient
estimator for the objective, also known as "reparameterization trick"
or "path derivative estimator". It was first used for importance
weighted objectives in (Burda, 2015), where it's named "IWAE".
It only works for latent `StochasticTensor` s that can be
reparameterized (Kingma, 2013). For example,
:class:`~zhusuan.framework.stochastic.Normal`
and :class:`~zhusuan.framework.stochastic.Concrete`.
.. note::
To use the :meth:`sgvb` estimator, the ``is_reparameterized``
property of each latent `StochasticTensor` must be True (which is
the default setting when they are constructed).
:return: A Tensor. The surrogate cost for Tensorflow optimizers to
minimize.
"""
return -self.tensor
def vimco(self):
"""
Implements the multi-sample score function gradient estimator for
the objective, also known as "VIMCO", which is named
        by the authors of the original paper (Mnih, 2016).
It works for all kinds of latent `StochasticTensor` s.
.. note::
To use the :meth:`vimco` estimator, the ``is_reparameterized``
property of each reparameterizable latent `StochasticTensor` must
            be set to False.
:return: A Tensor. The surrogate cost for Tensorflow optimizers to
minimize.
"""
log_w = self._log_joint_term() + self._entropy_term()
l_signal = log_w
# check size along the sample axis
err_msg = "VIMCO is a multi-sample gradient estimator, size along " \
"`axis` in the objective should be larger than 1."
if l_signal.get_shape()[self._axis:self._axis + 1].is_fully_defined():
if l_signal.get_shape()[self._axis].value < 2:
raise ValueError(err_msg)
_assert_size_along_axis = tf.assert_greater_equal(
tf.shape(l_signal)[self._axis], 2, message=err_msg)
with tf.control_dependencies([_assert_size_along_axis]):
l_signal = tf.identity(l_signal)
# compute variance reduction term
mean_except_signal = (
tf.reduce_sum(l_signal, self._axis, keepdims=True) - l_signal
) / tf.cast(tf.shape(l_signal)[self._axis] - 1, l_signal.dtype)
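        # VIMCO's control variate: for each sample i, rebuild the set of
        # log weights with log w_i replaced by the mean of the other
        # samples' log weights, then take log-mean-exp over the set. The
        # transpose/matrix_diag manipulation below performs this
        # per-sample replacement along the sample axis.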
x, sub_x = l_signal, mean_except_signal
n_dim = tf.rank(x)
axis_dim_mask = tf.cast(tf.one_hot(self._axis, n_dim), tf.bool)
original_mask = tf.cast(tf.one_hot(n_dim - 1, n_dim), tf.bool)
axis_dim = tf.ones([n_dim], tf.int32) * self._axis
originals = tf.ones([n_dim], tf.int32) * (n_dim - 1)
perm = tf.where(original_mask, axis_dim, tf.range(n_dim))
perm = tf.where(axis_dim_mask, originals, perm)
multiples = tf.concat(
[tf.ones([n_dim], tf.int32), [tf.shape(x)[self._axis]]], 0)
x = tf.transpose(x, perm=perm)
sub_x = tf.transpose(sub_x, perm=perm)
x_ex = tf.tile(tf.expand_dims(x, n_dim), multiples)
x_ex = x_ex - tf.matrix_diag(x) + tf.matrix_diag(sub_x)
control_variate = tf.transpose(log_mean_exp(x_ex, n_dim - 1),
perm=perm)
# variance reduced objective
l_signal = log_mean_exp(l_signal, self._axis,
keepdims=True) - control_variate
fake_term = tf.reduce_sum(
-self._entropy_term() * tf.stop_gradient(l_signal), self._axis)
cost = -fake_term - log_mean_exp(log_w, self._axis)
return cost
def importance_weighted_objective(
meta_bn, observed, latent=None, axis=None, variational=None):
"""
The importance weighted objective for variational inference (Burda, 2015).
The returned value is an :class:`ImportanceWeightedObjective` instance.
See :class:`ImportanceWeightedObjective` for examples of usage.
:param meta_bn: A :class:`~zhusuan.framework.meta_bn.MetaBayesianNet`
instance or a log joint probability function.
        For the latter, it must accept a dictionary argument of
``(string, Tensor)`` pairs, which are mappings from all
node names in the model to their observed values. The
function should return a Tensor, representing the log joint likelihood
of the model.
:param observed: A dictionary of ``(string, Tensor)`` pairs. Mapping from
names of observed stochastic nodes to their values.
:param latent: A dictionary of ``(string, (Tensor, Tensor))`` pairs.
Mapping from names of latent stochastic nodes to their samples and
log probabilities. `latent` and `variational` are mutually exclusive.
:param axis: The sample dimension(s) to reduce when computing the
outer expectation in the objective. If ``None``, no dimension is
reduced.
:param variational: A :class:`~zhusuan.framework.bn.BayesianNet` instance
that defines the variational family.
`variational` and `latent` are mutually exclusive.
:return: An :class:`ImportanceWeightedObjective` instance.
"""
return ImportanceWeightedObjective(
meta_bn,
observed,
latent=latent,
axis=axis,
variational=variational)
# alias
iw_objective = importance_weighted_objective
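# A minimal usage sketch (`build_gen`, `x` and `variational` are
# illustrative names for a model-building MetaBayesianNet, an observation
# Tensor and the variational BayesianNet; they are not part of this
# module):
#
#   lower_bound = importance_weighted_objective(
#       build_gen, observed={'x': x}, variational=variational, axis=0)
#   cost = tf.reduce_mean(lower_bound.sgvb())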
import math
import mathutils
import bpy
from bpy import data, context, types
from bpy_extras.io_utils import axis_conversion
from .. import constants, logger, utilities, exceptions
from .constants import (
MESH,
EMPTY,
ARMATURE,
LAMP,
SPOT,
SUN,
POINT,
HEMI,
AREA,
CAMERA,
PERSP,
ORTHO,
RENDER,
NO_SHADOW,
ZYX
)
# Blender doesn't seem to have a good way to link a mesh back to the
# objects that are instancing it, or it is bloody obvious and I haven't
# discovered it yet. This manifest serves as a way to map a mesh
# node to the object nodes that are using it.
_MESH_MAP = {}
def _object(func):
"""
:param func:
"""
def inner(arg, *args, **kwargs):
"""
:param arg:
:param *args:
:param **kwargs:
"""
if isinstance(arg, types.Object):
obj = arg
else:
obj = data.objects[arg]
return func(obj, *args, **kwargs)
return inner
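# With this decorator the helpers below accept either a bpy object or its
# name, e.g. node_type(obj) and node_type('Cube') resolve the same object
# ('Cube' being an illustrative object name).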
def clear_mesh_map():
"""Clears the mesh map, required on initialization"""
_MESH_MAP.clear()
def assemblies(valid_types, options):
"""
:param valid_types:
:param options:
"""
logger.debug('object.assemblies(%s)', valid_types)
for obj in data.objects:
# rigged assets are parented under armature nodes
if obj.parent and obj.parent.type != ARMATURE:
continue
if obj.parent and obj.parent.type == ARMATURE:
logger.info('Has armature parent %s', obj.name)
if _valid_node(obj, valid_types, options):
yield obj.name
@_object
def cast_shadow(obj):
"""
:param obj:
"""
logger.debug('object.cast_shadow(%s)', obj)
if obj.type == LAMP:
if obj.data.type in (SPOT, SUN):
ret = obj.data.shadow_method != NO_SHADOW
else:
logger.info("%s is a lamp but this lamp type does not "
"have supported shadows in ThreeJS", obj.name)
ret = None
return ret
elif obj.type == MESH:
mat = material(obj)
if mat:
return data.materials[mat].use_cast_shadows
else:
return False
@_object
def children(obj, valid_types):
"""
:param obj:
:param valid_types:
"""
logger.debug('object.children(%s, %s)', obj, valid_types)
for child in obj.children:
if child.type in valid_types:
yield child.name
@_object
def material(obj):
"""
:param obj:
"""
logger.debug('object.material(%s)', obj)
try:
return obj.material_slots[0].name
except IndexError:
pass
@_object
def mesh(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.mesh(%s, %s)', obj, options)
if obj.type != MESH:
return
for mesh_, objects in _MESH_MAP.items():
if obj in objects:
return mesh_
else:
logger.debug('Could not map object, updating manifest')
mesh_ = extract_mesh(obj, options)
        if len(mesh_.tessfaces) != 0:
manifest = _MESH_MAP.setdefault(mesh_.name, [])
manifest.append(obj)
mesh_name = mesh_.name
else:
# possibly just being used as a controller
logger.info('Object %s has no faces', obj.name)
mesh_name = None
return mesh_name
@_object
def name(obj):
"""
:param obj:
"""
return obj.name
@_object
def node_type(obj):
"""
:param obj:
"""
logger.debug('object.node_type(%s)', obj)
# standard transformation nodes are inferred
if obj.type == MESH:
return constants.MESH.title()
elif obj.type == EMPTY:
return constants.OBJECT.title()
dispatch = {
LAMP: {
POINT: constants.POINT_LIGHT,
SUN: constants.DIRECTIONAL_LIGHT,
SPOT: constants.SPOT_LIGHT,
HEMI: constants.HEMISPHERE_LIGHT,
AREA: constants.AREA_LIGHT,
},
CAMERA: {
PERSP: constants.PERSPECTIVE_CAMERA,
ORTHO: constants.ORTHOGRAPHIC_CAMERA
}
}
try:
return dispatch[obj.type][obj.data.type]
except AttributeError:
msg = 'Invalid type: %s' % obj.type
raise exceptions.UnsupportedObjectType(msg)
def nodes(valid_types, options):
"""
:param valid_types:
:param options:
"""
for obj in data.objects:
if _valid_node(obj, valid_types, options):
yield obj.name
@_object
def position(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.position(%s)', obj)
vector = matrix(obj, options).to_translation()
return (vector.x, vector.y, vector.z)
@_object
def receive_shadow(obj):
"""
:param obj:
"""
if obj.type == MESH:
mat = material(obj)
if mat:
return data.materials[mat].use_shadows
else:
return False
AXIS_CONVERSION = axis_conversion(to_forward='Z', to_up='Y').to_4x4()
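# Blender uses a Z-up world space while three.js is Y-up; AXIS_CONVERSION
# maps root-level transforms into the three.js convention.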
@_object
def matrix(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.matrix(%s)', obj)
if options.get(constants.HIERARCHY, False) and obj.parent:
parent_inverted = obj.parent.matrix_world.inverted(mathutils.Matrix())
return parent_inverted * obj.matrix_world
else:
return AXIS_CONVERSION * obj.matrix_world
@_object
def rotation(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.rotation(%s)', obj)
vector = matrix(obj, options).to_euler(ZYX)
return (vector.x, vector.y, vector.z)
@_object
def scale(obj, options):
"""
:param obj:
:param options:
"""
logger.debug('object.scale(%s)', obj)
vector = matrix(obj, options).to_scale()
return (vector.x, vector.y, vector.z)
@_object
def select(obj):
"""
:param obj:
"""
obj.select = True
@_object
def unselect(obj):
"""
:param obj:
"""
obj.select = False
@_object
def visible(obj):
"""
:param obj:
"""
logger.debug('object.visible(%s)', obj)
return obj.is_visible(context.scene)
def extract_mesh(obj, options, recalculate=False):
"""
:param obj:
:param options:
:param recalculate: (Default value = False)
"""
logger.debug('object.extract_mesh(%s, %s)', obj, options)
apply_modifiers = options.get(constants.APPLY_MODIFIERS, True)
if apply_modifiers:
bpy.ops.object.mode_set(mode='OBJECT')
mesh_node = obj.to_mesh(context.scene, apply_modifiers, RENDER)
# transfer the geometry type to the extracted mesh
mesh_node.THREE_geometry_type = obj.data.THREE_geometry_type
# now determine whether or not to export using the geometry type
# set globally from the exporter's options or to use the local
# override on the mesh node itself
opt_buffer = options.get(constants.GEOMETRY_TYPE)
opt_buffer = opt_buffer == constants.BUFFER_GEOMETRY
prop_buffer = mesh_node.THREE_geometry_type == constants.BUFFER_GEOMETRY
# if doing buffer geometry it is imperative to triangulate the mesh
if opt_buffer or prop_buffer:
original_mesh = obj.data
obj.data = mesh_node
logger.debug('swapped %s for %s',
original_mesh.name,
mesh_node.name)
bpy.ops.object.mode_set(mode='OBJECT')
obj.select = True
bpy.context.scene.objects.active = obj
logger.info('Applying triangulation to %s', obj.data.name)
bpy.ops.object.modifier_add(type='TRIANGULATE')
bpy.ops.object.modifier_apply(apply_as='DATA',
modifier='Triangulate')
obj.data = original_mesh
obj.select = False
    # recalculate the normals to face outwards; this is usually
    # best after applying modifiers, especially for something
    # like the mirror modifier
if recalculate:
logger.info('Recalculating normals')
original_mesh = obj.data
obj.data = mesh_node
bpy.context.scene.objects.active = obj
bpy.ops.object.mode_set(mode='EDIT')
bpy.ops.mesh.select_all(action='SELECT')
bpy.ops.mesh.normals_make_consistent()
bpy.ops.object.editmode_toggle()
obj.data = original_mesh
if not options.get(constants.SCENE):
xrot = mathutils.Matrix.Rotation(-math.pi/2, 4, 'X')
mesh_node.transform(xrot * obj.matrix_world)
# now generate a unique name
index = 0
while True:
        if index == 0:
mesh_name = '%sGeometry' % obj.data.name
else:
mesh_name = '%sGeometry.%d' % (obj.data.name, index)
try:
data.meshes[mesh_name]
index += 1
except KeyError:
break
mesh_node.name = mesh_name
mesh_node.update(calc_tessface=True)
mesh_node.calc_normals()
mesh_node.calc_tessface()
scale_ = options.get(constants.SCALE, 1)
mesh_node.transform(mathutils.Matrix.Scale(scale_, 4))
return mesh_node
def objects_using_mesh(mesh_node):
"""
:param mesh_node:
:return: list of object names
"""
logger.debug('object.objects_using_mesh(%s)', mesh_node)
for mesh_name, objects in _MESH_MAP.items():
if mesh_name == mesh_node.name:
return objects
else:
logger.warning('Could not find mesh mapping')
def prep_meshes(options):
"""Prep the mesh nodes. Preperation includes identifying:
- nodes that are on visible layers
- nodes that have export disabled
- nodes that have modifiers that need to be applied
:param options:
"""
logger.debug('object.prep_meshes(%s)', options)
mapping = {}
visible_layers = _visible_scene_layers()
for obj in data.objects:
if obj.type != MESH:
continue
        # this is ideal for skipping controller or proxy nodes
        # that may apply to a Blender scene but not a three.js scene
if not _on_visible_layer(obj, visible_layers):
logger.info('%s is not on a visible layer', obj.name)
continue
# if someone really insists on a visible node not being exportable
if not obj.THREE_export:
logger.info('%s export is disabled', obj.name)
continue
        # need to apply modifiers before moving on, and before
        # handling instancing. It is possible for 2 or more objects
        # to instance the same mesh but not all use the same modifiers.
        # This logic identifies objects with modifiers and extracts
        # the mesh, making the mesh unique to this particular object
if len(obj.modifiers):
            logger.info('%s has modifiers', obj.name)
mesh_node = extract_mesh(obj, options, recalculate=True)
_MESH_MAP[mesh_node.name] = [obj]
continue
logger.info('adding mesh %s.%s to prep',
obj.name, obj.data.name)
manifest = mapping.setdefault(obj.data.name, [])
manifest.append(obj)
# now associate the extracted mesh node with all the objects
# that are instancing it
for objects in mapping.values():
mesh_node = extract_mesh(objects[0], options)
_MESH_MAP[mesh_node.name] = objects
def extracted_meshes():
"""
:return: names of extracted mesh nodes
"""
logger.debug('object.extracted_meshes()')
return [key for key in _MESH_MAP.keys()]
def _on_visible_layer(obj, visible_layers):
"""
:param obj:
:param visible_layers:
"""
is_visible = False
for index, layer in enumerate(obj.layers):
if layer and index in visible_layers:
is_visible = True
break
if not is_visible:
logger.info('%s is on a hidden layer', obj.name)
return is_visible
def _visible_scene_layers():
"""
    :return: list of visible layer indices
"""
visible_layers = []
for index, layer in enumerate(context.scene.layers):
if layer:
visible_layers.append(index)
return visible_layers
def _valid_node(obj, valid_types, options):
"""
:param obj:
:param valid_types:
:param options:
"""
if obj.type not in valid_types:
return False
# skip objects that are not on visible layers
visible_layers = _visible_scene_layers()
if not _on_visible_layer(obj, visible_layers):
return False
try:
export = obj.THREE_export
except AttributeError:
export = True
if not export:
return False
mesh_node = mesh(obj, options)
is_mesh = obj.type == MESH
    # skip objects for which a mesh could not be resolved
if is_mesh and not mesh_node:
return False
# secondary test; if a mesh node was resolved but no
# faces are detected then bow out
if is_mesh:
mesh_node = data.meshes[mesh_node]
        if len(mesh_node.tessfaces) == 0:
return False
# if we get this far assume that the mesh is valid
return True
#!/usr/bin/python
#Name: netapp_api.py
#Desc: Uses the NetApp Data ONTAP API to get per-volume latency & iops metrics. Download the manageability SDK from now.netapp.com
#Author: Evan Fraser <evan.fraser@trademe.co.nz>
#Date: 13/08/2012
import sys
import time
import pprint
import unicodedata
import os
sys.path.append("/opt/netapp/lib/python/NetApp")
from NaServer import *
descriptors = list()
params = {}
filerdict = {}
FASMETRICS = {
'time' : 0,
'data' : {}
}
LAST_FASMETRICS = dict(FASMETRICS)
#This is the minimum interval between querying the filer for metrics
FASMETRICS_CACHE_MAX = 10
def get_metrics(name):
global FASMETRICS, LAST_FASMETRICS, FASMETRICS_CACHE_MAX, params
max_records = 10
metrics = {}
if (time.time() - FASMETRICS['time']) > FASMETRICS_CACHE_MAX:
for filer in filerdict.keys():
s = NaServer(filerdict[filer]['ipaddr'], 1, 3)
out = s.set_transport_type('HTTPS')
if (out and out.results_errno() != 0) :
r = out.results_reason()
print ("Connection to filer failed: " + r + "\n")
sys.exit(2)
out = s.set_style('LOGIN')
if (out and out.results_errno() != 0) :
r = out.results_reason()
print ("Connection to filer failed: " + r + "\n")
sys.exit(2)
out = s.set_admin_user(filerdict[filer]['user'], filerdict[filer]['password'])
perf_in = NaElement("perf-object-get-instances-iter-start")
#Hard coding volume object for testing
obj_name = "volume"
perf_in.child_add_string("objectname", obj_name)
#Create object of type counters
counters = NaElement("counters")
#Add counter names to the object
counters.child_add_string("counter", "total_ops")
counters.child_add_string("counter", "avg_latency")
counters.child_add_string("counter", "read_ops")
counters.child_add_string("counter", "read_latency")
counters.child_add_string("counter", "write_ops")
counters.child_add_string("counter", "write_latency")
perf_in.child_add(counters)
#Invoke API
out = s.invoke_elem(perf_in)
if(out.results_status() == "failed"):
print(out.results_reason() + "\n")
sys.exit(2)
iter_tag = out.child_get_string("tag")
num_records = 1
filername = filerdict[filer]['name']
while(int(num_records) != 0):
perf_in = NaElement("perf-object-get-instances-iter-next")
perf_in.child_add_string("tag", iter_tag)
perf_in.child_add_string("maximum", max_records)
out = s.invoke_elem(perf_in)
if(out.results_status() == "failed"):
print(out.results_reason() + "\n")
sys.exit(2)
num_records = out.child_get_int("records")
if(num_records > 0) :
instances_list = out.child_get("instances")
instances = instances_list.children_get()
for inst in instances:
inst_name = unicodedata.normalize('NFKD',inst.child_get_string("name")).encode('ascii','ignore')
counters_list = inst.child_get("counters")
counters = counters_list.children_get()
for counter in counters:
counter_name = unicodedata.normalize('NFKD',counter.child_get_string("name")).encode('ascii','ignore')
counter_value = counter.child_get_string("value")
counter_unit = counter.child_get_string("unit")
metrics[filername + '_vol_' + inst_name + '_' + counter_name] = float(counter_value)
# update cache
LAST_FASMETRICS = dict(FASMETRICS)
FASMETRICS = {
'time': time.time(),
'data': metrics
}
else:
metrics = FASMETRICS['data']
#print name
#calculate change in values and return
if 'total_ops' in name:
try:
delta = float(FASMETRICS['data'][name] - LAST_FASMETRICS['data'][name])/(FASMETRICS['time'] - LAST_FASMETRICS['time'])
if delta < 0:
print "Less than 0"
delta = 0
except StandardError:
delta = 0
#This is the Operations per second
return delta
elif 'avg_latency' in name:
try:
            #Average latency between samples T1 and T2:
            #(T2_lat - T1_lat) / (T2_ops - T1_ops)
#Find the metric name of the base counter
total_ops_name = name.replace('avg_latency', 'total_ops')
#Calculate latency in time (div 100 to change to ms)
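            #e.g. if the latency counter advanced by 2000 and total_ops by
            #100 between samples, the per-op latency is 2000 / 100 = 20
            #counter units before the scaling applied below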
return float((FASMETRICS['data'][name] - LAST_FASMETRICS['data'][name]) / (FASMETRICS['data'][total_ops_name] -LAST_FASMETRICS['data'][total_ops_name])) / 100
except StandardError:
return 0
elif 'read_ops' in name:
try:
delta = float(FASMETRICS['data'][name] - LAST_FASMETRICS['data'][name])/(FASMETRICS['time'] - LAST_FASMETRICS['time'])
if delta < 0:
print "Less than 0"
delta = 0
except StandardError:
delta = 0
return delta
elif 'read_latency' in name:
try:
read_ops_name = name.replace('read_latency', 'read_ops')
return float((FASMETRICS['data'][name] - LAST_FASMETRICS['data'][name]) / (FASMETRICS['data'][read_ops_name] -LAST_FASMETRICS['data'][read_ops_name])) / 100
except StandardError:
return 0
elif 'write_ops' in name:
try:
delta = float(FASMETRICS['data'][name] - LAST_FASMETRICS['data'][name])/(FASMETRICS['time'] - LAST_FASMETRICS['time'])
if delta < 0:
print "Less than 0"
delta = 0
except StandardError:
delta = 0
return delta
elif 'write_latency' in name:
try:
write_ops_name = name.replace('write_latency', 'write_ops')
return float((FASMETRICS['data'][name] - LAST_FASMETRICS['data'][name]) / (FASMETRICS['data'][write_ops_name] -LAST_FASMETRICS['data'][write_ops_name])) / 100
except StandardError:
return 0
return 0
def create_desc(skel, prop):
d = skel.copy()
for k,v in prop.iteritems():
d[k] = v
return d
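# Example (illustrative):
#   create_desc(Desc_Skel, {'name': 'filer1.localdomain_vol_vol0_total_ops',
#                           'units': 'iops'})
# returns a copy of Desc_Skel with 'name' and 'units' overridden.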
def define_metrics(Desc_Skel,params):
max_records = 10
for filer in params.keys():
s = NaServer(params[filer]['ipaddr'], 1, 3)
out = s.set_transport_type('HTTPS')
if (out and out.results_errno() != 0) :
r = out.results_reason()
print ("Connection to filer failed: " + r + "\n")
sys.exit(2)
out = s.set_style('LOGIN')
if (out and out.results_errno() != 0) :
r = out.results_reason()
print ("Connection to filer failed: " + r + "\n")
sys.exit(2)
out = s.set_admin_user(params[filer]['user'], params[filer]['password'])
perf_in = NaElement("perf-object-get-instances-iter-start")
#Hard coded volume, only volume stats gathered at present
obj_name = "volume"
perf_in.child_add_string("objectname", obj_name)
#Create object of type counters
counters = NaElement("counters")
#Add counter names to the object
counters.child_add_string("counter", "total_ops")
counters.child_add_string("counter", "avg_latency")
counters.child_add_string("counter", "read_ops")
counters.child_add_string("counter", "read_latency")
counters.child_add_string("counter", "write_ops")
counters.child_add_string("counter", "write_latency")
perf_in.child_add(counters)
#Invoke API
out = s.invoke_elem(perf_in)
if(out.results_status() == "failed"):
print(out.results_reason() + "\n")
sys.exit(2)
iter_tag = out.child_get_string("tag")
num_records = 1
filername = params[filer]['name']
while(int(num_records) != 0):
perf_in = NaElement("perf-object-get-instances-iter-next")
perf_in.child_add_string("tag", iter_tag)
perf_in.child_add_string("maximum", max_records)
out = s.invoke_elem(perf_in)
if(out.results_status() == "failed"):
print(out.results_reason() + "\n")
sys.exit(2)
num_records = out.child_get_int("records")
if(num_records > 0) :
instances_list = out.child_get("instances")
instances = instances_list.children_get()
for inst in instances:
inst_name = unicodedata.normalize('NFKD',inst.child_get_string("name")).encode('ascii','ignore')
#print ("Instance = " + inst_name + "\n")
counters_list = inst.child_get("counters")
counters = counters_list.children_get()
for counter in counters:
counter_name = unicodedata.normalize('NFKD',counter.child_get_string("name")).encode('ascii','ignore')
counter_value = counter.child_get_string("value")
counter_unit = counter.child_get_string("unit")
if 'total_ops' in counter_name:
descriptors.append(create_desc(Desc_Skel, {
"name" : filername + '_vol_' + inst_name + '_' + counter_name,
"units" : 'iops',
"description" : "volume iops",
"spoof_host" : params[filer]['ipaddr'] + ':' + params[filer]['name'],
"groups" : "iops"
}))
elif 'avg_latency' in counter_name:
descriptors.append(create_desc(Desc_Skel, {
"name" : filername + '_vol_' + inst_name + '_' + counter_name,
"units" : 'ms',
"description" : "volume avg latency",
"spoof_host" : params[filer]['ipaddr'] + ':' + params[filer]['name'],
"groups" : "latency"
}))
elif 'read_ops' in counter_name:
descriptors.append(create_desc(Desc_Skel, {
"name" : filername + '_vol_' + inst_name + '_' + counter_name,
"units" : 'iops',
"description" : "volume read iops",
"spoof_host" : params[filer]['ipaddr'] + ':' + params[filer]['name'],
"groups" : "iops"
}))
elif 'read_latency' in counter_name:
descriptors.append(create_desc(Desc_Skel, {
"name" : filername + '_vol_' + inst_name + '_' + counter_name,
"units" : 'ms',
"description" : "volume read latency",
"spoof_host" : params[filer]['ipaddr'] + ':' + params[filer]['name'],
"groups" : "latency"
}))
elif 'write_ops' in counter_name:
descriptors.append(create_desc(Desc_Skel, {
"name" : filername + '_vol_' + inst_name + '_' + counter_name,
"units" : 'iops',
"description" : "volume write iops",
"spoof_host" : params[filer]['ipaddr'] + ':' + params[filer]['name'],
"groups" : "iops"
}))
elif 'write_latency' in counter_name:
descriptors.append(create_desc(Desc_Skel, {
"name" : filername + '_vol_' + inst_name + '_' + counter_name,
"units" : 'ms',
"description" : "volume write latency",
"spoof_host" : params[filer]['ipaddr'] + ':' + params[filer]['name'],
"groups" : "latency"
}))
return descriptors
def metric_init(params):
global descriptors,filerdict
    print '[netapp_stats] Received the following parameters'
pprint.pprint(params)
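    # NOTE: the parameters received above are replaced by this hard-coded
    # example filer definition (illustrative host and credentials)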
params = {
'filer1' : {
'name' : 'filer1.localdomain',
'ipaddr' : '192.168.1.100',
'user' : 'root',
'password' : 'password',
},
}
filerdict = dict(params)
Desc_Skel = {
'name' : 'XXX',
'call_back' : get_metrics,
'time_max' : 60,
'value_type' : 'double',
'format' : '%0f',
'units' : 'XXX',
'slope' : 'both',
'description' : 'XXX',
'groups' : 'netiron',
'spoof_host' : 'XXX',
}
# Run define_metrics
descriptors = define_metrics(Desc_Skel,params)
return descriptors
# For CLI Debugging:
if __name__ == '__main__':
#global params
params = {
'filer1' : {
'name' : 'filer1.localdomain',
'ipaddr' : '192.168.1.100',
'user' : 'root',
'password' : 'password',
},
}
descriptors = metric_init(params)
pprint.pprint(descriptors)
#print len(descriptors)
while True:
for d in descriptors:
v = d['call_back'](d['name'])
#print v
print 'value for %s is %.2f' % (d['name'], v)
print 'Sleeping 5 seconds'
time.sleep(5)
#!/usr/bin/env python3.4
"""A simple web crawler."""
# TODO:
# - More organized logging (with task ID or URL?).
# - Use logging module for Logger.
# - KeyboardInterrupt in HTML parsing may hang or report unretrieved error.
# - Support gzip encoding.
# - Close connection if HTTP/1.0 response.
# - Add timeouts. (E.g. when switching networks, all seems to hang.)
# - Add arguments to specify TLS settings (e.g. cert/key files).
# - Skip reading large non-text/html files?
# - Use ETag and If-Modified-Since?
# - Handle out of file descriptors directly? (How?)
import argparse
import asyncio
import asyncio.locks
import cgi
from http.client import BadStatusLine
import logging
import re
import sys
import time
import urllib.parse
ARGS = argparse.ArgumentParser(description="Web crawler")
ARGS.add_argument(
'--iocp', action='store_true', dest='iocp',
default=False, help='Use IOCP event loop (Windows only)')
ARGS.add_argument(
'--select', action='store_true', dest='select',
default=False, help='Use Select event loop instead of default')
ARGS.add_argument(
'roots', nargs='*',
default=[], help='Root URL (may be repeated)')
ARGS.add_argument(
'--max_redirect', action='store', type=int, metavar='N',
default=10, help='Limit redirection chains (for 301, 302 etc.)')
ARGS.add_argument(
'--max_tries', action='store', type=int, metavar='N',
default=4, help='Limit retries on network errors')
ARGS.add_argument(
'--max_tasks', action='store', type=int, metavar='N',
default=100, help='Limit concurrent connections')
ARGS.add_argument(
'--max_pool', action='store', type=int, metavar='N',
default=100, help='Limit connection pool size')
ARGS.add_argument(
'--exclude', action='store', metavar='REGEX',
help='Exclude matching URLs')
ARGS.add_argument(
'--strict', action='store_true',
default=True, help='Strict host matching (default)')
ARGS.add_argument(
'--lenient', action='store_false', dest='strict',
default=False, help='Lenient host matching')
ARGS.add_argument(
'-v', '--verbose', action='count', dest='level',
default=1, help='Verbose logging (repeat for more verbose)')
ARGS.add_argument(
'-q', '--quiet', action='store_const', const=0, dest='level',
default=1, help='Quiet logging (opposite of --verbose)')
ESCAPES = [('quot', '"'),
('gt', '>'),
('lt', '<'),
('amp', '&') # Must be last.
]
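# '&amp;' must be decoded last: decoding it first would turn e.g.
# '&amp;gt;' into '&gt;', which the later ('gt', '>') pass would then
# wrongly decode to '>'.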
def unescape(url):
"""Turn & into &, and so on.
This is the inverse of cgi.escape().
"""
for name, char in ESCAPES:
url = url.replace('&' + name + ';', char)
return url
def fix_url(url):
"""Prefix a schema-less URL with http://."""
if '://' not in url:
url = 'http://' + url
return url
class Logger:
def __init__(self, level):
self.level = level
def _log(self, n, args):
if self.level >= n:
print(*args, file=sys.stderr, flush=True)
def log(self, n, *args):
self._log(n, args)
def __call__(self, n, *args):
self._log(n, args)
class ConnectionPool:
"""A connection pool.
    To open a connection, use get_connection(). To recycle it, use
    recycle_connection().
The pool is mostly just a mapping from (host, port, ssl) tuples to
lists of Connections. The currently active connections are *not*
in the data structure; get_connection() takes the connection out,
and recycle_connection() puts it back in. To recycle a
connection, call conn.close(recycle=True).
There are limits to both the overall pool and the per-key pool.
"""
def __init__(self, log, max_pool=10, max_tasks=5):
self.log = log
self.max_pool = max_pool # Overall limit.
self.max_tasks = max_tasks # Per-key limit.
self.loop = asyncio.get_event_loop()
self.connections = {} # {(host, port, ssl): [Connection, ...], ...}
self.queue = [] # [Connection, ...]
def close(self):
"""Close all connections available for reuse."""
for conns in self.connections.values():
for conn in conns:
conn.close()
self.connections.clear()
self.queue.clear()
@asyncio.coroutine
def get_connection(self, host, port, ssl):
"""Create or reuse a connection."""
port = port or (443 if ssl else 80)
try:
ipaddrs = yield from self.loop.getaddrinfo(host, port)
except Exception as exc:
self.log(0, 'Exception %r for (%r, %r)' % (exc, host, port))
raise
self.log(1, '* %s resolves to %s' %
(host, ', '.join(ip[4][0] for ip in ipaddrs)))
# Look for a reusable connection.
for _, _, _, _, (h, p, *_) in ipaddrs:
key = h, p, ssl
conn = None
conns = self.connections.get(key)
while conns:
conn = conns.pop(0)
self.queue.remove(conn)
if not conns:
del self.connections[key]
if conn.stale():
self.log(1, 'closing stale connection for', key)
conn.close() # Just in case.
else:
self.log(1, '* Reusing pooled connection', key,
'FD =', conn.fileno())
return conn
# Create a new connection.
conn = Connection(self.log, self, host, port, ssl)
yield from conn.connect()
self.log(1, '* New connection', conn.key, 'FD =', conn.fileno())
return conn
def recycle_connection(self, conn):
"""Make a connection available for reuse.
This also prunes the pool if it exceeds the size limits.
"""
if conn.stale():
conn.close()
return
key = conn.key
conns = self.connections.setdefault(key, [])
conns.append(conn)
self.queue.append(conn)
if len(conns) <= self.max_tasks and len(self.queue) <= self.max_pool:
return
# Prune the queue.
# Close stale connections for this key first.
stale = [conn for conn in conns if conn.stale()]
if stale:
for conn in stale:
conns.remove(conn)
self.queue.remove(conn)
self.log(1, 'closing stale connection for', key)
conn.close()
if not conns:
del self.connections[key]
# Close oldest connection(s) for this key if limit reached.
while len(conns) > self.max_tasks:
conn = conns.pop(0)
self.queue.remove(conn)
self.log(1, 'closing oldest connection for', key)
conn.close()
if len(self.queue) <= self.max_pool:
return
# Close overall stale connections.
stale = [conn for conn in self.queue if conn.stale()]
if stale:
for conn in stale:
conns = self.connections.get(conn.key)
conns.remove(conn)
self.queue.remove(conn)
                self.log(1, 'closing stale connection for', conn.key)
conn.close()
# Close oldest overall connection(s) if limit reached.
while len(self.queue) > self.max_pool:
conn = self.queue.pop(0)
conns = self.connections.get(conn.key)
c = conns.pop(0)
assert conn == c, (conn.key, conn, c, conns)
self.log(1, 'closing overall oldest connection for', conn.key)
conn.close()
class Connection:
def __init__(self, log, pool, host, port, ssl):
self.log = log
self.pool = pool
self.host = host
self.port = port
self.ssl = ssl
self.reader = None
self.writer = None
self.key = None
def stale(self):
return self.reader is None or self.reader.at_eof()
def fileno(self):
writer = self.writer
if writer is not None:
transport = writer.transport
if transport is not None:
sock = transport.get_extra_info('socket')
if sock is not None:
return sock.fileno()
return None
@asyncio.coroutine
def connect(self):
self.reader, self.writer = yield from asyncio.open_connection(
self.host, self.port, ssl=self.ssl)
peername = self.writer.get_extra_info('peername')
if peername:
self.host, self.port = peername[:2]
else:
self.log(1, 'NO PEERNAME???', self.host, self.port, self.ssl)
self.key = self.host, self.port, self.ssl
def close(self, recycle=False):
if recycle and not self.stale():
self.pool.recycle_connection(self)
else:
self.writer.close()
self.pool = self.reader = self.writer = None
class Request:
"""HTTP request.
Use connect() to open a connection; send_request() to send the
request; get_response() to receive the response headers.
"""
def __init__(self, log, url, pool):
self.log = log
self.url = url
self.pool = pool
self.parts = urllib.parse.urlparse(self.url)
self.scheme = self.parts.scheme
assert self.scheme in ('http', 'https'), repr(url)
self.ssl = self.parts.scheme == 'https'
self.netloc = self.parts.netloc
self.hostname = self.parts.hostname
self.port = self.parts.port or (443 if self.ssl else 80)
self.path = (self.parts.path or '/')
self.query = self.parts.query
if self.query:
self.full_path = '%s?%s' % (self.path, self.query)
else:
self.full_path = self.path
self.http_version = 'HTTP/1.1'
self.method = 'GET'
self.headers = []
self.conn = None
@asyncio.coroutine
def connect(self):
"""Open a connection to the server."""
self.log(1, '* Connecting to %s:%s using %s for %s' %
(self.hostname, self.port,
'ssl' if self.ssl else 'tcp',
self.url))
self.conn = yield from self.pool.get_connection(self.hostname,
self.port, self.ssl)
def close(self, recycle=False):
"""Close the connection, recycle if requested."""
if self.conn is not None:
if not recycle:
self.log(1, 'closing connection for', self.conn.key)
self.conn.close(recycle)
self.conn = None
@asyncio.coroutine
def putline(self, line):
"""Write a line to the connection.
Used for the request line and headers.
"""
self.log(2, '>', line)
self.conn.writer.write(line.encode('latin-1') + b'\r\n')
@asyncio.coroutine
def send_request(self):
"""Send the request."""
request_line = '%s %s %s' % (self.method, self.full_path,
self.http_version)
yield from self.putline(request_line)
# TODO: What if a header is already set?
self.headers.append(('User-Agent', 'asyncio-example-crawl/0.0'))
self.headers.append(('Host', self.netloc))
self.headers.append(('Accept', '*/*'))
##self.headers.append(('Accept-Encoding', 'gzip'))
for key, value in self.headers:
line = '%s: %s' % (key, value)
yield from self.putline(line)
yield from self.putline('')
@asyncio.coroutine
def get_response(self):
"""Receive the response."""
response = Response(self.log, self.conn.reader)
yield from response.read_headers()
return response
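# A minimal sketch of one GET round trip with Request/Response, mirroring
# what Fetcher.fetch() does below (url and pool supplied by the caller).
@asyncio.coroutine
def _get_once(log, pool, url):
    request = Request(log, url, pool)
    yield from request.connect()
    yield from request.send_request()
    response = yield from request.get_response()
    body = yield from response.read()
    request.close(recycle=True)  # recycle the connection if still usable
    return response.status, body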
class Response:
"""HTTP response.
Call read_headers() to receive the response status and headers. Then check
the status attribute and call get_header() to inspect the headers.
Finally call read() to receive the body.
"""
def __init__(self, log, reader):
self.log = log
self.reader = reader
self.http_version = None # 'HTTP/1.1'
self.status = None # 200
self.reason = None # 'Ok'
self.headers = [] # [('Content-Type', 'text/html')]
@asyncio.coroutine
def getline(self):
"""Read one line from the connection."""
line = (yield from self.reader.readline()).decode('latin-1').rstrip()
self.log(2, '<', line)
return line
@asyncio.coroutine
def read_headers(self):
"""Read the response status and the request headers."""
status_line = yield from self.getline()
status_parts = status_line.split(None, 2)
if len(status_parts) != 3:
self.log(0, 'bad status_line', repr(status_line))
raise BadStatusLine(status_line)
self.http_version, status, self.reason = status_parts
self.status = int(status)
while True:
header_line = yield from self.getline()
if not header_line:
break
# TODO: Continuation lines.
key, value = header_line.split(':', 1)
self.headers.append((key, value.strip()))
def get_redirect_url(self, default=''):
"""Inspect the status and return the redirect url if appropriate."""
if self.status not in (300, 301, 302, 303, 307):
return default
return self.get_header('Location', default)
def get_header(self, key, default=''):
"""Get one header value, using a case insensitive header name."""
key = key.lower()
for k, v in self.headers:
if k.lower() == key:
return v
return default
@asyncio.coroutine
def read(self):
"""Read the response body.
This honors Content-Length and Transfer-Encoding: chunked.
"""
nbytes = None
for key, value in self.headers:
if key.lower() == 'content-length':
nbytes = int(value)
break
if nbytes is None:
if self.get_header('transfer-encoding').lower() == 'chunked':
self.log(2, 'parsing chunked response')
blocks = []
while True:
size_header = yield from self.reader.readline()
if not size_header:
self.log(0, 'premature end of chunked response')
break
self.log(3, 'size_header =', repr(size_header))
parts = size_header.split(b';')
size = int(parts[0], 16)
if size:
self.log(3, 'reading chunk of', size, 'bytes')
block = yield from self.reader.readexactly(size)
assert len(block) == size, (len(block), size)
blocks.append(block)
crlf = yield from self.reader.readline()
assert crlf == b'\r\n', repr(crlf)
if not size:
break
body = b''.join(blocks)
self.log(1, 'chunked response had', len(body),
'bytes in', len(blocks), 'blocks')
else:
self.log(3, 'reading until EOF')
body = yield from self.reader.read()
# TODO: Should make sure not to recycle the connection
# in this case.
else:
body = yield from self.reader.readexactly(nbytes)
return body
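# For reference, the chunked wire format handled above, sizes in hex:
#
#   b'5\r\nhello\r\n0\r\n\r\n'  ->  body == b'hello'
#
# Each chunk is '<hex size>\r\n<payload>\r\n'; a zero-size chunk terminates
# the body.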
class Fetcher:
"""Logic and state for one URL.
When found in crawler.busy, this represents a URL to be fetched or
in the process of being fetched; when found in crawler.done, this
holds the results from fetching it.
This is usually associated with a task. This references the
crawler for the connection pool and to add more URLs to its todo
list.
Call fetch() to do the fetching, then report() to print the results.
"""
def __init__(self, log, url, crawler, max_redirect=10, max_tries=4):
self.log = log
self.url = url
self.crawler = crawler
# We don't loop resolving redirects here -- we just use this
# to decide whether to add the redirect URL to crawler.todo.
self.max_redirect = max_redirect
# But we do loop to retry on errors a few times.
self.max_tries = max_tries
# Everything we collect from the response goes here.
self.task = None
self.exceptions = []
self.tries = 0
self.request = None
self.response = None
self.body = None
self.next_url = None
self.ctype = None
self.pdict = None
self.encoding = None
self.urls = None
self.new_urls = None
@asyncio.coroutine
def fetch(self):
"""Attempt to fetch the contents of the URL.
If successful, and the data is HTML, extract further links and
add them to the crawler. Redirects are also added back there.
"""
while self.tries < self.max_tries:
self.tries += 1
self.request = None
try:
self.request = Request(self.log, self.url, self.crawler.pool)
yield from self.request.connect()
yield from self.request.send_request()
self.response = yield from self.request.get_response()
self.body = yield from self.response.read()
h_conn = self.response.get_header('connection').lower()
if h_conn != 'close':
self.request.close(recycle=True)
self.request = None
if self.tries > 1:
self.log(1, 'try', self.tries, 'for', self.url, 'success')
break
except (BadStatusLine, OSError) as exc:
self.exceptions.append(exc)
self.log(1, 'try', self.tries, 'for', self.url,
'raised', repr(exc))
##import pdb; pdb.set_trace()
# Don't reuse the connection in this case.
finally:
if self.request is not None:
self.request.close()
else:
# We never broke out of the while loop, i.e. all tries failed.
self.log(0, 'no success for', self.url,
'in', self.max_tries, 'tries')
return
next_url = self.response.get_redirect_url()
if next_url:
self.next_url = urllib.parse.urljoin(self.url, next_url)
if self.max_redirect > 0:
self.log(1, 'redirect to', self.next_url, 'from', self.url)
self.crawler.add_url(self.next_url, self.max_redirect-1)
else:
self.log(0, 'redirect limit reached for', self.next_url,
'from', self.url)
else:
if self.response.status == 200:
self.ctype = self.response.get_header('content-type')
self.pdict = {}
if self.ctype:
self.ctype, self.pdict = cgi.parse_header(self.ctype)
self.encoding = self.pdict.get('charset', 'utf-8')
if self.ctype == 'text/html':
body = self.body.decode(self.encoding, 'replace')
# Replace href with (?:href|src) to follow image links.
self.urls = set(re.findall(r'(?i)href=["\']?([^\s"\'<>]+)',
body))
if self.urls:
self.log(1, 'got', len(self.urls),
'distinct urls from', self.url)
self.new_urls = set()
for url in self.urls:
url = unescape(url)
url = urllib.parse.urljoin(self.url, url)
url, frag = urllib.parse.urldefrag(url)
if self.crawler.add_url(url):
self.new_urls.add(url)
def report(self, stats, file=None):
"""Print a report on the state for this URL.
Also update the Stats instance.
"""
if self.task is not None:
if not self.task.done():
stats.add('pending')
print(self.url, 'pending', file=file)
return
elif self.task.cancelled():
stats.add('cancelled')
print(self.url, 'cancelled', file=file)
return
elif self.task.exception():
stats.add('exception')
exc = self.task.exception()
stats.add('exception_' + exc.__class__.__name__)
print(self.url, exc, file=file)
return
if len(self.exceptions) == self.tries:
stats.add('fail')
exc = self.exceptions[-1]
stats.add('fail_' + str(exc.__class__.__name__))
print(self.url, 'error', exc, file=file)
elif self.next_url:
stats.add('redirect')
print(self.url, self.response.status, 'redirect', self.next_url,
file=file)
elif self.ctype == 'text/html':
stats.add('html')
size = len(self.body or b'')
stats.add('html_bytes', size)
print(self.url, self.response.status,
self.ctype, self.encoding,
size,
'%d/%d' % (len(self.new_urls or ()), len(self.urls or ())),
file=file)
elif self.response is None:
print(self.url, 'no response object')
else:
size = len(self.body or b'')
if self.response.status == 200:
stats.add('other')
stats.add('other_bytes', size)
else:
stats.add('error')
stats.add('error_bytes', size)
stats.add('status_%s' % self.response.status)
print(self.url, self.response.status,
self.ctype, self.encoding,
size,
file=file)
class Stats:
"""Record stats of various sorts."""
def __init__(self):
self.stats = {}
def add(self, key, count=1):
self.stats[key] = self.stats.get(key, 0) + count
def report(self, file=None):
for key, count in sorted(self.stats.items()):
print('%10d' % count, key, file=file)
class Crawler:
"""Crawl a set of URLs.
This manages three disjoint sets of URLs (todo, busy, done). The
data structures actually store dicts -- the values in todo give
the redirect limit, while the values in busy and done are Fetcher
instances.
"""
def __init__(self, log,
roots, exclude=None, strict=True, # What to crawl.
max_redirect=10, max_tries=4, # Per-url limits.
max_tasks=10, max_pool=10, # Global limits.
):
self.log = log
self.roots = roots
self.exclude = exclude
self.strict = strict
self.max_redirect = max_redirect
self.max_tries = max_tries
self.max_tasks = max_tasks
self.max_pool = max_pool
self.todo = {}
self.busy = {}
self.done = {}
self.pool = ConnectionPool(self.log, max_pool, max_tasks)
self.root_domains = set()
for root in roots:
parts = urllib.parse.urlparse(root)
host, port = urllib.parse.splitport(parts.netloc)
if not host:
continue
if re.match(r'\A[\d\.]*\Z', host):
self.root_domains.add(host)
else:
host = host.lower()
if self.strict:
self.root_domains.add(host)
if host.startswith('www.'):
self.root_domains.add(host[4:])
else:
self.root_domains.add('www.' + host)
else:
parts = host.split('.')
if len(parts) > 2:
host = '.'.join(parts[-2:])
self.root_domains.add(host)
for root in roots:
self.add_url(root)
self.governor = asyncio.locks.Semaphore(max_tasks)
self.termination = asyncio.locks.Condition()
self.t0 = time.time()
self.t1 = None
def close(self):
"""Close resources (currently only the pool)."""
self.pool.close()
def host_okay(self, host):
"""Check if a host should be crawled.
A literal match (after lowercasing) is always good. For hosts
that don't look like IP addresses, some approximate matches
are okay depending on the strict flag.
"""
host = host.lower()
if host in self.root_domains:
return True
if re.match(r'\A[\d\.]*\Z', host):
return False
if self.strict:
return self._host_okay_strictish(host)
else:
return self._host_okay_lenient(host)
def _host_okay_strictish(self, host):
"""Check if a host should be crawled, strict-ish version.
This checks for equality modulo an initial 'www.' component.
"""
if host.startswith('www.'):
if host[4:] in self.root_domains:
return True
else:
if 'www.' + host in self.root_domains:
return True
return False
def _host_okay_lenient(self, host):
"""Check if a host should be crawled, lenient version.
This compares the last two components of the host.
"""
parts = host.split('.')
if len(parts) > 2:
host = '.'.join(parts[-2:])
return host in self.root_domains
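# Example: with roots {'example.com'}, strict mode accepts 'www.example.com'
# (and only that variant), while lenient mode accepts any subdomain such as
# 'foo.example.com', since only the last two components are compared.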
def add_url(self, url, max_redirect=None):
"""Add a URL to the todo list if not seen before."""
if self.exclude and re.search(self.exclude, url):
return False
parts = urllib.parse.urlparse(url)
if parts.scheme not in ('http', 'https'):
self.log(2, 'skipping non-http scheme in', url)
return False
host, port = urllib.parse.splitport(parts.netloc)
if not self.host_okay(host):
self.log(2, 'skipping non-root host in', url)
return False
if max_redirect is None:
max_redirect = self.max_redirect
if url in self.todo or url in self.busy or url in self.done:
return False
self.log(1, 'adding', url, max_redirect)
self.todo[url] = max_redirect
return True
@asyncio.coroutine
def crawl(self):
"""Run the crawler until all finished."""
with (yield from self.termination):
while self.todo or self.busy:
if self.todo:
url, max_redirect = self.todo.popitem()
fetcher = Fetcher(self.log, url,
crawler=self,
max_redirect=max_redirect,
max_tries=self.max_tries,
)
self.busy[url] = fetcher
fetcher.task = asyncio.Task(self.fetch(fetcher))
else:
yield from self.termination.wait()
self.t1 = time.time()
@asyncio.coroutine
def fetch(self, fetcher):
"""Call the Fetcher's fetch(), with a limit on concurrency.
Once this returns, move the fetcher from busy to done.
"""
url = fetcher.url
with (yield from self.governor):
try:
yield from fetcher.fetch() # Fetcher gonna fetch.
finally:
# Force GC of the task, so the error is logged.
fetcher.task = None
with (yield from self.termination):
self.done[url] = fetcher
del self.busy[url]
self.termination.notify()
def report(self, file=None):
"""Print a report on all completed URLs."""
if self.t1 is None:
self.t1 = time.time()
dt = self.t1 - self.t0
if dt and self.max_tasks:
speed = len(self.done) / dt / self.max_tasks
else:
speed = 0
stats = Stats()
print('*** Report ***', file=file)
try:
show = []
show.extend(self.done.items())
show.extend(self.busy.items())
show.sort()
for url, fetcher in show:
fetcher.report(stats, file=file)
except KeyboardInterrupt:
print('\nInterrupted', file=file)
print('Finished', len(self.done),
'urls in %.3f secs' % dt,
'(max_tasks=%d)' % self.max_tasks,
'(%.3f urls/sec/task)' % speed,
file=file)
stats.report(file=file)
print('Todo:', len(self.todo), file=file)
print('Busy:', len(self.busy), file=file)
print('Done:', len(self.done), file=file)
print('Date:', time.ctime(), 'local time', file=file)
def main():
"""Main program.
Parse arguments, set up event loop, run crawler, print report.
"""
args = ARGS.parse_args()
if not args.roots:
print('Use --help for command line help')
return
log = Logger(args.level)
if args.iocp:
from asyncio.windows_events import ProactorEventLoop
loop = ProactorEventLoop()
asyncio.set_event_loop(loop)
elif args.select:
loop = asyncio.SelectorEventLoop()
asyncio.set_event_loop(loop)
else:
loop = asyncio.get_event_loop()
roots = {fix_url(root) for root in args.roots}
crawler = Crawler(log,
roots, exclude=args.exclude,
strict=args.strict,
max_redirect=args.max_redirect,
max_tries=args.max_tries,
max_tasks=args.max_tasks,
max_pool=args.max_pool,
)
try:
loop.run_until_complete(crawler.crawl()) # Crawler gonna crawl.
except KeyboardInterrupt:
sys.stderr.flush()
print('\nInterrupted\n')
finally:
crawler.report()
crawler.close()
loop.close()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
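# A typical invocation, assuming this file is saved as crawl.py and that the
# ARGS parser defined earlier in the file exposes these options:
#
#   python crawl.py --max_tasks 20 http://example.com/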
| |
import numpy as np
import cea.technologies.chiller_absorption as chiller_absorption
import cea.technologies.chiller_vapor_compression as chiller_vapor_compression
import cea.technologies.cooling_tower as CTModel
from cea.constants import HEAT_CAPACITY_OF_WATER_JPERKGK
from cea.optimization.constants import VCC_T_COOL_IN, DT_COOL, ACH_T_IN_FROM_CHP_K
from cea.technologies.constants import G_VALUE_CENTRALIZED # this is where to differentiate chiller performances
from cea.technologies.pumps import calc_water_body_uptake_pumping
import pandas as pd
__author__ = "Sreepathi Bhargava Krishna"
__copyright__ = "Copyright 2015, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Sreepathi Bhargava Krishna", "Shanshan Hsieh", "Jimeno Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def calc_vcc_operation(Qc_from_VCC_W, T_DCN_re_K, T_DCN_sup_K, T_source_K, chiller_size, VCC_chiller):
Qc_from_VCC_W = min(Qc_from_VCC_W, chiller_size) # the chiller cannot supply more cooling than the installed capacity allows
VCC_operation = chiller_vapor_compression.calc_VCC(chiller_size, Qc_from_VCC_W, T_DCN_sup_K, T_DCN_re_K, T_source_K, VCC_chiller)
# unpack outputs
Qc_VCC_W = VCC_operation['q_chw_W']
E_used_VCC_W = VCC_operation['wdot_W']
return Qc_VCC_W, E_used_VCC_W
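# Sketch: a request above the installed capacity is truncated first, e.g. a
# 2.0 MW demand on a 1.5 MW chiller is evaluated as a 1.5 MW load.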
def calc_vcc_CT_operation(Qc_from_VCC_W,
T_DCN_re_K,
T_DCN_sup_K,
T_source_K,
size_chiller_CT,
VCC_chiller):
VCC_operation = chiller_vapor_compression.calc_VCC(size_chiller_CT, Qc_from_VCC_W, T_DCN_sup_K, T_DCN_re_K, T_source_K, VCC_chiller)
# unpack outputs
Qc_CT_VCC_W = VCC_operation['q_cw_W']
Qc_VCC_W = VCC_operation['q_chw_W']
# calculate cooling tower
wdot_CT_Wh = CTModel.calc_CT(Qc_CT_VCC_W, size_chiller_CT)
# calculate energy consumption and variable costs
E_used_VCC_W = (VCC_operation['wdot_W'] + wdot_CT_Wh)
return Qc_VCC_W, E_used_VCC_W
def calc_chiller_absorption_operation(Qc_ACH_req_W, T_DCN_re_K, T_DCN_sup_K, T_ACH_in_C, T_ground_K, chiller_prop,
size_ACH_W):
if T_DCN_re_K == T_DCN_sup_K:
mdot_ACH_kgpers = 0
else:
mdot_ACH_kgpers = Qc_ACH_req_W / (
(T_DCN_re_K - T_DCN_sup_K) * HEAT_CAPACITY_OF_WATER_JPERKGK) # required chw flow rate from ACH
ACH_operation = chiller_absorption.calc_chiller_main(mdot_ACH_kgpers,
T_DCN_sup_K,
T_DCN_re_K,
T_ACH_in_C,
T_ground_K,
chiller_prop)
Qc_CT_ACH_W = ACH_operation['q_cw_W']
# calculate cooling tower
wdot_CT_Wh = CTModel.calc_CT(Qc_CT_ACH_W, size_ACH_W)
# calculate energy consumption and variable costs
Qh_CHP_ACH_W = ACH_operation['q_hw_W']
E_used_ACH_W = ACH_operation['wdot_W'] + wdot_CT_Wh
return Qc_CT_ACH_W, Qh_CHP_ACH_W, E_used_ACH_W
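# Sketch of the flow-rate step above: from Q = mdot * cp * dT, a 1 MW chilled
# water demand across a 6 K return/supply split requires roughly
#   mdot = 1e6 / (6 * 4185) ~ 39.8 kg/s
# assuming cp = HEAT_CAPACITY_OF_WATER_JPERKGK = 4185 J/(kg K).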
def cooling_resource_activator(Q_thermal_req,
T_district_cooling_supply_K,
T_district_cooling_return_K,
Q_therm_Lake_W,
T_source_average_Lake_K,
daily_storage_class,
T_ground_K,
master_to_slave_variables,
absorption_chiller,
CCGT_operation_data,
VCC_chiller):
"""
:param Q_thermal_req:
:param T_district_cooling_supply_K:
:param T_district_cooling_return_K:
:param Q_therm_Lake_W:
:param T_source_average_Lake_K:
:param daily_storage_class:
:param T_ground_K:
:param master_to_slave_variables:
:param cea.technologies.chiller_absorption.AbsorptionChiller absorption_chiller:
:param CCGT_operation_data:
:return:
"""
## initializing unmet cooling load and requirements from daily storage for this hour
Q_cooling_unmet_W = Q_thermal_req
Q_DailyStorage_gen_directload_W = 0.0
## ACTIVATE THE TRIGEN
if master_to_slave_variables.NG_Trigen_on == 1 and Q_cooling_unmet_W > 0.0 and not np.isclose(
T_district_cooling_supply_K,
T_district_cooling_return_K):
size_trigen_W = master_to_slave_variables.NG_Trigen_ACH_size_W
if Q_cooling_unmet_W > size_trigen_W:
Q_Trigen_gen_W = size_trigen_W
else:
Q_Trigen_gen_W = Q_cooling_unmet_W
# GET THE ABSORPTION CHILLER PERFORMANCE
T_ACH_in_C = ACH_T_IN_FROM_CHP_K - 273
Qc_CT_ACH_W, \
Qh_CCGT_req_W, \
E_ACH_req_W = calc_chiller_absorption_operation(Q_Trigen_gen_W,
T_district_cooling_return_K,
T_district_cooling_supply_K,
T_ACH_in_C,
T_ground_K,
absorption_chiller,
size_trigen_W)
# operation of the CCGT
Q_used_prim_CC_fn_W = CCGT_operation_data['q_input_fn_q_output_W']
q_output_CC_min_W = CCGT_operation_data['q_output_min_W']
Q_output_CC_max_W = CCGT_operation_data['q_output_max_W']
eta_elec_interpol = CCGT_operation_data['eta_el_fn_q_input']
# TODO: CONFIRM THAT THIS WORKS AS INTENDED
if Qh_CCGT_req_W >= q_output_CC_min_W:
if Q_cooling_unmet_W > size_trigen_W:
Q_Trigen_NG_gen_directload_W = size_trigen_W
Qc_Trigen_gen_storage_W = 0.0
Qc_from_storage_W = daily_storage_class.discharge_storage(Q_cooling_unmet_W - size_trigen_W)
Q_Trigen_gen_W = Q_Trigen_NG_gen_directload_W + Qc_Trigen_gen_storage_W
else:
Q_Trigen_NG_gen_directload_W = Q_cooling_unmet_W
Qc_Trigen_gen_storage_W = daily_storage_class.charge_storage(size_trigen_W - Q_cooling_unmet_W)
Qc_from_storage_W = 0.0
Q_Trigen_gen_W = Q_Trigen_NG_gen_directload_W + Qc_Trigen_gen_storage_W
T_ACH_in_C = ACH_T_IN_FROM_CHP_K - 273
Qc_CT_ACH_W, \
Qh_CCGT_req_W, \
E_ACH_req_W = calc_chiller_absorption_operation(Q_Trigen_gen_W,
T_district_cooling_return_K,
T_district_cooling_supply_K,
T_ACH_in_C,
T_ground_K,
absorption_chiller,
size_trigen_W)
# operation is possible since the load is above the minimal output
if Qh_CCGT_req_W <= Q_output_CC_max_W: # normal operation possible within the part-load regime
Q_CHP_gen_W = float(Qh_CCGT_req_W)
NG_Trigen_req_W = Q_used_prim_CC_fn_W(Q_CHP_gen_W)
E_Trigen_NG_gen_W = float(eta_elec_interpol(NG_Trigen_req_W)) * NG_Trigen_req_W
else: # only part of the demand can be delivered, as the 100% load point is reached
Q_CHP_gen_W = Q_output_CC_max_W
NG_Trigen_req_W = Q_used_prim_CC_fn_W(Q_CHP_gen_W)
E_Trigen_NG_gen_W = float(eta_elec_interpol(NG_Trigen_req_W)) * NG_Trigen_req_W
else:
Q_Trigen_gen_W = 0.0
NG_Trigen_req_W = 0.0
E_Trigen_NG_gen_W = 0.0
Q_Trigen_NG_gen_directload_W = 0.0
Qc_from_storage_W = 0.0
# update unmet cooling load
Q_cooling_unmet_W = Q_cooling_unmet_W - Q_Trigen_NG_gen_directload_W - Qc_from_storage_W
Q_DailyStorage_gen_directload_W += Qc_from_storage_W
else:
Q_Trigen_gen_W = 0.0
NG_Trigen_req_W = 0.0
E_Trigen_NG_gen_W = 0.0
Q_Trigen_NG_gen_directload_W = 0.0
# Base VCC water-source
if master_to_slave_variables.WS_BaseVCC_on == 1 and Q_cooling_unmet_W > 0.0 and T_source_average_Lake_K < VCC_T_COOL_IN and not np.isclose(
T_district_cooling_supply_K,
T_district_cooling_return_K):
# Free cooling possible from the lake
size_WS_BaseVCC_W = master_to_slave_variables.WS_BaseVCC_size_W
if Q_cooling_unmet_W > min(size_WS_BaseVCC_W, Q_therm_Lake_W): # min function handles both constraints at once, the limiting factors being the chiller size and the thermal capacity of the lake
Q_BaseVCC_WS_gen_directload_W = min(size_WS_BaseVCC_W, Q_therm_Lake_W)
Qc_BaseVCC_WS_gen_storage_W = 0.0
Qc_from_storage_W = daily_storage_class.discharge_storage(Q_cooling_unmet_W - min(size_WS_BaseVCC_W, Q_therm_Lake_W))
Q_BaseVCC_WS_gen_W = Q_BaseVCC_WS_gen_directload_W + Qc_BaseVCC_WS_gen_storage_W
else:
Q_BaseVCC_WS_gen_directload_W = Q_cooling_unmet_W
Qc_BaseVCC_WS_gen_storage_W = daily_storage_class.charge_storage(min(size_WS_BaseVCC_W, Q_therm_Lake_W) - Q_cooling_unmet_W)
Qc_from_storage_W = 0.0
Q_BaseVCC_WS_gen_W = Q_BaseVCC_WS_gen_directload_W + Qc_BaseVCC_WS_gen_storage_W
if (T_district_cooling_supply_K - DT_COOL) < T_source_average_Lake_K < VCC_T_COOL_IN: # if lake temperature lower than CT source, use compression chillers with lake water as source
WS_BaseVCC_capacity = master_to_slave_variables.WS_BaseVCC_size_W
Q_BaseVCC_WS_gen_W, \
E_BaseVCC_WS_req_W = calc_vcc_operation(Q_BaseVCC_WS_gen_W,
T_district_cooling_return_K,
T_district_cooling_supply_K,
T_source_average_Lake_K,
WS_BaseVCC_capacity,
VCC_chiller)
# Delta P from linearization after distribution optimization
E_pump_WS_req_W = calc_water_body_uptake_pumping(Q_BaseVCC_WS_gen_W,
T_district_cooling_return_K,
T_district_cooling_supply_K)
E_BaseVCC_WS_req_W += E_pump_WS_req_W
elif T_source_average_Lake_K <= (T_district_cooling_supply_K - DT_COOL): # bypass, do not use chiller but use heat exchange to cool the water directly
E_pump_WS_req_W = calc_water_body_uptake_pumping(Q_BaseVCC_WS_gen_W,
T_district_cooling_return_K,
T_district_cooling_supply_K)
E_BaseVCC_WS_req_W = E_pump_WS_req_W
else:
print("no lake water source baseload VCC was used")
Q_therm_Lake_W -= Q_BaseVCC_WS_gen_W # discount availability
Q_cooling_unmet_W = Q_cooling_unmet_W - Q_BaseVCC_WS_gen_W - Qc_from_storage_W + Qc_BaseVCC_WS_gen_storage_W # the provided cooling equals the produced cooling plus the cooling from storage minus the stored cooling
Q_DailyStorage_gen_directload_W += Qc_from_storage_W
else:
Q_BaseVCC_WS_gen_W = 0.0
E_BaseVCC_WS_req_W = 0.0
Q_BaseVCC_WS_gen_directload_W = 0.0
# Peak VCC water-source
if master_to_slave_variables.WS_PeakVCC_on == 1 and Q_cooling_unmet_W > 0.0 and T_source_average_Lake_K < VCC_T_COOL_IN and not np.isclose(
T_district_cooling_supply_K,
T_district_cooling_return_K):
# Free cooling possible from the lake
size_WS_PeakVCC_W = master_to_slave_variables.WS_PeakVCC_size_W
if Q_cooling_unmet_W > min(size_WS_PeakVCC_W, Q_therm_Lake_W):
Q_PeakVCC_WS_gen_directload_W = min(size_WS_PeakVCC_W, Q_therm_Lake_W)
Qc_PeakVCC_WS_gen_storage_W = 0.0
Qc_from_storage_W = daily_storage_class.discharge_storage(Q_cooling_unmet_W - min(size_WS_PeakVCC_W, Q_therm_Lake_W))
Q_PeakVCC_WS_gen_W = Q_PeakVCC_WS_gen_directload_W + Qc_PeakVCC_WS_gen_storage_W
else:
Q_PeakVCC_WS_gen_directload_W = Q_cooling_unmet_W
Qc_PeakVCC_WS_gen_storage_W = daily_storage_class.charge_storage(min(size_WS_PeakVCC_W, Q_therm_Lake_W) - Q_cooling_unmet_W)
Qc_from_storage_W = 0.0
Q_PeakVCC_WS_gen_W = Q_PeakVCC_WS_gen_directload_W + Qc_PeakVCC_WS_gen_storage_W
if (T_district_cooling_supply_K - DT_COOL) < T_source_average_Lake_K < VCC_T_COOL_IN:
WS_PeakVCC_capacity = master_to_slave_variables.WS_PeakVCC_size_W
Q_PeakVCC_WS_gen_W, \
E_PeakVCC_WS_req_W = calc_vcc_operation(Q_PeakVCC_WS_gen_W,
T_district_cooling_return_K,
T_district_cooling_supply_K,
T_source_average_Lake_K,
WS_PeakVCC_capacity,
VCC_chiller)
E_pump_WS_req_W = calc_water_body_uptake_pumping(Q_PeakVCC_WS_gen_W,
T_district_cooling_return_K,
T_district_cooling_supply_K)
E_PeakVCC_WS_req_W += E_pump_WS_req_W
elif T_source_average_Lake_K <= (T_district_cooling_supply_K - DT_COOL): # bypass, do not use VCC but use heat exchange to cool the water directly
E_pump_WS_req_W = calc_water_body_uptake_pumping(Q_PeakVCC_WS_gen_W,
T_district_cooling_return_K,
T_district_cooling_supply_K)
E_PeakVCC_WS_req_W = E_pump_WS_req_W
else:
print("no lake water source baseload VCC was used")
Q_therm_Lake_W -= Q_PeakVCC_WS_gen_W # discount availability
Q_cooling_unmet_W = Q_cooling_unmet_W - Q_PeakVCC_WS_gen_W - Qc_from_storage_W + Qc_PeakVCC_WS_gen_storage_W # the provided cooling equals the produced cooling plus the cooling from storage minus the stored cooling
Q_DailyStorage_gen_directload_W += Qc_from_storage_W
else:
Q_PeakVCC_WS_gen_directload_W = 0.0
Q_PeakVCC_WS_gen_W = 0.0
E_PeakVCC_WS_req_W = 0.0
# Base VCC air-source with a cooling tower
if master_to_slave_variables.AS_BaseVCC_on == 1 and Q_cooling_unmet_W > 0.0 and not np.isclose(
T_district_cooling_supply_K,
T_district_cooling_return_K):
size_AS_BaseVCC_W = master_to_slave_variables.AS_BaseVCC_size_W
if Q_cooling_unmet_W > size_AS_BaseVCC_W:
Q_BaseVCC_AS_gen_directload_W = size_AS_BaseVCC_W
Q_BaseVCC_AS_gen_storage_W = 0.0
Qc_from_storage_W = daily_storage_class.discharge_storage(Q_cooling_unmet_W - size_AS_BaseVCC_W)
Q_BaseVCC_AS_gen_W = Q_BaseVCC_AS_gen_directload_W + Q_BaseVCC_AS_gen_storage_W
else:
Q_BaseVCC_AS_gen_directload_W = Q_cooling_unmet_W
Q_BaseVCC_AS_gen_storage_W = daily_storage_class.charge_storage(size_AS_BaseVCC_W - Q_cooling_unmet_W)
Qc_from_storage_W = 0.0
Q_BaseVCC_AS_gen_W = Q_BaseVCC_AS_gen_directload_W + Q_BaseVCC_AS_gen_storage_W
Q_BaseVCC_AS_gen_W, \
E_BaseVCC_AS_req_W = calc_vcc_CT_operation(Q_BaseVCC_AS_gen_W,
T_district_cooling_return_K,
T_district_cooling_supply_K,
VCC_T_COOL_IN,
size_AS_BaseVCC_W,
VCC_chiller)
Q_cooling_unmet_W = Q_cooling_unmet_W - Q_BaseVCC_AS_gen_directload_W - Qc_from_storage_W
Q_DailyStorage_gen_directload_W += Qc_from_storage_W
else:
Q_BaseVCC_AS_gen_W = 0.0
E_BaseVCC_AS_req_W = 0.0
Q_BaseVCC_AS_gen_directload_W = 0.0
# Peak VCC air-source with a cooling tower
if master_to_slave_variables.AS_PeakVCC_on == 1 and Q_cooling_unmet_W > 0.0 and not np.isclose(
T_district_cooling_supply_K,
T_district_cooling_return_K):
size_AS_PeakVCC_W = master_to_slave_variables.AS_PeakVCC_size_W
if Q_cooling_unmet_W > size_AS_PeakVCC_W:
Q_PeakVCC_AS_gen_directload_W = size_AS_PeakVCC_W
Q_PeakVCC_AS_gen_storage_W = 0.0
Qc_from_storage_W = daily_storage_class.discharge_storage(Q_cooling_unmet_W - size_AS_PeakVCC_W)
Q_PeakVCC_AS_gen_W = Q_PeakVCC_AS_gen_directload_W + Q_PeakVCC_AS_gen_storage_W
else:
Q_PeakVCC_AS_gen_directload_W = Q_cooling_unmet_W
Q_PeakVCC_AS_gen_storage_W = daily_storage_class.charge_storage(size_AS_PeakVCC_W - Q_cooling_unmet_W)
Qc_from_storage_W = 0.0
Q_PeakVCC_AS_gen_W = Q_PeakVCC_AS_gen_directload_W + Q_PeakVCC_AS_gen_storage_W
Q_PeakVCC_AS_gen_W, \
E_PeakVCC_AS_req_W = calc_vcc_CT_operation(Q_PeakVCC_AS_gen_W,
T_district_cooling_return_K,
T_district_cooling_supply_K,
VCC_T_COOL_IN,
size_AS_PeakVCC_W,
VCC_chiller)
Q_cooling_unmet_W = Q_cooling_unmet_W - Q_PeakVCC_AS_gen_directload_W - Qc_from_storage_W
Q_DailyStorage_gen_directload_W += Qc_from_storage_W
else:
Q_PeakVCC_AS_gen_W = 0.0
E_PeakVCC_AS_req_W = 0.0
Q_PeakVCC_AS_gen_directload_W = 0.0
if Q_cooling_unmet_W > 1.0E-3:
Q_BackupVCC_AS_gen_W = Q_cooling_unmet_W # this will become the back-up chiller
Q_BackupVCC_AS_directload_W = Q_cooling_unmet_W
else:
Q_BackupVCC_AS_gen_W = 0.0
Q_BackupVCC_AS_directload_W = 0.0
## writing outputs
electricity_output = {
'E_BaseVCC_WS_req_W': E_BaseVCC_WS_req_W,
'E_PeakVCC_WS_req_W': E_PeakVCC_WS_req_W,
'E_BaseVCC_AS_req_W': E_BaseVCC_AS_req_W,
'E_PeakVCC_AS_req_W': E_PeakVCC_AS_req_W,
'E_Trigen_NG_gen_W': E_Trigen_NG_gen_W
}
thermal_output = {
# cooling total
'Q_Trigen_NG_gen_W': Q_Trigen_gen_W,
'Q_BaseVCC_WS_gen_W': Q_BaseVCC_WS_gen_W,
'Q_PeakVCC_WS_gen_W': Q_PeakVCC_WS_gen_W,
'Q_BaseVCC_AS_gen_W': Q_BaseVCC_AS_gen_W,
'Q_PeakVCC_AS_gen_W': Q_PeakVCC_AS_gen_W,
'Q_BackupVCC_AS_gen_W': Q_BackupVCC_AS_gen_W,
# cooling to direct load
'Q_DailyStorage_gen_directload_W': Q_DailyStorage_gen_directload_W,
"Q_Trigen_NG_gen_directload_W": Q_Trigen_NG_gen_directload_W,
"Q_BaseVCC_WS_gen_directload_W": Q_BaseVCC_WS_gen_directload_W,
"Q_PeakVCC_WS_gen_directload_W": Q_PeakVCC_WS_gen_directload_W,
"Q_BaseVCC_AS_gen_directload_W": Q_BaseVCC_AS_gen_directload_W,
"Q_PeakVCC_AS_gen_directload_W": Q_PeakVCC_AS_gen_directload_W,
"Q_BackupVCC_AS_directload_W": Q_BackupVCC_AS_directload_W,
}
gas_output = {
'NG_Trigen_req_W': NG_Trigen_req_W
}
return daily_storage_class, thermal_output, electricity_output, gas_output
| |
"""The generic interface for all exporters.
"""
# mbed SDK
# Copyright (c) 2011-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from os.path import join, abspath, dirname, exists
from os.path import basename, relpath, normpath, splitext
from os import makedirs, walk
import copy
from shutil import rmtree, copyfile
import zipfile
ROOT = abspath(join(dirname(__file__), ".."))
sys.path.insert(0, ROOT)
from tools.build_api import prepare_toolchain
from tools.build_api import scan_resources
from tools.toolchains import Resources
from tools.export import lpcxpresso, ds5_5, iar, makefile
from tools.export import embitz, coide, kds, simplicity, atmelstudio, mcuxpresso
from tools.export import sw4stm32, e2studio, zip, cmsis, uvision, cdt, vscode
from tools.export import gnuarmeclipse
from tools.export import qtcreator
from tools.targets import TARGET_NAMES
EXPORTERS = {
'uvision5': uvision.Uvision,
'uvision': uvision.Uvision,
'lpcxpresso': lpcxpresso.LPCXpresso,
'gcc_arm': makefile.GccArm,
'make_gcc_arm': makefile.GccArm,
'make_armc5': makefile.Armc5,
'make_armc6': makefile.Armc6,
'make_iar': makefile.IAR,
'ds5_5': ds5_5.DS5_5,
'iar': iar.IAR,
'embitz' : embitz.EmBitz,
'coide' : coide.CoIDE,
'kds' : kds.KDS,
'simplicityv3' : simplicity.SimplicityV3,
'atmelstudio' : atmelstudio.AtmelStudio,
'sw4stm32' : sw4stm32.Sw4STM32,
'e2studio' : e2studio.E2Studio,
'eclipse_gcc_arm' : cdt.EclipseGcc,
'eclipse_iar' : cdt.EclipseIAR,
'eclipse_armc5' : cdt.EclipseArmc5,
'gnuarmeclipse': gnuarmeclipse.GNUARMEclipse,
'mcuxpresso': mcuxpresso.MCUXpresso,
'qtcreator': qtcreator.QtCreator,
'vscode_gcc_arm' : vscode.VSCodeGcc,
'vscode_iar' : vscode.VSCodeIAR,
'vscode_armc5' : vscode.VSCodeArmc5
}
ERROR_MESSAGE_UNSUPPORTED_TOOLCHAIN = """
Sorry, the target %s is not currently supported on the %s toolchain.
Please refer to <a href='/handbook/Exporting-to-offline-toolchains' target='_blank'>Exporting to offline toolchains</a> for more information.
"""
ERROR_MESSAGE_NOT_EXPORT_LIBS = """
To export this project please <a href='http://mbed.org/compiler/?import=http://mbed.org/users/mbed_official/code/mbed-export/k&mode=lib' target='_blank'>import the export version of the mbed library</a>.
"""
def mcu_ide_list():
"""Shows list of exportable ides
"""
supported_ides = sorted(EXPORTERS.keys())
return "\n".join(supported_ides)
def mcu_ide_matrix(verbose_html=False):
"""Shows target map using prettytable
Keyword argumets:
verbose_html - print the matrix in html format
"""
supported_ides = sorted(EXPORTERS.keys())
# Only use it in this function so building works without extra modules
from prettytable import PrettyTable, ALL
# All tests status table print
table_printer = PrettyTable(["Platform"] + supported_ides)
# Align table
for col in supported_ides:
table_printer.align[col] = "c"
table_printer.align["Platform"] = "l"
perm_counter = 0
for target in sorted(TARGET_NAMES):
row = [target] # First column is platform name
for ide in supported_ides:
text = "-"
if EXPORTERS[ide].is_target_supported(target):
if verbose_html:
text = "✓"
else:
text = "x"
perm_counter += 1
row.append(text)
table_printer.add_row(row)
table_printer.border = True
table_printer.vrules = ALL
table_printer.hrules = ALL
# creates an HTML page in a shorter format suitable for readme.md
if verbose_html:
result = table_printer.get_html_string()
else:
result = table_printer.get_string()
result += "\n"
result += "Total IDEs: %d\n"% (len(supported_ides))
if verbose_html:
result += "<br>"
result += "Total platforms: %d\n"% (len(TARGET_NAMES))
if verbose_html:
result += "<br>"
result += "Total permutations: %d"% (perm_counter)
if verbose_html:
result = result.replace("&amp;", "&")
return result
def get_exporter_toolchain(ide):
""" Return the exporter class and the toolchain string as a tuple
Positional arguments:
ide - the ide name of an exporter
"""
return EXPORTERS[ide], EXPORTERS[ide].TOOLCHAIN
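# Sketch: every exporter class carries a TOOLCHAIN attribute, so e.g.
#   exporter_cls, toolchain_name = get_exporter_toolchain('gcc_arm')
# yields (makefile.GccArm, 'GCC_ARM') for the Make/GCC exporter.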
def rewrite_basepath(file_name, resources, export_path, loc):
""" Replace the basepath of filename with export_path
Positional arguments:
file_name - the absolute path to a file
resources - the resources object that the file came from
export_path - the final destination of the file after export
"""
new_f = join(loc, relpath(file_name, resources.file_basepath[file_name]))
resources.file_basepath[new_f] = export_path
return new_f
def subtract_basepath(resources, export_path, loc=""):
""" Rewrite all of the basepaths with the export_path
Positional arguments:
resources - the resource object to rewrite the basepaths of
export_path - the final destination of the resources with respect to the
generated project files
"""
keys = ['s_sources', 'c_sources', 'cpp_sources', 'hex_files',
'objects', 'libraries', 'inc_dirs', 'headers', 'linker_script',
'lib_dirs']
for key in keys:
vals = getattr(resources, key)
if isinstance(vals, set):
vals = list(vals)
if isinstance(vals, list):
new_vals = []
for val in vals:
new_vals.append(rewrite_basepath(val, resources, export_path,
loc))
if isinstance(getattr(resources, key), set):
setattr(resources, key, set(new_vals))
else:
setattr(resources, key, new_vals)
elif vals:
setattr(resources, key, rewrite_basepath(vals, resources,
export_path, loc))
def generate_project_files(resources, export_path, target, name, toolchain, ide,
macros=None):
"""Generate the project files for a project
Positional arguments:
resources - a Resources object containing all of the files needed to build
this project
export_path - location to place project files
name - name of the project
toolchain - a toolchain class that corresponds to the toolchain used by the
IDE or makefile
ide - IDE name to export to
Optional arguments:
macros - additional macros that should be defined within the exported
project
"""
exporter_cls, _ = get_exporter_toolchain(ide)
exporter = exporter_cls(target, export_path, name, toolchain,
extra_symbols=macros, resources=resources)
exporter.generate()
files = exporter.generated_files
return files, exporter
def zip_export(file_name, prefix, resources, project_files, inc_repos):
"""Create a zip file from an exported project.
Positional Parameters:
file_name - the file name of the resulting zip file
prefix - a directory name that will prefix the entire zip file's contents
resources - a resources object with files that must be included in the zip
project_files - a list of extra files to be added to the root of the prefix
directory
inc_repos - when True, also include repository files and directories in the zip
"""
with zipfile.ZipFile(file_name, "w") as zip_file:
for prj_file in project_files:
zip_file.write(prj_file, join(prefix, basename(prj_file)))
for loc, res in resources.iteritems():
to_zip = (
res.headers + res.s_sources + res.c_sources +\
res.cpp_sources + res.libraries + res.hex_files + \
[res.linker_script] + res.bin_files + res.objects + \
res.json_files + res.lib_refs + res.lib_builds)
if inc_repos:
for directory in res.repo_dirs:
for root, _, files in walk(directory):
for repo_file in files:
source = join(root, repo_file)
to_zip.append(source)
res.file_basepath[source] = res.base_path
to_zip += res.repo_files
for source in to_zip:
if source:
zip_file.write(
source,
join(prefix, loc,
relpath(source, res.file_basepath[source])))
for source in res.lib_builds:
target_dir, _ = splitext(source)
dest = join(prefix, loc,
relpath(target_dir, res.file_basepath[source]),
".bld", "bldrc")
zip_file.write(source, dest)
def export_project(src_paths, export_path, target, ide, libraries_paths=None,
linker_script=None, notify=None, verbose=False, name=None,
inc_dirs=None, jobs=1, silent=False, extra_verbose=False,
config=None, macros=None, zip_proj=None, inc_repos=False,
build_profile=None, app_config=None):
"""Generates a project file and creates a zip archive if specified
Positional Arguments:
src_paths - a list of paths from which to find source files
export_path - a path specifying the location of generated project files
target - the mbed board/mcu for which to generate the executable
ide - the ide for which to generate the project fields
Keyword Arguments:
libraries_paths - paths to additional libraries
linker_script - path to the linker script for the specified target
notify - function is passed all events, and expected to handle notification
of the user, emit the events to a log, etc.
verbose - assigns the notify function to toolchains print_notify_verbose
name - project name
inc_dirs - additional include directories
jobs - number of threads
silent - silent build - no output
extra_verbose - assigns the notify function to toolchains
print_notify_verbose
config - toolchain's config object
macros - User-defined macros
zip_proj - string name of the zip archive you wish to create (exclude this
argument if you do not wish to create an archive)
"""
# Convert src_path to a list if needed
if isinstance(src_paths, dict):
paths = sum(src_paths.values(), [])
elif isinstance(src_paths, list):
paths = src_paths[:]
else:
paths = [src_paths]
# Extend src_paths with libraries_paths
if libraries_paths is not None:
paths.extend(libraries_paths)
if not isinstance(src_paths, dict):
src_paths = {"": paths}
# Export Directory
if not exists(export_path):
makedirs(export_path)
_, toolchain_name = get_exporter_toolchain(ide)
# Pass all params to the unified prepare_resources()
toolchain = prepare_toolchain(
paths, "", target, toolchain_name, macros=macros, jobs=jobs,
notify=notify, silent=silent, verbose=verbose,
extra_verbose=extra_verbose, config=config, build_profile=build_profile,
app_config=app_config)
# The first path will give the name to the library
toolchain.RESPONSE_FILES = False
if name is None:
name = basename(normpath(abspath(paths[0])))
# Call unified scan_resources
resource_dict = {loc: scan_resources(path, toolchain, inc_dirs=inc_dirs, collect_ignores=True)
for loc, path in src_paths.iteritems()}
resources = Resources()
toolchain.build_dir = export_path
config_header = toolchain.get_config_header()
resources.headers.append(config_header)
resources.file_basepath[config_header] = dirname(config_header)
if zip_proj:
subtract_basepath(resources, ".")
for loc, res in resource_dict.iteritems():
temp = copy.deepcopy(res)
subtract_basepath(temp, ".", loc)
resources.add(temp)
else:
for _, res in resource_dict.iteritems():
resources.add(res)
# Change linker script if specified
if linker_script is not None:
resources.linker_script = linker_script
files, exporter = generate_project_files(resources, export_path,
target, name, toolchain, ide,
macros=macros)
files.append(config_header)
if zip_proj:
for resource in resource_dict.values():
for label, res in resource.features.iteritems():
if label not in toolchain.target.features:
resource.add(res)
if isinstance(zip_proj, basestring):
zip_export(join(export_path, zip_proj), name, resource_dict,
files + list(exporter.static_files), inc_repos)
else:
zip_export(zip_proj, name, resource_dict,
files + list(exporter.static_files), inc_repos)
else:
for static_file in exporter.static_files:
if not exists(join(export_path, basename(static_file))):
copyfile(static_file, join(export_path, basename(static_file)))
return exporter
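# A minimal usage sketch (hypothetical paths; requires a full mbed-os
# checkout so that the target and toolchain profiles resolve):
#
#   exporter = export_project(
#       src_paths=['.'], export_path='./export', target='K64F',
#       ide='uvision5', name='my_project', zip_proj='my_project.zip')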
| |
from pandac.PandaModules import *
from toontown.toonbase.ToonBaseGlobal import *
from direct.gui.DirectGui import *
from pandac.PandaModules import *
from direct.distributed.ClockDelta import *
from toontown.minigame.OrthoWalk import *
from string import *
from toontown.toonbase import ToontownGlobals
from direct.distributed import DistributedObject
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import ClassicFSM
from direct.fsm import State
from toontown.toon import Toon
from direct.showbase import RandomNumGen
from toontown.toonbase import TTLocalizer
import random
from direct.showbase import PythonUtil
from toontown.hood import Place
import HouseGlobals
from toontown.building import ToonInteriorColors
from direct.showbase.MessengerGlobal import messenger
from toontown.dna.DNAParser import DNADoor
from otp.nametag.NametagGroup import NametagGroup
from otp.nametag.Nametag import Nametag
class DistributedHouse(DistributedObject.DistributedObject):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedHouse')
def __init__(self, cr):
DistributedObject.DistributedObject.__init__(self, cr)
self.houseType = 0
self.avId = -1
self.ownerId = 0
self.colorIndex = 0
self.house = None
self.name = ''
self.namePlate = None
self.nameText = None
self.nametag = None
self.floorMat = None
self.matText = None
self.randomGenerator = None
self.housePosInd = 0
self.house_loaded = 0
return
def disable(self):
DistributedObject.DistributedObject.disable(self)
def delete(self):
self.notify.debug('delete')
self.unload()
self.clearNametag()
if self.namePlate:
self.namePlate.removeNode()
del self.namePlate
self.namePlate = None
if self.floorMat:
self.floorMat.removeNode()
del self.floorMat
self.floorMat = None
if self.house:
self.house.removeNode()
del self.house
self.house_loaded = 0
del self.randomGenerator
DistributedObject.DistributedObject.delete(self)
return
def clearNametag(self):
if self.nametag != None:
self.nametag.unmanage(base.marginManager)
self.nametag.setAvatar(NodePath())
self.nametag.destroy()
self.nametag = None
return
def load(self):
self.notify.debug('load')
if not self.house_loaded:
if self.houseType >= len(self.cr.playGame.hood.loader.houseModels):
self.houseType = HouseGlobals.HOUSE_DEFAULT
houseModel = self.cr.playGame.hood.loader.houseModels[self.houseType]
self.house = houseModel.copyTo(self.cr.playGame.hood.loader.houseNode[self.housePosInd])
self.house_loaded = 1
self.cr.playGame.hood.loader.houseId2house[self.doId] = self.house
if self.houseType == HouseGlobals.HOUSE_DEFAULT:
self.__setHouseColor()
if self.houseType == HouseGlobals.HOUSE_DEFAULT or self.houseType == HouseGlobals.HOUSE_TEST:
self.__setupDoor()
else:
self.__setupDoorCustom()
messenger.send('houseLoaded-%d' % self.doId)
def announceGenerate(self):
DistributedObject.DistributedObject.announceGenerate(self)
messenger.send('setBuilding-' + str(self.doId))
def __setupDoor(self):
self.notify.debug('setupDoor')
self.dnaStore = self.cr.playGame.dnaStore
doorModelName = 'door_double_round_ul'
if doorModelName[-1:] == 'r':
doorModelName = doorModelName[:-1] + 'l'
else:
doorModelName = doorModelName[:-1] + 'r'
door = self.dnaStore.findNode(doorModelName)
door_origin = self.house.find('**/door_origin')
door_origin.setHpr(90, 0, 0)
door_origin.setScale(0.6, 0.6, 0.8)
door_origin.setPos(door_origin, 0.5, 0, 0.0)
doorNP = door.copyTo(door_origin)
self.door_origin = door_origin
self.randomGenerator = random.Random()
self.randomGenerator.seed(self.doId)
houseColor = HouseGlobals.stairWood
color = Vec4(houseColor[0], houseColor[1], houseColor[2], 1)
DNADoor.setupDoor(doorNP, door_origin, door_origin, self.dnaStore, str(self.doId), color)
self.__setupNamePlate()
self.__setupFloorMat()
self.__setupNametag()
def __setupDoorCustom(self):
self.randomGenerator = random.Random()
self.randomGenerator.seed(self.doId)
self.notify.debug('setupDoorCustom')
self.dnaStore = self.cr.playGame.dnaStore
door = self.house.find('**/door_0')
door_origin = self.house.find('**/door_origin')
door_origin.setHpr(90, 0, 0)
door_origin.setScale(0.6, 0.6, 0.8)
doorNP = door
self.door_origin = door_origin
color = Vec4(1, 1, 1, 1)
parent = door_origin
rightDoor = door.find('**/rightDoor')
rightDoor.setHpr(door_origin, Vec3(0, 0, 0))
leftDoor = door.find('**/leftDoor')
leftDoor.setHpr(door_origin, Vec3(0, 0, 0))
doorTrigger = doorNP.find('**/door_*_trigger')
doorTrigger.wrtReparentTo(door_origin)
doorTrigger.node().setName('door_trigger_' + str(self.doId))
self.__setupFloorMat(changeColor=False)
self.__setupNametag()
self.__setupNamePlateCustom()
def __setupNamePlate(self):
self.notify.debug('__setupNamePlate')
if self.namePlate:
self.namePlate.removeNode()
del self.namePlate
self.namePlate = None
nameText = TextNode('nameText')
r = self.randomGenerator.random()
g = self.randomGenerator.random()
b = self.randomGenerator.random()
nameText.setTextColor(r, g, b, 1)
nameText.setAlign(nameText.ACenter)
nameText.setFont(ToontownGlobals.getBuildingNametagFont())
nameText.setShadowColor(0, 0, 0, 1)
nameText.setBin('fixed')
if TTLocalizer.BuildingNametagShadow:
nameText.setShadow(*TTLocalizer.BuildingNametagShadow)
nameText.setWordwrap(16.0)
xScale = 1.0
numLines = 0
if self.name == '':
return
else:
houseName = TTLocalizer.AvatarsHouse % TTLocalizer.GetPossesive(self.name)
nameText.setText(houseName)
self.nameText = nameText
textHeight = nameText.getHeight() - 2
textWidth = nameText.getWidth()
xScale = 1.0
if textWidth > 16:
xScale = 16.0 / textWidth
sign_origin = self.house.find('**/sign_origin')
pos = sign_origin.getPos()
sign_origin.setPosHpr(pos[0], pos[1], pos[2] + 0.15 * textHeight, 90, 0, 0)
self.namePlate = sign_origin.attachNewNode(self.nameText)
self.namePlate.setDepthWrite(0)
self.namePlate.setPos(0, -0.05, 0)
self.namePlate.setScale(xScale)
return nameText
def __setupFloorMat(self, changeColor = True):
if self.floorMat:
self.floorMat.removeNode()
del self.floorMat
self.floorMat = None
mat = self.house.find('**/mat')
if changeColor:
mat.setColor(0.4, 0.357, 0.259, 1.0)
color = HouseGlobals.houseColors[self.housePosInd]
matText = TextNode('matText')
matText.setTextColor(color[0], color[1], color[2], 1)
matText.setAlign(matText.ACenter)
matText.setFont(ToontownGlobals.getBuildingNametagFont())
matText.setShadowColor(0, 0, 0, 1)
matText.setBin('fixed')
if TTLocalizer.BuildingNametagShadow:
matText.setShadow(*TTLocalizer.BuildingNametagShadow)
matText.setWordwrap(10.0)
xScale = 1.0
numLines = 0
if self.name == '':
return
else:
houseName = TTLocalizer.AvatarsHouse % TTLocalizer.GetPossesive(self.name)
matText.setText(houseName)
self.matText = matText
textHeight = matText.getHeight() - 2
textWidth = matText.getWidth()
xScale = 1.0
if textWidth > 8:
xScale = 8.0 / textWidth
mat_origin = self.house.find('**/mat_origin')
pos = mat_origin.getPos()
mat_origin.setPosHpr(pos[0] - 0.15 * textHeight, pos[1], pos[2], 90, -90, 0)
self.floorMat = mat_origin.attachNewNode(self.matText)
self.floorMat.setDepthWrite(0)
self.floorMat.setPos(0, -.025, 0)
self.floorMat.setScale(0.45 * xScale)
return
def __setupNametag(self):
if self.nametag:
self.clearNametag()
if self.name == '':
houseName = ''
else:
houseName = TTLocalizer.AvatarsHouse % TTLocalizer.GetPossesive(self.name)
self.nametag = NametagGroup()
self.nametag.setFont(ToontownGlobals.getBuildingNametagFont())
if TTLocalizer.BuildingNametagShadow:
self.nametag.setShadow(*TTLocalizer.BuildingNametagShadow)
self.nametag.setContents(Nametag.CName)
self.nametag.setColorCode(NametagGroup.CCToonBuilding)
self.nametag.setActive(0)
self.nametag.setAvatar(self.house)
self.nametag.setObjectCode(self.doId)
self.nametag.setName(houseName)
self.nametag.manage(base.marginManager)
def unload(self):
self.notify.debug('unload')
self.ignoreAll()
def setHouseReady(self):
self.notify.debug('setHouseReady')
try:
self.House_initialized
except AttributeError:
self.House_initialized = 1
self.load()
def setHousePos(self, index):
self.notify.debug('setHousePos')
self.housePosInd = index
self.__setHouseColor()
def setHouseType(self, index):
self.notify.debug('setHouseType')
self.houseType = index
def setFavoriteNum(self, index):
self.notify.debug('setFavoriteNum')
self.favoriteNum = index
def __setHouseColor(self):
if self.house:
bwall = self.house.find('**/*back')
rwall = self.house.find('**/*right')
fwall = self.house.find('**/*front')
lwall = self.house.find('**/*left')
kd = 0.8
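# kd scales each RGB channel of the wall color to darken the right/left
# walls, faking directional shading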
color = HouseGlobals.houseColors[self.colorIndex]
dark = (kd * color[0], kd * color[1], kd * color[2])
if not bwall.isEmpty():
bwall.setColor(color[0], color[1], color[2], 1)
if not fwall.isEmpty():
fwall.setColor(color[0], color[1], color[2], 1)
if not rwall.isEmpty():
rwall.setColor(dark[0], dark[1], dark[2], 1)
if not lwall.isEmpty():
lwall.setColor(dark[0], dark[1], dark[2], 1)
aColor = HouseGlobals.atticWood
attic = self.house.find('**/attic')
if not attic.isEmpty():
attic.setColor(aColor[0], aColor[1], aColor[2], 1)
color = HouseGlobals.houseColors2[self.colorIndex]
chimneyList = self.house.findAllMatches('**/chim*')
for chimney in chimneyList:
chimney.setColor(color[0], color[1], color[2], 1)
def setAvId(self, id):
self.avId = id
def setAvatarId(self, avId):
self.notify.debug('setAvatarId = %s' % avId)
self.ownerId = avId
def getAvatarId(self):
self.notify.debug('getAvatarId')
return self.ownerId
def setName(self, name):
self.name = name
if self.nameText and self.nameText.getText() != self.name:
if self.name == '':
self.nameText.setText('')
else:
self.nameText.setText(self.name + "'s\n House")
def getName(self):
return self.name
def b_setColor(self, colorInd):
self.setColor(colorInd)
self.d_setColor(colorInd)
def d_setColor(self, colorInd):
self.sendUpdate('setColor', [colorInd])
def setColor(self, colorInd):
self.colorIndex = colorInd
if self.house:
self.__setHouseColor()
def getColor(self):
return self.colorIndex
def __setupNamePlateCustom(self):
self.notify.debug('__setupNamePlateCustom')
if self.namePlate:
self.namePlate.removeNode()
del self.namePlate
self.namePlate = None
nameText = TextNode('nameText')
nameText.setCardAsMargin(0.1, 0.1, 0.1, 0.1)
nameText.setCardDecal(True)
nameText.setCardColor(1.0, 1.0, 1.0, 0.0)
r = self.randomGenerator.random()
g = self.randomGenerator.random()
b = self.randomGenerator.random()
nameText.setTextColor(r, g, b, 1)
nameText.setAlign(nameText.ACenter)
nameText.setFont(ToontownGlobals.getBuildingNametagFont())
nameText.setShadowColor(0, 0, 0, 1)
nameText.setBin('fixed')
if TTLocalizer.BuildingNametagShadow:
nameText.setShadow(*TTLocalizer.BuildingNametagShadow)
nameText.setWordwrap(16.0)
xScale = 1.0
numLines = 0
if self.name == '':
return
else:
houseName = TTLocalizer.AvatarsHouse % TTLocalizer.GetPossesive(self.name)
nameText.setText(houseName)
self.nameText = nameText
textHeight = nameText.getHeight() - 2
textWidth = nameText.getWidth()
xScale = 1.0
if textWidth > 16:
xScale = 16.0 / textWidth
sign_origin = self.house.find('**/sign_origin')
pos = sign_origin.getPos()
sign_origin.setPosHpr(pos[0], pos[1], pos[2] + 0.15 * textHeight, 90, 0, 0)
self.namePlate = sign_origin.attachNewNode(self.nameText)
self.namePlate.setDepthWrite(0)
self.namePlate.setPos(0, -0.05, 0)
self.namePlate.setScale(xScale)
return nameText
| |
"""Resource utility functions."""
from datetime import datetime
import pytz
import tzlocal
from resdk.constants import RESOLWE_DATETIME_FORMAT
def iterate_fields(fields, schema):
"""Recursively iterate over all DictField sub-fields.
:param fields: Field instance (e.g. input)
:type fields: dict
:param schema: Schema instance (e.g. input_schema)
:type schema: dict
"""
schema_dict = {val["name"]: val for val in schema}
for field_id, properties in fields.items():
if "group" in schema_dict[field_id]:
for _field_sch, _fields in iterate_fields(
properties, schema_dict[field_id]["group"]
):
yield (_field_sch, _fields)
else:
yield (schema_dict[field_id], fields)
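def _example_iterate_fields():
    """A minimal sketch with a hypothetical one-field schema."""
    schema = [{"name": "reads", "type": "basic:integer:", "label": "Reads"}]
    fields = {"reads": 42}
    # Yields one (field schema, containing dict) pair per leaf field:
    # [({'name': 'reads', ...}, {'reads': 42})]
    return list(iterate_fields(fields, schema))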
def iterate_schema(fields, schema, path=None):
"""Recursively iterate over all schema sub-fields.
:param fields: Field instance (e.g. input)
:type fields: dict
:param schema: Schema instance (e.g. input_schema)
:type schema: dict
:param path: Field path
:type path: string
"""
for field_schema in schema:
name = field_schema["name"]
if "group" in field_schema:
for rvals in iterate_schema(
fields[name] if name in fields else {},
field_schema["group"],
None if path is None else "{}.{}".format(path, name),
):
yield rvals
else:
if path is None:
yield (field_schema, fields)
else:
yield (field_schema, fields, "{}.{}".format(path, name))
def flatten_field(field, schema, path):
"""Reduce dicts of dicts to dot separated keys.
:param field: Field instance (e.g. input)
:type field: dict
:param schema: Schema instance (e.g. input_schema)
:type schema: dict
:param path: Field path
:type path: string
:return: flattened instance
:rtype: dictionary
"""
flat = {}
for field_schema, fields, current_path in iterate_schema(field, schema, path):
name = field_schema["name"]
typ = field_schema["type"]
label = field_schema["label"]
value = fields.get(name, None)
flat[current_path] = {"name": name, "type": typ, "label": label, "value": value}
return flat
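def _example_flatten_field():
    """A minimal sketch with a hypothetical grouped schema."""
    schema = [
        {"name": "opts", "label": "Options", "group": [
            {"name": "k", "type": "basic:integer:", "label": "K"},
        ]},
    ]
    field = {"opts": {"k": 5}}
    # Group members collapse to dot-separated keys:
    # {'input.opts.k': {'name': 'k', 'type': 'basic:integer:',
    #                   'label': 'K', 'value': 5}}
    return flatten_field(field, schema, "input")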
def fill_spaces(word, desired_length):
"""Fill spaces at the end until word reaches desired length."""
return str(word) + " " * (desired_length - len(word))
def _print_input_line(element_list, level):
"""Pretty print of input_schema."""
spacing = 2
for element in element_list:
if "group" in element:
print(
"{}- {} - {}".format(" " * level, element["name"], element["label"])
)
_print_input_line(element["group"], level + 1)
else:
max_name_len = max([len(elm["name"]) for elm in element_list])
max_type_len = max(
[
len(elm["type"]) or 0
for elm in [e for e in element_list if "group" not in e]
]
)
print(
"{}- {} {} - {}".format(
" " * level,
fill_spaces(element["name"], max_name_len + spacing),
fill_spaces("[" + element["type"] + "]", max_type_len + spacing),
element["label"],
)
)
def get_collection_id(collection):
"""Return id attribute of the object if it is collection, otherwise return given value."""
return collection.id if type(collection).__name__ == "Collection" else collection
def get_data_id(data):
"""Return id attribute of the object if it is data, otherwise return given value."""
return data.id if type(data).__name__ == "Data" else data
def get_descriptor_schema_id(dschema):
"""Get descriptor schema id.
Return id attribute of the object if it is descriptor schema,
otherwise return given value.
"""
return dschema.id if type(dschema).__name__ == "DescriptorSchema" else dschema
def get_process_id(process):
"""Return id attribute of the object if it is process, otherwise return given value."""
return process.id if type(process).__name__ == "Process" else process
def get_sample_id(sample):
"""Return id attribute of the object if it is sample, otherwise return given value."""
return sample.id if type(sample).__name__ == "Sample" else sample
def get_relation_id(relation):
"""Return id attribute of the object if it is relation, otherwise return given value."""
return relation.id if type(relation).__name__ == "Relation" else relation
def get_user_id(user):
"""Return id attribute of the object if it is relation, otherwise return given value."""
return user.id if type(user).__name__ == "User" else user
def is_collection(collection):
"""Return ``True`` if passed object is Collection and ``False`` otherwise."""
return type(collection).__name__ == "Collection"
def is_data(data):
"""Return ``True`` if passed object is Data and ``False`` otherwise."""
return type(data).__name__ == "Data"
def is_descriptor_schema(data):
"""Return ``True`` if passed object is DescriptorSchema and ``False`` otherwise."""
return type(data).__name__ == "DescriptorSchema"
def is_process(process):
"""Return ``True`` if passed object is Process and ``False`` otherwise."""
return type(process).__name__ == "Process"
def is_sample(sample):
"""Return ``True`` if passed object is Sample and ``False`` otherwise."""
return type(sample).__name__ == "Sample"
def is_relation(relation):
"""Return ``True`` if passed object is Relation and ``False`` otherwise."""
return type(relation).__name__ == "Relation"
def is_user(user):
"""Return ``True`` if passed object is User and ``False`` otherwise."""
return type(user).__name__ == "User"
def is_group(group):
"""Return ``True`` if passed object is Group and ``False`` otherwise."""
return type(group).__name__ == "Group"
def parse_resolwe_datetime(dtime):
"""Convert string representation of time to local datetime.datetime object."""
if dtime:
# Get naive (=time-zone unaware) version of UTC time:
utc_naive = datetime.strptime(dtime[:-6], RESOLWE_DATETIME_FORMAT)
# Localize the time so it includes UTC timezone info:
utc_aware = pytz.utc.localize(utc_naive)
        # Get the local time zone:
local_tz = tzlocal.get_localzone()
# Present time in the local time zone
local_time = utc_aware.astimezone(local_tz)
return local_time
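# Example (illustrative; assumes RESOLWE_DATETIME_FORMAT matches the
# server's "%Y-%m-%dT%H:%M:%S.%f" layout with a trailing "+00:00" offset):
#
#   parse_resolwe_datetime("2020-11-01T02:15:00.123456+00:00")
#   # -> datetime for 02:15 UTC, expressed in the local time zone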
| |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import argparse
import time
import math
import paddle
import paddle.fluid as fluid
import paddle.fluid.profiler as profiler
from paddle.fluid import core
import unittest
from multiprocessing import Process
import os
import sys
import signal
from test_dist_base import TestDistRunnerBase, runtime_main
# Fix seed for test
fluid.default_startup_program().random_seed = 1
fluid.default_main_program().random_seed = 1
train_parameters = {
"input_size": [3, 224, 224],
"input_mean": [0.485, 0.456, 0.406],
"input_std": [0.229, 0.224, 0.225],
"learning_strategy": {
"name": "piecewise_decay",
"epochs": [30, 60, 90],
"steps": [0.1, 0.01, 0.001, 0.0001]
}
}
class SE_ResNeXt():
def __init__(self, layers=50):
self.params = train_parameters
self.layers = layers
def net(self, input, class_dim=1000):
layers = self.layers
supported_layers = [50, 101, 152]
assert layers in supported_layers, \
"supported layers are {} but input layer is {}".format(supported_layers, layers)
if layers == 50:
cardinality = 32
reduction_ratio = 16
depth = [3, 4, 6, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=7,
stride=2,
act='relu')
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
elif layers == 101:
cardinality = 32
reduction_ratio = 16
depth = [3, 4, 23, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=7,
stride=2,
act='relu')
conv = fluid.layers.pool2d(
input=conv,
pool_size=3,
pool_stride=2,
pool_padding=1,
pool_type='max')
elif layers == 152:
cardinality = 64
reduction_ratio = 16
depth = [3, 8, 36, 3]
num_filters = [128, 256, 512, 1024]
conv = self.conv_bn_layer(
input=input,
num_filters=64,
filter_size=3,
stride=2,
act='relu')
conv = self.conv_bn_layer(
input=conv, num_filters=64, filter_size=3, stride=1, act='relu')
conv = self.conv_bn_layer(
input=conv,
num_filters=128,
filter_size=3,
stride=1,
act='relu')
conv = fluid.layers.pool2d(
input=conv, pool_size=3, pool_stride=2, pool_padding=1, \
pool_type='max')
for block in range(len(depth)):
for i in range(depth[block]):
conv = self.bottleneck_block(
input=conv,
num_filters=num_filters[block],
stride=2 if i == 0 and block != 0 else 1,
cardinality=cardinality,
reduction_ratio=reduction_ratio)
pool = fluid.layers.pool2d(
input=conv, pool_size=7, pool_type='avg', global_pooling=True)
drop = fluid.layers.dropout(x=pool, dropout_prob=0.2)
stdv = 1.0 / math.sqrt(drop.shape[1] * 1.0)
out = fluid.layers.fc(
input=drop,
size=class_dim,
act='softmax',
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.05)))
return out
def shortcut(self, input, ch_out, stride):
ch_in = input.shape[1]
if ch_in != ch_out or stride != 1:
filter_size = 1
return self.conv_bn_layer(input, ch_out, filter_size, stride)
else:
return input
def bottleneck_block(self, input, num_filters, stride, cardinality,
reduction_ratio):
conv0 = self.conv_bn_layer(
input=input, num_filters=num_filters, filter_size=1, act='relu')
conv1 = self.conv_bn_layer(
input=conv0,
num_filters=num_filters,
filter_size=3,
stride=stride,
groups=cardinality,
act='relu')
conv2 = self.conv_bn_layer(
input=conv1, num_filters=num_filters * 2, filter_size=1, act=None)
scale = self.squeeze_excitation(
input=conv2,
num_channels=num_filters * 2,
reduction_ratio=reduction_ratio)
short = self.shortcut(input, num_filters * 2, stride)
return fluid.layers.elementwise_add(x=short, y=scale, act='relu')
def conv_bn_layer(self,
input,
num_filters,
filter_size,
stride=1,
groups=1,
act=None):
conv = fluid.layers.conv2d(
input=input,
num_filters=num_filters,
filter_size=filter_size,
stride=stride,
padding=(filter_size - 1) // 2,
groups=groups,
act=None,
# avoid pserver CPU init differs from GPU
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.05)),
bias_attr=False)
return fluid.layers.batch_norm(input=conv, act=act)
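    # Squeeze-and-Excitation block: global average pooling squeezes each
    # channel to a single value, an FC layer reduces the channel count by
    # reduction_ratio (ReLU), a second FC restores it (sigmoid), and the
    # resulting per-channel weights rescale the input feature map.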
def squeeze_excitation(self, input, num_channels, reduction_ratio):
pool = fluid.layers.pool2d(
input=input, pool_size=0, pool_type='avg', global_pooling=True)
stdv = 1.0 / math.sqrt(pool.shape[1] * 1.0)
squeeze = fluid.layers.fc(
input=pool,
size=num_channels // reduction_ratio,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.05)),
act='relu')
stdv = 1.0 / math.sqrt(squeeze.shape[1] * 1.0)
excitation = fluid.layers.fc(
input=squeeze,
size=num_channels,
param_attr=fluid.ParamAttr(
initializer=fluid.initializer.Constant(value=0.05)),
act='sigmoid')
scale = fluid.layers.elementwise_mul(x=input, y=excitation, axis=0)
return scale
class DistSeResneXt2x2(TestDistRunnerBase):
def get_model(self, batch_size=2, use_dgc=False):
# Input data
image = fluid.layers.data(
name="data", shape=[3, 224, 224], dtype='float32')
label = fluid.layers.data(name="int64", shape=[1], dtype='int64')
# Train program
model = SE_ResNeXt(layers=50)
out = model.net(input=image, class_dim=102)
cost = fluid.layers.cross_entropy(input=out, label=label)
avg_cost = fluid.layers.mean(x=cost)
acc_top1 = fluid.layers.accuracy(input=out, label=label, k=1)
acc_top5 = fluid.layers.accuracy(input=out, label=label, k=5)
# Evaluator
test_program = fluid.default_main_program().clone(for_test=True)
# Optimization
total_images = 6149 # flowers
epochs = [30, 60, 90]
step = int(total_images / batch_size + 1)
bd = [step * e for e in epochs]
base_lr = 0.1
lr = [base_lr * (0.1**i) for i in range(len(bd) + 1)]
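        # piecewise decay: lr is 0.1 before step bd[0], 0.01 until bd[1],
        # 0.001 until bd[2], and 0.0001 afterwards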
if not use_dgc:
optimizer = fluid.optimizer.Momentum(
learning_rate=fluid.layers.piecewise_decay(
boundaries=bd, values=lr),
momentum=0.9,
regularization=fluid.regularizer.L2Decay(1e-4))
else:
optimizer = fluid.optimizer.DGCMomentumOptimizer(
learning_rate=fluid.layers.piecewise_decay(
boundaries=bd, values=lr),
momentum=0.9,
rampup_begin_step=0,
regularization=fluid.regularizer.L2Decay(1e-4))
optimizer.minimize(avg_cost)
        # Reader (the flowers *test* split feeds both readers here,
        # presumably to keep the distributed test small)
train_reader = paddle.batch(
paddle.dataset.flowers.test(use_xmap=False), batch_size=batch_size)
test_reader = paddle.batch(
paddle.dataset.flowers.test(use_xmap=False), batch_size=batch_size)
return test_program, avg_cost, train_reader, test_reader, acc_top1, out
if __name__ == "__main__":
runtime_main(DistSeResneXt2x2)
| |
#!/usr/bin/env python
# Copyright (c) 2011-2018, wradlib developers.
# Distributed under the MIT License. See LICENSE.txt for more info.
import os
import numpy as np
import wradlib.util as util
import unittest
import datetime as dt
class HelperFunctionsTest(unittest.TestCase):
def test__shape_to_size(self):
self.assertEqual(util._shape_to_size((10, 10, 10)), 10 * 10 * 10)
def test__idvalid(self):
data = np.array(
[np.inf, np.nan, -99., 99, -9999., -9999, -10., -5., 0., 5., 10.])
self.assertTrue(
np.allclose(util._idvalid(data), np.array([6, 7, 8, 9, 10])))
self.assertTrue(np.allclose(util._idvalid(data, minval=-5., maxval=5.),
np.array([7, 8, 9])))
self.assertTrue(
np.allclose(util._idvalid(data, isinvalid=[-9999], maxval=5.),
np.array([2, 6, 7, 8, 9])))
def test_issequence(self):
self.assertTrue(util.issequence([0, 1, 2]))
self.assertFalse(util.issequence(1))
self.assertFalse(util.issequence('str'))
def test_trapezoid(self):
data = np.arange(0., 30.1, 0.1)
correct = np.arange(0., 1., 0.01)
correct = np.concatenate((correct, np.ones(101), correct[::-1]))
result = util.trapezoid(data, 0., 10., 20., 30.)
np.testing.assert_array_almost_equal(result, correct, decimal=9)
def test_prob_round(self):
np.random.seed(42)
np.testing.assert_equal(42., util.prob_round(42.4242))
np.random.seed(44)
np.testing.assert_equal(43., util.prob_round(42.4242))
def test_get_wradlib_data_path(self):
wrl_data_path = os.environ.get('WRADLIB_DATA', None)
del os.environ['WRADLIB_DATA']
with self.assertRaises(EnvironmentError):
util.get_wradlib_data_path()
filename = 'rainbow/2013070308340000dBuZ.azi'
os.environ['WRADLIB_DATA'] = os.path.join(wrl_data_path, filename)
with self.assertRaises(EnvironmentError):
util.get_wradlib_data_path()
os.environ['WRADLIB_DATA'] = wrl_data_path
filename = os.path.join(wrl_data_path, "test.dat")
with self.assertRaises(EnvironmentError):
util.get_wradlib_data_file(filename)
def test_from_to(self):
out = util.from_to("2000-01-01 00:00:00",
"2000-01-02 00:00:00",
86400)
shouldbe = [dt.datetime(2000, 1, 1, 0, 0),
dt.datetime(2000, 1, 2, 0, 0)]
self.assertEqual(out, shouldbe)
def test_calculate_polynomial(self):
data = np.arange(0, 10, 1)
w = np.arange(0, 5, 1)
out = np.array([0, 10, 98, 426, 1252, 2930, 5910, 10738, 18056, 28602])
poly = util.calculate_polynomial(data, w)
np.testing.assert_allclose(poly, out, rtol=1e-12)
def test_import_optional(self):
m = util.import_optional('math')
np.testing.assert_equal(m.log10(100), 2.0)
mod = util.import_optional('h8x')
with self.assertRaises(AttributeError):
mod.test()
def test_maximum_intensity_projection(self):
angle = 0.0
elev = 0.0
filename = util.get_wradlib_data_file('misc/polar_dBZ_tur.gz')
data = np.loadtxt(filename)
# we need to have meter here for the georef function inside mip
d1 = np.arange(data.shape[1], dtype=np.float) * 1000
d2 = np.arange(data.shape[0], dtype=np.float)
data = np.roll(data, (d2 >= angle).nonzero()[0][0], axis=0)
# calculate max intensity proj
util.maximum_intensity_projection(data, r=d1, az=d2,
angle=angle, elev=elev)
util.maximum_intensity_projection(data, autoext=False)
def test_roll2d_polar(self):
filename = util.get_wradlib_data_file('misc/polar_dBZ_tur.gz')
data = np.loadtxt(filename)
result1 = util.roll2d_polar(data, 1, axis=0)
result2 = util.roll2d_polar(data, -1, axis=0)
result3 = util.roll2d_polar(data, 1, axis=1)
result4 = util.roll2d_polar(data, -1, axis=1)
np.testing.assert_equal(result1, np.roll(data, 1, axis=0))
np.testing.assert_equal(result2, np.roll(data, -1, axis=0))
np.testing.assert_equal(result3[:, 1:],
np.roll(data, 1, axis=1)[:, 1:])
np.testing.assert_equal(result4[:, :-1],
np.roll(data, -1, axis=1)[:, :-1])
def test_medfilt_along_axis(self):
x = np.arange(10).reshape((2, 5)).astype("f4")
shouldbe = np.array([[0., 1., 2., 3., 3.],
[5., 6., 7., 8., 8.]])
result = util.medfilt_along_axis(x, 3)
np.testing.assert_allclose(result, shouldbe)
def test_gradient_along_axis(self):
x = np.arange(10).reshape((2, 5)).astype("f4") ** 2
result = util.gradient_along_axis(x)
shouldbe = np.array([[1., 2., 4., 6., 7.],
[11., 12., 14., 16., 17.]])
np.testing.assert_allclose(result, shouldbe)
def test_gradient_from_smoothed(self):
x = np.arange(10).reshape((2, 5)).astype("f4") ** 2
result = util.gradient_from_smoothed(x)
shouldbe = np.array([[1., 2., 1.5, 0., 0.],
[11., 12., 6.5, 0., 0.]])
np.testing.assert_allclose(result, shouldbe)
class TestUtil(unittest.TestCase):
def setUp(self):
np.random.seed(42)
img = np.zeros((36, 10), dtype=np.float32)
img[2, 2] = 1 # isolated pixel
img[5, 6:8] = 1 # line
img[20, :] = 1 # spike
img[9:12, 4:7] = 1 # precip field
# img[15:17,5:7] = np.nan # nodata as nans
self.img = img
def test_filter_window_polar(self):
rscale = 250
# nrays, nbins = self.img.shape
# ascale = 2 * np.pi / self.img.shape[0]
mean = util.filter_window_polar(self.img, 300, "maximum", rscale)
mean2 = util.filter_window_polar(self.img, 300, "maximum", rscale,
random=True)
correct = np.array([[0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 1., 0., 1., 1., 1., 1., 0.],
[0., 1., 1., 0., 0., 1., 1., 1., 1., 0.],
[1., 1., 0., 0., 0., 1., 1., 1., 1., 0.],
[1., 1., 0., 1., 1., 0., 0., 0., 0., 0.],
[1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
[1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
[1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
[1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
[1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
[1., 0., 0., 1., 1., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[1., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0., 0., 0., 0.]])
correct2 = np.array([[0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 1., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 1., 0., 1., 1., 1., 1., 0.],
[0., 1., 1., 0., 0., 1., 1., 1., 1., 0.],
[1., 1., 0., 0., 0., 1., 1., 1., 1., 0.],
[1., 0., 0., 1., 1., 0., 0., 0., 0., 0.],
[1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
[1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
[1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
[1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
[1., 0., 0., 1., 1., 1., 1., 1., 0., 0.],
[1., 0., 0., 1., 1., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 0., 0., 0., 0., 0.],
[1., 1., 1., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 1., 1., 0., 0., 0., 0., 0., 0., 0.]])
np.testing.assert_array_equal(mean, correct)
np.testing.assert_array_equal(mean2, correct2)
def test_half_power_radius(self):
hpr = util.half_power_radius(np.arange(0, 100000, 10000), 1.0)
res = np.array([0., 87.266, 174.533, 261.799, 349.066, 436.332,
523.599, 610.865, 698.132, 785.398])
self.assertTrue(np.allclose(hpr, res))
def test_filter_window_cartesian(self):
correct = np.array([[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 1., 1., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 1., 0.],
[0., 0., 0., 0., 0., 0., 1., 1., 1., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 1., 1., 1., 1., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]])
self.assertTrue(np.allclose(
util.filter_window_cartesian(self.img, 500., "maximum",
np.array([250., 250])),
correct))
class FindBboxIndicesTest(unittest.TestCase):
def setUp(self):
xarr = np.linspace(500, 1000, num=6)
yarr = np.linspace(550, 950, num=9)
gridx, gridy = np.meshgrid(xarr, yarr)
self.grid = np.dstack((gridx, gridy))
self.outside = [400, 400, 1100, 1100]
self.inside1 = [599, 599, 901, 901]
self.inside2 = [601, 601, 899, 899]
def test_find_bbox_indices(self):
bbind = util.find_bbox_indices(self.grid, self.outside)
self.assertTrue(np.array_equal(bbind, [0, 0, self.grid.shape[1],
self.grid.shape[0]]))
bbind = util.find_bbox_indices(self.grid, self.inside1)
self.assertTrue(np.array_equal(bbind, [0, 0, 5, 8]))
bbind = util.find_bbox_indices(self.grid, self.inside2)
self.assertTrue(np.array_equal(bbind, [1, 1, 4, 7]))
| |
from flask import request, Response # type: ignore
import json
import logging
import logging.config # type: ignore
from service import app, auditing, db_access, security
AUTH_FAILURE_RESPONSE_BODY = json.dumps({'error': 'Invalid credentials'})
INVALID_REQUEST_RESPONSE_BODY = json.dumps({'error': 'Invalid request'})
INTERNAL_SERVER_ERROR_RESPONSE_BODY = json.dumps(
{'error': 'Internal server error'}
)
JSON_CONTENT_TYPE = 'application/json'
INVALID_REQUEST_RESPONSE = Response(
INVALID_REQUEST_RESPONSE_BODY,
status=400,
mimetype=JSON_CONTENT_TYPE
)
USER_NOT_FOUND_RESPONSE = Response(
json.dumps({'error': 'User not found'}),
status=404,
mimetype=JSON_CONTENT_TYPE
)
MAX_LOGIN_ATTEMPTS = 10
LOGGER = logging.getLogger(__name__)
@app.errorhandler(Exception)
def handleServerError(error):
LOGGER.error(
'An error occurred when processing a request',
exc_info=error
)
return Response(
INTERNAL_SERVER_ERROR_RESPONSE_BODY,
status=500,
mimetype=JSON_CONTENT_TYPE
)
# TODO: remove the root route when the monitoring tools can work without it
@app.route('/', methods=['GET'])
@app.route('/health', methods=['GET'])
def healthcheck():
try:
_hit_database_with_sample_query()
return _get_healthcheck_response('ok', 200, None)
except Exception as e:
error_message = 'Problem talking to PostgreSQL: {0}'.format(str(e))
return _get_healthcheck_response('error', 500, error_message)
@app.route('/user/authenticate', methods=['POST'])
def authenticate_user():
request_json = _try_get_request_json(request)
if request_json and _is_auth_request_data_valid(request_json):
credentials = request_json['credentials']
user_id = credentials['user_id']
password = credentials['password']
        # Find how many failed logins the user has had since the last successful login
failed_login_attempts = db_access.get_failed_logins(user_id)
if failed_login_attempts is None:
return _handle_non_existing_user_auth_request(user_id)
elif failed_login_attempts >= MAX_LOGIN_ATTEMPTS:
return _handle_locked_user_auth_request(user_id, failed_login_attempts)
else:
return _handle_allowed_user_auth_request(
user_id, password, failed_login_attempts
)
else:
return INVALID_REQUEST_RESPONSE
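# Example request body for /user/authenticate (illustrative):
#
#   {"credentials": {"user_id": "alice", "password": "secret"}}
#
# Successful authentication returns {"user": {"user_id": "alice"}}; bad
# credentials, a missing user, or a locked account all yield HTTP 401.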
@app.route('/admin/user', methods=['POST'])
def create_user():
request_json = _try_get_request_json(request)
if request_json and _is_create_request_data_valid(request_json):
user = request_json['user']
user_id = user['user_id']
password = user['password']
# TODO: common code
password_hash = security.get_user_password_hash(
user_id,
password,
app.config['PASSWORD_SALT']
)
if db_access.create_user(user_id, password_hash):
auditing.audit('Created user {}'.format(user_id))
return Response(json.dumps({'created': True}), mimetype=JSON_CONTENT_TYPE)
else:
response_body = json.dumps({'error': 'User already exists'})
return Response(response_body, 409, mimetype=JSON_CONTENT_TYPE)
else:
return INVALID_REQUEST_RESPONSE
@app.route('/admin/user/<user_id>/update', methods=['POST'])
def update_user(user_id):
request_json = _try_get_request_json(request)
if request_json and _is_update_request_data_valid(request_json):
new_password = request_json['user']['password']
new_password_hash = security.get_user_password_hash(
user_id,
new_password,
app.config['PASSWORD_SALT']
)
if db_access.update_user(
user_id=user_id,
password_hash=new_password_hash
):
auditing.audit('Updated user {}'.format(user_id))
return Response(
json.dumps({'updated': True}),
mimetype=JSON_CONTENT_TYPE
)
else:
return USER_NOT_FOUND_RESPONSE
else:
return INVALID_REQUEST_RESPONSE
@app.route('/admin/user/<user_id>', methods=['DELETE'])
def delete_user(user_id):
if db_access.delete_user(user_id):
auditing.audit('Deleted user {}'.format(user_id))
return Response(
json.dumps({'deleted': True}),
mimetype=JSON_CONTENT_TYPE
)
else:
return USER_NOT_FOUND_RESPONSE
@app.route('/admin/user/<user_id>/unlock-account')
def unlock_account(user_id):
if db_access.update_failed_logins(user_id, 0):
auditing.audit('Reset failed login attempts for user {}'.format(user_id))
return Response(json.dumps({'reset': True}),
mimetype=JSON_CONTENT_TYPE)
else:
return USER_NOT_FOUND_RESPONSE
@app.route('/admin/user/<user_id>/get-failed-logins')
def get_failed_logins(user_id):
failed_logins = db_access.get_failed_logins(user_id)
if failed_logins is not None:
LOGGER.info('Get failed login attempts for user {}'.format(user_id))
resp_json = json.dumps({'failed_login_attempts': failed_logins})
return Response(resp_json, mimetype=JSON_CONTENT_TYPE)
else:
return USER_NOT_FOUND_RESPONSE
def _handle_non_existing_user_auth_request(user_id):
auditing.audit('Invalid credentials used. username: {}. User does not exist.'.format(user_id))
return Response(AUTH_FAILURE_RESPONSE_BODY, status=401, mimetype=JSON_CONTENT_TYPE)
def _handle_locked_user_auth_request(user_id, failed_login_attempts):
failed_login_attempts += 1
auditing.audit('Too many bad logins. username: {}, attempt: {}.'.format(
user_id, failed_login_attempts
))
db_access.update_failed_logins(user_id, failed_login_attempts)
return Response(AUTH_FAILURE_RESPONSE_BODY, status=401, mimetype=JSON_CONTENT_TYPE)
def _handle_allowed_user_auth_request(user_id, password, failed_login_attempts):
password_salt = app.config['PASSWORD_SALT']
password_hash = security.get_user_password_hash(user_id, password, password_salt)
user = db_access.get_user(user_id, password_hash)
if user:
# Reset failed login attempts to zero and proceed
db_access.update_failed_logins(user_id, 0)
return Response(_authenticated_response_body(user), mimetype=JSON_CONTENT_TYPE)
else:
failed_login_attempts += 1
db_access.update_failed_logins(user_id, failed_login_attempts)
auditing.audit('Invalid credentials used. username: {}, attempt: {}.'.format(
user_id, failed_login_attempts
))
return Response(AUTH_FAILURE_RESPONSE_BODY, status=401, mimetype=JSON_CONTENT_TYPE)
def _try_get_request_json(request):
try:
return request.get_json()
except Exception as e:
LOGGER.error('Failed to parse JSON body from request', exc_info=e)
return None
def _is_auth_request_data_valid(request_data):
credentials = request_data.get('credentials')
if credentials:
user_id = credentials.get('user_id', None)
user_password = credentials.get('password', None)
return user_id and user_password
return False
def _is_create_request_data_valid(request_data):
user = request_data.get('user')
return user and user.get('user_id') and user.get('password')
def _is_update_request_data_valid(request_data):
user = request_data.get('user')
return user and user.get('password')
def _authenticated_response_body(user):
return json.dumps({"user": {"user_id": user.user_id}})
def _hit_database_with_sample_query():
# hitting the database just to see if it responds properly
db_access.get_user('non-existing-user', 'password-hash')
def _get_healthcheck_response(status, http_status_code, error_message):
response_body = {'status': status}
if error_message:
response_body['errors'] = [error_message]
return Response(
json.dumps(response_body),
status=http_status_code,
mimetype=JSON_CONTENT_TYPE,
)
| |
from __future__ import absolute_import
import contextlib
import errno
import io
import locale
# we have a submodule named 'logging' which would shadow this if we used the
# regular name:
import logging as std_logging
import os
import posixpath
import re
import shutil
import stat
import subprocess
import sys
import tarfile
import zipfile
from collections import deque
from pip._vendor import pkg_resources
# NOTE: retrying is not annotated in typeshed as on 2017-07-17, which is
# why we ignore the type on this import.
from pip._vendor.retrying import retry # type: ignore
from pip._vendor.six import PY2
from pip._vendor.six.moves import input
from pip._internal.compat import console_to_str, expanduser, stdlib_pkgs
from pip._internal.exceptions import InstallationError
from pip._internal.locations import (
running_under_virtualenv, site_packages, user_site, virtualenv_no_global,
write_delete_marker_file,
)
if PY2:
from io import BytesIO as StringIO
else:
from io import StringIO
__all__ = ['rmtree', 'display_path', 'backup_dir',
'ask', 'splitext',
'format_size', 'is_installable_dir',
'is_svn_page', 'file_contents',
'split_leading_dir', 'has_leading_dir',
'normalize_path',
'renames', 'get_prog',
'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess',
'captured_stdout', 'ensure_dir',
'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS',
'get_installed_version']
logger = std_logging.getLogger(__name__)
BZ2_EXTENSIONS = ('.tar.bz2', '.tbz')
XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma')
ZIP_EXTENSIONS = ('.zip', '.whl')
TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar')
ARCHIVE_EXTENSIONS = (
ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS)
SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS
try:
import bz2 # noqa
SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS
except ImportError:
logger.debug('bz2 module is not available')
try:
# Only for Python 3.3+
import lzma # noqa
SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
except ImportError:
logger.debug('lzma module is not available')
def import_or_raise(pkg_or_module_string, ExceptionType, *args, **kwargs):
try:
return __import__(pkg_or_module_string)
except ImportError:
raise ExceptionType(*args, **kwargs)
def ensure_dir(path):
"""os.path.makedirs without EEXIST."""
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def get_prog():
try:
prog = os.path.basename(sys.argv[0])
if prog in ('__main__.py', '-c'):
return "%s -m pip" % sys.executable
else:
return prog
except (AttributeError, TypeError, IndexError):
pass
return 'pip'
# Retry every half second for up to 3 seconds
@retry(stop_max_delay=3000, wait_fixed=500)
def rmtree(dir, ignore_errors=False):
shutil.rmtree(dir, ignore_errors=ignore_errors,
onerror=rmtree_errorhandler)
def rmtree_errorhandler(func, path, exc_info):
"""On Windows, the files in .svn are read-only, so when rmtree() tries to
remove them, an exception is thrown. We catch that here, remove the
read-only attribute, and hopefully continue without problems."""
# if file type currently read only
if os.stat(path).st_mode & stat.S_IREAD:
# convert to read/write
os.chmod(path, stat.S_IWRITE)
# use the original function to repeat the operation
func(path)
return
else:
raise
def display_path(path):
"""Gives the display value for a given path, making it relative to cwd
if possible."""
path = os.path.normcase(os.path.abspath(path))
if sys.version_info[0] == 2:
path = path.decode(sys.getfilesystemencoding(), 'replace')
path = path.encode(sys.getdefaultencoding(), 'replace')
if path.startswith(os.getcwd() + os.path.sep):
path = '.' + path[len(os.getcwd()):]
return path
def backup_dir(dir, ext='.bak'):
"""Figure out the name of a directory to back up the given dir to
(adding .bak, .bak2, etc)"""
n = 1
extension = ext
while os.path.exists(dir + extension):
n += 1
extension = ext + str(n)
return dir + extension
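# Example (illustrative): backup_dir('/tmp/build') returns '/tmp/build.bak',
# or '/tmp/build.bak2' (and so on) if earlier backups already exist.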
def ask_path_exists(message, options):
for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
if action in options:
return action
return ask(message, options)
def ask(message, options):
"""Ask the message interactively, with the given possible responses"""
while 1:
if os.environ.get('PIP_NO_INPUT'):
raise Exception(
'No input was expected ($PIP_NO_INPUT set); question: %s' %
message
)
response = input(message)
response = response.strip().lower()
if response not in options:
print(
'Your response (%r) was not one of the expected responses: '
'%s' % (response, ', '.join(options))
)
else:
return response
def format_size(bytes):
if bytes > 1000 * 1000:
return '%.1fMB' % (bytes / 1000.0 / 1000)
elif bytes > 10 * 1000:
return '%ikB' % (bytes / 1000)
elif bytes > 1000:
return '%.1fkB' % (bytes / 1000.0)
else:
return '%ibytes' % bytes
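# Example: format_size(2500000) -> '2.5MB'; format_size(1536) -> '1.5kB'.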
def is_installable_dir(path):
"""Return True if `path` is a directory containing a setup.py file."""
if not os.path.isdir(path):
return False
setup_py = os.path.join(path, 'setup.py')
if os.path.isfile(setup_py):
return True
return False
def is_svn_page(html):
"""
Returns true if the page appears to be the index page of an svn repository
"""
return (re.search(r'<title>[^<]*Revision \d+:', html) and
re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
def file_contents(filename):
with open(filename, 'rb') as fp:
return fp.read().decode('utf-8')
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
"""Yield pieces of data from a file-like object until EOF."""
while True:
chunk = file.read(size)
if not chunk:
break
yield chunk
def split_leading_dir(path):
path = path.lstrip('/').lstrip('\\')
if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or
'\\' not in path):
return path.split('/', 1)
elif '\\' in path:
return path.split('\\', 1)
else:
return path, ''
def has_leading_dir(paths):
"""Returns true if all the paths have the same leading path name
(i.e., everything is in one subdirectory in an archive)"""
common_prefix = None
for path in paths:
prefix, rest = split_leading_dir(path)
if not prefix:
return False
elif common_prefix is None:
common_prefix = prefix
elif prefix != common_prefix:
return False
return True
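# Example: has_leading_dir(['pkg/a.py', 'pkg/sub/b.py']) is True, while
# has_leading_dir(['pkg/a.py', 'other/b.py']) is False.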
def normalize_path(path, resolve_symlinks=True):
"""
Convert a path to its canonical, case-normalized, absolute version.
"""
path = expanduser(path)
if resolve_symlinks:
path = os.path.realpath(path)
else:
path = os.path.abspath(path)
return os.path.normcase(path)
def splitext(path):
"""Like os.path.splitext, but take off .tar too"""
base, ext = posixpath.splitext(path)
if base.lower().endswith('.tar'):
ext = base[-4:] + ext
base = base[:-4]
return base, ext
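# Example: splitext('archive.tar.gz') -> ('archive', '.tar.gz'), whereas
# posixpath.splitext alone would return ('archive.tar', '.gz').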
def renames(old, new):
"""Like os.renames(), but handles renaming across devices."""
# Implementation borrowed from os.renames().
head, tail = os.path.split(new)
if head and tail and not os.path.exists(head):
os.makedirs(head)
shutil.move(old, new)
head, tail = os.path.split(old)
if head and tail:
try:
os.removedirs(head)
except OSError:
pass
def is_local(path):
"""
Return True if path is within sys.prefix, if we're running in a virtualenv.
If we're not in a virtualenv, all paths are considered "local."
"""
if not running_under_virtualenv():
return True
return normalize_path(path).startswith(normalize_path(sys.prefix))
def dist_is_local(dist):
"""
Return True if given Distribution object is installed locally
(i.e. within current virtualenv).
Always True if we're not in a virtualenv.
"""
return is_local(dist_location(dist))
def dist_in_usersite(dist):
"""
Return True if given Distribution is installed in user site.
"""
norm_path = normalize_path(dist_location(dist))
return norm_path.startswith(normalize_path(user_site))
def dist_in_site_packages(dist):
"""
Return True if given Distribution is installed in
sysconfig.get_python_lib().
"""
return normalize_path(
dist_location(dist)
).startswith(normalize_path(site_packages))
def dist_is_editable(dist):
"""Is distribution an editable install?"""
for path_item in sys.path:
egg_link = os.path.join(path_item, dist.project_name + '.egg-link')
if os.path.isfile(egg_link):
return True
return False
def get_installed_distributions(local_only=True,
skip=stdlib_pkgs,
include_editables=True,
editables_only=False,
user_only=False):
"""
Return a list of installed Distribution objects.
If ``local_only`` is True (default), only return installations
local to the current virtualenv, if in a virtualenv.
``skip`` argument is an iterable of lower-case project names to
ignore; defaults to stdlib_pkgs
If ``include_editables`` is False, don't report editables.
If ``editables_only`` is True , only report editables.
If ``user_only`` is True , only report installations in the user
site directory.
"""
if local_only:
local_test = dist_is_local
else:
def local_test(d):
return True
if include_editables:
def editable_test(d):
return True
else:
def editable_test(d):
return not dist_is_editable(d)
if editables_only:
def editables_only_test(d):
return dist_is_editable(d)
else:
def editables_only_test(d):
return True
if user_only:
user_test = dist_in_usersite
else:
def user_test(d):
return True
return [d for d in pkg_resources.working_set
if local_test(d) and
d.key not in skip and
editable_test(d) and
editables_only_test(d) and
user_test(d)
]
def egg_link_path(dist):
"""
Return the path for the .egg-link file if it exists, otherwise, None.
There's 3 scenarios:
1) not in a virtualenv
try to find in site.USER_SITE, then site_packages
2) in a no-global virtualenv
try to find in site_packages
3) in a yes-global virtualenv
try to find in site_packages, then site.USER_SITE
(don't look in global location)
For #1 and #3, there could be odd cases, where there's an egg-link in 2
locations.
This method will just return the first one found.
"""
sites = []
if running_under_virtualenv():
if virtualenv_no_global():
sites.append(site_packages)
else:
sites.append(site_packages)
if user_site:
sites.append(user_site)
else:
if user_site:
sites.append(user_site)
sites.append(site_packages)
for site in sites:
egglink = os.path.join(site, dist.project_name) + '.egg-link'
if os.path.isfile(egglink):
return egglink
def dist_location(dist):
"""
Get the site-packages location of this distribution. Generally
this is dist.location, except in the case of develop-installed
packages, where dist.location is the source code location, and we
want to know where the egg-link file is.
"""
egg_link = egg_link_path(dist)
if egg_link:
return egg_link
return dist.location
def current_umask():
"""Get the current umask which involves having to set it temporarily."""
mask = os.umask(0)
os.umask(mask)
return mask
def unzip_file(filename, location, flatten=True):
"""
Unzip the file (with path `filename`) to the destination `location`. All
files are written based on system defaults and umask (i.e. permissions are
not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
zipfp = open(filename, 'rb')
try:
zip = zipfile.ZipFile(zipfp, allowZip64=True)
leading = has_leading_dir(zip.namelist()) and flatten
for info in zip.infolist():
name = info.filename
data = zip.read(name)
fn = name
if leading:
fn = split_leading_dir(name)[1]
fn = os.path.join(location, fn)
dir = os.path.dirname(fn)
if fn.endswith('/') or fn.endswith('\\'):
# A directory
ensure_dir(fn)
else:
ensure_dir(dir)
fp = open(fn, 'wb')
try:
fp.write(data)
finally:
fp.close()
mode = info.external_attr >> 16
# if mode and regular file and any execute permissions for
# user/group/world?
if mode and stat.S_ISREG(mode) and mode & 0o111:
# make dest file have execute for user/group/world
# (chmod +x) no-op on windows per python docs
os.chmod(fn, (0o777 - current_umask() | 0o111))
finally:
zipfp.close()
def untar_file(filename, location):
"""
Untar the file (with path `filename`) to the destination `location`.
All files are written based on system defaults and umask (i.e. permissions
are not preserved), except that regular file members with any execute
permissions (user, group, or world) have "chmod +x" applied after being
written. Note that for windows, any execute changes using os.chmod are
no-ops per the python docs.
"""
ensure_dir(location)
if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'):
mode = 'r:gz'
elif filename.lower().endswith(BZ2_EXTENSIONS):
mode = 'r:bz2'
elif filename.lower().endswith(XZ_EXTENSIONS):
mode = 'r:xz'
elif filename.lower().endswith('.tar'):
mode = 'r'
else:
logger.warning(
'Cannot determine compression type for file %s', filename,
)
mode = 'r:*'
tar = tarfile.open(filename, mode)
try:
# note: python<=2.5 doesn't seem to know about pax headers, filter them
leading = has_leading_dir([
member.name for member in tar.getmembers()
if member.name != 'pax_global_header'
])
for member in tar.getmembers():
fn = member.name
if fn == 'pax_global_header':
continue
if leading:
fn = split_leading_dir(fn)[1]
path = os.path.join(location, fn)
if member.isdir():
ensure_dir(path)
elif member.issym():
try:
tar._extract_member(member, path)
except Exception as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
else:
try:
fp = tar.extractfile(member)
except (KeyError, AttributeError) as exc:
# Some corrupt tar files seem to produce this
# (specifically bad symlinks)
logger.warning(
'In the tar file %s the member %s is invalid: %s',
filename, member.name, exc,
)
continue
ensure_dir(os.path.dirname(path))
with open(path, 'wb') as destfp:
shutil.copyfileobj(fp, destfp)
fp.close()
# Update the timestamp (useful for cython compiled files)
tar.utime(member, path)
# member have any execute permissions for user/group/world?
if member.mode & 0o111:
# make dest file have execute for user/group/world
# no-op on windows per python docs
os.chmod(path, (0o777 - current_umask() | 0o111))
finally:
tar.close()
def unpack_file(filename, location, content_type, link):
filename = os.path.realpath(filename)
if (content_type == 'application/zip' or
filename.lower().endswith(ZIP_EXTENSIONS) or
zipfile.is_zipfile(filename)):
unzip_file(
filename,
location,
flatten=not filename.endswith('.whl')
)
elif (content_type == 'application/x-gzip' or
tarfile.is_tarfile(filename) or
filename.lower().endswith(
TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)):
untar_file(filename, location)
elif (content_type and content_type.startswith('text/html') and
is_svn_page(file_contents(filename))):
# We don't really care about this
from pip._internal.vcs.subversion import Subversion
Subversion('svn+' + link.url).unpack(location)
else:
# FIXME: handle?
# FIXME: magic signatures?
logger.critical(
'Cannot unpack file %s (downloaded from %s, content-type: %s); '
'cannot detect archive format',
filename, location, content_type,
)
raise InstallationError(
'Cannot determine archive format of %s' % location
)
def call_subprocess(cmd, show_stdout=True, cwd=None,
on_returncode='raise',
command_desc=None,
extra_environ=None, unset_environ=None, spinner=None):
"""
Args:
unset_environ: an iterable of environment variable names to unset
prior to calling subprocess.Popen().
"""
if unset_environ is None:
unset_environ = []
# This function's handling of subprocess output is confusing and I
# previously broke it terribly, so as penance I will write a long comment
# explaining things.
#
# The obvious thing that affects output is the show_stdout=
# kwarg. show_stdout=True means, let the subprocess write directly to our
# stdout. Even though it is nominally the default, it is almost never used
# inside pip (and should not be used in new code without a very good
# reason); as of 2016-02-22 it is only used in a few places inside the VCS
# wrapper code. Ideally we should get rid of it entirely, because it
# creates a lot of complexity here for a rarely used feature.
#
# Most places in pip set show_stdout=False. What this means is:
# - We connect the child stdout to a pipe, which we read.
# - By default, we hide the output but show a spinner -- unless the
# subprocess exits with an error, in which case we show the output.
# - If the --verbose option was passed (= loglevel is DEBUG), then we show
# the output unconditionally. (But in this case we don't want to show
# the output a second time if it turns out that there was an error.)
#
# stderr is always merged with stdout (even if show_stdout=True).
if show_stdout:
stdout = None
else:
stdout = subprocess.PIPE
if command_desc is None:
cmd_parts = []
for part in cmd:
if ' ' in part or '\n' in part or '"' in part or "'" in part:
part = '"%s"' % part.replace('"', '\\"')
cmd_parts.append(part)
command_desc = ' '.join(cmd_parts)
logger.debug("Running command %s", command_desc)
env = os.environ.copy()
if extra_environ:
env.update(extra_environ)
for name in unset_environ:
env.pop(name, None)
try:
proc = subprocess.Popen(
cmd, stderr=subprocess.STDOUT, stdin=subprocess.PIPE,
stdout=stdout, cwd=cwd, env=env,
)
proc.stdin.close()
except Exception as exc:
logger.critical(
"Error %s while executing command %s", exc, command_desc,
)
raise
all_output = []
if stdout is not None:
while True:
line = console_to_str(proc.stdout.readline())
if not line:
break
line = line.rstrip()
all_output.append(line + '\n')
if logger.getEffectiveLevel() <= std_logging.DEBUG:
# Show the line immediately
logger.debug(line)
else:
# Update the spinner
if spinner is not None:
spinner.spin()
try:
proc.wait()
finally:
if proc.stdout:
proc.stdout.close()
if spinner is not None:
if proc.returncode:
spinner.finish("error")
else:
spinner.finish("done")
if proc.returncode:
if on_returncode == 'raise':
if (logger.getEffectiveLevel() > std_logging.DEBUG and
not show_stdout):
logger.info(
'Complete output from command %s:', command_desc,
)
logger.info(
''.join(all_output) +
'\n----------------------------------------'
)
raise InstallationError(
'Command "%s" failed with error code %s in %s'
% (command_desc, proc.returncode, cwd))
elif on_returncode == 'warn':
logger.warning(
'Command "%s" had error code %s in %s',
command_desc, proc.returncode, cwd,
)
elif on_returncode == 'ignore':
pass
else:
raise ValueError('Invalid value: on_returncode=%s' %
repr(on_returncode))
if not show_stdout:
return ''.join(all_output)
def read_text_file(filename):
"""Return the contents of *filename*.
Try to decode the file contents with utf-8, the preferred system encoding
(e.g., cp1252 on some Windows machines), and latin1, in that order.
Decoding a byte string with latin1 will never raise an error. In the worst
case, the returned string will contain some garbage characters.
"""
with open(filename, 'rb') as fp:
data = fp.read()
encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1']
for enc in encodings:
try:
data = data.decode(enc)
except UnicodeDecodeError:
continue
break
assert type(data) != bytes # Latin1 should have worked.
return data
def _make_build_dir(build_dir):
os.makedirs(build_dir)
write_delete_marker_file(build_dir)
class FakeFile(object):
"""Wrap a list of lines in an object with readline() to make
ConfigParser happy."""
def __init__(self, lines):
self._gen = (l for l in lines)
def readline(self):
try:
try:
return next(self._gen)
except NameError:
return self._gen.next()
except StopIteration:
return ''
def __iter__(self):
return self._gen
class StreamWrapper(StringIO):
@classmethod
def from_stream(cls, orig_stream):
cls.orig_stream = orig_stream
return cls()
# compileall.compile_dir() needs stdout.encoding to print to stdout
@property
def encoding(self):
return self.orig_stream.encoding
@contextlib.contextmanager
def captured_output(stream_name):
"""Return a context manager used by captured_stdout/stdin/stderr
that temporarily replaces the sys stream *stream_name* with a StringIO.
Taken from Lib/support/__init__.py in the CPython repo.
"""
orig_stdout = getattr(sys, stream_name)
setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout))
try:
yield getattr(sys, stream_name)
finally:
setattr(sys, stream_name, orig_stdout)
def captured_stdout():
"""Capture the output of sys.stdout:
with captured_stdout() as stdout:
print('hello')
self.assertEqual(stdout.getvalue(), 'hello\n')
Taken from Lib/support/__init__.py in the CPython repo.
"""
return captured_output('stdout')
class cached_property(object):
"""A property that is only computed once per instance and then replaces
itself with an ordinary attribute. Deleting the attribute resets the
property.
Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175
"""
def __init__(self, func):
self.__doc__ = getattr(func, '__doc__')
self.func = func
def __get__(self, obj, cls):
if obj is None:
# We're being accessed from the class itself, not from an object
return self
value = obj.__dict__[self.func.__name__] = self.func(obj)
return value
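# Example (illustrative; expensive_query is a stand-in):
#
#   class Report(object):
#       @cached_property
#       def data(self):
#           # runs once; the result then replaces the property on the
#           # instance, so later accesses are plain attribute lookups
#           return expensive_query()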
def get_installed_version(dist_name, lookup_dirs=None):
"""Get the installed version of dist_name avoiding pkg_resources cache"""
# Create a requirement that we'll look for inside of setuptools.
req = pkg_resources.Requirement.parse(dist_name)
# We want to avoid having this cached, so we need to construct a new
# working set each time.
if lookup_dirs is None:
working_set = pkg_resources.WorkingSet()
else:
working_set = pkg_resources.WorkingSet(lookup_dirs)
# Get the installed distribution from our working set
dist = working_set.find(req)
# Check to see if we got an installed distribution or not, if we did
# we want to return it's version.
return dist.version if dist else None
def consume(iterator):
"""Consume an iterable at C speed."""
deque(iterator, maxlen=0)
# Simulates an enum
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = {value: key for key, value in enums.items()}
enums['reverse_mapping'] = reverse
return type('Enum', (), enums)
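# Example: Colors = enum('RED', 'GREEN', BLUE=42) gives Colors.RED == 0,
# Colors.GREEN == 1, Colors.BLUE == 42 and
# Colors.reverse_mapping[42] == 'BLUE'.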
| |
from django.test import Client
from django.urls import reverse
import random
import base64
import os
from fileserver.ResponseWorker import ResponseType
from fileserver.OutputTableHeader import OutputTableHeader
from fileserver.UserWorker import CheckUser
from fileserver.models import User
from fileserver.TestWorker.CommonFunctions import \
IsCorrectStatusCodeResponse
from fileserver.ResponseFormatWorker import DecodeOutputIntoTable, \
GetFormatForHttpQuery
from fileserver.TestWorker.AdminAuthTests import \
CheckAdminAuthorization
def CheckUserExistance(login):
    # NB: despite the name, this returns True when the login is *not*
    # taken, i.e. when no such user exists; callers use it to assert absence.
    return login.lower() not in \
        [user.username for user in User.objects.all()]
def generate_random_login(nbyte):
return str(base64.b32encode(os.urandom(nbyte)).lower(), "utf-8")
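# Example: generate_random_login(5) yields an 8-character lowercase base32
# string such as 'mfrggzdf' (5 random bytes = 40 bits = 8 base32 symbols).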
def GetPasswordFromResponse(response, format=None):
content = DecodeOutputIntoTable(\
OutputTableHeader.RegisterUser.value, \
response.content, format)
assert(len(content) == 1)
assert(len(content[0]) == 1)
return str(content[0][0])
def CheckUsersByDict(userDict):
for user in userDict.keys():
validUser = CheckUser(user, userDict[user])
if not validUser:
return False
return True
def TestUserRegistration(login, password, format=None):
print("TestUserRegistration...", end="")
adress = 'fileserver:register_user'
params = {'adminUserName': login, 'adminPassword': password,\
'outputFormat': GetFormatForHttpQuery(format)}
client = Client()
CheckAdminAuthorization(adress, format)
firstResponse = client.post(reverse(adress), params)
assert(IsCorrectStatusCodeResponse(
firstResponse, \
ResponseType.NotEnoughArguments,\
format))
users = {}
for i in range(10):
username = generate_random_login(5)
secondParams = {}
secondParams.update(params)
secondParams["usernameForRegistration"] = username
secondResponse = client.post(reverse(adress), secondParams)
userPassword = GetPasswordFromResponse(secondResponse, format)
users[username] = userPassword
invalidLogin = generate_random_login(5) + ":-("
thirdParams = {}
thirdParams.update(params)
thirdParams["usernameForRegistration"] = invalidLogin
thirdResponse = client.post(reverse(adress), thirdParams)
assert(IsCorrectStatusCodeResponse(
thirdResponse, \
ResponseType.InvalidParameters,\
format))
assert(CheckUserExistance(invalidLogin))
existedLogin = random.choice(tuple(users.keys())).upper()
fourthParams = {}
fourthParams.update(params)
fourthParams["usernameForRegistration"] = existedLogin
fourthResponse = client.post(reverse(adress), fourthParams)
assert(IsCorrectStatusCodeResponse(
fourthResponse, \
ResponseType.AlreadyRegistredUser,\
format))
assert(CheckUsersByDict(users))
print("OK")
return users
def TestUserResetPassword(login, password, users, format=None):
print("TestUserResetPassword...", end="")
adress = 'fileserver:reset_user_password'
CheckAdminAuthorization(adress, format)
params = {'adminUserName': login, 'adminPassword': password,\
'outputFormat': GetFormatForHttpQuery(format)}
client = Client()
usersAfterResetPassword = {}
usersAfterResetPassword.update(users)
targetUserLogin = random.choice(\
tuple(usersAfterResetPassword.keys()))
firstResponse = client.post(reverse(adress), params)
assert(IsCorrectStatusCodeResponse(
firstResponse, \
ResponseType.NotEnoughArguments,\
format))
invalidLogin = generate_random_login(5)
secondParams = {}
secondParams.update(params)
secondParams["usernameForResetPassword"] = invalidLogin
secondResponse = client.post(reverse(adress), secondParams)
assert(IsCorrectStatusCodeResponse(
secondResponse, \
ResponseType.UserNotFound,\
format))
assert(CheckUserExistance(invalidLogin))
thirdParams = {}
thirdParams.update(params)
thirdParams["usernameForResetPassword"] = targetUserLogin
thirdResponse = client.post(reverse(adress), thirdParams)
newPassword = GetPasswordFromResponse(thirdResponse, format)
usersAfterResetPassword[targetUserLogin] = newPassword
assert(CheckUsersByDict(usersAfterResetPassword))
print("OK")
return usersAfterResetPassword
def TestDeleteUser(login, password, users, format=None):
print("TestDeleteUser...", end="")
adress = 'fileserver:delete_user'
CheckAdminAuthorization(adress, format)
params = {'adminUserName': login, 'adminPassword': password,\
'outputFormat': GetFormatForHttpQuery(format)}
client = Client()
usersAfterDelete = {}
usersAfterDelete.update(users)
targetUserLogin = random.choice(tuple(usersAfterDelete.keys()))
firstResponse = client.post(reverse(adress), params)
assert(IsCorrectStatusCodeResponse(
firstResponse, \
ResponseType.NotEnoughArguments,\
format))
invalidLogin = generate_random_login(5)
secondParams = {}
secondParams.update(params)
secondParams["usernameForDelete"] = invalidLogin
secondResponse = client.post(reverse(adress), secondParams)
assert(IsCorrectStatusCodeResponse(
secondResponse, \
ResponseType.UserNotFound,\
format))
    thirdParams = {}
    thirdParams.update(params)
    thirdParams["usernameForDelete"] = targetUserLogin.upper()
    thirdResponse = client.post(reverse(adress), thirdParams)
    # after a successful delete the login must be free again
    assert(CheckUserExistance(targetUserLogin))
    del usersAfterDelete[targetUserLogin]
    assert(CheckUsersByDict(usersAfterDelete))
print("OK")
return usersAfterDelete
def TestListAllUsers(login, password, users, format=None):
print("TestListAllUsers...", end="")
adress = 'fileserver:list_all_users'
CheckAdminAuthorization(adress, format)
params = {'adminUserName': login, 'adminPassword': password,\
'outputFormat': GetFormatForHttpQuery(format)}
client = Client()
response = client.post(reverse(adress), params)
content = DecodeOutputIntoTable(\
OutputTableHeader.ListAllUsers.value, \
response.content, format)
assert(all(map(lambda username: username in \
map(lambda responseUserRow: responseUserRow[0], content), \
users.keys())))
print("OK")
def TestUserAdminControl(login, password, format=None):
    users = TestUserRegistration(login, password, format=format)
    usersAfterResetPassword = \
        TestUserResetPassword(login, password, users, format=format)
    usersAfterDelete = TestDeleteUser(login, password, \
        usersAfterResetPassword, format=format)
    TestListAllUsers(login, password, usersAfterDelete, format=format)
    return usersAfterDelete
| |
# -*- coding: utf-8 -*-
# from __future__ import unicode_literals
import requests
import json
from bs4 import BeautifulSoup
import re
import geocoder
import urllib3
urllib3.disable_warnings()
INSPECTION_DOMAIN_NAME = 'http://info.kingcounty.gov'
INSPECTION_PATH = '/health/ehs/foodsafety/inspections/Results.aspx'
INSPECTION_PARAMETERS = {'Output': 'W',
'Business_Name': '',
'Business_Address': '',
'Longitude': '',
'Latitude': '',
'City': '',
'Zip_Code': '',
'Inspection_Type': 'All',
'Inspection_Start': '',
'Inspection_End': '',
'Inspection_Closed_Business': 'A',
'Violation_Points': '',
'Violation_Red_Points': '',
'Violation_Descr': '',
'Fuzzy_Search': 'N',
'Sort': 'B'
}
def get_inspection_page(**kwargs):
url = INSPECTION_DOMAIN_NAME + INSPECTION_PATH
params = INSPECTION_PARAMETERS.copy()
for key, val in kwargs.items():
if key in INSPECTION_PARAMETERS:
params[key] = val
response = requests.get(url, params=params)
return response.content, response.encoding
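# Example (illustrative):
#
#   content, encoding = get_inspection_page(Zip_Code='98109',
#                                           Inspection_Start='2/1/2013',
#                                           Inspection_End='2/1/2015')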
def load_inspection_page():
    with open('output.json') as data_file:
        output = json.load(data_file)
    content = output[0].encode('utf-8')
    # BeautifulSoup's from_encoding expects the encoding *name* as str
    encoding = output[1]
    return content, encoding
def parse_source(html, encoding='utf-8'):
parsed = BeautifulSoup(html, 'html5lib', from_encoding=encoding)
return parsed
def extract_data_listings(html):
id_finder = re.compile(r'PR[\d]+~')
return html.find_all('div', id=id_finder)
def has_two_tds(element):
is_tr = element.name == 'tr'
td_children = element.find_all('td', recursive=False)
has_two = len(td_children) == 2
return is_tr and has_two
def clean_data(td):
data = td.string
try:
return data.strip(" \n:-")
except AttributeError:
return u""
def extract_restaurant_metadata(elem):
metadata_rows = elem.find('tbody').find_all(
has_two_tds, recursive=False
)
rdata = {}
current_label = ''
for row in metadata_rows:
        key_cell, val_cell = row.find_all('td', recursive=False)
new_label = clean_data(key_cell)
current_label = new_label if new_label else current_label
rdata.setdefault(current_label, []).append(clean_data(val_cell))
return rdata
def is_inspection_row(elem):
    if elem.name != 'tr':
        return False
    td_children = elem.find_all('td', recursive=False)
    # Guard before indexing: rows without exactly four cells cannot match.
    if len(td_children) != 4:
        return False
    this_text = clean_data(td_children[0]).lower()
    contains_word = 'inspection' in this_text
    does_not_start = not this_text.startswith('inspection')
    return contains_word and does_not_start
def extract_score_data(elem):
inspection_rows = elem.find_all(is_inspection_row)
samples = len(inspection_rows)
total = high_score = average = 0
for row in inspection_rows:
strval = clean_data(row.find_all('td')[2])
try:
intval = int(strval)
except (ValueError, TypeError):
samples -= 1
else:
total += intval
high_score = intval if intval > high_score else high_score
if samples:
average = total/float(samples)
data = {
u'Average Score': average,
u'High Score': high_score,
u'Total Inspections': samples
}
return data
def generate_results(test=False, count=10):
kwargs = {
'Inspection_Start': '2/1/2013',
'Inspection_End': '2/1/2015',
'Zip_Code': '98109'
}
if test:
html, encoding = load_inspection_page()
else:
html, encoding = get_inspection_page(**kwargs)
doc = parse_source(html, encoding)
listings = extract_data_listings(doc)
for listing in listings[:count]:
metadata = extract_restaurant_metadata(listing)
score_data = extract_score_data(listing)
metadata.update(score_data)
yield metadata
def get_geojson(result):
address = " ".join(result.get('Address', ''))
if not address:
return None
geocoded = geocoder.google(address)
geojson = geocoded.geojson
inspection_data = {}
use_keys = (
'Business Name', 'Average Score', 'Total Inspections', 'High Score',
'Address',
)
for key, val in result.items():
if key not in use_keys:
continue
if isinstance(val, list):
val = " ".join(val)
inspection_data[key] = val
new_address = geojson['properties'].get('address')
if new_address:
inspection_data['Address'] = new_address
geojson['properties'] = inspection_data
return geojson
def sort_values(total_result, sort_type, reverse=False):
if reverse:
total_result['features'] = sorted(total_result['features'],
key=lambda restaurant:
restaurant['properties']
[sort_type], reverse=True)
else:
total_result['features'] = sorted(total_result['features'],
key=lambda restaurant:
restaurant['properties']
[sort_type])
return total_result
if __name__ == '__main__':
import pprint
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--test', help='run from test file',
action='store_true')
parser.add_argument('--highscore', help='sort by high score',
action='store_true')
parser.add_argument('--average', help='sort by average score',
action='store_true')
parser.add_argument('--inspections', help='sort by most inspections',
action='store_true')
parser.add_argument('--reverse', help='reverse the sort',
action='store_true')
args = parser.parse_args()
    sort_type = None
    test = args.test
    reverse = args.reverse
    if args.average:
        sort_type = 'Average Score'
    if args.highscore:
        sort_type = 'High Score'
    if args.inspections:
        sort_type = 'Total Inspections'
total_result = {'type': 'FeatureCollection', 'features': []}
    for result in generate_results(test):
        geo_result = get_geojson(result)
        pprint.pprint(geo_result)
        # Skip listings that could not be geocoded (get_geojson returns None).
        if geo_result is not None:
            total_result['features'].append(geo_result)
if sort_type is not None:
total_result = sort_values(total_result, sort_type, reverse)
with open('my_map.json', 'w') as fh:
json.dump(total_result, fh)
| |
import collections
import logging
import datetime
from aiokts.util.json_utils import JsonSerializable
class Field:
def __init__(self, default, private):
self.name = None
self.model = None
self.default = default
self.private = private
def transform_in(self, value):
return value
def transform_to_json(self, value):
return value
def __call__(self, value):
return self.transform_in(value)
class StringField(Field):
def __init__(self, default=None, private=False):
super().__init__(default, private)
def transform_in(self, value):
return str(value)
class IntField(Field):
def __init__(self, default=None, private=False):
super().__init__(default, private)
def transform_in(self, value):
return int(value)
class BooleanField(Field):
def __init__(self, default=None, private=False):
super().__init__(default, private)
def transform_in(self, value):
if value == 'true':
value = True
elif value == 'false':
value = False
return bool(value)
class UnixTimestampField(Field):
def __init__(self, default=None, private=False):
super().__init__(default, private)
def transform_in(self, value):
value = int(value)
return datetime.datetime.fromtimestamp(value)
class IntEnumField(Field):
def __init__(self, enum_cls, default=None, private=False, json_name=False):
super().__init__(default, private)
self.enum_cls = enum_cls
self.json_name = json_name
def transform_in(self, value):
return self.enum_cls(value)
def transform_to_json(self, value):
if self.json_name:
return value.name
return value.value
class DictField(Field):
def __init__(self, default=None, private=False):
super().__init__(default, private)
def transform_in(self, value):
assert isinstance(value, dict), \
'value is not dict (but {}) for {}.{}'.format(
type(value), self.model.__name__, self.name
)
return value
class ForeignModelField(Field):
def __init__(self, model_cls, default=None, private=False):
super().__init__(default, private)
self.model_cls = model_cls
def transform_in(self, value):
return self.model_cls.parse(value)
def transform_to_json(self, value):
return value.__to_json__()
class DoesNotExistBase(Exception):
MODEL_CLS = None
def __init__(self, entity_id=None, message=None):
self.entity_id = entity_id
if message is None:
self.message = \
"Entity of type '{}' with id {} not found".format(
self.MODEL_CLS.__name__, self.entity_id)
else:
self.message = message
def __str__(self):
return self.message
class ModelMetaclass(type):
@classmethod
def __prepare__(mcs, name, bases):
return collections.OrderedDict()
def __new__(mcs, class_name, bases, class_dict):
if class_name != 'Model':
fields = collections.OrderedDict()
for name, value in class_dict.items():
if not name.startswith('__') \
and isinstance(value, Field):
fields[name] = value
for name in fields:
del class_dict[name]
class_dict['_fields'] = fields
return super().__new__(mcs, class_name, bases, class_dict)
def __init__(cls, class_name, bases, class_dict):
class DoesNotExist(DoesNotExistBase):
MODEL_CLS = cls
cls.DoesNotExist = DoesNotExist
fields = class_dict['_fields']
if fields is not None:
for name, f in fields.items():
f.name = name
f.model = cls
super().__init__(class_name, bases, class_dict)
class Model(JsonSerializable, metaclass=ModelMetaclass):
_fields = None
DoesNotExist = None
LOGGER = logging.getLogger('aiokts.models')
def __init__(self, *args, **kwargs):
i = 0
used_kwargs = set()
for name, field in self._fields.items():
setattr(self, name, field.default)
if len(args) <= i:
# supplying kw arguments
if name in kwargs:
v = kwargs[name]
if v is not None:
try:
v = field.transform_in(v)
except Exception as e:
raise Exception('{} for field `{}` in {}'.format(
str(e), name, self.__class__
))
transformer = 'transform_{}'.format(name)
if hasattr(self, transformer) \
and callable(getattr(self, transformer)):
v = getattr(self, transformer)(v)
setattr(self, name, v)
used_kwargs.add(name)
else:
# supplying positional arguments
v = args[i]
if v is not None:
v = field.transform_in(v)
setattr(self, name, v)
i += 1
if len(args) > i:
self.LOGGER.warning(
'Too many positional arguments passed. '
'Expected %s max', len(self._fields)
)
return
extra_kwargs = set(kwargs.keys()) - used_kwargs
if len(extra_kwargs) > 0:
self.LOGGER.warning(
'Unknown fields passed: %s', extra_kwargs)
def __to_json__(self):
res = {}
for name, field in self._fields.items():
if field.private:
continue
v = getattr(self, name)
if v is not None:
v = field.transform_to_json(v)
transformer = 'transform_json_{}'.format(name)
if hasattr(self, transformer) \
and callable(getattr(self, transformer)):
v = getattr(self, transformer)(v)
res[name] = v
return res
def __repr__(self):
fields = ['{}={}'.format(k, getattr(self, k)) for k in self._fields]
return '<{} {}>'.format(self.__class__.__name__, ' '.join(fields))
@classmethod
def parse(cls, d: dict):
if d is None:
return None
return cls(**d)
@classmethod
def parse_list(cls, l: list):
if l is None or len(l) == 0:
return []
return list(map(lambda d: cls(**d), l))
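# Hedged usage sketch of the machinery above: declaring a Model subclass is
# enough for the metaclass to collect the Field attributes. The _ExampleUser
# model and its field names are hypothetical, purely illustrative.
class _ExampleUser(Model):
    id = IntField()
    name = StringField()
    password = StringField(private=True)
    created = UnixTimestampField()

# _ExampleUser.parse({'id': '1', 'name': 'alice', 'password': 'x',
#                     'created': 1500000000}) coerces each value through its
# field's transform_in(); __to_json__() then omits the private `password`
# field and serializes the rest.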
| |
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Manage the lifecycle of runtime processes and dispatch requests to them."""
import collections
import logging
import threading
import time
import google
from google.appengine.tools.devappserver2 import errors
NORMAL_REQUEST = 0
READY_REQUEST = 1 # A warmup request i.e. /_ah/warmup.
BACKGROUND_REQUEST = 2 # A request to create a background thread.
SHUTDOWN_REQUEST = 3 # A request to stop the module i.e. /_ah/stop.
# A request to send a command to the module for evaluation e.g. for use by
# interactive shells.
INTERACTIVE_REQUEST = 4
# Constants for use with FILE_CHANGE_INSTANCE_RESTART_POLICY. These constants
# determine whether an instance will be restarted if a file is changed in
# the application_root or any directory returned by
# InstanceFactory.get_restart_directories.
ALWAYS = 0 # Always restart instances.
AFTER_FIRST_REQUEST = 1 # Restart instances that have received >= 1 request.
NEVER = 2 # Never restart instances.
class CannotAcceptRequests(errors.Error):
"""An Instance cannot accept a request e.g. because it is quitting."""
class CannotQuitServingInstance(errors.Error):
"""An Instance cannot be quit e.g. because it is handling a request."""
class InvalidInstanceId(errors.Error):
"""The requested instance id is not serving."""
class RuntimeProxy(object):
"""Abstract base class for a subclass that manages a runtime process."""
def handle(self, environ, start_response, url_map, match, request_id,
request_type):
"""Serves this request by forwarding it to the runtime process.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler matching this request.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
request_type: The type of the request. See instance.*_REQUEST module
constants.
Yields:
A sequence of strings containing the body of the HTTP response.
"""
raise NotImplementedError()
def start(self):
"""Starts the runtime process and waits until it is ready to serve."""
raise NotImplementedError()
def quit(self):
"""Terminates the runtime process."""
raise NotImplementedError()
class Instance(object):
"""Handle requests through a RuntimeProxy."""
def __init__(self,
request_data,
instance_id,
runtime_proxy,
max_concurrent_requests,
max_background_threads=0,
expect_ready_request=False):
"""Initializer for Instance.
Args:
request_data: A wsgi_request_info.WSGIRequestInfo that will be provided
with request information for use by API stubs.
instance_id: A string or integer representing the unique (per module) id
of the instance.
runtime_proxy: A RuntimeProxy instance that will be used to handle
requests.
max_concurrent_requests: The maximum number of concurrent requests that
the instance can handle. If the instance does not support concurrent
requests then the value should be 1.
max_background_threads: The maximum number of background threads that
the instance can handle. If the instance does not support background
threads then the value should be 0.
expect_ready_request: If True then the instance will be sent a special
request (i.e. /_ah/warmup or /_ah/start) before it can handle external
requests.
"""
self._request_data = request_data
self._instance_id = instance_id
self._max_concurrent_requests = max_concurrent_requests
self._max_background_threads = max_background_threads
self._runtime_proxy = runtime_proxy
self._condition = threading.Condition()
self._num_outstanding_requests = 0 # Protected by self._condition.
self._num_running_background_threads = 0 # Protected by self._condition.
self._total_requests = 0 # Protected by self._condition.
self._started = False # Protected by self._condition.
self._quitting = False # Protected by self._condition.
self._quit = False # Protected by self._condition.
self._last_request_end_time = time.time() # Protected by self._condition.
self._expecting_ready_request = expect_ready_request
self._expecting_shutdown_request = False
self._healthy = True
    # A deque containing (start_time, end_time) 2-tuples representing completed
# requests. This is used to compute latency and qps statistics.
self._request_history = collections.deque() # Protected by self._condition.
def __repr__(self):
statuses = []
if not self._started:
statuses.append('not started')
if self._quitting:
statuses.append('quitting')
if self._quit:
statuses.append('quit')
if self._expecting_ready_request:
statuses.append('handling ready request')
if statuses:
status = ' [%s]' % ' '.join(statuses)
else:
status = ''
return '<Instance %s: %d/%d, total: %d%s>' % (
self._instance_id,
self._num_outstanding_requests,
self._max_concurrent_requests,
self._total_requests,
status)
@property
def instance_id(self):
"""The unique string or integer id for the Instance."""
return self._instance_id
@property
def total_requests(self):
"""The total number requests that the Instance has handled."""
with self._condition:
return self._total_requests
@property
def remaining_request_capacity(self):
"""The number of extra requests that the Instance can currently handle."""
with self._condition:
return self._max_concurrent_requests - self._num_outstanding_requests
@property
def remaining_background_thread_capacity(self):
"""The number of extra background threads the Instance can handle."""
with self._condition:
return self._max_background_threads - self._num_running_background_threads
@property
def num_outstanding_requests(self):
"""The number of requests that the Instance is currently handling."""
with self._condition:
return self._num_outstanding_requests
@property
def idle_seconds(self):
"""The number of seconds that the Instance has been idle.
Will be 0.0 if the Instance has not started.
"""
with self._condition:
if self._num_outstanding_requests:
return 0.0
elif not self._started:
return 0.0
else:
return time.time() - self._last_request_end_time
@property
def handling_ready_request(self):
"""True if the Instance is handling or will be sent a ready request."""
return self._expecting_ready_request
def get_latency_60s(self):
"""Returns the average request latency over the last 60s in seconds."""
with self._condition:
self._trim_request_history_to_60s()
if not self._request_history:
return 0.0
else:
total_latency = sum(
end - start for (start, end) in self._request_history)
return total_latency / len(self._request_history)
def get_qps_60s(self):
"""Returns the average queries-per-second over the last 60 seconds."""
with self._condition:
self._trim_request_history_to_60s()
if not self._request_history:
return 0.0
else:
return len(self._request_history) / 60.0
@property
def has_quit(self):
with self._condition:
return self._quit or self._quitting or self._expecting_shutdown_request
@property
def can_accept_requests(self):
"""True if .handle() will accept requests.
Does not consider outstanding request volume.
"""
with self._condition:
return (not self._quit and
not self._quitting and
not self._expecting_ready_request and
not self._expecting_shutdown_request and
self._started and
self._healthy)
def _trim_request_history_to_60s(self):
"""Removes obsolete entries from _outstanding_request_history."""
window_start = time.time() - 60
with self._condition:
while self._request_history:
t, _ = self._request_history[0]
if t < window_start:
self._request_history.popleft()
else:
break
def start(self):
"""Start the instance and the RuntimeProxy.
Returns:
      True if the Instance was started, or False if the Instance has already
      been quit or the attempt to start it failed.
"""
with self._condition:
if self._quit:
return False
try:
self._runtime_proxy.start()
except Exception as e: # pylint: disable=broad-except
logger = logging.getLogger()
if logger.isEnabledFor(logging.DEBUG):
logger.exception(e)
logger.error(str(e))
return False
with self._condition:
if self._quit:
self._runtime_proxy.quit()
return False
self._last_request_end_time = time.time()
self._started = True
logging.debug('Started instance: %s', self)
    # We are in development mode, so be optimistic about the health of the
    # instance so that it can respond to the first request instantly.
self.set_health(True)
return True
def quit(self, allow_async=False, force=False, expect_shutdown=False):
"""Quits the instance and the RuntimeProxy.
Args:
allow_async: Whether to enqueue the quit after all requests have completed
if the instance cannot be quit immediately.
force: Whether to force the instance to quit even if the instance is
currently handling a request. This overrides allow_async if True.
expect_shutdown: Whether the instance will be sent a shutdown request.
Raises:
CannotQuitServingInstance: if the Instance is currently handling a
request and allow_async is False.
"""
with self._condition:
if self._quit:
return
if not self._started:
self._quit = True
return
if expect_shutdown:
self._expecting_shutdown_request = True
return
if (self._num_outstanding_requests or
self._num_running_background_threads or
self._expecting_shutdown_request):
if not force:
if allow_async or expect_shutdown:
self._quitting = True
return
raise CannotQuitServingInstance()
self._quit = True
self._runtime_proxy.quit()
self._condition.notify_all()
logging.debug('Quit instance: %s', self)
def reserve_background_thread(self):
"""Reserves a background thread slot.
Raises:
CannotAcceptRequests: if the Instance is already handling the maximum
permissible number of background threads or is not in a state where it
can handle background threads.
"""
with self._condition:
if self._quit:
raise CannotAcceptRequests('Instance has been quit')
if not self._started:
raise CannotAcceptRequests('Instance has not started')
if not self.remaining_background_thread_capacity:
raise CannotAcceptRequests(
'Instance has no additional background thread capacity')
self._num_running_background_threads += 1
def handle(self, environ, start_response, url_map, match, request_id,
request_type):
"""Handles an HTTP request by forwarding it to the RuntimeProxy.
Args:
environ: An environ dict for the request as defined in PEP-333.
start_response: A function with semantics defined in PEP-333.
url_map: An appinfo.URLMap instance containing the configuration for the
handler matching this request.
match: A re.MatchObject containing the result of the matched URL pattern.
request_id: A unique string id associated with the request.
request_type: The type of the request. See *_REQUEST module constants.
Returns:
An iterable over strings containing the body of the HTTP response.
Raises:
CannotAcceptRequests: if the Instance has quit or is already handling the
maximum permissible number of concurrent requests.
"""
start_time = time.time()
with self._condition:
if self._quit:
raise CannotAcceptRequests('Instance has been quit')
if not self._started:
raise CannotAcceptRequests('Instance has not started')
if request_type not in (BACKGROUND_REQUEST, SHUTDOWN_REQUEST):
if self._quitting:
raise CannotAcceptRequests('Instance is shutting down')
if self._expecting_ready_request and request_type != READY_REQUEST:
raise CannotAcceptRequests('Instance is waiting for ready request')
if not self.remaining_request_capacity:
raise CannotAcceptRequests('Instance has no additional capacity')
self._num_outstanding_requests += 1
self._request_data.set_request_instance(request_id, self)
self._total_requests += 1
try:
# Force the generator to complete so the code in the finally block runs
# at the right time.
return list(self._runtime_proxy.handle(environ,
start_response,
url_map,
match,
request_id,
request_type))
finally:
logging.debug('Request handled by %s in %0.4fs',
self, time.time() - start_time)
with self._condition:
if request_type == READY_REQUEST:
self._expecting_ready_request = False
if request_type == BACKGROUND_REQUEST:
self._num_running_background_threads -= 1
elif request_type != SHUTDOWN_REQUEST:
self._num_outstanding_requests -= 1
self._last_request_end_time = time.time()
self._trim_request_history_to_60s()
self._request_history.append((start_time, self._last_request_end_time))
if request_type == READY_REQUEST:
self._condition.notify(self._max_concurrent_requests)
elif request_type == SHUTDOWN_REQUEST:
self._expecting_shutdown_request = False
self.quit(allow_async=True)
elif request_type == NORMAL_REQUEST:
self._condition.notify()
if (not self._num_outstanding_requests and
not self._num_running_background_threads):
if self._quitting:
self.quit()
def wait(self, timeout_time):
"""Wait for this instance to have capacity to serve a request.
Args:
timeout_time: A float containing a time in seconds since the epoch to wait
until before timing out.
Returns:
True if the instance has request capacity or False if the timeout time was
reached or the instance has been quit.
"""
with self._condition:
while (time.time() < timeout_time and not
(self.remaining_request_capacity and self.can_accept_requests)
and not self.has_quit):
self._condition.wait(timeout_time - time.time())
return bool(self.remaining_request_capacity and self.can_accept_requests)
def set_health(self, health):
self._healthy = health
@property
def healthy(self):
return self._healthy
class InstanceFactory(object):
"""An abstract factory that creates instances for an InstancePool.
Attributes:
max_concurrent_requests: The maximum number of concurrent requests that
Instances created by this factory can handle. If the Instances do not
support concurrent requests then the value should be 1.
    START_URL_MAP: An appinfo.URLMap that should be used as the default
        /_ah/start handler if no user-specified script handler matches.
    WARMUP_URL_MAP: An appinfo.URLMap that should be used as the default
        /_ah/warmup handler if no user-specified script handler matches.
"""
START_URL_MAP = None
WARMUP_URL_MAP = None
# If True then the runtime supports interactive command evaluation e.g. for
# use in interactive shells.
SUPPORTS_INTERACTIVE_REQUESTS = False
# Controls how instances are restarted when a file relevant to the application
  # is changed. Possible values: NEVER, AFTER_FIRST_REQUEST, ALWAYS.
FILE_CHANGE_INSTANCE_RESTART_POLICY = None
def __init__(self, request_data, max_concurrent_requests,
max_background_threads=0):
"""Initializer for InstanceFactory.
Args:
request_data: A wsgi_request_info.WSGIRequestInfo instance that will be
populated with Instance data for use by the API stubs.
max_concurrent_requests: The maximum number of concurrent requests that
Instances created by this factory can handle. If the Instances do not
support concurrent requests then the value should be 1.
max_background_threads: The maximum number of background threads that
the instance can handle. If the instance does not support background
threads then the value should be 0.
"""
self.request_data = request_data
self.max_concurrent_requests = max_concurrent_requests
self.max_background_threads = max_background_threads
def get_restart_directories(self):
"""Returns a list of directories changes in which should trigger a restart.
Returns:
A list of directory paths. Changes (i.e. files added, deleted or modified)
in these directories will trigger the restart of all instances created
with this factory.
"""
return []
def files_changed(self):
"""Called when a file relevant to the factory *might* have changed."""
def configuration_changed(self, config_changes):
"""Called when the configuration of the module has changed.
Args:
      config_changes: A set containing the changes that occurred. See the
*_CHANGED constants in the application_configuration module.
"""
def new_instance(self, instance_id, expect_ready_request=False):
"""Create and return a new Instance.
Args:
instance_id: A string or integer representing the unique (per module) id
of the instance.
expect_ready_request: If True then the instance will be sent a special
request (i.e. /_ah/warmup or /_ah/start) before it can handle external
requests.
Returns:
The newly created instance.Instance.
"""
raise NotImplementedError()
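# Hedged lifecycle sketch (illustrative only, not part of this module): a
# trivial RuntimeProxy plus the calls a scheduler would typically make. The
# names _EchoProxy and request_data are hypothetical.
class _EchoProxy(RuntimeProxy):

  def handle(self, environ, start_response, url_map, match, request_id,
             request_type):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    yield 'hello'

  def start(self):
    pass

  def quit(self):
    pass

# inst = Instance(request_data, 'instance-0', _EchoProxy(),
#                 max_concurrent_requests=1)
# inst.start()                 # blocks until the runtime proxy is serving
# body = inst.handle(environ, start_response, url_map, match, 'req-1',
#                    NORMAL_REQUEST)
# inst.quit(allow_async=True)  # quits once outstanding requests complete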
| |
from functools import partial
import gym
from gym.spaces import Box, Dict, Discrete
import numpy as np
import unittest
import ray
from ray.rllib.models import ActionDistribution, ModelCatalog, MODEL_DEFAULTS
from ray.rllib.models.preprocessors import NoPreprocessor, Preprocessor
from ray.rllib.models.tf.tf_action_dist import MultiActionDistribution, \
TFActionDistribution
from ray.rllib.models.tf.tf_modelv2 import TFModelV2
from ray.rllib.utils.annotations import override
from ray.rllib.utils.framework import try_import_tf, try_import_torch
from ray.rllib.utils.test_utils import framework_iterator
tf1, tf, tfv = try_import_tf()
torch, _ = try_import_torch()
class CustomPreprocessor(Preprocessor):
def _init_shape(self, obs_space, options):
return [1]
class CustomPreprocessor2(Preprocessor):
def _init_shape(self, obs_space, options):
return [1]
class CustomModel(TFModelV2):
def _build_layers(self, *args):
return tf.constant([[0] * 5]), None
class CustomActionDistribution(TFActionDistribution):
def __init__(self, inputs, model):
# Store our output shape.
custom_model_config = model.model_config["custom_model_config"]
if "output_dim" in custom_model_config:
self.output_shape = tf.concat(
[tf.shape(inputs)[:1], custom_model_config["output_dim"]],
axis=0)
else:
self.output_shape = tf.shape(inputs)
super().__init__(inputs, model)
@staticmethod
def required_model_output_shape(action_space, model_config=None):
        custom_model_config = model_config["custom_model_config"] or {}
        # `or {}` above guarantees a dict, so no extra None check is needed.
        if custom_model_config.get("output_dim"):
            return custom_model_config.get("output_dim")
return action_space.shape
@override(TFActionDistribution)
def _build_sample_op(self):
return tf.random.uniform(self.output_shape)
@override(ActionDistribution)
def logp(self, x):
return tf.zeros(self.output_shape)
class CustomMultiActionDistribution(MultiActionDistribution):
@override(MultiActionDistribution)
def entropy(self):
raise NotImplementedError
class TestModelCatalog(unittest.TestCase):
def tearDown(self):
ray.shutdown()
def test_custom_preprocessor(self):
ray.init(object_store_memory=1000 * 1024 * 1024)
ModelCatalog.register_custom_preprocessor("foo", CustomPreprocessor)
ModelCatalog.register_custom_preprocessor("bar", CustomPreprocessor2)
env = gym.make("CartPole-v0")
p1 = ModelCatalog.get_preprocessor(env, {"custom_preprocessor": "foo"})
self.assertEqual(str(type(p1)), str(CustomPreprocessor))
p2 = ModelCatalog.get_preprocessor(env, {"custom_preprocessor": "bar"})
self.assertEqual(str(type(p2)), str(CustomPreprocessor2))
p3 = ModelCatalog.get_preprocessor(env)
self.assertEqual(type(p3), NoPreprocessor)
def test_default_models(self):
ray.init(object_store_memory=1000 * 1024 * 1024)
for fw in framework_iterator(frameworks=("jax", "tf", "tf2", "torch")):
obs_space = Box(0, 1, shape=(3, ), dtype=np.float32)
p1 = ModelCatalog.get_model_v2(
obs_space=obs_space,
action_space=Discrete(5),
num_outputs=5,
model_config={},
framework=fw,
)
self.assertTrue("FullyConnectedNetwork" in type(p1).__name__)
# Do a test forward pass.
obs = np.array([obs_space.sample()])
if fw == "torch":
obs = torch.from_numpy(obs)
out, state_outs = p1({"obs": obs})
self.assertTrue(out.shape == (1, 5))
self.assertTrue(state_outs == [])
# No Conv2Ds for JAX yet.
if fw != "jax":
p2 = ModelCatalog.get_model_v2(
obs_space=Box(0, 1, shape=(84, 84, 3), dtype=np.float32),
action_space=Discrete(5),
num_outputs=5,
model_config={},
framework=fw,
)
self.assertTrue("VisionNetwork" in type(p2).__name__)
def test_custom_model(self):
ray.init(object_store_memory=1000 * 1024 * 1024)
ModelCatalog.register_custom_model("foo", CustomModel)
p1 = ModelCatalog.get_model_v2(
obs_space=Box(0, 1, shape=(3, ), dtype=np.float32),
action_space=Discrete(5),
num_outputs=5,
model_config={"custom_model": "foo"})
self.assertEqual(str(type(p1)), str(CustomModel))
def test_custom_action_distribution(self):
class Model():
pass
ray.init(
object_store_memory=1000 * 1024 * 1024,
ignore_reinit_error=True) # otherwise fails sometimes locally
# registration
ModelCatalog.register_custom_action_dist("test",
CustomActionDistribution)
action_space = Box(0, 1, shape=(5, 3), dtype=np.float32)
# test retrieving it
model_config = MODEL_DEFAULTS.copy()
model_config["custom_action_dist"] = "test"
dist_cls, param_shape = ModelCatalog.get_action_dist(
action_space, model_config)
self.assertEqual(str(dist_cls), str(CustomActionDistribution))
self.assertEqual(param_shape, action_space.shape)
# test the class works as a distribution
dist_input = tf1.placeholder(tf.float32, (None, ) + param_shape)
model = Model()
model.model_config = model_config
dist = dist_cls(dist_input, model=model)
self.assertEqual(dist.sample().shape[1:], dist_input.shape[1:])
self.assertIsInstance(dist.sample(), tf.Tensor)
with self.assertRaises(NotImplementedError):
dist.entropy()
# test passing the options to it
model_config["custom_model_config"].update({"output_dim": (3, )})
dist_cls, param_shape = ModelCatalog.get_action_dist(
action_space, model_config)
self.assertEqual(param_shape, (3, ))
dist_input = tf1.placeholder(tf.float32, (None, ) + param_shape)
model.model_config = model_config
dist = dist_cls(dist_input, model=model)
self.assertEqual(dist.sample().shape[1:], dist_input.shape[1:])
self.assertIsInstance(dist.sample(), tf.Tensor)
with self.assertRaises(NotImplementedError):
dist.entropy()
def test_custom_multi_action_distribution(self):
class Model():
pass
ray.init(
object_store_memory=1000 * 1024 * 1024,
ignore_reinit_error=True) # otherwise fails sometimes locally
# registration
ModelCatalog.register_custom_action_dist(
"test", CustomMultiActionDistribution)
s1 = Discrete(5)
s2 = Box(0, 1, shape=(3, ), dtype=np.float32)
spaces = dict(action_1=s1, action_2=s2)
action_space = Dict(spaces)
# test retrieving it
model_config = MODEL_DEFAULTS.copy()
model_config["custom_action_dist"] = "test"
dist_cls, param_shape = ModelCatalog.get_action_dist(
action_space, model_config)
self.assertIsInstance(dist_cls, partial)
self.assertEqual(param_shape, s1.n + 2 * s2.shape[0])
# test the class works as a distribution
dist_input = tf1.placeholder(tf.float32, (None, param_shape))
model = Model()
model.model_config = model_config
dist = dist_cls(dist_input, model=model)
self.assertIsInstance(dist.sample(), dict)
self.assertIn("action_1", dist.sample())
self.assertIn("action_2", dist.sample())
self.assertEqual(dist.sample()["action_1"].dtype, tf.int64)
self.assertEqual(dist.sample()["action_2"].shape[1:], s2.shape)
with self.assertRaises(NotImplementedError):
dist.entropy()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
| |
# -*- coding: utf-8 -*-
from ionyweb.administration.views import IsAdminView
from djangorestframework.response import Response
from djangorestframework import status
# from django.contrib.contenttypes.models import ContentType
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.template import RequestContext
from ionyweb.utils import ContentTypeAccessor as CTA
from ionyweb.plugin.models import PluginRelation
from ionyweb.page.forms import PageWAForm
from ionyweb.administration.utils import MESSAGES
from ionyweb.website.rendering import RenderingContext
class PagesView(IsAdminView):
"""
Return a list of all pages installed
"""
def get(self, request):
"""
Return the list of pages
"""
pages = request.website.pages.all()
html = render_to_string('administration/page/page-list.html',
{'pages': pages,},
context_instance = RequestContext(request))
response = Response(status.HTTP_200_OK, {"html": html})
return self.render(response)
# class PageContentView(IsAdminView):
# """
# Return the content of #main-content in HTML. Used mainly for ajax updates.
# """
# def get(self, request):
# response = Response(status.HTTP_200_OK, {"html": request.page.render_page(request).content,
# 'css_file': request.page.get_layout_css_file(), })
# return self.render(response)
#
# class PageDuplicateView(IsAdminView):
# DON'T USE DEEPCOPY BUT DO IT HERE
# def get(self, request, pk=None):
# if pk:
# page = get_object_or_404(request.website.pages, pk=pk)
# new_page = page.deepcopy()
# else:
# raise Http404
# pages = request.website.pages.all()
# html = render_to_string('administration/page/page-list.html',
# {'pages': pages,},
# context_instance = RequestContext(request))
# response = Response(status.HTTP_200_OK,
# {"html": html})
# return self.render(response)
class PageLayoutView(IsAdminView):
def get(self, request):
layout_section_slug = request.GET.get('layout_section_slug', None)
if not layout_section_slug:
response = Response(status.HTTP_400_BAD_REQUEST,
{"msg": MESSAGES.get('default_error', "")})
return self.render(response)
rendering_context = RenderingContext(request)
html_rendering = rendering_context.get_html_layout(layout_section_slug)
response = Response(status.HTTP_200_OK,
{'html': html_rendering,
'msg': MESSAGES.get('items_edit_success', "")})
return self.render(response)
class PageSelectApp(IsAdminView):
pass
class PageView(IsAdminView):
"""
Modify a Page object
"""
def get(self, request, pk=None):
"""
        Return the creation form, or the edit form if pk is provided
"""
if pk is None:
default_data = {'app_page_type': CTA().get_for_names("page_text", "pageapp_text")}
if 'parent' in request.GET and request.GET['parent'] != '0':
default_data['parent'] = request.GET['parent']
form = PageWAForm(initial=default_data)
html = render_to_string('administration/page/page-create.html',
{'form': form,},
context_instance = RequestContext(request))
response = Response(status.HTTP_200_OK, {"html": html})
return self.render(response)
else:
page = get_object_or_404(request.website.pages, pk=pk)
form = PageWAForm(instance=page)
html = render_to_string('administration/page/page-edit.html',
{'form': form,
'page': page},
context_instance = RequestContext(request))
response = Response(status.HTTP_200_OK, {"html": html})
return self.render(response)
def put(self, request, pk=None):
post_values = self.DATA.copy()
post_values['website'] = request.website.id
form = PageWAForm(post_values)
if form.is_valid():
page = form.save()
# Add the new page on auto_display PluginRelation
plugins_relation = PluginRelation.objects.filter(display_on_new_pages=True, pages__website=request.website)
for plugin_relation in plugins_relation:
plugin_relation.pages.add(page)
response = Response(status.HTTP_202_ACCEPTED,
{"msg": MESSAGES.get('redirection', ""),
'location': page.get_absolute_url()})
else:
content = render_to_string('administration/page/page-create.html',
{'form': form,},
context_instance = RequestContext(request))
response = Response(status.HTTP_400_BAD_REQUEST,
{"html": content,
"msg": MESSAGES.get('default_error', "")})
return self.render(response)
def post(self, request, pk):
"""
Modify the page
"""
        # Get the page which is currently being updated
page = get_object_or_404(request.website.pages, pk=pk)
# Saving url of current page
old_url_current_page = request.page.get_absolute_url()
# Settings Refresh
refresh_manager = False
refresh_page = False
msg_user = None
# ----------------------
# Moving Page Management
# ----------------------
if 'move' in request.POST:
if 'previous' in request.POST:
page_top = get_object_or_404(request.website.pages, pk=request.POST['previous'])
page.move_to(page_top, 'right')
else:
if 'next' in request.POST:
page_top = get_object_or_404(request.website.pages, pk=request.POST['next'])
page.move_to(page_top, 'left')
else:
if 'parent' in request.POST:
page_top = get_object_or_404(request.website.pages, pk=request.POST['parent'])
page.move_to(page_top, 'first-child')
# We save updates.
page.save()
            # Request a refresh of the page manager
            refresh_manager = True
            # Message for user
msg_user = MESSAGES.get('items_move_success', '')
# ----------------------
# Settings page as draft
# ----------------------
elif 'draft' in request.POST:
page.draft = not page.draft
page.save()
refresh_manager = True
msg_user = MESSAGES.get('page_draft_toggle', '')
# ----------------------
# Updating settings page
# ----------------------
else:
# Get POST values
post_values = request.POST.copy()
post_values['website'] = request.website.id
# Creation of form
form = PageWAForm(post_values, instance=page)
if form.is_valid():
page = form.save()
                # Request a refresh of the page content
if page == request.page:
refresh_page = True
# Message for user
msg_user = MESSAGES.get('app_edit_success', '')
else:
# We reload the edit form with errors
content = render_to_string('administration/page/page-edit.html',
{'form': form,
'page': page},
context_instance = RequestContext(request))
response = Response(status.HTTP_203_NON_AUTHORITATIVE_INFORMATION,
{"html": content,
"msg": MESSAGES.get('default_error', "")})
return self.render(response)
# ---------------
# Refresh Website
# ---------------
# Update cache for current page displayed.
request.page = get_object_or_404(request.website.pages, pk=request.page.id)
        # Check whether we need to reload the current page:
        # either its url changed or a content refresh was requested.
new_url_current_page = request.page.get_absolute_url()
if old_url_current_page != new_url_current_page or refresh_page:
response = Response(status.HTTP_202_ACCEPTED,
{'location': new_url_current_page})
return self.render(response)
# Else we refresh only page manager and navigation.
# Page manager:
if refresh_manager:
pages_list = request.website.pages.all()
page_manager_html = render_to_string('administration/page/page-list.html',
{'pages': pages_list,},
context_instance = RequestContext(request))
else:
page_manager_html = None
navigation_html = RenderingContext(request).html_navigation
# Response
response = Response(status.HTTP_200_OK,
{"manager_html": page_manager_html,
"navigation_html": navigation_html,
# "page_html": page_content_html,
"msg": msg_user})
return self.render(response)
def delete(self, request, pk):
page = get_object_or_404(request.website.pages, pk=pk)
url_home_page = request.website.get_url_home_page()
# We can't delete the home page
if page.get_absolute_url() == url_home_page:
response = Response(status.HTTP_400_BAD_REQUEST,
{"msg": MESSAGES.get('delete_home_page_error', "")})
return self.render(response)
# Need redirection if page is currently displayed
if request.page == page:
redirection = True
else:
redirection = False
# Deleting page
page.delete()
# Make response
if redirection:
response = Response(status.HTTP_202_ACCEPTED, {'location': url_home_page})
else:
# Refresh Menu navigation:
navigation_html = RenderingContext(request).html_navigation
response = Response(status.HTTP_200_OK,
{"id": pk,
"navigation_html": navigation_html,
"msg": MESSAGES.get('page_delete_success', "")})
# Send response
return self.render(response)
| |
"""SCons.Builder
Builder object subsystem.
A Builder object is a callable that encapsulates information about how
to execute actions to create a target Node (file) from source Nodes
(files), and how to create those dependencies for tracking.
The main entry point here is the Builder() factory method. This provides
a procedural interface that creates the right underlying Builder object
based on the keyword arguments supplied and the types of the arguments.
The goal is for this external interface to be simple enough that the
vast majority of users can create new Builders as necessary to support
building new types of files in their configurations, without having to
dive any deeper into this subsystem.
The base class here is BuilderBase. This is a concrete base class which
does, in fact, represent the Builder objects that we (or users) create.
There is also a proxy that looks like a Builder:
CompositeBuilder
This proxies for a Builder with an action that is actually a
dictionary that knows how to map file suffixes to a specific
action. This is so that we can invoke different actions
(compilers, compile options) for different flavors of source
files.
Builders and their proxies have the following public interface methods
used by other modules:
__call__()
THE public interface. Calling a Builder object (with the
use of internal helper methods) sets up the target and source
dependencies, appropriate mapping to a specific action, and the
environment manipulation necessary for overridden construction
variable. This also takes care of warning about possible mistakes
in keyword arguments.
add_emitter()
Adds an emitter for a specific file suffix, used by some Tool
modules to specify that (for example) a yacc invocation on a .y
can create a .h *and* a .c file.
add_action()
Adds an action for a specific file suffix, heavily used by
Tool modules to add their specific action(s) for turning
a source file into an object file to the global static
and shared object file Builders.
There are the following methods for internal use within this module:
_execute()
        The internal method that handles the heavy lifting when a
Builder is called. This is used so that the __call__() methods
can set up warning about possible mistakes in keyword-argument
overrides, and *then* execute all of the steps necessary so that
the warnings only occur once.
get_name()
Returns the Builder's name within a specific Environment,
primarily used to try to return helpful information in error
messages.
adjust_suffix()
get_prefix()
get_suffix()
get_src_suffix()
set_src_suffix()
Miscellaneous stuff for handling the prefix and suffix
manipulation we use in turning source file names into target
file names.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Builder.py 3603 2008/10/10 05:46:45 scons"
import UserDict
import UserList
import SCons.Action
from SCons.Debug import logInstanceCreation
from SCons.Errors import InternalError, UserError
import SCons.Executor
import SCons.Memoize
import SCons.Node
import SCons.Node.FS
import SCons.Util
import SCons.Warnings
class _Null:
pass
_null = _Null
class DictCmdGenerator(SCons.Util.Selector):
"""This is a callable class that can be used as a
command generator function. It holds on to a dictionary
mapping file suffixes to Actions. It uses that dictionary
to return the proper action based on the file suffix of
the source file."""
def __init__(self, dict=None, source_ext_match=1):
SCons.Util.Selector.__init__(self, dict)
self.source_ext_match = source_ext_match
def src_suffixes(self):
return self.keys()
def add_action(self, suffix, action):
"""Add a suffix-action pair to the mapping.
"""
self[suffix] = action
def __call__(self, target, source, env, for_signature):
if not source:
return []
if self.source_ext_match:
ext = None
for src in map(str, source):
my_ext = SCons.Util.splitext(src)[1]
if ext and my_ext != ext:
raise UserError("While building `%s' from `%s': Cannot build multiple sources with different extensions: %s, %s" % (repr(map(str, target)), src, ext, my_ext))
ext = my_ext
else:
ext = SCons.Util.splitext(str(source[0]))[1]
if not ext:
raise UserError("While building `%s': Cannot deduce file extension from source files: %s" % (repr(map(str, target)), repr(map(str, source))))
try:
ret = SCons.Util.Selector.__call__(self, env, source)
except KeyError, e:
raise UserError("Ambiguous suffixes after environment substitution: %s == %s == %s" % (e[0], e[1], e[2]))
if ret is None:
raise UserError("While building `%s' from `%s': Don't know how to build from a source file with suffix `%s'. Expected a suffix in this list: %s." % \
(repr(map(str, target)), repr(map(str, source)), ext, repr(self.keys())))
return ret
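# Hedged sketch (the commands below are hypothetical): a suffix-to-action
# dictionary like this is what the Builder() factory wraps in a
# DictCmdGenerator, so a single Builder can compile several source flavors.
#
#     obj = Builder(action = {'.c'   : 'cc -c -o $TARGET $SOURCE',
#                             '.cpp' : 'c++ -c -o $TARGET $SOURCE'},
#                   suffix = '.o')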
class CallableSelector(SCons.Util.Selector):
"""A callable dictionary that will, in turn, call the value it
finds if it can."""
def __call__(self, env, source):
value = SCons.Util.Selector.__call__(self, env, source)
if callable(value):
value = value(env, source)
return value
class DictEmitter(SCons.Util.Selector):
"""A callable dictionary that maps file suffixes to emitters.
When called, it finds the right emitter in its dictionary for the
suffix of the first source file, and calls that emitter to get the
right lists of targets and sources to return. If there's no emitter
for the suffix in its dictionary, the original target and source are
returned.
"""
def __call__(self, target, source, env):
emitter = SCons.Util.Selector.__call__(self, env, source)
if emitter:
target, source = emitter(target, source, env)
return (target, source)
class ListEmitter(UserList.UserList):
"""A callable list of emitters that calls each in sequence,
returning the result.
"""
def __call__(self, target, source, env):
for e in self.data:
target, source = e(target, source, env)
return (target, source)
# These are common errors when calling a Builder;
# they are similar to the 'target' and 'source' keyword args to builders,
# so we issue warnings when we see them. The warnings can, of course,
# be disabled.
misleading_keywords = {
'targets' : 'target',
'sources' : 'source',
}
class OverrideWarner(UserDict.UserDict):
"""A class for warning about keyword arguments that we use as
overrides in a Builder call.
This class exists to handle the fact that a single Builder call
can actually invoke multiple builders. This class only emits the
warnings once, no matter how many Builders are invoked.
"""
def __init__(self, dict):
UserDict.UserDict.__init__(self, dict)
if __debug__: logInstanceCreation(self, 'Builder.OverrideWarner')
self.already_warned = None
def warn(self):
if self.already_warned:
return
for k in self.keys():
if misleading_keywords.has_key(k):
alt = misleading_keywords[k]
msg = "Did you mean to use `%s' instead of `%s'?" % (alt, k)
SCons.Warnings.warn(SCons.Warnings.MisleadingKeywordsWarning, msg)
self.already_warned = 1
def Builder(**kw):
"""A factory for builder objects."""
composite = None
if kw.has_key('generator'):
if kw.has_key('action'):
raise UserError, "You must not specify both an action and a generator."
kw['action'] = SCons.Action.CommandGeneratorAction(kw['generator'])
del kw['generator']
elif kw.has_key('action'):
source_ext_match = kw.get('source_ext_match', 1)
if kw.has_key('source_ext_match'):
del kw['source_ext_match']
if SCons.Util.is_Dict(kw['action']):
composite = DictCmdGenerator(kw['action'], source_ext_match)
kw['action'] = SCons.Action.CommandGeneratorAction(composite)
kw['src_suffix'] = composite.src_suffixes()
else:
kw['action'] = SCons.Action.Action(kw['action'])
if kw.has_key('emitter'):
emitter = kw['emitter']
if SCons.Util.is_String(emitter):
# This allows users to pass in an Environment
# variable reference (like "$FOO") as an emitter.
# We will look in that Environment variable for
# a callable to use as the actual emitter.
var = SCons.Util.get_environment_var(emitter)
if not var:
raise UserError, "Supplied emitter '%s' does not appear to refer to an Environment variable" % emitter
kw['emitter'] = EmitterProxy(var)
elif SCons.Util.is_Dict(emitter):
kw['emitter'] = DictEmitter(emitter)
elif SCons.Util.is_List(emitter):
kw['emitter'] = ListEmitter(emitter)
result = apply(BuilderBase, (), kw)
if not composite is None:
result = CompositeBuilder(result, composite)
return result
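# Hedged usage sketch of the factory above (never called from this module;
# the environment, command and file names are hypothetical). Targets and
# sources are passed as lists, as the __call__() contract expects.
def _example_builder_usage(env):
    copy = Builder(action = 'cp $SOURCE $TARGET', suffix = '.out')
    return copy(env, target = ['result.out'], source = ['input.in'])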
def _node_errors(builder, env, tlist, slist):
"""Validate that the lists of target and source nodes are
legal for this builder and environment. Raise errors or
issue warnings as appropriate.
"""
# First, figure out if there are any errors in the way the targets
# were specified.
for t in tlist:
if t.side_effect:
raise UserError, "Multiple ways to build the same target were specified for: %s" % t
if t.has_explicit_builder():
if not t.env is None and not t.env is env:
action = t.builder.action
t_contents = action.get_contents(tlist, slist, t.env)
contents = action.get_contents(tlist, slist, env)
if t_contents == contents:
msg = "Two different environments were specified for target %s,\n\tbut they appear to have the same action: %s" % (t, action.genstring(tlist, slist, t.env))
SCons.Warnings.warn(SCons.Warnings.DuplicateEnvironmentWarning, msg)
else:
msg = "Two environments with different actions were specified for the same target: %s" % t
raise UserError, msg
if builder.multi:
if t.builder != builder:
msg = "Two different builders (%s and %s) were specified for the same target: %s" % (t.builder.get_name(env), builder.get_name(env), t)
raise UserError, msg
if t.get_executor().targets != tlist:
msg = "Two different target lists have a target in common: %s (from %s and from %s)" % (t, map(str, t.get_executor().targets), map(str, tlist))
raise UserError, msg
elif t.sources != slist:
msg = "Multiple ways to build the same target were specified for: %s (from %s and from %s)" % (t, map(str, t.sources), map(str, slist))
raise UserError, msg
if builder.single_source:
if len(slist) > 1:
raise UserError, "More than one source given for single-source builder: targets=%s sources=%s" % (map(str,tlist), map(str,slist))
class EmitterProxy:
"""This is a callable class that can act as a
Builder emitter. It holds on to a string that
is a key into an Environment dictionary, and will
look there at actual build time to see if it holds
a callable. If so, we will call that as the actual
emitter."""
def __init__(self, var):
self.var = SCons.Util.to_String(var)
def __call__(self, target, source, env):
emitter = self.var
# Recursively substitute the variable.
# We can't use env.subst() because it deals only
# in strings. Maybe we should change that?
while SCons.Util.is_String(emitter) and env.has_key(emitter):
emitter = env[emitter]
if callable(emitter):
target, source = emitter(target, source, env)
elif SCons.Util.is_List(emitter):
for e in emitter:
target, source = e(target, source, env)
return (target, source)
def __cmp__(self, other):
return cmp(self.var, other.var)
class BuilderBase:
"""Base class for Builders, objects that create output
nodes (files) from input nodes (files).
"""
if SCons.Memoize.use_memoizer:
__metaclass__ = SCons.Memoize.Memoized_Metaclass
memoizer_counters = []
def __init__(self, action = None,
prefix = '',
suffix = '',
src_suffix = '',
target_factory = None,
source_factory = None,
target_scanner = None,
source_scanner = None,
emitter = None,
multi = 0,
env = None,
single_source = 0,
name = None,
chdir = _null,
is_explicit = 1,
src_builder = None,
ensure_suffix = False,
**overrides):
if __debug__: logInstanceCreation(self, 'Builder.BuilderBase')
self._memo = {}
self.action = action
self.multi = multi
if SCons.Util.is_Dict(prefix):
prefix = CallableSelector(prefix)
self.prefix = prefix
if SCons.Util.is_Dict(suffix):
suffix = CallableSelector(suffix)
self.env = env
self.single_source = single_source
if overrides.has_key('overrides'):
SCons.Warnings.warn(SCons.Warnings.DeprecatedWarning,
"The \"overrides\" keyword to Builder() creation has been deprecated;\n" +\
"\tspecify the items as keyword arguments to the Builder() call instead.")
overrides.update(overrides['overrides'])
del overrides['overrides']
if overrides.has_key('scanner'):
SCons.Warnings.warn(SCons.Warnings.DeprecatedWarning,
"The \"scanner\" keyword to Builder() creation has been deprecated;\n"
"\tuse: source_scanner or target_scanner as appropriate.")
del overrides['scanner']
self.overrides = overrides
self.set_suffix(suffix)
self.set_src_suffix(src_suffix)
self.ensure_suffix = ensure_suffix
self.target_factory = target_factory
self.source_factory = source_factory
self.target_scanner = target_scanner
self.source_scanner = source_scanner
self.emitter = emitter
# Optional Builder name should only be used for Builders
# that don't get attached to construction environments.
if name:
self.name = name
self.executor_kw = {}
if not chdir is _null:
self.executor_kw['chdir'] = chdir
self.is_explicit = is_explicit
if src_builder is None:
src_builder = []
elif not SCons.Util.is_List(src_builder):
src_builder = [ src_builder ]
self.src_builder = src_builder
def __nonzero__(self):
raise InternalError, "Do not test for the Node.builder attribute directly; use Node.has_builder() instead"
def get_name(self, env):
"""Attempts to get the name of the Builder.
Look at the BUILDERS variable of env, expecting it to be a
dictionary containing this Builder, and return the key of the
dictionary. If there's no key, then return a directly-configured
name (if there is one) or the name of the class (by default)."""
try:
index = env['BUILDERS'].values().index(self)
return env['BUILDERS'].keys()[index]
except (AttributeError, KeyError, TypeError, ValueError):
try:
return self.name
except AttributeError:
return str(self.__class__)
def __cmp__(self, other):
return cmp(self.__dict__, other.__dict__)
def splitext(self, path, env=None):
if not env:
env = self.env
if env:
matchsuf = filter(lambda S,path=path: path[-len(S):] == S,
self.src_suffixes(env))
if matchsuf:
suf = max(map(None, map(len, matchsuf), matchsuf))[1]
return [path[:-len(suf)], path[-len(suf):]]
return SCons.Util.splitext(path)
def get_single_executor(self, env, tlist, slist, executor_kw):
if not self.action:
raise UserError, "Builder %s must have an action to build %s."%(self.get_name(env or self.env), map(str,tlist))
return self.action.get_executor(env or self.env,
[], # env already has overrides
tlist,
slist,
executor_kw)
def get_multi_executor(self, env, tlist, slist, executor_kw):
try:
executor = tlist[0].get_executor(create = 0)
except (AttributeError, IndexError):
return self.get_single_executor(env, tlist, slist, executor_kw)
else:
executor.add_sources(slist)
return executor
def _adjustixes(self, files, pre, suf, ensure_suffix=False):
if not files:
return []
result = []
if not SCons.Util.is_List(files):
files = [files]
for f in files:
if SCons.Util.is_String(f):
f = SCons.Util.adjustixes(f, pre, suf, ensure_suffix)
result.append(f)
return result
def _create_nodes(self, env, target = None, source = None):
"""Create and return lists of target and source nodes.
"""
src_suf = self.get_src_suffix(env)
target_factory = env.get_factory(self.target_factory)
source_factory = env.get_factory(self.source_factory)
source = self._adjustixes(source, None, src_suf)
slist = env.arg2nodes(source, source_factory)
pre = self.get_prefix(env, slist)
suf = self.get_suffix(env, slist)
if target is None:
try:
t_from_s = slist[0].target_from_source
except AttributeError:
raise UserError("Do not know how to create a target from source `%s'" % slist[0])
except IndexError:
tlist = []
else:
splitext = lambda S,self=self,env=env: self.splitext(S,env)
tlist = [ t_from_s(pre, suf, splitext) ]
else:
target = self._adjustixes(target, pre, suf, self.ensure_suffix)
tlist = env.arg2nodes(target, target_factory, target=target, source=source)
if self.emitter:
# The emitter is going to do str(node), but because we're
# being called *from* a builder invocation, the new targets
# don't yet have a builder set on them and will look like
# source files. Fool the emitter's str() calls by setting
# up a temporary builder on the new targets.
new_targets = []
for t in tlist:
if not t.is_derived():
t.builder_set(self)
new_targets.append(t)
orig_tlist = tlist[:]
orig_slist = slist[:]
target, source = self.emitter(target=tlist, source=slist, env=env)
# Now delete the temporary builders that we attached to any
# new targets, so that _node_errors() doesn't do weird stuff
# to them because it thinks they already have builders.
for t in new_targets:
if t.builder is self:
# Only delete the temporary builder if the emitter
# didn't change it on us.
t.builder_set(None)
# Have to call arg2nodes yet again, since it is legal for
# emitters to spit out strings as well as Node instances.
tlist = env.arg2nodes(target, target_factory,
target=orig_tlist, source=orig_slist)
slist = env.arg2nodes(source, source_factory,
target=orig_tlist, source=orig_slist)
return tlist, slist
def _execute(self, env, target, source, overwarn={}, executor_kw={}):
# We now assume that target and source are lists or None.
if self.src_builder:
source = self.src_builder_sources(env, source, overwarn)
if self.single_source and len(source) > 1 and target is None:
result = []
if target is None: target = [None]*len(source)
for tgt, src in zip(target, source):
if not tgt is None: tgt = [tgt]
if not src is None: src = [src]
result.extend(self._execute(env, tgt, src, overwarn))
return SCons.Node.NodeList(result)
overwarn.warn()
tlist, slist = self._create_nodes(env, target, source)
# Check for errors with the specified target/source lists.
_node_errors(self, env, tlist, slist)
# The targets are fine, so find or make the appropriate Executor to
# build this particular list of targets from this particular list of
# sources.
if self.multi:
get_executor = self.get_multi_executor
else:
get_executor = self.get_single_executor
executor = get_executor(env, tlist, slist, executor_kw)
# Now set up the relevant information in the target Nodes themselves.
for t in tlist:
t.cwd = env.fs.getcwd()
t.builder_set(self)
t.env_set(env)
t.add_source(slist)
t.set_executor(executor)
t.set_explicit(self.is_explicit)
return SCons.Node.NodeList(tlist)
def __call__(self, env, target=None, source=None, chdir=_null, **kw):
# We now assume that target and source are lists or None.
# The caller (typically Environment.BuilderWrapper) is
# responsible for converting any scalar values to lists.
if chdir is _null:
ekw = self.executor_kw
else:
ekw = self.executor_kw.copy()
ekw['chdir'] = chdir
if kw:
if kw.has_key('srcdir'):
def prependDirIfRelative(f, srcdir=kw['srcdir']):
import os.path
if SCons.Util.is_String(f) and not os.path.isabs(f):
f = os.path.join(srcdir, f)
return f
if not SCons.Util.is_List(source):
source = [source]
source = map(prependDirIfRelative, source)
del kw['srcdir']
if self.overrides:
env_kw = self.overrides.copy()
env_kw.update(kw)
else:
env_kw = kw
else:
env_kw = self.overrides
env = env.Override(env_kw)
return self._execute(env, target, source, OverrideWarner(kw), ekw)
def adjust_suffix(self, suff):
if suff and not suff[0] in [ '.', '_', '$' ]:
return '.' + suff
return suff
def get_prefix(self, env, sources=[]):
prefix = self.prefix
if callable(prefix):
prefix = prefix(env, sources)
return env.subst(prefix)
def set_suffix(self, suffix):
if not callable(suffix):
suffix = self.adjust_suffix(suffix)
self.suffix = suffix
def get_suffix(self, env, sources=[]):
suffix = self.suffix
if callable(suffix):
suffix = suffix(env, sources)
return env.subst(suffix)
def set_src_suffix(self, src_suffix):
if not src_suffix:
src_suffix = []
elif not SCons.Util.is_List(src_suffix):
src_suffix = [ src_suffix ]
adjust = lambda suf, s=self: \
callable(suf) and suf or s.adjust_suffix(suf)
self.src_suffix = map(adjust, src_suffix)
def get_src_suffix(self, env):
"""Get the first src_suffix in the list of src_suffixes."""
ret = self.src_suffixes(env)
if not ret:
return ''
return ret[0]
def add_emitter(self, suffix, emitter):
"""Add a suffix-emitter mapping to this Builder.
This assumes that emitter has been initialized with an
appropriate dictionary type, and will throw a TypeError if
not, so the caller is responsible for knowing that this is an
appropriate method to call for the Builder in question.
"""
self.emitter[suffix] = emitter
def add_src_builder(self, builder):
"""
Add a new Builder to the list of src_builders.
This requires wiping out cached values so that the computed
lists of source suffixes get re-calculated.
"""
self._memo = {}
self.src_builder.append(builder)
def _get_sdict(self, env):
"""
Returns a dictionary mapping all of the source suffixes of all
src_builders of this Builder to the underlying Builder that
should be called first.
        This dictionary is consulted for each target specified, so it
        would be tempting to memoize it per construction environment.
        It is, however, re-computed on every call, not cached, because there
        might be changes to one of our source Builders (or one of their
        source Builders, and so on, and so on...) that we can't "see."
        The underlying methods we call cache their computed values,
        though, so we hope repeatedly aggregating them into a dictionary
        like this won't be too big a hit. We may need to look for a
        better way to do this if performance data show this has turned
        into a significant bottleneck.
"""
sdict = {}
for bld in self.get_src_builders(env):
for suf in bld.src_suffixes(env):
sdict[suf] = bld
return sdict
def src_builder_sources(self, env, source, overwarn={}):
sdict = self._get_sdict(env)
src_suffixes = self.src_suffixes(env)
lengths = list(set(map(len, src_suffixes)))
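        # Collect the distinct suffix lengths once so match_src_suffix()
        # takes one trailing slice per length rather than one per suffix.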
def match_src_suffix(name, src_suffixes=src_suffixes, lengths=lengths):
node_suffixes = map(lambda l, n=name: n[-l:], lengths)
for suf in src_suffixes:
if suf in node_suffixes:
return suf
return None
result = []
for s in SCons.Util.flatten(source):
if SCons.Util.is_String(s):
match_suffix = match_src_suffix(env.subst(s))
if not match_suffix and not '.' in s:
src_suf = self.get_src_suffix(env)
s = self._adjustixes(s, None, src_suf)[0]
else:
match_suffix = match_src_suffix(s.name)
if match_suffix:
try:
bld = sdict[match_suffix]
except KeyError:
result.append(s)
else:
tlist = bld._execute(env, None, [s], overwarn)
# If the subsidiary Builder returned more than one
# target, then filter out any sources that this
# Builder isn't capable of building.
if len(tlist) > 1:
mss = lambda t, m=match_src_suffix: m(t.name)
tlist = filter(mss, tlist)
result.extend(tlist)
else:
result.append(s)
source_factory = env.get_factory(self.source_factory)
return env.arg2nodes(result, source_factory)
def _get_src_builders_key(self, env):
return id(env)
memoizer_counters.append(SCons.Memoize.CountDict('get_src_builders', _get_src_builders_key))
def get_src_builders(self, env):
"""
Returns the list of source Builders for this Builder.
This exists mainly to look up Builders referenced as
        strings in the 'BUILDERS' variable of the construction
environment and cache the result.
"""
memo_key = id(env)
try:
memo_dict = self._memo['get_src_builders']
except KeyError:
memo_dict = {}
self._memo['get_src_builders'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
builders = []
for bld in self.src_builder:
if SCons.Util.is_String(bld):
try:
bld = env['BUILDERS'][bld]
except KeyError:
continue
builders.append(bld)
memo_dict[memo_key] = builders
return builders
def _subst_src_suffixes_key(self, env):
return id(env)
memoizer_counters.append(SCons.Memoize.CountDict('subst_src_suffixes', _subst_src_suffixes_key))
def subst_src_suffixes(self, env):
"""
The suffix list may contain construction variable expansions,
so we have to evaluate the individual strings. To avoid doing
this over and over, we memoize the results for each construction
environment.
"""
memo_key = id(env)
try:
memo_dict = self._memo['subst_src_suffixes']
except KeyError:
memo_dict = {}
self._memo['subst_src_suffixes'] = memo_dict
else:
try:
return memo_dict[memo_key]
except KeyError:
pass
suffixes = map(lambda x, s=self, e=env: e.subst(x), self.src_suffix)
memo_dict[memo_key] = suffixes
return suffixes
def src_suffixes(self, env):
"""
Returns the list of source suffixes for all src_builders of this
Builder.
This is essentially a recursive descent of the src_builder "tree."
(This value isn't cached because there may be changes in a
src_builder many levels deep that we can't see.)
"""
sdict = {}
suffixes = self.subst_src_suffixes(env)
for s in suffixes:
sdict[s] = 1
for builder in self.get_src_builders(env):
for s in builder.src_suffixes(env):
if not sdict.has_key(s):
sdict[s] = 1
suffixes.append(s)
return suffixes
class CompositeBuilder(SCons.Util.Proxy):
"""A Builder Proxy whose main purpose is to always have
a DictCmdGenerator as its action, and to provide access
to the DictCmdGenerator's add_action() method.
"""
def __init__(self, builder, cmdgen):
if __debug__: logInstanceCreation(self, 'Builder.CompositeBuilder')
SCons.Util.Proxy.__init__(self, builder)
# cmdgen should always be an instance of DictCmdGenerator.
self.cmdgen = cmdgen
self.builder = builder
def add_action(self, suffix, action):
self.cmdgen.add_action(suffix, action)
self.set_src_suffix(self.cmdgen.src_suffixes())
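# A minimal usage sketch (assuming a construction environment 'env' whose
# BUILDERS dictionary contains a Builder registered as 'MyTool'):
#
#     nodes = env.MyTool(target='out', source=['in.src'])
#
# __call__() above adjusts prefixes/suffixes, runs any emitter, and
# attaches an Executor to each resulting target Node.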
#!/usr/bin/python
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import itertools
import random
import unittest
import adsense_util_data_collator
DATE_FORMAT = adsense_util_data_collator.DATE_FORMAT
MONTH_FORMAT = adsense_util_data_collator.MONTH_FORMAT
class TestSequenceFunctions(unittest.TestCase):
class FakeReport(object):
"""Creates a fake report.
Args:
start_date: a string with a date in DATE_FORMAT.
end_date: a string with a date in DATE_FORMAT.
time_dimensions: a list of strings containing none or all of "DATE",
"WEEK" and "MONTH".
dimensions: a list of strings representing dimensions.
metrics: a list of strings representing metrics.
ratios: a list of strings representing metrics that are ratios.
currencies: a list of strings representing metrics that are currencies.
dimension_values: a dictionary of dimensions where the values are lists
of strings.
row_fill_factor: a number between 0 and 1 (inclusive) that defines the
number of rows that are not returned, to simulate periods without
activity.
"""
def __init__(self, start_date=u'2014-02-28', end_date=u'2014-04-07',
time_dimensions=None, dimensions=None, metrics=None,
ratios=None, currencies=None, dimension_values=None,
row_fill_factor=0.8):
# Default values for the fake report:
self.start_date = start_date
self.end_date = end_date
self.time_dimensions = (['MONTH'] if time_dimensions is None
else time_dimensions)
self.dimensions = (['PLATFORM_TYPE'] if dimensions is None
else dimensions)
self.metrics = (['CLICKS', 'PAGE_VIEWS'] if metrics is None
else metrics)
self.ratios = (['AD_REQUESTS_CTR', 'AD_REQUESTS_COVERAGE']
if ratios is None else ratios)
self.currencies = (['EARNINGS', 'COST_PER_CLICK'] if currencies is None
else currencies)
self.dimension_values = (
{'PLATFORM_TYPE': ['Desktop', 'High-end mobile devices']}
if dimension_values is None else dimension_values)
# Sort the time dimensions: DATE before WEEK before MONTH.
self.time_dimensions = [
dimension for dimension in ['DATE', 'WEEK', 'MONTH']
if dimension in self.time_dimensions]
self.row_fill_factor = row_fill_factor
def generate(self):
"""Generates the fake report.
Returns:
The generated report.
"""
report = {}
self._fill_dates(report)
self._fill_headers(report)
self._fill_rows(report)
return report
def _fill_dates(self, report):
report['startDate'] = self.start_date
report['endDate'] = self.end_date
def _fill_headers(self, report):
report['headers'] = []
for time_dimension in self.time_dimensions:
time_dimension_header = {u'type': u'DIMENSION',
u'name': time_dimension}
report['headers'].append(time_dimension_header)
for dimension in self.dimensions:
dimension_header = {u'type': u'DIMENSION', u'name': dimension}
report['headers'].append(dimension_header)
for metric in self.metrics:
metrics_header = {u'type': u'METRIC_TALLY', u'name': metric}
report['headers'].append(metrics_header)
for ratio in self.ratios:
ratios_header = {u'type': u'METRIC_RATIO', u'name': ratio}
report['headers'].append(ratios_header)
      for currency in self.currencies:
currencies_header = {u'type': u'METRIC_CURRENCY', u'name': currency}
report['headers'].append(currencies_header)
def _fill_rows(self, report):
# Make sure the resulting report is random but always the same.
random.seed(0)
# Fill the rows with data.
report['rows'] = []
cursor_date = datetime.datetime.strptime(self.start_date, DATE_FORMAT)
end_date = datetime.datetime.strptime(self.end_date, DATE_FORMAT)
while cursor_date <= end_date:
for combination in itertools.product(*self.dimension_values.values()):
# The fill factor specifies the ratio of missing rows.
if random.random() > self.row_fill_factor:
continue
row = []
if 'DATE' in self.time_dimensions:
row.append(adsense_util_data_collator.date_to_date_st(cursor_date))
if 'WEEK' in self.time_dimensions:
row.append(adsense_util_data_collator.date_to_week_st(cursor_date))
if 'MONTH' in self.time_dimensions:
row.append(adsense_util_data_collator.date_to_month_st(cursor_date))
for dimension in combination:
row.append(dimension)
for _ in xrange(len(self.metrics)):
row.append('42')
for _ in xrange(len(self.ratios)):
row.append('3.14')
for _ in xrange(len(self.currencies)):
row.append('99.95')
report['rows'].append(row)
# No time dimensions, we don't need to loop.
if not self.time_dimensions:
break
# If there are time dimensions, increase the cursor and loop.
if 'DATE' in self.time_dimensions:
cursor_date += datetime.timedelta(days=1)
elif 'WEEK' in self.time_dimensions:
cursor_date += datetime.timedelta(days=7)
elif 'MONTH' in self.time_dimensions:
cursor_date = adsense_util_data_collator.increase_month(cursor_date)
cursor_date = datetime.datetime(
cursor_date.year, cursor_date.month, 1)
def setUp(self):
# Generate the default fake report.
generatedr = [self.FakeReport().generate()]
self.data_collator_default = (
adsense_util_data_collator.DataCollator(generatedr))
def test_get_header_index(self):
report = self.data_collator_default.reports[0]
self.assertEquals(
self.data_collator_default._get_header_index(report, 'DATE'), -1)
self.assertEquals(
self.data_collator_default._get_header_index(report, 'WEEK'), -1)
self.assertEquals(
self.data_collator_default._get_header_index(report, 'MONTH'), 0)
def test_multiple_reports_without_date_filling(self):
"""Tests two contiguous reports."""
report1 = self.FakeReport(start_date='2013-01-01', end_date='2013-01-03',
time_dimensions=['DATE'], dimensions=[])
report2 = self.FakeReport(start_date='2013-01-04', end_date='2013-01-06',
time_dimensions=['DATE'], dimensions=[])
reports = [report1.generate(), report2.generate()]
datacollator = adsense_util_data_collator.DataCollator(reports)
resulting_report = datacollator.collate_data()
self.assertEquals(len(resulting_report['rows']), 6)
def test_multiple_reports_with_date_filling(self):
"""Tests two non-contiguous reports."""
report1 = self.FakeReport(start_date='2013-01-01', end_date='2013-01-03',
time_dimensions=['DATE'], dimensions=[])
report2 = self.FakeReport(start_date='2013-01-05', end_date='2013-01-07',
time_dimensions=['DATE'], dimensions=[])
reports = [report1.generate(), report2.generate()]
self.assertRaises(ValueError, adsense_util_data_collator.DataCollator,
reports)
def test_same_row_different_reports(self):
"""Tests for duplicated rows.
Only the first row should be returned.
"""
# 2013-01-03 will be duplicated.
report1 = self.FakeReport(start_date='2013-01-01', end_date='2013-01-03',
time_dimensions=['DATE'], dimensions=[])
report2 = self.FakeReport(start_date='2013-01-03', end_date='2013-01-04',
time_dimensions=['DATE'], dimensions=[])
reports = [report1.generate(), report2.generate()]
datacollator = adsense_util_data_collator.DataCollator(reports)
resulting_report = datacollator.collate_data()
self.assertEquals(len(resulting_report['rows']), 4)
def test_multiple_empty_reports_with_date_filling(self):
"""Tests an empty report just with one time dimension."""
report1 = self.FakeReport(start_date='2013-01-01', end_date='2013-01-03',
time_dimensions=['DATE'], row_fill_factor=0,
dimensions=[])
report2 = self.FakeReport(start_date='2013-01-04', end_date='2013-01-06',
time_dimensions=['DATE'], row_fill_factor=0,
dimensions=[])
reports = [report1.generate(), report2.generate()]
datacollator = adsense_util_data_collator.DataCollator(reports)
resulting_report = datacollator.collate_data()
self.assertEquals(len(resulting_report['rows']), 6)
def test_multiple_empty_reports_with_date_filling_multiple_dimensions(self):
"""Tests an empty report with two dimensions (time and generic).
The second dimension should be ignored as there are no dimension values.
"""
dimensions = ['DIM1']
dimension_values = {
'DIM1': ['Unused', 'Unused']}
report1 = self.FakeReport(start_date='2013-01-01',
end_date='2013-01-03',
time_dimensions=['DATE'],
row_fill_factor=0,
dimensions=dimensions,
dimension_values=dimension_values)
report2 = self.FakeReport(start_date='2013-01-04',
end_date='2013-01-06',
time_dimensions=['DATE'],
row_fill_factor=0,
dimensions=dimensions,
dimension_values=dimension_values)
reports = [report1.generate(), report2.generate()]
datacollator = adsense_util_data_collator.DataCollator(reports)
resulting_report = datacollator.collate_data()
self.assertEquals(len(resulting_report['rows']), 6)
def test_no_time_dimensions_report(self):
dimensions = ['DIM1']
dimension_values = {
'DIM1': ['Value1', 'Value2']}
report1 = self.FakeReport(start_date='2013-01-01',
end_date='2013-01-03',
row_fill_factor=1,
dimensions=dimensions,
time_dimensions=[],
dimension_values=dimension_values).generate()
datacollator = adsense_util_data_collator.DataCollator([report1])
resulting_report = datacollator.collate_data()
self.assertEquals(len(resulting_report['rows']), 2)
def test_no_reports_error(self):
self.assertRaises(ValueError, adsense_util_data_collator.DataCollator, [])
def test_no_headers_error(self):
report1 = self.FakeReport(start_date='2013-01-01',
end_date='2013-01-03',
time_dimensions=[],
row_fill_factor=0,
dimensions=[],
metrics=[],
ratios=[],
currencies=[],
dimension_values={}).generate()
reports = [report1]
self.assertRaises(ValueError,
adsense_util_data_collator.DataCollator,
reports)
def test_no_dimensions_error(self):
report1 = self.FakeReport(start_date='2013-01-01',
end_date='2013-01-03',
time_dimensions=[],
row_fill_factor=0,
dimensions=[],
dimension_values={}).generate()
reports = [report1]
self.assertRaises(ValueError,
adsense_util_data_collator.DataCollator,
reports)
def test_different_reports_error(self):
date_report = self.FakeReport(start_date='2013-01-01',
end_date='2013-01-03',
time_dimensions=['DATE'],
row_fill_factor=0).generate()
month_report = self.FakeReport(start_date='2013-01-04',
end_date='2013-01-06',
time_dimensions=['DATE', 'MONTH'],
row_fill_factor=0).generate()
self.assertRaises(ValueError,
adsense_util_data_collator.DataCollator,
[date_report, month_report])
def test_different_reports_error2(self):
date_report = self.FakeReport(start_date='2013-01-01',
end_date='2013-01-03',
time_dimensions=['DATE'],
row_fill_factor=0).generate()
month_report = self.FakeReport(start_date='2013-01-04',
end_date='2013-01-06',
time_dimensions=['MONTH'],
row_fill_factor=0).generate()
self.assertRaises(ValueError,
adsense_util_data_collator.DataCollator,
[date_report, month_report])
def test_bad_reports_error(self):
report1 = self.FakeReport(start_date='2013-01-01',
end_date='2013-01-03',
time_dimensions=['DATE'],
row_fill_factor=0).generate()
report2 = self.FakeReport(start_date='2013-01-04',
end_date='2013-01-06',
time_dimensions=['DATE'],
row_fill_factor=0).generate()
report2['headers'][0], report2['headers'][1] = (
report2['headers'][1], report2['headers'][0])
self.assertRaises(ValueError,
adsense_util_data_collator.DataCollator,
[report1, report2])
def test_increase_month(self):
increase_month = adsense_util_data_collator.increase_month
self.assertEquals(increase_month(datetime.datetime(2010, 01, 01)),
datetime.datetime(2010, 02, 01))
self.assertEquals(increase_month(datetime.datetime(2010, 12, 01)),
datetime.datetime(2011, 01, 01))
self.assertEquals(increase_month(datetime.datetime(2010, 12, 31)),
datetime.datetime(2011, 01, 31))
self.assertEquals(increase_month(datetime.datetime(2010, 01, 31)),
datetime.datetime(2010, 02, 28))
# Test leap year.
self.assertEquals(increase_month(datetime.datetime(2012, 01, 31)),
datetime.datetime(2012, 02, 29))
self.assertEquals(increase_month(datetime.datetime(2010, 11, 30)),
datetime.datetime(2010, 12, 30))
def test_get_all_dimensions_from_report(self):
report = self.data_collator_default.reports[0]
dimensions = self.data_collator_default._get_all_dimensions_from_report(
report)
self.assertEquals(dimensions, [(0, u'MONTH'), (1, u'PLATFORM_TYPE')])
def test_create_new_row(self):
self.data_collator_default.month_index = 0
february = datetime.datetime(2014, 02, 01)
dimension = u'High-end mobile devices'
combination = [dimension]
new_row = self.data_collator_default._create_new_row(combination, february)
self.assertIn('2014-02', new_row)
self.assertIn(dimension, new_row)
def test_generate_every_dimension_combination(self):
"""Tests the generate_every_combination method for a simple report.
A report is generated with one dimension with three possible values. The
list of combinations should have length 1x3.
"""
dimensions = [u'PLATFORM_TYPE']
dimension_values = {u'PLATFORM_TYPE': [
u'Desktop', u'High-end mobile devices', u'Tablets']}
generatedr = [self.FakeReport(
dimensions=dimensions, dimension_values=dimension_values).generate()]
datacollator = adsense_util_data_collator.DataCollator(generatedr)
# Sets can be used to compare as the combinations are unique.
self.assertEquals(
set([x for x in datacollator._generate_every_dimension_combination()]),
set([(u'High-end mobile devices',), (u'Tablets',), (u'Desktop',)]))
def test_generate_every_dimension_combination_empty(self):
"""Tests the generate_every_combination method for a simple report.
A report is generated with one dimension with three possible values. The
list of combinations should have length 1x3.
"""
dimensions = [u'PLATFORM_TYPE']
dimension_values = {u'PLATFORM_TYPE': ['Unused']}
generatedr = [
self.FakeReport(dimensions=dimensions,
dimension_values=dimension_values,
row_fill_factor=0).generate()]
datacollator = adsense_util_data_collator.DataCollator(generatedr)
# Sets can be used to compare as the combinations are unique.
self.assertEquals(
[x for x in datacollator._generate_every_dimension_combination()], [[]])
def test_generate_every_dimension_combination_multiple(self):
"""Tests the generate_every_combination method for a complex report.
A report is generated with two dimensions, one having three values and
the other one having two. The list of combinations should have length 2x3.
"""
dimensions = [u'PLATFORM_TYPE', u'ANOTHER_DIMENSION']
dimension_values = {u'PLATFORM_TYPE': [
u'Desktop', u'High-end mobile devices', u'Tablets'],
u'ANOTHER_DIMENSION': [u'Value1', u'Value2']}
generatedr = [self.FakeReport(
dimensions=dimensions, dimension_values=dimension_values).generate()]
datacollator = adsense_util_data_collator.DataCollator(generatedr)
expected_multiple_dimensions = [
(u'Desktop', u'Value1'),
(u'Desktop', u'Value2'),
(u'High-end mobile devices', u'Value1'),
(u'High-end mobile devices', u'Value2'),
(u'Tablets', u'Value1'),
(u'Tablets', u'Value2')]
# Sets can be used to compare as the combinations are unique.
self.assertEquals(
set([x for x in datacollator._generate_every_dimension_combination()]),
set(expected_multiple_dimensions))
def test_collate_data_simple(self):
self._run_common_tests_and_get_result(self.data_collator_default)
def test_collate_data_complex(self):
reports_multiple_dim_values = {
'PLATFORM_TYPE': ['Desktop', 'High-end mobile devices', 'Tablets'],
'ANOTHER_DIMENSION': ['Value1', 'Value2']
}
reports_multiple_dim = [self.FakeReport(
start_date='2014-02-06', end_date='2014-04-07',
time_dimensions=['MONTH'],
dimensions=['PLATFORM_TYPE', 'ANOTHER_DIMENSION'],
dimension_values=reports_multiple_dim_values).generate()]
collator = adsense_util_data_collator.DataCollator(reports_multiple_dim)
self._run_common_tests_and_get_result(collator)
def test_collate_data_complex_week(self):
reports_multiple_dim_values = {
'PLATFORM_TYPE': ['Desktop', 'High-end mobile devices', 'Tablets'],
'ANOTHER_DIMENSION': ['Value1', 'Value2']
}
reports_multiple_dim = [self.FakeReport(
start_date='2014-02-06', end_date='2014-04-07',
time_dimensions=['MONTH', 'WEEK'],
dimensions=['PLATFORM_TYPE', 'ANOTHER_DIMENSION'],
dimension_values=reports_multiple_dim_values).generate()]
collator = adsense_util_data_collator.DataCollator(reports_multiple_dim)
# Common test for every collated report.
result = self._run_common_tests_and_get_result(collator)
self.assertTrue(collator.week_index != -1)
    # The number of weeks counts 2014-02-24 twice because that week spans
    # two months (it starts in February and ends in March).
number_of_weeks = 10
different_dim_values = (
len(reports_multiple_dim_values['PLATFORM_TYPE'])
* len(reports_multiple_dim_values['ANOTHER_DIMENSION']))
self.assertEquals(len(result['rows']),
number_of_weeks * different_dim_values)
def _run_common_tests_and_get_result(self, data_collator):
"""Collates data and runs common tests that apply to every result.
Args:
data_collator: An instance of DataCollator.
Returns:
the resulting report.
"""
result = data_collator.collate_data()
# Make sure everything contained in the original reports is in the result.
number_rows_original = 0
for report in data_collator.reports:
number_rows_original += len(report['rows'])
for row in report['rows']:
self.assertIn(row, result['rows'])
# Make sure the number of rows in the result is at least the number of rows
# in the original reports.
self.assertTrue(number_rows_original <= len(result['rows']))
return result
def test_generator(self):
self.FakeReport().generate()
if __name__ == '__main__':
unittest.main()
# -*- coding:utf-8 -*-
# Copyright (c) 2009-2014 - Simon Conseil
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from __future__ import absolute_import, print_function
import click
import importlib
import io
import locale
import logging
import os
import sys
import time
from click import argument, option
from .compat import server, socketserver, string_types
from .gallery import Gallery
from .log import init_logging
from .pkgmeta import __version__
from .settings import read_settings
from .utils import copy
_DEFAULT_CONFIG_FILE = 'sigal.conf.py'
@click.group()
@click.version_option(version=__version__)
def main():
"""Sigal - Simple Static Gallery Generator.
Sigal is yet another python script to prepare a static gallery of images:
resize images, create thumbnails with some options, generate html pages.
"""
pass
@main.command()
@argument('path', default=_DEFAULT_CONFIG_FILE)
def init(path):
"""Copy a sample config file in the current directory (default to
'sigal.conf.py'), or use the provided 'path'."""
if os.path.isfile(path):
print("Found an existing config file, will abort to keep it safe.")
sys.exit(1)
from pkg_resources import resource_string
conf = resource_string(__name__, 'templates/sigal.conf.py')
with io.open(path, 'w', encoding='utf-8') as f:
f.write(conf.decode('utf8'))
print("Sample config file created: {}".format(path))
@main.command()
@argument('source', required=False)
@argument('destination', required=False)
@option('-f', '--force', is_flag=True,
help="Force the reprocessing of existing images")
@option('-v', '--verbose', is_flag=True, help="Show all messages")
@option('-d', '--debug', is_flag=True,
help="Show all message, including debug messages")
@option('-c', '--config', default=_DEFAULT_CONFIG_FILE, show_default=True,
help="Configuration file")
@option('-t', '--theme', help="Specify a theme directory, or a theme name for "
"the themes included with Sigal")
@option('--title', help="Title of the gallery (overrides the title setting).")
@option('-n', '--ncpu', help="Number of CPUs to use (default: all)")
def build(source, destination, debug, verbose, force, config, theme, title,
ncpu):
"""Run sigal to process a directory.
If provided, 'source', 'destination' and 'theme' will override the
corresponding values from the settings file.
"""
level = ((debug and logging.DEBUG) or (verbose and logging.INFO)
or logging.WARNING)
init_logging(__name__, level=level)
logger = logging.getLogger(__name__)
if not os.path.isfile(config):
logger.error("Settings file not found: %s", config)
sys.exit(1)
start_time = time.time()
settings = read_settings(config)
for key in ('source', 'destination', 'theme'):
arg = locals()[key]
if arg is not None:
settings[key] = os.path.abspath(arg)
logger.info("%12s : %s", key.capitalize(), settings[key])
if not settings['source'] or not os.path.isdir(settings['source']):
logger.error("Input directory not found: %s", settings['source'])
sys.exit(1)
if not os.path.relpath(settings['destination'],
settings['source']).startswith('..'):
logger.error("Output directory should be outside of the input "
"directory.")
sys.exit(1)
if title:
settings['title'] = title
locale.setlocale(locale.LC_ALL, settings['locale'])
init_plugins(settings)
gal = Gallery(settings, ncpu=ncpu)
gal.build(force=force)
# copy extra files
for src, dst in settings['files_to_copy']:
src = os.path.join(settings['source'], src)
dst = os.path.join(settings['destination'], dst)
logger.debug('Copy %s to %s', src, dst)
copy(src, dst, symlink=settings['orig_link'])
stats = gal.stats
def format_stats(_type):
opt = ["{} {}".format(stats[_type + '_' + subtype], subtype)
for subtype in ('skipped', 'failed')
if stats[_type + '_' + subtype] > 0]
opt = ' ({})'.format(', '.join(opt)) if opt else ''
return '{} {}s{}'.format(stats[_type], _type, opt)
print('Done.\nProcessed {} and {} in {:.2f} seconds.'
.format(format_stats('image'), format_stats('video'),
time.time() - start_time))
def init_plugins(settings):
"""Load plugins and call register()."""
logger = logging.getLogger(__name__)
logger.debug('Plugin paths: %s', settings['plugin_paths'])
for path in settings['plugin_paths']:
sys.path.insert(0, path)
for plugin in settings['plugins']:
try:
if isinstance(plugin, string_types):
mod = importlib.import_module(plugin)
mod.register(settings)
else:
plugin.register(settings)
logger.debug('Registered plugin %s', plugin)
except Exception as e:
logger.error('Failed to load plugin %s: %r', plugin, e)
for path in settings['plugin_paths']:
sys.path.remove(path)
@main.command()
@argument('destination', default='_build')
@option('-p', '--port', help="Port to use", default=8000)
@option('-c', '--config', default=_DEFAULT_CONFIG_FILE,
show_default=True, help='Configuration file')
def serve(destination, port, config):
"""Run a simple web server."""
if os.path.exists(destination):
pass
elif os.path.exists(config):
settings = read_settings(config)
destination = settings.get('destination')
if not os.path.exists(destination):
sys.stderr.write("The '{}' directory doesn't exist, "
"maybe try building first?"
"\n".format(destination))
sys.exit(1)
else:
sys.stderr.write("The {destination} directory doesn't exist "
"and the config file ({config}) could not be "
"read."
"\n".format(destination=destination, config=config))
sys.exit(2)
print('DESTINATION : {}'.format(destination))
os.chdir(destination)
Handler = server.SimpleHTTPRequestHandler
httpd = socketserver.TCPServer(("", port), Handler, False)
print(" * Running on http://127.0.0.1:{}/".format(port))
try:
httpd.allow_reuse_address = True
httpd.server_bind()
httpd.server_activate()
httpd.serve_forever()
except KeyboardInterrupt:
print('\nAll done!')
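# Typical invocations (a sketch, assuming sigal is installed and a
# sigal.conf.py exists in the current directory):
#
#     sigal init                      # write a sample config file
#     sigal build pictures _build     # process the 'pictures' directory
#     sigal serve -p 8000 _build      # preview the generated gallery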
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A memcache viewer and editor UI.
Memcache associates a key with a value and an integer flag. The Go API maps
keys to strings and lets the user control the flag. Java, PHP and Python
map keys to an arbitrary type and uses the flag to indicate the type
information. Java, PHP and Python map types in inconsistent ways, see:
- google/appengine/api/memcache/__init__.py
- google/appengine/api/memcache/MemcacheSerialization.java
- google/appengine/runtime/MemcacheUtils.php
"""
import datetime
import logging
import urllib
from google.appengine.api import apiproxy_stub_map
from google.appengine.api import memcache
from google.appengine.api.memcache import memcache_service_pb
from google.appengine.tools.devappserver2.admin import admin_request_handler
class StringValueConverter(object):
memcache_type = memcache.TYPE_STR
placeholder = 'hello world!'
can_edit = True
friendly_type_name = 'String'
@staticmethod
def to_display(cache_value):
"""Convert a memcache string into a displayable representation.
Make a memcache string into a text string that can be displayed or edited.
While called a string, it is technically just an array of bytes. Because
we do not know what encoding the bytes are (and possibly they are not an
encoded text string - for example they could be an MD5 hash) we display
in string-escaped form.
Args:
cache_value: an array of bytes
Returns:
A unicode string that represents the sequence of bytes and can be
roundtripped back to the sequence of bytes.
"""
# As we don't know what encoding the bytes are, we string escape so any
# byte sequence is legal ASCII. Once we have a legal ASCII byte sequence
# we can safely convert to a unicode/text string.
return cache_value.encode('string-escape').decode('ascii')
@staticmethod
def to_cache(display_value):
"""Convert a displayable representation to a memcache string.
Take a displayable/editable text string and convert into a memcache string.
As a memcache string is technically an array of bytes, we only allow
    characters from the ASCII range and require all other bytes to be indicated
    via string escapes (if we see the Unicode character Yen sign (U+00A5),
    we don't know whether the byte 0xA5 or the UTF-8 two-byte sequence
    0xC2 0xA5 is wanted).
Args:
display_value: a text (i.e. unicode string) using only ASCII characters;
        non-ASCII characters must be represented as string escapes.
Returns:
An array of bytes.
Raises:
UnicodeEncodeError: a non-ASCII character is part of the input.
"""
# Since we don't know how they want their Unicode encoded, this will raise
# an exception (which will be displayed nicely) if they include non-ASCII.
return display_value.encode('ascii').decode('string-escape')
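# For example, StringValueConverter round-trips non-ASCII bytes through
# their escape sequences: to_display('\xa5') == u'\\xa5' and
# to_cache(u'\\xa5') == '\xa5'.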
class UnicodeValueConverter(object):
memcache_type = memcache.TYPE_UNICODE
# Hello world in Japanese.
placeholder = u'\u3053\u3093\u306b\u3061\u306f\u4e16\u754c'
can_edit = True
friendly_type_name = 'Unicode String'
@staticmethod
def to_display(cache_value):
return cache_value.decode('utf-8')
@staticmethod
def to_cache(display_value):
return display_value.encode('utf-8')
class BooleanValueConverter(object):
memcache_type = memcache.TYPE_BOOL
placeholder = 'true'
can_edit = True
friendly_type_name = 'Boolean'
@staticmethod
def to_display(cache_value):
if cache_value == '0':
return 'false'
elif cache_value == '1':
return 'true'
else:
raise ValueError('unexpected boolean %r' % cache_value)
@staticmethod
def to_cache(display_value):
if display_value.lower() in ('false', 'no', 'off', '0'):
return '0'
elif display_value.lower() in ('true', 'yes', 'on', '1'):
return '1'
raise ValueError(
'invalid literal for boolean: %s (must be "true" or "false")' %
display_value)
class IntValueConverter(object):
memcache_type = memcache.TYPE_INT
placeholder = '42'
can_edit = True
friendly_type_name = 'Integer'
@staticmethod
def to_display(cache_value):
return str(cache_value)
@staticmethod
def to_cache(display_value):
return str(int(display_value))
class OtherValueConverter(object):
memcache_type = None
placeholder = None
can_edit = False
friendly_type_name = 'Unknown Type'
@staticmethod
def to_display(cache_value):
return repr(cache_value)[1:-1]
@staticmethod
def to_cache(display_value):
    raise NotImplementedError(
        'cannot convert to a memcache value of unknown type')
class MemcacheViewerRequestHandler(admin_request_handler.AdminRequestHandler):
CONVERTERS = [StringValueConverter, UnicodeValueConverter,
BooleanValueConverter, IntValueConverter,
OtherValueConverter]
MEMCACHE_TYPE_TO_CONVERTER = {c.memcache_type: c for c in CONVERTERS
if c.memcache_type is not None}
FRIENDLY_TYPE_NAME_TO_CONVERTER = {c.friendly_type_name: c
for c in CONVERTERS}
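  # MEMCACHE_TYPE_TO_CONVERTER resolves stored flag values for display;
  # FRIENDLY_TYPE_NAME_TO_CONVERTER resolves the type name submitted by
  # the edit form.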
def _get_memcache_value_and_flags(self, key):
"""Return a 2-tuple containing a memcache value and its flags."""
request = memcache_service_pb.MemcacheGetRequest()
response = memcache_service_pb.MemcacheGetResponse()
request.add_key(key)
apiproxy_stub_map.MakeSyncCall('memcache', 'Get', request, response)
assert response.item_size() < 2
if response.item_size() == 0:
return None, None
else:
return response.item(0).value(), response.item(0).flags()
def _set_memcache_value(self, key, value, flags):
"""Store a value in memcache."""
request = memcache_service_pb.MemcacheSetRequest()
response = memcache_service_pb.MemcacheSetResponse()
item = request.add_item()
item.set_key(key)
item.set_value(value)
item.set_flags(flags)
apiproxy_stub_map.MakeSyncCall('memcache', 'Set', request, response)
return (response.set_status(0) ==
memcache_service_pb.MemcacheSetResponse.STORED)
def get(self):
"""Show template and prepare stats and/or key+value to display/edit."""
    super(MemcacheViewerRequestHandler, self).get()
values = {'request': self.request,
'message': self.request.get('message')}
edit = self.request.get('edit')
key = self.request.get('key')
if edit:
# Show the form to edit/create the value.
key = edit
values['show_stats'] = False
values['show_value'] = False
values['show_valueform'] = True
values['types'] = [type_value.friendly_type_name
for type_value in self.CONVERTERS
if type_value.can_edit]
elif key:
      # A key was given, show its value on the stats page.
values['show_stats'] = True
values['show_value'] = True
values['show_valueform'] = False
else:
# Plain stats display + key lookup form.
values['show_stats'] = True
values['show_valueform'] = False
values['show_value'] = False
if key:
values['key'] = key
memcache_value, memcache_flags = self._get_memcache_value_and_flags(key)
if memcache_value is not None:
converter = self.MEMCACHE_TYPE_TO_CONVERTER.get(memcache_flags,
OtherValueConverter)
try:
values['value'] = converter.to_display(memcache_value)
except ValueError:
# This exception is possible in the case where the value was set by
# Go, which allows for arbitrary user-assigned flag values.
logging.exception('Could not convert %s value %s',
converter.friendly_type_name, memcache_value)
converter = OtherValueConverter
values['value'] = converter.to_display(memcache_value)
values['type'] = converter.friendly_type_name
values['writable'] = converter.can_edit
values['key_exists'] = True
values['value_placeholder'] = converter.placeholder
else:
values['writable'] = True
values['key_exists'] = False
if values['show_stats']:
memcache_stats = memcache.get_stats()
if not memcache_stats:
# No stats means no memcache usage.
memcache_stats = {'hits': 0, 'misses': 0, 'byte_hits': 0, 'items': 0,
'bytes': 0, 'oldest_item_age': 0}
values['stats'] = memcache_stats
try:
hitratio = memcache_stats['hits'] * 100 / (memcache_stats['hits']
+ memcache_stats['misses'])
except ZeroDivisionError:
hitratio = 0
values['hitratio'] = hitratio
# TODO: oldest_item_age should be formatted in a more useful
# way.
delta_t = datetime.timedelta(seconds=memcache_stats['oldest_item_age'])
values['oldest_item_age'] = datetime.datetime.now() - delta_t
self.response.write(self.render('memcache_viewer.html', values))
def _urlencode(self, query):
"""Encode a dictionary into a URL query string.
In contrast to urllib this encodes unicode characters as UTF8.
Args:
query: Dictionary of key/value pairs.
Returns:
String.
"""
return '&'.join('%s=%s' % (urllib.quote_plus(k.encode('utf8')),
urllib.quote_plus(v.encode('utf8')))
for k, v in query.iteritems())
def post(self):
"""Handle modifying actions and/or redirect to GET page."""
super(MemcacheViewerRequestHandler, self).post()
next_param = {}
if self.request.get('action:flush'):
if memcache.flush_all():
next_param['message'] = 'Cache flushed, all keys dropped.'
else:
next_param['message'] = 'Flushing the cache failed. Please try again.'
elif self.request.get('action:display'):
next_param['key'] = self.request.get('key')
elif self.request.get('action:edit'):
next_param['edit'] = self.request.get('key')
elif self.request.get('action:delete'):
key = self.request.get('key')
result = memcache.delete(key)
if result == memcache.DELETE_NETWORK_FAILURE:
next_param['message'] = ('ERROR: Network failure, key "%s" not deleted.'
% key)
elif result == memcache.DELETE_ITEM_MISSING:
next_param['message'] = 'Key "%s" not in cache.' % key
elif result == memcache.DELETE_SUCCESSFUL:
next_param['message'] = 'Key "%s" deleted.' % key
else:
next_param['message'] = ('Unknown return value. Key "%s" might still '
'exist.' % key)
elif self.request.get('action:save'):
key = self.request.get('key')
value = self.request.get('value')
type_ = self.request.get('type')
next_param['key'] = key
converter = self.FRIENDLY_TYPE_NAME_TO_CONVERTER[type_]
try:
memcache_value = converter.to_cache(value)
except ValueError as e:
next_param['message'] = 'ERROR: Failed to save key "%s": %s.' % (key, e)
else:
if self._set_memcache_value(key,
memcache_value,
converter.memcache_type):
next_param['message'] = 'Key "%s" saved.' % key
else:
next_param['message'] = 'ERROR: Failed to save key "%s".' % key
elif self.request.get('action:cancel'):
next_param['key'] = self.request.get('key')
else:
next_param['message'] = 'Unknown action.'
next = self.request.path_url
if next_param:
next = '%s?%s' % (next, self._urlencode(next_param))
self.redirect(next)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# =================================================================
# =================================================================
from eventlet import greenthread
from nova import rpc
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
from nova.openstack.common import log as logging
from paxes_nova.virt.ibmpowervm.common import exception
from paxes_nova import logcall
from paxes_nova.compute import api
from oslo.config import cfg
from paxes_nova import _
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@logcall
def verify_reserve_policy(reserve_policy, instance_name, disk_name):
"""
The reserve policy of the hdisk attached to the lpar
must be set to 'no_reserve'
:param reserve_policy: The reserve policy to check
    :param instance_name: The name of the instance (for logging)
:param disk_name: The name of the disk (for logging)
"""
if ((reserve_policy != 'no_reserve') and (reserve_policy != 'NoReserve')):
error = (_("Cannot migrate %(instance)s because "
"the reserve policy on disk %(hdisk)s is set to "
"%(policy)s. It must be set to no_reserve.") %
{'instance': instance_name,
'hdisk': disk_name,
'policy': reserve_policy})
LOG.exception(error)
raise exception.IBMPowerVMMigrationFailed(error)
@logcall
def verify_rmc_state(rmc_state, instance_name):
"""
The rmc state of the instance must be 'active'
:param rmc_state: The rmc_state to check
    :param instance_name: The name of the instance (for logging)
"""
if rmc_state != 'active':
error = (_("Cannot live migrate %(inst)s because its RMC "
"state is %(rmc)s. The RMC state must be active.") %
{'inst': instance_name,
'rmc': rmc_state})
LOG.exception(error)
raise exception.IBMPowerVMMigrationFailed(error)
@logcall
def verify_dlpar_enabled(dlpar, instance_name):
"""
Dlpar must be enabled on the instance
:param dlpar: Boolean value indicating dlpar is enabled
    :param instance_name: The name of the instance (for logging)
"""
if not dlpar:
error = (_("Cannot live migrate %s because DLPAR "
"is not enabled.") % instance_name)
LOG.exception(error)
raise exception.IBMPowerVMMigrationFailed(error)
@logcall
def verify_proc_compat_mode(lpar_compat_mode, host_compat_modes,
instance_name):
"""
The processor compatibility mode of the lpar must be in the list of
supported processor compatibility modes on the target host
:param lpar_compat_mode: The proc compat mode of the lpar
:param host_compat_modes: The proc compat modes of the target host
:param instance_name: The name of the instance (for logging)
"""
mode_list = host_compat_modes.split(',')
if not lpar_compat_mode in mode_list:
error = (_("Cannot migrate %(inst)s because its "
"processor compatibility mode %(mode)s "
"is not in the list of modes %(modes)s "
"supported by the target host.") %
{'inst': instance_name,
'mode': lpar_compat_mode,
'modes': mode_list})
LOG.exception(error)
raise exception.IBMPowerVMMigrationFailed(error)
@logcall
def verify_host_capacity_for_migration(migrations_allowed, migrations_running,
instance_name, host_name):
"""
The host can only support a limited number of concurrent migrations.
Verify there is room for another.
:param migrations_allowed: The number of migrations allowed
:param migrations_running: The number of migrations in progress
:param instance_name: The name of the instance (for logging)
:param host_name: The name of the host (for logging)
"""
if migrations_allowed == migrations_running:
error = (_("Cannot migrate %(inst)s because host %(host)s "
"only allows %(allowed)s concurrent migrations, and "
"%(running)s migrations are currently running.") %
{'inst': instance_name,
'host': host_name,
'allowed': migrations_allowed,
'running': migrations_running})
LOG.exception(error)
raise exception.IBMPowerVMMigrationFailed(error)
@logcall
def verify_logical_memory_block_size(source_lmb, target_lmb, instance_name):
"""
The logical memory block size of the source and target host
in a live migration operation must be the same.
:param source_lmb: The block size of the source host
:param target_lmb: The block size of the target host
:param instance_name: The name of the instance (for logging)
"""
if (source_lmb != target_lmb):
error = (_("Cannot migrate %(inst)s because the logical "
"memory block size of the source(%(source_lmb)sMB) "
"does not match the logical memory block size of the "
"target(%(target_lmb)sMB).") % {'inst': instance_name,
'source_lmb': source_lmb,
'target_lmb': target_lmb})
LOG.exception(error)
raise exception.IBMPowerVMMigrationFailed(error)
@logcall
def send_migration_failure_notification(context, instance,
tgt_host, exception):
"""
Sends a notification of live migration failure to the GUI
:param context: security context
:param instance: The instance that was migrating
:param tgt_host: The target host name
:param exception: The exception that was thrown
"""
if hasattr(exception, 'message'):
err_msg = _('%s') % exception.message
else:
err_msg = _('%s') % exception
# Send error notification
info = {'msg': _('Migration of virtual machine {instance_name} '
'to host {host_name} failed. '
'{error}'),
'instance_id': instance['uuid'],
'instance_name': instance['display_name'],
'host_name': tgt_host,
'error': err_msg}
notifier = rpc.get_notifier(service='compute', host=CONF.host)
notifier.error(context, 'compute.instance.log', info)
@logcall
def send_migration_success_notification(context, instance, src_host, tgt_host):
"""
    Sends a notification of live migration success to the GUI
:param context: security context
:param instance: The instance that was migrating
:param src_host: The source host name
:param tgt_host: The target host name
"""
    # Send success notification
info = {'msg': _('Migration of virtual machine {instance_name} from '
'{source_host} to {target_host} was successful.'),
'instance_id': instance['uuid'],
'instance_name': instance['display_name'],
'source_host': src_host,
'target_host': tgt_host}
notifier = rpc.get_notifier(service='compute', host=CONF.host)
notifier.info(context, 'compute.instance.log', info)
@logcall
def send_migration_notification(context, instance, priority, message):
"""
Sends a general live migration notification to the GUI
:param context: security context
:param instance: The instance that was migrating
:param priority: The level of message (DEBUG, WARN, INFO, ERROR, CRITICAL)
:param message: The message to print
"""
# Send event notification
info = {'msg': '{message}',
'instance_id': instance['uuid'],
'instance_name': instance['display_name'],
'message': message}
notifier = rpc.get_notifier(service='compute', host=CONF.host)
if priority == 'error':
notifier.error(context, 'compute.instance.log', info)
elif priority == 'info':
notifier.info(context, 'compute.instance.log', info)
elif priority == 'critical':
notifier.critical(context, 'compute.instance.log', info)
elif priority == 'warn':
notifier.warn(context, 'compute.instance.log', info)
elif priority == 'debug':
notifier.debug(context, 'compute.instance.log', info)
def verify_out_of_band_migration(context,
instance,
uuid,
uuid_in_db,
host_in_db,
host_name,
vm_info,
node):
# Verify if an out of band migration has moved an instance to current host.
# Spawn a thread to avoid blocking main function.
greenthread.spawn(_verify_out_of_band_migration,
context,
instance,
uuid,
uuid_in_db,
host_in_db,
host_name,
vm_info,
node)
def _verify_out_of_band_migration(context,
instance,
uuid,
uuid_in_db,
host_in_db,
host_name,
vm_info,
node):
"""
Verify if an out of band migration has moved an instance to current host
:param context: Security context
:param instance: The instance that was migrating
:param uuid: uuid of the instance
:param uuid_in_db: uuid of the instance in database
    :param host_in_db: Host of the instance as recorded in the database
    :param host_name: Current host name
:param vm_info: Result of driver.get_info()
:param node: Current node name
"""
state_in_db = instance.vm_state
# If LPAR appears on different host, check to see
# if it is migrating.
# Check task_state, skip the rest if the instance
# is deploying, deleting or migrating.
instance_task_state = instance.task_state
instance_vm_state = instance.vm_state
instance_name = instance.display_name
if (instance_task_state in (task_states.SCHEDULING,
task_states.
BLOCK_DEVICE_MAPPING,
task_states.NETWORKING,
task_states.SPAWNING,
task_states.MIGRATING,
task_states.DELETING)
or instance_vm_state in
(vm_states.BUILDING)):
LOG.info(_('Virtual machine %(name)s is in '
'task state: %(task_state)s and '
'virtual machine state: '
'%(vm_state)s. The operation to '
'determine whether a migration was '
'performed outside of PowerVC will '
'be skipped. '
'UUID on host: %(uuid_on_host)s. '
'UUID in DB: %(uuid_in_db)s. '
'Source host: %(src_host)s. Target '
'host: %(tgt_host)s')
% {'name': instance_name,
'task_state': instance_task_state,
'vm_state': instance_vm_state,
'uuid_on_host': uuid,
'uuid_in_db': uuid_in_db,
'src_host': host_in_db,
'tgt_host': host_name})
return
# Check 'instance_migrating' in case migration is
# out of band
is_migrating = vm_info.get('instance_migrating')
if is_migrating:
LOG.debug('Instance %s is migrating. '
'Skipped.'
% instance_name)
return
# If instance is still on source host (host_in_db), skip.
if _is_instance_on_source_host(context, host_in_db, uuid, instance_name):
return
# Sync up host
instance.host = host_name
# Sync up node
instance.node = node
# Check the instance state in DB, compare it with
# instance state known to hypervisor. Update DB if
# needed
vm_power_state = vm_info['state']
if state_in_db == vm_states.ERROR:
if vm_power_state == power_state.RUNNING:
instance.vm_state = vm_states.ACTIVE
elif vm_power_state == power_state.SHUTDOWN:
instance.vm_state = vm_states.STOPPED
elif state_in_db == vm_states.STOPPED:
if vm_power_state == power_state.RUNNING:
instance.vm_state = vm_states.ACTIVE
inst_output = instance.obj_to_primitive()
LOG.info(_('Virtual machine information: %s')
% inst_output)
instance.save()
LOG.info(_('Virtual machine %(name)s '
'migrated from %(src)s to %(tgt)s. '
'UUID on host: %(uuid)s. UUID in DB: '
'%(uuid_in_db)s') %
{'name': instance_name, 'src': host_in_db,
'tgt': host_name, 'uuid': uuid,
'uuid_in_db': uuid_in_db},
instance=instance)
# Send event notification
info = {'msg': _('Virtual machine {instance_name} '
'has been migrated from host '
'{source_host_name} to '
'host {destination_host_name}.'),
'instance_id': instance.uuid,
'instance_name': instance_name,
'source_host_name': host_in_db,
'destination_host_name': host_name}
notifier = rpc.get_notifier(service='compute',
host=host_name)
notifier.info(context,
'compute.instance.log',
info)
def _is_instance_on_source_host(context,
source_host,
instance_uuid,
instance_name):
"""
    While handling a possible out-of-band migration, we send a message to
    the source host, asking if the instance is still there.
"""
# Cooperative yield
greenthread.sleep(0)
answer = api.PowerVCComputeRPCAPI().is_instance_on_host(context,
instance_uuid,
instance_name,
source_host)
if answer:
LOG.info(_('Virtual machine %(inst)s is being managed by remote host '
'%(host)s. This could indicate the virtual machine is on '
'two hosts simultaneously after migration '
'outside of PowerVC') %
{'inst': instance_name, 'host': source_host})
else:
LOG.info(_('Instance %(inst)s is not being managed by remote '
'host %(host)s.') %
{'inst': instance_name, 'host': source_host})
return answer
# The 6.00 Word Game
# Created by: Kevin Luu <luuk> and Jenna Wiens <jwiens>
# Modified by: Sarina Canelake <sarina>, Nicola Moretto
import random
import string
VOWELS = 'aeiou'
CONSONANTS = 'bcdfghjklmnpqrstvwxyz'
HAND_SIZE = 7
SCRABBLE_LETTER_VALUES = {
'a': 1, 'b': 3, 'c': 3, 'd': 2, 'e': 1, 'f': 4, 'g': 2, 'h': 4, 'i': 1, 'j': 8, 'k': 5, 'l': 1, 'm': 3, 'n': 1, 'o': 1, 'p': 3, 'q': 10, 'r': 1, 's': 1, 't': 1, 'u': 1, 'v': 4, 'w': 4, 'x': 8, 'y': 4, 'z': 10
}
WORDLIST_FILENAME = "words.txt"
def loadWords():
"""
Returns a list of valid words. Words are strings of lowercase letters.
Depending on the size of the word list, this function may
take a while to finish.
"""
print "Loading word list from file..."
# inFile: file
inFile = open(WORDLIST_FILENAME, 'r', 0)
# wordList: list of strings
wordList = []
for line in inFile:
wordList.append(line.strip().lower())
print " ", len(wordList), "words loaded."
return wordList
def getFrequencyDict(sequence):
"""
Returns a dictionary where the keys are elements of the sequence
and the values are integer counts, for the number of times that
an element is repeated in the sequence.
sequence: string or list
return: dictionary
"""
# freqs: dictionary (element_type -> int)
freq = {}
for x in sequence:
freq[x] = freq.get(x,0) + 1
return freq
#
# Problem #1: Scoring a word
#
def getWordScore(word, n):
"""
Returns the score for a word. Assumes the word is a valid word.
The score for a word is the sum of the points for letters in the
word, multiplied by the length of the word, PLUS 50 points if all n
letters are used on the first turn.
Letters are scored as in Scrabble; A is worth 1, B is worth 3, C is
worth 3, D is worth 2, E is worth 1, and so on (see SCRABBLE_LETTER_VALUES)
word: string (lowercase letters)
n: integer (HAND_SIZE; i.e., hand size required for additional points)
returns: int >= 0
"""
totalScore = 0
# Add points for each letter
for letter in word:
totalScore += SCRABBLE_LETTER_VALUES[letter]
# Multiply score by the length of the word
totalScore *= len(word)
# Add 50 extra points if all letters are used
if (len(word) == n):
totalScore += 50
return totalScore
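# Worked examples of the scoring rule above (illustration only, not part
# of the original pset code):
#
# >>> getWordScore('weed', 7)
# 32     # (4 + 1 + 1 + 2) points * 4 letters; no +50 bonus since 4 != 7
# >>> getWordScore('waybill', 7)
# 155    # (4+1+4+3+1+1+1) = 15 points * 7 letters = 105, +50 bonus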
#
# Problem #2: Make sure you understand how this function works and what it does!
#
def displayHand(hand):
"""
Displays the letters currently in the hand.
For example:
>>> displayHand({'a':1, 'x':2, 'l':3, 'e':1})
Should print out something like:
a x x l l l e
The order of the letters is unimportant.
hand: dictionary (string -> int)
"""
for letter in hand.keys():
for j in range(hand[letter]):
print letter, # print all on the same line
print # print an empty line
#
# Problem #2: Make sure you understand how this function works and what it does!
#
def dealHand(n):
"""
Returns a random hand containing n lowercase letters.
    At least n/3 of the letters in the hand should be VOWELS.
Hands are represented as dictionaries. The keys are
letters and the values are the number of times the
particular letter is repeated in that hand.
n: int >= 0
returns: dictionary (string -> int)
"""
hand={}
    numVowels = n // 3  # floor division; same result as Python 2 int division
for i in range(numVowels):
x = VOWELS[random.randrange(0,len(VOWELS))]
hand[x] = hand.get(x, 0) + 1
for i in range(numVowels, n):
x = CONSONANTS[random.randrange(0,len(CONSONANTS))]
hand[x] = hand.get(x, 0) + 1
return hand
#
# Problem #2: Update a hand by removing letters
#
def updateHand(hand, word):
"""
Assumes that 'hand' has all the letters in word.
In other words, this assumes that however many times
a letter appears in 'word', 'hand' has at least as
many of that letter in it.
Updates the hand: uses up the letters in the given word
and returns the new hand, without those letters in it.
Has no side effects: does not modify hand.
word: string
hand: dictionary (string -> int)
returns: dictionary (string -> int)
"""
usedLetters = {}
for letter in word:
usedLetters[letter] = usedLetters.get(letter, 0) + 1
remainingLetters = {}
for letter in hand.keys():
if usedLetters.get(letter, 0) < hand[letter]:
remainingLetters[letter] = hand[letter] - usedLetters.get(letter, 0)
return remainingLetters
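# Worked example (illustration only): playing 'quail' from a hand with the
# letters q, u, a, i, l, l, m leaves one 'l' and the 'm' (key order may vary):
#
# >>> updateHand({'q':1, 'u':1, 'a':1, 'i':1, 'l':2, 'm':1}, 'quail')
# {'l': 1, 'm': 1}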
#
# Problem #3: Test word validity
#
def isValidWord(word, hand, wordList):
"""
Returns True if word is in the wordList and is entirely
composed of letters in the hand. Otherwise, returns False.
Does not mutate hand or wordList.
word: string
hand: dictionary (string -> int)
wordList: list of lowercase strings
"""
valid = word in wordList
usedLetters = {}
for letter in word:
usedLetters[letter] = usedLetters.get(letter, 0) + 1
for letter in usedLetters.keys():
if hand.get(letter, 0) < usedLetters[letter]:
valid = False
break
return valid
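# Example (illustration only; assumes 'hammer' appears in a loaded wordList):
#
# >>> isValidWord('hammer', {'h':1, 'a':1, 'm':2, 'e':1, 'r':1}, wordList)
# True
# >>> isValidWord('hammer', {'h':1, 'a':1, 'm':1, 'e':1, 'r':1}, wordList)
# False    # the hand has only one 'm'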
#
# Problem #4: Playing a hand
#
def calculateHandlen(hand):
"""
Returns the length (number of letters) in the current hand.
hand: dictionary (string-> int)
returns: integer
"""
numLetters = 0
for letter in hand.keys():
numLetters += hand[letter]
return numLetters
def playHand(hand, wordList, n):
"""
Allows the user to play the given hand, as follows:
* The hand is displayed.
* The user may input a word or a single period (the string ".")
to indicate they're done playing
* Invalid words are rejected, and a message is displayed asking
the user to choose another word until they enter a valid word or "."
* When a valid word is entered, it uses up letters from the hand.
* After every valid word: the score for that word is displayed,
the remaining letters in the hand are displayed, and the user
is asked to input another word.
* The sum of the word scores is displayed when the hand finishes.
* The hand finishes when there are no more unused letters or the user
inputs a "."
hand: dictionary (string -> int)
wordList: list of lowercase strings
n: integer (HAND_SIZE; i.e., hand size required for additional points)
"""
# Keep track of the total score
totalScore = 0
# As long as there are still letters left in the hand:
while calculateHandlen(hand) > 0:
# Display the hand
print("Current Hand: "),
displayHand(hand)
# Ask user for input
word = raw_input('Enter word, or a "." to indicate that you are finished: ')
# If the input is a single period:
if word == '.':
# End the game (break out of the loop)
print("Goodbye! Total score: " + str(totalScore) + " points.\n")
break
else: # Otherwise (the input is not a single period):
# If the word is not valid:
if not isValidWord(word, hand, wordList):
# Reject invalid word (print a message followed by a blank line)
print("Invalid word, please try again.\n")
else: # Otherwise (the word is valid):
wordScore = getWordScore(word, n)
# Tell the user how many points the word earned, and the updated total score, in one line followed by a blank line
totalScore += wordScore
print('"' + word + '" earned ' + str(wordScore) + ' points. Total: ' + str(totalScore) + '\n')
# Update the hand
hand = updateHand(hand, word)
# Game is over (user entered a '.' or ran out of letters), so tell user the total score
if (calculateHandlen(hand) == 0):
print("Run out of letters. Total score: " + str(totalScore) + " points.\n")
#
# Problem #5: Playing a game
#
def playGame(wordList):
"""
Allow the user to play an arbitrary number of hands.
1) Asks the user to input 'n' or 'r' or 'e'.
* If the user inputs 'n', let the user play a new (random) hand.
* If the user inputs 'r', let the user play the last hand again.
* If the user inputs 'e', exit the game.
* If the user inputs anything else, tell them their input was invalid.
2) When done playing the hand, repeat from step 1
"""
playedHands = 0
while True:
letter = raw_input("Enter n to deal a new hand, r to replay the last hand, or e to end game: ")
if letter == 'n':
hand = dealHand(HAND_SIZE)
playHand(hand, wordList, HAND_SIZE)
playedHands += 1
elif letter == 'r':
if playedHands == 0:
print("You have not played a hand yet. Please play a new hand first!\n")
else:
playHand(hand, wordList, HAND_SIZE)
playedHands += 1
elif letter == 'e':
break
else:
print("Invalid command.")
#
# Build data structures used for entire session and play game
#
if __name__ == '__main__':
wordList = loadWords()
playGame(wordList)
# encoding: utf-8
"""
Connector (line) shape and related objects. A connector is a line shape
having end-points that can be connected to other objects (but not to other
connectors). A line can be straight, have elbows, or can be curved.
"""
from __future__ import (
absolute_import, division, print_function, unicode_literals
)
from .base import BaseShape
from ..util import Emu
class Connector(BaseShape):
"""
Connector (line) shape. A connector is a linear shape having end-points
that can be connected to other objects (but not to other connectors).
A line can be straight, have elbows, or can be curved.
"""
def begin_connect(self, shape, cxn_pt_idx):
"""
**EXPERIMENTAL** - *The current implementation only works properly
with rectangular shapes, such as pictures and rectangles. Use with
other shape types may cause unexpected visual alignment of the
connected end-point and could lead to a load error if cxn_pt_idx
exceeds the connection point count available on the connected shape.
That said, a quick test should reveal what to expect when using this
method with other shape types.*
Connect the beginning of this connector to *shape* at the connection
point specified by *cxn_pt_idx*. Each shape has zero or more
connection points and they are identified by index, starting with 0.
Generally, the first connection point of a shape is at the top center
of its bounding box and numbering proceeds counter-clockwise from
        there. However, this is only a convention and may vary, especially
        with non-built-in shapes.
"""
self._connect_begin_to(shape, cxn_pt_idx)
self._move_begin_to_cxn(shape, cxn_pt_idx)
@property
def begin_x(self):
"""
Return the X-position of the begin point of this connector, in
English Metric Units (as a |Length| object).
"""
cxnSp = self._element
x, cx, flipH = cxnSp.x, cxnSp.cx, cxnSp.flipH
begin_x = x+cx if flipH else x
return Emu(begin_x)
@begin_x.setter
def begin_x(self, value):
cxnSp = self._element
x, cx, flipH, new_x = cxnSp.x, cxnSp.cx, cxnSp.flipH, int(value)
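        # When flipH is set, the begin point sits at the right edge of the
        # bounding box (x + cx); otherwise at the left edge (x). Moving the
        # begin point grows or shrinks the extents and, if the new point
        # crosses the fixed end point, flips the shape. The end_x/end_y
        # setters below apply the mirror-image logic.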
if flipH:
old_x = x + cx
dx = abs(new_x - old_x)
if new_x >= old_x:
cxnSp.cx = cx + dx
elif dx <= cx:
cxnSp.cx = cx - dx
else:
cxnSp.flipH = False
cxnSp.x = new_x
cxnSp.cx = dx - cx
else:
dx = abs(new_x - x)
if new_x <= x:
cxnSp.x = new_x
cxnSp.cx = cx + dx
elif dx <= cx:
cxnSp.x = new_x
cxnSp.cx = cx - dx
else:
cxnSp.flipH = True
cxnSp.x = x + cx
cxnSp.cx = dx - cx
@property
def begin_y(self):
"""
Return the Y-position of the begin point of this connector, in
English Metric Units (as a |Length| object).
"""
cxnSp = self._element
y, cy, flipV = cxnSp.y, cxnSp.cy, cxnSp.flipV
begin_y = y+cy if flipV else y
return Emu(begin_y)
@begin_y.setter
def begin_y(self, value):
cxnSp = self._element
y, cy, flipV, new_y = cxnSp.y, cxnSp.cy, cxnSp.flipV, int(value)
if flipV:
old_y = y + cy
dy = abs(new_y - old_y)
if new_y >= old_y:
cxnSp.cy = cy + dy
elif dy <= cy:
cxnSp.cy = cy - dy
else:
cxnSp.flipV = False
cxnSp.y = new_y
cxnSp.cy = dy - cy
else:
dy = abs(new_y - y)
if new_y <= y:
cxnSp.y = new_y
cxnSp.cy = cy + dy
elif dy <= cy:
cxnSp.y = new_y
cxnSp.cy = cy - dy
else:
cxnSp.flipV = True
cxnSp.y = y + cy
cxnSp.cy = dy - cy
def end_connect(self, shape, cxn_pt_idx):
"""
**EXPERIMENTAL** - *The current implementation only works properly
with rectangular shapes, such as pictures and rectangles. Use with
other shape types may cause unexpected visual alignment of the
connected end-point and could lead to a load error if cxn_pt_idx
exceeds the connection point count available on the connected shape.
That said, a quick test should reveal what to expect when using this
method with other shape types.*
Connect the ending of this connector to *shape* at the connection
point specified by *cxn_pt_idx*.
"""
self._connect_end_to(shape, cxn_pt_idx)
self._move_end_to_cxn(shape, cxn_pt_idx)
@property
def end_x(self):
"""
Return the X-position of the end point of this connector, in English
Metric Units (as a |Length| object).
"""
cxnSp = self._element
x, cx, flipH = cxnSp.x, cxnSp.cx, cxnSp.flipH
end_x = x if flipH else x+cx
return Emu(end_x)
@end_x.setter
def end_x(self, value):
cxnSp = self._element
x, cx, flipH, new_x = cxnSp.x, cxnSp.cx, cxnSp.flipH, int(value)
if flipH:
dx = abs(new_x - x)
if new_x <= x:
cxnSp.x = new_x
cxnSp.cx = cx + dx
elif dx <= cx:
cxnSp.x = new_x
cxnSp.cx = cx - dx
else:
cxnSp.flipH = False
cxnSp.x = x + cx
cxnSp.cx = dx - cx
else:
old_x = x + cx
dx = abs(new_x - old_x)
if new_x >= old_x:
cxnSp.cx = cx + dx
elif dx <= cx:
cxnSp.cx = cx - dx
else:
cxnSp.flipH = True
cxnSp.x = new_x
cxnSp.cx = dx - cx
@property
def end_y(self):
"""
Return the Y-position of the end point of this connector, in English
Metric Units (as a |Length| object).
"""
cxnSp = self._element
y, cy, flipV = cxnSp.y, cxnSp.cy, cxnSp.flipV
end_y = y if flipV else y+cy
return Emu(end_y)
@end_y.setter
def end_y(self, value):
cxnSp = self._element
y, cy, flipV, new_y = cxnSp.y, cxnSp.cy, cxnSp.flipV, int(value)
if flipV:
dy = abs(new_y - y)
if new_y <= y:
cxnSp.y = new_y
cxnSp.cy = cy + dy
elif dy <= cy:
cxnSp.y = new_y
cxnSp.cy = cy - dy
else:
cxnSp.flipV = False
cxnSp.y = y + cy
cxnSp.cy = dy - cy
else:
old_y = y + cy
dy = abs(new_y - old_y)
if new_y >= old_y:
cxnSp.cy = cy + dy
elif dy <= cy:
cxnSp.cy = cy - dy
else:
cxnSp.flipV = True
cxnSp.y = new_y
cxnSp.cy = dy - cy
def _connect_begin_to(self, shape, cxn_pt_idx):
"""
Add or update a stCxn element for this connector that connects its
begin point to the connection point of *shape* specified by
*cxn_pt_idx*.
"""
cNvCxnSpPr = self._element.nvCxnSpPr.cNvCxnSpPr
stCxn = cNvCxnSpPr.get_or_add_stCxn()
stCxn.id = shape.shape_id
stCxn.idx = cxn_pt_idx
def _connect_end_to(self, shape, cxn_pt_idx):
"""
Add or update an endCxn element for this connector that connects its
end point to the connection point of *shape* specified by
*cxn_pt_idx*.
"""
cNvCxnSpPr = self._element.nvCxnSpPr.cNvCxnSpPr
endCxn = cNvCxnSpPr.get_or_add_endCxn()
endCxn.id = shape.shape_id
endCxn.idx = cxn_pt_idx
def _move_begin_to_cxn(self, shape, cxn_pt_idx):
"""
Move the begin point of this connector to coordinates of the
connection point of *shape* specified by *cxn_pt_idx*.
"""
x, y, cx, cy = shape.left, shape.top, shape.width, shape.height
self.begin_x, self.begin_y = {
0: (int(x + cx/2), y),
1: (x, int(y + cy/2)),
2: (int(x + cx/2), y + cy),
3: (x + cx, int(y + cy/2)),
}[cxn_pt_idx]
def _move_end_to_cxn(self, shape, cxn_pt_idx):
"""
Move the end point of this connector to the coordinates of the
connection point of *shape* specified by *cxn_pt_idx*.
"""
x, y, cx, cy = shape.left, shape.top, shape.width, shape.height
self.end_x, self.end_y = {
0: (int(x + cx/2), y),
1: (x, int(y + cy/2)),
2: (int(x + cx/2), y + cy),
3: (x + cx, int(y + cy/2)),
}[cxn_pt_idx]
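# A minimal usage sketch (illustration only, not part of this module). It
# assumes the public python-pptx API, where a connector is created through
# shapes.add_connector() and then attached to another shape with
# begin_connect()/end_connect():
#
# >>> from pptx import Presentation
# >>> from pptx.enum.shapes import MSO_CONNECTOR
# >>> from pptx.util import Inches
# >>> prs = Presentation()
# >>> slide = prs.slides.add_slide(prs.slide_layouts[6])  # blank layout
# >>> connector = slide.shapes.add_connector(
# ...     MSO_CONNECTOR.STRAIGHT, Inches(1), Inches(1), Inches(4), Inches(2)
# ... )
# >>> connector.begin_x, connector.begin_y   # EMU; Inches(1) == 914400
# (914400, 914400)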
from pandas import DateOffset, DatetimeIndex, Series, Timestamp
from pandas.compat import add_metaclass
from datetime import datetime, timedelta
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU
from pandas.tseries.offsets import Easter, Day
def next_monday(dt):
"""
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday, use Monday instead
"""
if dt.weekday() == 5:
return dt + timedelta(2)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_monday_or_tuesday(dt):
"""
For second holiday of two adjacent ones!
If holiday falls on Saturday, use following Monday instead;
if holiday falls on Sunday or Monday, use following Tuesday instead
(because Monday is already taken by adjacent holiday on the day before)
"""
dow = dt.weekday()
if dow == 5 or dow == 6:
return dt + timedelta(2)
elif dow == 0:
return dt + timedelta(1)
return dt
def previous_friday(dt):
"""
If holiday falls on Saturday or Sunday, use previous Friday instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt - timedelta(2)
return dt
def sunday_to_monday(dt):
"""
If holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 6:
return dt + timedelta(1)
return dt
def weekend_to_monday(dt):
"""
If holiday falls on Sunday or Saturday,
use day thereafter (Monday) instead.
Needed for holidays such as Christmas observation in Europe
"""
if dt.weekday() == 6:
return dt + timedelta(1)
elif dt.weekday() == 5:
return dt + timedelta(2)
return dt
def nearest_workday(dt):
"""
If holiday falls on Saturday, use day before (Friday) instead;
if holiday falls on Sunday, use day thereafter (Monday) instead.
"""
if dt.weekday() == 5:
return dt - timedelta(1)
elif dt.weekday() == 6:
return dt + timedelta(1)
return dt
def next_workday(dt):
"""
returns next weekday used for observances
"""
dt += timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt += timedelta(days=1)
return dt
def previous_workday(dt):
"""
returns previous weekday used for observances
"""
dt -= timedelta(days=1)
while dt.weekday() > 4:
# Mon-Fri are 0-4
dt -= timedelta(days=1)
return dt
def before_nearest_workday(dt):
"""
returns previous workday after nearest workday
"""
return previous_workday(nearest_workday(dt))
def after_nearest_workday(dt):
"""
returns next workday after nearest workday
needed for Boxing day or multiple holidays in a series
"""
return next_workday(nearest_workday(dt))
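# Worked examples for the observance helpers above (illustration only).
# July 4, 2015 fell on a Saturday:
#
# >>> nearest_workday(datetime(2015, 7, 4))    # Saturday -> prior Friday
# datetime.datetime(2015, 7, 3, 0, 0)
# >>> next_monday(datetime(2015, 7, 4))        # Saturday -> next Monday
# datetime.datetime(2015, 7, 6, 0, 0)
# >>> weekend_to_monday(datetime(2015, 7, 5))  # Sunday -> next Monday
# datetime.datetime(2015, 7, 6, 0, 0)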
class Holiday(object):
"""
Class that defines a holiday with start/end dates and rules
for observance.
"""
def __init__(self, name, year=None, month=None, day=None, offset=None,
observance=None, start_date=None, end_date=None,
days_of_week=None):
"""
Parameters
----------
        name : str
            Name of the holiday
        offset : pandas.tseries.offsets object or list of such objects
            Offset(s) applied to the base date to compute the holiday
        observance : function
            Maps the base date (a pandas Timestamp) to the observed
            holiday date
        days_of_week : tuple of int
            Restrict the holiday to these weekdays, e.g. (0, 1, 2, 3)
            for Monday through Thursday (Monday=0, ..., Sunday=6)
Examples
--------
>>> from pandas.tseries.holiday import Holiday, nearest_workday
>>> from pandas import DateOffset
>>> from dateutil.relativedelta import MO
        >>> USMemorialDay = Holiday('MemorialDay', month=5, day=31,
                    offset=DateOffset(weekday=MO(-1)))
>>> USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
>>> July3rd = Holiday('July 3rd', month=7, day=3,)
        >>> NewYears = Holiday('New Years Day', month=1, day=1,
                    observance=nearest_workday)
>>> July3rd = Holiday('July 3rd', month=7, day=3,
days_of_week=(0, 1, 2, 3))
"""
self.name = name
self.year = year
self.month = month
self.day = day
self.offset = offset
self.start_date = start_date
self.end_date = end_date
self.observance = observance
assert (days_of_week is None or type(days_of_week) == tuple)
self.days_of_week = days_of_week
def __repr__(self):
info = ''
if self.year is not None:
info += 'year=%s, ' % self.year
info += 'month=%s, day=%s, ' % (self.month, self.day)
if self.offset is not None:
info += 'offset=%s' % self.offset
if self.observance is not None:
info += 'observance=%s' % self.observance
repr = 'Holiday: %s (%s)' % (self.name, info)
return repr
def dates(self, start_date, end_date, return_name=False):
"""
Calculate holidays between start date and end date
Parameters
----------
        start_date : starting date, datetime-like
        end_date : ending date, datetime-like
return_name : bool, optional, default=False
If True, return a series that has dates and holiday names.
False will only return dates.
"""
if self.year is not None:
dt = Timestamp(datetime(self.year, self.month, self.day))
if return_name:
return Series(self.name, index=[dt])
else:
return [dt]
if self.start_date is not None:
start_date = self.start_date
if self.end_date is not None:
end_date = self.end_date
start_date = Timestamp(start_date)
end_date = Timestamp(end_date)
year_offset = DateOffset(years=1)
base_date = Timestamp(
datetime(start_date.year, self.month, self.day),
tz=start_date.tz,
)
dates = DatetimeIndex(start=base_date, end=end_date, freq=year_offset)
holiday_dates = self._apply_rule(dates)
if self.days_of_week is not None:
holiday_dates = list(filter(lambda x: x is not None and
x.dayofweek in self.days_of_week,
holiday_dates))
else:
holiday_dates = list(filter(lambda x: x is not None, holiday_dates))
if return_name:
return Series(self.name, index=holiday_dates)
return holiday_dates
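# Example (illustration only): the last Monday in May, built from a base
# date of May 31 rolled back with MO(-1):
#
# >>> memorial = Holiday('Memorial Day', month=5, day=31,
# ...                    offset=DateOffset(weekday=MO(-1)))
# >>> memorial.dates('2014-01-01', '2015-12-31')
# [Timestamp('2014-05-26 00:00:00'), Timestamp('2015-05-25 00:00:00')]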
def _apply_rule(self, dates):
"""
Apply the given offset/observance to an
iterable of dates.
Parameters
----------
dates : array-like
Dates to apply the given offset/observance rule
Returns
-------
Dates with rules applied
"""
if self.observance is not None:
return map(lambda d: self.observance(d), dates)
if self.offset is not None:
if not isinstance(self.offset, list):
offsets = [self.offset]
else:
offsets = self.offset
for offset in offsets:
dates = list(map(lambda d: d + offset, dates))
return dates
holiday_calendars = {}
def register(cls):
try:
name = cls.name
    except AttributeError:
name = cls.__name__
holiday_calendars[name] = cls
def get_calendar(name):
"""
Return an instance of a calendar based on its name.
Parameters
----------
name : str
Calendar name to return an instance of
"""
return holiday_calendars[name]()
class HolidayCalendarMetaClass(type):
def __new__(cls, clsname, bases, attrs):
calendar_class = super(HolidayCalendarMetaClass, cls).__new__(cls, clsname, bases, attrs)
register(calendar_class)
return calendar_class
@add_metaclass(HolidayCalendarMetaClass)
class AbstractHolidayCalendar(object):
"""
Abstract interface to create holidays following certain rules.
"""
__metaclass__ = HolidayCalendarMetaClass
rules = []
start_date = Timestamp(datetime(1970, 1, 1))
end_date = Timestamp(datetime(2030, 12, 31))
_cache = None
def __init__(self, name=None, rules=None):
"""
        Initializes a holiday calendar with a given set of rules. Normally
classes just have the rules defined within them.
Parameters
----------
name : str
Name of the holiday calendar, defaults to class name
rules : array of Holiday objects
A set of rules used to create the holidays.
"""
super(AbstractHolidayCalendar, self).__init__()
if name is None:
name = self.__class__.__name__
self.name = name
if rules is not None:
self.rules = rules
def holidays(self, start=None, end=None, return_name=False):
"""
        Returns the holidays that fall between the start and end dates
Parameters
----------
start : starting date, datetime-like, optional
end : ending date, datetime-like, optional
        return_name : bool, optional
If True, return a series that has dates and holiday names.
False will only return a DatetimeIndex of dates.
Returns
-------
DatetimeIndex of holidays
"""
if self.rules is None:
raise Exception('Holiday Calendar %s does not have any '
'rules specified' % self.name)
if start is None:
start = AbstractHolidayCalendar.start_date
if end is None:
end = AbstractHolidayCalendar.end_date
start = Timestamp(start)
end = Timestamp(end)
holidays = None
# If we don't have a cache or the dates are outside the prior cache, we get them again
if self._cache is None or start < self._cache[0] or end > self._cache[1]:
for rule in self.rules:
rule_holidays = rule.dates(start, end, return_name=True)
if holidays is None:
holidays = rule_holidays
else:
holidays = holidays.append(rule_holidays)
self._cache = (start, end, holidays.sort_index())
holidays = self._cache[2]
holidays = holidays[start:end]
if return_name:
return holidays
else:
return holidays.index
@staticmethod
def merge_class(base, other):
"""
        Merge holiday calendars together. The base calendar
        takes precedence over other. The merge is done
        based on each holiday's name.
Parameters
----------
base : AbstractHolidayCalendar instance/subclass or array of Holiday objects
other : AbstractHolidayCalendar instance/subclass or array of Holiday objects
"""
try:
other = other.rules
        except AttributeError:
pass
if not isinstance(other, list):
other = [other]
other_holidays = dict((holiday.name, holiday) for holiday in other)
try:
base = base.rules
        except AttributeError:
pass
if not isinstance(base, list):
base = [base]
base_holidays = dict([(holiday.name, holiday) for holiday in base])
other_holidays.update(base_holidays)
return list(other_holidays.values())
def merge(self, other, inplace=False):
"""
Merge holiday calendars together. The caller's class
rules take precedence. The merge will be done
based on each holiday's name.
Parameters
----------
other : holiday calendar
        inplace : bool (default=False)
            If True, set this calendar's rules to the merged holidays;
            otherwise return the merged array of Holiday objects
"""
holidays = self.merge_class(self, other)
if inplace:
self.rules = holidays
else:
return holidays
USMemorialDay = Holiday('MemorialDay', month=5, day=31,
                        offset=DateOffset(weekday=MO(-1)))
USLaborDay = Holiday('Labor Day', month=9, day=1,
offset=DateOffset(weekday=MO(1)))
USColumbusDay = Holiday('Columbus Day', month=10, day=1,
offset=DateOffset(weekday=MO(2)))
USThanksgivingDay = Holiday('Thanksgiving', month=11, day=1,
offset=DateOffset(weekday=TH(4)))
USMartinLutherKingJr = Holiday('Dr. Martin Luther King Jr.', month=1, day=1,
offset=DateOffset(weekday=MO(3)))
USPresidentsDay = Holiday("President's Day", month=2, day=1,
                          offset=DateOffset(weekday=MO(3)))
GoodFriday = Holiday("Good Friday", month=1, day=1, offset=[Easter(), Day(-2)])
EasterMonday = Holiday("Easter Monday", month=1, day=1, offset=[Easter(), Day(1)])
class USFederalHolidayCalendar(AbstractHolidayCalendar):
"""
US Federal Government Holiday Calendar based on rules specified
by: https://www.opm.gov/policy-data-oversight/snow-dismissal-procedures/federal-holidays/
"""
rules = [
Holiday('New Years Day', month=1, day=1, observance=nearest_workday),
USMartinLutherKingJr,
USPresidentsDay,
USMemorialDay,
Holiday('July 4th', month=7, day=4, observance=nearest_workday),
USLaborDay,
USColumbusDay,
Holiday('Veterans Day', month=11, day=11, observance=nearest_workday),
USThanksgivingDay,
Holiday('Christmas', month=12, day=25, observance=nearest_workday)
]
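# Example usage (illustration only):
#
# >>> cal = USFederalHolidayCalendar()
# >>> cal.holidays(start='2014-01-01', end='2014-03-31')
# DatetimeIndex(['2014-01-01', '2014-01-20', '2014-02-17'],
#               dtype='datetime64[ns]', freq=None)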
def HolidayCalendarFactory(name, base, other, base_class=AbstractHolidayCalendar):
rules = AbstractHolidayCalendar.merge_class(base, other)
calendar_class = type(name, (base_class,), {"rules": rules, "name": name})
return calendar_class
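# Example (illustration only): derive a new calendar that adds Good Friday
# to the federal rules. merge_class gives the base calendar's rules
# precedence when holiday names collide.
#
# >>> ExampleCalendar = HolidayCalendarFactory('ExampleCalendar',
# ...                                          USFederalHolidayCalendar,
# ...                                          GoodFriday)
# >>> len(ExampleCalendar.rules)    # ten federal rules plus Good Friday
# 11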