gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/env python2
#
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
from mininode import *
from blockstore import BlockStore, TxStore
from util import p2p_port
'''
This is a tool for comparing two or more bitcoinds to each other
using a script provided.
To use, create a class that implements get_tests(), and pass it in
as the test generator to TestManager. get_tests() should be a python
generator that returns TestInstance objects. See below for definition.
'''
# TestNode behaves as follows:
# Configure with a BlockStore and TxStore
# on_inv: log the message but don't request
# on_headers: log the chain tip
# on_pong: update ping response map (for synchronization)
# on_getheaders: provide headers via BlockStore
# on_getdata: provide blocks via BlockStore
def wait_until(predicate, attempts=float('inf'), timeout=float('inf')):
    """Poll predicate() under mininode_lock every 50ms.

    Returns True as soon as predicate() is true; returns False once
    either the attempt budget or the timeout (seconds) is exhausted.

    Note: elapsed time is approximated as attempts * 50ms; time spent
    inside predicate() itself is not counted.
    """
    # (The former module-level `global mininode_lock` statement was a
    # no-op — `global` only has meaning inside a function — so it was
    # removed; mininode_lock comes from `from mininode import *`.)
    attempt = 0
    elapsed = 0
    while attempt < attempts and elapsed < timeout:
        with mininode_lock:
            if predicate():
                return True
        attempt += 1
        elapsed += 0.05
        time.sleep(0.05)
    return False
class RejectResult(object):
    """Expected outcome for a transaction or block that should be rejected.

    A RejectResult matches a received reject when the numeric codes are
    equal and the received reason starts with the expected reason prefix
    (an empty expected reason therefore matches any reason).
    """

    def __init__(self, code, reason=''):
        self.code = code
        self.reason = reason

    def match(self, other):
        # Codes must agree exactly; reasons match on prefix.
        return self.code == other.code and other.reason.startswith(self.reason)

    def __repr__(self):
        return '%i:%s' % (self.code, self.reason or '*')
class TestNode(NodeConnCB):
    """P2p connection callback used by the comparison tool.

    Serves headers/blocks from a shared BlockStore and transactions from
    a shared TxStore, and records what the peer announced (inv, headers,
    reject) and requested (getdata) so TestManager can synchronize and
    compare node state.
    """

    def __init__(self, block_store, tx_store):
        NodeConnCB.__init__(self)
        self.conn = None
        # sha256 of the last header the peer reported (its chain tip).
        self.bestblockhash = None
        self.block_store = block_store
        # hash -> bool: True once the peer sent getdata for that block.
        self.block_request_map = {}
        self.tx_store = tx_store
        # hash -> bool: True once the peer sent getdata for that tx.
        self.tx_request_map = {}
        # hash -> RejectResult, filled from reject messages.
        self.block_reject_map = {}
        self.tx_reject_map = {}
        # When the pingmap is non-empty we're waiting for
        # a response
        self.pingMap = {}
        # Hashes from the peer's most recent inv message.
        self.lastInv = []
        self.closed = False

    def on_close(self, conn):
        self.closed = True

    def add_connection(self, conn):
        self.conn = conn

    def on_headers(self, conn, message):
        # Track the peer's tip from the last header it announced.
        if len(message.headers) > 0:
            best_header = message.headers[-1]
            best_header.calc_sha256()
            self.bestblockhash = best_header.sha256

    def on_getheaders(self, conn, message):
        # Answer from the shared block store, if it has something to say.
        response = self.block_store.headers_for(message.locator, message.hashstop)
        if response is not None:
            conn.send_message(response)

    def on_getdata(self, conn, message):
        # Deliver whatever the stores have, and record what was requested.
        [conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
        [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
        for i in message.inv:
            if i.type == 1:  # inv type 1 == transaction (see send_inv)
                self.tx_request_map[i.hash] = True
            elif i.type == 2:  # inv type 2 == block
                self.block_request_map[i.hash] = True

    def on_inv(self, conn, message):
        self.lastInv = [x.hash for x in message.inv]

    def on_pong(self, conn, message):
        # A pong must correspond to a ping we sent via send_ping.
        try:
            del self.pingMap[message.nonce]
        except KeyError:
            raise AssertionError("Got pong for unknown ping [%s]" % repr(message))

    def on_reject(self, conn, message):
        # Remember rejection code/reason per object hash for later checks.
        if message.message == 'tx':
            self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
        if message.message == 'block':
            self.block_reject_map[message.data] = RejectResult(message.code, message.reason)

    def send_inv(self, obj):
        # inv type 2 for blocks, 1 for transactions.
        mtype = 2 if isinstance(obj, CBlock) else 1
        self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))

    def send_getheaders(self):
        # We ask for headers from their last tip.
        m = msg_getheaders()
        m.locator = self.block_store.get_locator(self.bestblockhash)
        self.conn.send_message(m)

    # This assumes BIP31
    def send_ping(self, nonce):
        self.pingMap[nonce] = True
        self.conn.send_message(msg_ping(nonce))

    def received_ping_response(self, nonce):
        return nonce not in self.pingMap

    def send_mempool(self):
        # Clear lastInv so the reply inv reflects the mempool contents.
        self.lastInv = []
        self.conn.send_message(msg_mempool())
# TestInstance:
#
# Instances of these are generated by the test generator, and fed into the
# comptool.
#
# "blocks_and_transactions" should be an array of
# [obj, True/False/None, hash/None]:
# - obj is either a CBlock, CBlockHeader, or a CTransaction, and
# - the second value indicates whether the object should be accepted
# into the blockchain or mempool (for tests where we expect a certain
# answer), or "None" if we don't expect a certain answer and are just
# comparing the behavior of the nodes being tested.
# - the third value is the hash to test the tip against (if None or omitted,
# use the hash of the block)
# - NOTE: if a block header, no test is performed; instead the header is
# just added to the block_store. This is to facilitate block delivery
# when communicating with headers-first clients (when withholding an
# intermediate block).
# sync_every_block: if True, then each block will be inv'ed, synced, and
# nodes will be tested based on the outcome for the block. If False,
# then inv's accumulate until all blocks are processed (or max inv size
# is reached) and then sent out in one inv message. Then the final block
# will be synced across all connections, and the outcome of the final
# block will be tested.
# sync_every_tx: analogous to behavior for sync_every_block, except if outcome
# on the final tx is None, then contents of entire mempool are compared
# across all connections. (If outcome of final tx is specified as true
# or false, then only the last tx is tested against outcome.)
class TestInstance(object):
    """One comparison test: a list of [object, outcome, (optional tip)]
    entries plus flags controlling how often node state is synchronized
    (see the comment block above for the full contract)."""

    def __init__(self, objects=None, sync_every_block=True, sync_every_tx=False):
        # Fall back to a fresh list — never share a mutable default.
        self.blocks_and_transactions = objects or []
        self.sync_every_block = sync_every_block
        self.sync_every_tx = sync_every_tx
class TestManager(object):
    """Drives a comparison test.

    Feeds TestInstances from a test generator to every connected node,
    synchronizes via getheaders + ping/pong nonces, and checks that all
    nodes agree with each other (and, when an outcome is given, with
    the expected outcome).
    """

    def __init__(self, testgen, datadir):
        self.test_generator = testgen
        self.connections = []
        self.test_nodes = []
        # Stores shared by every TestNode, backed by files under datadir.
        self.block_store = BlockStore(datadir)
        self.tx_store = TxStore(datadir)
        # Nonce used for ping-based synchronization; bumped after each sync.
        self.ping_counter = 1

    def add_all_connections(self, nodes):
        # One p2p connection (with its own TestNode callback) per node.
        for i in range(len(nodes)):
            # Create a p2p connection to each node
            test_node = TestNode(self.block_store, self.tx_store)
            self.test_nodes.append(test_node)
            self.connections.append(NodeConn('127.0.0.1', p2p_port(i), nodes[i], test_node))
            # Make sure the TestNode (callback class) has a reference to its
            # associated NodeConn
            test_node.add_connection(self.connections[-1])

    def clear_all_connections(self):
        self.connections = []
        self.test_nodes = []

    def wait_for_disconnections(self):
        def disconnected():
            return all(node.closed for node in self.test_nodes)
        return wait_until(disconnected, timeout=10)

    def wait_for_verack(self):
        def veracked():
            return all(node.verack_received for node in self.test_nodes)
        return wait_until(veracked, timeout=10)

    def wait_for_pings(self, counter):
        # True once every node answered the ping with nonce == counter.
        def received_pongs():
            return all(node.received_ping_response(counter) for node in self.test_nodes)
        return wait_until(received_pongs)

    # sync_blocks: Wait for all connections to request the blockhash given
    # then send get_headers to find out the tip of each node, and synchronize
    # the response by using a ping (and waiting for pong with same nonce).
    def sync_blocks(self, blockhash, num_blocks):
        def blocks_requested():
            return all(
                blockhash in node.block_request_map and node.block_request_map[blockhash]
                for node in self.test_nodes
            )
        # --> error if not requested
        if not wait_until(blocks_requested, attempts=20*num_blocks):
            # print [ c.cb.block_request_map for c in self.connections ]
            raise AssertionError("Not all nodes requested block")
        # Send getheaders message
        [ c.cb.send_getheaders() for c in self.connections ]
        # Send ping and wait for response -- synchronization hack
        [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
        self.wait_for_pings(self.ping_counter)
        self.ping_counter += 1

    # Analogous to sync_block (see above)
    def sync_transaction(self, txhash, num_events):
        # Wait for nodes to request transaction (50ms sleep * 20 tries * num_events)
        def transaction_requested():
            return all(
                txhash in node.tx_request_map and node.tx_request_map[txhash]
                for node in self.test_nodes
            )
        # --> error if not requested
        if not wait_until(transaction_requested, attempts=20*num_events):
            # print [ c.cb.tx_request_map for c in self.connections ]
            raise AssertionError("Not all nodes requested transaction")
        # Get the mempool
        [ c.cb.send_mempool() for c in self.connections ]
        # Send ping and wait for response -- synchronization hack
        [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
        self.wait_for_pings(self.ping_counter)
        self.ping_counter += 1
        # Sort inv responses from each node
        with mininode_lock:
            [ c.cb.lastInv.sort() for c in self.connections ]

    # Verify that the tip of each connection all agree with each other, and
    # with the expected outcome (if given)
    def check_results(self, blockhash, outcome):
        with mininode_lock:
            for c in self.connections:
                if outcome is None:
                    # No expected outcome: all nodes just need to agree
                    # with the first connection's tip.
                    if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
                        return False
                elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
                    if c.cb.bestblockhash == blockhash:
                        return False
                    if blockhash not in c.cb.block_reject_map:
                        print 'Block not in reject map: %064x' % (blockhash)
                        return False
                    if not outcome.match(c.cb.block_reject_map[blockhash]):
                        print 'Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash)
                        return False
                elif ((c.cb.bestblockhash == blockhash) != outcome):
                    # print c.cb.bestblockhash, blockhash, outcome
                    return False
            return True

    # Either check that the mempools all agree with each other, or that
    # txhash's presence in the mempool matches the outcome specified.
    # This is somewhat of a strange comparison, in that we're either comparing
    # a particular tx to an outcome, or the entire mempools altogether;
    # perhaps it would be useful to add the ability to check explicitly that
    # a particular tx's existence in the mempool is the same across all nodes.
    def check_mempool(self, txhash, outcome):
        with mininode_lock:
            for c in self.connections:
                if outcome is None:
                    # Make sure the mempools agree with each other
                    if c.cb.lastInv != self.connections[0].cb.lastInv:
                        # print c.rpc.getrawmempool()
                        return False
                elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
                    if txhash in c.cb.lastInv:
                        return False
                    if txhash not in c.cb.tx_reject_map:
                        print 'Tx not in reject map: %064x' % (txhash)
                        return False
                    if not outcome.match(c.cb.tx_reject_map[txhash]):
                        print 'Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash)
                        return False
                elif ((txhash in c.cb.lastInv) != outcome):
                    # print c.rpc.getrawmempool(), c.cb.lastInv
                    return False
            return True

    def run(self):
        """Execute every TestInstance produced by the test generator."""
        # Wait until verack is received
        self.wait_for_verack()
        test_number = 1
        for test_instance in self.test_generator.get_tests():
            # We use these variables to keep track of the last block
            # and last transaction in the tests, which are used
            # if we're not syncing on every block or every tx.
            [ block, block_outcome, tip ] = [ None, None, None ]
            [ tx, tx_outcome ] = [ None, None ]
            invqueue = []
            for test_obj in test_instance.blocks_and_transactions:
                b_or_t = test_obj[0]
                outcome = test_obj[1]
                # Determine if we're dealing with a block or tx
                if isinstance(b_or_t, CBlock): # Block test runner
                    block = b_or_t
                    block_outcome = outcome
                    tip = block.sha256
                    # each test_obj can have an optional third argument
                    # to specify the tip we should compare with
                    # (default is to use the block being tested)
                    if len(test_obj) >= 3:
                        tip = test_obj[2]
                    # Add to shared block_store, set as current block
                    # If there was an open getdata request for the block
                    # previously, and we didn't have an entry in the
                    # block_store, then immediately deliver, because the
                    # node wouldn't send another getdata request while
                    # the earlier one is outstanding.
                    first_block_with_hash = True
                    if self.block_store.get(block.sha256) is not None:
                        first_block_with_hash = False
                    with mininode_lock:
                        self.block_store.add_block(block)
                        for c in self.connections:
                            if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
                                # There was a previous request for this block hash
                                # Most likely, we delivered a header for this block
                                # but never had the block to respond to the getdata
                                c.send_message(msg_block(block))
                            else:
                                c.cb.block_request_map[block.sha256] = False
                    # Either send inv's to each node and sync, or add
                    # to invqueue for later inv'ing.
                    if (test_instance.sync_every_block):
                        [ c.cb.send_inv(block) for c in self.connections ]
                        self.sync_blocks(block.sha256, 1)
                        if (not self.check_results(tip, outcome)):
                            raise AssertionError("Test failed at test %d" % test_number)
                    else:
                        invqueue.append(CInv(2, block.sha256))
                elif isinstance(b_or_t, CBlockHeader):
                    # Headers are only stored, never tested (supports
                    # headers-first delivery with a withheld block).
                    block_header = b_or_t
                    self.block_store.add_header(block_header)
                else: # Tx test runner
                    assert(isinstance(b_or_t, CTransaction))
                    tx = b_or_t
                    tx_outcome = outcome
                    # Add to shared tx store and clear map entry
                    with mininode_lock:
                        self.tx_store.add_transaction(tx)
                        for c in self.connections:
                            c.cb.tx_request_map[tx.sha256] = False
                    # Again, either inv to all nodes or save for later
                    if (test_instance.sync_every_tx):
                        [ c.cb.send_inv(tx) for c in self.connections ]
                        self.sync_transaction(tx.sha256, 1)
                        if (not self.check_mempool(tx.sha256, outcome)):
                            raise AssertionError("Test failed at test %d" % test_number)
                    else:
                        invqueue.append(CInv(1, tx.sha256))
                # Ensure we're not overflowing the inv queue
                if len(invqueue) == MAX_INV_SZ:
                    [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
                    invqueue = []
            # Do final sync if we weren't syncing on every block or every tx.
            if (not test_instance.sync_every_block and block is not None):
                if len(invqueue) > 0:
                    [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
                    invqueue = []
                self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
                if (not self.check_results(tip, block_outcome)):
                    raise AssertionError("Block test failed at test %d" % test_number)
            if (not test_instance.sync_every_tx and tx is not None):
                if len(invqueue) > 0:
                    [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
                    invqueue = []
                self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
                if (not self.check_mempool(tx.sha256, tx_outcome)):
                    raise AssertionError("Mempool test failed at test %d" % test_number)
            print "Test %d: PASS" % test_number, [ c.rpc.getblockcount() for c in self.connections ]
            test_number += 1
        # All tests done: tear down connections and persistent stores.
        [ c.disconnect_node() for c in self.connections ]
        self.wait_for_disconnections()
        self.block_store.close()
        self.tx_store.close()
| |
#! /usr/bin/env python
## @file: src/sdm/fuse_backend.py
# Configure the FUSE backend and handle mounts
#
# @author Illyoung Choi
#
# @copyright Copyright 2016 The Trustees of University of Arizona\n
# Licensed under the Apache License, Version 2.0 (the "License" );
# you may not use this file except in compliance with the License.\n
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0\n
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import psutil
import os
import time
import inspect
import subprocess
import tempfile
import shlex
import shutil
import abstract_backend as sdm_absbackends
import util as sdm_util
from os.path import expanduser
# Defaults consumed by FuseBackendConfig.
DEFAULT_MOUNT_PATH = "~/sdm_mounts"
DEFAULT_SYNDICATE_DEBUG_MODE = True
DEFAULT_SYNDICATE_DEBUG_LEVEL = 3
# 2 GiB local cache cap. (The previous "20GB" comment did not match the
# value, which is 2*1024**3 bytes.)
DEFAULT_SYNDICATE_CACHE_MAX = 2*1024*1024*1024
DEFAULT_USE_VALGRIND = False
# Process name used to locate running syndicatefs instances and their mounts.
SYNDICATEFS_PROCESS_NAME = "syndicatefs"
# Root directory for per-mount Syndicate configuration directories.
SYNDICATE_CONFIG_ROOT_PATH = "~/.sdm/mounts/"
class FuseBackendException(sdm_absbackends.AbstractBackendException):
    """Raised by the FUSE backend for config, subprocess and mount failures."""
    pass
class FuseBackendConfig(sdm_absbackends.AbstractBackendConfig):
    """
    FUSE Backend Config

    Plain value object holding mount-path and syndicatefs tuning options;
    serializable to/from a flat dict.
    """

    def __init__(self):
        self.default_mount_path = DEFAULT_MOUNT_PATH
        self.syndicate_debug_mode = DEFAULT_SYNDICATE_DEBUG_MODE
        self.syndicate_debug_level = DEFAULT_SYNDICATE_DEBUG_LEVEL
        self.syndicate_cache_max = DEFAULT_SYNDICATE_CACHE_MAX
        self.use_valgrind = DEFAULT_USE_VALGRIND

    @classmethod
    def from_dict(cls, d):
        """Build a config from a plain dict (inverse of to_json)."""
        config = FuseBackendConfig()
        for key in ("default_mount_path", "syndicate_debug_mode",
                    "syndicate_debug_level", "syndicate_cache_max",
                    "use_valgrind"):
            setattr(config, key, d[key])
        return config

    @classmethod
    def get_default_config(cls):
        return FuseBackendConfig()

    def to_json(self):
        fields = {
            "default_mount_path": self.default_mount_path,
            "syndicate_debug_mode": self.syndicate_debug_mode,
            "syndicate_debug_level": self.syndicate_debug_level,
            "syndicate_cache_max": self.syndicate_cache_max,
            "use_valgrind": self.use_valgrind
        }
        return json.dumps(fields)

    def __eq__(self, other):
        # Two configs are equal when every attribute matches.
        return self.__dict__ == other.__dict__

    def __repr__(self):
        return "<FuseBackendConfig %s %s>" % \
            (self.default_mount_path, self.syndicate_debug_mode)
class FuseBackend(sdm_absbackends.AbstractBackend):
    """
    FUSE Backend

    Mounts Syndicate datasets through syndicatefs and manages the
    per-mount Syndicate configuration directories kept under
    SYNDICATE_CONFIG_ROOT_PATH.
    """

    def __init__(self, backend_config):
        self.backend_config = backend_config

    @classmethod
    def get_name(cls):
        return "FUSE"

    def is_legal_mount_path(self, mount_path):
        # A mount target is legal when it does not exist yet or is a directory.
        if os.path.exists(mount_path) and not os.path.isdir(mount_path):
            return False
        return True

    def make_default_mount_path(self, dataset, default_mount_path):
        """Return the absolute mount path <default_mount_path>/<dataset>.

        Raises FuseBackendException when the result is not absolute.
        """
        mount_path = "%s/%s" % (
            default_mount_path,
            dataset.strip().lower()
        )
        abs_mount_path = sdm_util.get_abs_path(mount_path)
        if not abs_mount_path.startswith("/"):
            raise FuseBackendException("cannot make default mount path for %s" % dataset)
        return abs_mount_path

    def _get_processes(self, name):
        """Return psutil processes whose command line contains *name*."""
        matching_processes = []
        for p in psutil.process_iter():
            try:
                # psutil changed cmdline from an attribute to a method
                # across versions; support both.
                if inspect.ismethod(p.cmdline):
                    pcmdline = p.cmdline()
                else:
                    pcmdline = p.cmdline
                if name in pcmdline:
                    matching_processes.append(p)
            except psutil.NoSuchProcess:
                # Process exited while we were inspecting it - ignore.
                pass
        return matching_processes

    def _get_fuse_mounts(self, name, path):
        """Return /proc/mounts entries for FUSE mounts matching name+path."""
        matching_mounts = []
        with open('/proc/mounts', 'r') as f:
            for line in f:
                fields = line.strip().split()
                # Defend against short/blank lines.
                if len(fields) < 3:
                    continue
                if fields[2].startswith("fuse."):
                    if fields[0] == name and fields[1] == path:
                        matching_mounts.append(fields)
        return matching_mounts

    def _wait_mount(self, mount_path, timeout=30, retry=0):
        """Block until mount_path appears as a FUSE mount.

        Raises FuseBackendException when the syndicatefs process cannot
        be found more than *retry* times, or when the mount does not
        appear within *timeout* seconds.
        """
        tick = 0
        trial = 0
        while True:
            # check processes
            matching_processes = self._get_processes(SYNDICATEFS_PROCESS_NAME)
            if len(matching_processes) == 0:
                trial += 1
                if trial > retry:
                    raise FuseBackendException(
                        "cannot find matching process - %s" %
                        SYNDICATEFS_PROCESS_NAME
                    )
            else:
                # check mount
                matching_mounts = self._get_fuse_mounts(
                    SYNDICATEFS_PROCESS_NAME,
                    mount_path
                )
                if len(matching_mounts) != 0:
                    # success
                    return
            time.sleep(1)
            tick += 1
            if tick >= timeout:
                raise FuseBackendException(
                    "mount timed out - %s / %s" %
                    (SYNDICATEFS_PROCESS_NAME, mount_path)
                )

    def _make_syndicate_configuration_path(self, mount_id):
        """Return the path of syndicate.conf for this mount."""
        # (fixed local-variable typo: was "confing_path")
        config_path = "%s/syndicate.conf" % (
            self._make_syndicate_configuration_root_path(mount_id)
        )
        return config_path

    def _make_syndicate_configuration_root_path(self, mount_id):
        """Return the absolute per-mount configuration directory."""
        config_root_path = "%s/%s" % (
            SYNDICATE_CONFIG_ROOT_PATH.rstrip("/"),
            mount_id.strip().lower()
        )
        abs_config_root_path = sdm_util.get_abs_path(config_root_path)
        return abs_config_root_path

    def _make_syndicate_command(self, mount_id, debug_mode=False):
        conf_path = self._make_syndicate_configuration_path(mount_id)
        debug_flag = ""
        if debug_mode:
            debug_flag = "-d"
        return "syndicate %s -c %s" % (debug_flag, conf_path)

    def _make_syndicatefs_command(self, mount_id, debug_mode=False, debug_level=1):
        conf_path = self._make_syndicate_configuration_path(mount_id)
        debug_flag = ""
        if debug_mode:
            debug_flag = "-d%d" % debug_level
        return "syndicatefs %s -c %s" % (debug_flag, conf_path)

    def _run_command_foreground(self, command):
        """Run *command*, wait for it, raise FuseBackendException on rc != 0."""
        try:
            sdm_util.log_message("Running an external process - %s" % command, sdm_util.LogLevel.DEBUG)
            proc = subprocess.Popen(
                shlex.split(command),
                stderr=subprocess.STDOUT,
                stdout=subprocess.PIPE
            )
            stdout_value = proc.communicate()[0]
            message = repr(stdout_value)
            # communicate() waited for exit, so poll() returns the rc.
            rc = proc.poll()
            if rc != 0:
                raise FuseBackendException(
                    "Failed to run an external process - %d : %s" % (rc, message)
                )
        except subprocess.CalledProcessError as err:
            raise FuseBackendException(
                "> error code: %d, %s" % (err.returncode, err.output)
            )

    def _run_command_background(self, command, log_path):
        """Start *command* in the background with output redirected to log_path."""
        try:
            sdm_util.log_message("Running an external process in background - %s" % command, sdm_util.LogLevel.DEBUG)
            fd = open(log_path, "w")
            try:
                subprocess.Popen(
                    command,
                    stderr=subprocess.STDOUT,
                    stdout=fd.fileno(),
                    shell=True
                )
            finally:
                # The child inherits its own copy of the descriptor;
                # close ours so we don't leak one per mount (the
                # original never closed it).
                fd.close()
        except subprocess.CalledProcessError as err:
            raise FuseBackendException(
                "> error code: %d, %s" % (err.returncode, err.output)
            )

    def _setup_syndicate(self, mount_id, dataset, username, user_pkey, gateway_name, ms_host, debug_mode=False, cache_size_limit=DEFAULT_SYNDICATE_CACHE_MAX):
        """Create (once) the Syndicate config for mount_id and reload certs."""
        config_root_path = self._make_syndicate_configuration_root_path(mount_id)
        if not os.path.exists(config_root_path):
            os.makedirs(config_root_path, 0o755)
        config_path = self._make_syndicate_configuration_path(mount_id)
        # An existing config file means setup already ran; skip registration
        # (and skip re-appending the cache section, which would duplicate it).
        skip_config = False
        if os.path.exists(config_path):
            # skip
            skip_config = True
        syndicate_command = self._make_syndicate_command(mount_id, debug_mode)
        if not skip_config:
            sdm_util.log_message("Setting up Syndicate for an user, %s" % username)
            # Write the user's private key to a temp file for the setup
            # call and remove it afterwards no matter what.
            user_pkey_fd, user_pkey_path = tempfile.mkstemp()
            f = os.fdopen(user_pkey_fd, "w")
            f.write(user_pkey)
            f.close()
            command_register = "%s --trust_public_key setup %s %s %s" % (
                syndicate_command,
                username.strip(),
                user_pkey_path,
                ms_host.strip()
            )
            try:
                self._run_command_foreground(command_register)
                sdm_util.log_message("Successfully set up Syndicate for an user, %s" % username)
            finally:
                os.remove(user_pkey_path)
            # set local cache size
            with open(config_path, "a") as cf:
                cf.write("\n[gateway]\n")
                cf.write("cache_size_limit=%d\n" % cache_size_limit)
        # Refresh user, volume and gateway certs on every mount.
        command_reload_user_cert = "%s reload_user_cert %s" % (
            syndicate_command,
            username.strip()
        )
        command_reload_volume_cert = "%s reload_volume_cert %s" % (
            syndicate_command,
            dataset.strip().lower()
        )
        command_reload_gateway_cert = "%s reload_gateway_cert %s" % (
            syndicate_command,
            gateway_name.strip().lower()
        )
        self._run_command_foreground(command_reload_user_cert)
        sdm_util.log_message("Successfully reloaded a user cert, %s" % username)
        self._run_command_foreground(command_reload_volume_cert)
        sdm_util.log_message("Successfully reloaded a volume cert, %s" % dataset)
        self._run_command_foreground(command_reload_gateway_cert)
        sdm_util.log_message("Successfully reloaded a gateway cert, %s" % gateway_name)

    def _remove_syndicate_setup(self, mount_id):
        """Delete the per-mount Syndicate configuration directory."""
        config_root_path = self._make_syndicate_configuration_root_path(mount_id)
        if os.path.exists(config_root_path):
            shutil.rmtree(config_root_path)
            sdm_util.log_message("Successfully removed Syndicate at %s" % config_root_path)

    def _mount_syndicatefs(self, mount_id, dataset, gateway_name, mount_path, debug_mode=False, debug_level=1, use_valgrind=False):
        """Launch syndicatefs in the background and wait for the mount."""
        sdm_util.log_message("Mounting syndicatefs, %s to %s" % (dataset, mount_path))
        abs_mount_path = sdm_util.get_abs_path(mount_path)
        if not os.path.exists(abs_mount_path):
            os.makedirs(abs_mount_path, 0o755)
        config_root_path = self._make_syndicate_configuration_root_path(mount_id)
        syndicatefs_log_path = "%s/mount.log" % config_root_path
        syndicatefs_command = self._make_syndicatefs_command(mount_id, debug_mode, debug_level)
        # Equivalent shell invocation:
        # ${SYNDICATEFS_CMD} -f -u ANONYMOUS -v ${VOLUME_NAME} -g ${UG_NAME} ${MOUNT_DIR}
        command_format = "%s -f -u ANONYMOUS -v %s -g %s %s"
        if debug_mode and use_valgrind:
            command_format = "valgrind " + command_format
        command_mount = command_format % (
            syndicatefs_command,
            dataset,
            gateway_name,
            abs_mount_path
        )
        self._run_command_background(command_mount, syndicatefs_log_path)
        self._wait_mount(abs_mount_path, retry=3)
        sdm_util.log_message("Successfully mounted syndicatefs, %s to %s" % (dataset, abs_mount_path))

    def _unmount_syndicatefs(self, mount_path):
        """fusermount -u the path; tolerate an already-unmounted target."""
        try:
            command_unmount = "fusermount -u %s" % mount_path
            self._run_command_foreground(command_unmount)
        except FuseBackendException as e:
            if "not found" in str(e):
                # it's already unmounted - skip
                pass
            else:
                # Re-raise without losing the original traceback.
                raise

    def mount(self, mount_id, ms_host, dataset, username, user_pkey, gateway_name, mount_path):
        """Set up Syndicate for mount_id and mount *dataset* at mount_path."""
        sdm_util.print_message("Mounting a dataset %s to %s" % (dataset, mount_path), True)
        self._setup_syndicate(mount_id, dataset, username, user_pkey, gateway_name, ms_host, self.backend_config.syndicate_debug_mode, self.backend_config.syndicate_cache_max)
        self._mount_syndicatefs(mount_id, dataset, gateway_name, mount_path, self.backend_config.syndicate_debug_mode, self.backend_config.syndicate_debug_level, self.backend_config.use_valgrind)
        sdm_util.print_message("A dataset %s is mounted to %s" % (dataset, mount_path), True)

    def check_mount(self, mount_id, dataset, mount_path):
        """Return True if mount_path is currently a live syndicatefs mount."""
        try:
            self._wait_mount(mount_path)
            return True
        except FuseBackendException:
            return False

    def unmount(self, mount_id, dataset, mount_path, cleanup=False):
        """Unmount the dataset; optionally remove its Syndicate config."""
        sdm_util.print_message("Unmounting a dataset %s mounted at %s" % (dataset, mount_path), True)
        self._unmount_syndicatefs(mount_path)
        if cleanup:
            self._remove_syndicate_setup(mount_id)
        sdm_util.print_message("Successfully unmounted a dataset %s mounted at %s" % (dataset, mount_path), True)
| |
import codecs
import re
import types
import sys
from constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from constants import encodings, ReparseException
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = [str(item) for item in spaceCharacters]
asciiLettersBytes = [str(item) for item in asciiLetters]
asciiUppercaseBytes = [str(item) for item in asciiUppercase]

# Characters that must never appear in a document: C0/C1 controls,
# surrogates, and the Unicode noncharacters of every plane.
invalid_unicode_re = re.compile(u"[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
# ASCII whitespace and punctuation ranges.
ascii_punctuation_re = re.compile(ur"[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")

# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream:
    """Buffering for streams that do not have buffering of their own.

    The buffer is implemented as a list of chunks on the assumption that
    joining many strings will be slow since it is O(n**2).
    """

    def __init__(self, stream):
        self.stream = stream
        self.buffer = []
        self.position = [-1, 0]  # chunk number, offset within that chunk

    def tell(self):
        """Return the absolute position in the stream."""
        pos = 0
        for chunk in self.buffer[:self.position[0]]:
            pos += len(chunk)
        pos += self.position[1]
        return pos

    def seek(self, pos):
        """Seek to an absolute position that has already been buffered."""
        assert pos < self._bufferedBytes()
        offset = pos
        i = 0
        # Walk forward chunk by chunk, consuming each chunk's length.
        # (Bug fix: the original subtracted `pos` here instead of the
        # chunk length, landing on the wrong chunk/offset.)
        while len(self.buffer[i]) < offset:
            offset -= len(self.buffer[i])
            i += 1
        self.position = [i, offset]

    def read(self, bytes):
        if not self.buffer:
            return self._readStream(bytes)
        elif (self.position[0] == len(self.buffer) and
              self.position[1] == len(self.buffer[-1])):
            # At the very end of the buffered data: go to the raw stream.
            return self._readStream(bytes)
        else:
            return self._readFromBuffer(bytes)

    def _bufferedBytes(self):
        return sum([len(item) for item in self.buffer])

    def _readStream(self, bytes):
        # Read from the underlying stream and remember the chunk.
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position[0] += 1
        self.position[1] = len(data)
        return data

    def _readFromBuffer(self, bytes):
        remainingBytes = bytes
        rv = []
        bufferIndex = self.position[0]
        bufferOffset = self.position[1]
        while bufferIndex < len(self.buffer) and remainingBytes != 0:
            assert remainingBytes > 0
            bufferedData = self.buffer[bufferIndex]
            if remainingBytes <= len(bufferedData) - bufferOffset:
                # The current chunk satisfies the request.
                bytesToRead = remainingBytes
                self.position = [bufferIndex, bufferOffset + bytesToRead]
            else:
                # Take the rest of this chunk and move to the next one.
                bytesToRead = len(bufferedData) - bufferOffset
                self.position = [bufferIndex, len(bufferedData)]
                bufferIndex += 1
            # (Dropped the original's useless `data = rv.append(...)`;
            # list.append always returns None.)
            rv.append(bufferedData[bufferOffset:
                                   bufferOffset + bytesToRead])
            remainingBytes -= bytesToRead
            bufferOffset = 0
        if remainingBytes:
            # Buffer exhausted: read the remainder from the raw stream.
            rv.append(self._readStream(remainingBytes))
        return "".join(rv)
class HTMLInputStream:
"""Provides a unicode stream of characters to the HTMLTokenizer.
This class takes care of character encoding and removing or replacing
incorrect byte-sequences and also provides column and line tracking.
"""
_defaultChunkSize = 10240
    def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
        """Initialises the HTMLInputStream.

        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.

        source can be either a file-object, local filename or a string.

        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)

        parseMeta - Look for a <meta> element containing encoding information
        chardet - Allow falling back to the chardet library for detection
        """
        # List of where new lines occur
        self.newLines = [0]
        # (encoding name, confidence) — confidence is "certain" or "tentative".
        self.charEncoding = (codecName(encoding), "certain")
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)
        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 512
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Encoding to use if no other information can be found
        self.defaultEncoding = "windows-1252"
        # Detect encoding iff no explicit "transport level" encoding is supplied
        if (self.charEncoding[0] is None):
            self.charEncoding = self.detectEncoding(parseMeta, chardet)
        self.reset()
    def reset(self):
        """(Re)create the decoding reader and reset all position tracking."""
        # Undecodable byte sequences are replaced ('replace'), not raised.
        self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
                                                                 'replace')
        self.chunk = u""
        self.chunkSize = 0
        self.chunkOffset = 0
        self.errors = []
        # Remember the current position in the document
        self.positionLine = 1
        self.positionCol = 0
        # Remember the length of the last line, so unget("\n") can restore
        # positionCol. (Only one character can be ungot at once, so we only
        # need to remember the single last line.)
        self.lastLineLength = None
        # Flag to indicate we may have a CR LF broken across a data chunk
        self._lastChunkEndsWithCR = False
    def openStream(self, source):
        """Produces a file object from source.

        source can be either a file object, local filename or a string.
        """
        # NOTE(review): despite the docstring, a filename is never opened
        # here — any non-file source is wrapped as an in-memory string.
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            # Otherwise treat source as a string and convert to a file object
            if isinstance(source, unicode):
                # Unicode input is encoded to UTF-8 and the declared
                # encoding is pinned accordingly.
                source = source.encode('utf-8')
                self.charEncoding = ("utf-8", "certain")
            import cStringIO
            stream = cStringIO.StringIO(str(source))
        # Encoding detection needs tell/seek; wrap unseekable streams
        # (and stdin) in BufferedStream.
        if (not(hasattr(stream, "tell") and hasattr(stream, "seek")) or
            stream is sys.stdin):
            stream = BufferedStream(stream)
        return stream
    def detectEncoding(self, parseMeta=True, chardet=True):
        """Determine the stream encoding.

        Tries, in order: BOM sniffing, <meta> pre-parsing (if parseMeta),
        the optional chardet library (if installed and chardet is True),
        then self.defaultEncoding.  Returns (encoding, confidence) where
        confidence is "certain" or "tentative".
        """
        # First look for a BOM
        # This will also read past the BOM if present
        encoding = self.detectBOM()
        confidence = "certain"
        # If there is no BOM need to look for meta elements with encoding
        # information
        if encoding is None and parseMeta:
            encoding = self.detectEncodingMeta()
            confidence = "tentative"
        # Guess with chardet, if available
        if encoding is None and chardet:
            confidence = "tentative"
            try:
                from chardet.universaldetector import UniversalDetector
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = detector.result['encoding']
                # Rewind: chardet consumed part of the stream.
                self.rawStream.seek(0)
            except ImportError:
                # chardet is not installed; fall through to the default.
                pass
        # If all else fails use the default encoding
        if encoding is None:
            confidence = "tentative"
            encoding = self.defaultEncoding
        # Substitute for equivalent encodings:
        encodingSub = {"iso-8859-1": "windows-1252"}
        if encoding.lower() in encodingSub:
            encoding = encodingSub[encoding.lower()]
        return encoding, confidence
def changeEncoding(self, newEncoding):
newEncoding = codecName(newEncoding)
if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
newEncoding = "utf-8"
if newEncoding is None:
return
elif newEncoding == self.charEncoding[0]:
self.charEncoding = (self.charEncoding[0], "certian")
else:
self.rawStream.seek(0)
self.reset()
self.charEncoding = (newEncoding, "certian")
raise ReparseException, "Encoding changed from %s to %s"%(self.charEncoding[0], newEncoding)
def detectBOM(self):
"""Attempts to detect at BOM at the start of the stream. If
an encoding can be determined from the BOM return the name of the
encoding otherwise return None"""
bomDict = {
codecs.BOM_UTF8: 'utf-8',
codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
}
# Go to beginning of file and read in 4 bytes
string = self.rawStream.read(4)
# Try detecting the BOM using bytes from the string
encoding = bomDict.get(string[:3]) # UTF-8
seek = 3
if not encoding:
# Need to detect UTF-32 before UTF-16
encoding = bomDict.get(string) # UTF-32
seek = 4
if not encoding:
encoding = bomDict.get(string[:2]) # UTF-16
seek = 2
# Set the read position past the BOM if one was found, otherwise
# set it to the start of the stream
self.rawStream.seek(encoding and seek or 0)
return encoding
def detectEncodingMeta(self):
"""Report the encoding declared by the meta element
"""
buffer = self.rawStream.read(self.numBytesMeta)
parser = EncodingParser(buffer)
self.rawStream.seek(0)
encoding = parser.getEncoding()
if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
encoding = "utf-8"
return encoding
    def updatePosition(self, chars):
        """Advance positionLine/positionCol past the just-consumed string
        `chars`, recording lastLineLength so that a single trailing
        newline can later be pushed back by unget()."""
        # Update the position attributes to correspond to some sequence of
        # read characters
        # Find the last newline character
        idx = chars.rfind(u"\n")
        if idx == -1:
            # No newlines in chars
            self.positionCol += len(chars)
        else:
            # Find the last-but-one newline character
            idx2 = chars.rfind(u"\n", 0, idx)
            if idx2 == -1:
                # Only one newline in chars
                self.positionLine += 1
                # The line just ended spans the old column plus the
                # characters preceding the newline.
                self.lastLineLength = self.positionCol + idx
                self.positionCol = len(chars) - (idx + 1)
            else:
                # At least two newlines in chars
                newlines = chars.count(u"\n")
                self.positionLine += newlines
                # The last complete line lies between the final two newlines.
                self.lastLineLength = idx - (idx2 + 1)
                self.positionCol = len(chars) - (idx + 1)
def position(self):
"""Returns (line, col) of the current position in the stream."""
return (self.positionLine, self.positionCol)
    def char(self):
        """ Read one character from the stream or queue if available. Return
        EOF when EOF is reached.
        """
        # Read a new chunk from the input stream if necessary
        if self.chunkOffset >= self.chunkSize:
            if not self.readChunk():
                return EOF
        char = self.chunk[self.chunkOffset]
        self.chunkOffset += 1
        # Update the position attributes
        if char == u"\n":
            # Starting a new line: remember the old column so that
            # unget(u"\n") can restore it via lastLineLength.
            self.lastLineLength = self.positionCol
            self.positionCol = 0
            self.positionLine += 1
        elif char is not EOF:
            self.positionCol += 1
        return char
def readChunk(self, chunkSize=_defaultChunkSize):
self.chunk = u""
self.chunkSize = 0
self.chunkOffset = 0
data = self.dataStream.read(chunkSize)
if not data:
return False
#Replace null characters
for i in xrange(data.count(u"\u0000")):
self.errors.append("null-character")
for i in xrange(len(invalid_unicode_re.findall(data))):
self.errors.append("invalid-codepoint")
data = data.replace(u"\u0000", u"\ufffd")
#Check for CR LF broken across chunks
if (self._lastChunkEndsWithCR and data[0] == u"\n"):
data = data[1:]
# Stop if the chunk is now empty
if not data:
return False
self._lastChunkEndsWithCR = data[-1] == u"\r"
data = data.replace(u"\r\n", u"\n")
data = data.replace(u"\r", u"\n")
self.chunk = data
self.chunkSize = len(data)
return True
    def charsUntil(self, characters, opposite = False):
        """ Returns a string of characters from the stream up to but not
        including any character in 'characters' or EOF. 'characters' must be
        a container that supports the 'in' method and iteration over its
        characters. If 'opposite' is True the sense is inverted: characters
        are consumed only while they ARE in 'characters'.
        """
        # Use a cache of regexps to find the required characters
        try:
            chars = charsUntilRegEx[(characters, opposite)]
        except KeyError:
            # Build and cache a character-class regexp matching a run of
            # wanted characters; only ASCII is supported here.
            for c in characters: assert(ord(c) < 128)
            regex = u"".join([u"\\x%02x" % ord(c) for c in characters])
            if not opposite:
                regex = u"^%s" % regex
            chars = charsUntilRegEx[(characters, opposite)] = re.compile(u"[%s]+" % regex)
        rv = []
        while True:
            # Find the longest matching prefix
            m = chars.match(self.chunk, self.chunkOffset)
            if m is None:
                # If nothing matched, and it wasn't because we ran out of chunk,
                # then stop
                if self.chunkOffset != self.chunkSize:
                    break
            else:
                end = m.end()
                # If not the whole chunk matched, return everything
                # up to the part that didn't match
                if end != self.chunkSize:
                    rv.append(self.chunk[self.chunkOffset:end])
                    self.chunkOffset = end
                    break
            # If the whole remainder of the chunk matched,
            # use it all and read the next chunk
            rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break
        r = u"".join(rv)
        # Keep the line/column bookkeeping in sync with what was consumed.
        self.updatePosition(r)
        return r
    def unget(self, char):
        """Push a single character back onto the stream. Only one character
        may be outstanding at a time; it must be consumed again before any
        further call to unget."""
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not None:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char
            # Update the position attributes
            if char == u"\n":
                assert self.positionLine >= 1
                assert self.lastLineLength is not None
                self.positionLine -= 1
                # Restore the column from the length of the previous line,
                # recorded by char()/updatePosition(); clearing it enforces
                # the one-newline unget limit.
                self.positionCol = self.lastLineLength
                self.lastLineLength = None
            else:
                self.positionCol -= 1
class EncodingBytes(str):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised"""
    def __init__(self, value):
        str.__init__(self, value)
        # Position starts before the first byte; next() advances to 0.
        self._position=-1
    def __iter__(self):
        return self
    def next(self):
        # Advance and return the byte at the new position; reading the
        # position property raises StopIteration once past the end.
        self._position += 1
        rv = self[self.position]
        return rv
    def setPosition(self, position):
        if self._position >= len(self):
            raise StopIteration
        self._position = position
    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            # Before the first byte (nothing consumed yet).
            return None
    position = property(getPosition, setPosition)
    def getCurrentByte(self):
        return self[self.position]
    currentByte = property(getCurrentByte)
    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        while self.currentByte in chars:
            self.position += 1
    def matchBytes(self, bytes, lower=False):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        data = self[self.position:self.position+len(bytes)]
        if lower:
            data = data.lower()
        rv = data.startswith(bytes)
        if rv == True:
            self.position += len(bytes)
        return rv
    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        newPosition = self[self.position:].find(bytes)
        if newPosition > -1:
            self._position += (newPosition + len(bytes)-1)
            return True
        else:
            # No match anywhere in the remaining data.
            raise StopIteration
    def findNext(self, byteList):
        """Move the pointer so it points to the next byte in a set of possible
        bytes"""
        while (self.currentByte not in byteList):
            self.position += 1
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements.

    Numbered "Step" comments below appear to follow the HTML5 meta
    prescan/attribute algorithm -- TODO confirm against the spec version
    this was written for."""
    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        self.encoding = None
    def getEncoding(self):
        # Dispatch table: the first prefix matching at the current
        # position decides how the following bytes are consumed.
        methodDispatch = (
            ("<!--",self.handleComment),
            ("<meta",self.handleMeta),
            ("</",self.handlePossibleEndTag),
            ("<!",self.handleOther),
            ("<?",self.handleOther),
            ("<",self.handlePossibleStartTag))
        for byte in self.data:
            keepParsing = True
            for key, method in methodDispatch:
                if self.data.matchBytes(key, lower=True):
                    try:
                        # Handlers return False once an encoding is found.
                        keepParsing = method()
                        break
                    except StopIteration:
                        # Ran off the end of the data; stop parsing.
                        keepParsing=False
                        break
            if not keepParsing:
                break
        return self.encoding
    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo("-->")
    def handleMeta(self):
        if self.data.currentByte not in spaceCharactersBytes:
            #if we have <meta not followed by a space so just keep going
            return True
        #We have a valid meta element we want to search for attributes
        while True:
            #Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == "charset":
                    # <meta charset="..."> form.
                    tentativeEncoding = attr[1]
                    codec = codecName(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == "content":
                    # <meta content="...; charset=..."> form.
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    codec = codecName(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)
    def handlePossibleEndTag(self):
        # Skip the "/" so the tag name check below starts at a letter.
        self.data.position+=1
        return self.handlePossibleTag(True)
    def handlePossibleTag(self, endTag):
        if self.data.currentByte not in asciiLettersBytes:
            #If the next byte is not an ascii letter either ignore this
            #fragment (possible start tag case) or treat it according to
            #handleOther
            if endTag:
                self.data.position -= 1
                self.handleOther()
            return True
        self.data.findNext(list(spaceCharactersBytes) + ["<", ">"])
        if self.data.currentByte == "<":
            #return to the first step in the overall "two step" algorithm
            #reprocessing the < byte
            self.data.position -= 1
        else:
            #Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True
    def handleOther(self):
        # Bogus comment / doctype / PI: skip to the closing ">".
        return self.data.jumpTo(">")
    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        # Skip whitespace and "/" before the attribute name.
        self.data.skip(list(spaceCharactersBytes)+["/"])
        if self.data.currentByte == "<":
            # Start of a new tag: step back so the caller reprocesses it.
            self.data.position -= 1
            return None
        elif self.data.currentByte == ">":
            return None
        attrName = []
        attrValue = []
        spaceFound = False
        #Step 5 attribute name
        while True:
            if self.data.currentByte == "=" and attrName:
                break
            elif self.data.currentByte in spaceCharactersBytes:
                spaceFound=True
                break
            elif self.data.currentByte in ("/", "<", ">"):
                # Attribute with no value.
                return "".join(attrName), ""
            elif self.data.currentByte in asciiUppercaseBytes:
                # Attribute names are lower-cased as they are read.
                attrName.extend(self.data.currentByte.lower())
            else:
                attrName.extend(self.data.currentByte)
            #Step 6
            self.data.position += 1
        #Step 7
        if spaceFound:
            self.data.skip()
        #Step 8
        if self.data.currentByte != "=":
            self.data.position -= 1
            return "".join(attrName), ""
        #XXX need to advance position in both spaces and value case
        #Step 9
        self.data.position += 1
        #Step 10
        self.data.skip()
        #Step 11
        if self.data.currentByte in ("'", '"'):
            #11.1 quoted attribute value
            quoteChar = self.data.currentByte
            while True:
                self.data.position+=1
                #11.3
                if self.data.currentByte == quoteChar:
                    self.data.position += 1
                    return "".join(attrName), "".join(attrValue)
                #11.4
                elif self.data.currentByte in asciiUppercaseBytes:
                    attrValue.extend(self.data.currentByte.lower())
                #11.5
                else:
                    attrValue.extend(self.data.currentByte)
        elif self.data.currentByte in (">", "<"):
            return "".join(attrName), ""
        elif self.data.currentByte in asciiUppercaseBytes:
            attrValue.extend(self.data.currentByte.lower())
        else:
            attrValue.extend(self.data.currentByte)
        # Unquoted attribute value: read until whitespace or a bracket.
        while True:
            self.data.position +=1
            if self.data.currentByte in (
                list(spaceCharactersBytes) + [">", "<"]):
                return "".join(attrName), "".join(attrValue)
            elif self.data.currentByte in asciiUppercaseBytes:
                attrValue.extend(self.data.currentByte.lower())
            else:
                attrValue.extend(self.data.currentByte)
class ContentAttrParser(object):
    """Extracts the charset from a meta "content" attribute value of the
    form "...; charset=<encoding>", returning the encoding string or
    None."""
    def __init__(self, data):
        # data is an EncodingBytes instance positioned at the start.
        self.data = data
    def parse(self):
        try:
            #Skip to the first ";"
            self.data.jumpTo(";")
            self.data.position += 1
            self.data.skip()
            #Check if the attr name is charset
            #otherwise return
            self.data.jumpTo("charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == "=":
                #If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            #Look for an encoding between matching quote marks
            if self.data.currentByte in ('"', "'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                self.data.jumpTo(quoteMark)
                return self.data[oldPosition:self.data.position]
            else:
                #Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.findNext(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    #Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            # Ran out of data before a complete charset declaration.
            return None
def codecName(encoding):
    """Return the python codec name corresponding to an encoding or None if the
    string doesn't correspond to a valid encoding."""
    if encoding is None or type(encoding) not in types.StringTypes:
        return None
    # Strip ASCII punctuation and lower-case before looking the name up
    # in the canonical encodings table.
    canonicalName = ascii_punctuation_re.sub("", encoding).lower()
    return encodings.get(canonicalName, None)
| |
# valuerep.py
"""Special classes for DICOM value representations (VR)"""
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
from decimal import Decimal
import dicom.config
from dicom.multival import MultiValue
from dicom import in_py3
import logging
logger = logging.getLogger('pydicom')
default_encoding = "iso8859" # can't import from charset or get circular import
# For reading/writing data elements, these ones have longer explicit VR format
extra_length_VRs = ('OB', 'OW', 'OF', 'SQ', 'UN', 'UT')
# VRs that can be affected by character repertoire in (0008,0005) Specific Character Set
# See PS-3.5 (2011), section 6.1.2 Graphic Characters
text_VRs = ('SH', 'LO', 'ST', 'LT', 'UT') # and PN, but it is handled separately.
import re
# Regex for splitting a Person Name (PN) value into its parts: the
# single-byte, ideographic and phonetic component groups are separated
# by "=", and the single-byte group is further split on "^" into
# family, given, middle, prefix and suffix names. Built as bytes so it
# can be compiled in both a bytes and a text flavour below.
match_string = b''.join([
    b'(?P<single_byte>',
    b'(?P<family_name>[^=\^]*)',
    b'\^?(?P<given_name>[^=\^]*)',
    b'\^?(?P<middle_name>[^=\^]*)',
    b'\^?(?P<name_prefix>[^=\^]*)',
    b'\^?(?P<name_suffix>[^=\^]*)',
    b')',
    b'=?(?P<ideographic>[^=]*)',
    b'=?(?P<phonetic>[^=]*)$'])
# Compiled text and bytes versions; PersonName3.parse picks one based
# on the type of the value being parsed.
match_string_uni = re.compile(match_string.decode('iso8859'))
match_string_bytes = re.compile(match_string)
class DSfloat(float):
    """Store values for DICOM VR of DS (Decimal String) as a float.

    If constructed from an empty string, return the empty string,
    not an instance of this class.
    """
    __slots__ = 'original_string'
    def __init__(self, val):
        """Remember the exact source string, if one was given, so the same
        value can be written back out later.
        """
        # float is immutable, so changing a data element value produces
        # a new object; the original string travels with each copy.
        if isinstance(val, (str, unicode)):
            self.original_string = val
        elif (isinstance(val, (DSfloat, DSdecimal))
                and hasattr(val, 'original_string')):
            self.original_string = val.original_string
    def __str__(self):
        original = getattr(self, 'original_string', None)
        if original is not None:
            return original
        return super(DSfloat, self).__str__()
    def __repr__(self):
        return "'%s'" % (self,)
class DSdecimal(Decimal):
    """Store values for DICOM VR of DS (Decimal String).

    Note: if constructed by an empty string, returns the empty string,
    not an instance of this class.
    """
    __slots__ = 'original_string'
    def __new__(cls, val):
        """Create an instance of DS object, or return a blank string if one is
        passed in, e.g. from a type 2 DICOM blank value.

        :param val: val must be a string or a number type which can be
                   converted to a decimal
        """
        # Store this value here so that if the input string is actually a valid
        # string but decimal.Decimal transforms it to an invalid string it will
        # still be initialized properly
        enforce_length = dicom.config.enforce_valid_values
        # DICOM allows spaces around the string, but python doesn't, so clean it
        if isinstance(val, (str, unicode)):
            val = val.strip()
            # If the input string is already short enough, relax the valid
            # value constraint for this particular instance
            if len(val) <= 16:
                enforce_length = False
        if val == '':
            return val
        if isinstance(val, float) and not dicom.config.allow_DS_float:
            msg = ("DS cannot be instantiated with a float value, unless "
                   "config.allow_DS_float is set to True. It is recommended to "
                   "convert to a string instead, with the desired number of digits, "
                   "or use Decimal.quantize and pass a Decimal instance.")
            raise TypeError(msg)
        if not isinstance(val, Decimal):
            val = super(DSdecimal, cls).__new__(cls, val)
        if len(str(val)) > 16 and enforce_length:
            msg = ("DS value representation must be <= 16 characters by DICOM "
                   "standard. Initialize with a smaller string, or set config.enforce_valid_values "
                   "to False to override, "
                   "or use Decimal.quantize() and initialize with a Decimal instance.")
            raise OverflowError(msg)
        return val
    def __init__(self, val):
        """Store the original string if one given, for exact write-out of same
        value later. E.g. if set '1.23e2', Decimal would write '123', but DS
        will use the original
        """
        # ... also if user changes a data element value, then will get
        # a different Decimal, as Decimal is immutable.
        if isinstance(val, (str, unicode)):
            self.original_string = val
        # BUG FIX: the original tested `isinstance(val, DSbase)`, but no
        # `DSbase` is defined anywhere, so copy-constructing from another
        # DS instance raised NameError. Use the concrete DS classes,
        # matching DSfloat.__init__.
        elif (isinstance(val, (DSfloat, DSdecimal))
                and hasattr(val, 'original_string')):
            self.original_string = val.original_string
    def __str__(self):
        # Use the original string only while it satisfies the DS 16-char limit.
        if hasattr(self, 'original_string') and len(self.original_string) <= 16:
            return self.original_string
        else:
            return super(DSdecimal, self).__str__()
    def __repr__(self):
        return "'" + str(self) + "'"
# CHOOSE TYPE OF DS
# DS() below builds DSdecimal (exact, slower) or DSfloat instances
# depending on the config flag; evaluated once at import time.
if dicom.config.use_DS_decimal:
    DSclass = DSdecimal
else:
    DSclass = DSfloat
def DS(val):
    """Factory for DS (Decimal String) values.

    Blank strings are returned unchanged; anything else is handed to the
    configured DS class (DSfloat or DSdecimal). Doing the blank check
    here avoids overriding __new__ in DSfloat (which carries a time
    penalty for large arrays of DS), and callers that have already
    cleaned their strings may call DSfloat directly.
    """
    if isinstance(val, (str, unicode)):
        stripped = val.strip()
        if not stripped:
            return stripped
        val = stripped
    return DSclass(val)
class IS(int):
    """Derived class of int. Stores original integer string for exact rewriting
    of the string originally read or stored.
    """
    if not in_py3:
        __slots__ = 'original_string'
    # Unlikely that str(int) will not be the same as the original, but could happen
    # with leading zeros.
    def __new__(cls, val):
        """Create instance if new integer string"""
        # A blank (type 2) value stays a blank string; returning a
        # non-IS object from __new__ also means __init__ is not called.
        if isinstance(val, (str, unicode)) and val.strip() == '':
            return ''
        newval = super(IS, cls).__new__(cls, val)
        # check if a float or Decimal passed in, then could have lost info,
        # and will raise error. E.g. IS(Decimal('1')) is ok, but not IS(1.23)
        if isinstance(val, (float, Decimal)) and newval != val:
            raise TypeError("Could not convert value to integer without loss")
        # Checks in case underlying int is >32 bits, DICOM does not allow this
        if (newval < -2 ** 31 or newval >= 2 ** 31) and dicom.config.enforce_valid_values:
            message = "Value exceeds DICOM limits of -2**31 to (2**31 - 1) for IS"
            raise OverflowError(message)
        return newval
    def __init__(self, val):
        # If a string passed, then store it
        if isinstance(val, (str, unicode)):
            self.original_string = val
        elif isinstance(val, IS) and hasattr(val, 'original_string'):
            self.original_string = val.original_string
    def __repr__(self):
        # Quoted like a string so the written form mirrors what was read.
        if hasattr(self, 'original_string'):
            return "'" + self.original_string + "'"
        else:
            return "'" + int.__str__(self) + "'"
def MultiString(val, valtype=str):
    """Split a bytestring by backslash delimiters, if there are any.

    val -- DICOM bytestring to split up
    valtype -- default str, but can be e.g. UID to overwrite to a specific type
    """
    # Remove a single trailing blank (padding to even length) or a
    # trailing NUL (2005.05.25: an error seen in PET files being converted).
    if val and val.endswith((' ', '\x00')):
        val = val[:-1]
    pieces = val.split("\\")
    if len(pieces) == 1:
        single = pieces[0]
        return valtype(single) if single else single
    return MultiValue(valtype, pieces)
class PersonName3(object):
    """Person Name (PN) value: wraps the original string and parses it
    into its component groups and name parts using the module-level PN
    regexes."""
    def __init__(self, val, encodings=default_encoding):
        if isinstance(val, PersonName3):
            # Copy construction: reuse the exact original string.
            val = val.original_string
        self.original_string = val
        self.encodings = self._verify_encodings(encodings)
        self.parse(val)
    def parse(self, val):
        # Pick the bytes or text flavour of the PN regex to match val.
        if isinstance(val, bytes):
            matchstr = match_string_bytes
        else:
            matchstr = match_string_uni
        matchobj = re.match(matchstr, val)
        # Named groups become attributes: family_name, given_name,
        # middle_name, name_prefix, name_suffix, ideographic, phonetic.
        self.__dict__.update(matchobj.groupdict())
        groups = matchobj.groups()
        # components: the single-byte, ideographic and phonetic groups.
        self.components = [groups[i] for i in (0, -2, -1)]
    def __eq__(self, other):
        return self.original_string == other
    def __str__(self):
        return self.original_string.__str__()
    def __repr__(self):
        return self.original_string.__repr__()
    def decode(self, encodings=None):
        encodings = self._verify_encodings(encodings)
        from dicom.charset import clean_escseq
        if not isinstance(self.components[0], bytes):
            # Already text; nothing to decode.
            comps = self.components
        else:
            comps = [clean_escseq(comp.decode(enc), encodings)
                     for comp, enc in zip(self.components, encodings)]
        # Drop empty trailing components before rebuilding the value.
        while len(comps) and not comps[-1]:
            comps.pop()
        return PersonName3('='.join(comps), encodings)
    def encode(self, encodings=None):
        encodings = self._verify_encodings(encodings)
        if isinstance(self.components[0], bytes):
            # Already encoded; use as-is.
            comps = self.components
        else:
            comps = [C.encode(enc) for C, enc in zip(self.components, encodings)]
        # Remove empty elements from the end
        while len(comps) and not comps[-1]:
            comps.pop()
        return b'='.join(comps)
    def family_comma_given(self):
        return self.formatted('%(family_name)s, %(given_name)s')
    def formatted(self, format_str):
        # Byte values must be decoded before their parts can be formatted.
        if isinstance(self.original_string, bytes):
            return format_str % self.decode(default_encoding).__dict__
        else:
            return format_str % self.__dict__
    def _verify_encodings(self, encodings):
        # None means: keep whatever this instance already has.
        if encodings is None:
            return self.encodings
        # One encoding applies to all three components; with two, the
        # second is reused for the third.
        if not isinstance(encodings, list):
            encodings = [encodings] * 3
        if len(encodings) == 2:
            encodings.append(encodings[1])
        return encodings
class PersonNameBase(object):
    """Base class for Person Name classes"""
    def __init__(self, val):
        """Initialize the PN properties"""
        # Subclassing an immutable normally means customising __new__;
        # here __init__ only derives the component properties.
        # PS 3.5-2008 section 6.2 (p.28) and 6.2.1 describe PN. Briefly:
        # single-byte-characters=ideographic characters=phonetic-characters
        # (each with?):
        # family-name-complex^Given-name-complex^Middle-name^name-prefix^name-suffix
        self.parse()
    def formatted(self, format_str):
        """Return a formatted string according to the format pattern

        Use "...%(property)...%(property)..." where property is one of
        family_name, given_name, middle_name, name_prefix, name_suffix
        """
        return format_str % self.__dict__
    def parse(self):
        """Break down the components and name parts"""
        components = self.split("=")
        self.components = components
        self.single_byte = components[0]
        self.ideographic = components[1] if len(components) > 1 else ''
        self.phonetic = components[2] if len(components) > 2 else ''
        if self.single_byte:
            # Pad with "^" so missing trailing name parts still unpack.
            parts = (self.single_byte + "^^^^").split("^")[:5]
        else:
            parts = ['', '', '', '', '']
        (self.family_name, self.given_name, self.middle_name,
         self.name_prefix, self.name_suffix) = parts
class PersonName(PersonNameBase, bytes):
    """Human-friendly class to hold VR of Person Name (PN)

    Name is parsed into the following properties:
    single-byte, ideographic, and phonetic components (PS3.5-2008 6.2.1)
    family_name, given_name, middle_name, name_prefix, name_suffix
    """
    def __new__(cls, val):
        """Return instance of the new class"""
        # A value that is already a PersonName is returned untouched.
        return val if isinstance(val, PersonName) else \
            super(PersonName, cls).__new__(cls, val)
    def encode(self, *args):
        """Dummy method to mimic py2 str behavior in py3 bytes subclass"""
        # Simplifies the write process: every stored object exposes
        # "encode", and for an already-encoded PN it is the identity.
        return self
    def family_comma_given(self):
        """Return name as 'Family-name, Given-name'"""
        return self.formatted("%(family_name)s, %(given_name)s")
class PersonNameUnicode(PersonNameBase, unicode):
    """Unicode version of Person Name"""
    def __new__(cls, val, encodings):
        """Return unicode string after conversion of each part
        val -- the PN value to store
        encodings -- a list of python encodings, generally found
                 from dicom.charset.python_encodings mapping
                 of values in DICOM data element (0008,0005).
        """
        from dicom.charset import clean_escseq  # in here to avoid circular import
        # Make the possible three character encodings explicit:
        if not isinstance(encodings, list):
            encodings = [encodings] * 3
        if len(encodings) == 2:
            encodings.append(encodings[1])
        components = val.split(b"=")
        # Remove the first encoding if only one component is present
        if (len(components) == 1):
            del encodings[0]
        # Decode each component with its own encoding.
        comps = [clean_escseq(C.decode(enc), encodings)
                 for C, enc in zip(components, encodings)]
        new_val = u"=".join(comps)
        return unicode.__new__(cls, new_val)
    def __init__(self, val, encodings):
        self.encodings = self._verify_encodings(encodings)
        PersonNameBase.__init__(self, val)
    def _verify_encodings(self, encodings):
        """Checks the encoding to ensure proper format"""
        if encodings is None:
            return self.encodings
        if not isinstance(encodings, list):
            encodings = [encodings] * 3
        if len(encodings) == 2:
            encodings.append(encodings[1])
        return encodings
    def encode(self, encodings):
        """Encode the unicode using the specified encoding"""
        encodings = self._verify_encodings(encodings)
        components = self.split('=')
        comps = [C.encode(enc) for C, enc in zip(components, encodings)]
        # Remove empty elements from the end
        while len(comps) and not comps[-1]:
            comps.pop()
        return '='.join(comps)
    def family_comma_given(self):
        """Return name as 'Family-name, Given-name'"""
        # BUG FIX: the original used "%(family_name)u"; the "%u"
        # conversion accepts only numbers, so this always raised
        # TypeError for real (string) name parts. Use "%s", matching
        # PersonName.family_comma_given.
        return self.formatted("%(family_name)s, %(given_name)s")
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers.modeling_utils import *
import copy
import torch
import torch.nn.functional as F
# Regularization penalty functions, selected by name. Each maps a
# tensor of values to a per-element penalty:
#   square         -- plain quadratic penalty
#   square_cut<K>  -- quadratic penalty, zero below threshold K
#   abs_cut<K>     -- absolute-value penalty, zero below threshold K
REGFUNC = {
    "square": lambda x: x**2,
    "square_cut5": lambda x: F.relu(x**2 - 5),
    "square_cut100": lambda x: F.relu(x**2 - 100),
    "abs_cut10": lambda x: F.relu(x.abs() - 10),
    "abs_cut20": lambda x: F.relu(x.abs() - 20),
}
class PreTrainedModels(PreTrainedModel):
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args,
**kwargs):
config = kwargs.pop("config", None)
state_dict = kwargs.pop("state_dict", None)
cache_dir = kwargs.pop("cache_dir", None)
from_tf = kwargs.pop("from_tf", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
use_cdn = kwargs.pop("use_cdn", True)
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if os.path.isdir(pretrained_model_name_or_path):
if from_tf and os.path.isfile(
os.path.join(pretrained_model_name_or_path,
TF_WEIGHTS_NAME + ".index")):
# Load from a TF 1.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path,
TF_WEIGHTS_NAME + ".index")
elif from_tf and os.path.isfile(
os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path,
TF2_WEIGHTS_NAME)
elif os.path.isfile(
os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint
archive_file = os.path.join(pretrained_model_name_or_path,
WEIGHTS_NAME)
else:
raise EnvironmentError(
"Error no file named {} found in directory {} or `from_tf` set to False"
.format(
[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + ".index"],
pretrained_model_name_or_path,
))
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(
pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
assert (
from_tf
), ("We found a TensorFlow checkpoint at {}, please set from_tf to True"
" to load from this checkpoint").format(
pretrained_model_name_or_path + ".index")
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME),
use_cdn=use_cdn,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
)
if resolved_archive_file is None:
raise EnvironmentError
except EnvironmentError:
msg = (
f"Can't load weights for '{pretrained_model_name_or_path}'. Make "
f"sure that:\n\n- '{pretrained_model_name_or_path}' is a correct "
f"model identifier listed on 'https://huggingface.co/models'\n\n- "
f"or '{pretrained_model_name_or_path}' is the correct path to a "
f"directory containing a file named one of {WEIGHTS_NAME}, "
f"{TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n"
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info("loading weights file {}".format(archive_file))
else:
logger.info("loading weights file {} from cache at {}".format(
archive_file, resolved_archive_file))
else:
resolved_archive_file = None
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if state_dict is None and not from_tf:
try:
state_dict = torch.load(resolved_archive_file, map_location="cpu")
except Exception:
raise OSError(
"Unable to load weights from pytorch checkpoint file. "
"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "
)
missing_keys = []
unexpected_keys = []
error_msgs = []
if from_tf:
if resolved_archive_file.endswith(".index"):
# Load from a TensorFlow 1.X checkpoint - provided by original authors
model = cls.load_tf_weights(
model, config, resolved_archive_file[:-6]) # Remove the '.index'
else:
# Load from our TensorFlow 2.0 checkpoints
try:
from transformers import load_tf2_checkpoint_in_pytorch_model
model = load_tf2_checkpoint_in_pytorch_model(
model, resolved_archive_file, allow_missing_keys=True)
except ImportError:
logger.error(
"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. Please see "
"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions."
)
raise
else:
# Convert old format to new format if needed from a PyTorch state_dict
has_all_sub_modules = all(
any(s.startswith(_prefix)
for s in state_dict.keys())
for _prefix in model.base_model_prefixs)
has_prefix_module = any(
s.startswith(model.base_model_prefix) for s in state_dict.keys())
old_keys = list(state_dict.keys())
for key in old_keys:
new_key = key
if "gamma" in key:
new_key = new_key.replace("gamma", "weight")
if "beta" in key:
new_key = new_key.replace("beta", "bias")
_state = state_dict.pop(key)
if has_all_sub_modules:
state_dict[new_key] = _state
elif not has_prefix_module:
for _prefix in model.base_model_prefixs:
_key = _prefix + "." + new_key
state_dict[_key] = _state
else:
if new_key.startswith(model.base_model_prefix):
for _prefix in model.base_model_prefixs:
_key = _prefix + new_key[len(model.base_model_prefix):]
state_dict[_key] = _state
else:
state_dict[new_key] = _state
if hasattr(model, "hack_pretrained_state_dict"):
state_dict = model.hack_pretrained_state_dict(state_dict)
# copy state_dict so _load_from_state_dict can modify it
metadata = getattr(state_dict, "_metadata", None)
state_dict = state_dict.copy()
if metadata is not None:
state_dict._metadata = metadata
# PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants
# so we need to apply the function recursively.
def load(module, prefix=""):
local_metadata = {} if metadata is None else metadata.get(
prefix[:-1], {})
module._load_from_state_dict(
state_dict,
prefix,
local_metadata,
True,
missing_keys,
unexpected_keys,
error_msgs,
)
for name, child in module._modules.items():
if child is not None:
load(child, prefix + name + ".")
# Make sure we are able to load base models as well as derived models (with heads)
load(model)
load = None
if len(missing_keys) > 0:
logger.info(
"Weights of {} not initialized from pretrained model: {}".format(
model.__class__.__name__, missing_keys))
if len(unexpected_keys) > 0:
logger.info("Weights from pretrained model not used in {}: {}".format(
model.__class__.__name__, unexpected_keys))
if len(error_msgs) > 0:
raise RuntimeError(
"Error(s) in loading state_dict for {}:\n\t{}".format(
model.__class__.__name__, "\n\t".join(error_msgs)))
model.tie_weights(
) # make sure token embedding weights are still tied if needed
# Set model in evaluation mode to deactivate DropOut modules by default
model.eval()
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"error_msgs": error_msgs,
}
return model, loading_info
if hasattr(config, "xla_device") and config.xla_device:
import torch_xla.core.xla_model as xm
model = xm.send_cpu_data_to_device(model, xm.xla_device())
model.to(xm.xla_device())
return model
| |
# Copyright (c) 2015 Mirantis inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import ddt
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
import six
import webob
from manila.api import common
from manila.api.openstack import api_version_request as api_version
from manila.api.v2 import share_replicas
from manila.api.v2 import shares
from manila.common import constants
from manila import context
from manila import db
from manila import exception
from manila import policy
from manila.share import api as share_api
from manila.share import share_types
from manila import test
from manila.tests.api.contrib import stubs
from manila.tests.api import fakes
from manila.tests import db_utils
from manila import utils
CONF = cfg.CONF
@ddt.ddt
class ShareAPITest(test.TestCase):
"""Share API Test."""
def setUp(self):
super(self.__class__, self).setUp()
self.controller = shares.ShareController()
self.mock_object(db, 'availability_zone_get')
self.mock_object(share_api.API, 'get_all',
stubs.stub_get_all_shares)
self.mock_object(share_api.API, 'get',
stubs.stub_share_get)
self.mock_object(share_api.API, 'update', stubs.stub_share_update)
self.mock_object(share_api.API, 'delete', stubs.stub_share_delete)
self.mock_object(share_api.API, 'get_snapshot',
stubs.stub_snapshot_get)
self.maxDiff = None
self.share = {
"size": 100,
"display_name": "Share Test Name",
"display_description": "Share Test Desc",
"share_proto": "fakeproto",
"availability_zone": "zone1:host1",
"is_public": False,
}
self.create_mock = mock.Mock(
return_value=stubs.stub_share(
'1',
display_name=self.share['display_name'],
display_description=self.share['display_description'],
size=100,
share_proto=self.share['share_proto'].upper(),
instance={
'availability_zone': self.share['availability_zone'],
})
)
self.vt = {
'id': 'fake_volume_type_id',
'name': 'fake_volume_type_name',
}
CONF.set_default("default_share_type", None)
def _get_expected_share_detailed_response(self, values=None, admin=False):
share = {
'id': '1',
'name': 'displayname',
'availability_zone': 'fakeaz',
'description': 'displaydesc',
'export_location': 'fake_location',
'export_locations': ['fake_location', 'fake_location2'],
'project_id': 'fakeproject',
'host': 'fakehost',
'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
'share_proto': 'FAKEPROTO',
'metadata': {},
'size': 1,
'snapshot_id': '2',
'share_network_id': None,
'status': 'fakestatus',
'share_type': '1',
'volume_type': '1',
'snapshot_support': True,
'is_public': False,
'consistency_group_id': None,
'source_cgsnapshot_member_id': None,
'task_state': None,
'share_type_name': None,
'links': [
{
'href': 'http://localhost/v1/fake/shares/1',
'rel': 'self'
},
{
'href': 'http://localhost/fake/shares/1',
'rel': 'bookmark'
}
],
}
if values:
if 'display_name' in values:
values['name'] = values.pop('display_name')
if 'display_description' in values:
values['description'] = values.pop('display_description')
share.update(values)
if share.get('share_proto'):
share['share_proto'] = share['share_proto'].upper()
if admin:
share['share_server_id'] = 'fake_share_server_id'
return {'share': share}
@ddt.data("2.0", "2.1")
def test_share_create_original(self, microversion):
self.mock_object(share_api.API, 'create', self.create_mock)
body = {"share": copy.deepcopy(self.share)}
req = fakes.HTTPRequest.blank('/shares', version=microversion)
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(self.share)
expected['share'].pop('snapshot_support')
expected['share'].pop('share_type_name')
expected['share'].pop('task_state')
expected['share'].pop('consistency_group_id')
expected['share'].pop('source_cgsnapshot_member_id')
self.assertEqual(expected, res_dict)
@ddt.data("2.2", "2.3")
def test_share_create_with_snapshot_support_without_cg(self, microversion):
self.mock_object(share_api.API, 'create', self.create_mock)
body = {"share": copy.deepcopy(self.share)}
req = fakes.HTTPRequest.blank('/shares', version=microversion)
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(self.share)
expected['share'].pop('share_type_name')
expected['share'].pop('task_state')
expected['share'].pop('consistency_group_id')
expected['share'].pop('source_cgsnapshot_member_id')
self.assertEqual(expected, res_dict)
@ddt.data("2.4", "2.5")
def test_share_create_with_consistency_group(self, microversion):
self.mock_object(share_api.API, 'create', self.create_mock)
body = {"share": copy.deepcopy(self.share)}
req = fakes.HTTPRequest.blank('/shares', version=microversion)
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(self.share)
expected['share'].pop('share_type_name')
if (api_version.APIVersionRequest(microversion) ==
api_version.APIVersionRequest('2.4')):
expected['share'].pop('task_state')
self.assertEqual(expected, res_dict)
def test_share_create_with_valid_default_share_type(self):
self.mock_object(share_types, 'get_share_type_by_name',
mock.Mock(return_value=self.vt))
CONF.set_default("default_share_type", self.vt['name'])
self.mock_object(share_api.API, 'create', self.create_mock)
body = {"share": copy.deepcopy(self.share)}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(self.share)
share_types.get_share_type_by_name.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), self.vt['name'])
self.assertEqual(expected, res_dict)
def test_share_create_with_invalid_default_share_type(self):
self.mock_object(
share_types, 'get_default_share_type',
mock.Mock(side_effect=exception.ShareTypeNotFoundByName(
self.vt['name'])),
)
CONF.set_default("default_share_type", self.vt['name'])
req = fakes.HTTPRequest.blank('/shares', version='2.7')
self.assertRaises(exception.ShareTypeNotFoundByName,
self.controller.create, req, {'share': self.share})
share_types.get_default_share_type.assert_called_once_with()
def test_share_create_with_replication(self):
self.mock_object(share_api.API, 'create', self.create_mock)
body = {"share": copy.deepcopy(self.share)}
req = fakes.HTTPRequest.blank(
'/shares', version=share_replicas.MIN_SUPPORTED_API_VERSION)
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(self.share)
expected['share']['task_state'] = None
expected['share']['consistency_group_id'] = None
expected['share']['source_cgsnapshot_member_id'] = None
expected['share']['replication_type'] = None
expected['share']['share_type_name'] = None
expected['share']['has_replicas'] = False
expected['share']['access_rules_status'] = 'active'
expected['share'].pop('export_location')
expected['share'].pop('export_locations')
self.assertEqual(expected, res_dict)
def test_share_create_with_share_net(self):
shr = {
"size": 100,
"name": "Share Test Name",
"description": "Share Test Desc",
"share_proto": "fakeproto",
"availability_zone": "zone1:host1",
"share_network_id": "fakenetid"
}
create_mock = mock.Mock(return_value=stubs.stub_share('1',
display_name=shr['name'],
display_description=shr['description'],
size=shr['size'],
share_proto=shr['share_proto'].upper(),
availability_zone=shr['availability_zone'],
share_network_id=shr['share_network_id']))
self.mock_object(share_api.API, 'create', create_mock)
self.mock_object(share_api.API, 'get_share_network', mock.Mock(
return_value={'id': 'fakenetid'}))
body = {"share": copy.deepcopy(shr)}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(shr)
self.assertEqual(expected, res_dict)
self.assertEqual("fakenetid",
create_mock.call_args[1]['share_network_id'])
@ddt.data("2.15", "2.16")
def test_share_create_original_with_user_id(self, microversion):
self.mock_object(share_api.API, 'create', self.create_mock)
body = {"share": copy.deepcopy(self.share)}
req = fakes.HTTPRequest.blank('/shares', version=microversion)
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(self.share)
if api_version.APIVersionRequest(microversion) >= (
api_version.APIVersionRequest("2.16")):
expected['share']['user_id'] = 'fakeuser'
else:
self.assertNotIn('user_id', expected['share'])
expected['share']['task_state'] = None
expected['share']['consistency_group_id'] = None
expected['share']['source_cgsnapshot_member_id'] = None
expected['share']['replication_type'] = None
expected['share']['share_type_name'] = None
expected['share']['has_replicas'] = False
expected['share']['access_rules_status'] = 'active'
expected['share'].pop('export_location')
expected['share'].pop('export_locations')
self.assertEqual(expected, res_dict)
def test_migration_start(self):
share = db_utils.create_share()
share_network = db_utils.create_share_network()
share_type = {'share_type_id': 'fake_type_id'}
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
context = req.environ['manila.context']
self.mock_object(db, 'share_network_get', mock.Mock(
return_value=share_network))
self.mock_object(db, 'share_type_get', mock.Mock(
return_value=share_type))
body = {
'migration_start': {
'host': 'fake_host',
'new_share_network_id': 'fake_net_id',
'new_share_type_id': 'fake_type_id',
}
}
method = 'migration_start'
self.mock_object(share_api.API, 'migration_start')
self.mock_object(share_api.API, 'get', mock.Mock(return_value=share))
response = getattr(self.controller, method)(req, share['id'], body)
self.assertEqual(202, response.status_int)
share_api.API.get.assert_called_once_with(context, share['id'])
share_api.API.migration_start.assert_called_once_with(
context, share, 'fake_host', False, True, True, False,
new_share_network=share_network, new_share_type=share_type)
db.share_network_get.assert_called_once_with(
context, 'fake_net_id')
db.share_type_get.assert_called_once_with(
context, 'fake_type_id')
def test_migration_start_has_replicas(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True)
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request = api_version.APIVersionRequest('2.22')
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host'}}
self.mock_object(share_api.API, 'migration_start',
mock.Mock(side_effect=exception.Conflict(err='err')))
self.assertRaises(webob.exc.HTTPConflict,
self.controller.migration_start,
req, share['id'], body)
def test_migration_start_no_share_id(self):
req = fakes.HTTPRequest.blank('/shares/%s/action' % 'fake_id',
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host'}}
method = 'migration_start'
self.mock_object(share_api.API, 'get',
mock.Mock(side_effect=[exception.NotFound]))
self.assertRaises(webob.exc.HTTPNotFound,
getattr(self.controller, method),
req, 'fake_id', body)
def test_migration_start_no_host(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {}}
method = 'migration_start'
self.assertRaises(webob.exc.HTTPBadRequest,
getattr(self.controller, method),
req, share['id'], body)
def test_migration_start_new_share_network_not_found(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
context = req.environ['manila.context']
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host',
'new_share_network_id': 'nonexistent'}}
self.mock_object(db, 'share_network_get',
mock.Mock(side_effect=exception.NotFound()))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.migration_start,
req, share['id'], body)
db.share_network_get.assert_called_once_with(context, 'nonexistent')
def test_migration_start_new_share_type_not_found(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
context = req.environ['manila.context']
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host',
'new_share_type_id': 'nonexistent'}}
self.mock_object(db, 'share_type_get',
mock.Mock(side_effect=exception.NotFound()))
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.migration_start,
req, share['id'], body)
db.share_type_get.assert_called_once_with(context, 'nonexistent')
def test_migration_start_invalid_force_host_assisted_migration(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host',
'force_host_assisted_migration': 'fake'}}
method = 'migration_start'
self.assertRaises(webob.exc.HTTPBadRequest,
getattr(self.controller, method),
req, share['id'], body)
@ddt.data('writable', 'preserve_metadata')
def test_migration_start_invalid_writable_preserve_metadata(
self, parameter):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_start': {'host': 'fake_host',
parameter: 'invalid'}}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.migration_start, req, share['id'],
body)
@ddt.data(constants.TASK_STATE_MIGRATION_ERROR, None)
def test_reset_task_state(self, task_state):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
update = {'task_state': task_state}
body = {'reset_task_state': update}
self.mock_object(db, 'share_update')
response = self.controller.reset_task_state(req, share['id'], body)
self.assertEqual(202, response.status_int)
db.share_update.assert_called_once_with(utils.IsAMatcher(
context.RequestContext), share['id'], update)
def test_reset_task_state_error_body(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
update = {'error': 'error'}
body = {'reset_task_state': update}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.reset_task_state, req, share['id'],
body)
def test_reset_task_state_error_invalid(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
update = {'task_state': 'error'}
body = {'reset_task_state': update}
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.reset_task_state, req, share['id'],
body)
def test_reset_task_state_not_found(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
update = {'task_state': constants.TASK_STATE_MIGRATION_ERROR}
body = {'reset_task_state': update}
self.mock_object(db, 'share_update',
mock.Mock(side_effect=exception.NotFound()))
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.reset_task_state, req, share['id'],
body)
db.share_update.assert_called_once_with(utils.IsAMatcher(
context.RequestContext), share['id'], update)
def test_migration_complete(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_complete': None}
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=share))
self.mock_object(share_api.API, 'migration_complete')
response = self.controller.migration_complete(req, share['id'], body)
self.assertEqual(202, response.status_int)
share_api.API.migration_complete.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), share)
def test_migration_complete_not_found(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_complete': None}
self.mock_object(share_api.API, 'get',
mock.Mock(side_effect=exception.NotFound()))
self.mock_object(share_api.API, 'migration_complete')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.migration_complete, req, share['id'],
body)
def test_migration_cancel(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_cancel': None}
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=share))
self.mock_object(share_api.API, 'migration_cancel')
response = self.controller.migration_cancel(req, share['id'], body)
self.assertEqual(202, response.status_int)
share_api.API.migration_cancel.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), share)
def test_migration_cancel_not_found(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_cancel': None}
self.mock_object(share_api.API, 'get',
mock.Mock(side_effect=exception.NotFound()))
self.mock_object(share_api.API, 'migration_cancel')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.migration_cancel, req, share['id'],
body)
def test_migration_get_progress(self):
share = db_utils.create_share(
task_state=constants.TASK_STATE_MIGRATION_SUCCESS)
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_get_progress': None}
expected = {
'total_progress': 'fake',
'task_state': constants.TASK_STATE_MIGRATION_SUCCESS,
}
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=share))
self.mock_object(share_api.API, 'migration_get_progress',
mock.Mock(return_value=expected))
response = self.controller.migration_get_progress(req, share['id'],
body)
self.assertEqual(expected, response)
share_api.API.migration_get_progress.assert_called_once_with(
utils.IsAMatcher(context.RequestContext), share)
def test_migration_get_progress_not_found(self):
share = db_utils.create_share()
req = fakes.HTTPRequest.blank('/shares/%s/action' % share['id'],
use_admin_context=True, version='2.22')
req.method = 'POST'
req.headers['content-type'] = 'application/json'
req.api_version_request.experimental = True
body = {'migration_get_progress': None}
self.mock_object(share_api.API, 'get',
mock.Mock(side_effect=exception.NotFound()))
self.mock_object(share_api.API, 'migration_get_progress')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.migration_get_progress, req,
share['id'], body)
def test_share_create_from_snapshot_without_share_net_no_parent(self):
shr = {
"size": 100,
"name": "Share Test Name",
"description": "Share Test Desc",
"share_proto": "fakeproto",
"availability_zone": "zone1:host1",
"snapshot_id": 333,
"share_network_id": None,
}
create_mock = mock.Mock(return_value=stubs.stub_share('1',
display_name=shr['name'],
display_description=shr['description'],
size=shr['size'],
share_proto=shr['share_proto'].upper(),
snapshot_id=shr['snapshot_id'],
instance=dict(
availability_zone=shr['availability_zone'],
share_network_id=shr['share_network_id'])))
self.mock_object(share_api.API, 'create', create_mock)
body = {"share": copy.deepcopy(shr)}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(shr)
self.assertEqual(expected, res_dict)
def test_share_create_from_snapshot_without_share_net_parent_exists(self):
shr = {
"size": 100,
"name": "Share Test Name",
"description": "Share Test Desc",
"share_proto": "fakeproto",
"availability_zone": "zone1:host1",
"snapshot_id": 333,
"share_network_id": None,
}
parent_share_net = 444
create_mock = mock.Mock(return_value=stubs.stub_share('1',
display_name=shr['name'],
display_description=shr['description'],
size=shr['size'],
share_proto=shr['share_proto'].upper(),
snapshot_id=shr['snapshot_id'],
instance=dict(
availability_zone=shr['availability_zone'],
share_network_id=shr['share_network_id'])))
self.mock_object(share_api.API, 'create', create_mock)
self.mock_object(share_api.API, 'get_snapshot',
stubs.stub_snapshot_get)
self.mock_object(share_api.API, 'get', mock.Mock(
return_value=mock.Mock(
instance={'share_network_id': parent_share_net})))
self.mock_object(share_api.API, 'get_share_network', mock.Mock(
return_value={'id': parent_share_net}))
body = {"share": copy.deepcopy(shr)}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(shr)
self.assertEqual(expected, res_dict)
self.assertEqual(parent_share_net,
create_mock.call_args[1]['share_network_id'])
def test_share_create_from_snapshot_with_share_net_equals_parent(self):
parent_share_net = 444
shr = {
"size": 100,
"name": "Share Test Name",
"description": "Share Test Desc",
"share_proto": "fakeproto",
"availability_zone": "zone1:host1",
"snapshot_id": 333,
"share_network_id": parent_share_net
}
create_mock = mock.Mock(return_value=stubs.stub_share('1',
display_name=shr['name'],
display_description=shr['description'],
size=shr['size'],
share_proto=shr['share_proto'].upper(),
snapshot_id=shr['snapshot_id'],
instance=dict(
availability_zone=shr['availability_zone'],
share_network_id=shr['share_network_id'])))
self.mock_object(share_api.API, 'create', create_mock)
self.mock_object(share_api.API, 'get_snapshot',
stubs.stub_snapshot_get)
self.mock_object(share_api.API, 'get', mock.Mock(
return_value=mock.Mock(
instance={'share_network_id': parent_share_net})))
self.mock_object(share_api.API, 'get_share_network', mock.Mock(
return_value={'id': parent_share_net}))
body = {"share": copy.deepcopy(shr)}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
res_dict = self.controller.create(req, body)
expected = self._get_expected_share_detailed_response(shr)
self.assertEqual(expected, res_dict)
self.assertEqual(parent_share_net,
create_mock.call_args[1]['share_network_id'])
def test_share_create_from_snapshot_invalid_share_net(self):
self.mock_object(share_api.API, 'create')
shr = {
"size": 100,
"name": "Share Test Name",
"description": "Share Test Desc",
"share_proto": "fakeproto",
"availability_zone": "zone1:host1",
"snapshot_id": 333,
"share_network_id": 1234
}
body = {"share": shr}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.create, req, body)
def test_share_creation_fails_with_bad_size(self):
shr = {"size": '',
"name": "Share Test Name",
"description": "Share Test Desc",
"share_proto": "fakeproto",
"availability_zone": "zone1:host1"}
body = {"share": shr}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
self.assertRaises(exception.InvalidInput,
self.controller.create, req, body)
def test_share_create_no_body(self):
req = fakes.HTTPRequest.blank('/shares', version='2.7')
self.assertRaises(webob.exc.HTTPUnprocessableEntity,
self.controller.create, req, {})
def test_share_create_invalid_availability_zone(self):
self.mock_object(
db,
'availability_zone_get',
mock.Mock(side_effect=exception.AvailabilityZoneNotFound(id='id'))
)
body = {"share": copy.deepcopy(self.share)}
req = fakes.HTTPRequest.blank('/shares', version='2.7')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.create,
req,
body)
def test_share_show(self):
req = fakes.HTTPRequest.blank('/shares/1')
expected = self._get_expected_share_detailed_response()
expected['share'].pop('snapshot_support')
expected['share'].pop('share_type_name')
expected['share'].pop('task_state')
expected['share'].pop('consistency_group_id')
expected['share'].pop('source_cgsnapshot_member_id')
res_dict = self.controller.show(req, '1')
self.assertEqual(expected, res_dict)
def test_share_show_with_consistency_group(self):
req = fakes.HTTPRequest.blank('/shares/1', version='2.4')
expected = self._get_expected_share_detailed_response()
expected['share'].pop('share_type_name')
expected['share'].pop('task_state')
res_dict = self.controller.show(req, '1')
self.assertEqual(expected, res_dict)
def test_share_show_with_share_type_name(self):
req = fakes.HTTPRequest.blank('/shares/1', version='2.6')
res_dict = self.controller.show(req, '1')
expected = self._get_expected_share_detailed_response()
expected['share']['consistency_group_id'] = None
expected['share']['source_cgsnapshot_member_id'] = None
expected['share']['share_type_name'] = None
expected['share']['task_state'] = None
self.assertEqual(expected, res_dict)
@ddt.data("2.15", "2.16")
def test_share_show_with_user_id(self, microversion):
req = fakes.HTTPRequest.blank('/shares/1', version=microversion)
res_dict = self.controller.show(req, '1')
expected = self._get_expected_share_detailed_response()
if api_version.APIVersionRequest(microversion) >= (
api_version.APIVersionRequest("2.16")):
expected['share']['user_id'] = 'fakeuser'
else:
self.assertNotIn('user_id', expected['share'])
expected['share']['consistency_group_id'] = None
expected['share']['source_cgsnapshot_member_id'] = None
expected['share']['share_type_name'] = None
expected['share']['task_state'] = None
expected['share']['access_rules_status'] = 'active'
expected['share'].pop('export_location')
expected['share'].pop('export_locations')
expected['share']['replication_type'] = None
expected['share']['has_replicas'] = False
self.assertEqual(expected, res_dict)
def test_share_show_admin(self):
req = fakes.HTTPRequest.blank('/shares/1', use_admin_context=True)
expected = self._get_expected_share_detailed_response(admin=True)
expected['share'].pop('snapshot_support')
expected['share'].pop('share_type_name')
expected['share'].pop('task_state')
expected['share'].pop('consistency_group_id')
expected['share'].pop('source_cgsnapshot_member_id')
res_dict = self.controller.show(req, '1')
self.assertEqual(expected, res_dict)
def test_share_show_no_share(self):
self.mock_object(share_api.API, 'get',
stubs.stub_share_get_notfound)
req = fakes.HTTPRequest.blank('/shares/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show,
req, '1')
def test_share_show_with_replication_type(self):
req = fakes.HTTPRequest.blank(
'/shares/1', version=share_replicas.MIN_SUPPORTED_API_VERSION)
res_dict = self.controller.show(req, '1')
expected = self._get_expected_share_detailed_response()
expected['share']['task_state'] = None
expected['share']['consistency_group_id'] = None
expected['share']['source_cgsnapshot_member_id'] = None
expected['share']['access_rules_status'] = 'active'
expected['share']['share_type_name'] = None
expected['share']['replication_type'] = None
expected['share']['has_replicas'] = False
expected['share'].pop('export_location')
expected['share'].pop('export_locations')
self.assertEqual(expected, res_dict)
def test_share_delete(self):
req = fakes.HTTPRequest.blank('/shares/1')
resp = self.controller.delete(req, 1)
self.assertEqual(202, resp.status_int)
def test_share_delete_has_replicas(self):
req = fakes.HTTPRequest.blank('/shares/1')
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=self.share))
self.mock_object(share_api.API, 'delete',
mock.Mock(side_effect=exception.Conflict(err='err')))
self.assertRaises(
webob.exc.HTTPConflict, self.controller.delete, req, 1)
def test_share_delete_in_consistency_group_param_not_provided(self):
fake_share = stubs.stub_share('fake_share',
consistency_group_id='fake_cg_id')
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=fake_share))
req = fakes.HTTPRequest.blank('/shares/1')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.delete, req, 1)
def test_share_delete_in_consistency_group(self):
fake_share = stubs.stub_share('fake_share',
consistency_group_id='fake_cg_id')
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=fake_share))
req = fakes.HTTPRequest.blank(
'/shares/1?consistency_group_id=fake_cg_id')
resp = self.controller.delete(req, 1)
self.assertEqual(202, resp.status_int)
def test_share_delete_in_consistency_group_wrong_id(self):
fake_share = stubs.stub_share('fake_share',
consistency_group_id='fake_cg_id')
self.mock_object(share_api.API, 'get',
mock.Mock(return_value=fake_share))
req = fakes.HTTPRequest.blank(
'/shares/1?consistency_group_id=not_fake_cg_id')
self.assertRaises(webob.exc.HTTPBadRequest,
self.controller.delete, req, 1)
    def test_share_update(self):
        """update() echoes name/description/is_public from the request."""
        shr = self.share
        body = {"share": shr}
        req = fakes.HTTPRequest.blank('/share/1')
        res_dict = self.controller.update(req, 1, body)
        self.assertEqual(shr["display_name"], res_dict['share']["name"])
        self.assertEqual(shr["display_description"],
                         res_dict['share']["description"])
        self.assertEqual(shr['is_public'],
                         res_dict['share']['is_public'])
def test_share_update_with_consistency_group(self):
shr = self.share
body = {"share": shr}
req = fakes.HTTPRequest.blank('/share/1', version="2.4")
res_dict = self.controller.update(req, 1, body)
self.assertIsNone(res_dict['share']["consistency_group_id"])
self.assertIsNone(res_dict['share']["source_cgsnapshot_member_id"])
    def test_share_not_updates_size(self):
        """update() must not apply 'size' from the request body.

        NOTE(review): presumably the stubbed update result keeps the stored
        size, so it differs from the size submitted in the body — confirm
        against stubs.stub_share_update.
        """
        req = fakes.HTTPRequest.blank('/share/1')
        res_dict = self.controller.update(req, 1, {"share": self.share})
        self.assertNotEqual(res_dict['share']["size"], self.share["size"])
def test_share_delete_no_share(self):
self.mock_object(share_api.API, 'get',
stubs.stub_share_get_notfound)
req = fakes.HTTPRequest.blank('/shares/1')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete,
req,
1)
    def _share_list_summary_with_search_opts(self, use_admin_context):
        """Exercise index() with a full set of search options.

        Verifies which options are forwarded to share_api.API.get_all
        (unknown keys such as 'fake_key' only pass through for admins) and
        that limit=1/offset=1 trims the mocked result down to shares[1].
        """
        search_opts = {
            'name': 'fake_name',
            'status': constants.STATUS_AVAILABLE,
            'share_server_id': 'fake_share_server_id',
            'share_type_id': 'fake_share_type_id',
            'snapshot_id': 'fake_snapshot_id',
            'host': 'fake_host',
            'share_network_id': 'fake_share_network_id',
            'metadata': '%7B%27k1%27%3A+%27v1%27%7D',  # serialized k1=v1
            'extra_specs': '%7B%27k2%27%3A+%27v2%27%7D',  # serialized k2=v2
            'sort_key': 'fake_sort_key',
            'sort_dir': 'fake_sort_dir',
            'limit': '1',
            'offset': '1',
            'is_public': 'False',
        }
        # fake_key should be filtered for non-admin
        url = '/shares?fake_key=fake_value'
        for k, v in search_opts.items():
            url = url + '&' + k + '=' + v
        req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context)
        shares = [
            {'id': 'id1', 'display_name': 'n1'},
            {'id': 'id2', 'display_name': 'n2'},
            {'id': 'id3', 'display_name': 'n3'},
        ]
        self.mock_object(share_api.API, 'get_all',
                         mock.Mock(return_value=shares))
        result = self.controller.index(req)
        # 'name' is translated to 'display_name'; metadata/extra_specs are
        # decoded from their URL-encoded form into real dicts.
        search_opts_expected = {
            'display_name': search_opts['name'],
            'status': search_opts['status'],
            'share_server_id': search_opts['share_server_id'],
            'share_type_id': search_opts['share_type_id'],
            'snapshot_id': search_opts['snapshot_id'],
            'host': search_opts['host'],
            'share_network_id': search_opts['share_network_id'],
            'metadata': {'k1': 'v1'},
            'extra_specs': {'k2': 'v2'},
            'is_public': 'False',
        }
        if use_admin_context:
            search_opts_expected.update({'fake_key': 'fake_value'})
        share_api.API.get_all.assert_called_once_with(
            req.environ['manila.context'],
            sort_key=search_opts['sort_key'],
            sort_dir=search_opts['sort_dir'],
            search_opts=search_opts_expected,
        )
        # limit=1/offset=1 leaves exactly the middle fake share.
        self.assertEqual(1, len(result['shares']))
        self.assertEqual(shares[1]['id'], result['shares'][0]['id'])
        self.assertEqual(
            shares[1]['display_name'], result['shares'][0]['name'])
    def test_share_list_summary_with_search_opts_by_non_admin(self):
        # Non-admin: unknown keys such as 'fake_key' must be filtered out.
        self._share_list_summary_with_search_opts(use_admin_context=False)
    def test_share_list_summary_with_search_opts_by_admin(self):
        # Admin: unknown keys are forwarded to the API layer untouched.
        self._share_list_summary_with_search_opts(use_admin_context=True)
    def test_share_list_summary(self):
        """Summary index view exposes only name, id and links."""
        self.mock_object(share_api.API, 'get_all',
                         stubs.stub_share_get_all_by_project)
        req = fakes.HTTPRequest.blank('/shares')
        res_dict = self.controller.index(req)
        expected = {
            'shares': [
                {
                    'name': 'displayname',
                    'id': '1',
                    'links': [
                        {
                            'href': 'http://localhost/v1/fake/shares/1',
                            'rel': 'self'
                        },
                        {
                            'href': 'http://localhost/fake/shares/1',
                            'rel': 'bookmark'
                        }
                    ],
                }
            ]
        }
        self.assertEqual(expected, res_dict)
    def _share_list_detail_with_search_opts(self, use_admin_context):
        """Exercise detail() with a full set of search options.

        Mirrors _share_list_summary_with_search_opts but also checks the
        detail-only fields (status, snapshot_id, share_type, host,
        share_network_id) of the surviving share.
        """
        search_opts = {
            'name': 'fake_name',
            'status': constants.STATUS_AVAILABLE,
            'share_server_id': 'fake_share_server_id',
            'share_type_id': 'fake_share_type_id',
            'snapshot_id': 'fake_snapshot_id',
            'host': 'fake_host',
            'share_network_id': 'fake_share_network_id',
            'metadata': '%7B%27k1%27%3A+%27v1%27%7D',  # serialized k1=v1
            'extra_specs': '%7B%27k2%27%3A+%27v2%27%7D',  # serialized k2=v2
            'sort_key': 'fake_sort_key',
            'sort_dir': 'fake_sort_dir',
            'limit': '1',
            'offset': '1',
            'is_public': 'False',
        }
        # fake_key should be filtered for non-admin
        url = '/shares/detail?fake_key=fake_value'
        for k, v in search_opts.items():
            url = url + '&' + k + '=' + v
        req = fakes.HTTPRequest.blank(url, use_admin_context=use_admin_context)
        shares = [
            {'id': 'id1', 'display_name': 'n1'},
            {
                'id': 'id2',
                'display_name': 'n2',
                'status': constants.STATUS_AVAILABLE,
                'snapshot_id': 'fake_snapshot_id',
                'share_type_id': 'fake_share_type_id',
                'instance': {
                    'host': 'fake_host',
                    'share_network_id': 'fake_share_network_id',
                },
            },
            {'id': 'id3', 'display_name': 'n3'},
        ]
        self.mock_object(share_api.API, 'get_all',
                         mock.Mock(return_value=shares))
        result = self.controller.detail(req)
        # 'name' is translated to 'display_name'; metadata/extra_specs are
        # decoded from their URL-encoded form into real dicts.
        search_opts_expected = {
            'display_name': search_opts['name'],
            'status': search_opts['status'],
            'share_server_id': search_opts['share_server_id'],
            'share_type_id': search_opts['share_type_id'],
            'snapshot_id': search_opts['snapshot_id'],
            'host': search_opts['host'],
            'share_network_id': search_opts['share_network_id'],
            'metadata': {'k1': 'v1'},
            'extra_specs': {'k2': 'v2'},
            'is_public': 'False',
        }
        if use_admin_context:
            search_opts_expected.update({'fake_key': 'fake_value'})
        share_api.API.get_all.assert_called_once_with(
            req.environ['manila.context'],
            sort_key=search_opts['sort_key'],
            sort_dir=search_opts['sort_dir'],
            search_opts=search_opts_expected,
        )
        # limit=1/offset=1 leaves exactly the middle fake share.
        self.assertEqual(1, len(result['shares']))
        self.assertEqual(shares[1]['id'], result['shares'][0]['id'])
        self.assertEqual(
            shares[1]['display_name'], result['shares'][0]['name'])
        self.assertEqual(
            shares[1]['snapshot_id'], result['shares'][0]['snapshot_id'])
        self.assertEqual(
            shares[1]['status'], result['shares'][0]['status'])
        self.assertEqual(
            shares[1]['share_type_id'], result['shares'][0]['share_type'])
        self.assertEqual(
            shares[1]['snapshot_id'], result['shares'][0]['snapshot_id'])
        self.assertEqual(
            shares[1]['instance']['host'], result['shares'][0]['host'])
        self.assertEqual(
            shares[1]['instance']['share_network_id'],
            result['shares'][0]['share_network_id'])
    def test_share_list_detail_with_search_opts_by_non_admin(self):
        # Non-admin: unknown keys such as 'fake_key' must be filtered out.
        self._share_list_detail_with_search_opts(use_admin_context=False)
    def test_share_list_detail_with_search_opts_by_admin(self):
        # Admin: unknown keys are forwarded to the API layer untouched.
        self._share_list_detail_with_search_opts(use_admin_context=True)
    def _list_detail_common_expected(self):
        """Return the baseline expected payload for the detail-view tests.

        Callers mutate the returned dict (pop/add keys) to match the
        microversion under test before comparing against detail().
        """
        return {
            'shares': [
                {
                    'status': 'fakestatus',
                    'description': 'displaydesc',
                    'export_location': 'fake_location',
                    'export_locations': ['fake_location', 'fake_location2'],
                    'availability_zone': 'fakeaz',
                    'name': 'displayname',
                    'share_proto': 'FAKEPROTO',
                    'metadata': {},
                    'project_id': 'fakeproject',
                    'host': 'fakehost',
                    'id': '1',
                    'snapshot_id': '2',
                    'snapshot_support': True,
                    'share_network_id': None,
                    'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                    'size': 1,
                    'share_type': '1',
                    'volume_type': '1',
                    'is_public': False,
                    'links': [
                        {
                            'href': 'http://localhost/v1/fake/shares/1',
                            'rel': 'self'
                        },
                        {
                            'href': 'http://localhost/fake/shares/1',
                            'rel': 'bookmark'
                        }
                    ],
                }
            ]
        }
    def _list_detail_test_common(self, req, expected):
        """Run detail() with stubbed get_all and compare to ``expected``."""
        self.mock_object(share_api.API, 'get_all',
                         stubs.stub_share_get_all_by_project)
        res_dict = self.controller.detail(req)
        self.assertEqual(expected, res_dict)
        # 'volume_type' is a legacy alias that must mirror 'share_type'.
        self.assertEqual(res_dict['shares'][0]['volume_type'],
                         res_dict['shares'][0]['share_type'])
def test_share_list_detail(self):
env = {'QUERY_STRING': 'name=Share+Test+Name'}
req = fakes.HTTPRequest.blank('/shares/detail', environ=env)
expected = self._list_detail_common_expected()
expected['shares'][0].pop('snapshot_support')
self._list_detail_test_common(req, expected)
def test_share_list_detail_with_consistency_group(self):
env = {'QUERY_STRING': 'name=Share+Test+Name'}
req = fakes.HTTPRequest.blank('/shares/detail', environ=env,
version="2.4")
expected = self._list_detail_common_expected()
expected['shares'][0]['consistency_group_id'] = None
expected['shares'][0]['source_cgsnapshot_member_id'] = None
self._list_detail_test_common(req, expected)
    def test_share_list_detail_with_task_state(self):
        # Microversion 2.5 adds 'task_state' on top of the 2.4 CG fields.
        env = {'QUERY_STRING': 'name=Share+Test+Name'}
        req = fakes.HTTPRequest.blank('/shares/detail', environ=env,
                                      version="2.5")
        expected = self._list_detail_common_expected()
        expected['shares'][0]['consistency_group_id'] = None
        expected['shares'][0]['source_cgsnapshot_member_id'] = None
        expected['shares'][0]['task_state'] = None
        self._list_detail_test_common(req, expected)
    def test_share_list_detail_without_export_locations(self):
        # As of microversion 2.9 export locations are no longer embedded in
        # the detail view; 'share_type_name' appears instead.
        env = {'QUERY_STRING': 'name=Share+Test+Name'}
        req = fakes.HTTPRequest.blank('/shares/detail', environ=env,
                                      version="2.9")
        expected = self._list_detail_common_expected()
        expected['shares'][0]['consistency_group_id'] = None
        expected['shares'][0]['source_cgsnapshot_member_id'] = None
        expected['shares'][0]['task_state'] = None
        expected['shares'][0]['share_type_name'] = None
        expected['shares'][0].pop('export_location')
        expected['shares'][0].pop('export_locations')
        self._list_detail_test_common(req, expected)
    def test_share_list_detail_with_replication_type(self):
        """Detail view at the replication microversion includes replica
        fields and omits embedded export locations."""
        self.mock_object(share_api.API, 'get_all',
                         stubs.stub_share_get_all_by_project)
        env = {'QUERY_STRING': 'name=Share+Test+Name'}
        req = fakes.HTTPRequest.blank(
            '/shares/detail', environ=env,
            version=share_replicas.MIN_SUPPORTED_API_VERSION)
        res_dict = self.controller.detail(req)
        expected = {
            'shares': [
                {
                    'status': 'fakestatus',
                    'description': 'displaydesc',
                    'availability_zone': 'fakeaz',
                    'name': 'displayname',
                    'share_proto': 'FAKEPROTO',
                    'metadata': {},
                    'project_id': 'fakeproject',
                    'access_rules_status': 'active',
                    'host': 'fakehost',
                    'id': '1',
                    'snapshot_id': '2',
                    'share_network_id': None,
                    'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                    'size': 1,
                    'share_type_name': None,
                    'share_type': '1',
                    'volume_type': '1',
                    'is_public': False,
                    'consistency_group_id': None,
                    'source_cgsnapshot_member_id': None,
                    'snapshot_support': True,
                    'has_replicas': False,
                    'replication_type': None,
                    'task_state': None,
                    'links': [
                        {
                            'href': 'http://localhost/v1/fake/shares/1',
                            'rel': 'self'
                        },
                        {
                            'href': 'http://localhost/fake/shares/1',
                            'rel': 'bookmark'
                        }
                    ],
                }
            ]
        }
        self.assertEqual(expected, res_dict)
        # 'volume_type' is a legacy alias that must mirror 'share_type'.
        self.assertEqual(res_dict['shares'][0]['volume_type'],
                         res_dict['shares'][0]['share_type'])
def test_remove_invalid_options(self):
ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=False)
search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
expected_opts = {'a': 'a', 'c': 'c'}
allowed_opts = ['a', 'c']
common.remove_invalid_options(ctx, search_opts, allowed_opts)
self.assertEqual(expected_opts, search_opts)
def test_remove_invalid_options_admin(self):
ctx = context.RequestContext('fakeuser', 'fakeproject', is_admin=True)
search_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
expected_opts = {'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'}
allowed_opts = ['a', 'c']
common.remove_invalid_options(ctx, search_opts, allowed_opts)
self.assertEqual(expected_opts, search_opts)
def _fake_access_get(self, ctxt, access_id):
    """Stub for share_api.API.access_get used by the deny-access tests.

    NOTE: this is patched in as an unbound replacement method, hence the
    ``self`` parameter even though it is a module-level function.
    """

    class Access(object):
        def __init__(self, **kwargs):
            # Fake state-machine constants; new instances start in STATE_NEW.
            self.STATE_NEW = 'fake_new'
            self.STATE_ACTIVE = 'fake_active'
            self.STATE_ERROR = 'fake_error'
            self.params = kwargs
            self.params['state'] = self.STATE_NEW
            self.share_id = kwargs.get('share_id')
            self.id = access_id

        def __getitem__(self, item):
            # Dict-style access to the stored kwargs.
            return self.params[item]

    access = Access(access_id=access_id, share_id='fake_share_id')

    return access
@ddt.ddt
class ShareActionsTest(test.TestCase):
    """Tests for the ShareController action handlers: allow/deny access,
    access listing, extend and shrink."""

    def setUp(self):
        # FIX: name the class explicitly -- super(self.__class__, self)
        # recurses infinitely if this test class is ever subclassed.
        super(ShareActionsTest, self).setUp()
        self.controller = shares.ShareController()
        self.mock_object(share_api.API, 'get', stubs.stub_share_get)

    @ddt.data(
        {'access_type': 'ip', 'access_to': '127.0.0.1'},
        {'access_type': 'user', 'access_to': '1' * 4},
        {'access_type': 'user', 'access_to': '1' * 32},
        {'access_type': 'user', 'access_to': 'fake\\]{.-_\'`;}['},
        {'access_type': 'user', 'access_to': 'MYDOMAIN\\Administrator'},
        {'access_type': 'cert', 'access_to': 'x'},
        {'access_type': 'cert', 'access_to': 'tenant.example.com'},
        {'access_type': 'cert', 'access_to': 'x' * 64},
    )
    def test_allow_access(self, access):
        # Valid access rules of every supported type are accepted at v2.7.
        self.mock_object(share_api.API,
                         'allow_access',
                         mock.Mock(return_value={'fake': 'fake'}))
        self.mock_object(self.controller._access_view_builder, 'view',
                         mock.Mock(return_value={'access':
                                                 {'fake': 'fake'}}))
        id = 'fake_share_id'
        body = {'allow_access': access}
        expected = {'access': {'fake': 'fake'}}
        req = fakes.HTTPRequest.blank(
            '/v2/tenant1/shares/%s/action' % id, version="2.7")
        res = self.controller.allow_access(req, id, body)
        self.assertEqual(expected, res)

    @ddt.data(
        {'access_type': 'error_type', 'access_to': '127.0.0.1'},
        {'access_type': 'ip', 'access_to': 'localhost'},
        {'access_type': 'ip', 'access_to': '127.0.0.*'},
        {'access_type': 'ip', 'access_to': '127.0.0.0/33'},
        {'access_type': 'ip', 'access_to': '127.0.0.256'},
        {'access_type': 'user', 'access_to': '1'},
        {'access_type': 'user', 'access_to': '1' * 3},
        {'access_type': 'user', 'access_to': '1' * 33},
        {'access_type': 'user', 'access_to': 'root^'},
        {'access_type': 'cert', 'access_to': ''},
        {'access_type': 'cert', 'access_to': ' '},
        {'access_type': 'cert', 'access_to': 'x' * 65},
    )
    def test_allow_access_error(self, access):
        # Malformed access rules are rejected with HTTP 400.
        id = 'fake_share_id'
        body = {'allow_access': access}
        req = fakes.HTTPRequest.blank('/v2/tenant1/shares/%s/action' % id,
                                      version="2.7")
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.allow_access, req, id, body)

    @ddt.unpack
    @ddt.data(
        {'exc': None, 'access_to': 'alice', 'version': '2.13'},
        {'exc': webob.exc.HTTPBadRequest, 'access_to': 'alice',
         'version': '2.11'}
    )
    def test_allow_access_ceph(self, exc, access_to, version):
        # 'cephx' access is only accepted from microversion 2.13 onwards.
        share_id = "fake_id"
        self.mock_object(share_api.API,
                         'allow_access',
                         mock.Mock(return_value={'fake': 'fake'}))
        self.mock_object(self.controller._access_view_builder, 'view',
                         mock.Mock(return_value={'access':
                                                 {'fake': 'fake'}}))
        req = fakes.HTTPRequest.blank(
            '/v2/shares/%s/action' % share_id, version=version)
        body = {'allow_access':
                {
                    'access_type': 'cephx',
                    'access_to': access_to,
                    'access_level': 'rw'
                }}
        if exc:
            self.assertRaises(exc, self.controller.allow_access, req, share_id,
                              body)
        else:
            expected = {'access': {'fake': 'fake'}}
            # FIX: pass the share id under test; the builtin ``id`` was
            # accidentally passed here before.
            res = self.controller.allow_access(req, share_id, body)
            self.assertEqual(expected, res)

    def test_deny_access(self):
        def _stub_deny_access(*args, **kwargs):
            pass

        self.mock_object(share_api.API, "deny_access", _stub_deny_access)
        self.mock_object(share_api.API, "access_get", _fake_access_get)

        id = 'fake_share_id'
        body = {"os-deny_access": {"access_id": 'fake_acces_id'}}
        req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id)
        res = self.controller._deny_access(req, id, body)
        self.assertEqual(202, res.status_int)

    def test_deny_access_not_found(self):
        def _stub_deny_access(*args, **kwargs):
            pass

        self.mock_object(share_api.API, "deny_access", _stub_deny_access)
        self.mock_object(share_api.API, "access_get", _fake_access_get)

        # The stubbed access record belongs to 'fake_share_id', so denying
        # it for a different share must 404.
        id = 'super_fake_share_id'
        body = {"os-deny_access": {"access_id": 'fake_acces_id'}}
        req = fakes.HTTPRequest.blank('/v1/tenant1/shares/%s/action' % id)
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller._deny_access,
                          req,
                          id,
                          body)

    def test_access_list(self):
        fake_access_list = [
            {
                "state": "fakestatus",
                "id": "fake_access_id",
                "access_type": "fakeip",
                "access_to": "127.0.0.1",
            }
        ]
        self.mock_object(self.controller._access_view_builder, 'list_view',
                         mock.Mock(return_value={'access_list':
                                                 fake_access_list}))
        id = 'fake_share_id'
        body = {"os-access_list": None}
        req = fakes.HTTPRequest.blank('/v2/tenant1/shares/%s/action' % id)
        res_dict = self.controller._access_list(req, id, body)
        self.assertEqual({'access_list': fake_access_list}, res_dict)

    @ddt.unpack
    @ddt.data(
        {'body': {'os-extend': {'new_size': 2}}, 'version': '2.6'},
        {'body': {'extend': {'new_size': 2}}, 'version': '2.7'},
    )
    def test_extend(self, body, version):
        # Both the legacy 'os-extend' and the renamed 'extend' action work.
        id = 'fake_share_id'
        share = stubs.stub_share_get(None, None, id)
        self.mock_object(share_api.API, 'get', mock.Mock(return_value=share))
        self.mock_object(share_api.API, "extend")

        size = '2'
        req = fakes.HTTPRequest.blank(
            '/v2/shares/%s/action' % id, version=version)
        actual_response = self.controller._extend(req, id, body)

        share_api.API.get.assert_called_once_with(mock.ANY, id)
        share_api.API.extend.assert_called_once_with(
            mock.ANY, share, int(size))
        self.assertEqual(202, actual_response.status_int)

    @ddt.data({"os-extend": ""},
              {"os-extend": {"new_size": "foo"}},
              {"os-extend": {"new_size": {'foo': 'bar'}}})
    def test_extend_invalid_body(self, body):
        # Missing/non-numeric new_size is rejected with HTTP 400.
        id = 'fake_share_id'
        req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._extend, req, id, body)

    @ddt.data({'source': exception.InvalidInput,
               'target': webob.exc.HTTPBadRequest},
              {'source': exception.InvalidShare,
               'target': webob.exc.HTTPBadRequest},
              {'source': exception.ShareSizeExceedsAvailableQuota,
               'target': webob.exc.HTTPForbidden})
    @ddt.unpack
    def test_extend_exception(self, source, target):
        # API-layer exceptions map onto the corresponding HTTP errors.
        id = 'fake_share_id'
        req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id)
        body = {"os-extend": {'new_size': '123'}}
        self.mock_object(share_api.API, "extend",
                         mock.Mock(side_effect=source('fake')))
        self.assertRaises(target, self.controller._extend, req, id, body)

    @ddt.unpack
    @ddt.data(
        {'body': {'os-shrink': {'new_size': 1}}, 'version': '2.6'},
        {'body': {'shrink': {'new_size': 1}}, 'version': '2.7'},
    )
    def test_shrink(self, body, version):
        # Both the legacy 'os-shrink' and the renamed 'shrink' action work.
        id = 'fake_share_id'
        share = stubs.stub_share_get(None, None, id)
        self.mock_object(share_api.API, 'get', mock.Mock(return_value=share))
        self.mock_object(share_api.API, "shrink")

        size = '1'
        req = fakes.HTTPRequest.blank(
            '/v2/shares/%s/action' % id, version=version)
        actual_response = self.controller._shrink(req, id, body)

        share_api.API.get.assert_called_once_with(mock.ANY, id)
        share_api.API.shrink.assert_called_once_with(
            mock.ANY, share, int(size))
        self.assertEqual(202, actual_response.status_int)

    @ddt.data({"os-shrink": ""},
              {"os-shrink": {"new_size": "foo"}},
              {"os-shrink": {"new_size": {'foo': 'bar'}}})
    def test_shrink_invalid_body(self, body):
        # Missing/non-numeric new_size is rejected with HTTP 400.
        id = 'fake_share_id'
        req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller._shrink, req, id, body)

    @ddt.data({'source': exception.InvalidInput,
               'target': webob.exc.HTTPBadRequest},
              {'source': exception.InvalidShare,
               'target': webob.exc.HTTPBadRequest})
    @ddt.unpack
    def test_shrink_exception(self, source, target):
        # API-layer exceptions map onto the corresponding HTTP errors.
        id = 'fake_share_id'
        req = fakes.HTTPRequest.blank('/v1/shares/%s/action' % id)
        body = {"os-shrink": {'new_size': '123'}}
        self.mock_object(share_api.API, "shrink",
                         mock.Mock(side_effect=source('fake')))
        self.assertRaises(target, self.controller._shrink, req, id, body)
@ddt.ddt
class ShareAdminActionsAPITest(test.TestCase):
    """Tests for the admin-only share actions: reset_status and
    force_delete, at both the 'os-' prefixed (<=2.6) and unprefixed
    (>=2.7) action names."""

    def setUp(self):
        # FIX: name the class explicitly -- super(self.__class__, self)
        # recurses infinitely if this test class is ever subclassed.
        super(ShareAdminActionsAPITest, self).setUp()
        CONF.set_default("default_share_type", None)
        self.flags(rpc_backend='manila.openstack.common.rpc.impl_fake')
        self.share_api = share_api.API()
        self.admin_context = context.RequestContext('admin', 'fake', True)
        self.member_context = context.RequestContext('fake', 'fake')

    def _get_context(self, role):
        return getattr(self, '%s_context' % role)

    def _setup_share_data(self, share=None, version='2.7'):
        """Create (or reuse) a share and build an action request for it."""
        if share is None:
            share = db_utils.create_share(status=constants.STATUS_AVAILABLE,
                                          size='1',
                                          override_defaults=True)
        req = fakes.HTTPRequest.blank(
            '/v2/fake/shares/%s/action' % share['id'], version=version)
        return share, req

    def _reset_status(self, ctxt, model, req, db_access_method,
                      valid_code, valid_status=None, body=None, version='2.7'):
        """POST a reset_status action and validate code and DB state."""
        # The action lost its 'os-' prefix in microversion 2.7.
        if float(version) > 2.6:
            action_name = 'reset_status'
        else:
            action_name = 'os-reset_status'
        if body is None:
            body = {action_name: {'status': constants.STATUS_ERROR}}
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.headers['X-Openstack-Manila-Api-Version'] = version
        req.body = six.b(jsonutils.dumps(body))
        req.environ['manila.context'] = ctxt

        resp = req.get_response(fakes.app())

        # validate response code and model status
        self.assertEqual(valid_code, resp.status_int)

        if valid_code == 404:
            self.assertRaises(exception.NotFound,
                              db_access_method,
                              ctxt,
                              model['id'])
        else:
            actual_model = db_access_method(ctxt, model['id'])
            self.assertEqual(valid_status, actual_model['status'])

    @ddt.data(*fakes.fixture_reset_status_with_different_roles)
    @ddt.unpack
    def test_share_reset_status_with_different_roles(self, role, valid_code,
                                                     valid_status, version):
        share, req = self._setup_share_data(version=version)
        ctxt = self._get_context(role)

        self._reset_status(ctxt, share, req, db.share_get, valid_code,
                           valid_status, version=version)

    @ddt.data(*fakes.fixture_invalid_reset_status_body)
    def test_share_invalid_reset_status_body(self, body):
        share, req = self._setup_share_data(version='2.6')
        ctxt = self.admin_context

        self._reset_status(ctxt, share, req, db.share_get, 400,
                           constants.STATUS_AVAILABLE, body, version='2.6')

    @ddt.data('2.6', '2.7')
    def test_share_reset_status_for_missing(self, version):
        fake_share = {'id': 'missing-share-id'}
        req = fakes.HTTPRequest.blank(
            '/v2/fake/shares/%s/action' % fake_share['id'], version=version)

        # FIX: check the *share* table (db.share_get) rather than the
        # snapshot accessor that was copy-pasted here; both raise NotFound
        # for the missing id, but only share_get validates the right thing.
        self._reset_status(self.admin_context, fake_share, req,
                           db.share_get, 404, version=version)

    def _force_delete(self, ctxt, model, req, db_access_method, valid_code,
                      check_model_in_db=False, version='2.7'):
        """POST a force_delete action and validate code and DB removal."""
        # The action lost its 'os-' prefix in microversion 2.7.
        if float(version) > 2.6:
            action_name = 'force_delete'
        else:
            action_name = 'os-force_delete'
        req.method = 'POST'
        req.headers['content-type'] = 'application/json'
        req.headers['X-Openstack-Manila-Api-Version'] = version
        req.body = six.b(jsonutils.dumps({action_name: {}}))
        req.environ['manila.context'] = ctxt

        resp = req.get_response(fakes.app())

        # validate response
        self.assertEqual(valid_code, resp.status_int)

        if valid_code == 202 and check_model_in_db:
            self.assertRaises(exception.NotFound,
                              db_access_method,
                              ctxt,
                              model['id'])

    @ddt.data(*fakes.fixture_force_delete_with_different_roles)
    @ddt.unpack
    def test_share_force_delete_with_different_roles(self, role, resp_code,
                                                     version):
        share, req = self._setup_share_data(version=version)
        ctxt = self._get_context(role)

        self._force_delete(ctxt, share, req, db.share_get, resp_code,
                           check_model_in_db=True, version=version)

    @ddt.data('2.6', '2.7')
    def test_share_force_delete_missing(self, version):
        share, req = self._setup_share_data(
            share={'id': 'fake'}, version=version)
        ctxt = self._get_context('admin')

        self._force_delete(
            ctxt, share, req, db.share_get, 404, version=version)
@ddt.ddt
class ShareUnmanageTest(test.TestCase):
    """Tests for the admin 'unmanage' share action (microversion 2.7+)."""

    def setUp(self):
        # FIX: name the class explicitly -- super(self.__class__, self)
        # recurses infinitely if this test class is ever subclassed.
        super(ShareUnmanageTest, self).setUp()
        self.controller = shares.ShareController()
        self.mock_object(share_api.API, 'get_all',
                         stubs.stub_get_all_shares)
        self.mock_object(share_api.API, 'get',
                         stubs.stub_share_get)
        self.mock_object(share_api.API, 'update', stubs.stub_share_update)
        self.mock_object(share_api.API, 'delete', stubs.stub_share_delete)
        self.mock_object(share_api.API, 'get_snapshot',
                         stubs.stub_snapshot_get)
        self.share_id = 'fake'
        self.request = fakes.HTTPRequest.blank(
            '/share/%s/unmanage' % self.share_id,
            use_admin_context=True, version='2.7',
        )

    def test_unmanage_share(self):
        # Happy path: available share, no snapshots, no share server.
        share = dict(status=constants.STATUS_AVAILABLE, id='foo_id',
                     instance={})
        self.mock_object(share_api.API, 'get', mock.Mock(return_value=share))
        self.mock_object(share_api.API, 'unmanage', mock.Mock())
        self.mock_object(
            self.controller.share_api.db, 'share_snapshot_get_all_for_share',
            mock.Mock(return_value=[]))

        actual_result = self.controller.unmanage(self.request, share['id'])

        self.assertEqual(202, actual_result.status_int)
        self.controller.share_api.db.share_snapshot_get_all_for_share.\
            assert_called_once_with(
                self.request.environ['manila.context'], share['id'])
        self.controller.share_api.get.assert_called_once_with(
            self.request.environ['manila.context'], share['id'])
        share_api.API.unmanage.assert_called_once_with(
            self.request.environ['manila.context'], share)

    def test_unmanage_share_that_has_snapshots(self):
        # Shares that still have snapshots must not be unmanaged -> 403.
        share = dict(status=constants.STATUS_AVAILABLE, id='foo_id',
                     instance={})
        snapshots = ['foo', 'bar']
        self.mock_object(self.controller.share_api, 'unmanage')
        self.mock_object(
            self.controller.share_api.db, 'share_snapshot_get_all_for_share',
            mock.Mock(return_value=snapshots))
        self.mock_object(
            self.controller.share_api, 'get',
            mock.Mock(return_value=share))

        self.assertRaises(
            webob.exc.HTTPForbidden,
            self.controller.unmanage, self.request, share['id'])

        self.assertFalse(self.controller.share_api.unmanage.called)
        self.controller.share_api.db.share_snapshot_get_all_for_share.\
            assert_called_once_with(
                self.request.environ['manila.context'], share['id'])
        self.controller.share_api.get.assert_called_once_with(
            self.request.environ['manila.context'], share['id'])

    def test_unmanage_share_based_on_share_server(self):
        # Shares created on a share server cannot be unmanaged -> 403.
        share = dict(instance=dict(share_server_id='foo_id'), id='bar_id')
        self.mock_object(
            self.controller.share_api, 'get',
            mock.Mock(return_value=share))

        self.assertRaises(
            webob.exc.HTTPForbidden,
            self.controller.unmanage, self.request, share['id'])

        self.controller.share_api.get.assert_called_once_with(
            self.request.environ['manila.context'], share['id'])

    @ddt.data(*constants.TRANSITIONAL_STATUSES)
    def test_unmanage_share_with_transitional_state(self, share_status):
        # Shares in a transitional status cannot be unmanaged -> 403.
        share = dict(status=share_status, id='foo_id', instance={})
        self.mock_object(
            self.controller.share_api, 'get',
            mock.Mock(return_value=share))

        self.assertRaises(
            webob.exc.HTTPForbidden,
            self.controller.unmanage, self.request, share['id'])

        self.controller.share_api.get.assert_called_once_with(
            self.request.environ['manila.context'], share['id'])

    def test_unmanage_share_not_found(self):
        self.mock_object(share_api.API, 'get', mock.Mock(
            side_effect=exception.NotFound))
        self.mock_object(share_api.API, 'unmanage', mock.Mock())

        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.unmanage,
                          self.request, self.share_id)

    @ddt.data(exception.InvalidShare(reason="fake"),
              exception.PolicyNotAuthorized(action="fake"),)
    def test_unmanage_share_invalid(self, side_effect):
        # InvalidShare and PolicyNotAuthorized both map onto HTTP 403.
        share = dict(status=constants.STATUS_AVAILABLE, id='foo_id',
                     instance={})
        self.mock_object(share_api.API, 'get', mock.Mock(return_value=share))
        self.mock_object(share_api.API, 'unmanage', mock.Mock(
            side_effect=side_effect))

        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.unmanage,
                          self.request, self.share_id)

    def test_wrong_permissions(self):
        # Non-admin contexts may not unmanage shares.
        share_id = 'fake'
        req = fakes.HTTPRequest.blank('/share/%s/unmanage' % share_id,
                                      use_admin_context=False, version='2.7')

        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.unmanage,
                          req,
                          share_id)

    def test_unsupported_version(self):
        # unmanage only exists from microversion 2.7 onwards.
        share_id = 'fake'
        req = fakes.HTTPRequest.blank('/share/%s/unmanage' % share_id,
                                      use_admin_context=False, version='2.6')

        self.assertRaises(exception.VersionNotFoundForAPIMethod,
                          self.controller.unmanage,
                          req,
                          share_id)
def get_fake_manage_body(export_path='/fake', service_host='fake@host#POOL',
                         protocol='fake', share_type='fake', **kwargs):
    """Build a fake 'manage share' request body for the tests.

    Extra keyword arguments are merged into the 'share' sub-dict, which
    lets tests add optional fields such as name or driver_options.
    """
    share = dict(kwargs)
    share.update(
        export_path=export_path,
        service_host=service_host,
        protocol=protocol,
        share_type=share_type,
    )
    return {'share': share}
@ddt.ddt
class ShareManageTest(test.TestCase):
def setUp(self):
super(self.__class__, self).setUp()
self.controller = shares.ShareController()
self.resource_name = self.controller.resource_name
self.request = fakes.HTTPRequest.blank(
'/v2/shares/manage', use_admin_context=True, version='2.7')
self.mock_policy_check = self.mock_object(
policy, 'check_policy', mock.Mock(return_value=True))
    def _setup_manage_mocks(self, service_is_up=True):
        """Stub the service and share-type lookups used by manage().

        :param service_is_up: when False, utils.validate_service_host is
            stubbed to raise ServiceIsDown instead of succeeding.
        """
        self.mock_object(db, 'service_get_by_host_and_topic', mock.Mock(
            return_value={'host': 'fake'}))
        self.mock_object(share_types, 'get_share_type_by_name_or_id',
                         mock.Mock(return_value={'id': 'fake'}))
        self.mock_object(utils, 'service_is_up', mock.Mock(
            return_value=service_is_up))
        if service_is_up:
            self.mock_object(utils, 'validate_service_host')
        else:
            self.mock_object(
                utils,
                'validate_service_host',
                mock.Mock(side_effect=exception.ServiceIsDown(service='fake')))
    @ddt.data({},
              {'shares': {}},
              {'share': get_fake_manage_body('', None, None)})
    def test_share_manage_invalid_body(self, body):
        # Bodies missing the 'share' key or its required fields -> 422.
        self.assertRaises(webob.exc.HTTPUnprocessableEntity,
                          self.controller.manage,
                          self.request,
                          body)
def test_share_manage_service_not_found(self):
body = get_fake_manage_body()
self.mock_object(db, 'service_get_by_host_and_topic', mock.Mock(
side_effect=exception.ServiceNotFound(service_id='fake')))
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.manage,
self.request,
body)
    def test_share_manage_share_type_not_found(self):
        # An unknown share type name during manage() maps onto HTTP 404.
        body = get_fake_manage_body()
        self.mock_object(db, 'service_get_by_host_and_topic', mock.Mock())
        self.mock_object(utils, 'service_is_up', mock.Mock(return_value=True))
        self.mock_object(db, 'share_type_get_by_name', mock.Mock(
            side_effect=exception.ShareTypeNotFoundByName(
                share_type_name='fake')))
        self.assertRaises(webob.exc.HTTPNotFound,
                          self.controller.manage,
                          self.request,
                          body)
    @ddt.data({'service_is_up': False, 'service_host': 'fake@host#POOL'},
              {'service_is_up': True, 'service_host': 'fake@host'})
    def test_share_manage_bad_request(self, settings):
        # A down service, or a host missing its '#POOL' suffix, is a 400.
        body = get_fake_manage_body(service_host=settings.pop('service_host'))
        self._setup_manage_mocks(**settings)
        self.assertRaises(webob.exc.HTTPBadRequest,
                          self.controller.manage,
                          self.request,
                          body)
def test_share_manage_duplicate_share(self):
body = get_fake_manage_body()
exc = exception.InvalidShare(reason="fake")
self._setup_manage_mocks()
self.mock_object(share_api.API, 'manage', mock.Mock(side_effect=exc))
self.assertRaises(webob.exc.HTTPConflict,
self.controller.manage,
self.request,
body)
def test_share_manage_forbidden_manage(self):
body = get_fake_manage_body()
self._setup_manage_mocks()
error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action=''))
self.mock_object(share_api.API, 'manage', error)
self.assertRaises(webob.exc.HTTPForbidden,
self.controller.manage,
self.request,
body)
    def test_share_manage_forbidden_validate_service_host(self):
        # PolicyNotAuthorized raised while validating the service host must
        # also surface as HTTP 403.
        body = get_fake_manage_body()
        self._setup_manage_mocks()
        error = mock.Mock(side_effect=exception.PolicyNotAuthorized(action=''))
        self.mock_object(
            utils, 'validate_service_host', mock.Mock(side_effect=error))
        self.assertRaises(webob.exc.HTTPForbidden,
                          self.controller.manage,
                          self.request,
                          body)
    @ddt.data(
        get_fake_manage_body(name='foo', description='bar'),
        get_fake_manage_body(display_name='foo', description='bar'),
        get_fake_manage_body(name='foo', display_description='bar'),
        get_fake_manage_body(display_name='foo', display_description='bar'),
        get_fake_manage_body(display_name='foo', display_description='bar',
                             driver_options=dict(volume_id='quuz')),
    )
    def test_share_manage(self, data):
        # Happy path at microversion 2.7 for every accepted spelling of the
        # name/description fields, with and without driver_options.
        self._test_share_manage(data, "2.7")
    @ddt.data(
        get_fake_manage_body(name='foo', description='bar', is_public=True),
        get_fake_manage_body(name='foo', description='bar', is_public=False)
    )
    def test_share_manage_with_is_public(self, data):
        # Microversion 2.8 added the 'is_public' field to manage requests.
        self._test_share_manage(data, "2.8")
    def test_share_manage_with_user_id(self):
        # Microversion 2.16 added 'user_id' to the share view.
        self._test_share_manage(get_fake_manage_body(
            name='foo', description='bar', is_public=True), "2.16")
    def _test_share_manage(self, data, version):
        # Shared happy-path driver: invoke manage() at `version` and compare
        # the returned share view against `expected`, extending `expected`
        # with the fields each microversion introduced.
        expected = {
            'share': {
                'status': 'fakestatus',
                'description': 'displaydesc',
                'availability_zone': 'fakeaz',
                'name': 'displayname',
                'share_proto': 'FAKEPROTO',
                'metadata': {},
                'project_id': 'fakeproject',
                'host': 'fakehost',
                'id': 'fake',
                'snapshot_id': '2',
                'share_network_id': None,
                'created_at': datetime.datetime(1, 1, 1, 1, 1, 1),
                'size': 1,
                'share_type_name': None,
                'share_server_id': 'fake_share_server_id',
                'share_type': '1',
                'volume_type': '1',
                'is_public': False,
                'consistency_group_id': None,
                'source_cgsnapshot_member_id': None,
                'snapshot_support': True,
                'task_state': None,
                'links': [
                    {
                        'href': 'http://localhost/v1/fake/shares/fake',
                        'rel': 'self'
                    },
                    {
                        'href': 'http://localhost/fake/shares/fake',
                        'rel': 'bookmark'
                    }
                ],
            }
        }
        self._setup_manage_mocks()
        return_share = mock.Mock(
            return_value=stubs.stub_share('fake', instance={}))
        self.mock_object(
            share_api.API, 'manage', return_share)
        # Arguments the controller is expected to forward to the share API.
        share = {
            'host': data['share']['service_host'],
            'export_location': data['share']['export_path'],
            'share_proto': data['share']['protocol'].upper(),
            'share_type_id': 'fake',
            'display_name': 'foo',
            'display_description': 'bar',
        }
        driver_options = data['share'].get('driver_options', {})
        # Versions <= 2.8 still expose the flat export location fields.
        if (api_version.APIVersionRequest(version) <=
                api_version.APIVersionRequest('2.8')):
            expected['share']['export_location'] = 'fake_location'
            expected['share']['export_locations'] = (
                ['fake_location', 'fake_location2'])
        # 2.10 added access_rules_status to the share view.
        if (api_version.APIVersionRequest(version) >=
                api_version.APIVersionRequest('2.10')):
            expected['share']['access_rules_status'] = (
                constants.STATUS_ACTIVE)
        # 2.11 added replication fields.
        if (api_version.APIVersionRequest(version) >=
                api_version.APIVersionRequest('2.11')):
            expected['share']['has_replicas'] = False
            expected['share']['replication_type'] = None
        # 2.16 added user_id.
        if (api_version.APIVersionRequest(version) >=
                api_version.APIVersionRequest('2.16')):
            expected['share']['user_id'] = 'fakeuser'
        # From 2.8 on, is_public is passed through to the share API.
        if (api_version.APIVersionRequest(version) >=
                api_version.APIVersionRequest('2.8')):
            share['is_public'] = data['share']['is_public']
        req = fakes.HTTPRequest.blank('/v2/shares/manage', version=version,
                                      use_admin_context=True)
        actual_result = self.controller.manage(req, data)
        share_api.API.manage.assert_called_once_with(
            mock.ANY, share, driver_options)
        self.assertIsNotNone(actual_result)
        self.assertEqual(expected, actual_result)
        self.mock_policy_check.assert_called_once_with(
            req.environ['manila.context'], self.resource_name, 'manage')
def test_wrong_permissions(self):
body = get_fake_manage_body()
self.assertRaises(
webob.exc.HTTPForbidden,
self.controller.manage,
fakes.HTTPRequest.blank(
'/share/manage', use_admin_context=False, version='2.7'),
body,
)
def test_unsupported_version(self):
share_id = 'fake'
req = fakes.HTTPRequest.blank(
'/share/manage', use_admin_context=False, version='2.6')
self.assertRaises(exception.VersionNotFoundForAPIMethod,
self.controller.manage,
req,
share_id)
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Wrappers for primitive Neural Net (NN) Operations."""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.client import graph_util
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import common_shapes
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_nn_ops import *
# pylint: enable=wildcard-import
# Aliases for some automatically-generated names.
# Expose the generated `lrn` op under its longer, documented public name.
local_response_normalization = gen_nn_ops.lrn
def conv2d_transpose(value, filter, output_shape, strides, padding="SAME",
                     name=None):
  """The transpose of `conv2d`.

  This operation is sometimes called "deconvolution" after (Deconvolutional
  Networks)[http://www.matthewzeiler.com/pubs/cvpr2010/cvpr2010.pdf], but is
  actually the transpose (gradient) of `conv2d` rather than an actual
  deconvolution.

  Args:
    value: A 4-D `Tensor` of type `float` and shape
      `[batch, height, width, in_channels]`.
    filter: A 4-D `Tensor` with the same type as `value` and shape
      `[height, width, output_channels, in_channels]`.  `filter`'s
      `in_channels` dimension must match that of `value`.
    output_shape: A 1-D `Tensor` representing the output shape of the
      deconvolution op.
    strides: A list of ints. The stride of the sliding window for each
      dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    name: Optional name for the returned tensor.

  Returns:
    A `Tensor` with the same type as `value`.

  Raises:
    ValueError: If input/output depth does not match `filter`'s shape, or if
      padding is other than `'VALID'` or `'SAME'`.
  """
  with ops.op_scope([value, filter, output_shape], name,
                    "conv2d_transpose") as name:
    value = ops.convert_to_tensor(value, name="value")
    filter = ops.convert_to_tensor(filter, name="filter")
    # The op is the input-gradient of a forward conv2d, so value's channel
    # dim must line up with the filter's in_channels (last) dimension.
    if not value.get_shape()[3].is_compatible_with(filter.get_shape()[3]):
      raise ValueError(
          "input channels does not match filter's input channels, "
          "{} != {}".format(value.get_shape()[3], filter.get_shape()[3]))
    output_shape_ = ops.convert_to_tensor(output_shape, name="output_shape")
    # output_shape must be a length-4 vector (NHWC of the produced tensor).
    if not output_shape_.get_shape().is_compatible_with(tensor_shape.vector(4)):
      raise ValueError("output_shape must have shape (4,), got {}"
                       .format(output_shape_.get_shape()))
    # When output_shape is given statically, also check its channel count
    # against the filter's output_channels dimension.
    if isinstance(output_shape, (list, np.ndarray)):
      # output_shape's shape should be == [4] if reached this point.
      if not filter.get_shape()[2].is_compatible_with(output_shape[3]):
        raise ValueError(
            "output_shape does not match filter's output channels, "
            "{} != {}".format(output_shape[3], filter.get_shape()[2]))
    if padding != "VALID" and padding != "SAME":
      raise ValueError("padding must be either VALID or SAME:"
                       " {}".format(padding))
    # conv2d_transpose(value) == d/d(input) conv2d(input) at out_backprop=value.
    return gen_nn_ops.conv2d_backprop_input(input_sizes=output_shape_,
                                            filter=filter,
                                            out_backprop=value,
                                            strides=strides,
                                            padding=padding,
                                            name=name)
# pylint: disable=protected-access
def bias_add(value, bias, data_format=None, name=None):
  """Adds `bias` to `value`.

  This is (mostly) a special case of `tf.add` where `bias` is restricted to
  1-D.  Broadcasting is supported, so `value` may have any number of
  dimensions.  Unlike `tf.add`, the type of `bias` may differ from `value`
  when both types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, or `complex64`.
    bias: A 1-D `Tensor` with size matching the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    data_format: A string. 'NHWC' and 'NCHW" are supported.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.op_scope([value, bias], name, "BiasAdd") as name:
    input_tensor = ops.convert_to_tensor(value, name="input")
    # Coerce bias to the input's dtype so the generated op sees equal types.
    bias_tensor = ops.convert_to_tensor(bias, dtype=input_tensor.dtype,
                                        name="bias")
    return gen_nn_ops._bias_add(input_tensor, bias_tensor,
                                data_format=data_format, name=name)
# Shape functions for the BiasAdd forward and gradient ops.
ops.RegisterShape("BiasAdd")(common_shapes.bias_add_shape)
ops.RegisterShape("BiasAddGrad")(common_shapes.bias_add_grad_shape)
# pylint: disable=protected-access
def bias_add_v1(value, bias, name=None):
  """Adds `bias` to `value` (deprecated; prefer `bias_add`).

  This is a deprecated version of bias_add that will soon be removed.

  This is (mostly) a special case of `tf.add` where `bias` is restricted to
  1-D.  Broadcasting is supported, so `value` may have any number of
  dimensions.  Unlike `tf.add`, the type of `bias` may differ from `value`
  when both types are quantized.

  Args:
    value: A `Tensor` with type `float`, `double`, `int64`, `int32`, `uint8`,
      `int16`, `int8`, or `complex64`.
    bias: A 1-D `Tensor` with size matching the last dimension of `value`.
      Must be the same type as `value` unless `value` is a quantized type,
      in which case a different quantized type may be used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `value`.
  """
  with ops.op_scope([value, bias], name, "BiasAddV1") as name:
    input_tensor = ops.convert_to_tensor(value, name="input")
    # Coerce bias to the input's dtype so the generated op sees equal types.
    bias_tensor = ops.convert_to_tensor(bias, dtype=input_tensor.dtype,
                                        name="bias")
    return gen_nn_ops._bias_add_v1(input_tensor, bias_tensor, name=name)
# Shape functions for the deprecated V1 BiasAdd ops.
ops.RegisterShape("BiasAddV1")(common_shapes.bias_add_shape)
ops.RegisterShape("BiasAddGradV1")(common_shapes.bias_add_grad_shape)
def relu6(features, name=None):
  """Computes Rectified Linear 6: `min(max(features, 0), 6)`.

  Args:
    features: A `Tensor` with type `float`, `double`, `int32`, `int64`,
      `uint8`, `int16`, or `int8`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `features`.
  """
  with ops.op_scope([features], name, "Relu6") as name:
    features_tensor = ops.convert_to_tensor(features, name="features")
    return gen_nn_ops._relu6(features_tensor, name=name)
def softmax_cross_entropy_with_logits(logits, labels, name=None):
  """Computes softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which
  the classes are mutually exclusive (each entry is in exactly one class).
  For example, each CIFAR-10 image is labeled with one and only one label:
  an image can be a dog or a truck, but not both.

  **NOTE:** While the classes are mutually exclusive, their probabilities
  need not be.  If using exclusive `labels` (wherein one and only one class
  is true at a time), see `sparse_softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a
  `softmax` on `logits` internally for efficiency.  Do not call this op
  with the output of `softmax`, as it will produce incorrect results.

  `logits` and `labels` must have the same shape `[batch_size, num_classes]`
  and the same dtype (either `float32` or `float64`).

  Args:
    logits: Unscaled log probabilities.
    labels: Each row `labels[i]` must be a valid probability distribution or
      all zeros.  If all zeros, the corresponding loss will be `0`,
      regardless of the contents of `logits[i]`.
    name: A name for the operation (optional).

  Returns:
    A 1-D `Tensor` of length `batch_size` of the same type as `logits` with
    the softmax cross entropy loss.
  """
  # The fused op also emits a gradients tensor; it is consumed by
  # _CrossEntropyGrad() in nn_grad, not here.
  loss, _unused_backprop = gen_nn_ops._softmax_cross_entropy_with_logits(
      logits, labels, name=name)
  return loss
def sparse_softmax_cross_entropy_with_logits(logits, labels, name=None):
  """Computes sparse softmax cross entropy between `logits` and `labels`.

  Measures the probability error in discrete classification tasks in which
  the classes are mutually exclusive (each entry is in exactly one class).
  For example, each CIFAR-10 image is labeled with one and only one label:
  an image can be a dog or a truck, but not both.

  **NOTE:** For this operation, the probability of a given label is
  considered exclusive.  That is, soft classes are not allowed, and the
  `labels` vector must provide a single specific index for the true class
  for each row of `logits` (each minibatch entry).  For soft softmax
  classification with a probability distribution for each entry, see
  `softmax_cross_entropy_with_logits`.

  **WARNING:** This op expects unscaled logits, since it performs a
  `softmax` on `logits` internally for efficiency.  Do not call this op
  with the output of `softmax`, as it will produce incorrect results.

  `logits` must have the shape `[batch_size, num_classes]` and dtype
  `float32` or `float64`.  `labels` must have the shape `[batch_size]` and
  dtype `int64`.

  Args:
    logits: Unscaled log probabilities.
    labels: Each entry `labels[i]` must be an index in `[0, num_classes)` or
      `-1`.  If `-1`, the corresponding loss will be `0`, regardless
      of the contents of `logits[i]`.
    name: A name for the operation (optional).

  Returns:
    A 1-D `Tensor` of length `batch_size` of the same type as `logits` with
    the softmax cross entropy loss.
  """
  # The fused op also emits a gradients tensor; it is consumed by
  # _CrossEntropyGrad() in nn_grad, not here.
  loss, _unused_backprop = (
      gen_nn_ops._sparse_softmax_cross_entropy_with_logits(
          logits, labels, name=name))
  return loss
@ops.RegisterShape("SparseSoftmaxCrossEntropyWithLogits")
def _SparseSoftmaxCrossEntropyWithLogitsShape(op):
  """Shape function for SparseSoftmaxCrossEntropyWithLogits op."""
  # logits must be rank-2 [batch_size, num_classes]; labels must be a
  # vector of length batch_size.
  input_shape = op.inputs[0].get_shape().with_rank(2)
  batch_size = input_shape[0]
  op.inputs[1].get_shape().merge_with(tensor_shape.vector(batch_size))
  # Outputs: per-example loss vector and the backprop matrix.
  return [tensor_shape.vector(batch_size.value), input_shape]
@ops.RegisterShape("SoftmaxCrossEntropyWithLogits")
def _SoftmaxCrossEntropyWithLogitsShape(op):
  """Shape function for SoftmaxCrossEntropyWithLogits op."""
  # logits and labels must agree and be rank-2 [batch_size, num_classes].
  merged = op.inputs[0].get_shape().merge_with(op.inputs[1].get_shape())
  input_shape = merged.with_rank(2)
  batch_size = input_shape[0]
  # Outputs: per-example loss vector and the backprop matrix.
  return [tensor_shape.vector(batch_size.value), input_shape]
def avg_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the average pooling on the input.

  Each entry in `output` is the mean of the corresponding size `ksize`
  window in `value`.

  Args:
    value: A 4-D `Tensor` of shape `[batch, height, width, channels]` and
      type `float32`, `float64`, `qint8`, `quint8`, or `qint32`.
    ksize: A list of ints that has length >= 4.  The size of the window for
      each dimension of the input tensor.
    strides: A list of ints that has length >= 4.  The stride of the sliding
      window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    data_format: A string. 'NHWC' and 'NCHW" are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with the same type as `value`.  The average pooled output
    tensor.
  """
  with ops.op_scope([value], name, "AvgPool") as name:
    input_tensor = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops._avg_pool(input_tensor,
                                ksize=ksize,
                                strides=strides,
                                padding=padding,
                                data_format=data_format,
                                name=name)
def max_pool(value, ksize, strides, padding, data_format="NHWC", name=None):
  """Performs the max pooling on the input.

  Args:
    value: A 4-D `Tensor` with shape `[batch, height, width, channels]` and
      type `tf.float32`.
    ksize: A list of ints that has length >= 4.  The size of the window for
      each dimension of the input tensor.
    strides: A list of ints that has length >= 4.  The stride of the sliding
      window for each dimension of the input tensor.
    padding: A string, either `'VALID'` or `'SAME'`. The padding algorithm.
    data_format: A string. 'NHWC' and 'NCHW" are supported.
    name: Optional name for the operation.

  Returns:
    A `Tensor` with type `tf.float32`.  The max pooled output tensor.
  """
  with ops.op_scope([value], name, "MaxPool") as name:
    input_tensor = ops.convert_to_tensor(value, name="input")
    return gen_nn_ops._max_pool(input_tensor,
                                ksize=ksize,
                                strides=strides,
                                padding=padding,
                                data_format=data_format,
                                name=name)
# Elementwise activations preserve the shape of their input.
ops.RegisterShape("Relu")(common_shapes.unchanged_shape)
ops.RegisterShape("Relu6")(common_shapes.unchanged_shape)
ops.RegisterShape("Elu")(common_shapes.unchanged_shape)
ops.RegisterShape("Softplus")(common_shapes.unchanged_shape)
ops.RegisterShape("Softsign")(common_shapes.unchanged_shape)
@ops.RegisterShape("ReluGrad")
@ops.RegisterShape("Relu6Grad")
@ops.RegisterShape("EluGrad")
@ops.RegisterShape("SoftplusGrad")
@ops.RegisterShape("SoftsignGrad")
def _BinaryElementwiseShape(op):
  """Returns same shape as both inputs to op.

  Args:
    op: Input operation.

  Returns:
    Shape of both inputs to `op`.
  """
  lhs_shape = op.inputs[0].get_shape()
  rhs_shape = op.inputs[1].get_shape()
  return [lhs_shape.merge_with(rhs_shape)]
# L2Loss reduces to a scalar; LRN preserves its rank-4 input shape.
ops.RegisterShape("L2Loss")(common_shapes.scalar_shape)
ops.RegisterShape("LRN")(common_shapes.unchanged_shape_with_rank(4))
@ops.RegisterShape("LRNGrad")
def _LRNGradShape(op):
  """Shape function for LRNGrad op."""
  # All three inputs (incoming grads, original input image, original output
  # image) must be rank-4 and mutually compatible.
  merged = op.inputs[0].get_shape().with_rank(4)
  merged = merged.merge_with(op.inputs[1].get_shape().with_rank(4))
  merged = merged.merge_with(op.inputs[2].get_shape().with_rank(4))
  return [merged]
# (Log)Softmax operates on rank-2 [batch, classes] inputs, shape-preserving.
ops.RegisterShape("Softmax")(
    common_shapes.unchanged_shape_with_rank(2))
ops.RegisterShape("LogSoftmax")(
    common_shapes.unchanged_shape_with_rank(2))
@ops.RegisterShape("InTopK")
def _InTopKShape(op):
  """Shape function for InTopK op."""
  # predictions: [batch, classes]; targets: [batch].  The output is a
  # boolean vector of length batch.
  predictions_shape = op.inputs[0].get_shape().with_rank(2)
  targets_shape = op.inputs[1].get_shape().with_rank(1)
  batch = predictions_shape[0].merge_with(targets_shape[0])
  return [tensor_shape.vector(batch.value)]
@ops.RegisterShape("TopK")
@ops.RegisterShape("TopKV2")
def _TopKShape(op):
  """Shape function for TopK and TopKV2 ops."""
  input_shape = op.inputs[0].get_shape().with_rank_at_least(1)
  # TopKV2 takes k as a second input tensor; TopK carries it as an attr.
  if len(op.inputs) >= 2:
    k = tensor_util.constant_value(op.inputs[1])
  else:
    k = op.get_attr("k")
  last = input_shape[-1].value
  if last is not None and k is not None and last < k:
    raise ValueError("input.shape %s must have last dimension >= k = %d" %
                     (input_shape, k))
  # Replace the last dimension with k (may be None if k is dynamic).
  output_shape = input_shape[:-1].concatenate([k])
  # Both outputs (values and indices) share the same shape.
  return [output_shape, output_shape]
@ops.RegisterShape("BatchNormWithGlobalNormalization")
def _BatchNormShape(op):
  """Shape function for BatchNormWithGlobalNormalization op."""
  input_shape = op.inputs[0].get_shape().with_rank(4)
  # mean, variance, beta and gamma (inputs 1-4) are all vectors whose
  # length must match the input's channel (last) dimension.  Rank checks
  # run first for all parameters, then the channel merges, preserving the
  # original error ordering.
  param_shapes = [t.get_shape().with_rank(1) for t in op.inputs[1:5]]
  for param_shape in param_shapes:
    param_shape[0].merge_with(input_shape[3])
  return [input_shape]
@ops.RegisterShape("BatchNormWithGlobalNormalizationGrad")
def _BatchNormGradShape(op):
  """Shape function for BatchNormWithGlobalNormalizationGrad op."""
  input_shape = op.inputs[0].get_shape().with_rank(4)
  mean_shape = op.inputs[1].get_shape().with_rank(1)
  var_shape = op.inputs[2].get_shape().with_rank(1)
  beta_shape = op.inputs[3].get_shape().with_rank(1)
  out_backprop_shape = op.inputs[4].get_shape().with_rank(4)
  # The incoming gradient must match the original input's shape.
  input_shape = input_shape.merge_with(out_backprop_shape)
  # The channel dimension ties together the input and every 1-D parameter.
  vector_dim = input_shape[3]
  vector_dim = vector_dim.merge_with(mean_shape[0])
  vector_dim = vector_dim.merge_with(var_shape[0])
  vector_dim = vector_dim.merge_with(beta_shape[0])
  # Outputs: dx (rank-4) plus four channel-length parameter gradients.
  return [input_shape] + ([tensor_shape.vector(vector_dim)] * 4)
# Convolution and pooling forward ops use the shared shape helpers.
ops.RegisterShape("Conv2D")(common_shapes.conv2d_shape)
ops.RegisterShape("DepthwiseConv2dNative")(
    common_shapes.depthwise_conv2d_native_shape)
ops.RegisterShape("AvgPool")(common_shapes.avg_pool_shape)
ops.RegisterShape("MaxPool")(common_shapes.max_pool_shape)
@ops.RegisterShape("MaxPoolWithArgmax")
def _MaxPoolWithArgMaxShape(op):
  """Shape function for MaxPoolWithArgmax op."""
  # Both outputs (pooled values and argmax indices) share the pooled shape.
  pooled = common_shapes.max_pool_shape(op)
  return pooled + pooled
@ops.RegisterShape("AvgPoolGrad")
def _AvgPoolGradShape(op):
  """Shape function for the AvgPoolGrad op."""
  # Input 0 carries the (possibly constant) shape of the original input.
  orig_input_shape = tensor_util.constant_value(op.inputs[0])
  if orig_input_shape is None:
    # NOTE(mrry): We could in principle work out the shape from the
    # gradients and the attrs, but if we do not know orig_input_shape
    # statically, then we are unlikely to know the shape of the
    # gradients either.
    return [tensor_shape.unknown_shape(ndims=4)]
  return [tensor_shape.TensorShape(orig_input_shape.tolist())]
@ops.RegisterShape("Conv2DBackpropFilter")
def _Conv2DBackpropFilterShape(op):
  """Shape function for the Conv2DBackpropFilter op."""
  # Input 1 carries the (possibly constant) shape of the original filter.
  filter_shape = tensor_util.constant_value(op.inputs[1])
  if filter_shape is None:
    # NOTE(mrry): We could in principle work out the shape from the
    # gradients and the attrs, but if we do not know filter_shape
    # statically, then we are unlikely to know the shape of the
    # gradients either.
    return [tensor_shape.unknown_shape(ndims=4)]
  return [tensor_shape.TensorShape(filter_shape.tolist())]
@ops.RegisterShape("Conv2DBackpropInput")
def _Conv2DBackpropInputShape(op):
  """Shape function for the Conv2DBackpropInput op."""
  # Input 0 carries the (possibly constant) shape of the original input.
  input_shape = tensor_util.constant_value(op.inputs[0])
  if input_shape is None:
    # NOTE(mrry): We could in principle work out the shape from the
    # gradients and the attrs, but if we do not know input_shape
    # statically, then we are unlikely to know the shape of the
    # gradients either.
    return [tensor_shape.unknown_shape(ndims=4)]
  return [tensor_shape.TensorShape(input_shape.tolist())]
@ops.RegisterShape("DepthwiseConv2dNativeBackpropFilter")
def _DepthwiseConv2dNativeBackpropFilterShape(op):
  """Shape function for the DepthwiseConv2dNativeBackpropFilter op."""
  # Input 1 carries the (possibly constant) shape of the original filter.
  filter_shape = tensor_util.constant_value(op.inputs[1])
  if filter_shape is None:
    # Without a static filter shape only the rank (4) is known.
    return [tensor_shape.unknown_shape(ndims=4)]
  return [tensor_shape.TensorShape(filter_shape.tolist())]
@ops.RegisterShape("DepthwiseConv2dNativeBackpropInput")
def _DepthwiseConv2dNativeBackpropInputShape(op):
  """Shape function for the DepthwiseConv2dNativeBackpropInput op."""
  # Input 0 carries the (possibly constant) shape of the original input.
  input_shape = tensor_util.constant_value(op.inputs[0])
  if input_shape is None:
    # Without a static input shape only the rank (4) is known.
    return [tensor_shape.unknown_shape(ndims=4)]
  return [tensor_shape.TensorShape(input_shape.tolist())]
@ops.RegisterShape("MaxPoolGrad")
@ops.RegisterShape("MaxPoolGradWithArgmax")
def _MaxPoolGradShape(op):
  """Shape function for the MaxPoolGrad op."""
  # The gradient has the shape of the original (rank-4) pooling input.
  return [op.inputs[0].get_shape().with_rank(4)]
@ops.RegisterStatistics("Conv2D", "flops")
def _calc_conv_flops(graph, node):
  """Calculates the compute resources needed for Conv2D."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                           node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                            node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  # Each output element is a dot product over a
  # filter_height x filter_width x in_depth window; a multiply-add counts
  # as two flops.
  flops_per_output = (int(filter_shape[0]) * int(filter_shape[1]) *
                      int(filter_shape[2]) * 2)
  output_count = np.prod(output_shape.as_list())
  return ops.OpStats("flops", output_count * flops_per_output)
@ops.RegisterStatistics("Conv2D", "weight_parameters")
def _calc_conv_weight_params(graph, node):
  """Calculates the on-disk size of the weights for Conv2D."""
  # Input/output shapes are fetched only to assert they are fully defined.
  input_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                           node.input[0])
  input_shape.assert_is_fully_defined()
  filter_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                            node.input[1])
  filter_shape.assert_is_fully_defined()
  output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
  output_shape.assert_is_fully_defined()
  # The weight count is the number of elements in the HWIO filter tensor.
  height = int(filter_shape[0])
  width = int(filter_shape[1])
  in_depth = int(filter_shape[2])
  out_depth = int(filter_shape[3])
  return ops.OpStats("weight_parameters",
                     height * width * in_depth * out_depth)
@ops.RegisterStatistics("BiasAdd", "flops")
def _calc_bias_add_flops(graph, node):
  """Calculates the computing needed for BiasAdd."""
  input_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                           node.input[0])
  input_shape.assert_is_fully_defined()
  # One addition per input element.
  return ops.OpStats("flops", np.prod(input_shape.as_list()))
@ops.RegisterStatistics("BiasAdd", "weight_parameters")
def _calc_bias_add_weight_params(graph, node):
  """Calculates the on-disk weight parameters for BiasAdd."""
  bias_shape = graph_util.tensor_shape_from_node_def_name(graph,
                                                          node.input[1])
  bias_shape.assert_is_fully_defined()
  # The bias vector itself is the only stored weight.
  return ops.OpStats("weight_parameters", np.prod(bias_shape.as_list()))
def xw_plus_b(x, weights, biases, name=None):  # pylint: disable=invalid-name
  """Computes matmul(x, weights) + biases.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "xw_plus_b" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.op_scope([x, weights, biases], name, "xw_plus_b") as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    # Delegate the addition to bias_add so quantized types are handled too.
    return bias_add(math_ops.matmul(x, weights), biases, name=name)
def xw_plus_b_v1(x, weights, biases, name=None):  # pylint: disable=invalid-name
  """Computes matmul(x, weights) + biases.

  This is a deprecated version that will soon be removed; prefer
  `xw_plus_b`.

  Args:
    x: a 2D tensor.  Dimensions typically: batch, in_units
    weights: a 2D tensor.  Dimensions typically: in_units, out_units
    biases: a 1D tensor.  Dimensions: out_units
    name: A name for the operation (optional).  If not specified
      "xw_plus_b_v1" is used.

  Returns:
    A 2-D Tensor computing matmul(x, weights) + biases.
    Dimensions typically: batch, out_units.
  """
  with ops.op_scope([x, weights, biases], name, "xw_plus_b_v1") as name:
    x = ops.convert_to_tensor(x, name="x")
    weights = ops.convert_to_tensor(weights, name="weights")
    biases = ops.convert_to_tensor(biases, name="biases")
    # Delegate the addition to the deprecated bias_add_v1 kernel.
    return bias_add_v1(math_ops.matmul(x, weights), biases, name=name)
# pylint: disable=invalid-name
def dropout(x, keep_prob, noise_shape=None, seed=None, name=None):
  """Computes dropout.

  With probability `keep_prob`, outputs the input element scaled up by
  `1 / keep_prob`, otherwise outputs `0`.  The scaling is so that the
  expected sum is unchanged.

  By default, each element is kept or dropped independently.  If
  `noise_shape` is specified, it must be
  [broadcastable](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
  to the shape of `x`, and only dimensions with
  `noise_shape[i] == shape(x)[i]` will make independent decisions.  For
  example, if `shape(x) = [k, l, m, n]` and `noise_shape = [k, 1, 1, n]`,
  each batch and channel component will be kept independently and each row
  and column will be kept or not kept together.

  Args:
    x: A tensor.
    keep_prob: A scalar `Tensor` with the same type as x. The probability
      that each element is kept.
    noise_shape: A 1-D `Tensor` of type `int32`, representing the
      shape for randomly generated keep/drop flags.
    seed: A Python integer. Used to create random seeds. See
      [`set_random_seed`](../../api_docs/python/constant_op.md#set_random_seed)
      for behavior.
    name: A name for this operation (optional).

  Returns:
    A Tensor of the same shape of `x`.

  Raises:
    ValueError: If `keep_prob` is not in `(0, 1]`.
  """
  with ops.op_scope([x], name, "dropout") as name:
    x = ops.convert_to_tensor(x, name="x")
    # Validate a Python float eagerly; a tensor keep_prob is only checked
    # for scalar shape below.
    if isinstance(keep_prob, float) and not 0 < keep_prob <= 1:
      raise ValueError("keep_prob must be a scalar tensor or a float in the "
                       "range (0, 1], got %g" % keep_prob)
    keep_prob = ops.convert_to_tensor(
        keep_prob, dtype=x.dtype, name="keep_prob")
    keep_prob.get_shape().assert_is_compatible_with(tensor_shape.scalar())
    noise_shape = noise_shape if noise_shape is not None else array_ops.shape(x)
    # uniform [keep_prob, 1.0 + keep_prob)
    random_tensor = keep_prob
    random_tensor += random_ops.random_uniform(
        noise_shape, seed=seed, dtype=x.dtype)
    # 0. if [keep_prob, 1.0) and 1. if [1.0, 1.0 + keep_prob)
    binary_tensor = math_ops.floor(random_tensor)
    # Scale kept elements by 1/keep_prob so the expected sum is unchanged.
    ret = x * math_ops.inv(keep_prob) * binary_tensor
    # The mask broadcast cannot change the shape; pin it for shape inference.
    ret.set_shape(x.get_shape())
    return ret
def top_k(input, k=1, sorted=True, name=None):
  """Finds values and indices of the `k` largest entries for the last dimension.

  For a rank-1 `input`, `values[j]` is the `j`-th largest entry of `input`
  and `indices[j]` is its position.  For higher ranks the top `k` entries
  are computed along each slice of the last dimension, so

      values.shape = indices.shape = input.shape[:-1] + [k]

  Ties are broken in favor of the lower index.

  Args:
    input: 1-D or higher `Tensor` with last dimension at least `k`.
    k: 0-D `int32` `Tensor`.  Number of top elements to look for along the
      last dimension (along each row for matrices).
    sorted: If true the resulting `k` elements will be sorted by the values
      in descending order.
    name: Optional name for the operation.

  Returns:
    values: The `k` largest elements along each last dimensional slice.
    indices: The indices of `values` within the last dimension of `input`.
  """
  # TopKV2 accepts k as a tensor, so both Python ints and Tensors work.
  return gen_nn_ops._top_kv2(input, k=k, sorted=sorted, name=name)
# pylint: enable=invalid-name
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2013 Robin Stenvi
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# Divide the flickr geojson files into a structure more appropriate for us.
import json, pprint, sys, os
# Converts a string to a valid, ASCII-only file name.
def getValidFileName(name):
    # Lower-case, turn spaces into underscores and commas into dashes,
    # then drop any characters that cannot be encoded as ASCII.
    cleaned = name.lower().replace(" ", "_").replace(",", "-")
    return cleaned.encode("ascii", "ignore")
def findStringFromList(string, List):
    # Return the first element of List that `string` ends with, or "" if
    # none matches.  (Fixed: the original built an unused `rets` list.)
    for candidate in List:
        if string.endswith(candidate):
            return candidate
    return ""
def divideJson(Json, divisors=[], Split="NOSPLIT", useSplits=[], compress=True):
ret = {}
divisors.append("unknown")
for div in divisors:
if div not in ret:
ret[div] = {
"type" : Json.get("type", ""),
"name" : Json.get("name", ""),
"description" : Json.get("description", ""),
"license" : Json.get("license", ""),
"features" : []
}
if compress == True:
if 'description' in ret[div]: del ret[div]['description']
for feature in Json.get("features", []):
if "properties" in feature and "label" in feature["properties"]:
if compress == True:
if "id" in feature: del feature["id"]
if "properties" in feature:
if "woe_id" in feature["properties"]:
del feature["properties"]["woe_id"]
if "place_id" in feature["properties"]:
del feature["properties"]["place_id"]
if "place_type_id" in feature["properties"]:
del feature["properties"]["place_type_id"]
if "geometry" in feature:
if "created" in feature["geometry"]:
del feature["geometry"]["created"]
if "alpha" in feature["geometry"]:
del feature["geometry"]["alpha"]
if "points" in feature["geometry"]:
del feature["geometry"]["points"]
if "edges" in feature["geometry"]:
del feature["geometry"]["edges"]
if "is_donuthole" in feature["geometry"]:
del feature["geometry"]["is_donuthole"]
if "link" in feature["geometry"]:
del feature["geometry"]["link"]
name = feature["properties"]["label"]
label = getLabel(name, Split, useSplits)
divisor = findStringFromList(label, divisors)
if divisor != "":
ret[divisor]["features"].append(feature)
else:
ret["unknown"]["features"].append(feature)
else:
print "Feature " + feature.get("id", "unknown") + " does not have correct format"
sys.exit(0)
return ret
def getLabel(name, Split, useSplits):
	"""Extract a label from a compound place name.

	Splits {name} on {Split} and concatenates the parts selected by the
	indices in {useSplits}. A single-part name is returned unchanged.
	When a requested index is out of range, the whole (joined) name is
	returned instead -- except for the known-incomplete "Rotorua
	District" entry, which gets its missing region spliced in first.
	"""
	parts = name.split(Split)
	if len(parts) == 1:
		return parts[0]
	label = ""
	for idx in useSplits:
		if idx >= len(parts):
			if parts[0] != "Rotorua District":
				return "".join(parts)
			# The flickr dataset seems to be missing this region on the
			# county dataset; patch it in so the index is valid again.
			parts.insert(1, "Bay of Plenty")
		label += parts[idx]
	return label
def getLabels(Json, Split="NOSPLIT", useSplits=[]):
	"""Collect the extracted label of every well-formed feature in {Json}."""
	return [
		getLabel(feature["properties"]["label"], Split, useSplits)
		for feature in Json.get("features", [])
		if "properties" in feature and "label" in feature["properties"]
	]
def addPaths(Json, append, Split="NOSPLIT", useSplits=[]):
for feature in Json.get("features", []):
if "properties" in feature:
label = getLabel(feature["properties"].get("label", "unknown"), Split,\
useSplits)
feature["properties"]["next"] = getValidFileName(label) + "_" + append
else:
print "Proprties is non-existent in " + feature.get("id", "Unknown")
return Json
# Print json to file
def printJson(Json, File, pretty=False):
	"""Serialize {Json} to the path {File}.

	pretty: when True, write sorted, indented output for human reading.
	Uses a context manager so the handle is flushed and closed even on
	error -- the old version leaked the open file object.
	"""
	with open(File, "w") as f:
		if pretty:
			f.write(json.dumps(Json, sort_keys=True, indent=4, separators=(',', ':')))
		else:
			f.write(json.dumps(Json))
# Read in a file and return the result as JSON
def readFileAsJson(File):
	"""Read {File} and return its contents parsed as JSON.

	Exits the program with an explanatory message when the file is
	missing or does not parse (both treated as "wrong dataset version"
	for this script).
	"""
	if os.path.isfile(File) == False:
		print str(File) + " doesn't exist, have you downlaoded the Flickr dataset"
		print "http://code.flickr.net/tag/shapefile/"
		print "Download version 2.0.1(important) and extract it in this directory"
		sys.exit(0)
	with open(File) as f:
		contents = f.read()
	try:
		Json = json.loads(contents)
	except ValueError, v:
		print "ValueError: ", v
		print "Make sure it is version 2.0.1 of the dataset"
		sys.exit(0)
	# NOTE(review): a non-dict top level is only warned about, not
	# treated as fatal -- the parsed value is still returned.
	if type(Json) != dict:
		print File + " is not a valid Json file"
	return Json
# Make sure that all the links work as expected
def sanityCheck(File, Dir):
	"""Recursively verify that {File} (under {Dir}) exists and that every
	"next" link it references also passes the same check.

	Exits the program on the first missing file or malformed document.
	"""
	realFile = os.path.join(Dir, File)
	if os.path.exists(realFile):
		Json = readFileAsJson(realFile)
		if "features" not in Json:
			print "File " + realFile + " is missing features"
			sys.exit(0)
		for feature in Json["features"]:
			if "properties" not in feature:
				print "File " + realFile + " is missing property key"
				sys.exit(0)
			if "next" in feature["properties"]:
				# Follow the link; child files always live in "sources".
				sanityCheck(feature["properties"]["next"], "sources")
	else:
		print "File " + realFile + " does not exist"
		sys.exit(0)
if __name__ == "__main__":
# Read in all countries
print "Reading in list of countries"
countriesJson = readFileAsJson("flickr_shapes_countries.geojson")
# Get all possible countries
countries = getLabels(countriesJson)
# Add a path to each specific country file that we haven't created yet
countriesJson = addPaths(countriesJson, "regions.geojson")
if os.path.isdir("sources") == False:
os.mkdir("sources")
# Write out new file of countries
print "Writing new file with countries\n"
printJson(countriesJson, os.path.join("sources", "countries.geojson"))
# Read in all regions
print "Reading in list of regions"
regionsJson = readFileAsJson("flickr_shapes_regions.geojson")
# Get a label, 0 is region, 2 is country, needed to filter
regions = getLabels(regionsJson, ",", [0, 2])
# Add a path to each county
regionsJson = addPaths(regionsJson, "counties.geojson", ",", [0, 2])
# Divide the regions based on the country
regionsDivided = divideJson(regionsJson, countries)
# Print out new region files
count = 0
for key in regionsDivided.keys():
printJson(regionsDivided[key], os.path.join("sources", getValidFileName(key) +\
"_regions.geojson"))
count += 1
print "Wrote " + str(count) + " new files with regions\n"
print "Reading in list of counties"
countiesJson = readFileAsJson("flickr_shapes_counties.geojson")
countiesDivided = divideJson(countiesJson, regions, ",", [0,1,2])
print "Reading in list of localities"
localitiesJson = readFileAsJson("flickr_shapes_localities.geojson")
localitiesDivided = divideJson(localitiesJson, regions, ",", [0,1,2])
print "Reading in list of neighbourhoods"
neighbourhoodsJson = readFileAsJson("flickr_shapes_neighbourhoods.geojson")
neighbourhoodsDivided = divideJson(neighbourhoodsJson, regions, ",", [0,1,2,4])
for key in localitiesDivided.keys():
for feature in localitiesDivided[key].get("features", []):
countiesDivided[key]["features"].append(feature)
for feature in neighbourhoodsDivided.get("features", []):
countiesDivided[key]["features"].append(feature)
count = 0
for key in countiesDivided.keys():
printJson(countiesDivided[key], os.path.join("sources", getValidFileName(key) +\
"_counties.geojson"))
count += 1
print "Wrote " + str(count) + " new files with counties\n"
# Final sanity check to make sure it was printed correctly
print "Running checks to see if everything is correct\n"
sanityCheck("countries.geojson", "sources")
print "New files written under 'sources, you can now delete the downloaded files"
| |
# coding=utf-8
# Copyright 2019 Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""List of core actions / action sets supported by the gfootball environment."""
# ***** List of core actions *****
# Only add new ones, do not reorder so the numbering do not change.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from gfootball_engine import e_BackendAction
import numpy
from six.moves import range
class CoreAction(object):
  """A single low-level action understood by the game backend.

  Instances compare equal by name and order by backend action id, so
  action lists can be sorted, hashed and used as dictionary keys.
  """

  def __init__(self, backend_action, name, sticky=False, directional=False):
    self._backend_action = backend_action
    self._name = name
    self._sticky = sticky
    self._directional = directional

  def is_in_actionset(self, config):
    """True if this action belongs to the action set selected by config."""
    return self in get_action_set(config)

  def _check_comparable(self, other):
    # Guard against comparing against an unrelated object type.
    assert set(other.__dict__) == set(self.__dict__)

  def __eq__(self, other):
    self._check_comparable(other)
    return self._name == other._name

  def __ne__(self, other):
    return not self.__eq__(other)

  def __lt__(self, other):
    self._check_comparable(other)
    return self._backend_action < other._backend_action

  def __le__(self, other):
    self._check_comparable(other)
    return self._backend_action <= other._backend_action

  def __gt__(self, other):
    self._check_comparable(other)
    return self._backend_action > other._backend_action

  def __ge__(self, other):
    self._check_comparable(other)
    return self._backend_action >= other._backend_action

  def __hash__(self):
    return self._backend_action

  def __repr__(self):
    return self._name
# One module-level singleton per backend action. Sticky actions stay
# active until their matching release_* action; directional actions
# steer the controlled player.
action_idle = CoreAction(e_BackendAction.idle, "idle")
action_builtin_ai = CoreAction(e_BackendAction.builtin_ai, "builtin_ai")
action_left = CoreAction(
    e_BackendAction.left, "left", sticky=True, directional=True)
action_top_left = CoreAction(
    e_BackendAction.top_left, "top_left", sticky=True, directional=True)
action_top = CoreAction(
    e_BackendAction.top, "top", sticky=True, directional=True)
action_top_right = CoreAction(
    e_BackendAction.top_right, "top_right", sticky=True, directional=True)
action_right = CoreAction(
    e_BackendAction.right, "right", sticky=True, directional=True)
action_bottom_right = CoreAction(
    e_BackendAction.bottom_right, "bottom_right", sticky=True, directional=True)
action_bottom = CoreAction(
    e_BackendAction.bottom, "bottom", sticky=True, directional=True)
action_bottom_left = CoreAction(
    e_BackendAction.bottom_left, "bottom_left", sticky=True, directional=True)
action_long_pass = CoreAction(e_BackendAction.long_pass, "long_pass")
action_high_pass = CoreAction(e_BackendAction.high_pass, "high_pass")
action_short_pass = CoreAction(e_BackendAction.short_pass, "short_pass")
action_shot = CoreAction(e_BackendAction.shot, "shot")
action_keeper_rush = CoreAction(
    e_BackendAction.keeper_rush, "keeper_rush", sticky=True)
action_sliding = CoreAction(e_BackendAction.sliding, "sliding")
action_pressure = CoreAction(
    e_BackendAction.pressure, "pressure", sticky=True)
action_team_pressure = CoreAction(
    e_BackendAction.team_pressure, "team_pressure", sticky=True)
action_switch = CoreAction(e_BackendAction.switch, "switch")
action_sprint = CoreAction(e_BackendAction.sprint, "sprint", sticky=True)
action_dribble = CoreAction(
    e_BackendAction.dribble, "dribble", sticky=True)
# release_direction cancels whichever directional sticky action is active.
action_release_direction = CoreAction(
    e_BackendAction.release_direction, "release_direction", directional=True)
action_release_long_pass = CoreAction(e_BackendAction.release_long_pass,
                                      "release_long_pass")
action_release_high_pass = CoreAction(e_BackendAction.release_high_pass,
                                      "release_high_pass")
action_release_short_pass = CoreAction(e_BackendAction.release_short_pass,
                                       "release_short_pass")
action_release_shot = CoreAction(e_BackendAction.release_shot, "release_shot")
action_release_keeper_rush = CoreAction(e_BackendAction.release_keeper_rush,
                                        "release_keeper_rush")
action_release_sliding = CoreAction(e_BackendAction.release_sliding,
                                    "release_sliding")
action_release_pressure = CoreAction(e_BackendAction.release_pressure,
                                     "release_pressure")
action_release_team_pressure = CoreAction(e_BackendAction.release_team_pressure,
                                          "release_team_pressure")
action_release_switch = CoreAction(e_BackendAction.release_switch,
                                   "release_switch")
action_release_sprint = CoreAction(e_BackendAction.release_sprint,
                                   "release_sprint")
action_release_dribble = CoreAction(e_BackendAction.release_dribble,
                                    "release_dribble")
# ***** Define some action sets *****
action_set_v1 = [
    action_idle, action_left, action_top_left, action_top,
    action_top_right, action_right, action_bottom_right,
    action_bottom, action_bottom_left, action_long_pass,
    action_high_pass, action_short_pass, action_shot,
    action_sprint, action_release_direction, action_release_sprint,
    action_sliding, action_dribble, action_release_dribble]
# v2 additionally allows handing control to the built-in AI.
action_set_v2 = action_set_v1 + [action_builtin_ai]
# Special action set that includes all the core actions in the same order.
full_action_set = action_set_v2 + [
    action_keeper_rush, action_pressure,
    action_team_pressure, action_switch,
    action_release_long_pass, action_release_high_pass,
    action_release_short_pass, action_release_shot,
    action_release_keeper_rush, action_release_sliding,
    action_release_pressure, action_release_team_pressure,
    action_release_switch,
]
action_set_dict = {
    "default": action_set_v1,
    "v2": action_set_v2,
    # "full" action set is needed by the play_game script.
    # Don't use it for training models.
    "full": full_action_set,
}
# Symmetric map between each press action and its release counterpart
# (both directions are present, so a lookup works either way).
reverse_action_mapping = {
    action_long_pass: action_release_long_pass,
    action_high_pass: action_release_high_pass,
    action_short_pass: action_release_short_pass,
    action_shot: action_release_shot,
    action_keeper_rush: action_release_keeper_rush,
    action_sliding: action_release_sliding,
    action_pressure: action_release_pressure,
    action_team_pressure: action_release_team_pressure,
    action_switch: action_release_switch,
    action_sprint: action_release_sprint,
    action_dribble: action_release_dribble,
    action_release_long_pass: action_long_pass,
    action_release_high_pass: action_high_pass,
    action_release_short_pass: action_short_pass,
    action_release_shot: action_shot,
    action_release_keeper_rush: action_keeper_rush,
    action_release_sliding: action_sliding,
    action_release_pressure: action_pressure,
    action_release_team_pressure: action_team_pressure,
    action_release_switch: action_switch,
    action_release_sprint: action_sprint,
    action_release_dribble: action_dribble
}
# Returns action set specified by the config.
def get_action_set(config):
  """Look up the action set named by config["action_set"]."""
  return action_set_dict[config["action_set"]]
def get_sticky_actions(config):
  """Returns list of sticky actions for the currently used action set."""
  return [action for action in get_action_set(config) if action._sticky]
# Converts different action representation to an action from a given action set.
def named_action_from_action_set(action_set, action):
  """Resolve `action` (already-named action or integer index) in `action_set`."""
  already_named = (hasattr(action, "__dict__") and action_set and
                   set(action.__dict__) == set(action_set[0].__dict__))
  if already_named:
    return action
  if isinstance(action, (numpy.int32, numpy.int64, int)):
    # A numpy integer cannot be serialized; normalize to a plain python
    # int before indexing.
    index = int(action)
    assert index < len(action_set), "Action outside of action set"
    return action_set[index]
  assert False, "Action {} not found in action set".format(action)
def disable_action(action):
  """Return the action that cancels `action` (its release counterpart)."""
  assert set(action.__dict__) == set(action_left.__dict__)
  return (action_release_direction if action._directional
          else reverse_action_mapping[action])
| |
# System Imports
from math import ceil
import operator
# Package Imports
from ..util import now, timerange
from ..events import EventEmitter
# Sibling Imports
from . import errors
def _upper_bound (list, time):
# Return the index of the first item in {list} which
# is greater than or equal to {time}.
# http://stackoverflow.com/q/2236906/
return next((i for i, t in enumerate(list) if t >= time), None)
def _lower_bound (list, time):
l = len(list)
# Return the index of the last item in {list} which
# is less than or equal to {time}.
try:
return l - 1 - next(
(i for i, t in enumerate(reversed(list)) if t <= time)
)
except StopIteration:
return None
def _interp (x, x0, y0, x1, y1):
try:
return y0 + (y1 - y0) * (x - x0) / (x1 - x0)
except ZeroDivisionError:
return y1
def _prepare (start, interval):
	# Normalise a (start, interval) request: clamp negative intervals to
	# zero, resolve a negative start as "seconds before now", and default
	# a missing interval to "from start until now".
	if interval is not None:
		interval = max(interval, 0)
	if start is None:
		return start, interval
	if start < 0:
		start = now() + start
	if interval is None:
		interval = now() - start
	return start, interval
def _get (x_vals, y_vals, x_max, x_min, start, interval):
	# Extract (x, y) pairs covering [start, start + interval] from the
	# parallel arrays {x_vals} / {y_vals}, padding with boundary values
	# when the request extends beyond the stored data range.
	if start is None and interval is None:
		# No range requested: return everything.
		return list(zip(x_vals, y_vals))
	if interval is None:
		interval = 0
	end = start + interval
	# Entire request lies after the stored data: repeat the latest value.
	if start > x_max:
		points = [(start, y_vals[-1])]
		if interval != 0:
			points.append((end, y_vals[-1]))
		return points
	# Entire request lies before the stored data: repeat the earliest
	# value, or zero when nothing is stored at all.
	if end < x_min:
		try:
			fill = y_vals[0]
		except IndexError:
			fill = 0
		points = [(start, fill)]
		if interval != 0:
			points.append((end, fill))
		return points
	# Overlapping request: slice out the stored points in range.
	i_start = _lower_bound(x_vals, start)
	i_end = _upper_bound(x_vals, end)
	if i_end is not None:
		i_end += 1 # Return the interval length of data
	vals = list(zip(x_vals[i_start:i_end], y_vals[i_start:i_end]))
	# Pad either end when the request sticks out past the data.
	try:
		if start < x_min:
			vals.insert(0, (start, y_vals[0]))
		if end > x_max:
			vals.append((end, y_vals[-1]))
	except IndexError:
		pass
	return vals
def _at (val, time):
	# Collapse the result of a get() call into a single value at {time}:
	# interpolate between the first two points when there are at least
	# two, otherwise return the lone value (or 0 when there is no data).
	if not val:
		return 0
	if len(val) == 1:
		return val[0][1]
	(x0, y0), (x1, y1) = val[0], val[1]
	return _interp(time, x0, y0, x1, y1)
class Archive (object):
	"""Long-term, thinned storage of a numeric time series.

	A point is only retained when it differs from the previously stored
	value by more than a spread-relative threshold; at most one local
	extremum between stored points is also kept so curve shapes survive
	the thinning.
	"""
	# Set threshold_factor to None for non-numeric variables
	threshold_factor = 0.05
	# Minimum absolute change required before a new point is stored.
	min_delta = 10
	def __init__ (self):
		self._prev_x = None
		self._prev_y = None
		self.truncate()
	def truncate (self):
		# Reset storage, seeding with the most recent point (if any) so
		# the series stays continuous across the truncation.
		self._zero = now()
		self._x = [self._prev_x] if self._prev_x is not None else []
		self._y = [self._prev_y] if self._prev_y is not None else []
		self._y_min = 0
		self._y_max = 0
		self._min_since_last = None
		self._max_since_last = None
	def push (self, x, y):
		"""Offer the data point (x = time, y = value) for storage."""
		# Ignore data points at times earlier than the most recent reset.
		if x < self._zero:
			return
		if self.threshold_factor is not None:
			# Update max and min
			if y > self._y_max:
				self._y_max = y
			elif y < self._y_min:
				self._y_min = y
			# The delta must be at least {factor} * absolute spread
			# of values collected so far, and at least {min_delta}.
			threshold = max(
				self.threshold_factor * (self._y_max - self._y_min),
				self.min_delta
			)
			# Update Min / Max values
			if self._min_since_last is None \
			or y < self._min_since_last[1]:
				self._min_since_last = (x, y)
			if self._max_since_last is None \
			or y > self._max_since_last[1]:
				self._max_since_last = (x, y)
		# Store the values if the delta exceeds the threshold
		# (threshold is only referenced when threshold_factor is set,
		# thanks to short-circuit evaluation).
		if self._prev_y is None \
		or self.threshold_factor is None \
		or abs(self._prev_y - y) > threshold:
			# Add up to one local maximum (or minimum)
			# to retain concave curve shapes.
			if self.threshold_factor is not None and self._prev_y is not None:
				if self._max_since_last[1] > self._prev_y \
				and self._max_since_last[1] > y:
					self._x.append(self._max_since_last[0])
					self._y.append(self._max_since_last[1])
				elif self._min_since_last[1] < self._prev_y \
				and self._min_since_last[1] < y:
					self._x.append(self._min_since_last[0])
					self._y.append(self._min_since_last[1])
				self._min_since_last = (x, y)
				self._max_since_last = (x, y)
			self._x.append(x)
			self._y.append(y)
		# Always remember the latest offered point, stored or not.
		self._prev_x = x
		self._prev_y = y
	def get (self, start = None, interval = None):
		"""Return stored (time, value) pairs for the requested range."""
		start, interval = _prepare(start, interval)
		# Nothing in archive
		if self._prev_x is None:
			return []
		return _get(self._x, self._y, self._prev_x, self._x[0], start, interval)
	def at (self, time):
		# NOTE(review): returns "" when there is no data (unlike _at,
		# which returns 0) and does not interpolate -- confirm intended.
		val = self.get(time, 0)
		if len(val) == 0:
			return ""
		else:
			return val[0][1]
class StringArchive (Archive):
	"""Archive stand-in for non-numeric (string) variables.

	Stores nothing: push() discards every data point and at() is left
	unimplemented.
	"""
	def __init__ (self, variable):
		Archive.__init__(self)
		self._variable = variable
	def push (self, x, y):
		# Intentionally a no-op: string history is not archived.
		pass
	def at (self, time):
		return "StringArchive.at not implemented" # _at(self.get(time, 0), time)
_default_alias_counters = {}
def _default_alias (object):
class_name = object.__class__.__name__
if class_name not in _default_alias_counters:
_default_alias_counters[class_name] = 1
else:
_default_alias_counters[class_name] += 1
return "{:s}_{:d}".format(class_name, _default_alias_counters[class_name])
class BaseVariable (EventEmitter):
	"""Common interface for variables, constants and expressions.

	Subclasses provide _value / _type; this base supplies safe accessors,
	numeric conversion helpers and a debug representation. Operator
	overloads are attached dynamically by _def_binary_op/_def_unary_op.
	"""

	alias = ""

	@property
	def value (self):
		# Latest known value, or None before anything has been set.
		try:
			return self._value
		except AttributeError:
			return None

	@property
	def type (self):
		# Declared value type, or NoneType before one is known.
		try:
			return self._type
		except AttributeError:
			return type(None)

	def get_value (self):
		return self._value

	def __str__ (self):
		return str(self.get_value())

	def __int__ (self):
		return int(self.get_value())

	def __float__ (self):
		return float(self.get_value())

	def __bool__ (self):
		return bool(self.get_value())

	__nonzero__ = __bool__

	def __repr__ (self):
		# BUG FIX: the hex prefix read "Ox" (letter O) instead of "0x".
		return "<{class_name} at 0x{reference:x}: {var_alias} ({var_type}) = {var_value}>".format(
			class_name = self.__class__.__name__,
			reference = id(self),
			var_alias = self.alias,
			var_type = self.type.__name__,
			var_value = self.value
		)
# Types for which the numeric (thinning) Archive is appropriate.
_numeric_types = (int, float, complex)

class Variable (BaseVariable):
	"""A typed, time-stamped variable with a short-term exact buffer and
	a long-term thinned archive.

	Recent history (roughly the last {length} seconds) is kept exactly
	in _x/_y; older history is delegated to an Archive (numeric types)
	or a StringArchive (everything else).
	"""
	# Length of the exact short-term history buffer.
	length = 30 # in seconds
	def __init__ (self, type, value = None):
		# NOTE: the parameter deliberately shadows the builtin type()
		# inside this method.
		self.alias = _default_alias(self)
		self._time = None
		self._value = None
		self._type = type
		self._x = []
		self._y = []
		if type in _numeric_types:
			self._archive = Archive()
		else:
			self._archive = StringArchive(self)
		self._log_file = None
		if value is not None:
			self._push(value)
	def truncate (self):
		"""
		Empty the variable of all stored data.
		"""
		if self._value is None:
			self._x = []
			self._y = []
		else:
			# Keep the current value so the series remains continuous.
			self._time = now()
			self._x = [self._time]
			self._y = [self._value]
		self._archive.truncate()
		# Trigger clear event
		self.emit("clear", time = self._time, value = self._value)
	def set (self, value):
		# Public setter; conversion/validation happens in _push().
		self._push(value)
	def get (self, start = None, interval = None):
		"""
		Returns the value of the variable over a particular time period.
		Returns a list of (time, value) pairs between
		[time = start and time = start + interval] (inclusive).
		start: earliest time to return data.
		interval: time-span requested.
		If interval = None, then data are returned from start
		up to the current time.
		If start < 0, then this number of seconds is subtracted
		from the current time.
		"""
		if start is None and interval is None:
			return self._archive.get()
		# Requests older than the short-term buffer go to the archive.
		# NOTE(review): start=None combined with a non-None interval
		# would raise TypeError on this comparison -- confirm callers
		# never do that.
		if start < self._x[0]:
			return self._archive.get(start, interval)
		start, interval = _prepare(start, interval)
		return _get(self._x, self._y, self._time, self._x[0], start, interval)
	def at (self, time):
		# Interpolated value of the variable at {time}.
		return _at(self.get(time, 0), time)
	def _push (self, value, time = None):
		"""Record {value} (coerced to this variable's type) at {time}."""
		if value is None:
			return
		if type(value) != self._type:
			value = self._type(value)
		if time is None:
			time = now()
		elif self._time is not None and time < self._time:
			raise Exception("Cannot insert values earlier than latest value")
		# Only store changes
		if self._value == value \
		and len(self._x) > 2 \
		and self._y[-2] == value:
			# Value unchanged: extend the duration of the latest point.
			self._x[-1] = time
			changed = False
		else:
			self._x.append(time)
			self._y.append(value)
			changed = True
		# Trim old data
		# (drop the older half of the buffer once it is stale; the
		# archive keeps the long-term record).
		mid = len(self._x) // 2
		if time - self._x[mid] > self.length:
			self._y = self._y[mid:]
			self._x = self._x[mid:]
		self._value = value
		self._time = time
		self._archive.push(time, value)
		self._log(time, value)
		# Trigger change event
		if changed:
			self.emit("change", time = time, value = value)
	# Todo: Put these in event watchers in the experiment.
	def _log (self, time, value):
		# Forward the data point to the attached log file, if any.
		if self._log_file is not None:
			self._log_file.write(time, value)
	def setLogFile (self, logFile):
		"""Attach {logFile}, closing any previous log, and immediately
		log the current value (if any)."""
		if self._log_file is not None:
			self._log_file.close()
		self._log_file = logFile
		if self._value is not None:
			self._log_file.write(now(), self._value)
	def stopLogging (self):
		"""Close and detach the current log file, if any."""
		if self._log_file is not None:
			self._log_file.close()
		self._log_file = None
class Constant (BaseVariable):
	"""A fixed value wrapped in the variable interface."""

	def __init__ (self, value):
		self._value = value
		self._type = type(value)

	def get (self, start, interval = None):
		# A constant's history is flat: one point for an instant, two
		# (start and end) for a non-zero interval.
		start, interval = _prepare(start, interval)
		if interval == 0:
			return [(start, self._value)]
		return [(start, self._value), (start + interval, self._value)]

	def at (self, time):
		return self._value

	def serialize (self):
		return str(self._value)
class Expression (BaseVariable):
	# Marker base class for derived (computed) variables; concrete
	# expression classes are generated below by _def_binary_op and
	# _def_unary_op.
	pass
# Variable should emulate a numerical variable
# (symbol used by serialize(), operator function) pairs from which the
# expression classes below are generated.
_unary_ops = (
	(" not ", operator.not_), (" abs ", operator.abs),
	(" -", operator.neg), (" +", operator.pos))
_binary_ops = (
	(" < ", operator.lt), (" <= ", operator.le), (" == ", operator.eq),
	(" != ", operator.ne), (" > ", operator.gt), (" >= ", operator.ge),
	(" + ", operator.add), (" - ", operator.sub),
	(" / ", operator.truediv), (" // ", operator.floordiv),
	(" * ", operator.mul), (" % ", operator.mod),
	("**", operator.pow),
	(" & ", operator.and_), (" | ", operator.or_),
	(" and ", lambda a, b: a and b), (" or ", lambda a, b: a or b),
)
# http://stackoverflow.com/questions/100003/what-is-a-metaclass-in-python/6581949#6581949
def _def_binary_op (symbol, operatorFn):
	"""Generate an Expression subclass for the binary {operatorFn} and
	attach matching operator methods to BaseVariable.

	{symbol} is the textual form used by serialize().
	"""
	if symbol in (" and ", " or "):
		# "and"/"or" cannot be overloaded as dunder methods; expose
		# them as plain methods (and_ / or_) instead.
		clsName = symbol[1:-1].capitalize() + "Expression"
		attrName = symbol[1:-1] + "_"
		rattrName = None
	else:
		if operatorFn in (operator.and_, operator.or_):
			clsName = "Bitwise" + operatorFn.__name__[:-1].capitalize() + "Expression"
			attrName = "__" + operatorFn.__name__[:-1] + "__"
			rattrName = "__r" + operatorFn.__name__[:-1] + "__"
		else:
			clsName = operatorFn.__name__.capitalize() + "Expression"
			attrName = "__" + operatorFn.__name__ + "__"
			rattrName = "__r" + operatorFn.__name__ + "__"

	def init (self, lhs, rhs):
		self.alias = _default_alias(self)
		self._archive_x = None
		self._archive_y = None

		# Wrap plain values so both operands share the variable interface.
		lhs = lhs if isinstance(lhs, BaseVariable) else Constant(lhs)
		rhs = rhs if isinstance(rhs, BaseVariable) else Constant(rhs)

		self._lhs = lhs
		self._rhs = rhs

		if lhs.value is not None and rhs.value is not None:
			try:
				self._value = operatorFn(lhs.value, rhs.value)
			except TypeError:
				# Mixed str/non-str operands: coerce both to str.
				if lhs.type is str or rhs.type is str:
					self._value = operatorFn(str(lhs.value), str(rhs.value))
				else:
					raise

			self._type = type(self._value)
		else:
			self._value = None
			self._type = None

		lhs.on("change", self._changed)
		rhs.on("change", self._changed)

	def _changed (self, data):
		# Recompute whenever either operand changes, then propagate.
		try:
			self._value = operatorFn(self._lhs.value, self._rhs.value)
		except TypeError:
			if self._lhs.type is str or self._rhs.type is str:
				self._value = operatorFn(str(self._lhs.value), str(self._rhs.value))
			else:
				raise

		if self._archive_x is not None:
			self._archive_x.append(data['time'])
			self._archive_y.append(self._value)

		self.emit("change", time = data['time'], value = self._value)

	def get_type (self):
		if self._type is None and self._value is not None:
			self._type = type(self._value)

		return self._type

	def get (self, start = None, interval = None):
		if self._archive_x is None:
			self.get_archive()

		return _get(self._archive_x, self._archive_y, self._archive_x[-1], self._archive_x[0], start, interval)

	def at (self, time):
		return _at(self.get(time, 0), time)

	def get_archive (self, store = True):
		"""Build (and optionally cache) the combined history of both
		operands, applying the operator at every recorded time."""
		if self._archive_x is not None:
			return list(zip(self._archive_x, self._archive_y))

		x = []
		y = []

		try:
			lhsa = self._lhs.get_archive(store = False)
		except AttributeError:
			lhsa = self._lhs.get()
		try:
			rhsa = self._rhs.get_archive(store = False)
		except AttributeError:
			rhsa = self._rhs.get()

		# BUG FIX: the str/non-str branches were inverted here (plain
		# operands were stringified and string operands were not),
		# contradicting the coercion rule used by init/_changed.
		if self._lhs.type is str or self._rhs.type is str:
			def op (l, r):
				return operatorFn(str(l), str(r))
		else:
			def op (l, r):
				return operatorFn(l, r)

		# BUG FIX: the original merge referenced undefined names
		# (lhs/rhs), read time stamps where values were needed, used
		# len(rhsa) for both bounds and could index past either list.
		# Standard two-pointer merge: walk both series in time order,
		# carrying forward the latest value of the other side.
		l_max = len(lhsa)
		r_max = len(rhsa)
		l_i = r_i = 0
		c_l = lhsa[0][1] if lhsa else None
		c_r = rhsa[0][1] if rhsa else None

		while l_i < l_max or r_i < r_max:
			if r_i >= r_max or (l_i < l_max and lhsa[l_i][0] <= rhsa[r_i][0]):
				t, c_l = lhsa[l_i]
				l_i += 1
			else:
				t, c_r = rhsa[r_i]
				r_i += 1

			x.append(t)
			y.append(op(c_l, c_r))

		if store:
			self._archive_x = x
			self._archive_y = y

		return list(zip(x, y))

	def serialize (self):
		return "(" + \
			self._lhs.serialize() + symbol \
			+ self._rhs.serialize() + ")"

	cls = type(
		clsName,
		(Expression,),
		{
			"__init__": init,
			"type": property(get_type),
			"serialize": serialize,
			"_changed": _changed,
			"get_archive": get_archive,
			"get": get,
			"at": at
		}
	)

	def op_fn (self, other):
		return cls(self, other)

	def op_rfn (self, other):
		return cls(other, self)

	setattr(BaseVariable, attrName, op_fn)

	if rattrName is not None:
		setattr(BaseVariable, rattrName, op_rfn)
def _def_unary_op (symbol, operatorFn):
	"""Generate an Expression subclass for the unary {operatorFn} and
	attach it to BaseVariable.

	{symbol} is the textual prefix used by serialize().
	"""
	def init (self, operand):
		self.alias = _default_alias(self)
		# BUG FIX: get()/get_archive() read these caches, but the
		# original never initialised them, raising AttributeError.
		self._archive_x = None
		self._archive_y = None
		self._operand = operand

		if operand.value is not None:
			self._value = operatorFn(operand.value)
			self._type = type(self._value)
		else:
			self._value = None
			self._type = None

		operand.on("change", self._changed)

	def _changed (self, data):
		self._value = operatorFn(self._operand.value)
		self.emit("change", time = data['time'], value = self._value)

	def get_type (self):
		if self._type is None and self._value is not None:
			self._type = type(self._value)

		return self._type

	def get (self, start = None, interval = None):
		if self._archive_x is None:
			self.get_archive()

		return _get(self._archive_x, self._archive_y, self._archive_x[-1], self._archive_x[0], start, interval)

	def at (self, time):
		return _at(self.get(time, 0), time)

	def get_archive (self, store = True):
		# Apply the operator point-wise over the operand's history.
		if self._archive_x is not None:
			return list(zip(self._archive_x, self._archive_y))

		x = []
		y = []

		try:
			opa = self._operand.get_archive(store = False)
		except AttributeError:
			opa = self._operand.get()

		for o_x, o_y in opa:
			x.append(o_x)
			y.append(operatorFn(o_y))

		if store:
			self._archive_x = x
			self._archive_y = y

		return list(zip(x, y))

	def serialize (self):
		return symbol + self._operand.serialize()

	# BUG FIX: operator.neg.__name__ is "neg" (not "__neg__"), so the
	# original "[2:-2]" slicing produced empty class names and the
	# methods were registered under names Python never dispatches to.
	name = operatorFn.__name__.rstrip("_")	# "not", "abs", "neg", "pos"
	cls = type(
		name.capitalize() + "Expression",
		(Expression,),
		{
			"__init__": init,
			"type": property(get_type),
			"serialize": serialize,
			"_changed": _changed,
			"get_archive": get_archive,
			"get": get,
			"at": at
		}
	)

	def op_fn (self):
		# BUG FIX: unary expressions take a single operand; the original
		# accepted a spurious second argument and passed the operator
		# function itself to the constructor.
		return cls(self)

	if name in ("neg", "pos", "abs", "invert"):
		# Hook the real dunder so -v, +v and abs(v) dispatch here.
		setattr(BaseVariable, "__" + name + "__", op_fn)
	else:
		# "not" cannot be overloaded; expose it as a plain method.
		setattr(BaseVariable, operatorFn.__name__, op_fn)
# Attach all operator overloads to BaseVariable at import time. Plain
# loops: the list comprehensions used here previously built throwaway
# lists purely for their side effects.
for _symbol, _op in _unary_ops:
	_def_unary_op(_symbol, _op)
for _symbol, _op in _binary_ops:
	_def_binary_op(_symbol, _op)
| |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=too-many-lines
from __future__ import division
from __future__ import absolute_import
import unittest
import telemetry.timeline.counter as tracing_counter
import telemetry.timeline.model as timeline_model
from tracing.trace_data import trace_data as trace_data_module
# 2To3-division: those lines like xxx / 1000.0 are unchanged as result is
# expected floats.
def FindEventNamed(events, name):
  """Return the first event in `events` with the given name.

  Raises ValueError when no such event exists.
  """
  match = next((e for e in events if e.name == name), None)
  if match is None:
    raise ValueError('No event found with name %s' % name)
  return match
class TraceEventTimelineImporterTest(unittest.TestCase):
  def testBasicSingleThreadNonnestedParsing(self):
    """Three sequential B/E pairs on one thread import as three slices.

    Checks wall-clock times (shifted so the first event starts at 0),
    thread times, and that slices without 'tts' get None thread times.
    """
    events = [
        {'name': 'a', 'args': {}, 'pid': 52, 'ts': 520, 'tts': 280,
         'cat': 'foo', 'tid': 53, 'ph': 'B'},
        {'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'tts': 310,
         'cat': 'foo', 'tid': 53, 'ph': 'E'},
        {'name': 'b', 'args': {}, 'pid': 52, 'ts': 629, 'tts': 356,
         'cat': 'bar', 'tid': 53, 'ph': 'B'},
        {'name': 'b', 'args': {}, 'pid': 52, 'ts': 631, 'tts': 357,
         'cat': 'bar', 'tid': 53, 'ph': 'E'},
        {'name': 'c', 'args': {}, 'pid': 52, 'ts': 633, 'cat': 'baz',
         'tid': 53, 'ph': 'B'},
        {'name': 'c', 'args': {}, 'pid': 52, 'ts': 637, 'cat': 'baz',
         'tid': 53, 'ph': 'E'}
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data)
    processes = m.GetAllProcesses()
    self.assertEqual(1, len(processes))
    p = processes[0]
    self.assertEqual(52, p.pid)
    self.assertEqual(1, len(p.threads))
    t = p.threads[53]
    self.assertEqual(3, len(t.all_slices))
    self.assertEqual(53, t.tid)
    # Slice 'a': timestamps in microseconds convert to ms (/ 1000.0).
    slice_event = t.all_slices[0]
    self.assertEqual('a', slice_event.name)
    self.assertEqual('foo', slice_event.category)
    self.assertAlmostEqual(0, slice_event.start)
    self.assertAlmostEqual((560 - 520) / 1000.0, slice_event.duration)
    self.assertAlmostEqual((560 - 520) / 1000.0, slice_event.end)
    self.assertAlmostEqual(280 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual((310 - 280) / 1000.0, slice_event.thread_duration)
    self.assertAlmostEqual(310 / 1000.0, slice_event.thread_end)
    self.assertEqual(0, len(slice_event.sub_slices))
    # Slice 'b'.
    slice_event = t.all_slices[1]
    self.assertEqual('b', slice_event.name)
    self.assertEqual('bar', slice_event.category)
    self.assertAlmostEqual((629 - 520) / 1000.0, slice_event.start)
    self.assertAlmostEqual((631 - 629) / 1000.0, slice_event.duration)
    self.assertAlmostEqual((631 - 520) / 1000.0, slice_event.end)
    self.assertAlmostEqual(356 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual((357 - 356) / 1000.0, slice_event.thread_duration)
    self.assertAlmostEqual(357 / 1000.0, slice_event.thread_end)
    self.assertEqual(0, len(slice_event.sub_slices))
    # Slice 'c' has no 'tts' fields, so all thread times must be None.
    slice_event = t.all_slices[2]
    self.assertEqual('c', slice_event.name)
    self.assertEqual('baz', slice_event.category)
    self.assertAlmostEqual((633 - 520) / 1000.0, slice_event.start)
    self.assertAlmostEqual((637 - 633) / 1000.0, slice_event.duration)
    self.assertEqual(None, slice_event.thread_start)
    self.assertEqual(None, slice_event.thread_duration)
    self.assertEqual(None, slice_event.thread_end)
    self.assertEqual(0, len(slice_event.sub_slices))
def testArgumentDupeCreatesNonFailingImportError(self):
events = [
{'name': 'a', 'args': {'x': 1}, 'pid': 1, 'ts': 520, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'a', 'args': {'x': 2}, 'pid': 1, 'ts': 560, 'cat': 'foo',
'tid': 1, 'ph': 'E'}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data)
processes = m.GetAllProcesses()
t = processes[0].threads[1]
slice_a = FindEventNamed(t.all_slices, 'a')
self.assertEqual(2, slice_a.args['x'])
self.assertEqual(1, len(m.import_errors))
def testCategoryBeginEndMismatchPreferslice_begin(self):
events = [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 520, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'bar',
'tid': 53, 'ph': 'E'}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data)
processes = m.GetAllProcesses()
self.assertEqual(1, len(processes))
p = processes[0]
self.assertEqual(52, p.pid)
self.assertEqual(1, len(p.threads))
t = p.threads[53]
self.assertEqual(1, len(t.all_slices))
self.assertEqual(53, t.tid)
slice_event = t.all_slices[0]
self.assertEqual('a', slice_event.name)
self.assertEqual('foo', slice_event.category)
  def testNestedParsing(self):
    """Nested B/E pairs on one thread form parent/child slices.

    Timestamps are kept absolute (shift_world_to_zero=False) so the
    expected millisecond values below map directly to the 'ts'/'tts'
    microsecond inputs.
    """
    events = [
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 2, 'cat': 'foo',
         'tid': 1, 'ph': 'B'},
        {'name': 'b', 'args': {}, 'pid': 1, 'ts': 3, 'tts': 3, 'cat': 'bar',
         'tid': 1, 'ph': 'B'},
        {'name': 'b', 'args': {}, 'pid': 1, 'ts': 5, 'tts': 4, 'cat': 'bar',
         'tid': 1, 'ph': 'E'},
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 7, 'tts': 5, 'cat': 'foo',
         'tid': 1, 'ph': 'E'}
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data, shift_world_to_zero=False)
    t = m.GetAllProcesses()[0].threads[1]
    slice_a = FindEventNamed(t.all_slices, 'a')
    slice_b = FindEventNamed(t.all_slices, 'b')
    self.assertEqual('a', slice_a.name)
    self.assertEqual('foo', slice_a.category)
    self.assertAlmostEqual(0.001, slice_a.start)
    self.assertAlmostEqual(0.006, slice_a.duration)
    self.assertAlmostEqual(0.002, slice_a.thread_start)
    self.assertAlmostEqual(0.003, slice_a.thread_duration)
    self.assertEqual('b', slice_b.name)
    self.assertEqual('bar', slice_b.category)
    self.assertAlmostEqual(0.003, slice_b.start)
    self.assertAlmostEqual(0.002, slice_b.duration)
    self.assertAlmostEqual(0.003, slice_b.thread_start)
    self.assertAlmostEqual(0.001, slice_b.thread_duration)
  def testAutoclosing(self):
    """Unfinished slices are auto-closed at the thread's last known time.

    'a' and 'b' never receive an 'E' event; the finished 'c'/'d' slices
    supply the end times used to close them, and both are flagged with
    did_not_finish.
    """
    events = [
        # Slices that don't finish.
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 1, 'cat': 'foo',
         'tid': 1, 'ph': 'B'},
        {'name': 'b', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 2, 'cat': 'foo',
         'tid': 2, 'ph': 'B'},
        # Slices on thread 1 and 2 that do finish to give an 'end time' to make
        # autoclosing work.
        {'name': 'c', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1.5, 'cat': 'bar',
         'tid': 1, 'ph': 'B'},
        {'name': 'c', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 3, 'cat': 'bar',
         'tid': 1, 'ph': 'E'},
        {'name': 'd', 'args': {}, 'pid': 1, 'ts': 3, 'tts': 2.5, 'cat': 'bar',
         'tid': 2, 'ph': 'B'},
        {'name': 'd', 'args': {}, 'pid': 1, 'ts': 7, 'tts': 5, 'cat': 'bar',
         'tid': 2, 'ph': 'E'}
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data)
    p = m.GetAllProcesses()[0]
    t1 = p.threads[1]
    slice_event = FindEventNamed(t1.all_slices, 'a')
    self.assertEqual('a', slice_event.name)
    self.assertEqual('foo', slice_event.category)
    self.assertTrue(slice_event.did_not_finish)
    self.assertAlmostEqual(0, slice_event.start)
    self.assertAlmostEqual((7 - 1) / 1000.0, slice_event.duration)
    self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual((3 - 1) / 1000.0, slice_event.thread_duration)
    t2 = p.threads[2]
    slice_event = FindEventNamed(t2.all_slices, 'b')
    self.assertEqual('b', slice_event.name)
    self.assertEqual('foo', slice_event.category)
    self.assertTrue(slice_event.did_not_finish)
    self.assertAlmostEqual((2 - 1) / 1000.0, slice_event.start)
    self.assertAlmostEqual((7 - 2) / 1000.0, slice_event.duration)
    self.assertAlmostEqual(2 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual((5 - 2) / 1000.0, slice_event.thread_duration)
def testAutoclosingLoneBegin(self):
events = [
# Slice that doesn't finish.
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 1, 'cat': 'foo',
'tid': 1, 'ph': 'B'}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data)
p = m.GetAllProcesses()[0]
t = p.threads[1]
slice_event = t.all_slices[0]
self.assertEqual('a', slice_event.name)
self.assertEqual('foo', slice_event.category)
self.assertTrue(slice_event.did_not_finish)
self.assertAlmostEqual(0, slice_event.start)
self.assertAlmostEqual(0, slice_event.duration)
self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual(0, slice_event.thread_duration)
def testAutoclosingWithSubTasks(self):
events = [
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'b1', 'args': {}, 'pid': 1, 'ts': 2, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'b1', 'args': {}, 'pid': 1, 'ts': 3, 'cat': 'foo',
'tid': 1, 'ph': 'E'},
{'name': 'b2', 'args': {}, 'pid': 1, 'ts': 3, 'cat': 'foo',
'tid': 1, 'ph': 'B'}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data, shift_world_to_zero=False)
t = m.GetAllProcesses()[0].threads[1]
slice_a = FindEventNamed(t.all_slices, 'a')
slice_b1 = FindEventNamed(t.all_slices, 'b1')
slice_b2 = FindEventNamed(t.all_slices, 'b2')
self.assertAlmostEqual(0.003, slice_a.end)
self.assertAlmostEqual(0.003, slice_b1.end)
self.assertAlmostEqual(0.003, slice_b2.end)
  def testAutoclosingWithEventsOutsideBounds(self):
    """Open slices extending past other threads' bounds still close sanely.

    Thread 1 has a slice beginning before, and one beginning after, the
    min/max established by thread 2's finished slice; the model bounds
    must expand to cover them.
    """
    events = [
        # Slice that begins before min and ends after max of the other threads.
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 0, 'tts': 0, 'cat': 'foo',
         'tid': 1, 'ph': 'B'},
        {'name': 'b', 'args': {}, 'pid': 1, 'ts': 6, 'tts': 3, 'cat': 'foo',
         'tid': 1, 'ph': 'B'},
        # Slice that does finish to give an 'end time' to establish a basis
        {'name': 'c', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'bar',
         'tid': 2, 'ph': 'B'},
        {'name': 'c', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 2, 'cat': 'bar',
         'tid': 2, 'ph': 'E'}
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data, shift_world_to_zero=False)
    p = m.GetAllProcesses()[0]
    t1 = p.threads[1]
    t1_thread_time_bounds = (
        m._thread_time_bounds[t1])  # pylint: disable=protected-access
    self.assertAlmostEqual(0.000, t1_thread_time_bounds.min)
    self.assertAlmostEqual(0.003, t1_thread_time_bounds.max)
    self.assertEqual(2, len(t1.all_slices))
    slice_event = FindEventNamed(t1.all_slices, 'a')
    self.assertEqual('a', slice_event.name)
    self.assertEqual('foo', slice_event.category)
    self.assertAlmostEqual(0, slice_event.start)
    self.assertAlmostEqual(0.006, slice_event.duration)
    self.assertAlmostEqual(0, slice_event.thread_start)
    self.assertAlmostEqual(0.003, slice_event.thread_duration)
    t2 = p.threads[2]
    t2_thread_time_bounds = (
        m._thread_time_bounds[t2])  # pylint: disable=protected-access
    self.assertAlmostEqual(0.001, t2_thread_time_bounds.min)
    self.assertAlmostEqual(0.002, t2_thread_time_bounds.max)
    slice2 = FindEventNamed(t2.all_slices, 'c')
    self.assertEqual('c', slice2.name)
    self.assertEqual('bar', slice2.category)
    self.assertAlmostEqual(0.002, slice2.start)
    self.assertAlmostEqual(0.002, slice2.duration)
    self.assertAlmostEqual(0.001, slice2.thread_start)
    self.assertAlmostEqual(0.001, slice2.thread_duration)
    # Model bounds cover the auto-closed slices, not just finished ones.
    self.assertAlmostEqual(0.000, m.bounds.min)
    self.assertAlmostEqual(0.006, m.bounds.max)
def testNestedAutoclosing(self):
events = [
# Tasks that don't finish.
{'name': 'a1', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'a2', 'args': {}, 'pid': 1, 'ts': 1.5, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
# Slice that does finish to give an 'end time' to make autoclosing work.
{'name': 'b', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
'tid': 2, 'ph': 'B'},
{'name': 'b', 'args': {}, 'pid': 1, 'ts': 2, 'cat': 'foo',
'tid': 2, 'ph': 'E'}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data, shift_world_to_zero=False)
t1 = m.GetAllProcesses()[0].threads[1]
t2 = m.GetAllProcesses()[0].threads[2]
slice_a1 = FindEventNamed(t1.all_slices, 'a1')
slice_a2 = FindEventNamed(t1.all_slices, 'a2')
FindEventNamed(t2.all_slices, 'b')
self.assertAlmostEqual(0.002, slice_a1.end)
self.assertAlmostEqual(0.002, slice_a2.end)
def testMultipleThreadParsing(self):
events = [
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 2, 'cat': 'foo',
'tid': 1, 'ph': 'E'},
{'name': 'b', 'args': {}, 'pid': 1, 'ts': 6, 'tts': 3, 'cat': 'bar',
'tid': 2, 'ph': 'B'},
{'name': 'b', 'args': {}, 'pid': 1, 'ts': 8, 'tts': 4, 'cat': 'bar',
'tid': 2, 'ph': 'E'}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data)
processes = m.GetAllProcesses()
self.assertEqual(1, len(processes))
p = processes[0]
self.assertEqual(2, len(p.threads))
# Check thread 1.
t = p.threads[1]
self.assertAlmostEqual(1, len(t.all_slices))
self.assertAlmostEqual(1, t.tid)
slice_event = t.all_slices[0]
self.assertEqual('a', slice_event.name)
self.assertEqual('foo', slice_event.category)
self.assertAlmostEqual(0, slice_event.start)
self.assertAlmostEqual((4 - 2) / 1000.0, slice_event.duration)
self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual((2 - 1) / 1000.0, slice_event.thread_duration)
# Check thread 2.
t = p.threads[2]
self.assertAlmostEqual(1, len(t.all_slices))
self.assertAlmostEqual(2, t.tid)
slice_event = t.all_slices[0]
self.assertEqual('b', slice_event.name)
self.assertEqual('bar', slice_event.category)
self.assertAlmostEqual((6 - 2) / 1000.0, slice_event.start)
self.assertAlmostEqual((8 - 6) / 1000.0, slice_event.duration)
self.assertAlmostEqual(3 / 1000.0, slice_event.thread_start)
self.assertAlmostEqual((4 - 3) / 1000.0, slice_event.thread_duration)
  def testMultiplePidParsing(self):
    """Events with distinct pids produce distinct processes in the model."""
    events = [
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'foo',
         'tid': 1, 'ph': 'B'},
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 2, 'cat': 'foo',
         'tid': 1, 'ph': 'E'},
        {'name': 'b', 'args': {}, 'pid': 2, 'ts': 6, 'tts': 3, 'cat': 'bar',
         'tid': 2, 'ph': 'B'},
        {'name': 'b', 'args': {}, 'pid': 2, 'ts': 8, 'tts': 4, 'cat': 'bar',
         'tid': 2, 'ph': 'E'}
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data)
    processes = m.GetAllProcesses()
    self.assertEqual(2, len(processes))
    p = processes[0]
    self.assertEqual(1, p.pid)
    self.assertEqual(1, len(p.threads))
    # Check process 1 thread 1.
    t = p.threads[1]
    self.assertEqual(1, len(t.all_slices))
    self.assertEqual(1, t.tid)
    slice_event = t.all_slices[0]
    self.assertEqual('a', slice_event.name)
    self.assertEqual('foo', slice_event.category)
    self.assertAlmostEqual(0, slice_event.start)
    self.assertAlmostEqual((4 - 2) / 1000.0, slice_event.duration)
    self.assertAlmostEqual(1 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual((2 - 1) / 1000.0, slice_event.thread_duration)
    # Check process 2 thread 2.
    # TODO: will this be in deterministic order?
    p = processes[1]
    self.assertEqual(2, p.pid)
    self.assertEqual(1, len(p.threads))
    t = p.threads[2]
    self.assertEqual(1, len(t.all_slices))
    self.assertEqual(2, t.tid)
    slice_event = t.all_slices[0]
    self.assertEqual('b', slice_event.name)
    self.assertEqual('bar', slice_event.category)
    self.assertAlmostEqual((6 - 2) / 1000.0, slice_event.start)
    self.assertAlmostEqual((8 - 6) / 1000.0, slice_event.duration)
    self.assertAlmostEqual(3 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual((4 - 3) / 1000.0, slice_event.thread_duration)
    # Check getAllThreads.
    self.assertEqual(
        [processes[0].threads[1], processes[1].threads[2]], m.GetAllThreads())
def testThreadNames(self):
events = [
{'name': 'thread_name', 'args': {'name': 'Thread 1'},
'pid': 1, 'ts': 0, 'tid': 1, 'ph': 'M'},
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'cat': 'foo',
'tid': 1, 'ph': 'B'},
{'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'cat': 'foo',
'tid': 1, 'ph': 'E'},
{'name': 'b', 'args': {}, 'pid': 2, 'ts': 3, 'cat': 'foo',
'tid': 2, 'ph': 'B'},
{'name': 'b', 'args': {}, 'pid': 2, 'ts': 4, 'cat': 'foo',
'tid': 2, 'ph': 'E'},
{'name': 'thread_name', 'args': {'name': 'Thread 2'},
'pid': 2, 'ts': 0, 'tid': 2, 'ph': 'M'}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data)
processes = m.GetAllProcesses()
self.assertEqual('Thread 1', processes[0].threads[1].name)
self.assertEqual('Thread 2', processes[1].threads[2].name)
  def testParsingWhenEndComesFirst(self):
    """A leading unmatched 'E' is reported as one import error.

    The later, well-formed B/E pair still imports as a single slice.
    """
    events = [
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 1, 'tts': 1, 'cat': 'foo',
         'tid': 1, 'ph': 'E'},
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 4, 'tts': 4, 'cat': 'foo',
         'tid': 1, 'ph': 'B'},
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 5, 'tts': 5, 'cat': 'foo',
         'tid': 1, 'ph': 'E'}
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data, shift_world_to_zero=False)
    p = m.GetAllProcesses()[0]
    t = p.threads[1]
    self.assertEqual(1, len(t.all_slices))
    self.assertEqual('a', t.all_slices[0].name)
    self.assertEqual('foo', t.all_slices[0].category)
    self.assertEqual(0.004, t.all_slices[0].start)
    self.assertEqual(0.001, t.all_slices[0].duration)
    self.assertEqual(0.004, t.all_slices[0].thread_start)
    self.assertEqual(0.001, t.all_slices[0].thread_duration)
    # The orphan end event at ts=1 is the single recorded error.
    self.assertEqual(1, len(m.import_errors))
  def testImmediateParsing(self):
    """'I' and 'i' instant events import as zero-duration slices."""
    events = [
        # Need to include immediates inside a task so the timeline
        # recentering/zeroing doesn't clobber their timestamp.
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 2, 'tts': 1, 'cat': 'foo',
         'tid': 1, 'ph': 'B'},
        {'name': 'immediate', 'args': {}, 'pid': 1, 'ts': 4, 'cat': 'bar',
         'tid': 1, 'ph': 'I'},
        {'name': 'slower', 'args': {}, 'pid': 1, 'ts': 8, 'cat': 'baz',
         'tid': 1, 'ph': 'i'},
        {'name': 'a', 'args': {}, 'pid': 1, 'ts': 8, 'tts': 4, 'cat': 'foo',
         'tid': 1, 'ph': 'E'}
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data, shift_world_to_zero=False)
    p = m.GetAllProcesses()[0]
    t = p.threads[1]
    self.assertEqual(3, len(t.all_slices))
    i = m.GetAllEventsOfName('immediate')[0]
    self.assertEqual('immediate', i.name)
    self.assertEqual('bar', i.category)
    self.assertAlmostEqual(0.004, i.start)
    self.assertAlmostEqual(0, i.duration)
    slower = m.GetAllEventsOfName('slower')[0]
    self.assertEqual('slower', slower.name)
    self.assertEqual('baz', slower.category)
    self.assertAlmostEqual(0.008, slower.start)
    self.assertAlmostEqual(0, slower.duration)
    a = m.GetAllEventsOfName('a')[0]
    self.assertEqual('a', a.name)
    self.assertEqual('foo', a.category)
    self.assertAlmostEqual(0.002, a.start)
    self.assertAlmostEqual(0.006, a.duration)
    self.assertAlmostEqual(0.001, a.thread_start)
    self.assertAlmostEqual(0.003, a.thread_duration)
  def testSimpleCounter(self):
    """'C' events for one series accumulate into a single process counter."""
    events = [
        {'name': 'ctr', 'args': {'value': 0}, 'pid': 1, 'ts': 0, 'cat': 'foo',
         'tid': 1, 'ph': 'C'},
        {'name': 'ctr', 'args': {'value': 10}, 'pid': 1, 'ts': 10, 'cat': 'foo',
         'tid': 1, 'ph': 'C'},
        {'name': 'ctr', 'args': {'value': 0}, 'pid': 1, 'ts': 20, 'cat': 'foo',
         'tid': 1, 'ph': 'C'}
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data)
    p = m.GetAllProcesses()[0]
    # Counters are keyed by '<category>.<name>'.
    ctr = p.counters['foo.ctr']
    self.assertEqual('ctr', ctr.name)
    self.assertEqual('foo', ctr.category)
    self.assertEqual(3, ctr.num_samples)
    self.assertEqual(1, ctr.num_series)
    self.assertEqual(['value'], ctr.series_names)
    self.assertEqual([0, 0.01, 0.02], ctr.timestamps)
    self.assertEqual([0, 10, 0], ctr.samples)
    self.assertEqual([0, 10, 0], ctr.totals)
    self.assertEqual(10, ctr.max_total)
  def testInstanceCounter(self):
    """Counter events carrying an 'id' split into per-instance counters."""
    events = [
        {'name': 'ctr', 'args': {'value': 0}, 'pid': 1, 'ts': 0, 'cat': 'foo',
         'tid': 1,
         'ph': 'C', 'id': 0},
        {'name': 'ctr', 'args': {'value': 10}, 'pid': 1, 'ts': 10, 'cat': 'foo',
         'tid': 1,
         'ph': 'C', 'id': 0},
        {'name': 'ctr', 'args': {'value': 10}, 'pid': 1, 'ts': 10, 'cat': 'foo',
         'tid': 1,
         'ph': 'C', 'id': 1},
        {'name': 'ctr', 'args': {'value': 20}, 'pid': 1, 'ts': 15, 'cat': 'foo',
         'tid': 1,
         'ph': 'C', 'id': 1},
        {'name': 'ctr', 'args': {'value': 30}, 'pid': 1, 'ts': 18, 'cat': 'foo',
         'tid': 1,
         'ph': 'C', 'id': 1},
        {'name': 'ctr', 'args': {'value': 40}, 'pid': 1, 'ts': 20, 'cat': 'bar',
         'tid': 1,
         'ph': 'C', 'id': 2}
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data)
    p = m.GetAllProcesses()[0]
    # Instance counters are keyed '<category>.<name>[<id>]'.
    ctr = p.counters['foo.ctr[0]']
    self.assertEqual('ctr[0]', ctr.name)
    self.assertEqual('foo', ctr.category)
    self.assertEqual(2, ctr.num_samples)
    self.assertEqual(1, ctr.num_series)
    self.assertEqual([0, 0.01], ctr.timestamps)
    self.assertEqual([0, 10], ctr.samples)
    ctr = m.GetAllProcesses()[0].counters['foo.ctr[1]']
    self.assertEqual('ctr[1]', ctr.name)
    self.assertEqual('foo', ctr.category)
    self.assertEqual(3, ctr.num_samples)
    self.assertEqual(1, ctr.num_series)
    self.assertEqual([0.01, 0.015, 0.018], ctr.timestamps)
    self.assertEqual([10, 20, 30], ctr.samples)
    ctr = m.GetAllProcesses()[0].counters['bar.ctr[2]']
    self.assertEqual('ctr[2]', ctr.name)
    self.assertEqual('bar', ctr.category)
    self.assertEqual(1, ctr.num_samples)
    self.assertEqual(1, ctr.num_series)
    self.assertEqual([0.02], ctr.timestamps)
    self.assertEqual([40], ctr.samples)
  def testMultiCounterUpdateBounds(self):
    """FinalizeImport computes running per-sample totals for two series.

    samples is a flat list interleaving the two series per timestamp; the
    expected totals interleave each series value with the cumulative sum
    across series at that timestamp.
    """
    ctr = tracing_counter.Counter(
        None, 'testBasicCounter', 'testBasicCounter')
    ctr.series_names = ['value1', 'value2']
    ctr.timestamps = [0, 1, 2, 3, 4, 5, 6, 7]
    ctr.samples = [0, 0,
                   1, 0,
                   1, 1,
                   2, 1.1,
                   3, 0,
                   1, 7,
                   3, 0,
                   3.1, 0.5]
    ctr.FinalizeImport()
    # Largest cross-series sum is 1 + 7 at timestamp 5.
    self.assertEqual(8, ctr.max_total)
    self.assertEqual([0, 0,
                      1, 1,
                      1, 2,
                      2, 3.1,
                      3, 3,
                      1, 8,
                      3, 3,
                      3.1, 3.6], ctr.totals)
def testMultiCounter(self):
events = [
{'name': 'ctr', 'args': {'value1': 0, 'value2': 7}, 'pid': 1, 'ts': 0,
'cat': 'foo', 'tid': 1, 'ph': 'C'},
{'name': 'ctr', 'args': {'value1': 10, 'value2': 4}, 'pid': 1, 'ts': 10,
'cat': 'foo', 'tid': 1, 'ph': 'C'},
{'name': 'ctr', 'args': {'value1': 0, 'value2': 1}, 'pid': 1, 'ts': 20,
'cat': 'foo', 'tid': 1, 'ph': 'C'}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data)
p = m.GetAllProcesses()[0]
ctr = p.counters['foo.ctr']
self.assertEqual('ctr', ctr.name)
self.assertEqual('ctr', ctr.name)
self.assertEqual('foo', ctr.category)
self.assertEqual(3, ctr.num_samples)
self.assertEqual(2, ctr.num_series)
self.assertEqual(sorted(['value1', 'value2']), sorted(ctr.series_names))
self.assertEqual(sorted([0, 0.01, 0.02]), sorted(ctr.timestamps))
self.assertEqual(sorted([0, 7, 10, 4, 0, 1]), sorted(ctr.samples))
# We can't check ctr.totals here because it can change depending on
# the order in which the series names are added.
self.assertEqual(14, ctr.max_total)
  def testNestableInstant(self):
    """'n' nestable-instant events import as zero-duration async slices.

    'id2.local' ids are scoped to the process (rendered as '<pid>.<id>');
    'id2.global' ids pass through unchanged.
    """
    events = [
        {'name': 'a', 'args': {'arg1': 'value1'}, 'pid': 52, 'ts': 540,
         'cat': 'foo', 'tid': 53, 'ph': 'n', 'id2': {'local': 72}},
        {'name': 'b', 'args': {'arg2': 'value3'}, 'pid': 52, 'ts': 1554,
         'cat': 'bar', 'tid': 54, 'ph': 'n', 'id2': {'global': 85}},
        {'name': 'c', 'args': {'arg3': 'value4'}, 'pid': 52, 'tts': 1555,
         'ts': 1560, 'cat': 'baz', 'tid': 54, 'ph': 'n', 'id2': {'local': 72}},
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data)
    events = list(m.IterAllEvents())
    self.assertEqual(3, len(events))
    processes = m.GetAllProcesses()
    t1 = processes[0].threads[53]
    self.assertEqual(1, len(t1.async_slices))
    e1 = t1.async_slices[0]
    self.assertEqual('a', e1.name)
    self.assertEqual('value1', e1.args['arg1'])
    self.assertEqual(0, e1.start)
    self.assertEqual(0, e1.duration)
    self.assertEqual('foo', e1.category)
    self.assertEqual('52.72', e1.id)
    t2 = processes[0].threads[54]
    self.assertEqual(2, len(t2.async_slices))
    e2 = t2.async_slices[0]
    self.assertEqual('b', e2.name)
    self.assertEqual('value3', e2.args['arg2'])
    self.assertEqual((1554 - 540) / 1000.0, e2.start)
    self.assertEqual(0, e2.duration)
    self.assertEqual('bar', e2.category)
    self.assertEqual(85, e2.id)
    e3 = t2.async_slices[1]
    self.assertEqual('c', e3.name)
    self.assertEqual('value4', e3.args['arg3'])
    self.assertEqual((1560 - 540) / 1000.0, e3.start)
    self.assertEqual(1555 / 1000.0, e3.thread_start)
    self.assertEqual(0, e3.duration)
    self.assertEqual('baz', e3.category)
    self.assertEqual('52.72', e3.id)
  def testStartFinishOneSliceOneThread(self):
    """Out-of-order S/F async events pair up into one async slice."""
    events = [
        # Time is intentionally out of order.
        {'name': 'a', 'args': {}, 'pid': 52, 'ts': 560, 'cat': 'cat',
         'tid': 53, 'ph': 'F', 'id2': {'global': 72}},
        {'name': 'a', 'pid': 52, 'ts': 524, 'cat': 'cat',
         'tid': 53, 'ph': 'S', 'id2': {'global': 72}, 'args': {'foo': 'bar'}}
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data)
    events = list(m.IterAllEvents())
    self.assertEqual(2, len(events))
    processes = m.GetAllProcesses()
    t = processes[0].threads[53]
    slices = t.async_slices
    self.assertEqual(1, len(slices))
    self.assertEqual('a', slices[0].name)
    self.assertEqual('cat', slices[0].category)
    self.assertEqual(72, slices[0].id)
    self.assertEqual('bar', slices[0].args['foo'])
    self.assertEqual(0, slices[0].start)
    self.assertAlmostEqual((60 - 24) / 1000.0, slices[0].duration)
    self.assertEqual(t, slices[0].start_thread)
    self.assertEqual(t, slices[0].end_thread)
def testEndArgsAddedToSlice(self):
events = [
{'name': 'a', 'args': {'x': 1}, 'pid': 52, 'ts': 520, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'a', 'args': {'y': 2}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'E'}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data)
processes = m.GetAllProcesses()
self.assertEqual(1, len(processes))
p = processes[0]
self.assertEqual(1, len(p.threads))
t = p.threads[53]
self.assertEqual(1, len(t.all_slices))
self.assertEqual(53, t.tid)
slice_event = t.all_slices[0]
self.assertEqual('a', slice_event.name)
self.assertEqual('foo', slice_event.category)
self.assertEqual(0, slice_event.start)
self.assertEqual(1, slice_event.args['x'])
self.assertEqual(2, slice_event.args['y'])
def testEndArgOverrwritesOriginalArgValueIfDuplicated(self):
events = [
{'name': 'b', 'args': {'z': 3}, 'pid': 52, 'ts': 629, 'cat': 'foo',
'tid': 53, 'ph': 'B'},
{'name': 'b', 'args': {'z': 4}, 'pid': 52, 'ts': 631, 'cat': 'foo',
'tid': 53, 'ph': 'E'}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data)
processes = m.GetAllProcesses()
self.assertEqual(1, len(processes))
p = processes[0]
self.assertEqual(1, len(p.threads))
t = p.threads[53]
slice_event = t.all_slices[0]
self.assertEqual('b', slice_event.name)
self.assertEqual('foo', slice_event.category)
self.assertEqual(0, slice_event.start)
self.assertEqual(4, slice_event.args['z'])
  def testSliceHierarchy(self):
    """Overlapping B/E pairs nest into the expected slice tree.

    The slice hierarchy should look something like this:
        [            a            ]
        [  b  ]          [   d   ]
        [ c ]         [ e ]
    """
    events = [
        {'name': 'a', 'args': {}, 'pid': 52, 'ts': 100, 'cat': 'foo',
         'tid': 53, 'ph': 'B'},
        {'name': 'a', 'args': {}, 'pid': 52, 'ts': 200, 'cat': 'foo',
         'tid': 53, 'ph': 'E'},
        {'name': 'b', 'args': {}, 'pid': 52, 'ts': 125, 'cat': 'foo',
         'tid': 53, 'ph': 'B'},
        {'name': 'b', 'args': {}, 'pid': 52, 'ts': 165, 'cat': 'foo',
         'tid': 53, 'ph': 'E'},
        {'name': 'c', 'args': {}, 'pid': 52, 'ts': 125, 'cat': 'foo',
         'tid': 53, 'ph': 'B'},
        {'name': 'c', 'args': {}, 'pid': 52, 'ts': 135, 'cat': 'foo',
         'tid': 53, 'ph': 'E'},
        {'name': 'd', 'args': {}, 'pid': 52, 'ts': 175, 'cat': 'foo',
         'tid': 53, 'ph': 'B'},
        {'name': 'd', 'args': {}, 'pid': 52, 'ts': 190, 'cat': 'foo',
         'tid': 53, 'ph': 'E'},
        {'name': 'e', 'args': {}, 'pid': 52, 'ts': 155, 'cat': 'foo',
         'tid': 53, 'ph': 'B'},
        {'name': 'e', 'args': {}, 'pid': 52, 'ts': 165, 'cat': 'foo',
         'tid': 53, 'ph': 'E'}
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data, shift_world_to_zero=False)
    processes = m.GetAllProcesses()
    self.assertEqual(1, len(processes))
    p = processes[0]
    self.assertEqual(1, len(p.threads))
    t = p.threads[53]
    slice_a = t.all_slices[0]
    self.assertEqual(4, len(slice_a.GetAllSubSlices()))
    self.assertEqual('a', slice_a.name)
    self.assertEqual(100 / 1000.0, slice_a.start)
    self.assertEqual(200 / 1000.0, slice_a.end)
    self.assertEqual(2, len(slice_a.sub_slices))
    slice_b = slice_a.sub_slices[0]
    self.assertEqual('b', slice_b.name)
    self.assertEqual(2, len(slice_b.sub_slices))
    self.assertEqual('c', slice_b.sub_slices[0].name)
    self.assertEqual('e', slice_b.sub_slices[1].name)
    slice_d = slice_a.sub_slices[1]
    self.assertEqual('d', slice_d.name)
    self.assertEqual(0, len(slice_d.sub_slices))
def testAsyncEndArgAddedToSlice(self):
events = [
# Time is intentionally out of order.
{'name': 'c', 'args': {'y': 2}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'F', 'id2': {'local': 72}},
{'name': 'c', 'args': {'x': 1}, 'pid': 52, 'ts': 524, 'cat': 'foo',
'tid': 53, 'ph': 'S', 'id2': {'local': 72}}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data)
t = m.GetAllProcesses()[0].threads[53]
self.assertEqual(1, len(t.async_slices))
parent_slice = t.async_slices[0]
self.assertEqual('c', parent_slice.name)
self.assertEqual('foo', parent_slice.category)
self.assertEqual(1, len(parent_slice.sub_slices))
sub_slice = parent_slice.sub_slices[0]
self.assertEqual(1, sub_slice.args['x'])
self.assertEqual(2, sub_slice.args['y'])
def testAsyncEndArgOverrwritesOriginalArgValueIfDuplicated(self):
events = [
# Time is intentionally out of order.
{'name': 'd', 'args': {'z': 4}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'F', 'id': 72},
{'name': 'd', 'args': {'z': 3}, 'pid': 52, 'ts': 524, 'cat': 'foo',
'tid': 53, 'ph': 'S', 'id': 72}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data)
t = m.GetAllProcesses()[0].threads[53]
self.assertEqual(1, len(t.async_slices))
parent_slice = t.async_slices[0]
self.assertEqual('d', parent_slice.name)
self.assertEqual('foo', parent_slice.category)
self.assertEqual(1, len(parent_slice.sub_slices))
sub_slice = parent_slice.sub_slices[0]
self.assertEqual(4, sub_slice.args['z'])
  def testAsyncStepsInOneThread(self):
    """S/T/F async events split the parent slice at each 'T' step.

    The step sub-slice is named '<parent>:<step>' and carries the step
    event's args merged with the finish event's args.
    """
    events = [
        # Time is intentionally out of order.
        {'name': 'a', 'args': {'z': 3}, 'pid': 52, 'ts': 560, 'cat': 'foo',
         'tid': 53, 'ph': 'F', 'id': 72, 'tts': 25},
        {'name': 'a', 'args': {'step': 's1', 'y': 2}, 'pid': 52, 'ts': 548,
         'cat': 'foo', 'tid': 53, 'ph': 'T', 'id': 72, 'tts': 20},
        {'name': 'a', 'args': {'x': 1}, 'pid': 52, 'ts': 524, 'cat': 'foo',
         'tid': 53, 'ph': 'S', 'id': 72, 'tts': 17}
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data)
    t = m.GetAllProcesses()[0].threads[53]
    self.assertEqual(1, len(t.async_slices))
    parent_slice = t.async_slices[0]
    self.assertEqual('a', parent_slice.name)
    self.assertEqual('foo', parent_slice.category)
    self.assertEqual(0, parent_slice.start)
    self.assertAlmostEqual(17 / 1000.0, parent_slice.thread_start)
    self.assertAlmostEqual(25 / 1000.0, parent_slice.thread_end)
    self.assertEqual(2, len(parent_slice.sub_slices))
    sub_slice = parent_slice.sub_slices[0]
    self.assertEqual('a', sub_slice.name)
    self.assertEqual('foo', sub_slice.category)
    self.assertAlmostEqual(0, sub_slice.start)
    self.assertAlmostEqual((548 - 524) / 1000.0, sub_slice.duration)
    self.assertAlmostEqual((20 - 17) / 1000.0, sub_slice.thread_duration)
    self.assertEqual(1, sub_slice.args['x'])
    sub_slice = parent_slice.sub_slices[1]
    self.assertEqual('a:s1', sub_slice.name)
    self.assertEqual('foo', sub_slice.category)
    self.assertAlmostEqual((548 - 524) / 1000.0, sub_slice.start)
    self.assertAlmostEqual((560 - 548) / 1000.0, sub_slice.duration)
    self.assertAlmostEqual((25 - 20) / 1000.0, sub_slice.thread_duration)
    self.assertEqual(2, sub_slice.args['y'])
    self.assertEqual(3, sub_slice.args['z'])
def testAsyncStepsMissingStart(self):
events = [
# Time is intentionally out of order.
{'name': 'a', 'args': {'z': 3}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'F', 'id': 72},
{'name': 'a', 'args': {'step': 's1', 'y': 2}, 'pid': 52, 'ts': 548,
'cat': 'foo', 'tid': 53, 'ph': 'T', 'id': 72}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data)
t = m.GetAllProcesses()[0].threads[53]
self.assertTrue(t is not None)
def testAsyncStepsMissingFinish(self):
events = [
# Time is intentionally out of order.
{'name': 'a', 'args': {'step': 's1', 'y': 2}, 'pid': 52, 'ts': 548,
'cat': 'foo', 'tid': 53, 'ph': 'T', 'id': 72},
{'name': 'a', 'args': {'z': 3}, 'pid': 52, 'ts': 560, 'cat': 'foo',
'tid': 53, 'ph': 'S', 'id': 72}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data)
t = m.GetAllProcesses()[0].threads[53]
self.assertTrue(t is not None)
def testImportSamples(self):
events = [
{'name': 'a', 'args': {}, 'pid': 52, 'ts': 548, 'cat': 'test',
'tid': 53, 'ph': 'P'},
{'name': 'b', 'args': {}, 'pid': 52, 'ts': 548, 'cat': 'test',
'tid': 53, 'ph': 'P'},
{'name': 'c', 'args': {}, 'pid': 52, 'ts': 558, 'cat': 'test',
'tid': 53, 'ph': 'P'}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data)
p = m.GetAllProcesses()[0]
t = p.threads[53]
self.assertEqual(3, len(t.samples))
self.assertEqual(0.0, t.samples[0].start)
self.assertEqual(0.0, t.samples[1].start)
self.assertAlmostEqual(0.01, t.samples[2].start)
self.assertEqual('a', t.samples[0].name)
self.assertEqual('b', t.samples[1].name)
self.assertEqual('c', t.samples[2].name)
self.assertEqual(0, len(m.import_errors))
def testImportSamplesMissingArgs(self):
events = [
{'name': 'a', 'pid': 52, 'ts': 548, 'cat': 'test',
'tid': 53, 'ph': 'P'},
{'name': 'b', 'pid': 52, 'ts': 548, 'cat': 'test',
'tid': 53, 'ph': 'P'},
{'name': 'c', 'pid': 52, 'ts': 549, 'cat': 'test',
'tid': 53, 'ph': 'P'}
]
trace_data = trace_data_module.CreateFromRawChromeEvents(events)
m = timeline_model.TimelineModel(trace_data)
p = m.GetAllProcesses()[0]
t = p.threads[53]
self.assertEqual(3, len(t.samples))
self.assertEqual(0, len(m.import_errors))
  def testImportCompleteEvent(self):
    """'X' complete events import with their dur/tdur fields.

    The final event has no 'dur', so it is auto-closed (did_not_finish)
    and becomes a sub-slice of the enclosing 'b'.
    """
    events = [
        {'name': 'a', 'args': {}, 'pid': 52, 'ts': 629, 'tts': 538, 'dur': 1,
         'tdur': 1, 'cat': 'baz', 'tid': 53, 'ph': 'X'},
        {'name': 'b', 'args': {}, 'pid': 52, 'ts': 730, 'tts': 620, 'dur': 20,
         'tdur': 14, 'cat': 'foo', 'tid': 53, 'ph': 'X'},
        {'name': 'c', 'args': {}, 'pid': 52, 'ts': 740, 'tts': 625,
         'cat': 'baz', 'tid': 53, 'ph': 'X'},
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data)
    p = m.GetAllProcesses()[0]
    t = p.threads[53]
    self.assertEqual(3, len(t.all_slices))
    slice_event = t.all_slices[0]
    self.assertEqual('a', slice_event.name)
    self.assertAlmostEqual(0.0, slice_event.start)
    self.assertAlmostEqual(1 / 1000.0, slice_event.duration)
    self.assertAlmostEqual(538 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual(1 / 1000.0, slice_event.thread_duration)
    self.assertFalse(slice_event.did_not_finish)
    self.assertEqual(0, len(slice_event.sub_slices))
    slice_event = t.all_slices[1]
    self.assertEqual('b', slice_event.name)
    self.assertAlmostEqual((730 - 629) / 1000.0, slice_event.start)
    self.assertAlmostEqual(20 / 1000.0, slice_event.duration)
    self.assertAlmostEqual(620 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual(14 / 1000.0, slice_event.thread_duration)
    self.assertFalse(slice_event.did_not_finish)
    self.assertEqual(1, len(slice_event.sub_slices))
    self.assertEqual(t.all_slices[2], slice_event.sub_slices[0])
    slice_event = t.all_slices[2]
    self.assertEqual('c', slice_event.name)
    self.assertAlmostEqual((740 - 629) / 1000.0, slice_event.start)
    self.assertAlmostEqual(10 / 1000.0, slice_event.duration)
    self.assertAlmostEqual(625 / 1000.0, slice_event.thread_start)
    self.assertAlmostEqual(9 / 1000.0, slice_event.thread_duration)
    self.assertTrue(slice_event.did_not_finish)
    self.assertEqual(0, len(slice_event.sub_slices))
def testImportMarkEvent(self):
    """Mark ('R') events import as zero-length, finished, childless slices."""
    events = [
        {'name': 'a', 'pid': 52, 'ts': 629, 'cat': 'baz', 'tid': 53, 'ph': 'R'},
        {'name': 'b', 'pid': 52, 'ts': 730, 'cat': 'foo', 'tid': 53, 'ph': 'R'},
        {'name': 'c', 'pid': 52, 'ts': 740, 'cat': 'baz', 'tid': 53, 'ph': 'R'},
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    model = timeline_model.TimelineModel(trace_data)
    thread = model.GetAllProcesses()[0].threads[53]
    self.assertEqual(3, len(thread.all_slices))
    # (name, category, expected start in ms relative to the first event).
    expectations = [
        ('a', 'baz', 0.0),
        ('b', 'foo', (730 - 629) / 1000.0),
        ('c', 'baz', (740 - 629) / 1000.0),
    ]
    for slice_event, (name, category, start) in zip(thread.all_slices,
                                                    expectations):
        self.assertEqual(name, slice_event.name)
        self.assertEqual(category, slice_event.category)
        self.assertAlmostEqual(start, slice_event.start)
        self.assertFalse(slice_event.did_not_finish)
        self.assertEqual(0, len(slice_event.sub_slices))
def testImportFlowEvent(self):
    """A start ('s') / step ('t') / finish ('f') chain with one id yields
    two flow edges: start->step and step->finish.

    Flow events carry zero duration; starts are milliseconds relative to
    the first event's timestamp (548us).
    """
    events = [
        {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 548,
         'ph': 's', 'args': {}},
        {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 560,
         'ph': 't', 'args': {}},
        {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 580,
         'ph': 'f', 'args': {}},
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data)
    p = m.GetAllProcesses()[0]
    t = p.threads[53]
    self.assertTrue(t is not None)
    # Two edges: flow_events[0] = (start, step), flow_events[1] = (step, finish).
    self.assertEqual(2, len(m.flow_events))
    start = m.flow_events[0][0]
    step = m.flow_events[0][1]
    finish = m.flow_events[1][1]
    self.assertEqual('a', start.name)
    self.assertEqual('foo', start.category)
    self.assertEqual(72, start.event_id)
    self.assertEqual(0, start.start)
    self.assertEqual(0, start.duration)
    # Step inherits the identity of the flow and lies 12us after the start.
    self.assertEqual(start.name, step.name)
    self.assertEqual(start.category, step.category)
    self.assertEqual(start.event_id, step.event_id)
    self.assertAlmostEqual(12 / 1000.0, step.start)
    self.assertEqual(0, step.duration)
    # Finish closes the flow 32us (= 20 + 12) after the start.
    self.assertEqual(start.name, finish.name)
    self.assertEqual(start.category, finish.category)
    self.assertEqual(start.event_id, finish.event_id)
    self.assertAlmostEqual((20 + 12) / 1000.0, finish.start)
    self.assertEqual(0, finish.duration)
def testImportOutOfOrderFlowEvent(self):
    """Interleaved flows with different ids are still paired correctly
    even when their events arrive out of timestamp order.
    """
    events = [
        {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 548,
         'ph': 's', 'args': {}},
        {'name': 'b', 'cat': 'foo', 'id': 73, 'pid': 52, 'tid': 53, 'ts': 148,
         'ph': 's', 'args': {}},
        {'name': 'b', 'cat': 'foo', 'id': 73, 'pid': 52, 'tid': 53, 'ts': 570,
         'ph': 'f', 'args': {}},
        {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 560,
         'ph': 't', 'args': {}},
        {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 580,
         'ph': 'f', 'args': {}},
    ]
    # Each pair is [edge-source start, edge-target start] in ms, relative
    # to the earliest event (ts=148).
    expected = [[0.4, 0.412], [0.0, 0.422], [0.412, 0.432]]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data)
    self.assertEqual(3, len(m.flow_events))
    for i, time in enumerate(expected):
        self.assertAlmostEqual(time[0], m.flow_events[i][0].start)
        self.assertAlmostEqual(time[1], m.flow_events[i][1].start)
def testImportErrornousFlowEvent(self):
    """Malformed flows produce no flow events.

    The input contains a duplicated start for id 70, a finish for id 73
    with no matching start, and a step for id 72 with no start — every
    one of them must be discarded by the importer.
    """
    events = [
        {'name': 'a', 'cat': 'foo', 'id': 70, 'pid': 52, 'tid': 53, 'ts': 548,
         'ph': 's', 'args': {}},
        {'name': 'a2', 'cat': 'foo', 'id': 70, 'pid': 52, 'tid': 53, 'ts': 550,
         'ph': 's', 'args': {}},
        {'name': 'b', 'cat': 'foo', 'id': 73, 'pid': 52, 'tid': 53, 'ts': 570,
         'ph': 'f', 'args': {}},
        {'name': 'a', 'cat': 'foo', 'id': 72, 'pid': 52, 'tid': 53, 'ts': 560,
         'ph': 't', 'args': {}},
    ]
    model = timeline_model.TimelineModel(
        trace_data_module.CreateFromRawChromeEvents(events))
    self.assertEqual(0, len(model.flow_events))
def testImportMemoryDumpEvents(self):
    """Memory dump ('v') events sharing an id across processes merge into
    one global dump spanning the first to last process dump.
    """
    events = [
        {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 52, 'ts': 123,
         'id': '1234ABCD'},
        {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 54, 'ts': 134,
         'id': '1234ABCD'},
        {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 52, 'ts': 144,
         'id': '1234ABCD'},
        {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 52, 'ts': 245,
         'id': '1234ABDF'},
        {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 54, 'ts': 256,
         'id': '1234ABDF'},
        # Earlier than the other '1234ABDF' dumps: the global dump's start
        # must still be the minimum ts of its group (233).
        {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 52, 'ts': 233,
         'id': '1234ABDF'},
    ]
    expected_processes = {52, 54}
    # Per global dump: [dump id, start (us, relative to ts=123), duration (us)].
    expected_results = [['1234ABCD', 0, 21], ['1234ABDF', 110, 23]]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data)
    assert set(p.pid for p in m.GetAllProcesses()) == expected_processes
    memory_dumps = list(m.IterGlobalMemoryDumps())
    self.assertEqual(len(expected_results), len(memory_dumps))
    for memory_dump, test_values in zip(memory_dumps, expected_results):
        # Every global dump must contain one process dump per process.
        assert len(list(memory_dump.IterProcessMemoryDumps())) == len(
            expected_processes)
        dump_id, start, duration = test_values
        self.assertEqual(dump_id, memory_dump.dump_id)
        self.assertAlmostEqual(start / 1000.0, memory_dump.start)
        self.assertAlmostEqual(duration / 1000.0, memory_dump.duration)
def testImportOutOfOrderMemoryDumpEvents(self):
    """Global memory dumps are ordered by start time even when the raw
    dump events arrive interleaved and out of timestamp order.
    """
    events = [
        {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 52, 'ts': 245,
         'id': '1234ABDF'},
        {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 54, 'ts': 134,
         'id': '1234ABCD'},
        {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 54, 'ts': 256,
         'id': '1234ABDF'},
        {'name': 'a', 'cat': 'b', 'ph': 'v', 'pid': 52, 'ts': 123,
         'id': '1234ABCD'},
    ]
    # [dump id, start (us, relative to ts=123), duration (us)] per dump.
    expected = [['1234ABCD', 0, 11], ['1234ABDF', 122, 11]]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data)
    memory_dumps = list(m.IterGlobalMemoryDumps())
    self.assertEqual(len(expected), len(memory_dumps))
    for memory_dump, test_values in zip(memory_dumps, expected):
        dump_id, start, duration = test_values
        self.assertEqual(dump_id, memory_dump.dump_id)
        self.assertAlmostEqual(start / 1000.0, memory_dump.start)
        self.assertAlmostEqual(duration / 1000.0, memory_dump.duration)
def testMetadataImport(self):
    """'__metadata' events set process names and labels.

    The first process gets an explicit name ('Browser') and no labels; the
    second gets labels only, so its name falls back to 'process <pid>'.
    """
    events = [
        {'cat': '__metadata', 'pid': 14689, 'tid': 14740, 'ts': 245,
         'ph': 'M', 'name': 'process_name', 'args': {'name': 'Browser'}},
        {'cat': '__metadata', 'pid': 23828, 'tid': 23828, 'ts': 0,
         'ph': 'M', 'name': 'process_labels',
         'args': {'labels': 'huge image - Google Search'}}
    ]
    # Per process: [expected labels, expected name].
    expected = [
        [None, 'Browser'],
        ['huge image - Google Search', 'process 23828']
    ]
    trace_data = trace_data_module.CreateFromRawChromeEvents(events)
    m = timeline_model.TimelineModel(trace_data)
    processes = m.GetAllProcesses()
    self.assertEqual(len(processes), len(expected))
    for process, test_values in zip(processes, expected):
        process_labels, process_name = test_values
        self.assertEqual(process_labels, process.labels)
        self.assertEqual(process_name, process.name)
| |
# -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from kombu import exceptions as exc
from taskflow.engines.worker_based import endpoint as ep
from taskflow.engines.worker_based import protocol as pr
from taskflow.engines.worker_based import server
from taskflow import test
from taskflow.tests import utils
from taskflow.utils import misc
from taskflow.utils import persistence_utils as pu
class TestServer(test.MockTestCase):
    """Unit tests for the worker-based engine Server.

    The kombu Proxy class, the executor and the incoming message are all
    mocked and attached to a single master mock, so every test can assert
    the exact sequence of calls the server made (publishes, acks, rejects,
    executor submissions).
    """

    def setUp(self):
        super(TestServer, self).setUp()
        # Canned identifiers/values shared by the request/response helpers.
        self.server_uuid = 'server-uuid'
        self.server_exchange = 'server-exchange'
        self.broker_url = 'test-url'
        self.task_uuid = 'task-uuid'
        self.task_args = {'x': 1}
        self.task_action = 'execute'
        self.task_name = 'taskflow.tests.utils.TaskOneArgOneReturn'
        self.task_version = (1, 0)
        self.reply_to = 'reply-to'
        self.endpoints = [ep.Endpoint(task_cls=utils.TaskOneArgOneReturn),
                          ep.Endpoint(task_cls=utils.TaskWithFailure),
                          ep.Endpoint(task_cls=utils.ProgressingTask)]
        self.resp_running = dict(state=pr.RUNNING)

        # patch classes
        self.proxy_mock, self.proxy_inst_mock = self._patch_class(
            server.proxy, 'Proxy')

        # other mocking
        self.proxy_inst_mock.is_running = True
        self.executor_mock = mock.MagicMock(name='executor')
        self.message_mock = mock.MagicMock(name='message')
        # correlation_id/reply_to are what Server uses to route its reply.
        self.message_mock.properties = {'correlation_id': self.task_uuid,
                                        'reply_to': self.reply_to}
        self.master_mock.attach_mock(self.executor_mock, 'executor')
        self.master_mock.attach_mock(self.message_mock, 'message')

    def server(self, reset_master_mock=False, **kwargs):
        """Build a Server under test; optionally clear setup-time calls
        from the master mock so tests assert only their own calls.
        """
        server_kwargs = dict(uuid=self.server_uuid,
                             exchange=self.server_exchange,
                             executor=self.executor_mock,
                             endpoints=self.endpoints,
                             url=self.broker_url)
        server_kwargs.update(kwargs)
        s = server.Server(**server_kwargs)
        if reset_master_mock:
            self._reset_master_mock()
        return s

    def request(self, **kwargs):
        """Build a canned task request dict, overridable via kwargs."""
        request = dict(task=self.task_name,
                       task_name=self.task_name,
                       action=self.task_action,
                       task_version=self.task_version,
                       arguments=self.task_args)
        request.update(kwargs)
        return request

    @staticmethod
    def resp_progress(progress):
        # Expected PROGRESS response payload published by the server.
        return dict(state=pr.PROGRESS, progress=progress, event_data={})

    @staticmethod
    def resp_success(result):
        # Expected SUCCESS response payload published by the server.
        return dict(state=pr.SUCCESS, result=result)

    @staticmethod
    def resp_failure(result, **kwargs):
        # Expected FAILURE response payload published by the server.
        response = dict(state=pr.FAILURE, result=result)
        response.update(kwargs)
        return response

    def test_creation(self):
        s = self.server()

        # check calls
        master_mock_calls = [
            mock.call.Proxy(self.server_uuid, self.server_exchange,
                            s._on_message, url=self.broker_url)
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
        self.assertEqual(len(s._endpoints), 3)

    def test_creation_with_endpoints(self):
        s = self.server(endpoints=self.endpoints)

        # check calls
        master_mock_calls = [
            mock.call.Proxy(self.server_uuid, self.server_exchange,
                            s._on_message, url=self.broker_url)
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
        self.assertEqual(len(s._endpoints), len(self.endpoints))

    def test_on_message_proxy_running_ack_success(self):
        # With the proxy running, a message is acked then handed to the
        # executor for processing.
        request = self.request()
        s = self.server(reset_master_mock=True)
        s._on_message(request, self.message_mock)

        # check calls
        master_mock_calls = [
            mock.call.message.ack(),
            mock.call.executor.submit(s._process_request, request,
                                      self.message_mock)
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)

    def test_on_message_proxy_running_ack_failure(self):
        # If the ack fails, the message must not reach the executor.
        self.message_mock.ack.side_effect = exc.MessageStateError('Woot!')
        s = self.server(reset_master_mock=True)
        s._on_message({}, self.message_mock)

        # check calls
        master_mock_calls = [
            mock.call.message.ack()
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)

    def test_on_message_proxy_not_running_reject_success(self):
        # A stopped proxy rejects (and requeues) incoming messages.
        self.proxy_inst_mock.is_running = False
        s = self.server(reset_master_mock=True)
        s._on_message({}, self.message_mock)

        # check calls
        master_mock_calls = [
            mock.call.message.reject(requeue=True)
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)

    def test_on_message_proxy_not_running_reject_failure(self):
        # Even if the reject itself raises, no further calls are made.
        self.message_mock.reject.side_effect = exc.MessageStateError('Woot!')
        self.proxy_inst_mock.is_running = False
        s = self.server(reset_master_mock=True)
        s._on_message({}, self.message_mock)

        # check calls
        master_mock_calls = [
            mock.call.message.reject(requeue=True)
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)

    def test_parse_request(self):
        request = self.request()
        task, action, task_args = server.Server._parse_request(**request)

        self.assertEqual((task, action, task_args),
                         (self.task_name, self.task_action,
                          dict(task_name=self.task_name,
                               arguments=self.task_args)))

    def test_parse_request_with_success_result(self):
        # A ('success', value) result tuple is unwrapped to the raw value.
        request = self.request(action='revert', result=('success', 1))
        task, action, task_args = server.Server._parse_request(**request)

        self.assertEqual((task, action, task_args),
                         (self.task_name, 'revert',
                          dict(task_name=self.task_name,
                               arguments=self.task_args,
                               result=1)))

    def test_parse_request_with_failure_result(self):
        # A ('failure', dict) result tuple is rebuilt into a Failure object.
        failure = misc.Failure.from_exception(Exception('test'))
        failure_dict = pu.failure_to_dict(failure)
        request = self.request(action='revert',
                               result=('failure', failure_dict))
        task, action, task_args = server.Server._parse_request(**request)

        self.assertEqual((task, action, task_args),
                         (self.task_name, 'revert',
                          dict(task_name=self.task_name,
                               arguments=self.task_args,
                               result=utils.FailureMatcher(failure))))

    def test_parse_request_with_failures(self):
        # A 'failures' mapping is rebuilt into Failure objects per key.
        failures = [misc.Failure.from_exception(Exception('test1')),
                    misc.Failure.from_exception(Exception('test2'))]
        failures_dict = dict((str(i), pu.failure_to_dict(f))
                             for i, f in enumerate(failures))
        request = self.request(action='revert', failures=failures_dict)
        task, action, task_args = server.Server._parse_request(**request)

        self.assertEqual(
            (task, action, task_args),
            (self.task_name, 'revert',
             dict(task_name=self.task_name,
                  arguments=self.task_args,
                  failures=dict((str(i), utils.FailureMatcher(f))
                                for i, f in enumerate(failures)))))

    @mock.patch("taskflow.engines.worker_based.server.LOG.error")
    def test_reply_publish_failure(self, mocked_error):
        # A publish error during reply is logged, not raised.
        self.proxy_inst_mock.publish.side_effect = RuntimeError('Woot!')

        # create server and process request
        s = self.server(reset_master_mock=True, endpoints=self.endpoints)
        s._reply(self.reply_to, self.task_uuid)

        self.assertEqual(self.master_mock.mock_calls, [
            mock.call.proxy.publish({'state': 'FAILURE'}, self.task_uuid,
                                    self.reply_to)
        ])
        self.assertEqual(mocked_error.mock_calls, [
            mock.call("Failed to send reply: Woot!")
        ])

    def test_on_update_progress(self):
        # A progressing task publishes RUNNING, 0%/100% PROGRESS, SUCCESS.
        request = self.request(task='taskflow.tests.utils.ProgressingTask',
                               arguments={})

        # create server and process request
        s = self.server(reset_master_mock=True, endpoints=self.endpoints)
        s._process_request(request, self.message_mock)

        # check calls
        master_mock_calls = [
            mock.call.proxy.publish(self.resp_running, self.task_uuid,
                                    self.reply_to),
            mock.call.proxy.publish(self.resp_progress(0.0), self.task_uuid,
                                    self.reply_to),
            mock.call.proxy.publish(self.resp_progress(1.0), self.task_uuid,
                                    self.reply_to),
            mock.call.proxy.publish(self.resp_success(5), self.task_uuid,
                                    self.reply_to)
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)

    def test_process_request(self):
        # Happy path: RUNNING then SUCCESS with the task's result.
        # create server and process request
        s = self.server(reset_master_mock=True, endpoints=self.endpoints)
        s._process_request(self.request(), self.message_mock)

        # check calls
        master_mock_calls = [
            mock.call.proxy.publish(self.resp_running, self.task_uuid,
                                    self.reply_to),
            mock.call.proxy.publish(self.resp_success(1), self.task_uuid,
                                    self.reply_to)
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)

    @mock.patch("taskflow.engines.worker_based.server.LOG.error")
    def test_process_request_parse_message_failure(self, mocked_error):
        # Missing message properties: nothing is published, error is logged.
        self.message_mock.properties = {}
        request = self.request()
        s = self.server(reset_master_mock=True)
        s._process_request(request, self.message_mock)
        self.assertEqual(self.master_mock.mock_calls, [])
        self.assertTrue(mocked_error.called)

    @mock.patch('taskflow.engines.worker_based.server.pu')
    def test_process_request_parse_failure(self, pu_mock):
        # A result that cannot be deserialized triggers a FAILURE reply.
        failure_dict = 'failure_dict'
        pu_mock.failure_to_dict.return_value = failure_dict
        pu_mock.failure_from_dict.side_effect = ValueError('Woot!')
        request = self.request(result=('failure', 1))

        # create server and process request
        s = self.server(reset_master_mock=True, endpoints=self.endpoints)
        s._process_request(request, self.message_mock)

        # check calls
        master_mock_calls = [
            mock.call.proxy.publish(self.resp_failure(failure_dict),
                                    self.task_uuid, self.reply_to)
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)

    @mock.patch('taskflow.engines.worker_based.server.pu')
    def test_process_request_endpoint_not_found(self, pu_mock):
        # An unknown task name triggers a FAILURE reply.
        failure_dict = 'failure_dict'
        pu_mock.failure_to_dict.return_value = failure_dict
        request = self.request(task='<unknown>')

        # create server and process request
        s = self.server(reset_master_mock=True, endpoints=self.endpoints)
        s._process_request(request, self.message_mock)

        # check calls
        master_mock_calls = [
            mock.call.proxy.publish(self.resp_failure(failure_dict),
                                    self.task_uuid, self.reply_to)
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)

    @mock.patch('taskflow.engines.worker_based.server.pu')
    def test_process_request_execution_failure(self, pu_mock):
        # An unknown action fails after the RUNNING state was published.
        failure_dict = 'failure_dict'
        pu_mock.failure_to_dict.return_value = failure_dict
        request = self.request(action='<unknown>')

        # create server and process request
        s = self.server(reset_master_mock=True, endpoints=self.endpoints)
        s._process_request(request, self.message_mock)

        # check calls
        master_mock_calls = [
            mock.call.proxy.publish(self.resp_running, self.task_uuid,
                                    self.reply_to),
            mock.call.proxy.publish(self.resp_failure(failure_dict),
                                    self.task_uuid, self.reply_to)
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)

    @mock.patch('taskflow.engines.worker_based.server.pu')
    def test_process_request_task_failure(self, pu_mock):
        # A task raising during execution yields RUNNING then FAILURE.
        failure_dict = 'failure_dict'
        pu_mock.failure_to_dict.return_value = failure_dict
        request = self.request(task='taskflow.tests.utils.TaskWithFailure',
                               arguments={})

        # create server and process request
        s = self.server(reset_master_mock=True, endpoints=self.endpoints)
        s._process_request(request, self.message_mock)

        # check calls
        master_mock_calls = [
            mock.call.proxy.publish(self.resp_running, self.task_uuid,
                                    self.reply_to),
            mock.call.proxy.publish(self.resp_failure(failure_dict),
                                    self.task_uuid, self.reply_to)
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)

    def test_start(self):
        self.server(reset_master_mock=True).start()

        # check calls
        master_mock_calls = [
            mock.call.proxy.start()
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)

    def test_wait(self):
        server = self.server(reset_master_mock=True)
        server.start()
        server.wait()

        # check calls
        master_mock_calls = [
            mock.call.proxy.start(),
            mock.call.proxy.wait()
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)

    def test_stop(self):
        # Stopping stops the proxy first, then shuts the executor down.
        self.server(reset_master_mock=True).stop()

        # check calls
        master_mock_calls = [
            mock.call.proxy.stop(),
            mock.call.executor.shutdown()
        ]
        self.assertEqual(self.master_mock.mock_calls, master_mock_calls)
| |
# import the necessary packages
import sys, os, cv, cv2, math, glob, csv
import numpy as np
from ssim import SSIM
from ssim.utils import get_gaussian_kernel
from PIL import Image, ImageOps
import random
import yaml
class FrameParser:
    """Locates known sprites in a video frame via OpenCV template matching,
    then picks a mutually-consistent subset of detections with simulated
    annealing.

    NOTE(review): GetFrameDescription reads the module-level globals
    `spriteNames` and `spritesDirectory` that are only defined inside the
    __main__ block below — this class only works when run as a script.
    """
    def __init__(self, threshold=0.8):
        # Minimum normalized template-match score to count as a detection.
        self.threshold = threshold

    #Returns a list of columns with a description of the sprites in the passed in frame Image
    def GetFrameDescription(self, frameNumber, frame, sprites,matching, overlapThreshold ,Tmin,Tmax,iterations,metaIterations, minDist = 4):
        # Each "column" is a CSV row: [frame, spritename, x, y, w, h, confidence]
        # with every field stringified.
        columns = []
        for sprite,spritename in zip(sprites,spriteNames):
            #Pattern matching right here
            result = cv2.matchTemplate(sprite[0], frame, cv2.TM_CCOEFF_NORMED)#Change cv2.TM_CCOEFF_NORMED for more accurate but slower results
            (h, w) = sprite[0].shape[:2]
            #Get only the max values (those above the threshold) that represent a local maximum
            maxVals = MaxLocalVals(result, w, h, self.threshold)
            for pt in maxVals:
                #Whether or not to add this to the list of columns
                addIt = True
                #The potential new column to add
                column = [str(frameNumber), sprite[1], str(pt[0]), str(pt[1]), str(w), str(h), str(result[pt[1]][pt[0]])]
                #The list of columns that this one forces out
                toRemove = []
                #Iterate through all current columns and ensure that no column subsumes this one and that this one subsumes no others
                for c in columns:
                    #print column,c,matching["./"+spritesDirectory+"/"+c[1]]["./"+spritesDirectory+"/"+column[1]],OverlapAmount(c, column)
                    #Feel free to add this back in, though it'll slow things down a bit it's good for when some sprites contain others
                    #print column, c, matching["./"+spritesDirectory+"/"+c[1]]["./"+spritesDirectory+"/"+column[1]],OverlapAmount(c, column)
                    '''
                    if matching["./"+spritesDirectory+"/"+c[1]]["./"+spritesDirectory+"/"+column[1]] > 0.06:
                        #Check if this column is inside one of the others
                        if not c in toRemove:
                            if OverlapAmount(c, column)>=overlapThreshold:
                                print 'removing',c,column,OverlapAmount(c, column)
                                # c is in column, remove c
                                cScore = float(c[6])
                                if cScore<result[pt[1]][pt[0]]:
                                    toRemove.append(c)
                                else:
                                    addIt = False
                                    break
                    '''
                    #Ensure that there isn't something better in this region that has a higher score
                    if addIt and not c in toRemove:
                        # Two detections closer than minDist pixels compete;
                        # only the higher-confidence one survives.
                        if abs(int(c[2])-int(column[2]))<=minDist and abs(int(c[3]) - int(column[3]) )<=minDist:
                            c6 = float(c[6])
                            if c6<result[pt[1]][pt[0]]:
                                toRemove.append(c)
                            else:
                                addIt = False
                                break
                #Removes those that need removing
                for r in toRemove:
                    columns.remove(r)
                #Add those that need adding
                if addIt:
                    columns.append(column)
        # Build the conflict graph: two detections are inconsistent when they
        # overlap enough AND their sprites look alike per the SSIM matrix.
        inconsistent = {tuple(c):set() for c in columns}
        for c1 in columns:
            c1 = tuple(c1)
            for c2 in columns:
                c2 = tuple(c2)
                if c1 != c2 and OverlapAmount(c1, c2)>=overlapThreshold and matching["./"+spritesDirectory+"/"+c1[1]]["./"+spritesDirectory+"/"+c2[1]] > 0.1:
                    if c2 not in inconsistent[c1]:
                        inconsistent[c1].add(c2)
                    if c1 not in inconsistent[c2]:
                        inconsistent[c2].add(c1)
        #print inconsistent
        bestSet = []
        bestScore = float('-inf')
        def neighbor(c):
            # Annealing move: drop one random detection, then greedily re-add
            # any of its conflict partners that are now conflict-free.
            n = {cc for cc in c}
            rando = random.choice(list(n))
            n.remove(rando)
            for ic in inconsistent[rando]:
                if len(inconsistent[ic].intersection(n)) == 0:
                    n.add(ic)
            return n
        # Initial solution: random maximal conflict-free subset.
        chosen = set()
        potentials = [tuple(c) for c in columns]
        while len(potentials) > 0:
            c = random.choice(potentials)
            chosen.add(c)
            potentials.remove(c)
            for cO in inconsistent[c]:
                if cO in potentials:
                    potentials.remove(cO)
        TFactor = -math.log(Tmax / Tmin)
        def getEnergy(s):
            # Lower is better: frame area minus the confidence-weighted area
            # covered by the chosen detections.
            energy = 0
            for c in s:
                energy += float(c[4])*float(c[5])*float(c[6])
            return frame.shape[0]*frame.shape[1]-energy
        ec = getEnergy(chosen)
        bestEnergy = ec
        bestSet = chosen
        if len(chosen) > 0:
            # metaIterations annealing restarts, each resuming from the best
            # solution found so far.
            for ii in range(metaIterations):
                ec = bestEnergy
                chosen = bestSet
                print ii
                for ti in range(iterations):
                    n = neighbor(chosen)
                    en = getEnergy(n)
                    # Exponential cooling schedule from Tmax down to Tmin.
                    T = Tmax*math.exp(TFactor*ti/iterations)
                    dE = en-ec
                    if dE > 0 and math.exp(-dE / T) < random.random():
                        pass #min(math.exp(min(T*0.5,-dE)/T),1) < random.random():#(dE > 0 and math.exp(-dE/T) < random.random()):
                    else:
                        chosen = n
                        ec = en
                    if ec < bestEnergy:
                        print 'newbest',ec
                        bestEnergy = ec
                        bestSet = chosen
        print frame.shape[0]*frame.shape[1]-bestEnergy
        columns = bestSet
        return columns
#Find the highest scoring top left corner within a range (in this case half width and half height)
#Find the highest scoring top left corner within a range (in this case half width and half height)
def MaxLocalVals(result, width, height, threshold):
    """Filter a cv2.matchTemplate score map down to local maxima.

    Keeps only candidate top-left corners scoring >= threshold that are not
    beaten by a higher-scoring kept point within half the template's
    width/height.  NOTE(review): the result is order-dependent — points are
    compared against earlier survivors only, so this is a heuristic rather
    than a strict non-maximum suppression.
    """
    # All points at or above the match threshold, as (x, y) pairs.
    loc = np.where( result >= threshold)
    locPoints = []
    for pt in zip(*loc[::-1]):
        maxVal = result[pt[1]][pt[0]]
        #New point to potentially add
        maxPnt = pt
        toRemove = []
        #Ensure that you have the best local points
        for pt2 in locPoints:
            if maxVal<result[pt2[1]][pt2[0]] and (abs(pt2[0]-pt[0])<width/2.0 and abs(pt2[1]-pt[1])<height/2.0):#Is this point already in locPoints and in range and is it better?
                maxPnt = pt2
            elif maxPnt==pt and (abs(pt2[0]-pt[0])<width/2.0 and abs(pt2[1]-pt[1])<height/2.0):#Inverse, Does this point beat out something in locPoints?
                toRemove.append(pt2)
        # Evict the points this one beat.
        for pt2 in toRemove:
            locPoints.remove(pt2)
        # Only keep pt if nothing nearby outranked it.
        if maxPnt== pt:
            locPoints.append(pt)
    return locPoints
#Check the amount of overlapping pixels between these two sprites, represented as columns
def OverlapAmount(column1, column2, printIt = False):
#Rectangle defined by column1
r1left = int(column1[2])
r1right = r1left + int(column1[4])
r1bottom = int(column1[3])
r1top = r1bottom + int(column1[5])
#Rectangle defined by column2
r2left = int(column2[2])
r2right = r2left + int(column2[4])
r2bottom = int(column2[3])
r2top = r2bottom + int(column2[5])
left = max(r1left, r2left);
right = min(r1right, r2right);
top = min(r1top, r2top);
bottom = max(r1bottom, r2bottom);
SI = max(0,right-left)*max(0,top-bottom)
SU = int(column1[4])*int(column1[5])+int(column2[4])*int(column2[5])-SI
#SI = max(0, max(l1[0], l2[0]) -min(r1[0], r2[0]))*max(0, max(l1[1], l2[1]) -min(r1[1], r2[1]))
# width1*height1 + width2*height2
#SU = ((r1[0]-l1[0])*(r1[1]-l1[1]))+((r2[0]-l2[0])*(r2[1]-l2[1]))-SI
if SU==0:
return 0
#print SI, SU
return float(SI) / float(SU)
if __name__ == '__main__':
    # Driver: explode a video into frames with ffmpeg, match every sprite
    # against every frame, and write the detections to a CSV file.
    FFMPEG_BIN = "ffmpeg" # Use on Linux and Mac OS
    #FFMPEG_BIN = "ffmpeg.exe" # Use on Windows
    import yaml
    inputfile = sys.argv[1]
    with open(inputfile,'rb') as f:
        # NOTE(review): yaml.load without an explicit Loader executes
        # arbitrary tags — unsafe on untrusted config; prefer yaml.safe_load.
        inputfile = yaml.load(f)
    #Example call: python VideoParser.py Gameplay.mp4 sprites 1
    video = inputfile['video']
    spritesDirectory = inputfile['spritesDirectory']
    framesPerSecond = inputfile['framesPerSecond']
    threshold = inputfile['imageThreshold']
    #The folder that the frame images will end up in
    folder = inputfile['outputFolder']
    if not os.path.exists(folder):
        os.makedirs(folder)
    scale = inputfile['videoScaling']
    overlapThreshold = inputfile['overlapThreshold']
    # Simulated-annealing parameters forwarded to GetFrameDescription.
    Tmin = inputfile['Tmin']
    Tmax = inputfile['Tmax']
    iterations = inputfile['iterations']
    metaIterations = inputfile['metaIterations']
    #Run the parser to generate the frame images
    fname = folder+"frameDescriptions{}_{}_{}.csv".format(video,framesPerSecond,threshold)
    if not os.path.isfile(fname):
        # Extract scaled frame images; skipped if the CSV already exists.
        os.system(FFMPEG_BIN+ " -i "+video+" -r {}".format(framesPerSecond)+' -vf scale=iw*{}'.format(scale) +':ih*{}'.format(scale) + ' ' +folder+"image-%08d.png")#" -vf scale="+widthOfFrame+":"+heightOfFrame+
    # sprites: list of (grayscale ndarray, basename); spriteNames: filename
    # -> PIL grayscale image, used below for SSIM similarity.
    sprites = []
    spriteNames ={}
    directory = "./"+spritesDirectory+"/"
    for filename in glob.glob(directory+"*.png"):
        spriteNames[filename] = ImageOps.grayscale(Image.open(filename))
        img_rgb = cv2.imread(filename)
        sprite_gray = cv2.cvtColor(img_rgb, cv2.COLOR_BGR2GRAY)#Gray is faster to match
        splits = filename.split("/")
        spritename = splits[len(splits)-1]
        sprites.append((sprite_gray, spritename))
    #Initialize Frame Parser
    fp = FrameParser(threshold)
    target = open(fname,"wb")
    writer = csv.writer(target)
    column = ["frame", "spritename", "x", "y", "w", "h", "confidence"]
    writer.writerow(column)
    gaussian_kernel_sigma = 1.5
    gaussian_kernel_width = 11
    gaussian_kernel_1d = get_gaussian_kernel(gaussian_kernel_width, gaussian_kernel_sigma)
    # Pairwise SSIM similarity between sprites, normalized by the maximum
    # observed value; used to decide when two detections conflict.
    matching = {}
    maxVal = float('-inf')
    for sprite1 in spriteNames:
        matching[sprite1] = {}
        for sprite2 in spriteNames:
            matching[sprite1][sprite2] = SSIM(spriteNames[sprite1], gaussian_kernel_1d).ssim_value(spriteNames[sprite2])#np.max(sp.signal.correlate2d(sprites[sprite1],sprites[sprite2]))
            if matching[sprite1][sprite2] > maxVal:
                maxVal = matching[sprite1][sprite2]
    for s1 in matching:
        for s2 in matching[s1]:
            matching[s1][s2] /= maxVal
            print s1, s2, matching[s1][s2]
    # Parse every extracted frame and append its detections to the CSV.
    for frameFile in glob.glob(folder+"*.png"):
        print "Frames: "+str(frameFile)
        frame = cv2.imread(frameFile)
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        splits =frameFile.split("image-")
        frameNumber = int(splits[len(splits)-1][:-4])
        columns = fp.GetFrameDescription(frameNumber,frame_gray,sprites,matching, overlapThreshold ,Tmin,Tmax,iterations,metaIterations)
        for c in columns:
            writer.writerow(c)
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
###
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###
import mock
import pytest
from copy import deepcopy
from hpe_test_utils import OneViewBaseTest
from oneview_module_loader import LogicalSwitchModule
FAKE_MSG_ERROR = 'Fake message error'

DEFAULT_SWITCH_NAME = 'Test Logical Switch'

# Canned logical switch as OneView would return it from its REST API.
LOGICAL_SWITCH_FROM_ONEVIEW = dict(
    name=DEFAULT_SWITCH_NAME,
    uri='/rest/logical-switches/f0d7ad37-2053-46ac-bb11-4ebdd079bb66',
    logicalSwitchGroupUri='/rest/logical-switch-groups/af370d9a-f2f4-4beb-a1f1-670930d6741d',
    switchCredentialConfiguration=[{'logicalSwitchManagementHost': '172.16.1.1'},
                                   {'logicalSwitchManagementHost': '172.16.1.2'}],
    scopeUris=[]
)

# Ansible module params for the 'present' state (create/no-op path).
PARAMS_FOR_PRESENT = dict(
    config='config.json',
    state='present',
    data=dict(
        logicalSwitch=dict(
            name=DEFAULT_SWITCH_NAME,
            logicalSwitchGroupName="Logical Switch Group Name",
            switchCredentialConfiguration=[]
        ),  # assume it contains the switches configuration
        logicalSwitchCredentials=[]
    )  # assume this list contains the switches credentials
)

# Params for the 'updated' state: renames the switch, keeps its group
# and switch configuration.
PARAMS_FOR_UPDATE = dict(
    config='config.json',
    state='updated',
    data=dict(
        logicalSwitch=dict(
            name=DEFAULT_SWITCH_NAME,
            newName='Test Logical Switch - Renamed'
        ),
        logicalSwitchCredentials=[]
    )  # assume this list contains the switches credentials
)

# Params for the 'updated' state with explicit switches, group and scopes.
PARAMS_FOR_UPDATE_WITH_SWITCHES_AND_GROUPS = dict(
    config='config.json',
    state='updated',
    data=dict(
        logicalSwitch=dict(
            name=DEFAULT_SWITCH_NAME,
            logicalSwitchGroupName='Logical Switch Group Name',
            switchCredentialConfiguration=[
                {'logicalSwitchManagementHost': '172.16.1.3'},
                {'logicalSwitchManagementHost': '172.16.1.4'}
            ]
        ),
        logicalSwitchCredentials=[],
        scopeUris=['/rest/scopes/fake']
    )  # assume this list contains the switches credentials
)

# Params for deleting the switch.
PARAMS_FOR_ABSENT = dict(
    config='config.json',
    state='absent',
    data=dict(logicalSwitch=dict(name=DEFAULT_SWITCH_NAME))
)

# Params for refreshing the switch's state from the hardware.
PARAMS_FOR_REFRESH = dict(
    config='config.json',
    state='refreshed',
    data=dict(logicalSwitch=dict(name=DEFAULT_SWITCH_NAME))
)
@pytest.mark.resource(TestLogicalSwitchModule='logical_switches')
class TestLogicalSwitchModule(OneViewBaseTest):
@pytest.fixture(autouse=True)
def specific_set_up(self, setUp):
self.logical_switch_group_client = self.mock_ov_client.logical_switch_groups
def test_should_create_new_logical_switch(self):
self.resource.get_by.return_value = []
self.resource.create.return_value = LOGICAL_SWITCH_FROM_ONEVIEW
self.logical_switch_group_client.get_by.return_value = [{'uri': '/rest/logical-switch-groups/aa-bb-cc'}]
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
LogicalSwitchModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=LogicalSwitchModule.MSG_CREATED,
ansible_facts=dict(logical_switch=LOGICAL_SWITCH_FROM_ONEVIEW)
)
def test_should_not_create_when_logical_switch_already_exist(self):
self.resource.get_by.return_value = [LOGICAL_SWITCH_FROM_ONEVIEW]
self.logical_switch_group_client.get_by.return_value = [{'uri': '/rest/logical-switch-groups/aa-bb-cc'}]
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
LogicalSwitchModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=LogicalSwitchModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(logical_switch=LOGICAL_SWITCH_FROM_ONEVIEW)
)
def test_should_fail_when_group_not_found(self):
self.resource.get_by.return_value = []
self.logical_switch_group_client.get_by.return_value = []
self.resource.create.return_value = LOGICAL_SWITCH_FROM_ONEVIEW
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
LogicalSwitchModule().run()
self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=LogicalSwitchModule.MSG_LOGICAL_SWITCH_GROUP_NOT_FOUND)
def test_should_update_logical_switch(self):
self.resource.get_by.side_effect = [[LOGICAL_SWITCH_FROM_ONEVIEW], []]
self.resource.update.return_value = LOGICAL_SWITCH_FROM_ONEVIEW
self.logical_switch_group_client.get_by.return_value = [{'uri': '/rest/logical-switch-groups/aa-bb-cc'}]
self.mock_ansible_module.params = PARAMS_FOR_UPDATE
LogicalSwitchModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=LogicalSwitchModule.MSG_UPDATED,
ansible_facts=dict(logical_switch=LOGICAL_SWITCH_FROM_ONEVIEW)
)
def test_should_not_update_when_logical_switch_not_found(self):
    """Fails the module when asked to update a switch that cannot be found."""
    self.resource.get_by.side_effect = [[], []]
    self.resource.update.return_value = LOGICAL_SWITCH_FROM_ONEVIEW
    self.logical_switch_group_client.get_by.return_value = [{'uri': '/rest/logical-switch-groups/aa-bb-cc'}]
    self.mock_ansible_module.params = PARAMS_FOR_UPDATE
    LogicalSwitchModule().run()
    self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=LogicalSwitchModule.MSG_LOGICAL_SWITCH_NOT_FOUND)
def test_should_fail_when_group_not_found_for_update(self):
    """Fails the update when the new logical switch group cannot be resolved."""
    self.resource.get_by.side_effect = [[LOGICAL_SWITCH_FROM_ONEVIEW], []]
    self.resource.update.return_value = LOGICAL_SWITCH_FROM_ONEVIEW
    # Group lookup comes back empty -> group-not-found failure.
    self.logical_switch_group_client.get_by.return_value = []
    self.mock_ansible_module.params = PARAMS_FOR_UPDATE_WITH_SWITCHES_AND_GROUPS
    LogicalSwitchModule().run()
    self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=LogicalSwitchModule.MSG_LOGICAL_SWITCH_GROUP_NOT_FOUND)
def test_should_fail_on_update_when_logical_switch_attribute_missing(self):
    """Fails when the params lack the required 'logicalSwitch' sub-dict."""
    params = deepcopy(PARAMS_FOR_UPDATE_WITH_SWITCHES_AND_GROUPS)
    # Remove the mandatory attribute to exercise the validation failure.
    del params['data']['logicalSwitch']
    self.mock_ansible_module.params = params
    LogicalSwitchModule().run()
    self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=LogicalSwitchModule.MSG_LOGICAL_SWITCH_NOT_FOUND)
def test_should_update_with_current_switches_and_group_when_not_provided(self):
    """When params omit switches/group, the update reuses the values already on the resource."""
    self.resource.get_by.side_effect = [[LOGICAL_SWITCH_FROM_ONEVIEW], []]
    self.resource.update.return_value = LOGICAL_SWITCH_FROM_ONEVIEW
    self.mock_ansible_module.params = PARAMS_FOR_UPDATE
    LogicalSwitchModule().run()
    # Expected payload: new name, but group URI and management hosts carried
    # over from the existing OneView resource.
    data_for_update = {
        'logicalSwitch': {
            'name': 'Test Logical Switch - Renamed',
            'uri': '/rest/logical-switches/f0d7ad37-2053-46ac-bb11-4ebdd079bb66',
            'logicalSwitchGroupUri': '/rest/logical-switch-groups/af370d9a-f2f4-4beb-a1f1-670930d6741d',
            'switchCredentialConfiguration': [{'logicalSwitchManagementHost': '172.16.1.1'},
                                              {'logicalSwitchManagementHost': '172.16.1.2'}],
        },
        'logicalSwitchCredentials': []
    }
    self.resource.update.assert_called_once_with(data_for_update)
def test_should_update_with_given_switches_and_group_when_provided(self):
    """When params supply switches/group, the update uses exactly those values."""
    self.resource.get_by.side_effect = [[LOGICAL_SWITCH_FROM_ONEVIEW], []]
    self.resource.update.return_value = LOGICAL_SWITCH_FROM_ONEVIEW
    self.logical_switch_group_client.get_by.return_value = [{'uri': '/rest/logical-switch-groups/aa-bb-cc'}]
    self.mock_ansible_module.params = PARAMS_FOR_UPDATE_WITH_SWITCHES_AND_GROUPS
    LogicalSwitchModule().run()
    # Expected payload: group URI resolved from the group client, management
    # hosts taken from the provided params.
    data_for_update = {
        'logicalSwitch': {
            'name': 'Test Logical Switch',
            'uri': LOGICAL_SWITCH_FROM_ONEVIEW['uri'],
            'logicalSwitchGroupUri': '/rest/logical-switch-groups/aa-bb-cc',
            'switchCredentialConfiguration': [{'logicalSwitchManagementHost': '172.16.1.3'},
                                              {'logicalSwitchManagementHost': '172.16.1.4'}],
        },
        'logicalSwitchCredentials': []
    }
    self.resource.update.assert_called_once_with(data_for_update)
def test_should_delete_logical_switch(self):
    """Deletes an existing switch and exits changed=True."""
    self.resource.get_by.return_value = [LOGICAL_SWITCH_FROM_ONEVIEW]
    self.mock_ansible_module.params = PARAMS_FOR_ABSENT
    LogicalSwitchModule().run()
    self.mock_ansible_module.exit_json.assert_called_once_with(
        changed=True,
        msg=LogicalSwitchModule.MSG_DELETED
    )
def test_should_do_nothing_when_logical_switch_not_exist(self):
    """Exits changed=False (already absent) when deleting a non-existent switch."""
    self.resource.get_by.return_value = []
    self.mock_ansible_module.params = PARAMS_FOR_ABSENT
    LogicalSwitchModule().run()
    self.mock_ansible_module.exit_json.assert_called_once_with(
        changed=False,
        msg=LogicalSwitchModule.MSG_ALREADY_ABSENT
    )
def test_should_refresh_logical_switch(self):
    """Refreshes an existing switch and exits changed=True with the refreshed facts."""
    self.resource.get_by.return_value = [LOGICAL_SWITCH_FROM_ONEVIEW]
    self.resource.refresh.return_value = LOGICAL_SWITCH_FROM_ONEVIEW
    self.mock_ansible_module.params = PARAMS_FOR_REFRESH
    LogicalSwitchModule().run()
    self.mock_ansible_module.exit_json.assert_called_once_with(
        changed=True,
        ansible_facts=dict(logical_switch=LOGICAL_SWITCH_FROM_ONEVIEW),
        msg=LogicalSwitchModule.MSG_REFRESHED
    )
def test_should_fail_when_logical_switch_not_found(self):
    """Fails the refresh when the target switch cannot be found."""
    self.resource.get_by.return_value = []
    self.resource.refresh.return_value = LOGICAL_SWITCH_FROM_ONEVIEW
    self.mock_ansible_module.params = PARAMS_FOR_REFRESH
    LogicalSwitchModule().run()
    self.mock_ansible_module.fail_json.assert_called_once_with(exception=mock.ANY, msg=LogicalSwitchModule.MSG_LOGICAL_SWITCH_NOT_FOUND)
def test_update_scopes_when_different(self):
    """Issues a scopeUris PATCH when the requested scopes differ from the resource's."""
    self.resource.get_by.side_effect = [[LOGICAL_SWITCH_FROM_ONEVIEW], []]
    self.resource.update.return_value = LOGICAL_SWITCH_FROM_ONEVIEW
    self.logical_switch_group_client.get_by.return_value = [{'uri': '/rest/logical-switch-groups/aa-bb-cc'}]
    self.mock_ansible_module.params = PARAMS_FOR_UPDATE_WITH_SWITCHES_AND_GROUPS
    self.resource.patch.return_value = LOGICAL_SWITCH_FROM_ONEVIEW
    LogicalSwitchModule().run()
    data_for_update = {
        'logicalSwitch': {
            'name': 'Test Logical Switch',
            'uri': LOGICAL_SWITCH_FROM_ONEVIEW['uri'],
            'logicalSwitchGroupUri': '/rest/logical-switch-groups/aa-bb-cc',
            'switchCredentialConfiguration': [{'logicalSwitchManagementHost': '172.16.1.3'},
                                              {'logicalSwitchManagementHost': '172.16.1.4'}],
        },
        'logicalSwitchCredentials': []
    }
    self.resource.update.assert_called_once_with(data_for_update)
    # The scope update goes out as a JSON-patch 'replace' on /scopeUris.
    self.resource.patch.assert_called_once_with('/rest/logical-switches/f0d7ad37-2053-46ac-bb11-4ebdd079bb66',
                                                operation='replace',
                                                path='/scopeUris',
                                                value=['/rest/scopes/fake'])
def test_should_not_update_scopes_when_same(self):
    """No scope PATCH should be issued when the resource's scopeUris already match."""
    LS_WITH_SCOPE = LOGICAL_SWITCH_FROM_ONEVIEW.copy()
    LS_WITH_SCOPE['scopeUris'] = ['/rest/birl']
    self.resource.get_by.side_effect = [[LS_WITH_SCOPE], []]
    self.resource.update.return_value = LS_WITH_SCOPE
    self.logical_switch_group_client.get_by.return_value = [{'uri': '/rest/logical-switch-groups/aa-bb-cc'}]
    self.mock_ansible_module.params = PARAMS_FOR_UPDATE_WITH_SWITCHES_AND_GROUPS
    self.resource.patch.return_value = LS_WITH_SCOPE
    LogicalSwitchModule().run()
    data_for_update = {
        'logicalSwitch': {
            'name': 'Test Logical Switch',
            'uri': LS_WITH_SCOPE['uri'],
            'logicalSwitchGroupUri': '/rest/logical-switch-groups/aa-bb-cc',
            'switchCredentialConfiguration': [{'logicalSwitchManagementHost': '172.16.1.3'},
                                              {'logicalSwitchManagementHost': '172.16.1.4'}],
        },
        'logicalSwitchCredentials': []
    }
    self.resource.update.assert_called_once_with(data_for_update)
    # BUG FIX: `patch.not_been_called()` is not a Mock assertion method --
    # calling any made-up attribute on a Mock silently records a call and
    # "passes", so the original line verified nothing.  assert_not_called()
    # actually enforces the intent of this test.
    # NOTE(review): if this now fails, the module really is patching scopes
    # here, meaning the fixture's scopeUris ('/rest/birl') does not match the
    # scopes requested in PARAMS_FOR_UPDATE_WITH_SWITCHES_AND_GROUPS -- the
    # fixture would need to be aligned with the params.
    self.resource.patch.assert_not_called()
# Allow running this test module directly via `python <file>`; delegates to pytest.
if __name__ == '__main__':
    pytest.main([__file__])
| |
#!/usr/bin/env python3
"""
f2py2e - Fortran to Python C/API generator. 2nd Edition.
See __usage__ below.
Copyright 1999--2011 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@cens.ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/05/06 08:31:19 $
Pearu Peterson
"""
import sys
import os
import pprint
import re
from . import crackfortran
from . import rules
from . import cb_rules
from . import auxfuncs
from . import cfuncs
from . import f90mod_rules
from . import __version__
from . import capi_maps
f2py_version = __version__.version
errmess = sys.stderr.write
# outmess=sys.stdout.write
show = pprint.pprint
outmess = auxfuncs.outmess
try:
from numpy import __version__ as numpy_version
except ImportError:
numpy_version = 'N/A'
__usage__ = """\
Usage:
1) To construct extension module sources:
f2py [<options>] <fortran files> [[[only:]||[skip:]] \\
<fortran functions> ] \\
[: <fortran files> ...]
2) To compile fortran files and build extension modules:
f2py -c [<options>, <build_flib options>, <extra options>] <fortran files>
3) To generate signature files:
f2py -h <filename.pyf> ...< same options as in (1) >
Description: This program generates a Python C/API file (<modulename>module.c)
that contains wrappers for given fortran functions so that they
can be called from Python. With the -c option the corresponding
extension modules are built.
Options:
--2d-numpy Use numpy.f2py tool with NumPy support. [DEFAULT]
--2d-numeric Use f2py2e tool with Numeric support.
--2d-numarray Use f2py2e tool with Numarray support.
--g3-numpy Use 3rd generation f2py from the separate f2py package.
[NOT AVAILABLE YET]
-h <filename> Write signatures of the fortran routines to file <filename>
and exit. You can then edit <filename> and use it instead
of <fortran files>. If <filename>==stdout then the
signatures are printed to stdout.
<fortran functions> Names of fortran routines for which Python C/API
functions will be generated. Default is all that are found
in <fortran files>.
<fortran files> Paths to fortran/signature files that will be scanned for
<fortran functions> in order to determine their signatures.
skip: Ignore fortran functions that follow until `:'.
only: Use only fortran functions that follow until `:'.
: Get back to <fortran files> mode.
-m <modulename> Name of the module; f2py generates a Python/C API
file <modulename>module.c or extension module <modulename>.
Default is 'untitled'.
--[no-]lower Do [not] lower the cases in <fortran files>. By default,
--lower is assumed with -h key, and --no-lower without -h key.
--build-dir <dirname> All f2py generated files are created in <dirname>.
Default is tempfile.mkdtemp().
--overwrite-signature Overwrite existing signature file.
--[no-]latex-doc Create (or not) <modulename>module.tex.
Default is --no-latex-doc.
--short-latex Create 'incomplete' LaTeX document (without commands
\\documentclass, \\tableofcontents, and \\begin{document},
\\end{document}).
--[no-]rest-doc Create (or not) <modulename>module.rst.
Default is --no-rest-doc.
--debug-capi Create C/API code that reports the state of the wrappers
during runtime. Useful for debugging.
--[no-]wrap-functions Create Fortran subroutine wrappers to Fortran 77
functions. --wrap-functions is default because it ensures
maximum portability/compiler independence.
--include-paths <path1>:<path2>:... Search include files from the given
directories.
--help-link [..] List system resources found by system_info.py. See also
--link-<resource> switch below. [..] is optional list
of resources names. E.g. try 'f2py --help-link lapack_opt'.
--f2cmap <filename> Load Fortran-to-Python KIND specification from the given
file. Default: .f2py_f2cmap in current directory.
--quiet Run quietly.
--verbose Run with extra verbosity.
-v Print f2py version ID and exit.
numpy.distutils options (only effective with -c):
--fcompiler= Specify Fortran compiler type by vendor
--compiler= Specify C compiler type (as defined by distutils)
--help-fcompiler List available Fortran compilers and exit
--f77exec= Specify the path to F77 compiler
--f90exec= Specify the path to F90 compiler
--f77flags= Specify F77 compiler flags
--f90flags= Specify F90 compiler flags
--opt= Specify optimization flags
--arch= Specify architecture specific optimization flags
--noopt Compile without optimization
--noarch Compile without arch-dependent optimization
--debug Compile with debugging information
Extra options (only effective with -c):
--link-<resource> Link extension module with <resource> as defined
by numpy.distutils/system_info.py. E.g. to link
with optimized LAPACK libraries (vecLib on MacOSX,
ATLAS elsewhere), use --link-lapack_opt.
See also --help-link switch.
-L/path/to/lib/ -l<libname>
-D<define> -U<name>
-I/path/to/include/
<filename>.o <filename>.so <filename>.a
Using the following macros may be required with non-gcc Fortran
compilers:
-DPREPEND_FORTRAN -DNO_APPEND_FORTRAN -DUPPERCASE_FORTRAN
-DUNDERSCORE_G77
When using -DF2PY_REPORT_ATEXIT, a performance report of F2PY
interface is printed out at exit (platforms: Linux).
When using -DF2PY_REPORT_ON_ARRAY_COPY=<int>, a message is
sent to stderr whenever F2PY interface makes a copy of an
array. Integer <int> sets the threshold for array sizes when
a message should be shown.
Version: %s
numpy Version: %s
Requires: Python 3.5 or higher.
License: NumPy license (see LICENSE.txt in the NumPy source code)
Copyright 1999 - 2011 Pearu Peterson all rights reserved.
http://cens.ioc.ee/projects/f2py2e/""" % (f2py_version, numpy_version)
def scaninputline(inputline):
    """Parse an f2py command line into ``(files, options)``.

    Scans *inputline* token by token: recognized flags set option values;
    ``-h``, ``-m``, ``--build-dir``, ``--coutput``, ``--f2py-wrapper-output``
    and ``--f2cmap`` consume the following token; remaining tokens are
    treated as input files (or as skip:/only: function names, depending on
    the current mode).

    Returns
    -------
    files : list of str
        Readable input files (unreadable ones are skipped with a message).
    options : dict
        Accumulated settings consumed by callcrackfortran()/run_main().
    """
    files, skipfuncs, onlyfuncs, debug = [], [], [], []
    # f: token mode (1 = files, -1 = skip: names, 0 = only: names).
    # f2..f10: "next token is the value of option X" latches for
    # -h, -m, --show-compilers, --build-dir, --include-paths, --coutput,
    # --f2py-wrapper-output and --f2cmap respectively.
    f, f2, f3, f5, f6, f7, f8, f9, f10 = 1, 0, 0, 0, 0, 0, 0, 0, 0
    verbose = 1
    dolc = -1  # -1 means "decide the --lower default later from -h presence"
    dolatexdoc = 0
    dorestdoc = 0
    wrapfuncs = 1
    buildpath = '.'
    include_paths = []
    signsfile, modulename = None, None
    options = {'buildpath': buildpath,
               'coutput': None,
               'f2py_wrapper_output': None}
    for l in inputline:
        if l == '':
            pass
        elif l == 'only:':
            f = 0
        elif l == 'skip:':
            f = -1
        elif l == ':':
            f = 1
        elif l[:8] == '--debug-':
            debug.append(l[8:])
        elif l == '--lower':
            dolc = 1
        elif l == '--build-dir':
            f6 = 1
        elif l == '--no-lower':
            dolc = 0
        elif l == '--quiet':
            verbose = 0
        elif l == '--verbose':
            verbose += 1
        elif l == '--latex-doc':
            dolatexdoc = 1
        elif l == '--no-latex-doc':
            dolatexdoc = 0
        elif l == '--rest-doc':
            dorestdoc = 1
        elif l == '--no-rest-doc':
            dorestdoc = 0
        elif l == '--wrap-functions':
            wrapfuncs = 1
        elif l == '--no-wrap-functions':
            wrapfuncs = 0
        elif l == '--short-latex':
            options['shortlatex'] = 1
        elif l == '--coutput':
            f8 = 1
        elif l == '--f2py-wrapper-output':
            f9 = 1
        elif l == '--f2cmap':
            f10 = 1
        elif l == '--overwrite-signature':
            options['h-overwrite'] = 1
        elif l == '-h':
            f2 = 1
        elif l == '-m':
            f3 = 1
        elif l[:2] == '-v':
            print(f2py_version)
            sys.exit()
        elif l == '--show-compilers':
            f5 = 1
        elif l[:8] == '-include':
            cfuncs.outneeds['userincludes'].append(l[9:-1])
            cfuncs.userincludes[l[9:-1]] = '#include ' + l[8:]
        # BUG FIX: the original tested `l[:15] in '--include_paths'`, i.e. a
        # *substring* check, so any token whose first 15 characters happened
        # to be a substring of '--include_paths' (for example a lone '-') was
        # silently treated as this deprecated option and swallowed the next
        # token. Compare for equality instead.
        elif l == '--include_paths':
            outmess(
                'f2py option --include_paths is deprecated, use --include-paths instead.\n')
            f7 = 1
        elif l == '--include-paths':
            f7 = 1
        elif l[0] == '-':
            errmess('Unknown option %s\n' % repr(l))
            sys.exit()
        elif f2:
            f2 = 0
            signsfile = l
        elif f3:
            f3 = 0
            modulename = l
        elif f6:
            f6 = 0
            buildpath = l
        elif f7:
            f7 = 0
            include_paths.extend(l.split(os.pathsep))
        elif f8:
            f8 = 0
            options["coutput"] = l
        elif f9:
            f9 = 0
            options["f2py_wrapper_output"] = l
        elif f10:
            f10 = 0
            options["f2cmap_file"] = l
        elif f == 1:
            # Plain token in file mode: accept it only if it can be opened.
            try:
                with open(l):
                    pass
                files.append(l)
            except IOError as detail:
                errmess('IOError: %s. Skipping file "%s".\n' %
                        (str(detail), l))
        elif f == -1:
            skipfuncs.append(l)
        elif f == 0:
            onlyfuncs.append(l)
    if not f5 and not files and not modulename:
        print(__usage__)
        sys.exit()
    if not os.path.isdir(buildpath):
        # NOTE(review): upstream quirk preserved -- the message is emitted
        # only when *not* verbose; confirm before "fixing" the condition.
        if not verbose:
            # Added the missing trailing newline to the log message.
            outmess('Creating build directory %s\n' % (buildpath))
        os.mkdir(buildpath)
    if signsfile:
        signsfile = os.path.join(buildpath, signsfile)
    if signsfile and os.path.isfile(signsfile) and 'h-overwrite' not in options:
        errmess(
            'Signature file "%s" exists!!! Use --overwrite-signature to overwrite.\n' % (signsfile))
        sys.exit()
    options['debug'] = debug
    options['verbose'] = verbose
    # --lower defaults to on when writing a signature file (-h), off otherwise.
    if dolc == -1 and not signsfile:
        options['do-lower'] = 0
    else:
        options['do-lower'] = dolc
    if modulename:
        options['module'] = modulename
    if signsfile:
        options['signsfile'] = signsfile
    if onlyfuncs:
        options['onlyfuncs'] = onlyfuncs
    if skipfuncs:
        options['skipfuncs'] = skipfuncs
    options['dolatexdoc'] = dolatexdoc
    options['dorestdoc'] = dorestdoc
    options['wrapfuncs'] = wrapfuncs
    options['buildpath'] = buildpath
    options['include_paths'] = include_paths
    options.setdefault('f2cmap_file', None)
    return files, options
def callcrackfortran(files, options):
    """Run crackfortran over *files* and post-process the parsed blocks.

    Pushes the parsed *options* into the crackfortran/rules module globals,
    optionally writes the signature (.pyf) file, and fills in per-module
    'coutput' / 'f2py_wrapper_output' file names on each returned block.
    """
    rules.options = options
    crackfortran.debug = options['debug']
    crackfortran.verbose = options['verbose']
    if 'module' in options:
        crackfortran.f77modulename = options['module']
    if 'skipfuncs' in options:
        crackfortran.skipfuncs = options['skipfuncs']
    if 'onlyfuncs' in options:
        crackfortran.onlyfuncs = options['onlyfuncs']
    crackfortran.include_paths[:] = options['include_paths']
    crackfortran.dolowercase = options['do-lower']
    postlist = crackfortran.crackfortran(files)
    if 'signsfile' in options:
        outmess('Saving signatures to file "%s"\n' % (options['signsfile']))
        pyf = crackfortran.crack2fortran(postlist)
        # A signature path ending in 'stdout' means "print to stdout".
        if options['signsfile'][-6:] == 'stdout':
            sys.stdout.write(pyf)
        else:
            with open(options['signsfile'], 'w') as f:
                f.write(pyf)
    # Default generated C file is <module>module.c unless --coutput was given.
    if options["coutput"] is None:
        for mod in postlist:
            mod["coutput"] = "%smodule.c" % mod["name"]
    else:
        for mod in postlist:
            mod["coutput"] = options["coutput"]
    # Same policy for the generated Fortran wrapper file name.
    if options["f2py_wrapper_output"] is None:
        for mod in postlist:
            mod["f2py_wrapper_output"] = "%s-f2pywrappers.f" % mod["name"]
    else:
        for mod in postlist:
            mod["f2py_wrapper_output"] = options["f2py_wrapper_output"]
    return postlist
def buildmodules(lst):
    """Build the C/API wrapper sources for every block in *lst*.

    Callback (``__user__``) blocks are handed to cb_rules; every other block
    becomes a candidate module. Modules that are ``use``-d by another listed
    module are skipped (they are built as part of their user). Returns a dict
    mapping module name -> accumulated build artifacts from rules.buildmodule.
    """
    cfuncs.buildcfuncs()
    outmess('Building modules...\n')
    modules = []
    mnames = []
    isusedby = {}
    for item in lst:
        if '__user__' in item['name']:
            # Callback blocks are compiled separately and never become modules.
            cb_rules.buildcallbacks(item)
            continue
        if 'use' in item:
            # Record reverse dependencies: used-module -> [user names].
            for used in item['use'].keys():
                isusedby.setdefault(used, []).append(item['name'])
        modules.append(item)
        mnames.append(item['name'])
    ret = {}
    for name, module in zip(mnames, modules):
        if name in isusedby:
            outmess('\tSkipping module "%s" which is used by %s.\n' % (
                name, ','.join(['"%s"' % s for s in isusedby[name]])))
            continue
        um = []
        if 'use' in module:
            for used in module['use'].keys():
                if used in isusedby and used in mnames:
                    um.append(modules[mnames.index(used)])
                else:
                    outmess(
                        '\tModule "%s" uses nonexisting "%s" which will be ignored.\n' % (name, used))
        ret[name] = {}
        dict_append(ret[name], rules.buildmodule(module, um))
    return ret
def dict_append(d_out, d_in):
    """Merge *d_in* into *d_out* in place, accumulating values into lists.

    Each key of *d_in* gets a list bucket in *d_out* (created empty when
    missing); list values are concatenated onto the bucket, scalar values
    are appended to it.
    """
    for key, value in d_in.items():
        bucket = d_out.setdefault(key, [])
        if isinstance(value, list):
            # Concatenation builds a fresh list, matching the original code.
            d_out[key] = bucket + value
        else:
            bucket.append(value)
def run_main(comline_list):
    """
    Equivalent to running::
        f2py <args>
    where ``<args>=string.join(<list>,' ')``, but in Python. Unless
    ``-h`` is used, this function returns a dictionary containing
    information on generated modules and their dependencies on source
    files. For example, the command ``f2py -m scalar scalar.f`` can be
    executed from Python as follows
    You cannot build extension modules with this function, that is,
    using ``-c`` is not allowed. Use ``compile`` command instead
    Examples
    --------
    .. include:: run_main_session.dat
    :literal:
    """
    crackfortran.reset_global_f2py_vars()
    # Locate the fortranobject C support sources shipped alongside f2py.
    f2pydir = os.path.dirname(os.path.abspath(cfuncs.__file__))
    fobjhsrc = os.path.join(f2pydir, 'src', 'fortranobject.h')
    fobjcsrc = os.path.join(f2pydir, 'src', 'fortranobject.c')
    files, options = scaninputline(comline_list)
    auxfuncs.options = options
    capi_maps.load_f2cmap_file(options['f2cmap_file'])
    postlist = callcrackfortran(files, options)
    # Reverse-dependency map: used-module name -> list of user module names.
    isusedby = {}
    for i in range(len(postlist)):
        if 'use' in postlist[i]:
            for u in postlist[i]['use'].keys():
                if u not in isusedby:
                    isusedby[u] = []
                isusedby[u].append(postlist[i]['name'])
    for i in range(len(postlist)):
        # Announce __user__ (callback) modules that are built via their users.
        if postlist[i]['block'] == 'python module' and '__user__' in postlist[i]['name']:
            if postlist[i]['name'] in isusedby:
                # if not quiet:
                outmess('Skipping Makefile build for module "%s" which is used by %s\n' % (
                    postlist[i]['name'], ','.join(['"%s"' % s for s in isusedby[postlist[i]['name']]])))
    if 'signsfile' in options:
        # -h mode: only the signature file was requested; stop here.
        if options['verbose'] > 1:
            outmess(
                'Stopping. Edit the signature file and then run f2py on the signature file: ')
            outmess('%s %s\n' %
                    (os.path.basename(sys.argv[0]), options['signsfile']))
        return
    for i in range(len(postlist)):
        if postlist[i]['block'] != 'python module':
            if 'python module' not in options:
                errmess(
                    'Tip: If your original code is Fortran source then you must use -m option.\n')
            raise TypeError('All blocks must be python module blocks but got %s' % (
                repr(postlist[i]['block'])))
    auxfuncs.debugoptions = options['debug']
    f90mod_rules.options = options
    auxfuncs.wrapfuncs = options['wrapfuncs']
    ret = buildmodules(postlist)
    # Every generated module also needs the shared fortranobject sources.
    for mn in ret.keys():
        dict_append(ret[mn], {'csrc': fobjcsrc, 'h': fobjhsrc})
    return ret
def filter_files(prefix, suffix, files, remove_prefix=None):
    """
    Filter files by prefix and suffix.

    Splits *files* (whitespace-stripped) into those whose whole name matches
    ``prefix.*suffix`` and the rest. When *remove_prefix* is truthy, the
    prefix is cut off the matching names.
    """
    pattern = re.compile(prefix + r'.*' + suffix + r'\Z')
    cut = len(prefix) if remove_prefix else 0
    filtered = []
    rest = []
    for name in (entry.strip() for entry in files):
        if pattern.match(name):
            filtered.append(name[cut:])
        else:
            rest.append(name)
    return filtered, rest
def get_prefix(module):
    """Return the grandparent directory of *module*'s file (its install prefix)."""
    return os.path.dirname(os.path.dirname(module.__file__))
def run_compile():
    """
    Do it all in one call!

    Implements ``f2py -c``: partitions sys.argv in place into f2py flags,
    numpy.distutils compiler flags, sources and linker inputs, then builds
    the extension module(s) by invoking numpy.distutils setup().
    """
    import tempfile
    i = sys.argv.index('-c')
    del sys.argv[i]
    remove_build_dir = 0
    try:
        i = sys.argv.index('--build-dir')
    except ValueError:
        i = None
    if i is not None:
        build_dir = sys.argv[i + 1]
        del sys.argv[i + 1]
        del sys.argv[i]
    else:
        # No explicit build dir: build in a temp dir and delete it afterwards.
        remove_build_dir = 1
        build_dir = tempfile.mkdtemp()
    # --link-<resource> flags name numpy.distutils system_info resources.
    _reg1 = re.compile(r'[-][-]link[-]')
    sysinfo_flags = [_m for _m in sys.argv[1:] if _reg1.match(_m)]
    sys.argv = [_m for _m in sys.argv if _m not in sysinfo_flags]
    if sysinfo_flags:
        # Strip the leading '--link-' to get the bare resource names.
        sysinfo_flags = [f[7:] for f in sysinfo_flags]
    # Flags passed through to the f2py code-generation stage.
    _reg2 = re.compile(
        r'[-][-]((no[-]|)(wrap[-]functions|lower)|debug[-]capi|quiet)|[-]include')
    f2py_flags = [_m for _m in sys.argv[1:] if _reg2.match(_m)]
    sys.argv = [_m for _m in sys.argv if _m not in f2py_flags]
    f2py_flags2 = []
    fl = 0
    # Collect 'only:'/'skip:' ... ':' sections verbatim for f2py.
    for a in sys.argv[1:]:
        if a in ['only:', 'skip:']:
            fl = 1
        elif a == ':':
            fl = 0
        if fl or a == ':':
            f2py_flags2.append(a)
    if f2py_flags2 and f2py_flags2[-1] != ':':
        f2py_flags2.append(':')
    f2py_flags.extend(f2py_flags2)
    sys.argv = [_m for _m in sys.argv if _m not in f2py_flags2]
    # Flags for the build_ext command (compiler selection).
    _reg3 = re.compile(
        r'[-][-]((f(90)?compiler([-]exec|)|compiler)=|help[-]compiler)')
    flib_flags = [_m for _m in sys.argv[1:] if _reg3.match(_m)]
    sys.argv = [_m for _m in sys.argv if _m not in flib_flags]
    # Flags for the config_fc command (Fortran compiler configuration).
    _reg4 = re.compile(
        r'[-][-]((f(77|90)(flags|exec)|opt|arch)=|(debug|noopt|noarch|help[-]fcompiler))')
    fc_flags = [_m for _m in sys.argv[1:] if _reg4.match(_m)]
    sys.argv = [_m for _m in sys.argv if _m not in fc_flags]
    if 1:
        del_list = []
        for s in flib_flags:
            v = '--fcompiler='
            if s[:len(v)] == v:
                from numpy.distutils import fcompiler
                fcompiler.load_all_fcompiler_classes()
                allowed_keys = list(fcompiler.fcompiler_class.keys())
                # Normalize the vendor name to lower case.
                nv = ov = s[len(v):].lower()
                if ov not in allowed_keys:
                    vmap = {}  # XXX
                    try:
                        nv = vmap[ov]
                    except KeyError:
                        if ov not in vmap.values():
                            print('Unknown vendor: "%s"' % (s[len(v):]))
                    nv = ov
                i = flib_flags.index(s)
                flib_flags[i] = '--fcompiler=' + nv
                continue
        for s in del_list:
            i = flib_flags.index(s)
            del flib_flags[i]
        assert len(flib_flags) <= 2, repr(flib_flags)
    _reg5 = re.compile(r'[-][-](verbose)')
    setup_flags = [_m for _m in sys.argv[1:] if _reg5.match(_m)]
    sys.argv = [_m for _m in sys.argv if _m not in setup_flags]
    if '--quiet' in f2py_flags:
        setup_flags.append('--quiet')
    modulename = 'untitled'
    sources = sys.argv[1:]
    # Value-taking f2py options keep their argument paired with the flag.
    for optname in ['--include_paths', '--include-paths', '--f2cmap']:
        if optname in sys.argv:
            i = sys.argv.index(optname)
            f2py_flags.extend(sys.argv[i:i + 2])
            del sys.argv[i + 1], sys.argv[i]
            sources = sys.argv[1:]
    if '-m' in sys.argv:
        i = sys.argv.index('-m')
        modulename = sys.argv[i + 1]
        del sys.argv[i + 1], sys.argv[i]
        sources = sys.argv[1:]
    else:
        # No -m: derive the module name from the first .pyf file found.
        from numpy.distutils.command.build_src import get_f2py_modulename
        pyf_files, sources = filter_files('', '[.]pyf([.]src|)', sources)
        sources = pyf_files + sources
        for f in pyf_files:
            modulename = get_f2py_modulename(f)
            if modulename:
                break
    # Split the remaining tokens into linker inputs and real sources.
    extra_objects, sources = filter_files('', '[.](o|a|so)', sources)
    include_dirs, sources = filter_files('-I', '', sources, remove_prefix=1)
    library_dirs, sources = filter_files('-L', '', sources, remove_prefix=1)
    libraries, sources = filter_files('-l', '', sources, remove_prefix=1)
    undef_macros, sources = filter_files('-U', '', sources, remove_prefix=1)
    define_macros, sources = filter_files('-D', '', sources, remove_prefix=1)
    # distutils wants define_macros as (name, value-or-None) tuples.
    for i in range(len(define_macros)):
        name_value = define_macros[i].split('=', 1)
        if len(name_value) == 1:
            name_value.append(None)
        if len(name_value) == 2:
            define_macros[i] = tuple(name_value)
        else:
            print('Invalid use of -D:', name_value)
    from numpy.distutils.system_info import get_info
    num_info = {}
    if num_info:
        include_dirs.extend(num_info.get('include_dirs', []))
    from numpy.distutils.core import setup, Extension
    ext_args = {'name': modulename, 'sources': sources,
                'include_dirs': include_dirs,
                'library_dirs': library_dirs,
                'libraries': libraries,
                'define_macros': define_macros,
                'undef_macros': undef_macros,
                'extra_objects': extra_objects,
                'f2py_options': f2py_flags,
                }
    if sysinfo_flags:
        # Fold in the include/library settings of each requested resource.
        from numpy.distutils.misc_util import dict_append
        for n in sysinfo_flags:
            i = get_info(n)
            if not i:
                outmess('No %s resources found in system'
                        ' (try `f2py --help-link`)\n' % (repr(n)))
            dict_append(ext_args, **i)
    ext = Extension(**ext_args)
    # Rebuild sys.argv for the distutils 'build' command and run it.
    sys.argv = [sys.argv[0]] + setup_flags
    sys.argv.extend(['build',
                     '--build-temp', build_dir,
                     '--build-base', build_dir,
                     '--build-platlib', '.'])
    if fc_flags:
        sys.argv.extend(['config_fc'] + fc_flags)
    if flib_flags:
        sys.argv.extend(['build_ext'] + flib_flags)
    setup(ext_modules=[ext])
    if remove_build_dir and os.path.exists(build_dir):
        import shutil
        outmess('Removing build directory %s\n' % (build_dir))
        shutil.rmtree(build_dir)
def main():
    """Command-line entry point: dispatch to run_compile() (-c) or run_main()."""
    if '--help-link' in sys.argv[1:]:
        sys.argv.remove('--help-link')
        from numpy.distutils.system_info import show_all
        show_all()
        return
    # Probably outdated options that were not working before 1.16
    if '--g3-numpy' in sys.argv[1:]:
        # BUG FIX: the message used "\\n", which wrote a literal backslash
        # followed by 'n' to stderr instead of terminating the line.
        sys.stderr.write("G3 f2py support is not implemented, yet.\n")
        sys.exit(1)
    elif '--2e-numeric' in sys.argv[1:]:
        sys.argv.remove('--2e-numeric')
    elif '--2e-numarray' in sys.argv[1:]:
        # Note that this errors because the -DNUMARRAY argument is
        # not recognized. Just here for back compatibility and the
        # error message.
        sys.argv.append("-DNUMARRAY")
        sys.argv.remove('--2e-numarray')
    elif '--2e-numpy' in sys.argv[1:]:
        sys.argv.remove('--2e-numpy')
    else:
        pass
    # NOTE(review): __usage__ advertises --2d-numpy/--2d-numeric/... while the
    # checks above look for --2e-*; confirm which spelling is intended.
    if '-c' in sys.argv[1:]:
        run_compile()
    else:
        run_main(sys.argv[1:])
| |
import random
import unittest
from hearthbreaker.agents.basic_agents import PredictableAgent, DoNothingAgent
from hearthbreaker.constants import CHARACTER_CLASS, MINION_TYPE
from hearthbreaker.engine import Game
from hearthbreaker.replay import playback, Replay
from tests.agents.testing_agents import CardTestingAgent, OneCardPlayingAgent, EnemySpellTestingAgent, \
MinionAttackingAgent, PlayAndAttackAgent
from tests.testing_utils import generate_game_for, StackedDeck
from hearthbreaker.cards import *
class TestMage(unittest.TestCase):
def setUp(self):
    # Fixed seed so every RNG-dependent game outcome asserted below is
    # reproducible run to run.
    random.seed(1857)
def test_ArcaneMissiles(self):
    """Arcane Missiles deals 3 total damage to random enemy targets (seeded RNG)."""
    game = generate_game_for(MogushanWarden, ArcaneMissiles, OneCardPlayingAgent, CardTestingAgent)
    game.play_single_turn()
    game.play_single_turn()
    # First cast: hero drops 30 -> 27, so all missiles hit the enemy hero.
    self.assertEqual(27, game.other_player.hero.health)
    game.play_single_turn()
    game.play_single_turn()
    game.play_single_turn()
    game.play_single_turn()
    game.play_single_turn()
    self.assertEqual(1, len(game.current_player.minions))
    self.assertEqual("Mogu'shan Warden", game.current_player.minions[0].card.name)
    game.play_single_turn()
    # The random numbers work so that the arcane missiles hit thrice on each target
    self.assertEqual(9, game.other_player.hero.health)
    self.assertEqual(4, game.other_player.minions[0].health)
def test_ArcaneMissilesWithSpellPower(self):
    """Replay-based check of Arcane Missiles interacting with spell damage."""
    game = playback(Replay("tests/replays/card_tests/ArcaneMissilesWithSpellDamage.hsreplay"))
    game.start()
    self.assertEqual(1, len(game.current_player.minions))
    self.assertEqual(1, len(game.other_player.minions))
    # The enemy minion is damaged (1 of 2 max health) and the hero took 3.
    self.assertEqual(1, game.other_player.minions[0].health)
    self.assertEqual(2, game.other_player.minions[0].calculate_max_health())
    self.assertEqual(27, game.other_player.hero.health)
    # Returned so the played-out game state can be reused elsewhere.
    return game
def test_WaterElemental(self):
    """Water Elemental freezes whatever it damages, attacking or defending."""
    game = generate_game_for(WaterElemental, StonetuskBoar, PredictableAgent, DoNothingAgent)
    for turn in range(0, 11):
        game.play_single_turn()
    # Hero-power pings only so far; the enemy hero is not yet frozen.
    self.assertEqual(25, game.other_player.hero.health)
    self.assertFalse(game.other_player.hero.frozen)
    self.assertEqual(1, len(game.current_player.minions))
    self.assertEqual(3, game.current_player.minions[0].calculate_attack())
    self.assertEqual(6, game.current_player.minions[0].health)
    self.assertEqual("Water Elemental", game.current_player.minions[0].card.name)
    game.play_single_turn()
    game.play_single_turn()
    # The elemental's attack both damages and freezes the enemy hero.
    self.assertEqual(22, game.other_player.hero.health)
    # Always false after the end of a turn
    self.assertTrue(game.other_player.hero.frozen)
    # Now make sure that attacking the Water Elemental directly will freeze a character
    random.seed(1857)
    game = generate_game_for(WaterElemental, IronbarkProtector, OneCardPlayingAgent, PredictableAgent)
    for turn in range(0, 8):
        game.play_single_turn()
    self.assertEqual(1, len(game.other_player.minions))
    self.assertEqual(5, game.other_player.minions[0].health)
    # The player won't have taken damage because of armor, but should still be frozen
    self.assertEqual(30, game.current_player.hero.health)
    self.assertTrue(game.current_player.hero.frozen)
    game.play_single_turn()
    game.play_single_turn()
    # The player should still be frozen from last turn, and so shouldn't have attacked
    self.assertEqual(30, game.current_player.hero.health)
def test_IceLance(self):
    """Ice Lance freezes its target; it deals damage (4) only to already-frozen targets."""
    game = generate_game_for(IceLance, OasisSnapjaw, CardTestingAgent, OneCardPlayingAgent)
    game.play_single_turn()
    # First lance: freeze only, no damage.
    self.assertTrue(game.other_player.hero.frozen)
    self.assertEqual(30, game.other_player.hero.health)
    game.play_single_turn()
    game.play_single_turn()
    # Lancing the frozen hero now deals 4 damage.
    self.assertTrue(game.other_player.hero.frozen)
    self.assertEqual(26, game.other_player.hero.health)
    for turn in range(0, 6):
        game.play_single_turn()
    self.assertEqual(1, len(game.other_player.minions))
    self.assertTrue(game.other_player.minions[0].frozen)
    self.assertEqual(7, game.other_player.minions[0].health)
def test_ManaWyrm(self):
    """Mana Wyrm gains +1 attack each time its controller casts a spell."""
    # Stacked decks give a fully deterministic draw order for this test.
    deck1 = StackedDeck([ManaWyrm(), IceLance(), ManaWyrm(), IceLance(), IceLance(), IceLance()],
                        CHARACTER_CLASS.MAGE)
    deck2 = StackedDeck([IronbeakOwl()], CHARACTER_CLASS.PALADIN)
    game = Game([deck1, deck2], [CardTestingAgent(), OneCardPlayingAgent()])
    game.pre_game()
    game.current_player = 1
    game.play_single_turn()
    # Freshly played wyrm: base 1 attack, 3 health.
    self.assertEqual(1, len(game.current_player.minions))
    self.assertEqual(1, game.current_player.minions[0].calculate_attack())
    self.assertEqual(3, game.current_player.minions[0].health)
    self.assertEqual(3, game.current_player.minions[0].calculate_max_health())
    self.assertEqual("Mana Wyrm", game.current_player.minions[0].card.name)
    game.play_single_turn()
    game.play_single_turn()
    # Second wyrm played plus one spell: the newer wyrm sits at index 1 with 2 attack.
    self.assertEqual(2, len(game.current_player.minions))
    self.assertEqual(1, game.current_player.minions[0].calculate_attack())
    self.assertEqual(3, game.current_player.minions[0].health)
    self.assertEqual(3, game.current_player.minions[0].calculate_max_health())
    self.assertEqual(2, game.current_player.minions[1].calculate_attack())
    self.assertEqual(3, game.current_player.minions[1].health)
    self.assertEqual(3, game.current_player.minions[1].calculate_max_health())
    game.play_single_turn()
    game.play_single_turn()
    # Three more spells cast: that wyrm reaches 5 attack; health is unchanged.
    self.assertEqual(2, len(game.current_player.minions))
    self.assertEqual(1, game.current_player.minions[0].calculate_attack())
    self.assertEqual(3, game.current_player.minions[0].health)
    self.assertEqual(3, game.current_player.minions[0].calculate_max_health())
    self.assertEqual(5, game.current_player.minions[1].calculate_attack())
    self.assertEqual(3, game.current_player.minions[1].health)
    self.assertEqual(3, game.current_player.minions[1].calculate_max_health())
def test_MirrorImage(self):
    """Mirror Image summons two 0/2 taunt minions whose token card costs 0."""
    game = generate_game_for(MirrorImage, StonetuskBoar, CardTestingAgent, DoNothingAgent)
    game.play_single_turn()
    self.assertEqual(2, len(game.current_player.minions))
    self.assertEqual(0, game.current_player.minions[0].calculate_attack())
    self.assertEqual(2, game.current_player.minions[0].health)
    self.assertTrue(game.current_player.minions[0].taunt)
    self.assertEqual("Mirror Image", game.current_player.minions[0].card.name)
    self.assertEqual(0, game.current_player.minions[0].card.mana)
    self.assertEqual(0, game.current_player.minions[1].calculate_attack())
    self.assertEqual(2, game.current_player.minions[1].health)
    self.assertTrue(game.current_player.minions[1].taunt)
    self.assertEqual("Mirror Image", game.current_player.minions[1].card.name)
    self.assertEqual(0, game.current_player.minions[1].card.mana)
    def test_ArcaneExplosion(self):
        """Arcane Explosion deals 1 damage to enemy minions only, not the hero."""
        game = generate_game_for(BloodfenRaptor, ArcaneExplosion, OneCardPlayingAgent, CardTestingAgent)
        game.play_single_turn()
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(1, len(game.current_player.minions))
        game.play_single_turn()
        self.assertEqual(1, len(game.other_player.minions))
        self.assertEqual(1, game.other_player.minions[0].health)
        self.assertEqual(30, game.other_player.hero.health)
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(1, len(game.other_player.minions))
        self.assertEqual(1, game.other_player.minions[0].health)
        self.assertEqual(30, game.other_player.hero.health)
    def test_Frostbolt(self):
        """Frostbolt deals 3 damage and freezes its target (hero or minion)."""
        game = generate_game_for(OasisSnapjaw, Frostbolt, OneCardPlayingAgent, CardTestingAgent)
        for turn in range(0, 4):
            game.play_single_turn()
        self.assertTrue(game.other_player.hero.frozen)
        self.assertEqual(27, game.other_player.hero.health)
        for turn in range(0, 4):
            game.play_single_turn()
        self.assertEqual(24, game.other_player.hero.health)
        self.assertEqual(1, len(game.other_player.minions))
        self.assertEqual(1, game.other_player.minions[0].health)
        self.assertTrue(game.other_player.minions[0].frozen)
    def test_SorcerersApprentice(self):
        """Sorcerer's Apprentice reduces spell costs while alive; reduction ends on death."""
        game = generate_game_for([SorcerersApprentice, ArcaneMissiles, SorcerersApprentice, Frostbolt, Frostbolt,
                                  Frostbolt], StonetuskBoar, CardTestingAgent, DoNothingAgent)
        game.play_single_turn()
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual(3, game.current_player.minions[0].calculate_attack())
        self.assertEqual(2, game.current_player.minions[0].health)
        self.assertEqual("Sorcerer's Apprentice", game.current_player.minions[0].card.name)
        # Arcane missiles should also have been played, since it is now free
        self.assertEqual(27, game.other_player.hero.health)
        # Make sure the other frostbolts have been properly reduced
        self.assertEqual(1, game.current_player.hand[1].mana_cost())
        self.assertEqual(1, game.current_player.hand[2].mana_cost())
        game.play_single_turn()
        game.play_single_turn()
        # Both Sorcerer's Apprentices are killed by friendly Frostbolts.
        self.assertEqual(0, len(game.current_player.minions))
        # Make sure that the cards in hand are no longer reduced
        self.assertEqual(2, game.current_player.hand[0].mana_cost())
    def test_ArcaneIntellect(self):
        """Arcane Intellect draws two extra cards for the caster."""
        game = generate_game_for(ArcaneIntellect, StonetuskBoar, CardTestingAgent, DoNothingAgent)
        for turn in range(0, 5):
            game.play_single_turn()
        self.assertEqual(7, len(game.current_player.hand))
    def test_FrostNova(self):
        """Frost Nova freezes all enemy minions but leaves the enemy hero unfrozen."""
        game = generate_game_for(FrostNova, StonetuskBoar, CardTestingAgent, OneCardPlayingAgent)
        for turn in range(0, 5):
            game.play_single_turn()
        for minion in game.other_player.minions:
            self.assertTrue(minion.frozen)
        self.assertFalse(game.other_player.hero.frozen)
    def test_Counterspell(self):
        """Counterspell cancels the enemy Frostbolt (no freeze/damage) and is consumed."""
        game = generate_game_for(Counterspell, Frostbolt, CardTestingAgent, CardTestingAgent)
        for turn in range(0, 5):
            game.play_single_turn()
        self.assertEqual(1, len(game.current_player.secrets))
        self.assertEqual("Counterspell", game.current_player.secrets[0].name)
        game.play_single_turn()
        self.assertFalse(game.other_player.hero.frozen)
        # The second Frostbolt still resolves after the secret is used up.
        self.assertEqual(27, game.other_player.hero.health)
        # Ensure that secrets are being removed after being revealed
        self.assertEqual(0, len(game.other_player.secrets))
    def test_IceBarrier(self):
        """Ice Barrier grants 8 armor when the hero is attacked; duplicates can't be queued."""
        game = generate_game_for(IceBarrier, StonetuskBoar, CardTestingAgent, PredictableAgent)
        for turn in range(0, 5):
            game.play_single_turn()
        self.assertEqual(1, len(game.current_player.secrets))
        self.assertEqual("Ice Barrier", game.current_player.secrets[0].name)
        game.play_single_turn()
        # only one minion because PredictableAgent will shoot its own minions if there isn't anything else to shoot.
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual(7, game.other_player.hero.armor)
        # Attacked twice on the first turn, then fireballed before getting the armor up
        self.assertEqual(27, game.other_player.hero.health)
        # Make sure we can't have two identical secrets at the same time
        random.seed(1857)
        game = generate_game_for(IceBarrier, StonetuskBoar, CardTestingAgent, DoNothingAgent)
        for turn in range(0, 6):
            game.play_single_turn()
        self.assertEqual(1, len(game.other_player.secrets))
        self.assertEqual("Ice Barrier", game.other_player.secrets[0].name)
        game.play_single_turn()
        self.assertEqual(1, len(game.current_player.secrets))
        self.assertEqual("Ice Barrier", game.current_player.secrets[0].name)
    def test_IceBlock(self):
        """Ice Block prevents one lethal hit, is consumed, and the next lethal ends the game."""
        game = generate_game_for([IceBlock, Deathwing], Frostbolt, CardTestingAgent, CardTestingAgent)
        for turn in range(0, 12):
            game.play_single_turn()
        self.assertEqual(game.other_player.hero.health, 3)
        self.assertEqual(1, len(game.other_player.secrets))
        game.play_single_turn()
        game.play_single_turn()
        # The lethal Frostbolt was absorbed by Ice Block; health unchanged, secret gone.
        self.assertEqual(3, game.other_player.hero.health)
        self.assertEqual(0, len(game.other_player.secrets))
        game.play_single_turn()
        game.play_single_turn()
        self.assertTrue(game.game_ended)
    def test_MirrorEntity(self):
        """Mirror Entity copies the next minion the opponent plays, owned by the secret holder."""
        game = generate_game_for([StonetuskBoar, MirrorEntity], IronfurGrizzly, CardTestingAgent, OneCardPlayingAgent)
        for turn in range(0, 5):
            game.play_single_turn()
        self.assertEqual(1, len(game.current_player.secrets))
        self.assertEqual("Mirror Entity", game.current_player.secrets[0].name)
        game.play_single_turn()
        self.assertEqual(2, len(game.other_player.minions))
        self.assertEqual("Ironfur Grizzly", game.other_player.minions[1].card.name)
        self.assertEqual(game.other_player, game.other_player.minions[1].player)
        self.assertEqual(1, game.other_player.minions[1].index)
    def test_Spellbender(self):
        """Spellbender redirects minion-targeted spells only — not hero targets or untargeted spells."""
        game = generate_game_for([Spellbender, Wisp], Moonfire, CardTestingAgent, CardTestingAgent)
        for turn in range(0, 6):
            game.play_single_turn()
        # The moonfire should have been re-directed to the Spellbender, which should have taken one damage
        self.assertEqual(2, len(game.other_player.minions))
        self.assertEqual(2, game.other_player.minions[1].health)
        self.assertEqual(1, game.other_player.minions[1].calculate_attack())
        self.assertEqual("Spellbender", game.other_player.minions[1].card.name)
        # Now make sure it won't work when the hero is targeted
        random.seed(1857)
        game = generate_game_for(Spellbender, Moonfire, CardTestingAgent, CardTestingAgent)
        for turn in range(0, 6):
            game.play_single_turn()
        self.assertEqual(0, len(game.other_player.minions))
        self.assertEqual(1, len(game.other_player.secrets))
        self.assertEqual(23, game.other_player.hero.health)
        # Now make sure it doesn't activate when a non-targeted spell is used
        random.seed(1857)
        game = generate_game_for(Spellbender, ArcaneIntellect, CardTestingAgent, CardTestingAgent)
        for turn in range(0, 6):
            game.play_single_turn()
        # The arcane intellect should not have caused the Spellbender to activate
        self.assertEqual(0, len(game.other_player.minions))
        self.assertEqual(1, len(game.other_player.secrets))
    def test_SpellbenderFullBoard(self):
        """Spellbender does not trigger when the board is already full (no room for its token)."""
        game = generate_game_for([Spellbender, Onyxia], Assassinate, OneCardPlayingAgent, OneCardPlayingAgent)
        for turn in range(17):
            game.play_single_turn()
        self.assertEqual(1, len(game.current_player.secrets))
        self.assertEqual(7, len(game.current_player.minions))
        game.play_single_turn()
        # Assassinate resolved normally (one minion died); the secret is untouched.
        self.assertEqual(6, len(game.other_player.minions))
        self.assertEqual(1, len(game.other_player.secrets))
    def test_Spellbender_full_board_target_hero(self):
        """With a full board and a hero-targeted spell, Spellbender stays dormant."""
        game = generate_game_for(BaneOfDoom, [Wisp, Wisp, Wisp, Wisp, Wisp, Wisp, Wisp, Spellbender],
                                 OneCardPlayingAgent, CardTestingAgent)
        for turn in range(10):
            game.play_single_turn()
        self.assertEqual(7, len(game.current_player.minions))
        self.assertEqual(1, len(game.current_player.secrets))
        # Force Bane of Doom to target the secret-holder's hero.
        game.other_player.agent.choose_target = lambda targets: game.players[1].hero
        game.play_single_turn()
        self.assertEqual(7, len(game.other_player.minions))
        self.assertEqual(28, game.other_player.hero.health)
        self.assertEqual(1, len(game.other_player.secrets))
    def test_Spellbender_target_hero_and_attack(self):
        """Spellbender ignores hero-targeted spells and physical attacks."""
        game = generate_game_for([Spellbender, OasisSnapjaw], [LavaBurst, Wisp, Loatheb],
                                 OneCardPlayingAgent, PlayAndAttackAgent)
        for turn in range(5):
            game.play_single_turn()
        self.assertEqual(1, len(game.current_player.secrets))
        self.assertEqual(0, len(game.other_player.minions))
        game.play_single_turn()
        # Lava Burst hit the hero directly; the secret did not fire.
        self.assertEqual(1, len(game.other_player.secrets))
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual(25, game.other_player.hero.health)
        game.play_single_turn()
        game.play_single_turn()
        # Attacks on minions also leave the secret untouched.
        self.assertEqual(25, game.other_player.hero.health)
        self.assertEqual(1, len(game.other_player.minions))
        self.assertEqual(6, game.other_player.minions[0].health)
        self.assertEqual(1, len(game.other_player.secrets))
    def test_Vaporize(self):
        """Vaporize destroys an attacking minion; it does not trigger on spells or hero attacks."""
        game = generate_game_for(Vaporize, FaerieDragon, CardTestingAgent, MinionAttackingAgent)
        for turn in range(0, 6):
            game.play_single_turn()
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual(0, len(game.other_player.secrets))
        self.assertEqual(30, game.other_player.hero.health)
        random.seed(1857)
        game = generate_game_for(Vaporize, Swipe, CardTestingAgent, PredictableAgent)
        for turn in range(0, 6):
            game.play_single_turn()
        # Swipe damage got through; the secret remains since no minion attacked.
        self.assertEqual(28, game.other_player.hero.health)
        self.assertEqual(1, len(game.other_player.secrets))
        self.assertFalse(game.current_player.hero.dead)
    def test_KirinTorMage(self):
        """Kirin Tor Mage's battlecry makes the next secret played that turn free."""
        game = generate_game_for([KirinTorMage, Vaporize, Spellbender], StonetuskBoar,
                                 CardTestingAgent, DoNothingAgent)
        for turn in range(0, 5):
            game.play_single_turn()
        self.assertEqual(1, len(game.current_player.secrets))
        self.assertEqual("Vaporize", game.current_player.secrets[0].name)
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual("Kirin Tor Mage", game.current_player.minions[0].card.name)
        # The discount applies to one secret only; the next still costs 3.
        self.assertEqual(3, game.current_player.hand[0].mana_cost())
        self.assertEqual("Spellbender", game.current_player.hand[0].name)
        random.seed(1857)
        game = generate_game_for([KirinTorMage, Vaporize], StonetuskBoar, OneCardPlayingAgent, DoNothingAgent)
        for turn in range(0, 5):
            game.play_single_turn()
        # If the secret isn't played the same turn, the discount expires.
        self.assertEqual(0, len(game.current_player.secrets))
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual("Kirin Tor Mage", game.current_player.minions[0].card.name)
        self.assertEqual(3, game.current_player.hand[2].mana_cost())
        self.assertEqual("Vaporize", game.current_player.hand[2].name)
    def test_EtherealArcanist(self):
        """Ethereal Arcanist gains +2/+2 per turn-end while a secret is up; silence resets it."""
        game = generate_game_for([Spellbender, EtherealArcanist], StonetuskBoar, CardTestingAgent, DoNothingAgent)
        for turn in range(0, 6):
            game.play_single_turn()
        self.assertEqual(1, len(game.other_player.secrets))
        game.play_single_turn()
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual(5, game.current_player.minions[0].calculate_attack())
        self.assertEqual(5, game.current_player.minions[0].health)
        self.assertEqual(5, game.current_player.minions[0].calculate_max_health())
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(7, game.current_player.minions[0].calculate_attack())
        self.assertEqual(7, game.current_player.minions[0].health)
        self.assertEqual(7, game.current_player.minions[0].calculate_max_health())
        # Silence removes all accumulated buffs, back to base 3/3.
        game.current_player.minions[0].silence()
        self.assertEqual(3, game.current_player.minions[0].calculate_attack())
        self.assertEqual(3, game.current_player.minions[0].health)
        self.assertEqual(3, game.current_player.minions[0].calculate_max_health())
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(3, game.current_player.minions[0].calculate_attack())
        self.assertEqual(3, game.current_player.minions[0].health)
        self.assertEqual(3, game.current_player.minions[0].calculate_max_health())
        # Test when the player has no secrets at all
        random.seed(1857)
        game = generate_game_for(EtherealArcanist, StonetuskBoar, CardTestingAgent, DoNothingAgent)
        for turn in range(0, 7):
            game.play_single_turn()
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual(3, game.current_player.minions[0].calculate_attack())
        self.assertEqual(3, game.current_player.minions[0].health)
        self.assertEqual(3, game.current_player.minions[0].calculate_max_health())
    def test_ConeOfCold(self):
        """Cone of Cold damages and freezes the target and its adjacent minions."""
        game = generate_game_for(ConeOfCold, [StonetuskBoar, BloodfenRaptor, BloodfenRaptor], CardTestingAgent,
                                 OneCardPlayingAgent)
        for turn in range(0, 6):
            game.play_single_turn()
        self.assertEqual(3, len(game.current_player.minions))
        game.play_single_turn()
        self.assertEqual(3, len(game.other_player.minions))
        self.assertTrue(game.other_player.minions[0].frozen)
        self.assertEqual(1, game.other_player.minions[0].health)
        self.assertTrue(game.other_player.minions[1].frozen)
        self.assertEqual(1, game.other_player.minions[1].health)
        # The third minion is not adjacent to the target and is untouched by the freeze.
        self.assertFalse(game.other_player.minions[2].frozen)
        self.assertEqual(1, game.other_player.minions[2].health)
        self.assertEqual(30, game.other_player.hero.health)
        # Now check to ensure that it will work when targeting the other end of the minion list
        game.current_player.agent.choose_target = lambda targets: targets[len(targets) - 1]
        game.play_single_turn()
        game.play_single_turn()
        # Neither of the minions which survive Cone of Cold will be frozen, since they weren't touched this round
        self.assertEqual(2, len(game.other_player.minions))
        self.assertFalse(game.other_player.minions[0].frozen)
        self.assertFalse(game.other_player.minions[1].frozen)
    def test_Fireball(self):
        """Fireball deals 6 to the enemy hero, 7 with Kobold Geomancer's spell damage."""
        game = generate_game_for([Fireball, KoboldGeomancer], StonetuskBoar, EnemySpellTestingAgent, DoNothingAgent)
        for turn in range(0, 7):
            game.play_single_turn()
        self.assertEqual(24, game.other_player.hero.health)
        for turn in range(0, 4):
            game.play_single_turn()
        self.assertEqual(17, game.other_player.hero.health)
    def test_Polymorph(self):
        """Polymorph replaces a minion with a 1/1 Sheep, dropping all abilities like Taunt."""
        game = generate_game_for(MogushanWarden, Polymorph, OneCardPlayingAgent, CardTestingAgent)
        for turn in range(0, 7):
            game.play_single_turn()
        self.assertEqual(1, len(game.current_player.minions))
        self.assertTrue(game.current_player.minions[0].taunt)
        self.assertEqual(1, game.current_player.minions[0].calculate_attack())
        self.assertEqual(7, game.current_player.minions[0].health)
        self.assertEqual("Mogu'shan Warden", game.current_player.minions[0].card.name)
        game.play_single_turn()
        self.assertEqual(1, len(game.other_player.minions))
        self.assertFalse(game.other_player.minions[0].taunt)
        self.assertEqual(1, game.other_player.minions[0].calculate_attack())
        self.assertEqual(1, game.other_player.minions[0].health)
        self.assertEqual("Sheep", game.other_player.minions[0].card.name)
        self.assertEqual(MINION_TYPE.BEAST, game.other_player.minions[0].card.minion_type)
    def test_Blizzard(self):
        """Blizzard deals 2 damage to all enemy minions and freezes them."""
        game = generate_game_for(Blizzard, MogushanWarden, CardTestingAgent, OneCardPlayingAgent)
        for turn in range(0, 10):
            game.play_single_turn()
        self.assertEqual(2, len(game.current_player.minions))
        self.assertEqual(7, game.current_player.minions[0].health)
        self.assertEqual(7, game.current_player.minions[1].health)
        self.assertFalse(game.current_player.minions[0].frozen)
        self.assertFalse(game.current_player.minions[1].frozen)
        game.play_single_turn()
        self.assertEqual(2, len(game.other_player.minions))
        self.assertEqual(5, game.other_player.minions[0].health)
        self.assertEqual(5, game.other_player.minions[1].health)
        self.assertTrue(game.other_player.minions[0].frozen)
        self.assertTrue(game.other_player.minions[1].frozen)
    def test_Flamestrike(self):
        """Flamestrike deals 4 damage to every enemy minion."""
        game = generate_game_for(Flamestrike, MogushanWarden, CardTestingAgent, OneCardPlayingAgent)
        for turn in range(0, 12):
            game.play_single_turn()
        self.assertEqual(3, len(game.current_player.minions))
        self.assertEqual(7, game.current_player.minions[0].health)
        self.assertEqual(7, game.current_player.minions[1].health)
        self.assertEqual(7, game.current_player.minions[2].health)
        game.play_single_turn()
        self.assertEqual(3, len(game.other_player.minions))
        self.assertEqual(3, game.other_player.minions[0].health)
        self.assertEqual(3, game.other_player.minions[1].health)
        self.assertEqual(3, game.other_player.minions[2].health)
    def test_Pyroblast(self):
        """Pyroblast deals 10 damage to the enemy hero once 10 mana is available."""
        game = generate_game_for(Pyroblast, StonetuskBoar, CardTestingAgent, DoNothingAgent)
        for turn in range(0, 18):
            game.play_single_turn()
        self.assertEqual(30, game.current_player.hero.health)
        game.play_single_turn()
        self.assertEqual(20, game.other_player.hero.health)
    def test_ArchmageAntonidas(self):
        """Archmage Antonidas adds a Fireball to hand whenever the owner casts a spell."""
        game = generate_game_for([ArchmageAntonidas, Vaporize], StonetuskBoar, CardTestingAgent, DoNothingAgent)
        for turn in range(0, 12):
            game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual("Archmage Antonidas", game.current_player.minions[0].card.name)
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual("Fireball", game.current_player.hand[9].name)
    def test_Duplicate(self):
        """Duplicate adds two copies of a dying friendly minion to hand, then is consumed."""
        game = generate_game_for([BloodfenRaptor, Duplicate], ShadowBolt, OneCardPlayingAgent, CardTestingAgent)
        for turn in range(0, 6):
            game.play_single_turn()
        self.assertEqual(6, len(game.other_player.hand))
        self.assertEqual("Bloodfen Raptor", game.other_player.hand[4].name)
        self.assertEqual("Bloodfen Raptor", game.other_player.hand[5].name)
        self.assertEqual(0, len(game.other_player.secrets))
    def test_Duplicate_and_play_after(self):
        """Duplicated copies can be replayed on a later turn."""
        game = generate_game_for([Wisp, Wisp, Wisp, Wisp, Wisp, Duplicate], LightningStorm,
                                 CardTestingAgent, OneCardPlayingAgent)
        for turn in range(5):
            game.play_single_turn()
        self.assertEqual(0, len(game.current_player.hand))
        self.assertEqual(5, len(game.current_player.minions))
        self.assertEqual(1, len(game.current_player.secrets))
        game.play_single_turn()
        # Lightning Storm wipes the board; Duplicate puts two Wisps back in hand.
        self.assertEqual(0, len(game.other_player.secrets))
        self.assertEqual(0, len(game.other_player.minions))
        self.assertEqual(2, len(game.other_player.hand))
        game.play_single_turn()
        self.assertEqual(0, len(game.current_player.hand))
        self.assertEqual(3, len(game.current_player.minions))
    def test_Duplicate_MadScientist(self):
        """A Duplicate pulled in by Mad Scientist's deathrattle copies the next friendly death."""
        game = generate_game_for(Hellfire, [MadScientist, MagmaRager, Duplicate],
                                 OneCardPlayingAgent, OneCardPlayingAgent)
        for turn in range(6):
            game.play_single_turn()
        self.assertEqual(2, len(game.current_player.minions))
        self.assertEqual(0, len(game.current_player.secrets))
        game.play_single_turn()
        # Hellfire kills both minions; the Scientist puts Duplicate up, which copies the Rager.
        self.assertEqual(0, len(game.other_player.minions))
        self.assertEqual(0, len(game.other_player.secrets))
        self.assertEqual("Magma Rager", game.other_player.hand[-1].name)
        self.assertEqual("Magma Rager", game.other_player.hand[-2].name)
    def test_Snowchugger(self):
        """Snowchugger freezes any character it damages, but only when damage is dealt."""
        game = generate_game_for(Snowchugger, StonetuskBoar, PredictableAgent, DoNothingAgent)
        for turn in range(0, 7):
            game.play_single_turn()
        self.assertEqual(27, game.other_player.hero.health)
        self.assertFalse(game.other_player.hero.frozen)
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual(2, game.current_player.minions[0].calculate_attack())
        self.assertEqual(3, game.current_player.minions[0].health)
        self.assertEqual("Snowchugger", game.current_player.minions[0].card.name)
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(25, game.other_player.hero.health)
        # The hero is frozen by Snowchugger's attack this turn.
        self.assertTrue(game.other_player.hero.frozen)
        # Now make sure that attacking the Snowchugger directly will freeze a character
        random.seed(1857)
        game = generate_game_for(Snowchugger, IronbarkProtector, OneCardPlayingAgent, PredictableAgent)
        for turn in range(0, 4):
            game.play_single_turn()
        self.assertEqual(1, len(game.other_player.minions))
        self.assertEqual(2, game.other_player.minions[0].health)
        # The player should be frozen because of weapon attack
        self.assertEqual(29, game.current_player.hero.health)
        self.assertTrue(game.current_player.hero.frozen)
        game.play_single_turn()
        game.play_single_turn()
        # The player should still be frozen from last turn, and thus shouldn't have attacked
        self.assertEqual(29, game.current_player.hero.health)
        # If Snowchugger has 0 attack, a minion attacking it is NOT frozen since no damage was dealt
        game = generate_game_for(Snowchugger, StonetuskBoar, PredictableAgent, PredictableAgent)
        for turn in range(0, 2):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[1].minions))
        self.assertEqual("Snowchugger", game.players[1].minions[0].card.name)
        # Cheat
        game.players[1].minions[0].base_attack = 0
        self.assertEqual(0, game.players[1].minions[0].calculate_attack())
        self.assertEqual(3, game.players[1].minions[0].health)
        # Stonetusk should have attacked the Snowchugger, and will NOT be frozen since they didn't take damage
        game.play_single_turn()
        self.assertEqual(1, game.players[1].minions[0].health)
        self.assertFalse(game.players[0].minions[0].frozen)
    def test_GoblinBlastmage(self):
        """Goblin Blastmage's battlecry (4 random missiles) fires only with a friendly Mech down."""
        game = generate_game_for([GoblinBlastmage, ClockworkGnome, GoblinBlastmage], [Mechwarper, ClockworkGnome],
                                 CardTestingAgent, CardTestingAgent)
        for turn in range(6):
            game.play_single_turn()
        self.assertEqual(0, len(game.other_player.minions))
        self.assertEqual(7, len(game.current_player.minions))
        self.assertEqual(3, game.current_player.minions[0].health)
        self.assertEqual(1, game.current_player.minions[1].health)
        self.assertEqual(3, game.current_player.minions[2].health)
        self.assertEqual(1, game.current_player.minions[3].health)
        self.assertEqual(3, game.current_player.minions[4].health)
        self.assertEqual(1, game.current_player.minions[5].health)
        self.assertEqual(3, game.current_player.minions[6].health)
        # Blastmage should not go off, as there is no friendly mech down
        game.play_single_turn()
        self.assertEqual(1, len(game.current_player.minions))
        self.assertEqual(7, len(game.other_player.minions))
        self.assertEqual(3, game.other_player.minions[0].health)
        self.assertEqual(1, game.other_player.minions[1].health)
        self.assertEqual(3, game.other_player.minions[2].health)
        self.assertEqual(1, game.other_player.minions[3].health)
        self.assertEqual(3, game.other_player.minions[4].health)
        self.assertEqual(1, game.other_player.minions[5].health)
        self.assertEqual(3, game.other_player.minions[6].health)
        self.assertEqual(30, game.other_player.hero.health)
        game.play_single_turn()
        game.play_single_turn()
        # The Blastmage hits the warper at index 2 twice, and the two gnomes at indices 1 and 3.
        self.assertEqual(3, len(game.current_player.minions))
        self.assertEqual(5, len(game.other_player.minions))
        self.assertEqual(3, game.other_player.minions[0].health)
        self.assertEqual(1, game.other_player.minions[1].health)
        self.assertEqual(1, game.other_player.minions[2].health)
        self.assertEqual(3, game.other_player.minions[3].health)
        self.assertEqual(3, game.other_player.minions[4].health)
        self.assertEqual(30, game.other_player.hero.health)
    def test_Flamecannon(self):
        """Flamecannon deals 4 to a random enemy minion and can't be played without a target."""
        game = generate_game_for(Flamecannon, SenjinShieldmasta, OneCardPlayingAgent, OneCardPlayingAgent)
        for turn in range(0, 8):
            game.play_single_turn()
        # Flamecannon hasn't been played since there hasn't been an enemy minion until now.
        self.assertEqual(7, len(game.players[0].hand))
        self.assertEqual(1, len(game.players[1].minions))
        self.assertEqual(5, game.players[1].minions[0].health)
        # Enemy minion exist, so Flamecannon will be played.
        game.play_single_turn()
        self.assertEqual(7, len(game.players[0].hand))
        self.assertEqual(1, len(game.players[1].minions))
        self.assertEqual(1, game.players[1].minions[0].health)
    def test_WeeSpellstopper(self):
        """Wee Spellstopper grants spell immunity to adjacent minions, not itself."""
        game = generate_game_for(WeeSpellstopper, ShadowBolt, OneCardPlayingAgent, OneCardPlayingAgent)
        for turn in range(0, 8):
            game.play_single_turn()
        # First Spellstopper gets Bolted but lives with 1 hp
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual(1, game.players[0].minions[0].health)
        # Once there are 2 Spellstoppers, they are both spell immune
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(2, len(game.players[0].minions))
        self.assertEqual(5, game.players[0].minions[0].health)
        self.assertEqual(1, game.players[0].minions[1].health)
        game.play_single_turn()
        # Kill both directly to leave a lone Spellstopper with no aura partner.
        game.players[0].minions[0].die(None)
        game.players[0].minions[1].die(None)
        game.check_delayed()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual(1, game.players[0].minions[0].health)
        # Last Spellstopper is not immune and dies to Shadow Bolt
        game.play_single_turn()
        self.assertEqual(0, len(game.players[0].minions))
    def test_WeeSpellstopperSilence(self):
        """A silenced Wee Spellstopper stops projecting its aura but can still receive one."""
        game = generate_game_for(WeeSpellstopper, [Silence, ShadowBolt], OneCardPlayingAgent,
                                 OneCardPlayingAgent)
        for turn in range(0, 8):
            game.play_single_turn()
        # First Spellstopper gets silenced
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual(5, game.players[0].minions[0].health)
        # Once there are 2 Spellstoppers, but only the first receives the aura
        game.play_single_turn()
        game.play_single_turn()
        self.assertEqual(2, len(game.players[0].minions))
        self.assertEqual(1, game.players[0].minions[0].health)
        self.assertEqual(5, game.players[0].minions[1].health)
    def test_FlameLeviathan(self):
        """Flame Leviathan deals 2 damage to everything when drawn."""
        game = generate_game_for(Wisp, FlameLeviathan, CardTestingAgent, CardTestingAgent)
        game.play_single_turn()
        self.assertEqual(4, len(game.current_player.minions))
        self.assertEqual(30, game.current_player.hero.health)
        self.assertEqual(30, game.other_player.hero.health)
        game.play_single_turn()
        # The draw trigger wiped all Wisps and hit both heroes for 2.
        self.assertEqual(0, len(game.other_player.minions))
        self.assertEqual(28, game.current_player.hero.health)
        self.assertEqual(28, game.other_player.hero.health)
    def test_EchoOfMedivh(self):
        """Echo of Medivh adds a copy of each friendly minion to hand, respecting the 10-card cap."""
        game = generate_game_for([NoviceEngineer, NoviceEngineer, GnomishInventor, GnomishInventor, EchoOfMedivh], Wisp,
                                 OneCardPlayingAgent, DoNothingAgent)
        for turn in range(0, 10):
            game.play_single_turn()
        # Plays first 4 "draw" minions
        self.assertEqual(8, len(game.players[0].hand))
        self.assertEqual(4, len(game.players[0].minions))
        game.play_single_turn()
        # Plays Echo and overflows
        self.assertEqual(10, len(game.players[0].hand))
        self.assertEqual(4, len(game.players[0].minions))
        self.assertEqual("Novice Engineer", game.players[0].hand[8].name)
        self.assertEqual("Novice Engineer", game.players[0].hand[9].name)
    def test_UnstablePortal(self):
        """Unstable Portal adds a random minion to hand with its cost reduced by 3."""
        game = generate_game_for(UnstablePortal, StonetuskBoar, CardTestingAgent, DoNothingAgent)
        for turn in range(3):
            game.play_single_turn()
        self.assertEqual(5, len(game.current_player.hand))
        self.assertTrue(game.current_player.hand[-1].is_minion())
        if game.current_player.hand[-1].mana >= 3:
            # TODO This assertion may fail, if unstable portal summons a Giant. Don't know how to solve that issue
            self.assertEqual(3, game.current_player.hand[-1].mana - game.current_player.hand[-1].mana_cost())
    def test_DragonsBreath(self):
        """Dragon's Breath costs 1 less per minion that died this turn."""
        game = generate_game_for([Flamestrike, DragonsBreath], StonetuskBoar, CardTestingAgent, OneCardPlayingAgent)
        for turn in range(13):
            game.play_single_turn()
        # The flamestrike kills 6 boars, so the Dragon's Breath is free
        self.assertEqual(0, len(game.other_player.minions))
        self.assertEqual(26, game.other_player.hero.health)
        game.play_single_turn()
        game.play_single_turn()
        # The Flamestrike only kills one boar, so we can't afford the Dragon's breath
        self.assertEqual(0, len(game.other_player.minions))
        self.assertEqual(26, game.other_player.hero.health)
    def test_Flamewaker(self):
        """Flamewaker deals 2 random damage after its owner casts a spell."""
        game = generate_game_for([Flamewaker, CircleOfHealing], CircleOfHealing,
                                 OneCardPlayingAgent, OneCardPlayingAgent)
        for turn in range(6):
            game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual(30, game.players[0].hero.health)
        self.assertEqual(30, game.players[1].hero.health)
        game.play_single_turn()
        self.assertEqual(1, len(game.players[0].minions))
        self.assertEqual(30, game.players[0].hero.health)
        self.assertEqual(28, game.players[1].hero.health)
    def test_ArcaneBlast(self):
        """Arcane Blast's damage is doubled (not merely increased) by spell damage bonuses."""
        game = generate_game_for([KoboldGeomancer, DalaranMage, OgreMagi, ArcaneBlast], TournamentMedic,
                                 OneCardPlayingAgent, OneCardPlayingAgent)
        for turn in range(8):
            game.play_single_turn()
        self.assertEqual(3, len(game.other_player.minions))
        self.assertEqual(1, len(game.current_player.minions))
        game.play_single_turn()
        self.assertEqual(0, len(game.other_player.minions))
        self.assertEqual(3, len(game.current_player.minions))
| |
# Copyright 2013 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import sys
import uuid
if sys.platform == 'win32':
import wmi
from oslo_config import cfg
from oslo_log import log as logging
from nova.virt.hyperv import constants
from nova.virt.hyperv import vmutils
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class VMUtilsV2(vmutils.VMUtils):
    # VM utilities for the "root/virtualization/v2" WMI namespace
    # (Hyper-V Server / Windows Server 2012+), overriding the v1 vmutils.

    # WMI ResourceSubType identifiers used when creating/querying resources.
    _PHYS_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Physical Disk Drive'
    _DISK_DRIVE_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic Disk Drive'
    _DVD_DRIVE_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic DVD Drive'
    _SCSI_RES_SUBTYPE = 'Microsoft:Hyper-V:Synthetic SCSI Controller'
    _HARD_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Virtual Hard Disk'
    _DVD_DISK_RES_SUB_TYPE = 'Microsoft:Hyper-V:Virtual CD/DVD Disk'
    _IDE_CTRL_RES_SUB_TYPE = 'Microsoft:Hyper-V:Emulated IDE Controller'
    _SCSI_CTRL_RES_SUB_TYPE = 'Microsoft:Hyper-V:Synthetic SCSI Controller'
    _SERIAL_PORT_RES_SUB_TYPE = 'Microsoft:Hyper-V:Serial Port'

    # VirtualSystemType/SubType markers: "Realized" distinguishes live VMs
    # from snapshots; SubType:2 selects a Generation 2 VM.
    _VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
    _VIRTUAL_SYSTEM_SUBTYPE_GEN2 = 'Microsoft:Hyper-V:SubType:2'

    _SNAPSHOT_FULL = 2

    _METRIC_AGGR_CPU_AVG = 'Aggregated Average CPU Utilization'
    _METRIC_AGGR_MEMORY_AVG = 'Aggregated Average Memory Utilization'
    _METRIC_ENABLED = 2

    _STORAGE_ALLOC_SETTING_DATA_CLASS = 'Msvm_StorageAllocationSettingData'
    _ETHERNET_PORT_ALLOCATION_SETTING_DATA_CLASS = \
        'Msvm_EthernetPortAllocationSettingData'

    _AUTOMATIC_STARTUP_ACTION_NONE = 2

    # Map of nova power-state constants to v2 namespace RequestStateChange
    # values (they differ from the v1 namespace values).
    _vm_power_states_map = {constants.HYPERV_VM_STATE_ENABLED: 2,
                            constants.HYPERV_VM_STATE_DISABLED: 3,
                            constants.HYPERV_VM_STATE_SHUTTING_DOWN: 4,
                            constants.HYPERV_VM_STATE_REBOOT: 11,
                            constants.HYPERV_VM_STATE_PAUSED: 9,
                            constants.HYPERV_VM_STATE_SUSPENDED: 6}
    def __init__(self, host='.'):
        """Initialize against *host* ('.' = local machine), delegating to the v1 base."""
        super(VMUtilsV2, self).__init__(host)
    def _init_hyperv_wmi_conn(self, host):
        """Open the v2 virtualization WMI namespace (overrides the v1 moniker)."""
        self._conn = wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
def list_instance_notes(self):
instance_notes = []
for vs in self._conn.Msvm_VirtualSystemSettingData(
['ElementName', 'Notes'],
VirtualSystemType=self._VIRTUAL_SYSTEM_TYPE_REALIZED):
instance_notes.append((vs.ElementName, [v for v in vs.Notes if v]))
return instance_notes
def list_instances(self):
"""Return the names of all the instances known to Hyper-V."""
return [v.ElementName for v in
self._conn.Msvm_VirtualSystemSettingData(
['ElementName'],
VirtualSystemType=self._VIRTUAL_SYSTEM_TYPE_REALIZED)]
    def _create_vm_obj(self, vs_man_svc, vm_name, vm_gen, notes,
                       dynamic_memory_ratio):
        """Define a new VM via the management service and return its WMI object.

        :param vs_man_svc: Msvm_VirtualSystemManagementService instance
        :param vm_name: display name for the new VM
        :param vm_gen: VM generation (constants.VM_GEN_1 / VM_GEN_2)
        :param notes: list of note strings stored on the VM
        :param dynamic_memory_ratio: >1 means dynamic memory will be used
        """
        vs_data = self._conn.Msvm_VirtualSystemSettingData.new()
        vs_data.ElementName = vm_name
        vs_data.Notes = notes
        # Don't start automatically on host boot
        vs_data.AutomaticStartupAction = self._AUTOMATIC_STARTUP_ACTION_NONE
        # vNUMA and dynamic memory are mutually exclusive
        if dynamic_memory_ratio > 1:
            vs_data.VirtualNumaEnabled = False
        if vm_gen == constants.VM_GEN_2:
            vs_data.VirtualSystemSubType = self._VIRTUAL_SYSTEM_SUBTYPE_GEN2
            # NOTE(review): Secure Boot is explicitly disabled for Gen 2 VMs here.
            vs_data.SecureBootEnabled = False
        (job_path,
         vm_path,
         ret_val) = vs_man_svc.DefineSystem(ResourceSettings=[],
                                            ReferenceConfiguration=None,
                                            SystemSettings=vs_data.GetText_(1))
        job = self.check_ret_val(ret_val, job_path)
        # When DefineSystem runs asynchronously, the VM path must be fetched
        # from the completed job's affected elements instead.
        if not vm_path and job:
            vm_path = job.associators(self._AFFECTED_JOB_ELEMENT_CLASS)[0]
        return self._get_wmi_obj(vm_path)
    def _get_vm_setting_data(self, vm):
        """Return the single realized VirtualSystemSettingData associated with *vm*.

        Raises IndexError if no realized settings object exists.
        """
        vmsettings = vm.associators(
            wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)
        # Avoid snapshots
        return [s for s in vmsettings if
                s.VirtualSystemType == self._VIRTUAL_SYSTEM_TYPE_REALIZED][0]
def _get_attached_disks_query_string(self, scsi_controller_path):
# DVD Drives can be attached to SCSI as well, if the VM Generation is 2
return ("SELECT * FROM Msvm_ResourceAllocationSettingData WHERE ("
"ResourceSubType='%(res_sub_type)s' OR "
"ResourceSubType='%(res_sub_type_virt)s' OR "
"ResourceSubType='%(res_sub_type_dvd)s') AND "
"Parent = '%(parent)s'" % {
'res_sub_type': self._PHYS_DISK_RES_SUB_TYPE,
'res_sub_type_virt': self._DISK_DRIVE_RES_SUB_TYPE,
'res_sub_type_dvd': self._DVD_DRIVE_RES_SUB_TYPE,
'parent': scsi_controller_path.replace("'", "''")})
def attach_drive(self, vm_name, path, ctrller_path, drive_addr,
                 drive_type=constants.DISK):
    """Create a drive and attach it to the vm.

    :param vm_name: name of the target VM.
    :param path: host resource (VHD path or physical drive path).
    :param ctrller_path: WMI path of the controller to attach under.
    :param drive_addr: address (slot) of the drive on the controller.
    :param drive_type: constants.DISK or constants.DVD.
    :raises ValueError: if drive_type is not DISK or DVD.
    """
    vm = self._lookup_vm_check(vm_name)

    # Resource sub-types for the synthetic drive object and for the
    # media resource attached to it, per drive type.
    if drive_type == constants.DISK:
        drive_sub_type = self._DISK_DRIVE_RES_SUB_TYPE
        media_sub_type = self._HARD_DISK_RES_SUB_TYPE
    elif drive_type == constants.DVD:
        drive_sub_type = self._DVD_DRIVE_RES_SUB_TYPE
        media_sub_type = self._DVD_DISK_RES_SUB_TYPE
    else:
        # Previously an unknown type fell through to an
        # UnboundLocalError; fail explicitly instead.
        raise ValueError("Unsupported drive type: %s" % drive_type)

    drive = self._get_new_resource_setting_data(drive_sub_type)
    # Set the ctrller as parent.
    drive.Parent = ctrller_path
    drive.Address = drive_addr
    drive.AddressOnParent = drive_addr
    # Add the cloned disk drive object to the vm.
    new_resources = self._add_virt_resource(drive, vm.path_())
    drive_path = new_resources[0]

    # Attach the media (VHD / DVD image) to the new drive.
    res = self._get_new_resource_setting_data(
        media_sub_type, self._STORAGE_ALLOC_SETTING_DATA_CLASS)
    res.Parent = drive_path
    res.HostResource = [path]
    self._add_virt_resource(res, vm.path_())
def attach_volume_to_controller(self, vm_name, controller_path, address,
                                mounted_disk_path):
    """Attach a volume to a controller."""
    vm = self._lookup_vm_check(vm_name)
    diskdrive = self._get_new_resource_setting_data(
        self._PHYS_DISK_RES_SUB_TYPE)
    # Place the passthrough disk at the requested slot on the controller.
    diskdrive.AddressOnParent = address
    diskdrive.Parent = controller_path
    diskdrive.HostResource = [mounted_disk_path]
    self._add_virt_resource(diskdrive, vm.path_())
def _get_disk_resource_address(self, disk_resource):
return disk_resource.AddressOnParent
def create_scsi_controller(self, vm_name):
    """Create an iscsi controller ready to mount volumes."""
    scsicontrl = self._get_new_resource_setting_data(
        self._SCSI_RES_SUBTYPE)
    # A fresh GUID identifies the new controller instance.
    scsicontrl.VirtualSystemIdentifiers = ['{' + str(uuid.uuid4()) + '}']
    vm = self._lookup_vm_check(vm_name)
    self._add_virt_resource(scsicontrl, vm.path_())
def _get_disk_resource_disk_path(self, disk_resource):
return disk_resource.HostResource
def destroy_vm(self, vm_name):
    """Delete the named VM's definition from the host."""
    vm = self._lookup_vm_check(vm_name)

    vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
    # Remove the VM. It does not destroy any associated virtual disk.
    (job_path, ret_val) = vs_man_svc.DestroySystem(vm.path_())
    self.check_ret_val(ret_val, job_path)
def _add_virt_resource(self, res_setting_data, vm_path):
    """Adds a new resource to the VM."""
    vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
    # The v2 API takes the resource's XML text representation.
    res_xml = [res_setting_data.GetText_(1)]
    (job_path,
     new_resources,
     ret_val) = vs_man_svc.AddResourceSettings(vm_path, res_xml)
    self.check_ret_val(ret_val, job_path)
    # Paths of the newly created resource objects.
    return new_resources
def _modify_virt_resource(self, res_setting_data, vm_path):
    """Updates a VM resource."""
    # NOTE(review): vm_path is unused -- in the v2 API the resource
    # setting data itself identifies its owner; the parameter is kept
    # for signature compatibility with the v1 implementation.
    vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
    (job_path,
     out_res_setting_data,
     ret_val) = vs_man_svc.ModifyResourceSettings(
        ResourceSettings=[res_setting_data.GetText_(1)])
    self.check_ret_val(ret_val, job_path)
def _remove_virt_resource(self, res_setting_data, vm_path):
    """Removes a VM resource."""
    # NOTE(review): vm_path is unused; the resource's own WMI path is
    # all the v2 RemoveResourceSettings call needs.
    vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0]
    res_path = [res_setting_data.path_()]
    (job_path, ret_val) = vs_man_svc.RemoveResourceSettings(res_path)
    self.check_ret_val(ret_val, job_path)
def get_vm_state(self, vm_name):
    """Return the EnabledState code from the VM's summary information."""
    summary = self.get_vm_summary_info(vm_name)
    return summary['EnabledState']
def take_vm_snapshot(self, vm_name):
    """Create a full snapshot of the VM and return its settings WMI path."""
    vm = self._lookup_vm_check(vm_name)
    vs_snap_svc = self._conn.Msvm_VirtualSystemSnapshotService()[0]

    (job_path, snp_setting_data, ret_val) = vs_snap_svc.CreateSnapshot(
        AffectedSystem=vm.path_(),
        SnapshotType=self._SNAPSHOT_FULL)
    self.check_ret_val(ret_val, job_path)

    # The snapshot's setting data is retrieved from the completed job's
    # associators rather than the CreateSnapshot output parameter.
    job_wmi_path = job_path.replace('\\', '/')
    job = wmi.WMI(moniker=job_wmi_path)
    snp_setting_data = job.associators(
        wmi_result_class=self._VIRTUAL_SYSTEM_SETTING_DATA_CLASS)[0]
    return snp_setting_data.path_()
def remove_vm_snapshot(self, snapshot_path):
    """Destroy the snapshot identified by its WMI path."""
    vs_snap_svc = self._conn.Msvm_VirtualSystemSnapshotService()[0]
    (job_path, ret_val) = vs_snap_svc.DestroySnapshot(snapshot_path)
    self.check_ret_val(ret_val, job_path)
def set_nic_connection(self, vm_name, nic_name, vswitch_conn_data):
    """Connect a VM NIC to a virtual switch port."""
    nic_data = self._get_nic_data_by_name(nic_name)

    # An Ethernet port allocation binds the NIC (Parent) to the switch
    # connection (HostResource).
    eth_port_data = self._get_new_setting_data(
        self._ETHERNET_PORT_ALLOCATION_SETTING_DATA_CLASS)
    eth_port_data.HostResource = [vswitch_conn_data]
    eth_port_data.Parent = nic_data.path_()

    vm = self._lookup_vm_check(vm_name)
    self._add_virt_resource(eth_port_data, vm.path_())
def enable_vm_metrics_collection(self, vm_name):
    """Enable aggregate CPU/memory metrics and per-disk metrics for a VM."""
    metric_names = [self._METRIC_AGGR_CPU_AVG,
                    self._METRIC_AGGR_MEMORY_AVG]

    vm = self._lookup_vm_check(vm_name)
    metric_svc = self._conn.Msvm_MetricService()[0]
    (disks, volumes) = self._get_vm_disks(vm)
    # BUG FIX: the original used "is not", an identity test that is
    # unreliable for strings; compare by value so DVD resources are
    # actually excluded.
    filtered_disks = [d for d in disks if
                      d.ResourceSubType != self._DVD_DISK_RES_SUB_TYPE]

    # enable metrics for disk.
    for disk in filtered_disks:
        self._enable_metrics(metric_svc, disk)

    for metric_name in metric_names:
        metric_def = self._conn.CIM_BaseMetricDefinition(Name=metric_name)
        if not metric_def:
            LOG.debug("Metric not found: %s", metric_name)
        else:
            self._enable_metrics(metric_svc, vm, metric_def[0].path_())
def _enable_metrics(self, metric_svc, element, definition_path=None):
    """Turn on metric collection for *element*, optionally scoped to
    a single metric definition (definition_path)."""
    metric_svc.ControlMetrics(
        Subject=element.path_(),
        Definition=definition_path,
        MetricCollectionEnabled=self._METRIC_ENABLED)
| |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
"""
RDS Resource Manager
====================
Example Policies
----------------
Find rds instances that are publicly available
.. code-block:: yaml
policies:
- name: rds-public
resource: rds
filters:
- PubliclyAccessible: true
Find rds instances that are not encrypted
.. code-block:: yaml
policies:
- name: rds-non-encrypted
resource: rds
filters:
- type: value
key: StorageEncrypted
value: true
op: ne
"""
import functools
import itertools
import logging
import operator
import jmespath
import re
from decimal import Decimal as D, ROUND_HALF_UP
from distutils.version import LooseVersion
from botocore.exceptions import ClientError
from concurrent.futures import as_completed
from c7n.actions import (
ActionRegistry, BaseAction, ModifyVpcSecurityGroupsAction)
from c7n.exceptions import PolicyValidationError
from c7n.filters import (
CrossAccountAccessFilter, FilterRegistry, Filter, ValueFilter, AgeFilter)
from c7n.filters.offhours import OffHour, OnHour
import c7n.filters.vpc as net_filters
from c7n.manager import resources
from c7n.query import QueryResourceManager, DescribeSource, ConfigSource, TypeInfo
from c7n import deprecated, tags
from c7n.tags import universal_augment
from c7n.utils import (
local_session, type_schema, get_retry, chunks, snapshot_identifier)
from c7n.resources.kms import ResourceKmsKeyAlias
from c7n.resources.securityhub import OtherResourcePostFinding
# Module logger plus the shared filter/action registries that the
# @filters.register / @actions.register decorators below populate.
log = logging.getLogger('custodian.rds')

filters = FilterRegistry('rds.filters')
actions = ActionRegistry('rds.actions')
class DescribeRDS(DescribeSource):

    def augment(self, dbs):
        """Normalize the API's 'TagList' key to c7n's 'Tags' convention."""
        for db in dbs:
            db['Tags'] = db.pop('TagList', ())
        return dbs
class ConfigRDS(ConfigSource):

    def load_resource(self, item):
        """Mirror config's 'Db*' keys under the API-style 'DB*' spelling."""
        resource = super().load_resource(item)
        for key in list(resource):
            if key.startswith('Db'):
                resource["DB%s" % key[2:]] = resource[key]
        return resource
@resources.register('rds')
class RDS(QueryResourceManager):
    """Resource manager for RDS DB instances.
    """

    class resource_type(TypeInfo):
        # How c7n enumerates, identifies and tags RDS instances.
        service = 'rds'
        arn_type = 'db'
        arn_separator = ':'
        enum_spec = ('describe_db_instances', 'DBInstances', None)
        id = 'DBInstanceIdentifier'
        name = 'Endpoint.Address'
        filter_name = 'DBInstanceIdentifier'
        filter_type = 'scalar'
        date = 'InstanceCreateTime'
        dimension = 'DBInstanceIdentifier'
        cfn_type = config_type = 'AWS::RDS::DBInstance'
        arn = 'DBInstanceArn'
        universal_taggable = True
        default_report_fields = (
            'DBInstanceIdentifier',
            'DBName',
            'Engine',
            'EngineVersion',
            'MultiAZ',
            'AllocatedStorage',
            'StorageEncrypted',
            'PubliclyAccessible',
            'InstanceCreateTime',
        )
        permissions_enum = ('rds:DescribeDBInstances',)

    filter_registry = filters
    action_registry = actions

    source_mapping = {
        'describe': DescribeRDS,
        'config': ConfigRDS
    }
def _db_instance_eligible_for_backup(resource):
db_instance_id = resource['DBInstanceIdentifier']
# Database instance is not in available state
if resource.get('DBInstanceStatus', '') != 'available':
log.debug(
"DB instance %s is not in available state",
db_instance_id)
return False
# The specified DB Instance is a member of a cluster and its
# backup retention should not be modified directly. Instead,
# modify the backup retention of the cluster using the
# ModifyDbCluster API
if resource.get('DBClusterIdentifier', ''):
log.debug(
"DB instance %s is a cluster member",
db_instance_id)
return False
# DB Backups not supported on a read replica for engine postgres
if (resource.get('ReadReplicaSourceDBInstanceIdentifier', '') and
resource.get('Engine', '') == 'postgres'):
log.debug(
"DB instance %s is a postgres read-replica",
db_instance_id)
return False
# DB Backups not supported on a read replica running a mysql
# version before 5.6
if (resource.get('ReadReplicaSourceDBInstanceIdentifier', '') and
resource.get('Engine', '') == 'mysql'):
engine_version = resource.get('EngineVersion', '')
# Assume "<major>.<minor>.<whatever>"
match = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)\..*', engine_version)
if (match and int(match.group('major')) < 5 or
(int(match.group('major')) == 5 and int(match.group('minor')) < 6)):
log.debug(
"DB instance %s is a version %s mysql read-replica",
db_instance_id,
engine_version)
return False
return True
def _db_instance_eligible_for_final_snapshot(resource):
status = resource.get('DBInstanceStatus', '')
# If the DB instance you are deleting has a status of "Creating,"
# you will not be able to have a final DB snapshot taken
# If the DB instance is in a failure state with a status of "failed,"
# "incompatible-restore," or "incompatible-network," you can only delete
# the instance when the SkipFinalSnapshot parameter is set to "true."
eligible_for_final_snapshot = True
if status in ['creating', 'failed', 'incompatible-restore', 'incompatible-network']:
eligible_for_final_snapshot = False
# FinalDBSnapshotIdentifier can not be specified when deleting a
# replica instance
if resource.get('ReadReplicaSourceDBInstanceIdentifier', ''):
eligible_for_final_snapshot = False
# if it's a rds-cluster, don't try to run the rds instance snapshot api call
if resource.get('DBClusterIdentifier', False):
eligible_for_final_snapshot = False
if not eligible_for_final_snapshot:
log.debug('DB instance is not eligible for a snapshot:/n %s', resource)
return eligible_for_final_snapshot
def _get_available_engine_upgrades(client, major=False):
"""Returns all extant rds engine upgrades.
As a nested mapping of engine type to known versions
and their upgrades.
Defaults to minor upgrades, but configurable to major.
Example::
>>> _get_available_engine_upgrades(client)
{
'oracle-se2': {'12.1.0.2.v2': '12.1.0.2.v5',
'12.1.0.2.v3': '12.1.0.2.v5'},
'postgres': {'9.3.1': '9.3.14',
'9.3.10': '9.3.14',
'9.3.12': '9.3.14',
'9.3.2': '9.3.14'}
}
"""
results = {}
paginator = client.get_paginator('describe_db_engine_versions')
for page in paginator.paginate():
engine_versions = page['DBEngineVersions']
for v in engine_versions:
if not v['Engine'] in results:
results[v['Engine']] = {}
if 'ValidUpgradeTarget' not in v or len(v['ValidUpgradeTarget']) == 0:
continue
for t in v['ValidUpgradeTarget']:
if not major and t['IsMajorVersionUpgrade']:
continue
if LooseVersion(t['EngineVersion']) > LooseVersion(
results[v['Engine']].get(v['EngineVersion'], '0.0.0')):
results[v['Engine']][v['EngineVersion']] = t['EngineVersion']
return results
# Off-hours scheduling filters are reused directly from the shared
# c7n.filters.offhours implementations.
filters.register('offhour', OffHour)
filters.register('onhour', OnHour)
@filters.register('default-vpc')
class DefaultVpc(net_filters.DefaultVpcBase):
    """ Matches if an rds database is in the default vpc

    :example:

    .. code-block:: yaml

            policies:
              - name: default-vpc-rds
                resource: rds
                filters:
                  - type: default-vpc
    """
    schema = type_schema('default-vpc')

    def __call__(self, rdb):
        # Match against the VPC of the instance's subnet group.
        return self.match(rdb['DBSubnetGroup']['VpcId'])
@filters.register('security-group')
class SecurityGroupFilter(net_filters.SecurityGroupFilter):
    """Filter rds instances by their attached VPC security groups."""

    RelatedIdsExpression = "VpcSecurityGroups[].VpcSecurityGroupId"
@filters.register('subnet')
class SubnetFilter(net_filters.SubnetFilter):
    """Filter rds instances by attributes of their subnets."""

    RelatedIdsExpression = "DBSubnetGroup.Subnets[].SubnetIdentifier"
@filters.register('vpc')
class VpcFilter(net_filters.VpcFilter):
    """Filter rds instances by attributes of their VPC."""

    RelatedIdsExpression = "DBSubnetGroup.VpcId"


filters.register('network-location', net_filters.NetworkLocation)
@filters.register('kms-alias')
class KmsKeyAlias(ResourceKmsKeyAlias):
    """Filter rds instances by the alias of their KMS key."""

    def process(self, dbs, event=None):
        return self.get_matching_aliases(dbs)
@actions.register('auto-patch')
class AutoPatch(BaseAction):
    """Toggle AutoMinorUpgrade flag on RDS instance

    'window' parameter needs to be in the format 'ddd:hh:mm-ddd:hh:mm' and
    have at least 30 minutes between start & end time.
    If 'window' is not specified, AWS will assign a random maintenance window
    to each instance selected.

    :example:

    .. code-block:: yaml

            policies:
              - name: enable-rds-autopatch
                resource: rds
                filters:
                  - AutoMinorVersionUpgrade: false
                actions:
                  - type: auto-patch
                    minor: true
                    window: Mon:23:00-Tue:01:00
    """

    schema = type_schema(
        'auto-patch',
        minor={'type': 'boolean'}, window={'type': 'string'})
    permissions = ('rds:ModifyDBInstance',)

    def process(self, dbs):
        client = local_session(
            self.manager.session_factory).client('rds')

        # Toggle the minor-upgrade flag; optionally pin the window.
        params = {'AutoMinorVersionUpgrade': self.data.get('minor', True)}
        window = self.data.get('window')
        if window:
            params['PreferredMaintenanceWindow'] = window

        for db in dbs:
            client.modify_db_instance(
                DBInstanceIdentifier=db['DBInstanceIdentifier'],
                **params)
@filters.register('upgrade-available')
class UpgradeAvailable(Filter):
    """ Scan DB instances for available engine upgrades

    This will pull DB instances & check their specific engine for any
    engine version with higher release numbers than the current one

    This will also annotate the rds instance with 'target_engine' which is
    the most recent version of the engine available

    :example:

    .. code-block:: yaml

            policies:
              - name: rds-upgrade-available
                resource: rds
                filters:
                  - type: upgrade-available
                    major: False
    """

    schema = type_schema('upgrade-available',
                         major={'type': 'boolean'},
                         value={'type': 'boolean'})
    permissions = ('rds:DescribeDBEngineVersions',)

    def process(self, resources, event=None):
        client = local_session(self.manager.session_factory).client('rds')
        check_upgrade_extant = self.data.get('value', True)
        check_major = self.data.get('major', False)
        engine_upgrades = _get_available_engine_upgrades(
            client, major=check_major)

        matched = []
        for resource in resources:
            engine_map = engine_upgrades.get(resource['Engine'], {})
            target_upgrade = engine_map.get(resource['EngineVersion'])
            if target_upgrade is None:
                # value: false selects instances with NO upgrade available.
                if check_upgrade_extant is False:
                    matched.append(resource)
                continue
            resource['c7n-rds-engine-upgrade'] = target_upgrade
            matched.append(resource)
        return matched
@actions.register('upgrade')
class UpgradeMinor(BaseAction):
    """Upgrades a RDS instance to the latest major/minor version available

    Use of the 'immediate' flag (default False) will automatically upgrade
    the RDS engine disregarding the existing maintenance window.

    :example:

    .. code-block:: yaml

            policies:
              - name: upgrade-rds-minor
                resource: rds
                actions:
                  - type: upgrade
                    major: False
                    immediate: False
    """

    schema = type_schema(
        'upgrade',
        major={'type': 'boolean'},
        immediate={'type': 'boolean'})
    permissions = ('rds:ModifyDBInstance',)

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('rds')
        engine_upgrades = None
        for r in resources:
            if 'EngineVersion' in r['PendingModifiedValues']:
                # Upgrade has already been scheduled
                continue
            if 'c7n-rds-engine-upgrade' not in r:
                # Not annotated by the upgrade-available filter; resolve
                # the target version lazily (once) from the API.
                if engine_upgrades is None:
                    engine_upgrades = _get_available_engine_upgrades(
                        client, major=self.data.get('major', False))
                target = engine_upgrades.get(
                    r['Engine'], {}).get(r['EngineVersion'])
                if target is None:
                    log.debug(
                        "implicit filter no upgrade on %s",
                        r['DBInstanceIdentifier'])
                    continue
                r['c7n-rds-engine-upgrade'] = target
            client.modify_db_instance(
                DBInstanceIdentifier=r['DBInstanceIdentifier'],
                EngineVersion=r['c7n-rds-engine-upgrade'],
                ApplyImmediately=self.data.get('immediate', False))
@actions.register('tag-trim')
class TagTrim(tags.TagTrim):
    """Trim tags from rds instances down to the allowed maximum."""

    permissions = ('rds:RemoveTagsFromResource',)

    def process_tag_removal(self, client, resource, candidates):
        # Strip the candidate keys via the RDS tagging API.
        client.remove_tags_from_resource(
            ResourceName=resource['DBInstanceArn'],
            TagKeys=candidates)
START_STOP_ELIGIBLE_ENGINES = {
'postgres', 'sqlserver-ee',
'oracle-se2', 'mariadb', 'oracle-ee',
'sqlserver-ex', 'sqlserver-se', 'oracle-se',
'mysql', 'oracle-se1', 'sqlserver-web'}
def _eligible_start_stop(db, state="available"):
# See conditions noted here
# https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_StopInstance.html
# Note that this doesn't really specify what happens for all the nosql engines
# that are available as rds engines.
if db.get('DBInstanceStatus') != state:
return False
if db.get('MultiAZ') and db['Engine'].startswith('sqlserver-'):
return False
if db['Engine'] not in START_STOP_ELIGIBLE_ENGINES:
return False
if db.get('ReadReplicaDBInstanceIdentifiers'):
return False
if db.get('ReadReplicaSourceDBInstanceIdentifier'):
return False
# TODO is SQL Server mirror is detectable.
return True
@actions.register('stop')
class Stop(BaseAction):
    """Stop an rds instance.

    https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_StopInstance.html
    """

    schema = type_schema('stop')
    permissions = ("rds:StopDBInstance",)

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('rds')
        # Only act on instances the stop API will accept.
        eligible = [r for r in resources if _eligible_start_stop(r)]
        for resource in eligible:
            try:
                client.stop_db_instance(
                    DBInstanceIdentifier=resource['DBInstanceIdentifier'])
            except ClientError as e:
                log.exception(
                    "Error stopping db instance:%s err:%s",
                    resource['DBInstanceIdentifier'], e)
@actions.register('start')
class Start(BaseAction):
    """Start an rds instance.
    """

    schema = type_schema('start')
    permissions = ("rds:StartDBInstance",)

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('rds')
        for resource in resources:
            # Only stopped instances can be started.
            if not _eligible_start_stop(resource, state='stopped'):
                continue
            try:
                client.start_db_instance(
                    DBInstanceIdentifier=resource['DBInstanceIdentifier'])
            except ClientError as e:
                log.exception(
                    "Error starting db instance:%s err:%s",
                    resource['DBInstanceIdentifier'], e)
@actions.register('delete')
class Delete(BaseAction):
    """Deletes selected RDS instances

    This will delete RDS instances. It is recommended to apply with a filter
    to avoid deleting all RDS instances in the account.

    :example:

    .. code-block:: yaml

            policies:
              - name: rds-delete
                resource: rds
                filters:
                  - default-vpc
                actions:
                  - type: delete
                    skip-snapshot: true
    """

    schema = type_schema('delete', **{
        'skip-snapshot': {'type': 'boolean'},
        'copy-restore-info': {'type': 'boolean'}
    })

    permissions = ('rds:DeleteDBInstance', 'rds:AddTagsToResource')

    def validate(self):
        # copy-restore-info needs a final snapshot to carry its tags.
        if self.data.get('skip-snapshot', False) and self.data.get(
                'copy-restore-info'):
            raise PolicyValidationError(
                "skip-snapshot cannot be specified with copy-restore-info on %s" % (
                    self.manager.data,))
        return self

    def process(self, dbs):
        skip = self.data.get('skip-snapshot', False)
        # Concurrency feels like overkill here.
        client = local_session(self.manager.session_factory).client('rds')

        for db in dbs:
            params = dict(
                DBInstanceIdentifier=db['DBInstanceIdentifier'])
            # Take a final snapshot unless skipped or the API forbids it.
            if skip or not _db_instance_eligible_for_final_snapshot(db):
                params['SkipFinalSnapshot'] = True
            else:
                params['FinalDBSnapshotIdentifier'] = snapshot_identifier(
                    'Final', db['DBInstanceIdentifier'])
            if self.data.get('copy-restore-info', False):
                self.copy_restore_info(client, db)
                # Tag copying must be on for the restore tags to reach
                # the final snapshot.
                if not db['CopyTagsToSnapshot']:
                    client.modify_db_instance(
                        DBInstanceIdentifier=db['DBInstanceIdentifier'],
                        CopyTagsToSnapshot=True)
            self.log.info(
                "Deleting rds: %s snapshot: %s",
                db['DBInstanceIdentifier'],
                params.get('FinalDBSnapshotIdentifier', False))

            try:
                client.delete_db_instance(**params)
            except ClientError as e:
                # Already being deleted: treat as success and move on.
                if e.response['Error']['Code'] == "InvalidDBInstanceState":
                    continue
                raise

        return dbs

    def copy_restore_info(self, client, instance):
        # Record enough configuration on the instance (and, via tag
        # copying, its final snapshot) for the 'restore' action to
        # rebuild the instance later.
        tags = []
        tags.append({
            'Key': 'VPCSecurityGroups',
            'Value': ''.join([
                g['VpcSecurityGroupId'] for g in instance['VpcSecurityGroups']
            ])})
        tags.append({
            'Key': 'OptionGroupName',
            'Value': instance['OptionGroupMemberships'][0]['OptionGroupName']})
        tags.append({
            'Key': 'ParameterGroupName',
            'Value': instance['DBParameterGroups'][0]['DBParameterGroupName']})
        tags.append({
            'Key': 'InstanceClass',
            'Value': instance['DBInstanceClass']})
        tags.append({
            'Key': 'StorageType',
            'Value': instance['StorageType']})
        tags.append({
            'Key': 'MultiAZ',
            'Value': str(instance['MultiAZ'])})
        tags.append({
            'Key': 'DBSubnetGroupName',
            'Value': instance['DBSubnetGroup']['DBSubnetGroupName']})
        client.add_tags_to_resource(
            ResourceName=self.manager.generate_arn(
                instance['DBInstanceIdentifier']),
            Tags=tags)
@actions.register('set-snapshot-copy-tags')
class CopySnapshotTags(BaseAction):
    """Enables copying tags from rds instance to snapshot

    DEPRECATED - use modify-db instead with `CopyTagsToSnapshot`

    :example:

    .. code-block:: yaml

            policies:
              - name: enable-rds-snapshot-tags
                resource: rds
                filters:
                  - type: value
                    key: Engine
                    value: aurora
                    op: eq
                actions:
                  - type: set-snapshot-copy-tags
                    enable: True
    """
    deprecations = (
        deprecated.action("use modify-db instead with `CopyTagsToSnapshot`"),
    )

    schema = type_schema(
        'set-snapshot-copy-tags',
        enable={'type': 'boolean'})
    permissions = ('rds:ModifyDBInstance',)

    def process(self, resources):
        error = None
        with self.executor_factory(max_workers=2) as w:
            futures = {}
            client = local_session(self.manager.session_factory).client('rds')
            # Only touch instances whose flag differs from the target.
            resources = [r for r in resources
                         if r['CopyTagsToSnapshot'] != self.data.get('enable', True)]
            for r in resources:
                futures[w.submit(self.set_snapshot_tags, client, r)] = r
            for f in as_completed(futures):
                if f.exception():
                    error = f.exception()
                    self.log.error(
                        'error updating rds:%s CopyTagsToSnapshot \n %s',
                        futures[f]['DBInstanceIdentifier'], error)
        # Re-raise the last failure after all submissions have settled.
        if error:
            raise error
        return resources

    def set_snapshot_tags(self, client, r):
        # Retry to ride through throttling / transient API errors.
        self.manager.retry(
            client.modify_db_instance,
            DBInstanceIdentifier=r['DBInstanceIdentifier'],
            CopyTagsToSnapshot=self.data.get('enable', True))
@RDS.action_registry.register("post-finding")
class DbInstanceFinding(OtherResourcePostFinding):
    """Post a Security Hub finding for an rds instance."""

    # Extra attributes flattened into the finding's resource details.
    fields = [
        {'key': 'DBSubnetGroupName', 'expr': 'DBSubnetGroup.DBSubnetGroupName'},
        {'key': 'VpcId', 'expr': 'DBSubnetGroup.VpcId'},
    ]
@actions.register('snapshot')
class Snapshot(BaseAction):
    """Creates a manual snapshot of a RDS instance

    :example:

    .. code-block:: yaml

            policies:
              - name: rds-snapshot
                resource: rds
                actions:
                  - snapshot
    """

    schema = type_schema('snapshot')
    permissions = ('rds:CreateDBSnapshot',)

    def process(self, dbs):
        # Fan out over a small thread pool; failures are logged, not fatal.
        with self.executor_factory(max_workers=3) as w:
            futures = []
            for db in dbs:
                futures.append(w.submit(
                    self.process_rds_snapshot,
                    db))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception creating rds snapshot \n %s",
                        f.exception())
        return dbs

    def process_rds_snapshot(self, resource):
        # Skip cluster members, unavailable instances and unsupported
        # read replicas.
        if not _db_instance_eligible_for_backup(resource):
            return

        c = local_session(self.manager.session_factory).client('rds')
        c.create_db_snapshot(
            DBSnapshotIdentifier=snapshot_identifier(
                self.data.get('snapshot-prefix', 'Backup'),
                resource['DBInstanceIdentifier']),
            DBInstanceIdentifier=resource['DBInstanceIdentifier'])
@actions.register('resize')
class ResizeInstance(BaseAction):
    """Change the allocated storage of an rds instance.

    :example:

    This will find databases using over 85% of their allocated
    storage, and resize them to have an additional 30% storage
    the resize here is async during the next maintenance.

    .. code-block:: yaml

            policies:
              - name: rds-resize-up
                resource: rds
                filters:
                  - type: metrics
                    name: FreeStorageSpace
                    percent-attr: AllocatedStorage
                    attr-multiplier: 1073741824
                    value: 90
                    op: greater-than
                actions:
                  - type: resize
                    percent: 30

    This will find databases using under 20% of their allocated
    storage, and resize them to be 30% smaller, the resize here
    is configured to be immediate.

    .. code-block:: yaml

            policies:
              - name: rds-resize-down
                resource: rds
                filters:
                  - type: metrics
                    name: FreeStorageSpace
                    percent-attr: AllocatedStorage
                    attr-multiplier: 1073741824
                    value: 90
                    op: greater-than
                actions:
                  - type: resize
                    percent: -30
                    immediate: true
    """
    # NOTE(review): both docstring examples use an identical metrics
    # filter (value: 90, greater-than) although the prose describes
    # opposite conditions -- looks like a copy/paste slip; confirm the
    # intended thresholds before relying on the examples.

    schema = type_schema(
        'resize',
        percent={'type': 'number'},
        immediate={'type': 'boolean'})
    permissions = ('rds:ModifyDBInstance',)

    def process(self, resources):
        c = local_session(self.manager.session_factory).client('rds')
        for r in resources:
            # Scale AllocatedStorage by (100 + percent)%, rounding
            # half-up to an integer storage size.
            old_val = D(r['AllocatedStorage'])
            _100 = D(100)
            new_val = ((_100 + D(self.data['percent'])) / _100) * old_val
            rounded = int(new_val.quantize(D('0'), ROUND_HALF_UP))
            c.modify_db_instance(
                DBInstanceIdentifier=r['DBInstanceIdentifier'],
                AllocatedStorage=rounded,
                ApplyImmediately=self.data.get('immediate', False))
@actions.register('retention')
class RetentionWindow(BaseAction):
    """
    Sets the 'BackupRetentionPeriod' value for automated snapshots,
    enforce (min, max, exact) sets retention days occordingly.
    :example:

    .. code-block:: yaml

            policies:
              - name: rds-snapshot-retention
                resource: rds
                filters:
                  - type: value
                    key: BackupRetentionPeriod
                    value: 7
                    op: lt
                actions:
                  - type: retention
                    days: 7
                    copy-tags: true
                    enforce: exact
    """

    date_attribute = "BackupRetentionPeriod"
    schema = type_schema(
        'retention', **{'days': {'type': 'number'},
                        'copy-tags': {'type': 'boolean'},
                        'enforce': {'type': 'string', 'enum': [
                            'min', 'max', 'exact']}})
    permissions = ('rds:ModifyDBInstance',)

    def process(self, dbs):
        with self.executor_factory(max_workers=3) as w:
            futures = []
            for db in dbs:
                futures.append(w.submit(
                    self.process_snapshot_retention,
                    db))
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception setting rds retention  \n %s",
                        f.exception())
        return dbs

    def process_snapshot_retention(self, resource):
        current_retention = int(resource.get('BackupRetentionPeriod', 0))
        current_copy_tags = resource['CopyTagsToSnapshot']
        new_retention = self.data['days']
        new_copy_tags = self.data.get('copy-tags', True)
        retention_type = self.data.get('enforce', 'min').lower()

        # NOTE(review): when copy-tags differs from the current setting,
        # the first branch fires regardless of the requested enforce
        # mode, so e.g. enforce: exact combined with a copy-tags change
        # applies max(current, new) retention -- confirm this is the
        # intended behavior.
        if ((retention_type == 'min' or
             current_copy_tags != new_copy_tags) and
                _db_instance_eligible_for_backup(resource)):
            self.set_retention_window(
                resource,
                max(current_retention, new_retention),
                new_copy_tags)
            return resource

        if ((retention_type == 'max' or
             current_copy_tags != new_copy_tags) and
                _db_instance_eligible_for_backup(resource)):
            self.set_retention_window(
                resource,
                min(current_retention, new_retention),
                new_copy_tags)
            return resource

        if ((retention_type == 'exact' or
             current_copy_tags != new_copy_tags) and
                _db_instance_eligible_for_backup(resource)):
            self.set_retention_window(resource, new_retention, new_copy_tags)
            return resource

    def set_retention_window(self, resource, retention, copy_tags):
        c = local_session(self.manager.session_factory).client('rds')
        c.modify_db_instance(
            DBInstanceIdentifier=resource['DBInstanceIdentifier'],
            BackupRetentionPeriod=retention,
            CopyTagsToSnapshot=copy_tags)
@actions.register('set-public-access')
class RDSSetPublicAvailability(BaseAction):
    """
    This action allows for toggling an RDS instance
    'PubliclyAccessible' flag to true or false

    :example:

    .. code-block:: yaml

            policies:
              - name: disable-rds-public-accessibility
                resource: rds
                filters:
                  - PubliclyAccessible: true
                actions:
                  - type: set-public-access
                    state: false
    """

    schema = type_schema(
        "set-public-access",
        state={'type': 'boolean'})
    permissions = ('rds:ModifyDBInstance',)

    def set_accessibility(self, r):
        client = local_session(self.manager.session_factory).client('rds')
        client.modify_db_instance(
            DBInstanceIdentifier=r['DBInstanceIdentifier'],
            PubliclyAccessible=self.data.get('state', False))

    def process(self, rds):
        # Fan out the per-instance modify calls over a small pool.
        with self.executor_factory(max_workers=2) as w:
            futures = {w.submit(self.set_accessibility, r): r for r in rds}
            for f in as_completed(futures):
                if f.exception():
                    self.log.error(
                        "Exception setting public access on %s  \n %s",
                        futures[f]['DBInstanceIdentifier'], f.exception())
        return rds
@resources.register('rds-subscription')
class RDSSubscription(QueryResourceManager):
    """Resource manager for RDS event subscriptions."""

    class resource_type(TypeInfo):
        service = 'rds'
        arn_type = 'es'
        cfn_type = 'AWS::RDS::EventSubscription'
        enum_spec = (
            'describe_event_subscriptions', 'EventSubscriptionsList', None)
        name = id = "CustSubscriptionId"
        arn = 'EventSubscriptionArn'
        date = "SubscriptionCreateTime"
        permissions_enum = ('rds:DescribeEventSubscriptions',)
        universal_taggable = object()

    # Fetch and attach resource-group tags via the shared augment helper.
    augment = universal_augment
@RDSSubscription.action_registry.register('delete')
class RDSSubscriptionDelete(BaseAction):
    """Deletes an RDS event subscription resource

    :example:

    .. code-block:: yaml

            policies:
              - name: rds-subscription-delete
                resource: rds-subscription
                filters:
                  - type: value
                    key: CustSubscriptionId
                    value: xyz
                actions:
                  - delete
    """

    schema = type_schema('delete')
    permissions = ('rds:DeleteEventSubscription',)

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('rds')
        for r in resources:
            # Ignore races where the subscription is already gone or is
            # mid-deletion.
            self.manager.retry(
                client.delete_event_subscription, SubscriptionName=r['CustSubscriptionId'],
                ignore_err_codes=('SubscriptionNotFoundFault',
                                  'InvalidEventSubscriptionStateFault'))
class DescribeRDSSnapshot(DescribeSource):

    def augment(self, snaps):
        """Normalize the API's 'TagList' key to c7n's 'Tags' convention."""
        for snapshot in snaps:
            snapshot['Tags'] = snapshot.pop('TagList', ())
        return snaps
@resources.register('rds-snapshot')
class RDSSnapshot(QueryResourceManager):
    """Resource manager for RDS DB snapshots.
    """

    class resource_type(TypeInfo):
        service = 'rds'
        arn_type = 'snapshot'
        arn_separator = ':'
        enum_spec = ('describe_db_snapshots', 'DBSnapshots', None)
        name = id = 'DBSnapshotIdentifier'
        date = 'SnapshotCreateTime'
        config_type = "AWS::RDS::DBSnapshot"
        filter_name = "DBSnapshotIdentifier"
        filter_type = "scalar"
        universal_taggable = True
        permissions_enum = ('rds:DescribeDBSnapshots',)

    source_mapping = {
        'describe': DescribeRDSSnapshot,
        'config': ConfigSource
    }
@RDSSnapshot.filter_registry.register('onhour')
class RDSSnapshotOnHour(OnHour):
    """Scheduled action on rds snapshot."""
@RDSSnapshot.filter_registry.register('latest')
class LatestSnapshot(Filter):
    """Return the latest snapshot for each database.
    """
    schema = type_schema('latest', automatic={'type': 'boolean'})
    permissions = ('rds:DescribeDBSnapshots',)

    def process(self, resources, event=None):
        results = []
        # automatic: false restricts selection to manual snapshots.
        if not self.data.get('automatic', True):
            resources = [r for r in resources if r['SnapshotType'] == 'manual']
        # BUG FIX: itertools.groupby only groups *consecutive* items, so
        # the input must be sorted by the grouping key; otherwise a
        # database whose snapshots are interleaved with another's would
        # produce multiple groups (and multiple "latest" results).
        resources = sorted(
            resources, key=operator.itemgetter('DBInstanceIdentifier'))
        for db_identifier, snapshots in itertools.groupby(
                resources, operator.itemgetter('DBInstanceIdentifier')):
            results.append(
                sorted(snapshots,
                       key=operator.itemgetter('SnapshotCreateTime'))[-1])
        return results
@RDSSnapshot.filter_registry.register('age')
class RDSSnapshotAge(AgeFilter):
    """Filters RDS snapshots based on age (in days)

    :example:

    .. code-block:: yaml

            policies:
              - name: rds-snapshot-expired
                resource: rds-snapshot
                filters:
                  - type: age
                    days: 28
                    op: ge
                actions:
                  - delete
    """

    schema = type_schema(
        'age', days={'type': 'number'},
        op={'$ref': '#/definitions/filters_common/comparison_operators'})

    date_attribute = 'SnapshotCreateTime'

    def get_resource_date(self, i):
        # Used by AgeFilter to compute the snapshot's age.
        return i.get('SnapshotCreateTime')
@RDSSnapshot.action_registry.register('restore')
class RestoreInstance(BaseAction):
    """Restore an rds instance from a snapshot.

    Note this requires the snapshot or db deletion be taken
    with the `copy-restore-info` boolean flag set to true, as
    various instance metadata is stored on the snapshot as tags.

    additional parameters to restore db instance api call be overridden
    via `restore_options` settings. various modify db instance parameters
    can be specified via `modify_options` settings.
    """

    schema = type_schema(
        'restore',
        restore_options={'type': 'object'},
        modify_options={'type': 'object'})
    permissions = (
        'rds:ModifyDBInstance',
        'rds:ModifyDBParameterGroup',
        'rds:ModifyOptionGroup',
        'rds:RebootDBInstance',
        'rds:RestoreDBInstanceFromDBSnapshot')

    # seconds between availability-waiter polls (see process_instance)
    poll_period = 60
    # snapshot tags that must be present to reconstruct the instance
    restore_keys = {
        'VPCSecurityGroups', 'MultiAZ', 'DBSubnetGroupName',
        'InstanceClass', 'StorageType', 'ParameterGroupName',
        'OptionGroupName'}

    def validate(self):
        # Require the `latest` filter so a policy restores at most one
        # snapshot per database.
        found = False
        for f in self.manager.iter_filters():
            if isinstance(f, LatestSnapshot):
                found = True
        if not found:
            # do we really need this...
            raise PolicyValidationError(
                "must filter by latest to use restore action %s" % (
                    self.manager.data,))
        return self

    def process(self, resources):
        client = local_session(self.manager.session_factory).client('rds')
        # restore up to 10 in parallel, we have to wait on each.
        with self.executor_factory(
                max_workers=min(10, len(resources) or 1)) as w:
            futures = {}
            for r in resources:
                tags = {t['Key']: t['Value'] for t in r['Tags']}
                # skip snapshots lacking the copy-restore-info metadata
                if not set(tags).issuperset(self.restore_keys):
                    self.log.warning(
                        "snapshot:%s missing restore tags",
                        r['DBSnapshotIdentifier'])
                    continue
                futures[w.submit(self.process_instance, client, r)] = r
            for f in as_completed(futures):
                r = futures[f]
                if f.exception():
                    self.log.warning(
                        "Error restoring db:%s from:%s error:\n%s",
                        r['DBInstanceIdentifier'], r['DBSnapshotIdentifier'],
                        f.exception())
                    continue

    def process_instance(self, client, r):
        """Restore one instance: create from snapshot, wait until
        available, apply post-restore modifications, then reboot (so
        settings such as the parameter group take effect)."""
        params, post_modify = self.get_restore_from_tags(r)
        self.manager.retry(
            client.restore_db_instance_from_db_snapshot, **params)
        waiter = client.get_waiter('db_instance_available')
        # wait up to 40m
        waiter.config.delay = self.poll_period
        waiter.wait(DBInstanceIdentifier=params['DBInstanceIdentifier'])
        self.manager.retry(
            client.modify_db_instance,
            DBInstanceIdentifier=params['DBInstanceIdentifier'],
            ApplyImmediately=True,
            **post_modify)
        self.manager.retry(
            client.reboot_db_instance,
            DBInstanceIdentifier=params['DBInstanceIdentifier'],
            ForceFailover=False)

    def get_restore_from_tags(self, snapshot):
        """Build (restore params, post-restore modify params) from the
        instance metadata stored as tags on the snapshot."""
        params, post_modify = {}, {}
        tags = {t['Key']: t['Value'] for t in snapshot['Tags']}

        params['DBInstanceIdentifier'] = snapshot['DBInstanceIdentifier']
        params['DBSnapshotIdentifier'] = snapshot['DBSnapshotIdentifier']
        # tag values are strings; compare directly instead of the old
        # `x and True or False` conditional idiom
        params['MultiAZ'] = tags['MultiAZ'] == 'True'
        params['DBSubnetGroupName'] = tags['DBSubnetGroupName']
        params['DBInstanceClass'] = tags['InstanceClass']
        params['CopyTagsToSnapshot'] = True
        params['StorageType'] = tags['StorageType']
        params['OptionGroupName'] = tags['OptionGroupName']

        # these two can only be applied via modify_db_instance afterwards
        post_modify['DBParameterGroupName'] = tags['ParameterGroupName']
        post_modify['VpcSecurityGroupIds'] = tags['VPCSecurityGroups'].split(',')

        # carry over the remaining (user) tags to the restored instance
        params['Tags'] = [
            {'Key': k, 'Value': v} for k, v in tags.items()
            if k not in self.restore_keys]

        params.update(self.data.get('restore_options', {}))
        post_modify.update(self.data.get('modify_options', {}))
        return params, post_modify
@RDSSnapshot.filter_registry.register('cross-account')
class CrossAccountAccess(CrossAccountAccessFilter):
    """Filter rds snapshots shared to accounts outside an allowed set.

    Fetches each snapshot's share attributes, records them on the
    resource under ``c7n:attributes``, and matches snapshots whose
    'restore' account list contains accounts not in ``self.accounts``;
    the offending accounts are annotated on the resource.
    """

    permissions = ('rds:DescribeDBSnapshotAttributes',)
    # resource key where the raw snapshot attributes are cached
    attributes_key = 'c7n:attributes'
    # resource key listing the non-allowed accounts found
    annotation_key = 'c7n:CrossAccountViolations'

    def process(self, resources, event=None):
        # allowed account ids, resolved by the CrossAccountAccessFilter base
        self.accounts = self.get_accounts()
        results = []
        with self.executor_factory(max_workers=2) as w:
            futures = []
            # batch to bound per-thread api call volume
            for resource_set in chunks(resources, 20):
                futures.append(w.submit(
                    self.process_resource_set, resource_set))
            for f in as_completed(futures):
                if f.exception():
                    # best effort: log the failed batch, keep the rest
                    self.log.error(
                        "Exception checking cross account access\n %s" % (
                            f.exception()))
                    continue
                results.extend(f.result())
        return results

    def process_resource_set(self, resource_set):
        client = local_session(self.manager.session_factory).client('rds')
        results = []
        for r in resource_set:
            # attribute name -> value list, e.g. 'restore' -> [account ids]
            attrs = {t['AttributeName']: t['AttributeValues']
                     for t in self.manager.retry(
                        client.describe_db_snapshot_attributes,
                        DBSnapshotIdentifier=r['DBSnapshotIdentifier'])[
                            'DBSnapshotAttributesResult']['DBSnapshotAttributes']}
            r[self.attributes_key] = attrs
            shared_accounts = set(attrs.get('restore', []))
            # accounts granted restore access that are not allowed
            delta_accounts = shared_accounts.difference(self.accounts)
            if delta_accounts:
                r[self.annotation_key] = list(delta_accounts)
                results.append(r)
        return results
@RDSSnapshot.action_registry.register('set-permissions')
class SetPermissions(BaseAction):
    """Set permissions for copying or restoring an RDS snapshot

    Use the 'add' and 'remove' parameters to control which accounts to
    add or remove, respectively.  The default is to remove any
    permissions granted to other AWS accounts.

    Use `remove: matched` in combination with the `cross-account` filter
    for more flexible removal options such as preserving access for
    a set of whitelisted accounts:

    :example:

    .. code-block:: yaml

            policies:
              - name: rds-snapshot-remove-cross-account
                resource: rds-snapshot
                filters:
                  - type: cross-account
                    whitelist:
                      - '112233445566'
                actions:
                  - type: set-permissions
                    remove: matched
    """

    # account entries are 12-digit ids or the literal 'all'
    schema = type_schema(
        'set-permissions',
        remove={'oneOf': [
            {'enum': ['matched']},
            {'type': 'array', 'items': {
                'oneOf': [
                    {'type': 'string', 'minLength': 12, 'maxLength': 12},
                    {'enum': ['all']},
                ],
            }}
        ]},
        add={
            'type': 'array', 'items': {
                'oneOf': [
                    {'type': 'string', 'minLength': 12, 'maxLength': 12},
                    {'enum': ['all']},
                ]
            }
        }
    )

    permissions = ('rds:ModifyDBSnapshotAttribute',)

    def validate(self):
        # `remove: matched` relies on annotations produced earlier in the
        # policy by a cross-account filter; reject the policy otherwise.
        if self.data.get('remove') == 'matched':
            found = False
            for f in self.manager.iter_filters():
                if isinstance(f, CrossAccountAccessFilter):
                    found = True
                    break
            if not found:
                raise PolicyValidationError(
                    "policy:%s filter:%s with matched requires cross-account filter" % (
                        self.manager.ctx.policy.name, self.type))

    def process(self, snapshots):
        client = local_session(self.manager.session_factory).client('rds')
        for s in snapshots:
            self.process_snapshot(client, s)

    def process_snapshot(self, client, snapshot):
        add_accounts = self.data.get('add', [])
        remove_accounts = self.data.get('remove', [])
        if not (add_accounts or remove_accounts):
            # Default mode: revoke everything currently shared.  Fetch the
            # snapshot attributes unless the cross-account filter already
            # cached them on the resource.
            if CrossAccountAccess.attributes_key not in snapshot:
                attrs = {
                    t['AttributeName']: t['AttributeValues']
                    for t in self.manager.retry(
                        client.describe_db_snapshot_attributes,
                        DBSnapshotIdentifier=snapshot['DBSnapshotIdentifier']
                    )['DBSnapshotAttributesResult']['DBSnapshotAttributes']
                }
                snapshot[CrossAccountAccess.attributes_key] = attrs
            remove_accounts = snapshot[CrossAccountAccess.attributes_key].get('restore', [])
        elif remove_accounts == 'matched':
            # remove only the violations annotated by the cross-account filter
            remove_accounts = snapshot.get(CrossAccountAccess.annotation_key, [])

        if add_accounts or remove_accounts:
            client.modify_db_snapshot_attribute(
                DBSnapshotIdentifier=snapshot['DBSnapshotIdentifier'],
                AttributeName='restore',
                ValuesToRemove=remove_accounts,
                ValuesToAdd=add_accounts)
@RDSSnapshot.action_registry.register('region-copy')
class RegionCopySnapshot(BaseAction):
    """Copy a snapshot across regions.

    Note there is a max in flight for cross region rds snapshots
    of 5 per region. This action will attempt to retry automatically
    for an hr.

    Example::

      - name: copy-encrypted-snapshots
        description: |
          copy snapshots under 1 day old to dr region with kms
        resource: rds-snapshot
        region: us-east-1
        filters:
         - Status: available
         - type: value
           key: SnapshotCreateTime
           value_type: age
           value: 1
           op: less-than
        actions:
         - type: region-copy
           target_region: us-east-2
           target_key: arn:aws:kms:us-east-2:0000:key/cb291f53-c9cf61
           copy_tags: true
           tags:
             OriginRegion: us-east-1
    """

    schema = type_schema(
        'region-copy',
        target_region={'type': 'string'},
        target_key={'type': 'string'},
        copy_tags={'type': 'boolean'},
        tags={'type': 'object'},
        required=('target_region',))

    permissions = ('rds:CopyDBSnapshot',)
    # retry pacing: 30 attempts at >= 120s apart ~= one hour of retries
    min_delay = 120
    max_attempts = 30

    def validate(self):
        # Cross-region copies can require waits that exceed a lambda's
        # execution time limit, so reject event (mode) policies.
        if self.data.get('target_region') and self.manager.data.get('mode'):
            raise PolicyValidationError(
                "cross region snapshot may require waiting for "
                "longer then lambda runtime allows %s" % (self.manager.data,))
        return self

    def process(self, resources):
        if self.data['target_region'] == self.manager.config.region:
            self.log.warning(
                "Source and destination region are the same, skipping copy")
            return
        for resource_set in chunks(resources, 20):
            self.process_resource_set(resource_set)

    def process_resource(self, target, key, tags, snapshot):
        """Issue one copy_db_snapshot call (with quota retry) and record
        the new arn on the source resource."""
        p = {}
        if key:
            p['KmsKeyId'] = key
        # ':' is replaced since it is presumably invalid in a target
        # identifier (appears in automated snapshot ids) — confirm
        p['TargetDBSnapshotIdentifier'] = snapshot[
            'DBSnapshotIdentifier'].replace(':', '-')
        p['SourceRegion'] = self.manager.config.region
        p['SourceDBSnapshotIdentifier'] = snapshot['DBSnapshotArn']

        if self.data.get('copy_tags', True):
            p['CopyTags'] = True
        if tags:
            p['Tags'] = tags

        retry = get_retry(
            ('SnapshotQuotaExceeded',),
            # TODO make this configurable, class defaults to 1hr
            min_delay=self.min_delay,
            max_attempts=self.max_attempts,
            log_retries=logging.DEBUG)
        try:
            result = retry(target.copy_db_snapshot, **p)
        except ClientError as e:
            # a previous run may have already copied this snapshot
            if e.response['Error']['Code'] == 'DBSnapshotAlreadyExists':
                self.log.warning(
                    "Snapshot %s already exists in target region",
                    snapshot['DBSnapshotIdentifier'])
                return
            raise
        snapshot['c7n:CopiedSnapshot'] = result[
            'DBSnapshot']['DBSnapshotArn']

    def process_resource_set(self, resource_set):
        # client bound to the destination region
        target_client = self.manager.session_factory(
            region=self.data['target_region']).client('rds')
        target_key = self.data.get('target_key')
        tags = [{'Key': k, 'Value': v} for k, v
                in self.data.get('tags', {}).items()]
        # sub-batches of 5 to match the per-region in-flight copy limit
        for snapshot_set in chunks(resource_set, 5):
            for r in snapshot_set:
                # If tags are supplied, copy tags are ignored, and
                # we need to augment the tag set with the original
                # resource tags to preserve the common case.
                # NOTE(review): `tags and list(tags) or None` is the old
                # and/or conditional idiom (== `list(tags) if tags else
                # None`); safe here since a copy of a non-empty list is
                # always truthy.
                rtags = tags and list(tags) or None
                if tags and self.data.get('copy_tags', True):
                    rtags.extend(r['Tags'])
                self.process_resource(target_client, target_key, rtags, r)
@RDSSnapshot.action_registry.register('delete')
class RDSSnapshotDelete(BaseAction):
    """Delete RDS snapshot resources.

    :example:

    .. code-block:: yaml

            policies:
              - name: rds-snapshot-delete-stale
                resource: rds-snapshot
                filters:
                  - type: age
                    days: 28
                    op: ge
                actions:
                  - delete
    """

    schema = type_schema('delete')
    permissions = ('rds:DeleteDBSnapshot',)

    def process(self, snapshots):
        log.info("Deleting %d rds snapshots", len(snapshots))
        with self.executor_factory(max_workers=3) as pool:
            # fan batches of 50 out across the pool, newest-first
            pending = [
                pool.submit(self.process_snapshot_set, batch)
                for batch in chunks(reversed(snapshots), size=50)]
            for done in as_completed(pending):
                if done.exception():
                    self.log.error(
                        "Exception deleting snapshot set \n %s",
                        done.exception())
        return snapshots

    def process_snapshot_set(self, snapshots_set):
        client = local_session(self.manager.session_factory).client('rds')
        for snap in snapshots_set:
            client.delete_db_snapshot(
                DBSnapshotIdentifier=snap['DBSnapshotIdentifier'])
@actions.register('modify-security-groups')
class RDSModifyVpcSecurityGroups(ModifyVpcSecurityGroupsAction):
    """Modify vpc security groups on rds instances, routing instances
    that belong to a db cluster through modify_db_cluster instead."""

    permissions = ('rds:ModifyDBInstance', 'rds:ModifyDBCluster')
    vpc_expr = 'DBSubnetGroup.VpcId'

    def process(self, rds_instances):
        replication_group_map = {}
        client = local_session(self.manager.session_factory).client('rds')
        # groups[idx] holds the resolved security groups for rds_instances[idx]
        groups = super(RDSModifyVpcSecurityGroups, self).get_groups(
            rds_instances)

        # either build map for DB cluster or modify DB instance directly
        for idx, i in enumerate(rds_instances):
            if i.get('DBClusterIdentifier'):
                # build map of Replication Groups to Security Groups
                replication_group_map[i['DBClusterIdentifier']] = groups[idx]
            else:
                client.modify_db_instance(
                    DBInstanceIdentifier=i['DBInstanceIdentifier'],
                    VpcSecurityGroupIds=groups[idx])

        # handle DB clusters, if any (iterate items directly; the previous
        # enumerate() produced an index that was never used)
        for cluster_id, sg_ids in replication_group_map.items():
            client.modify_db_cluster(
                DBClusterIdentifier=cluster_id,
                VpcSecurityGroupIds=sg_ids
            )
class DescribeSubnetGroup(DescribeSource):
    """Describe source for rds subnet groups; decorates resources with tags."""

    def augment(self, resources):
        # Tags are attached to each group in place by the helper.  Note the
        # helper's filtered return value is ignored here, so a group whose
        # tag lookup raised DBSubnetGroupNotFoundFault stays in the result
        # without a 'Tags' key.
        _db_subnet_group_tags(
            resources, self.manager.session_factory,
            self.manager.executor_factory, self.manager.retry)
        return resources
@resources.register('rds-subnet-group')
class RDSSubnetGroup(QueryResourceManager):
    """RDS subnet group."""

    class resource_type(TypeInfo):
        # enumerated via describe_db_subnet_groups on the rds client
        service = 'rds'
        arn_type = 'subgrp'
        id = name = 'DBSubnetGroupName'
        arn_separator = ':'
        enum_spec = (
            'describe_db_subnet_groups', 'DBSubnetGroups', None)
        filter_name = 'DBSubnetGroupName'
        filter_type = 'scalar'
        permissions_enum = ('rds:DescribeDBSubnetGroups',)
        cfn_type = config_type = 'AWS::RDS::DBSubnetGroup'
        # tags handled through the universal tagging flow
        universal_taggable = object()

    # resource data can come from AWS Config or a live describe
    source_mapping = {
        'config': ConfigSource,
        'describe': DescribeSubnetGroup
    }
def _db_subnet_group_tags(subnet_groups, session_factory, executor_factory, retry):
    """Attach rds tags to each subnet group in place.

    Returns only the groups whose tags could be fetched; groups deleted
    out from under us (DBSubnetGroupNotFoundFault) are dropped from the
    returned list.  ``executor_factory`` and ``retry`` are accepted for
    signature compatibility but unused here.
    """
    client = local_session(session_factory).client('rds')
    tagged = []
    for group in subnet_groups:
        try:
            group['Tags'] = client.list_tags_for_resource(
                ResourceName=group['DBSubnetGroupArn'])['TagList']
        except client.exceptions.DBSubnetGroupNotFoundFault:
            continue
        tagged.append(group)
    return tagged
@RDSSubnetGroup.action_registry.register('delete')
class RDSSubnetGroupDeleteAction(BaseAction):
    """Delete RDS subnet groups.

    Pair this action with a filter (for example ``Instances: []``) so
    that only the intended subnet groups are removed.

    :example:

    .. code-block:: yaml

            policies:
              - name: rds-subnet-group-delete
                resource: rds-subnet-group
                filters:
                  - Instances: []
                actions:
                  - delete
    """

    schema = type_schema('delete')
    permissions = ('rds:DeleteDBSubnetGroup',)

    def process(self, subnet_group):
        # deletion is one api call per group; two workers is plenty
        with self.executor_factory(max_workers=2) as pool:
            for _ in pool.map(self.process_subnetgroup, subnet_group):
                pass

    def process_subnetgroup(self, subnet_group):
        rds = local_session(self.manager.session_factory).client('rds')
        rds.delete_db_subnet_group(
            DBSubnetGroupName=subnet_group['DBSubnetGroupName'])
@RDSSubnetGroup.filter_registry.register('unused')
class UnusedRDSSubnetGroup(Filter):
    """Filters all launch rds subnet groups that are not in use but exist

    :example:

    .. code-block:: yaml

            policies:
              - name: rds-subnet-group-delete-unused
                resource: rds-subnet-group
                filters:
                  - unused
    """

    schema = type_schema('unused')

    def get_permissions(self):
        # usage is determined by enumerating rds instances, so this filter
        # needs the rds resource manager's permissions
        return self.manager.get_resource_manager('rds').get_permissions()

    def process(self, configs, event=None):
        rds = self.manager.get_resource_manager('rds').resources()
        # subnet group names referenced by any rds instance ...
        self.used = set(jmespath.search('[].DBSubnetGroup.DBSubnetGroupName', rds))
        # ... or by any rds cluster (augment=False: names only, skip tags)
        self.used.update(set(jmespath.search('[].DBSubnetGroup.DBSubnetGroupName',
            self.manager.get_resource_manager('rds-cluster').resources(augment=False))))
        return super(UnusedRDSSubnetGroup, self).process(configs)

    def __call__(self, config):
        # keep only groups whose name is referenced nowhere
        return config['DBSubnetGroupName'] not in self.used
@filters.register('db-parameter')
class ParameterFilter(ValueFilter):
    """
    Applies value type filter on set db parameter values.

    :example:

    .. code-block:: yaml

            policies:
              - name: rds-pg
                resource: rds
                filters:
                  - type: db-parameter
                    key: someparam
                    op: eq
                    value: someval
    """

    schema = type_schema('db-parameter', rinherit=ValueFilter.schema)
    schema_alias = False
    permissions = ('rds:DescribeDBInstances', 'rds:DescribeDBParameters', )

    @staticmethod
    def recast(val, datatype):
        """ Re-cast the value based upon an AWS supplied datatype
        and treat nulls sensibly.
        """
        ret_val = val
        if datatype == 'string':
            ret_val = str(val)
        elif datatype == 'boolean':
            # AWS returns 1s and 0s for boolean for most of the cases
            if val.isdigit():
                ret_val = bool(int(val))
            # AWS returns 'TRUE,FALSE' for Oracle engine
            elif val == 'TRUE':
                ret_val = True
            elif val == 'FALSE':
                ret_val = False
        elif datatype == 'integer':
            if val.isdigit():
                ret_val = int(val)
        elif datatype == 'float':
            ret_val = float(val) if val else 0.0
        return ret_val

    def process(self, resources, event=None):
        results = []
        paramcache = {}

        client = local_session(self.manager.session_factory).client('rds')
        paginator = client.get_paginator('describe_db_parameters')

        # Cache every parameter group referenced by any instance.  The
        # previous comprehension only recorded DBParameterGroups[0] per
        # instance, which raised KeyError in the match loop below for
        # instances associated with more than one parameter group.
        param_groups = {pg['DBParameterGroupName']
                        for db in resources
                        for pg in db['DBParameterGroups']}

        for pg in param_groups:
            cache_key = {
                'region': self.manager.config.region,
                'account_id': self.manager.config.account_id,
                'rds-pg': pg}
            pg_values = self.manager._cache.get(cache_key)
            if pg_values is not None:
                paramcache[pg] = pg_values
                continue
            param_list = list(itertools.chain(*[p['Parameters']
                for p in paginator.paginate(DBParameterGroupName=pg)]))
            # only parameters with an explicit value participate in matching
            paramcache[pg] = {
                p['ParameterName']: self.recast(p['ParameterValue'], p['DataType'])
                for p in param_list if 'ParameterValue' in p}
            self.manager._cache.save(cache_key, paramcache[pg])

        for resource in resources:
            for pg in resource['DBParameterGroups']:
                pg_values = paramcache[pg['DBParameterGroupName']]
                if self.match(pg_values):
                    # annotate which key matched, and match each instance
                    # at most once across its groups
                    resource.setdefault('c7n:MatchedDBParameter', []).append(
                        self.data.get('key'))
                    results.append(resource)
                    break
        return results
@actions.register('modify-db')
class ModifyDb(BaseAction):
    """Modifies an RDS instance based on specified parameter
    using ModifyDbInstance.

    'update' is an array of key value pairs that should be set to
    the property and value you wish to modify.
    'immediate' determines whether the modification is applied immediately
    or not. If 'immediate' is not specified, default is false.

    :example:

    .. code-block:: yaml

            policies:
              - name: disable-rds-deletion-protection
                resource: rds
                filters:
                  - DeletionProtection: true
                  - PubliclyAccessible: true
                actions:
                  - type: modify-db
                    update:
                      - property: 'DeletionProtection'
                        value: false
                      - property: 'PubliclyAccessible'
                        value: false
                    immediate: true
    """

    schema = type_schema(
        'modify-db',
        immediate={"type": 'boolean'},
        update={
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'property': {'type': 'string', 'enum': [
                        'AllocatedStorage',
                        'DBInstanceClass',
                        'DBSubnetGroupName',
                        'DBSecurityGroups',
                        'VpcSecurityGroupIds',
                        'MasterUserPassword',
                        'DBParameterGroupName',
                        'BackupRetentionPeriod',
                        'PreferredBackupWindow',
                        'PreferredMaintenanceWindow',
                        'MultiAZ',
                        'EngineVersion',
                        'AllowMajorVersionUpgrade',
                        'AutoMinorVersionUpgrade',
                        'LicenseModel',
                        'Iops',
                        'OptionGroupName',
                        'NewDBInstanceIdentifier',
                        'StorageType',
                        'TdeCredentialArn',
                        'TdeCredentialPassword',
                        'CACertificateIdentifier',
                        'Domain',
                        'CopyTagsToSnapshot',
                        'MonitoringInterval',
                        'MonitoringRoleARN',
                        'DBPortNumber',
                        'PubliclyAccessible',
                        'DomainIAMRoleName',
                        'PromotionTier',
                        'EnableIAMDatabaseAuthentication',
                        'EnablePerformanceInsights',
                        'PerformanceInsightsKMSKeyId',
                        'PerformanceInsightsRetentionPeriod',
                        'CloudwatchLogsExportConfiguration',
                        'ProcessorFeatures',
                        'UseDefaultProcessorFeatures',
                        'DeletionProtection',
                        'MaxAllocatedStorage',
                        'CertificateRotationRestart']},
                    'value': {}
                },
            },
        },
        required=('update',))

    permissions = ('rds:ModifyDBInstance',)

    # Maps a modify-api property name to the jmespath where its current
    # value lives on the describe_db_instances resource, for the change
    # detection in process().
    conversion_map = {
        'DBSubnetGroupName': 'DBSubnetGroup.DBSubnetGroupName',
        'VpcSecurityGroupIds': 'VpcSecurityGroups[].VpcSecurityGroupId',
        'DBParameterGroupName': 'DBParameterGroups[].DBParameterGroupName',
        'OptionGroupName': 'OptionGroupMemberships[].OptionGroupName',
        'NewDBInstanceIdentifier': 'DBInstanceIdentifier',
        'Domain': 'DomainMemberships[].DomainName',
        'DBPortNumber': 'Endpoint.Port',
        'EnablePerformanceInsights': 'PerformanceInsightsEnabled',
        'CloudwatchLogsExportConfiguration': 'EnabledCloudwatchLogsExports'
    }

    def validate(self):
        # Cross-field requirements that the jsonschema alone cannot express.
        if self.data.get('update'):
            update_dict = dict((i['property'], i['value']) for i in self.data.get('update'))
            if ('MonitoringInterval' in update_dict and update_dict['MonitoringInterval'] > 0 and
                    'MonitoringRoleARN' not in update_dict):
                raise PolicyValidationError(
                    "A MonitoringRoleARN value is required \
                    if you specify a MonitoringInterval value other than 0")
            if ('CloudwatchLogsExportConfiguration' in update_dict
                and all(
                    k not in update_dict.get('CloudwatchLogsExportConfiguration')
                    for k in ('EnableLogTypes', 'DisableLogTypes'))):
                raise PolicyValidationError(
                    "A EnableLogTypes or DisableLogTypes input list is required\
                    for setting CloudwatchLogsExportConfiguration")
        return self

    def process(self, resources):
        c = local_session(self.manager.session_factory).client('rds')
        for r in resources:
            # Only send properties whose current value differs from the
            # requested one; current values are read off the resource key
            # directly, falling back to the conversion_map jmespath.
            param = {
                u['property']: u['value'] for u in self.data.get('update')
                if r.get(
                    u['property'],
                    jmespath.search(
                        self.conversion_map.get(u['property'], 'None'), r))
                != u['value']}
            if not param:
                continue
            param['ApplyImmediately'] = self.data.get('immediate', False)
            param['DBInstanceIdentifier'] = r['DBInstanceIdentifier']
            # The prior try/except here caught DBInstanceNotFoundFault only
            # to immediately re-raise it (a no-op); let it propagate.
            c.modify_db_instance(**param)
@resources.register('rds-reserved')
class ReservedRDS(QueryResourceManager):
    """Query resource manager for rds reserved db instances."""

    class resource_type(TypeInfo):
        service = 'rds'
        name = id = 'ReservedDBInstanceId'
        date = 'StartTime'
        enum_spec = (
            'describe_reserved_db_instances', 'ReservedDBInstances', None)
        filter_name = 'ReservedDBInstances'
        filter_type = 'list'
        arn_type = "ri"
        # the api already returns a full arn for this resource
        arn = "ReservedDBInstanceArn"
        permissions_enum = ('rds:DescribeReservedDBInstances',)
        # tags handled through the universal tagging flow
        universal_taggable = object()

    # decorate resources with tags via the shared universal augment helper
    augment = universal_augment
| |
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2014 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
class Rect(object):
    '''Define a rectangular area.

    Many convenience handles and other properties are also defined - all of
    which may be assigned to which will result in altering the position
    and sometimes dimensions of the Rect:

     - top -- y pixel extent
     - bottom -- y pixel extent
     - left -- x pixel extent
     - right -- x pixel extent
     - position -- (x, y) of bottom-left corner pixel
     - origin -- (x, y) of bottom-left corner pixel
     - center -- (x, y) of center pixel
     - topleft -- (x, y) of top-left corner pixel
     - topright -- (x, y) of top-right corner pixel
     - bottomleft -- (x, y) of bottom-left corner pixel
     - bottomright -- (x, y) of bottom-right corner pixel
     - midtop -- (x, y) of middle of top side pixel
     - midbottom -- (x, y) of middle of bottom side pixel
     - midleft -- (x, y) of middle of left side pixel
     - midright -- (x, y) of middle of right side pixel
     - size -- (width, height) of rect

    The Rect area includes the bottom and left borders but not the top and
    right borders.
    '''

    def __init__(self, x, y, width, height):
        '''Create a Rect with the bottom-left corner at (x, y) and
        dimensions (width, height).
        '''
        self._x, self._y = x, y
        self._width, self._height = width, height

    def __nonzero__(self):
        # truthy iff the rect has non-zero area (python 2 protocol name)
        return bool(self.width and self.height)

    # python 3 uses __bool__; without this alias a zero-area Rect would be
    # truthy on py3 (the file targets both via __future__ imports).
    __bool__ = __nonzero__

    def __repr__(self):
        return 'Rect(xy=%.4g,%.4g; wh=%.4g,%.4g)'%(self.x, self.y,
            self.width, self.height)

    def __eq__(self, other):
        '''Compare the two rects.

        >>> r1 = Rect(0, 0, 10, 10)
        >>> r1 == Rect(0, 0, 10, 10)
        True
        >>> r1 == Rect(1, 0, 10, 10)
        False
        >>> r1 == Rect(0, 1, 10, 10)
        False
        >>> r1 == Rect(0, 0, 11, 10)
        False
        >>> r1 == Rect(0, 0, 10, 11)
        False
        '''
        return (self.x == other.x and self.y == other.y and
                self.width == other.width and self.height == other.height)

    # py3 compatiblity: obj that defines __eq__ needs to define __hash__ to be
    # hashable, and we need that class RectCell(Rect, Cell) be hashable
    __hash__ = object.__hash__

    def __ne__(self, other):
        '''Compare the two rects.

        >>> r1 = Rect(0, 0, 10, 10)
        >>> r1 != Rect(0, 0, 10, 10)
        False
        >>> r1 != Rect(1, 0, 10, 10)
        True
        >>> r1 != Rect(0, 1, 10, 10)
        True
        >>> r1 != Rect(0, 0, 11, 10)
        True
        >>> r1 != Rect(0, 0, 10, 11)
        True
        '''
        return not (self == other)

    def copy(self):
        '''Return a new Rect with the same position and dimensions.'''
        return self.__class__(self.x, self.y, self.width, self.height)

    # the following four properties will most likely be overridden in a
    # subclass
    def set_x(self, value): self._x = value
    x = property(lambda self: self._x, set_x)
    def set_y(self, value): self._y = value
    y = property(lambda self: self._y, set_y)
    def set_width(self, value): self._width = value
    width = property(lambda self: self._width, set_width)
    def set_height(self, value): self._height = value
    height = property(lambda self: self._height, set_height)

    def contains(self, x, y):
        '''Return boolean whether the point defined by x, y is inside the
        rect area.
        '''
        # NOTE(review): the comparisons are inclusive of the top/right
        # borders, which the class docstring says are excluded — confirm
        # intended boundary semantics before tightening to < / <=.
        if x < self.x or x > self.x + self.width: return False
        if y < self.y or y > self.y + self.height: return False
        return True

    def intersects(self, other):
        '''Return boolean whether the "other" rect (an object with .x, .y,
        .width and .height attributes) overlaps this Rect in any way.
        '''
        if self.x + self.width < other.x: return False
        if other.x + other.width < self.x: return False
        if self.y + self.height < other.y: return False
        if other.y + other.height < self.y: return False
        return True

    def clippedBy(self, other):
        '''Determine whether this rect is clipped by the other rect.

        >>> r1 = Rect(0, 0, 10, 10)
        >>> r2 = Rect(1, 1, 9, 9)
        >>> r2.clippedBy(r1)    # r2 fits inside r1
        False
        >>> r1.clippedBy(r2)    # r1 is clipped by r2
        True
        >>> r2 = Rect(1, 1, 11, 11)
        >>> r1.intersect(r2)
        Rect(xy=1,1; wh=9,9)
        >>> r1.clippedBy(r2)
        True
        >>> r2.intersect(r1)
        Rect(xy=1,1; wh=9,9)
        >>> r2.clippedBy(r1)
        True
        >>> r2 = Rect(11, 11, 1, 1)
        >>> r1.clippedBy(r2)
        True
        '''
        # The previous implementation referenced an undefined name ``i``
        # (NameError) and returned True for *any* intersection, which
        # contradicts the doctests above.  Compute the actual intersection
        # and report whether it is smaller than self.
        i = self.intersect(other)
        if i is None:
            # no overlap at all - self is entirely clipped away
            return True
        if i.x > self.x: return True
        if i.y > self.y: return True
        if i.width < self.width: return True
        if i.height < self.height: return True
        return False

    def intersect(self, other):
        '''Find the intersection of two Rects.

        >>> r1 = Rect(0, 51, 200, 17)
        >>> r2 = Rect(0, 64, 200, 55)
        >>> r1.intersect(r2)
        Rect(xy=0,64; wh=200,4)
        >>> r1 = Rect(0, 64, 200, 55)
        >>> r2 = Rect(0, 0, 200, 17)
        >>> print(r1.intersect(r2))
        None
        >>> r1 = Rect(10, 10, 10, 10)
        >>> r2 = Rect(20, 20, 10, 10)
        >>> print(r1.intersect(r2))
        None
        >>> bool(Rect(0, 0, 1, 1))
        True
        >>> bool(Rect(0, 0, 1, 0))
        False
        >>> bool(Rect(0, 0, 0, 1))
        False
        >>> bool(Rect(0, 0, 0, 0))
        False
        '''
        s_tr_x, s_tr_y = self.topright
        o_tr_x, o_tr_y = other.topright
        # bottom-left of the overlap is the max of the bottom-lefts,
        # top-right is the min of the top-rights
        bl_x = max(self.x, other.x)
        bl_y = max(self.y, other.y)
        tr_x = min(s_tr_x, o_tr_x)
        tr_y = min(s_tr_y, o_tr_y)
        w, h = max(0, tr_x-bl_x), max(0, tr_y-bl_y)
        if not w or not h:
            # degenerate (zero-area) overlap counts as no intersection
            return None
        return self.__class__(bl_x, bl_y, w, h)

    def set_position(self, value): self._x, self._y = value
    position = property(lambda self: (self._x, self._y), set_position)

    def set_size(self, value): self._width, self._height = value
    size = property(lambda self: (self._width, self._height), set_size)

    def get_origin(self): return self.x, self.y
    def set_origin(self, origin): self.x, self.y = origin
    origin = property(get_origin, set_origin)

    # r/w, in pixels, y extent
    def get_top(self): return self.y + self.height
    def set_top(self, y): self.y = y - self.height
    top = property(get_top, set_top)

    # r/w, in pixels, y extent
    def get_bottom(self): return self.y
    def set_bottom(self, y): self.y = y
    bottom = property(get_bottom, set_bottom)

    def get_left(self): return self.x
    def set_left(self, x): self.x = x
    left = property(get_left, set_left)

    def get_right(self): return self.x + self.width
    def set_right(self, x): self.x = x - self.width
    right = property(get_right, set_right)

    def get_center(self):
        return (self.x + self.width//2, self.y + self.height//2)
    def set_center(self, center):
        x, y = center
        # was `self.height//2.0`, which floor-divided by a float and
        # produced a float y while every other midpoint handle stays
        # integral; use // 2 for consistency with get_center/midtop/etc.
        self.position = (x - self.width//2, y - self.height//2)
    center = property(get_center, set_center)

    def get_midtop(self):
        return (self.x + self.width//2, self.y + self.height)
    def set_midtop(self, midtop):
        x, y = midtop
        self.position = (x - self.width//2, y - self.height)
    midtop = property(get_midtop, set_midtop)

    def get_midbottom(self):
        return (self.x + self.width//2, self.y)
    def set_midbottom(self, midbottom):
        x, y = midbottom
        self.position = (x - self.width//2, y)
    midbottom = property(get_midbottom, set_midbottom)

    def get_midleft(self):
        return (self.x, self.y + self.height//2)
    def set_midleft(self, midleft):
        x, y = midleft
        self.position = (x, y - self.height//2)
    midleft = property(get_midleft, set_midleft)

    def get_midright(self):
        return (self.x + self.width, self.y + self.height//2)
    def set_midright(self, midright):
        x, y = midright
        self.position = (x - self.width, y - self.height//2)
    midright = property(get_midright, set_midright)

    def get_topleft(self):
        return (self.x, self.y + self.height)
    def set_topleft(self, position):
        x, y = position
        self.position = (x, y - self.height)
    topleft = property(get_topleft, set_topleft)

    def get_topright(self):
        return (self.x + self.width, self.y + self.height)
    def set_topright(self, position):
        x, y = position
        self.position = (x - self.width, y - self.height)
    topright = property(get_topright, set_topright)

    def get_bottomright(self):
        return (self.x + self.width, self.y)
    def set_bottomright(self, position):
        x, y = position
        self.position = (x - self.width, y)
    bottomright = property(get_bottomright, set_bottomright)

    def get_bottomleft(self):
        return (self.x, self.y)
    def set_bottomleft(self, position):
        self.x, self.y = position
    bottomleft = property(get_bottomleft, set_bottomleft)
| |
# -*- coding: utf-8 -*-
import inspect
import logging
from enum import Enum, auto
from typing import List
import pandas as pd
from PyQt5.QtCore import pyqtSignal, Qt, pyqtSlot
from PyQt5.QtGui import QStandardItemModel, QStandardItem
from PyQt5.QtWidgets import QWidget, QTextEdit
from dgp.core.controllers.dataset_controller import DataSetController, DataSegmentController
from dgp.gui.plotting.backends import AxisFormatter
from dgp.gui.plotting.plotters import TransformPlot
from dgp.gui.ui.transform_tab_widget import Ui_TransformInterface
from dgp.lib.transform.graph import TransformGraph
from dgp.lib.transform.transform_graphs import AirbornePost
try:
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter
HAS_HIGHLIGHTER = True
except ImportError:
HAS_HIGHLIGHTER = False
class _Mode(Enum):
    # Display mode toggled by TransformWidget's mode button.
    # NOTE(review): SEGMENTS presumably restricts plotting to per-segment
    # views — confirm against the widget's _mode_toggled handler.
    NORMAL = auto()
    SEGMENTS = auto()
class TransformWidget(QWidget, Ui_TransformInterface):
    """Widget for selecting, executing, and plotting the output of a
    :class:`TransformGraph` over a dataset's gravity/trajectory data.

    Transformed channels are listed with checkboxes; checking a channel
    plots it against the selected x-axis (time, latitude, or longitude),
    either as a whole series (NORMAL mode) or split per segment
    (SEGMENTS mode).
    """

    # Emitted after execute_transform() has stored a new result DataFrame
    result = pyqtSignal()

    # User Roles for specific data within a channel
    TIME = 0x0101
    LATITUDE = 0x0102
    LONGITUDE = 0x0103  # normalized from 0x103 (identical value)

    def __init__(self, dataset: DataSetController, plotter: TransformPlot):
        super().__init__()
        self.setupUi(self)
        self.log = logging.getLogger(__name__)
        self._dataset: DataSetController = dataset
        self._plot = plotter
        self._mode = _Mode.NORMAL
        # channel name -> names of per-segment series added to the plot
        self._segment_indexes = {}
        self._result: pd.DataFrame = None
        self.result.connect(self._on_result)
        # Line mask to view individual lines
        self._mask = None
        # Initialize Models for ComboBoxes
        self.plot_index = QStandardItemModel()
        self.transform_graphs = QStandardItemModel()
        # Set ComboBox Models
        self.qcb_plot_index.setModel(self.plot_index)
        self.qcb_transform_graphs.setModel(self.transform_graphs)
        self.qcb_transform_graphs.currentIndexChanged.connect(self._graph_source)
        self.qcb_plot_index.currentIndexChanged[int].connect(self._index_changed)
        # Initialize model for transformed channels
        self._channel_model = QStandardItemModel()
        self._channel_model.itemChanged.connect(self._channel_state_changed)
        self.qlv_channels.setModel(self._channel_model)
        self._index_map = {
            'Time': self.TIME,
            'Latitude': self.LATITUDE,
            'Longitude': self.LONGITUDE
        }
        for key, value in self._index_map.items():
            item = QStandardItem(key)
            item.setData(value, Qt.UserRole)
            self.plot_index.appendRow(item)
        self.qcb_plot_index.setCurrentIndex(0)
        for choice, method in [('Airborne Post', AirbornePost)]:
            item = QStandardItem(choice)
            item.setData(method, Qt.UserRole)
            self.transform_graphs.appendRow(item)
        self.qpb_execute_transform.clicked.connect(self.execute_transform)
        self.qpb_select_all.clicked.connect(lambda: self._set_all_channels(Qt.Checked))
        self.qpb_select_none.clicked.connect(lambda: self._set_all_channels(Qt.Unchecked))
        self.qpb_toggle_mode.clicked.connect(self._mode_toggled)
        self.qte_source_browser.setReadOnly(True)
        self.qte_source_browser.setLineWrapMode(QTextEdit.NoWrap)

    @property
    def xaxis_index(self) -> int:
        """Role constant (TIME/LATITUDE/LONGITUDE) for the selected x-axis."""
        return self.qcb_plot_index.currentData(Qt.UserRole)

    @property
    def raw_gravity(self) -> pd.DataFrame:
        """Raw gravity frame from the dataset controller."""
        return self._dataset.gravity

    @property
    def raw_trajectory(self) -> pd.DataFrame:
        """Raw trajectory frame from the dataset controller."""
        return self._dataset.trajectory

    @property
    def dataframe(self) -> pd.DataFrame:
        """Combined dataframe provided by the dataset controller."""
        return self._dataset.dataframe()

    @property
    def transform(self) -> TransformGraph:
        """Transform-graph class currently selected in the combo box."""
        return self.qcb_transform_graphs.currentData(Qt.UserRole)

    @property
    def _channels(self) -> List[QStandardItem]:
        """All channel items currently held by the channel model."""
        return [self._channel_model.item(i)
                for i in range(self._channel_model.rowCount())]

    @property
    def _segments(self) -> List[DataSegmentController]:
        """All segment controllers defined on the dataset."""
        return [self._dataset.segment_model.item(i)
                for i in range(self._dataset.segment_model.rowCount())]

    def _graph_source(self, index):  # pragma: no cover
        """Utility to display the transform graph source (__init__) method
        containing the definition for the graph.
        If Pygments is available the source code will be highlighted

        Notes
        -----
        The inspection of the source code is somewhat fragile and dependent on
        the way the graph is defined in the source. The current method gets the
        __init__ source code for the TransformGraph descendant then searches for
        the string index of 'self.transform_graph', and takes from the first '{'
        until the first '}'.
        """
        graph = self.transform
        src = inspect.getsource(graph.__init__)
        start_str = 'self.transform_graph'
        # Slice out just the graph-definition dict literal
        start_i = src.find('{', src.find(start_str)) + 1
        src = src[start_i:src.find('}')]
        trimmed = map(lambda x: x.lstrip(' '), src.split('\n'))
        src = ''
        for line in trimmed:
            src += f'{line}\n'
        if HAS_HIGHLIGHTER:
            css = HtmlFormatter().get_style_defs('.highlight')
            style_block = f'<style>{css}</style>'
            html = highlight(src, PythonLexer(stripall=True), HtmlFormatter())
            self.qte_source_browser.setHtml(f'{style_block}{html}')
        else:
            self.qte_source_browser.setText(src)

    def _mode_toggled(self):
        """Toggle the mode state between Normal or Segments."""
        # Uncheck everything first so plotted series are removed under the
        # same mode that added them
        self._set_all_channels(state=Qt.Unchecked)
        if self._mode is _Mode.NORMAL:
            self._mode = _Mode.SEGMENTS
        else:
            self._mode = _Mode.NORMAL
        self.log.debug(f'Changed mode to {self._mode}')

    def _set_all_channels(self, state=Qt.Checked):
        """Set the check-state of every channel item to *state*."""
        for i in range(self._channel_model.rowCount()):
            self._channel_model.item(i).setCheckState(state)

    def _add_series(self, series: pd.Series, row=0):
        """Add *series* to the plot; in SEGMENTS mode add one slice per
        dataset segment instead of the whole channel."""
        if self._mode is _Mode.NORMAL:
            self._plot.add_series(series, row)
        elif self._mode is _Mode.SEGMENTS:
            self._segment_indexes[series.name] = []
            for i, segment in enumerate(self._segments):
                start_i = self._result.index.searchsorted(segment.get_attr('start'))
                stop_i = self._result.index.searchsorted(segment.get_attr('stop'))
                seg_data = series.iloc[start_i:stop_i]
                seg_data.name = f'{series.name}-{segment.get_attr("label") or i}'
                self._segment_indexes[series.name].append(seg_data.name)
                self._plot.add_series(seg_data, row=0)

    def _remove_series(self, series: pd.Series):
        """Remove *series* (or its per-segment slices) from the plot."""
        if self._mode is _Mode.NORMAL:
            self._plot.remove_series(series.name, row=0)
        elif self._mode is _Mode.SEGMENTS:
            for name in self._segment_indexes[series.name]:
                self._plot.remove_series(name, row=0)
            del self._segment_indexes[series.name]

    def _channel_state_changed(self, item: QStandardItem):
        """Plot or un-plot a channel when its checkbox is toggled."""
        data: pd.Series = item.data(self.xaxis_index)
        if item.checkState() == Qt.Checked:
            self._add_series(data, row=0)
        else:
            self._remove_series(data)

    @pyqtSlot(int, name='_index_changed')
    def _index_changed(self, index: int):
        """Re-plot all checked channels against the newly selected x-axis."""
        self.log.debug(f'X-Axis changed to {self.qcb_plot_index.currentText()}')
        if self._result is None:
            return
        if self.xaxis_index in {self.LATITUDE, self.LONGITUDE}:
            self._plot.set_axis_formatters(AxisFormatter.SCALAR)
        else:
            self._plot.set_axis_formatters(AxisFormatter.DATETIME)
        # Toggle checked channels off/on to force a re-plot on the new index
        for channel in self._channels:
            if channel.checkState() == Qt.Checked:
                channel.setCheckState(Qt.Unchecked)
                channel.setCheckState(Qt.Checked)

    @pyqtSlot(name='_on_result')
    def _on_result(self):
        """_on_result called when Transformation DataFrame has been computed.
        This method creates the channel objects for the interface.
        """
        default_channels = ['fac']
        time_df = self._result
        lat_df = time_df.set_index('lat')
        lon_df = time_df.set_index('lon')
        # clear() removes the previous channel items; the old per-item
        # `del item` loop here was a no-op and has been dropped
        self._channel_model.clear()
        for col in sorted(time_df.columns):
            item = QStandardItem(col)
            item.setCheckable(True)
            item.setData(time_df[col], self.TIME)
            # A channel cannot be indexed against itself, so store an
            # empty placeholder series for the index column
            if col == 'lat':
                item.setData(pd.Series(), self.LATITUDE)
            else:
                item.setData(lat_df[col], self.LATITUDE)
            if col == 'lon':
                item.setData(pd.Series(), self.LONGITUDE)
            else:
                item.setData(lon_df[col], self.LONGITUDE)
            self._channel_model.appendRow(item)
            if col in default_channels:
                item.setCheckState(Qt.Checked)

    def execute_transform(self):
        """Run the selected transform graph over the dataset's gravity and
        trajectory frames, then emit :attr:`result` on success."""
        gravity = self.raw_gravity
        trajectory = self.raw_trajectory
        if gravity.empty or trajectory.empty:
            self.log.warning("Missing trajectory or gravity")
            return
        transform = self.qcb_transform_graphs.currentData(Qt.UserRole)
        graph = transform(trajectory, gravity, 0, 0)
        self.log.info("Executing graph")
        graph.execute()
        # Rebinding releases the previous result; the prior `del self._result`
        # was unnecessary and left the attribute missing if result_df() raised
        self._result = graph.result_df()
        self.result.emit()
| |
from __future__ import unicode_literals
import datetime
import pickle
from decimal import Decimal
from operator import attrgetter
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Avg, Count, F, Max, Q, StdDev, Sum, Value, Variance,
)
from django.test import TestCase, skipUnlessAnyDBFeature, skipUnlessDBFeature
from django.test.utils import Approximate
from django.utils import six
from .models import (
Alfa, Author, Book, Bravo, Charlie, Clues, Entries, HardbackBook, ItemTag,
Publisher, SelfRefFK, Store, WithManualPK,
)
class AggregationTests(TestCase):
@classmethod
def setUpTestData(cls):
    """Create the shared fixture: authors, publishers, books and stores.

    NOTE(review): keep the creation order -- at least one test filters on
    literal primary keys (id__in=[1, 2]), so the pks these rows receive
    must stay stable.
    """
    # Authors; the age values feed the Avg/Max age aggregates in the tests
    cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
    cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
    cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
    cls.a4 = Author.objects.create(name='James Bennett', age=29)
    cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
    cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
    cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
    cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
    cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
    # Mutual friendships (m2m on Author)
    cls.a1.friends.add(cls.a2, cls.a4)
    cls.a2.friends.add(cls.a1, cls.a7)
    cls.a4.friends.add(cls.a1)
    cls.a5.friends.add(cls.a6, cls.a7)
    cls.a6.friends.add(cls.a5, cls.a7)
    cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
    cls.a8.friends.add(cls.a9)
    cls.a9.friends.add(cls.a8)
    # Publishers
    cls.p1 = Publisher.objects.create(name='Apress', num_awards=3)
    cls.p2 = Publisher.objects.create(name='Sams', num_awards=1)
    cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
    cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
    cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
    # Books; b5 and b6 are HardbackBook rows
    cls.b1 = Book.objects.create(
        isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
        pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
        pubdate=datetime.date(2007, 12, 6)
    )
    cls.b2 = Book.objects.create(
        isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
        pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
        pubdate=datetime.date(2008, 3, 3)
    )
    cls.b3 = Book.objects.create(
        isbn='159059996', name='Practical Django Projects',
        pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
        pubdate=datetime.date(2008, 6, 23)
    )
    cls.b4 = Book.objects.create(
        isbn='013235613', name='Python Web Development with Django',
        pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
        pubdate=datetime.date(2008, 11, 3)
    )
    cls.b5 = HardbackBook.objects.create(
        isbn='013790395', name='Artificial Intelligence: A Modern Approach',
        pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
        pubdate=datetime.date(1995, 1, 15), weight=4.5)
    cls.b6 = HardbackBook.objects.create(
        isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
        pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
        pubdate=datetime.date(1991, 10, 15), weight=3.7)
    # Book authorship (m2m)
    cls.b1.authors.add(cls.a1, cls.a2)
    cls.b2.authors.add(cls.a3)
    cls.b3.authors.add(cls.a4)
    cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
    cls.b5.authors.add(cls.a8, cls.a9)
    cls.b6.authors.add(cls.a8)
    # Stores and their book inventories
    s1 = Store.objects.create(
        name='Amazon.com',
        original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
        friday_night_closing=datetime.time(23, 59, 59)
    )
    s2 = Store.objects.create(
        name='Books.com',
        original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
        friday_night_closing=datetime.time(23, 59, 59)
    )
    s3 = Store.objects.create(
        name="Mamma and Pappa's Books",
        original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
        friday_night_closing=datetime.time(21, 30)
    )
    s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
    s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
    s3.books.add(cls.b3, cls.b4, cls.b6)
def assertObjectAttrs(self, obj, **kwargs):
    """Assert that *obj* carries every attribute value given in *kwargs*."""
    for attr_name, expected in kwargs.items():
        self.assertEqual(getattr(obj, attr_name), expected)
def test_aggregates_in_where_clause(self):
    """
    Regression test for #12822: DatabaseError: aggregates not allowed in
    WHERE clause

    The subselect works and returns results equivalent to a query with
    the IDs listed explicitly.

    Before the corresponding fix for this bug, this test passed in 1.1 and
    failed in 1.2-beta (trunk).
    """
    max_ids = (
        Book.objects.values('contact')
        .annotate(Max('id'))
        .order_by('contact')
        .values_list('id__max', flat=True)
    )
    # Leave max_ids unevaluated so it is embedded as a true subquery
    ordered_books = Book.objects.order_by('id')
    via_subquery = ordered_books.filter(id__in=max_ids)
    via_id_list = ordered_books.filter(id__in=list(max_ids))
    self.assertEqual(list(via_subquery), list(via_id_list))
def test_aggregates_in_where_clause_pre_eval(self):
    """
    Regression test for #12822: DatabaseError: aggregates not allowed in
    WHERE clause

    Same as the above test, but evaluates the queryset for the subquery
    before it's used as a subquery.

    Before the corresponding fix for this bug, this test failed in both
    1.1 and 1.2-beta (trunk).
    """
    max_ids = (
        Book.objects.values('contact')
        .annotate(Max('id'))
        .order_by('contact')
        .values_list('id__max', flat=True)
    )
    # Force evaluation so the subquery is built from the cached result set
    list(max_ids)
    ordered_books = Book.objects.order_by('id')
    via_subquery = ordered_books.filter(id__in=max_ids)
    via_id_list = ordered_books.filter(id__in=list(max_ids))
    self.assertEqual(list(via_subquery), list(via_id_list))
@skipUnlessDBFeature('supports_subqueries_in_group_by')
def test_annotate_with_extra(self):
    """
    Regression test for #11916: Extra params + aggregation creates
    incorrect SQL.
    """
    # Oracle doesn't support subqueries in group by clause
    # (hence the skipUnlessDBFeature guard above)
    shortest_book_sql = """
    SELECT name
    FROM aggregation_regress_book b
    WHERE b.publisher_id = aggregation_regress_publisher.id
    ORDER BY b.pages
    LIMIT 1
    """
    # tests that this query does not raise a DatabaseError due to the full
    # subselect being (erroneously) added to the GROUP BY parameters
    qs = Publisher.objects.extra(select={
        'name_of_shortest_book': shortest_book_sql,
    }).annotate(total_books=Count('book'))
    # force execution of the query
    list(qs)
def test_aggregate(self):
    """Aggregation ignores ordering and extra selected columns."""
    # Explicit ordering requests are ignored
    self.assertEqual(
        Author.objects.order_by("name").aggregate(Avg("age")),
        {"age__avg": Approximate(37.444, places=1)},
    )
    # Implicit (Meta) ordering is ignored as well
    self.assertEqual(
        Book.objects.aggregate(Sum("pages")),
        {"pages__sum": 3703},
    )
    expected_pages = {
        'pages__sum': 3703,
        'pages__avg': Approximate(617.166, places=2),
    }
    # Baseline results
    self.assertEqual(
        Book.objects.aggregate(Sum('pages'), Avg('pages')),
        expected_pages,
    )
    # An empty values() call doesn't affect grouping or results
    self.assertEqual(
        Book.objects.values().aggregate(Sum('pages'), Avg('pages')),
        expected_pages,
    )
    # The aggregate overrides the extra selected column
    self.assertEqual(
        Book.objects.extra(select={'price_per_page': 'price / pages'}).aggregate(Sum('pages')),
        {'pages__sum': 3703},
    )
def test_annotation(self):
    """Annotations combine correctly with extra(), values() and filter()
    clauses, regardless of the order in which the clauses are applied."""
    # Annotations get combined with extra select clauses
    obj = Book.objects.annotate(mean_auth_age=Avg("authors__age")).extra(
        select={"manufacture_cost": "price * .5"}).get(pk=self.b2.pk)
    self.assertObjectAttrs(
        obj,
        contact_id=self.a3.id,
        isbn='067232959',
        mean_auth_age=45.0,
        name='Sams Teach Yourself Django in 24 Hours',
        pages=528,
        price=Decimal("23.09"),
        pubdate=datetime.date(2008, 3, 3),
        publisher_id=self.p2.id,
        rating=3.0
    )
    # Different DB backends return different types for the extra select computation
    self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
    # Order of the annotate/extra in the query doesn't matter
    obj = Book.objects.extra(select={'manufacture_cost': 'price * .5'}).annotate(
        mean_auth_age=Avg('authors__age')).get(pk=self.b2.pk)
    self.assertObjectAttrs(
        obj,
        contact_id=self.a3.id,
        isbn='067232959',
        mean_auth_age=45.0,
        name='Sams Teach Yourself Django in 24 Hours',
        pages=528,
        price=Decimal("23.09"),
        pubdate=datetime.date(2008, 3, 3),
        publisher_id=self.p2.id,
        rating=3.0
    )
    # Different DB backends return different types for the extra select computation
    self.assertIn(obj.manufacture_cost, (11.545, Decimal('11.545')))
    # Values queries can be combined with annotate and extra
    obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
        select={'manufacture_cost': 'price * .5'}).values().get(pk=self.b2.pk)
    manufacture_cost = obj['manufacture_cost']
    self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
    del obj['manufacture_cost']
    self.assertEqual(obj, {
        'id': self.b2.id,
        'contact_id': self.a3.id,
        'isbn': '067232959',
        'mean_auth_age': 45.0,
        'name': 'Sams Teach Yourself Django in 24 Hours',
        'pages': 528,
        'price': Decimal('23.09'),
        'pubdate': datetime.date(2008, 3, 3),
        'publisher_id': self.p2.id,
        'rating': 3.0,
    })
    # The order of the (empty) values, annotate and extra clauses doesn't
    # matter
    obj = Book.objects.values().annotate(mean_auth_age=Avg('authors__age')).extra(
        select={'manufacture_cost': 'price * .5'}).get(pk=self.b2.pk)
    manufacture_cost = obj['manufacture_cost']
    self.assertIn(manufacture_cost, (11.545, Decimal('11.545')))
    del obj['manufacture_cost']
    self.assertEqual(obj, {
        'id': self.b2.id,
        'contact_id': self.a3.id,
        'isbn': '067232959',
        'mean_auth_age': 45.0,
        'name': 'Sams Teach Yourself Django in 24 Hours',
        'pages': 528,
        'price': Decimal('23.09'),
        'pubdate': datetime.date(2008, 3, 3),
        'publisher_id': self.p2.id,
        'rating': 3.0
    })
    # If the annotation precedes the values clause, it won't be included
    # unless it is explicitly named
    obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
        select={'price_per_page': 'price / pages'}).values('name').get(pk=self.b1.pk)
    self.assertEqual(obj, {
        "name": 'The Definitive Guide to Django: Web Development Done Right',
    })
    obj = Book.objects.annotate(mean_auth_age=Avg('authors__age')).extra(
        select={'price_per_page': 'price / pages'}).values('name', 'mean_auth_age').get(pk=self.b1.pk)
    self.assertEqual(obj, {
        'mean_auth_age': 34.5,
        'name': 'The Definitive Guide to Django: Web Development Done Right',
    })
    # If an annotation isn't included in the values, it can still be used
    # in a filter
    qs = Book.objects.annotate(n_authors=Count('authors')).values('name').filter(n_authors__gt=2)
    self.assertQuerysetEqual(
        qs, [
            {"name": 'Python Web Development with Django'}
        ],
        lambda b: b,
    )
    # The annotations are added to values output if values() precedes
    # annotate()
    obj = Book.objects.values('name').annotate(mean_auth_age=Avg('authors__age')).extra(
        select={'price_per_page': 'price / pages'}).get(pk=self.b1.pk)
    self.assertEqual(obj, {
        'mean_auth_age': 34.5,
        'name': 'The Definitive Guide to Django: Web Development Done Right',
    })
    # Check that all of the objects are getting counted (allow_nulls) and
    # that values respects the amount of objects
    self.assertEqual(
        len(Author.objects.annotate(Avg('friends__age')).values()),
        9
    )
    # Check that consecutive calls to annotate accumulate in the query
    qs = (
        Book.objects
        .values('price')
        .annotate(oldest=Max('authors__age'))
        .order_by('oldest', 'price')
        .annotate(Max('publisher__num_awards'))
    )
    self.assertQuerysetEqual(
        qs, [
            {'price': Decimal("30"), 'oldest': 35, 'publisher__num_awards__max': 3},
            {'price': Decimal("29.69"), 'oldest': 37, 'publisher__num_awards__max': 7},
            {'price': Decimal("23.09"), 'oldest': 45, 'publisher__num_awards__max': 1},
            {'price': Decimal("75"), 'oldest': 57, 'publisher__num_awards__max': 9},
            {'price': Decimal("82.8"), 'oldest': 57, 'publisher__num_awards__max': 7}
        ],
        lambda b: b,
    )
def test_aggrate_annotation(self):
    """Aggregates can be composed over annotations; the return type is
    derived from the composed aggregate.

    NOTE: the misspelled method name is historical and kept so the test
    id stays stable.
    """
    composed = Book.objects.all().annotate(num_authors=Count('authors__id')).aggregate(
        Max('pages'), Max('price'), Sum('num_authors'), Avg('num_authors'))
    self.assertEqual(composed, {
        'num_authors__sum': 10,
        'num_authors__avg': Approximate(1.666, places=2),
        'pages__max': 1132,
        'price__max': Decimal("82.80")
    })
    # Regression for #15624 - Missing SELECT columns when using values,
    # annotate and aggregate in a single query
    self.assertEqual(
        Book.objects.annotate(c=Count('authors')).values('c').aggregate(Max('c')),
        {'c__max': 3},
    )
def test_decimal_aggregate_annotation_filter(self):
    """
    Filtering on an aggregate annotation with Decimal values should work.
    Requires special handling on SQLite (#18247).
    """
    priced_authors = Author.objects.annotate(sum=Sum('book_contact_set__price'))
    self.assertEqual(len(priced_authors.filter(sum__gt=Decimal(40))), 1)
    self.assertEqual(len(priced_authors.filter(sum__lte=Decimal(40))), 4)
def test_field_error(self):
    """Bad field requests in aggregates/annotations raise FieldError."""
    everything = Book.objects.all()
    with self.assertRaises(FieldError):
        everything.aggregate(num_authors=Count('foo'))
    with self.assertRaises(FieldError):
        everything.annotate(num_authors=Count('foo'))
    with self.assertRaises(FieldError):
        everything.annotate(num_authors=Count('authors__id')).aggregate(Max('foo'))
def test_more(self):
    """count() with annotations, reserved-word aliases, select_related()
    interplay, and filter()/exclude() on aggregate values."""
    # Old-style count aggregations can be mixed with new-style
    self.assertEqual(
        Book.objects.annotate(num_authors=Count('authors')).count(),
        6
    )
    # Non-ordinal, non-computed Aggregates over annotations correctly
    # inherit the annotation's internal type if the annotation is ordinal
    # or computed
    vals = Book.objects.annotate(num_authors=Count('authors')).aggregate(Max('num_authors'))
    self.assertEqual(
        vals,
        {'num_authors__max': 3}
    )
    vals = Publisher.objects.annotate(avg_price=Avg('book__price')).aggregate(Max('avg_price'))
    self.assertEqual(
        vals,
        {'avg_price__max': 75.0}
    )
    # Aliases are quoted to protected aliases that might be reserved names
    vals = Book.objects.aggregate(number=Max('pages'), select=Max('pages'))
    self.assertEqual(
        vals,
        {'number': 1132, 'select': 1132}
    )
    # Regression for #10064: select_related() plays nice with aggregates
    obj = Book.objects.select_related('publisher').annotate(
        num_authors=Count('authors')).values().get(isbn='013790395')
    self.assertEqual(obj, {
        'contact_id': self.a8.id,
        'id': self.b5.id,
        'isbn': '013790395',
        'name': 'Artificial Intelligence: A Modern Approach',
        'num_authors': 2,
        'pages': 1132,
        'price': Decimal("82.8"),
        'pubdate': datetime.date(1995, 1, 15),
        'publisher_id': self.p3.id,
        'rating': 4.0,
    })
    # Regression for #10010: exclude on an aggregate field is correctly
    # negated
    self.assertEqual(
        len(Book.objects.annotate(num_authors=Count('authors'))),
        6
    )
    self.assertEqual(
        len(Book.objects.annotate(num_authors=Count('authors')).filter(num_authors__gt=2)),
        1
    )
    self.assertEqual(
        len(Book.objects.annotate(num_authors=Count('authors')).exclude(num_authors__gt=2)),
        5
    )
    self.assertEqual(
        len(
            Book.objects
            .annotate(num_authors=Count('authors'))
            .filter(num_authors__lt=3)
            .exclude(num_authors__lt=2)
        ),
        2
    )
    self.assertEqual(
        len(
            Book.objects
            .annotate(num_authors=Count('authors'))
            .exclude(num_authors__lt=2)
            .filter(num_authors__lt=3)
        ),
        2
    )
def test_aggregate_fexpr(self):
    """Aggregates can be used with F() expressions, both when the F() is
    pushed into the HAVING clause and when it references an aggregate."""
    # Aggregates can be used with F() expressions
    # ... where the F() is pushed into the HAVING clause
    qs = (
        Publisher.objects
        .annotate(num_books=Count('book'))
        .filter(num_books__lt=F('num_awards') / 2)
        .order_by('name')
        .values('name', 'num_books', 'num_awards')
    )
    self.assertQuerysetEqual(
        qs, [
            {'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
            {'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
        ],
        lambda p: p,
    )
    qs = (
        Publisher.objects
        .annotate(num_books=Count('book'))
        .exclude(num_books__lt=F('num_awards') / 2)
        .order_by('name')
        .values('name', 'num_books', 'num_awards')
    )
    self.assertQuerysetEqual(
        qs, [
            {'num_books': 2, 'name': 'Apress', 'num_awards': 3},
            {'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
            {'num_books': 1, 'name': 'Sams', 'num_awards': 1}
        ],
        lambda p: p,
    )
    # ... and where the F() references an aggregate
    qs = (
        Publisher.objects
        .annotate(num_books=Count('book'))
        .filter(num_awards__gt=2 * F('num_books'))
        .order_by('name')
        .values('name', 'num_books', 'num_awards')
    )
    self.assertQuerysetEqual(
        qs, [
            {'num_books': 1, 'name': 'Morgan Kaufmann', 'num_awards': 9},
            {'num_books': 2, 'name': 'Prentice Hall', 'num_awards': 7}
        ],
        lambda p: p,
    )
    qs = (
        Publisher.objects
        .annotate(num_books=Count('book'))
        .exclude(num_books__lt=F('num_awards') / 2)
        .order_by('name')
        .values('name', 'num_books', 'num_awards')
    )
    self.assertQuerysetEqual(
        qs, [
            {'num_books': 2, 'name': 'Apress', 'num_awards': 3},
            {'num_books': 0, 'name': "Jonno's House of Books", 'num_awards': 0},
            {'num_books': 1, 'name': 'Sams', 'num_awards': 1}
        ],
        lambda p: p,
    )
def test_db_col_table(self):
    """Aggregation over fields with non-default table and column names."""
    clue_counts = (
        Clues.objects
        .values('EntryID__Entry')
        .annotate(Appearances=Count('EntryID'), Distinct_Clues=Count('Clue', distinct=True))
    )
    self.assertQuerysetEqual(clue_counts, [])
    annotated_entries = Entries.objects.annotate(clue_count=Count('clues__ID'))
    self.assertQuerysetEqual(annotated_entries, [])
def test_boolean_conversion(self):
    """
    Aggregates mixed up the ordering of columns for the backend's
    convert_values method. Refs #21126.
    """
    entry = Entries.objects.create(Entry='foo')
    clue = Clues.objects.create(EntryID=entry, Clue='bar')
    annotated = Clues.objects.select_related('EntryID').annotate(Count('ID'))
    self.assertQuerysetEqual(annotated, [clue], lambda x: x)
    self.assertEqual(annotated[0].EntryID, entry)
    self.assertIs(annotated[0].EntryID.Exclude, False)
def test_empty(self):
    """Regression for #10089: aggregates over empty result sets yield
    None (0 for Count) instead of failing."""
    # Regression for #10089: Check handling of empty result sets with
    # aggregates
    self.assertEqual(
        Book.objects.filter(id__in=[]).count(),
        0
    )
    vals = (
        Book.objects
        .filter(id__in=[])
        .aggregate(
            num_authors=Count('authors'),
            avg_authors=Avg('authors'),
            max_authors=Max('authors'),
            max_price=Max('price'),
            max_rating=Max('rating'),
        )
    )
    self.assertEqual(
        vals,
        {'max_authors': None, 'max_rating': None, 'num_authors': 0, 'avg_authors': None, 'max_price': None}
    )
    qs = (
        Publisher.objects
        .filter(name="Jonno's House of Books")
        .annotate(
            num_authors=Count('book__authors'),
            avg_authors=Avg('book__authors'),
            max_authors=Max('book__authors'),
            max_price=Max('book__price'),
            max_rating=Max('book__rating'),
        ).values()
    )
    self.assertQuerysetEqual(
        qs,
        [{
            'max_authors': None,
            'name': "Jonno's House of Books",
            'num_awards': 0,
            'max_price': None,
            'num_authors': 0,
            'max_rating': None,
            'id': self.p5.id,
            'avg_authors': None,
        }],
        lambda p: p
    )
def test_more_more(self):
    """order_by across joins, select_related() with annotations,
    extra-select grouping, aggregates in subqueries, and single GROUP BY
    emission."""
    # Regression for #10113 - Fields mentioned in order_by() must be
    # included in the GROUP BY. This only becomes a problem when the
    # order_by introduces a new join.
    self.assertQuerysetEqual(
        Book.objects.annotate(num_authors=Count('authors')).order_by('publisher__name', 'name'), [
            "Practical Django Projects",
            "The Definitive Guide to Django: Web Development Done Right",
            "Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp",
            "Artificial Intelligence: A Modern Approach",
            "Python Web Development with Django",
            "Sams Teach Yourself Django in 24 Hours",
        ],
        lambda b: b.name
    )
    # Regression for #10127 - Empty select_related() works with annotate
    qs = Book.objects.filter(rating__lt=4.5).select_related().annotate(Avg('authors__age'))
    self.assertQuerysetEqual(
        qs,
        [
            ('Artificial Intelligence: A Modern Approach', 51.5, 'Prentice Hall', 'Peter Norvig'),
            ('Practical Django Projects', 29.0, 'Apress', 'James Bennett'),
            (
                'Python Web Development with Django',
                Approximate(30.333, places=2),
                'Prentice Hall',
                'Jeffrey Forcier',
            ),
            ('Sams Teach Yourself Django in 24 Hours', 45.0, 'Sams', 'Brad Dayley')
        ],
        lambda b: (b.name, b.authors__age__avg, b.publisher.name, b.contact.name)
    )
    # Regression for #10132 - If the values() clause only mentioned extra
    # (select=) columns, those columns are used for grouping
    # NOTE(review): the expected 'pub' values below use book ids; they only
    # match because book and publisher pks coincide in this fixture (the
    # second query below uses the p*.id values directly)
    qs = Book.objects.extra(select={'pub': 'publisher_id'}).values('pub').annotate(Count('id')).order_by('pub')
    self.assertQuerysetEqual(
        qs, [
            {'pub': self.b1.id, 'id__count': 2},
            {'pub': self.b2.id, 'id__count': 1},
            {'pub': self.b3.id, 'id__count': 2},
            {'pub': self.b4.id, 'id__count': 1}
        ],
        lambda b: b
    )
    qs = (
        Book.objects
        .extra(select={'pub': 'publisher_id', 'foo': 'pages'})
        .values('pub')
        .annotate(Count('id'))
        .order_by('pub')
    )
    self.assertQuerysetEqual(
        qs, [
            {'pub': self.p1.id, 'id__count': 2},
            {'pub': self.p2.id, 'id__count': 1},
            {'pub': self.p3.id, 'id__count': 2},
            {'pub': self.p4.id, 'id__count': 1}
        ],
        lambda b: b
    )
    # Regression for #10182 - Queries with aggregate calls are correctly
    # realiased when used in a subquery
    ids = (
        Book.objects
        .filter(pages__gt=100)
        .annotate(n_authors=Count('authors'))
        .filter(n_authors__gt=2)
        .order_by('n_authors')
    )
    self.assertQuerysetEqual(
        Book.objects.filter(id__in=ids), [
            "Python Web Development with Django",
        ],
        lambda b: b.name
    )
    # Regression for #15709 - Ensure each group_by field only exists once
    # per query
    qstr = str(Book.objects.values('publisher').annotate(max_pages=Max('pages')).order_by().query)
    # Check that there is just one GROUP BY clause (zero commas means at
    # most one clause)
    self.assertEqual(qstr[qstr.index('GROUP BY'):].count(', '), 0)
def test_duplicate_alias(self):
    """Regression for #11256 -- duplicating a default alias raises ValueError."""
    with self.assertRaises(ValueError):
        Book.objects.all().annotate(Avg('authors__age'), authors__age__avg=Avg('authors__age'))
def test_field_name_conflict(self):
    """Regression for #11256 -- an aggregate alias that clashes with a
    field name on the model raises ValueError."""
    with self.assertRaises(ValueError):
        Author.objects.annotate(age=Avg('friends__age'))
def test_m2m_name_conflict(self):
    """Regression for #11256 -- an aggregate alias that clashes with an
    m2m name on the model raises ValueError."""
    with self.assertRaises(ValueError):
        Author.objects.annotate(friends=Count('friends'))
def test_values_queryset_non_conflict(self):
    """
    Regression for #14707 -- on a values() queryset some potential alias
    conflicts are avoided.
    """
    by_name = Author.objects.values('name')
    # 'age' is a field on Author, so it normally couldn't be used as an
    # aggregate alias; since it isn't part of values('name'), it can be.
    results = by_name.annotate(age=Count('book_contact_set')).order_by('name')
    self.assertEqual(len(results), 9)
    self.assertEqual(results[0]['name'], 'Adrian Holovaty')
    self.assertEqual(results[0]['age'], 1)
    # Same situation, but aggregating over m2m fields
    results = by_name.annotate(age=Avg('friends__age')).order_by('name')
    self.assertEqual(len(results), 9)
    self.assertEqual(results[0]['name'], 'Adrian Holovaty')
    self.assertEqual(results[0]['age'], 32.0)
    # Same situation, but colliding with an m2m field name
    results = by_name.annotate(friends=Count('friends')).order_by('name')
    self.assertEqual(len(results), 9)
    self.assertEqual(results[0]['name'], 'Adrian Holovaty')
    self.assertEqual(results[0]['friends'], 2)
def test_reverse_relation_name_conflict(self):
    """Regression for #11256 -- an aggregate alias that clashes with a
    reverse-related name on the model raises ValueError."""
    with self.assertRaises(ValueError):
        Author.objects.annotate(book_contact_set=Avg('friends__age'))
def test_pickle(self):
    """Regression for #10197 -- querysets with aggregates can be pickled."""
    annotated = Book.objects.annotate(num_authors=Count('authors'))
    # No crash during pickling == success
    pickled = pickle.dumps(annotated)
    # The round trip must produce an equivalent query
    sql_before = annotated.query.get_compiler(annotated.db).as_sql()[0]
    restored = pickle.loads(pickled)
    self.assertEqual(
        restored.query.get_compiler(restored.db).as_sql()[0],
        sql_before,
    )
def test_more_more_more(self):
    """Collected aggregation regressions: #10199 (aggregate() clones the
    query), #10248 (annotations with dates()), #10290 (extra selects with
    params as grouping), #10425 (annotations vs count()), #10666
    (inherited fields), #10766 (aggregating an aggregate is rejected)."""
    # Regression for #10199 - Aggregate calls clone the original query so
    # the original query can still be used
    books = Book.objects.all()
    books.aggregate(Avg("authors__age"))
    self.assertQuerysetEqual(
        books.all(), [
            'Artificial Intelligence: A Modern Approach',
            'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
            'Practical Django Projects',
            'Python Web Development with Django',
            'Sams Teach Yourself Django in 24 Hours',
            'The Definitive Guide to Django: Web Development Done Right'
        ],
        lambda b: b.name
    )
    # Regression for #10248 - Annotations work with dates()
    qs = Book.objects.annotate(num_authors=Count('authors')).filter(num_authors=2).dates('pubdate', 'day')
    self.assertQuerysetEqual(
        qs, [
            datetime.date(1995, 1, 15),
            datetime.date(2007, 12, 6),
        ],
        lambda b: b
    )
    # Regression for #10290 - extra selects with parameters can be used for
    # grouping.
    qs = (
        Book.objects
        .annotate(mean_auth_age=Avg('authors__age'))
        .extra(select={'sheets': '(pages + %s) / %s'}, select_params=[1, 2])
        .order_by('sheets')
        .values('sheets')
    )
    self.assertQuerysetEqual(
        qs, [
            150,
            175,
            224,
            264,
            473,
            566
        ],
        lambda b: int(b["sheets"])
    )
    # Regression for 10425 - annotations don't get in the way of a count()
    # clause
    self.assertEqual(
        Book.objects.values('publisher').annotate(Count('publisher')).count(),
        4
    )
    self.assertEqual(
        Book.objects.annotate(Count('publisher')).values('publisher').count(),
        6
    )
    # Note: intentionally no order_by(), that case needs tests, too.
    publishers = Publisher.objects.filter(id__in=[1, 2])
    self.assertEqual(
        sorted(p.name for p in publishers),
        [
            "Apress",
            "Sams"
        ]
    )
    # Annotating must not disturb the rows the queryset returns.
    publishers = publishers.annotate(n_books=Count("book"))
    sorted_publishers = sorted(publishers, key=lambda x: x.name)
    self.assertEqual(
        sorted_publishers[0].n_books,
        2
    )
    self.assertEqual(
        sorted_publishers[1].n_books,
        1
    )
    self.assertEqual(
        sorted(p.name for p in publishers),
        [
            "Apress",
            "Sams"
        ]
    )
    # The annotated queryset is still usable as the RHS of an __in lookup.
    books = Book.objects.filter(publisher__in=publishers)
    self.assertQuerysetEqual(
        books, [
            "Practical Django Projects",
            "Sams Teach Yourself Django in 24 Hours",
            "The Definitive Guide to Django: Web Development Done Right",
        ],
        lambda b: b.name
    )
    self.assertEqual(
        sorted(p.name for p in publishers),
        [
            "Apress",
            "Sams"
        ]
    )
    # Regression for 10666 - inherited fields work with annotations and
    # aggregations
    self.assertEqual(
        HardbackBook.objects.aggregate(n_pages=Sum('book_ptr__pages')),
        {'n_pages': 2078}
    )
    # The same aggregate via the inherited field name directly.
    self.assertEqual(
        HardbackBook.objects.aggregate(n_pages=Sum('pages')),
        {'n_pages': 2078},
    )
    qs = HardbackBook.objects.annotate(n_authors=Count('book_ptr__authors')).values('name', 'n_authors')
    self.assertQuerysetEqual(
        qs,
        [
            {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
            {
                'n_authors': 1,
                'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'
            }
        ],
        lambda h: h
    )
    qs = HardbackBook.objects.annotate(n_authors=Count('authors')).values('name', 'n_authors')
    self.assertQuerysetEqual(
        qs,
        [
            {'n_authors': 2, 'name': 'Artificial Intelligence: A Modern Approach'},
            {
                'n_authors': 1,
                'name': 'Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp'
            }
        ],
        lambda h: h,
    )
    # Regression for #10766 - Shouldn't be able to reference an aggregate
    # fields in an aggregate() call.
    with self.assertRaises(FieldError):
        Book.objects.annotate(mean_age=Avg('authors__age')).annotate(Avg('mean_age'))
def test_empty_filter_count(self):
    """An annotated queryset filtered by an empty id__in list counts zero rows."""
    matched = Author.objects.filter(id__in=[]).annotate(Count("friends")).count()
    self.assertEqual(matched, 0)
def test_empty_filter_aggregate(self):
    """Aggregating over a queryset filtered to an empty id list yields
    None for the aggregate (no rows to count), not 0."""
    self.assertEqual(
        Author.objects.filter(id__in=[]).annotate(Count("friends")).aggregate(Count("pk")),
        {"pk__count": None}
    )
def test_none_call_before_aggregate(self):
    """Regression for #11789: aggregating over an empty .none() queryset
    returns None rather than raising."""
    result = Author.objects.none().aggregate(Avg('age'))
    self.assertEqual(result, {'age__avg': None})
def test_annotate_and_join(self):
    """Combining an m2m annotation with an exclude() over the same relation
    must not drop rows; the counts being equal implies no fixture author
    has a friend named "Joe"."""
    self.assertEqual(
        Author.objects.annotate(c=Count("friends__name")).exclude(friends__name="Joe").count(),
        Author.objects.count()
    )
def test_f_expression_annotation(self):
    """Annotations can be compared against arithmetic F() expressions in
    a filter, and the resulting values queryset used as an __in subquery."""
    # Books with less than 200 pages per author.
    qs = Book.objects.values("name").annotate(
        n_authors=Count("authors")
    ).filter(
        pages__lt=F("n_authors") * 200
    ).values_list("pk")
    self.assertQuerysetEqual(
        Book.objects.filter(pk__in=qs), [
            "Python Web Development with Django"
        ],
        attrgetter("name")
    )
def test_values_annotate_values(self):
    """A values() -> annotate() -> values_list() chain still yields one
    row per book, matching the plain pk listing."""
    annotated_pks = Book.objects.values("name").annotate(
        n_authors=Count("authors")
    ).values_list("pk", flat=True)
    plain_pks = Book.objects.values_list("pk", flat=True)
    self.assertEqual(list(annotated_pks), list(plain_pks))
def test_having_group_by(self):
    """A field on the LHS of a HAVING clause must also appear correctly
    in the generated GROUP BY clause."""
    # Test that when a field occurs on the LHS of a HAVING clause that it
    # appears correctly in the GROUP BY clause
    qs = Book.objects.values_list("name").annotate(
        n_authors=Count("authors")
    ).filter(
        pages__gt=F("n_authors")
    ).values_list("name", flat=True)
    # Results should be the same, all Books have more pages than authors
    self.assertEqual(
        list(qs), list(Book.objects.values_list("name", flat=True))
    )
def test_values_list_annotation_args_ordering(self):
    """
    Annotate *args ordering should be preserved in values_list results.
    **kwargs comes after *args.
    Regression test for #23659.
    """
    books = Book.objects.values_list("publisher__name").annotate(
        Count("id"), Avg("price"), Avg("authors__age"), avg_pgs=Avg("pages")
    ).order_by("-publisher__name")
    # Expected tuple layout: (publisher name, id count, price avg,
    # authors' age avg, then the keyword annotation avg_pgs last).
    self.assertEqual(books[0], ('Sams', 1, 23.09, 45.0, 528.0))
def test_annotation_disjunction(self):
    """OR-ed Q objects mixing annotation lookups (HAVING conditions) and
    plain field lookups (WHERE conditions) produce correct results."""
    qs = Book.objects.annotate(n_authors=Count("authors")).filter(
        Q(n_authors=2) | Q(name="Python Web Development with Django")
    )
    self.assertQuerysetEqual(
        qs, [
            "Artificial Intelligence: A Modern Approach",
            "Python Web Development with Django",
            "The Definitive Guide to Django: Web Development Done Right",
        ],
        attrgetter("name")
    )
    # Nested AND inside the OR, mixing annotation and field conditions.
    qs = (
        Book.objects
        .annotate(n_authors=Count("authors"))
        .filter(
            Q(name="The Definitive Guide to Django: Web Development Done Right") |
            (Q(name="Artificial Intelligence: A Modern Approach") & Q(n_authors=3))
        )
    )
    self.assertQuerysetEqual(
        qs,
        [
            "The Definitive Guide to Django: Web Development Done Right",
        ],
        attrgetter("name")
    )
    # Disjunctions involving __isnull on an annotation.
    qs = Publisher.objects.annotate(
        rating_sum=Sum("book__rating"),
        book_count=Count("book")
    ).filter(
        Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True)
    ).order_by('pk')
    self.assertQuerysetEqual(
        qs, [
            "Apress",
            "Prentice Hall",
            "Jonno's House of Books",
        ],
        attrgetter("name")
    )
    # Annotation-vs-annotation comparison via F(), OR-ed with =None.
    qs = Publisher.objects.annotate(
        rating_sum=Sum("book__rating"),
        book_count=Count("book")
    ).filter(
        Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None)
    ).order_by("num_awards")
    self.assertQuerysetEqual(
        qs, [
            "Jonno's House of Books",
            "Sams",
            "Apress",
            "Prentice Hall",
            "Morgan Kaufmann"
        ],
        attrgetter("name")
    )
def test_quoting_aggregate_order_by(self):
    """A mixed-case annotation alias must be usable in order_by(), which
    requires the alias to be properly quoted in the generated SQL."""
    qs = Book.objects.filter(
        name="Python Web Development with Django"
    ).annotate(
        authorCount=Count("authors")
    ).order_by("authorCount")
    self.assertQuerysetEqual(
        qs, [
            ("Python Web Development with Django", 3),
        ],
        lambda b: (b.name, b.authorCount)
    )
@skipUnlessDBFeature('supports_stddev')
def test_stddev(self):
    """StdDev and Variance aggregates, in both population (default) and
    sample (sample=True) variants, against approximate expected values."""
    self.assertEqual(
        Book.objects.aggregate(StdDev('pages')),
        {'pages__stddev': Approximate(311.46, 1)}
    )
    self.assertEqual(
        Book.objects.aggregate(StdDev('rating')),
        {'rating__stddev': Approximate(0.60, 1)}
    )
    self.assertEqual(
        Book.objects.aggregate(StdDev('price')),
        {'price__stddev': Approximate(24.16, 2)}
    )
    # Sample standard deviation (divides by n-1).
    self.assertEqual(
        Book.objects.aggregate(StdDev('pages', sample=True)),
        {'pages__stddev': Approximate(341.19, 2)}
    )
    self.assertEqual(
        Book.objects.aggregate(StdDev('rating', sample=True)),
        {'rating__stddev': Approximate(0.66, 2)}
    )
    self.assertEqual(
        Book.objects.aggregate(StdDev('price', sample=True)),
        {'price__stddev': Approximate(26.46, 1)}
    )
    # Population variance.
    self.assertEqual(
        Book.objects.aggregate(Variance('pages')),
        {'pages__variance': Approximate(97010.80, 1)}
    )
    self.assertEqual(
        Book.objects.aggregate(Variance('rating')),
        {'rating__variance': Approximate(0.36, 1)}
    )
    self.assertEqual(
        Book.objects.aggregate(Variance('price')),
        {'price__variance': Approximate(583.77, 1)}
    )
    # Sample variance.
    self.assertEqual(
        Book.objects.aggregate(Variance('pages', sample=True)),
        {'pages__variance': Approximate(116412.96, 1)}
    )
    self.assertEqual(
        Book.objects.aggregate(Variance('rating', sample=True)),
        {'rating__variance': Approximate(0.44, 2)}
    )
    self.assertEqual(
        Book.objects.aggregate(Variance('price', sample=True)),
        {'price__variance': Approximate(700.53, 2)}
    )
def test_filtering_by_annotation_name(self):
    """Regression for #14476: filtering on both explicit and automatically
    generated annotation names resolves correctly."""
    # Regression test for #14476
    # The name of the explicitly provided annotation name in this case
    # poses no problem
    qs = Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2).order_by('name')
    self.assertQuerysetEqual(
        qs,
        ['Peter Norvig'],
        lambda b: b.name
    )
    # Neither in this case
    qs = Author.objects.annotate(book_count=Count('book')).filter(book_count=2).order_by('name')
    self.assertQuerysetEqual(
        qs,
        ['Peter Norvig'],
        lambda b: b.name
    )
    # This case used to fail because the ORM couldn't resolve the
    # automatically generated annotation name `book__count`
    qs = Author.objects.annotate(Count('book')).filter(book__count=2).order_by('name')
    self.assertQuerysetEqual(
        qs,
        ['Peter Norvig'],
        lambda b: b.name
    )
    # Referencing the auto-generated name in an aggregate() also works.
    self.assertEqual(
        Author.objects.annotate(Count('book')).aggregate(Max('book__count')),
        {'book__count__max': 2}
    )
def test_annotate_joins(self):
    """
    Test that the base table's join isn't promoted to LOUTER. This could
    cause the query generation to fail if there is an exclude() for fk-field
    in the query, too. Refs #19087.
    """
    qs = Book.objects.annotate(n=Count('pk'))
    # The base table entry has no join type (it is the FROM table, not a
    # JOIN), so join_type must remain None rather than being promoted.
    self.assertIs(qs.query.alias_map['aggregation_regress_book'].join_type, None)
    # Check that the query executes without problems.
    self.assertEqual(len(qs.exclude(publisher=-1)), 6)
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns(self):
    """Regression for #17144: on backends that can group by pk alone,
    non-pk columns must be left out of the GROUP BY clause."""
    # Regression test for #17144
    results = Author.objects.annotate(num_contacts=Count('book_contact_set'))
    # There should only be one GROUP BY clause, for the `id` column.
    # `name` and `age` should not be grouped on.
    _, _, group_by = results.query.get_compiler(using='default').pre_sql_setup()
    self.assertEqual(len(group_by), 1)
    self.assertIn('id', group_by[0][0])
    self.assertNotIn('name', group_by[0][0])
    self.assertNotIn('age', group_by[0][0])
    # Ensure that we get correct results.
    self.assertEqual(
        [(a.name, a.num_contacts) for a in results.order_by('name')],
        [
            ('Adrian Holovaty', 1),
            ('Brad Dayley', 1),
            ('Jacob Kaplan-Moss', 0),
            ('James Bennett', 1),
            ('Jeffrey Forcier', 1),
            ('Paul Bissex', 0),
            ('Peter Norvig', 2),
            ('Stuart Russell', 0),
            ('Wesley J. Chun', 0),
        ]
    )
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns_only(self):
    """Same GROUP BY trimming as test_aggregate_duplicate_columns, but
    with a deferred-field queryset built via only()."""
    # Works with only() too.
    results = Author.objects.only('id', 'name').annotate(num_contacts=Count('book_contact_set'))
    _, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
    # Only the pk column should be grouped on.
    self.assertEqual(len(grouping), 1)
    self.assertIn('id', grouping[0][0])
    self.assertNotIn('name', grouping[0][0])
    self.assertNotIn('age', grouping[0][0])
    # Ensure that we get correct results.
    self.assertEqual(
        [(a.name, a.num_contacts) for a in results.order_by('name')],
        [
            ('Adrian Holovaty', 1),
            ('Brad Dayley', 1),
            ('Jacob Kaplan-Moss', 0),
            ('James Bennett', 1),
            ('Jeffrey Forcier', 1),
            ('Paul Bissex', 0),
            ('Peter Norvig', 2),
            ('Stuart Russell', 0),
            ('Wesley J. Chun', 0),
        ]
    )
@skipUnlessAnyDBFeature('allows_group_by_pk', 'allows_group_by_selected_pks')
def test_aggregate_duplicate_columns_select_related(self):
    """Same GROUP BY trimming, combined with select_related()."""
    # And select_related()
    results = Book.objects.select_related('contact').annotate(
        num_authors=Count('authors'))
    _, _, grouping = results.query.get_compiler(using='default').pre_sql_setup()
    # In the case of `group_by_selected_pks` we also group by contact.id because of the select_related.
    self.assertEqual(len(grouping), 1 if connection.features.allows_group_by_pk else 2)
    self.assertIn('id', grouping[0][0])
    self.assertNotIn('name', grouping[0][0])
    self.assertNotIn('contact', grouping[0][0])
    # Ensure that we get correct results.
    self.assertEqual(
        [(b.name, b.num_authors) for b in results.order_by('name')],
        [
            ('Artificial Intelligence: A Modern Approach', 2),
            ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
            ('Practical Django Projects', 1),
            ('Python Web Development with Django', 3),
            ('Sams Teach Yourself Django in 24 Hours', 1),
            ('The Definitive Guide to Django: Web Development Done Right', 2)
        ]
    )
def test_reverse_join_trimming(self):
    """The join required by a reverse-then-forward FK aggregate must not
    be trimmed out of the generated SQL."""
    sql = str(Author.objects.annotate(Count('book_contact_set__contact')).query)
    self.assertIn(' JOIN ', sql)
def test_aggregation_with_generic_reverse_relation(self):
    """
    Regression test for #10870: Aggregates with joins ignore extra
    filters provided by setup_joins
    tests aggregations with generic reverse relations
    """
    django_book = Book.objects.get(name='Practical Django Projects')
    ItemTag.objects.create(object_id=django_book.id, tag='intermediate',
                           content_type=ContentType.objects.get_for_model(django_book))
    ItemTag.objects.create(object_id=django_book.id, tag='django',
                           content_type=ContentType.objects.get_for_model(django_book))
    # Assign a tag to model with same PK as the book above. If the JOIN
    # used in aggregation doesn't have content type as part of the
    # condition the annotation will also count the 'hi mom' tag for b.
    wmpk = WithManualPK.objects.create(id=django_book.pk)
    ItemTag.objects.create(object_id=wmpk.id, tag='hi mom',
                           content_type=ContentType.objects.get_for_model(wmpk))
    ai_book = Book.objects.get(name__startswith='Paradigms of Artificial Intelligence')
    ItemTag.objects.create(object_id=ai_book.id, tag='intermediate',
                           content_type=ContentType.objects.get_for_model(ai_book))
    # Only the three Book tags should be counted; the WithManualPK tag
    # with the colliding pk must be filtered out by the content type.
    self.assertEqual(Book.objects.aggregate(Count('tags')), {'tags__count': 3})
    results = Book.objects.annotate(Count('tags')).order_by('-tags__count', 'name')
    self.assertEqual(
        [(b.name, b.tags__count) for b in results],
        [
            ('Practical Django Projects', 2),
            ('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
            ('Artificial Intelligence: A Modern Approach', 0),
            ('Python Web Development with Django', 0),
            ('Sams Teach Yourself Django in 24 Hours', 0),
            ('The Definitive Guide to Django: Web Development Done Right', 0)
        ]
    )
def test_negated_aggregation(self):
    """exclude() over annotation lookups (both comma-AND and Q-OR forms)
    matches the complement of the corresponding filter()."""
    expected_results = Author.objects.exclude(
        pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
    ).order_by('name')
    expected_results = [a.name for a in expected_results]
    # Two identical conditions AND-ed together inside exclude().
    qs = Author.objects.annotate(book_cnt=Count('book')).exclude(
        Q(book_cnt=2), Q(book_cnt=2)).order_by('name')
    self.assertQuerysetEqual(
        qs,
        expected_results,
        lambda b: b.name
    )
    expected_results = Author.objects.exclude(
        pk__in=Author.objects.annotate(book_cnt=Count('book')).filter(book_cnt=2)
    ).order_by('name')
    expected_results = [a.name for a in expected_results]
    # The same conditions OR-ed together inside exclude().
    qs = Author.objects.annotate(book_cnt=Count('book')).exclude(Q(book_cnt=2) | Q(book_cnt=2)).order_by('name')
    self.assertQuerysetEqual(
        qs,
        expected_results,
        lambda b: b.name
    )
def test_name_filters(self):
    """A Q disjunction mixing an annotation lookup with a plain field
    lookup filters correctly."""
    qs = Author.objects.annotate(Count('book')).filter(
        Q(book__count__exact=2) | Q(name='Adrian Holovaty')
    ).order_by('name')
    self.assertQuerysetEqual(
        qs,
        ['Adrian Holovaty', 'Peter Norvig'],
        lambda b: b.name
    )
def test_name_expressions(self):
    """Aggregate references inside F() arithmetic are detected and routed
    to the HAVING clause."""
    # Test that aggregates are spotted correctly from F objects.
    # Note that Adrian's age is 34 in the fixtures, and he has one book
    # so both conditions match one author.
    qs = Author.objects.annotate(Count('book')).filter(
        Q(name='Peter Norvig') | Q(age=F('book__count') + 33)
    ).order_by('name')
    self.assertQuerysetEqual(
        qs,
        ['Adrian Holovaty', 'Peter Norvig'],
        lambda b: b.name
    )
def test_ticket_11293(self):
    """Refs #11293: a Q disjunction split across WHERE (price) and HAVING
    (authors__count) conditions returns the union of both."""
    q1 = Q(price__gt=50)
    q2 = Q(authors__count__gt=1)
    query = Book.objects.annotate(Count('authors')).filter(
        q1 | q2).order_by('pk')
    self.assertQuerysetEqual(
        query, [1, 4, 5, 6],
        lambda b: b.pk)
def test_ticket_11293_q_immutable(self):
    """
    Check that splitting a q object to parts for where/having doesn't alter
    the original q-object.
    """
    q1 = Q(isbn='')
    q2 = Q(authors__count__gt=1)
    query = Book.objects.annotate(Count('authors'))
    query.filter(q1 | q2)
    # q2 must still contain exactly its single original child lookup.
    self.assertEqual(len(q2.children), 1)
def test_fobj_group_by(self):
    """
    Check that an F() object referring to related column works correctly
    in group by.
    """
    qs = Book.objects.annotate(
        account=Count('authors')
    ).filter(
        account=F('publisher__num_awards')
    )
    # Per the assertion, only this book's author count equals its
    # publisher's award count in the fixtures.
    self.assertQuerysetEqual(
        qs, ['Sams Teach Yourself Django in 24 Hours'],
        lambda b: b.name)
def test_annotate_reserved_word(self):
    """
    Regression #18333 - Ensure annotated column name is properly quoted.
    """
    # 'select' is an SQL reserved word, so the alias must be quoted for
    # the aggregate() over it to work.
    vals = Book.objects.annotate(select=Count('authors__id')).aggregate(Sum('select'), Avg('select'))
    self.assertEqual(vals, {
        'select__sum': 10,
        'select__avg': Approximate(1.666, places=2),
    })
def test_annotate_on_relation(self):
    """An aggregate annotation and an F() over a related field can be
    combined in a single annotate() call."""
    annotated = Book.objects.annotate(
        avg_price=Avg('price'),
        publisher_name=F('publisher__name'),
    )
    book = annotated.get(pk=self.b1.pk)
    self.assertEqual(book.avg_price, 30.00)
    self.assertEqual(book.publisher_name, "Apress")
def test_aggregate_on_relation(self):
    """An aggregate() over a related field succeeds even when the
    queryset already carries an aggregate annotation."""
    # A query with an existing annotation aggregation on a relation should
    # succeed.
    qs = Book.objects.annotate(avg_price=Avg('price')).aggregate(
        publisher_awards=Sum('publisher__num_awards')
    )
    self.assertEqual(qs['publisher_awards'], 30)
def test_annotate_distinct_aggregate(self):
    """Aggregating over a values().distinct() queryset deduplicates rows
    before the aggregate is applied."""
    # There are three books with rating of 4.0 and two of the books have
    # the same price. Hence, the distinct removes one rating of 4.0
    # from the results.
    vals1 = Book.objects.values('rating', 'price').distinct().aggregate(result=Sum('rating'))
    vals2 = Book.objects.aggregate(result=Sum('rating') - Value(4.0))
    self.assertEqual(vals1, vals2)
class JoinPromotionTests(TestCase):
    """Tests for when joins are (and are not) promoted to LEFT OUTER
    joins in the presence of aggregate annotations."""
    def test_ticket_21150(self):
        # Refs #21150: annotating over a nested reverse relation combined
        # with select_related() must not break null handling of the
        # select_related field.
        b = Bravo.objects.create()
        c = Charlie.objects.create(bravo=b)
        qs = Charlie.objects.select_related('alfa').annotate(Count('bravo__charlie'))
        self.assertQuerysetEqual(
            qs, [c], lambda x: x)
        self.assertIs(qs[0].alfa, None)
        a = Alfa.objects.create()
        c.alfa = a
        c.save()
        # Force re-evaluation
        qs = qs.all()
        self.assertQuerysetEqual(
            qs, [c], lambda x: x)
        self.assertEqual(qs[0].alfa, a)
    def test_existing_join_not_promoted(self):
        # No promotion for existing joins
        qs = Charlie.objects.filter(alfa__name__isnull=False).annotate(Count('alfa__name'))
        self.assertIn(' INNER JOIN ', str(qs.query))
        # Also, the existing join is unpromoted when doing filtering for already
        # promoted join.
        qs = Charlie.objects.annotate(Count('alfa__name')).filter(alfa__name__isnull=False)
        self.assertIn(' INNER JOIN ', str(qs.query))
        # But, as the join is nullable first use by annotate will be LOUTER
        qs = Charlie.objects.annotate(Count('alfa__name'))
        self.assertIn(' LEFT OUTER JOIN ', str(qs.query))
    def test_non_nullable_fk_not_promoted(self):
        # A non-nullable FK keeps an INNER join even when annotated over.
        qs = Book.objects.annotate(Count('contact__name'))
        self.assertIn(' INNER JOIN ', str(qs.query))
class SelfReferentialFKTests(TestCase):
    """Aggregation over a self-referential foreign key (refs #24748)."""
    def test_ticket_24748(self):
        # Counting children via the reverse self-FK must report 0 for
        # leaf nodes, not spuriously count the node itself.
        t1 = SelfRefFK.objects.create(name='t1')
        SelfRefFK.objects.create(name='t2', parent=t1)
        SelfRefFK.objects.create(name='t3', parent=t1)
        self.assertQuerysetEqual(
            SelfRefFK.objects.annotate(num_children=Count('children')).order_by('name'),
            [('t1', 2), ('t2', 0), ('t3', 0)],
            lambda x: (x.name, x.num_children)
        )
| |
#
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Heat API exception subclasses - maps API response errors to AWS Errors."""
import six
import webob.exc
from heat.common.i18n import _
from heat.common import serializers
class HeatAPIException(webob.exc.HTTPError):
    """webob HTTPError subclass that creates a serialized body.
    Subclass webob HTTPError so we can correctly serialize the wsgi response
    into the http response body, using the format specified by the request.
    Note this should not be used directly, instead use the subclasses
    defined below which map to AWS API errors
    """
    # Defaults; subclasses override these to match a specific AWS error.
    code = 400
    title = "HeatAPIException"
    explanation = _("Generic HeatAPIException, please use specific "
                    "subclasses!")
    # "Sender" = the client caused the error; 5xx subclasses use "Server".
    err_type = "Sender"
    def __init__(self, detail=None):
        """Overload HTTPError constructor to create a default serialized body.
        This is required because not all error responses are processed
        by the wsgi controller (such as auth errors), which are further up the
        paste pipeline. We serialize in XML by default (as AWS does).
        """
        webob.exc.HTTPError.__init__(self, detail=detail)
        serializer = serializers.XMLResponseSerializer()
        serializer.default(self, self.get_unserialized_body())
    def get_unserialized_body(self):
        """Return a dict suitable for serialization in the wsgi controller.
        This wraps the exception details in a format which maps to the
        expected format for the AWS API.
        """
        # Note the aws response format specifies a "Code" element which is not
        # the html response code, but the AWS API error code, e.g self.title
        if self.detail:
            message = ":".join([self.explanation, self.detail])
        else:
            message = self.explanation
        return {'ErrorResponse': {'Error': {'Type': self.err_type,
                'Code': self.title, 'Message': message}}}
# Common Error Subclasses:
class HeatIncompleteSignatureError(HeatAPIException):
    """The request signature does not conform to AWS standards."""
    # HTTP 400 carrying AWS error code "IncompleteSignature".
    code = 400
    title = "IncompleteSignature"
    explanation = _("The request signature does not conform to AWS standards")
class HeatInternalFailureError(HeatAPIException):
    """The request processing has failed due to some unknown error."""
    # HTTP 500 carrying AWS error code "InternalFailure".
    code = 500
    title = "InternalFailure"
    explanation = _("The request processing has failed due to an "
                    "internal error")
    # Server-side fault, so the AWS error Type is "Server".
    err_type = "Server"
class HeatInvalidActionError(HeatAPIException):
    """The action or operation requested is invalid."""
    # HTTP 400 carrying AWS error code "InvalidAction".
    code = 400
    title = "InvalidAction"
    explanation = _("The action or operation requested is invalid")
class HeatInvalidClientTokenIdError(HeatAPIException):
    """The X.509 certificate or AWS Access Key ID provided does not exist."""
    # HTTP 403 carrying AWS error code "InvalidClientTokenId".
    code = 403
    title = "InvalidClientTokenId"
    explanation = _("The certificate or AWS Key ID provided does not exist")
class HeatInvalidParameterCombinationError(HeatAPIException):
    """Parameters that must not be used together were used together."""
    # HTTP 400 carrying AWS error code "InvalidParameterCombination".
    code = 400
    title = "InvalidParameterCombination"
    explanation = _("Incompatible parameters were used together")
class HeatInvalidParameterValueError(HeatAPIException):
    """A bad or out-of-range value was supplied for the input parameter."""
    # HTTP 400 carrying AWS error code "InvalidParameterValue".
    code = 400
    title = "InvalidParameterValue"
    explanation = _("A bad or out-of-range value was supplied")
class HeatInvalidQueryParameterError(HeatAPIException):
    """AWS query string is malformed, does not adhere to AWS standards."""
    # HTTP 400 carrying AWS error code "InvalidQueryParameter".
    code = 400
    title = "InvalidQueryParameter"
    explanation = _("AWS query string is malformed, does not adhere to "
                    "AWS spec")
class HeatMalformedQueryStringError(HeatAPIException):
    """The query string is malformed."""
    # NOTE: AWS documents MalformedQueryString with HTTP status 404,
    # hence the unusual code for a client error.
    code = 404
    title = "MalformedQueryString"
    explanation = _("The query string is malformed")
class HeatMissingActionError(HeatAPIException):
    """The request is missing an action or operation parameter."""
    # HTTP 400 carrying AWS error code "MissingAction".
    code = 400
    title = "MissingAction"
    explanation = _("The request is missing an action or operation parameter")
class HeatMissingAuthenticationTokenError(HeatAPIException):
    """Does not contain a valid AWS Access Key or certificate.
    Request must contain either a valid (registered) AWS Access Key ID
    or X.509 certificate.
    """
    # HTTP 403 carrying AWS error code "MissingAuthenticationToken".
    code = 403
    title = "MissingAuthenticationToken"
    explanation = _("Does not contain a valid AWS Access Key or certificate")
class HeatMissingParameterError(HeatAPIException):
    """A mandatory input parameter is missing.
    An input parameter that is mandatory for processing the request is missing.
    """
    # HTTP 400 carrying AWS error code "MissingParameter".
    code = 400
    title = "MissingParameter"
    explanation = _("A mandatory input parameter is missing")
class HeatOptInRequiredError(HeatAPIException):
    """The AWS Access Key ID needs a subscription for the service."""
    # HTTP 403 carrying AWS error code "OptInRequired".
    code = 403
    title = "OptInRequired"
    explanation = _("The AWS Access Key ID needs a subscription for the "
                    "service")
class HeatRequestExpiredError(HeatAPIException):
    """Request expired or more than 15mins in the future.
    Request is past expires date or the request date (either with 15 minute
    padding), or the request date occurs more than 15 minutes in the future.
    """
    # HTTP 400 carrying AWS error code "RequestExpired".
    code = 400
    title = "RequestExpired"
    explanation = _("Request expired or more than 15mins in the future")
class HeatServiceUnavailableError(HeatAPIException):
    """The request has failed due to a temporary failure of the server."""
    # HTTP 503 carrying AWS error code "ServiceUnavailable".
    code = 503
    title = "ServiceUnavailable"
    explanation = _("Service temporarily unavailable")
    # Server-side fault, so the AWS error Type is "Server".
    err_type = "Server"
class HeatThrottlingError(HeatAPIException):
    """Request was denied due to request throttling."""
    # HTTP 400 carrying AWS error code "Throttling".
    code = 400
    title = "Throttling"
    explanation = _("Request was denied due to request throttling")
class AlreadyExistsError(HeatAPIException):
    """Resource with the name requested already exists."""
    # HTTP 400 carrying AWS error code "AlreadyExists".
    code = 400
    title = 'AlreadyExists'
    explanation = _("Resource with the name requested already exists")
# Not documented in the AWS docs, authentication failure errors
class HeatAccessDeniedError(HeatAPIException):
    """Authentication fails due to user IAM group memberships.
    This is the response given when authentication fails due to user
    IAM group memberships meaning we deny access.
    """
    # HTTP 403 carrying AWS error code "AccessDenied".
    code = 403
    title = "AccessDenied"
    explanation = _("User is not authorized to perform action")
class HeatSignatureError(HeatAPIException):
    """Authentication fails due to a bad signature."""
    # HTTP 403 carrying AWS error code "SignatureDoesNotMatch".
    code = 403
    title = "SignatureDoesNotMatch"
    explanation = _("The request signature we calculated does not match the "
                    "signature you provided")
# Heat-specific errors
class HeatAPINotImplementedError(HeatAPIException):
    """API action is not yet implemented."""
    # HTTP 500 with Heat-specific error code "APINotImplemented".
    code = 500
    title = "APINotImplemented"
    explanation = _("The requested action is not yet implemented")
    # Server-side fault, so the AWS error Type is "Server".
    err_type = "Server"
class HeatActionInProgressError(HeatAPIException):
    """Cannot perform action on stack in its current state."""
    # HTTP 400 reusing AWS error code "InvalidAction".
    code = 400
    title = 'InvalidAction'
    # Consistency fix: every sibling class marks its explanation for
    # translation with _(); this one was a plain (concatenated) string.
    explanation = _("Cannot perform action on stack while other actions are "
                    "in progress")
def map_remote_error(ex):
    """Map rpc_common.RemoteError exceptions to HeatAPIException subclasses.

    Map rpc_common.RemoteError exceptions returned by the engine
    to HeatAPIException subclasses which can be used to return
    properly formatted AWS error responses.

    :param ex: the remote exception raised by the engine
    :returns: an instance of the matching HeatAPIException subclass
    """
    inval_param_errors = (
        'AttributeError',
        'ValueError',
        'InvalidTenant',
        'StackNotFound',
        'ResourceActionNotSupported',
        'ResourceNotFound',
        'ResourceNotAvailable',
        'ResourceTypeNotFound',
        'PhysicalResourceNotFound',
        'WatchRuleNotFound',
        'StackValidationFailed',
        'InvalidSchemaError',
        'InvalidTemplateReference',
        'InvalidTemplateVersion',
        'InvalidTemplateSection',
        'UnknownUserParameter',
        'UserParameterMissing',
        'InvalidTemplateParameter',
        'MissingCredentialError',
        'ResourcePropertyConflict',
        'PropertyUnspecifiedError',
    )
    denied_errors = ('Forbidden', 'NotAuthorized')
    # BUG FIX: this was ('StackExists') -- a plain string, not a tuple --
    # so the `in` test below performed substring matching (e.g. an
    # exception type named 'Exists' would have matched). The trailing
    # comma makes it a one-element tuple.
    already_exists_errors = ('StackExists',)
    invalid_action_errors = ('ActionInProgress',)

    ex_type = ex.__class__.__name__

    # oslo messaging appends '_Remote' to engine-side exception class
    # names; strip it so the lookups below see the original name.
    if ex_type.endswith('_Remote'):
        ex_type = ex_type[:-len('_Remote')]

    if ex_type in inval_param_errors:
        return HeatInvalidParameterValueError(detail=six.text_type(ex))
    elif ex_type in denied_errors:
        return HeatAccessDeniedError(detail=six.text_type(ex))
    elif ex_type in already_exists_errors:
        return AlreadyExistsError(detail=six.text_type(ex))
    elif ex_type in invalid_action_errors:
        return HeatActionInProgressError(detail=six.text_type(ex))
    else:
        # Map everything else to internal server error for now
        return HeatInternalFailureError(detail=six.text_type(ex))
| |
#!/usr/bin/env python3
import collections
class BigInt:
    """Arbitrary-precision non-negative integer stored as a list of decimal
    digits, least significant first (self.number[0] is the ones digit).

    Fixes vs. the previous revision:
    - pow/smartPow/smartPowIt assigned ``self.numbers`` (a typo) for
      exponent 0, so x**0 silently did nothing; now sets ``self.number``.
    - carry arithmetic used float division (``/``), which loses precision
      once intermediate values exceed 2**53; now uses divmod on ints.
    - take(0) fell through and emptied the digit list; now returns [0].
    - bigAdd now supports self-addition and propagates carries past the
      added digits (both noted as broken in the old TODOs).
    """

    def __init__(self):
        # Start at zero: a single ones digit.
        self.number = [0]

    def skim(self):
        """Normalize in place so every entry of self.number is one digit 0-9."""
        carrier = 0
        for i in range(len(self.number)):
            # Integer divmod avoids the float-precision bug of the old
            # (x - x % 10) / 10 formulation.
            carrier, head = divmod(self.number[i] + carrier, 10)
            self.number[i] = head
        while carrier != 0:
            carrier, head = divmod(carrier, 10)
            self.number.append(head)

    def add(self, factor):
        """Add a non-negative machine integer to this number in place."""
        self.number[0] += factor
        self.skim()

    def mul(self, factor):
        """Multiply in place by a non-negative machine integer."""
        carry = 0
        for i in range(len(self.number)):
            carry, head = divmod(self.number[i] * factor + carry, 10)
            self.number[i] = head
        while carry != 0:
            carry, head = divmod(carry, 10)
            self.number.append(head)

    def pow(self, factor):
        """Raise to a non-negative integer power in place (naive repeated
        multiplication; see smartPow for the O(log n) variant)."""
        if factor < 0:
            raise NotImplementedError("Negative powers not supported")
        if isinstance(factor, float):
            if not factor.is_integer():
                raise NotImplementedError("Non-integer powers not supported")
            factor = int(factor)  # accept integral floats such as 3.0
        if factor == 0:
            self.number = [1]  # BUG FIX: previously assigned self.numbers
            return
        base = self.clone()
        for _ in range(factor - 1):
            self.bigMul(base)

    def smartPow(self, factor):
        """Raise to a non-negative integer power in place, recursively.

        Inspired by: https://en.wikipedia.org/wiki/Exponentiation_by_squaring
        """
        if factor < 0:
            raise NotImplementedError("Negative powers not supported")
        if isinstance(factor, float):
            if not factor.is_integer():
                raise NotImplementedError("Non-integer powers not supported")
            factor = int(factor)
        if factor == 0:
            self.number = [1]  # BUG FIX: previously assigned self.numbers
            return
        if factor == 1:
            return
        if factor % 2 == 0:
            # Even: x**n == (x*x)**(n/2)
            self.bigMul(self)
            self.smartPow(factor // 2)
        else:
            # Odd: x**n == (x*x)**((n-1)/2) * x
            oldSelf = self.clone()
            self.bigMul(self)
            self.smartPow((factor - 1) // 2)
            self.bigMul(oldSelf)

    def smartPowIt(self, factor):
        """Iterative exponentiation by squaring, in place.

        Inspired by: https://en.wikipedia.org/wiki/Exponentiation_by_squaring
        """
        if factor < 0:
            raise NotImplementedError("Negative powers not supported")
        if isinstance(factor, float):
            if not factor.is_integer():
                raise NotImplementedError("Non-integer powers not supported")
            factor = int(factor)
        if factor == 0:
            self.number = [1]  # BUG FIX: previously assigned self.numbers
            return
        if factor == 1:
            return
        # y accumulates the odd-step leftovers; result is self * y at the end.
        y = BigInt()
        y.add(1)
        while factor > 1:
            if factor % 2 == 0:
                self.bigMul(self)
                factor //= 2
            else:
                y.bigMul(self)
                self.bigMul(self)
                factor = (factor - 1) // 2
        self.bigMul(y)

    def skimOne(self, i):
        """Normalize digit i, pushing any overflow into digit i+1."""
        if self.number[i] > 9:
            old = self.number[i]
            self.number[i] = old % 10
            head = (old - old % 10) // 10
            if i + 1 < len(self.number):
                self.number[i + 1] += head
            else:
                self.number.append(head)

    def bigAdd(self, bigInt):
        """Add another BigInt to this one in place.

        Self-addition (x.bigAdd(x)) is safe: the other operand's digits
        are snapshotted before mutation.
        """
        other = list(bigInt.number)  # snapshot so bigAdd(self) works
        if len(self.number) < len(other):
            self.number += [0] * (len(other) - len(self.number))
        for i in range(len(other)):
            self.number[i] += other[i]
            self.skimOne(i)
        # BUG FIX: a carry could be left sitting just past the added
        # digits; keep skimming until the tail is normalized.
        i = len(other)
        while i < len(self.number):
            self.skimOne(i)
            i += 1

    def bigMul(self, bigFactor):
        """Multiply in place by another BigInt (schoolbook long
        multiplication). bigMul(self) is safe: digits are accumulated into
        a fresh BigInt before self.number is replaced."""
        total = BigInt()
        for i, digit in enumerate(bigFactor.number):
            # Zero digits contribute nothing to the sum.
            if digit == 0:
                continue
            term = self.clone()
            term.shift(i)  # align with this digit's decimal place
            if digit > 1:
                term.mul(digit)
            total.bigAdd(term)
        self.number = total.number

    def getNumberArray(self):
        """Return a copy of the internal little-endian digit list."""
        return list(self.number)

    def toString(self):
        """Return the decimal string (most significant digit first)."""
        return "".join(str(d) for d in self.number)[::-1]

    def clone(self):
        """Return an independent copy of this BigInt."""
        newSelf = BigInt()
        newSelf.number = self.getNumberArray()
        return newSelf

    def shift(self, n):
        """Multiply by 10**n by prepending n zero digits."""
        if n == 0:
            return
        if n < 0:
            raise NotImplementedError("Negative shifts are not yet implemented")
        self.number[:0] = [0] * n

    def take(self, n):
        """Keep only the n least-significant digits, discarding the rest."""
        if n < 0:
            raise ValueError("Negative takes are not supported")
        if n == 0:
            # BUG FIX: previously fell through and left an empty digit list.
            self.number = [0]
            return
        self.number = self.number[:n]
def generatePrimeTable(lim):
    """Sieve of Eratosthenes: return a list `flags` of length `lim` where
    flags[n] is True iff n is prime.

    FIX: the original indexed flags[0] and flags[1] unconditionally and so
    raised IndexError for lim < 2; small limits now return the (trivially
    correct) short table.  Composites are cleared with slice assignment
    instead of hand-rolled while loops; odd primes start marking at p*p
    because smaller multiples were already cleared by a smaller factor.
    """
    if lim <= 0:
        return []
    flags = [True] * lim
    flags[0] = False
    if lim > 1:
        flags[1] = False
    # Clear even composites 4, 6, 8, ...
    flags[4::2] = [False] * len(range(4, lim, 2))
    # Clear odd composites.
    for p in range(3, lim, 2):
        if flags[p]:
            flags[p * p::p] = [False] * len(range(p * p, lim, p))
    return flags
class NumberJuggler:
    """Factorization helpers backed by a precomputed prime sieve.

    `lim` bounds the sieve, so factorizations are only complete for numbers
    whose prime factors are all below `lim`; larger factors are silently
    missed (the leftover cofactor is dropped).
    """
    def __init__(self, lim):
        print("Generating prime lookup table")
        # primeTable[n] is True iff n is prime, for n < lim.
        self.primeTable = generatePrimeTable(lim)
        print("Generating prime list")
        # Ascending list of the primes below lim.
        self.primeList = [i for i, b in enumerate(self.primeTable) if b]
        print("Finished initializing number juggler")
    def getFactorization(self, num):
        """Return {prime: exponent} for num by trial division over the
        sieve's primes.  NOTE: incomplete if num has a factor >= lim."""
        factorisation = collections.defaultdict(int)
        countdown = num
        for prime in self.primeList:
            if countdown == 1: break
            while countdown % prime == 0:
                countdown = countdown // prime
                factorisation[prime] += 1
        return factorisation
    def getFactors(self, num):
        """Return the prime factors of num with multiplicity, e.g. 12 -> [2, 2, 3]."""
        factorisation = self.getFactorization(num)
        result = []
        for k, v in factorisation.items():
            result.extend([k] * v)
        return result
    def getPrimeFactors(self, num):
        """Return the distinct prime factors of num."""
        return list(self.getFactorization(num).keys())
    def getDivisors(self, num):
        """Return the proper divisors of num: 1 is included, num itself is not.

        Enumerates every exponent combination of the prime factorization with
        an odometer: factorCounts[j] is the exponent of factors[j] and rolls
        over into position j+1 when it exceeds its maximum exponent.
        """
        if num == 1: return [1]
        factorization = self.getFactorization(num)
        factors = list(factorization.keys())
        factorCounts = [0] * len(factors)
        # Start at the first non-trivial combination; divisor 1 is pre-seeded.
        factorCounts[0] = 1
        run = True
        divisors = [1]
        while run:
            # Build the divisor for the current exponent combination.
            divisor = 1;
            for j in range(0, len(factors)):
                if factorCounts[j] != 0:
                    divisor *= factors[j]**factorCounts[j]
            if divisor != num:  # exclude num itself (proper divisors only)
                divisors.append(divisor)
            # Advance the odometer, carrying into higher positions; when the
            # last position overflows, every combination has been visited.
            factorCounts[0] += 1
            for j in range(0, len(factorCounts)):
                if factorCounts[j] == factorization[factors[j]] + 1:
                    if j == len(factorCounts) - 1:
                        run = False
                        break
                    else:
                        factorCounts[j] = 0;
                        factorCounts[j + 1] += 1
        return divisors
def mergeSort(array):
    """Return a new list with the elements of `array` in ascending order.

    Classic top-down merge sort.  FIX: the old merge consumed the halves
    with list.pop(0), which is O(n) per call and made the merge quadratic;
    index cursors keep it linear.  Tie behavior is unchanged (on equal
    elements the right half is taken first, as the original `<` did).
    """
    if len(array) <= 1:
        return array[:]
    mid = len(array) // 2
    left = mergeSort(array[:mid])
    right = mergeSort(array[mid:])
    result = []
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] < right[j]:
            result.append(left[i])
            i += 1
        else:
            result.append(right[j])
            j += 1
    # At most one of these extends with anything.
    result.extend(left[i:])
    result.extend(right[j:])
    return result
def removeDupsOrdered(array):
    """Collapse consecutive duplicates, preserving order (like Unix uniq).

    Non-adjacent repeats are kept: [1, 2, 1] -> [1, 2, 1].
    FIX: the original indexed array[0] unconditionally and crashed with
    IndexError on an empty input; it now returns [].
    """
    if not array:
        return []
    prev = array[0]
    result = [prev]
    for e in array[1:]:
        if e != prev:
            prev = e
            result.append(e)
    return result
def simplifyFraction(nj, numerator, denominator):
    """Reduce numerator/denominator to lowest terms; returns a tuple.

    The degenerate cases (zero numerator or denominator) return (0, 0),
    matching the original behavior.

    `nj` (a NumberJuggler) is kept for interface compatibility but is no
    longer used: the old factor-cancelling loop was only correct when every
    shared factor was below nj's sieve limit (and did a redundant
    `currFactor in denFactors` test on its own elements); math.gcd is exact
    for any integers.
    """
    from math import gcd  # local import: module top-level may not import math
    if denominator == 0:
        return (0, 0)
    if numerator == 0:
        return (0, 0)
    g = gcd(numerator, denominator)
    return (numerator // g, denominator // g)
def isPandigital(num):
    """True iff num is 1-to-n pandigital: its decimal representation uses
    each of the digits 1..len(str(num)) exactly once (so no zeros and no
    repeats).  Numbers longer than 9 digits can never qualify."""
    digits = str(num)
    expected = [str(d) for d in range(1, len(digits) + 1)]
    return sorted(digits) == expected
def generatePermutations(elements):
    """Return every permutation of `elements` as a list of lists.

    Recursive: for each position, fix that element as the head and prepend
    it to each permutation of the remainder.  A single-element input is its
    own only permutation; an empty input yields [] (matching the original).
    """
    if len(elements) == 1:
        return [elements]
    perms = []
    for idx in range(len(elements)):
        remainder = list(elements)
        del remainder[idx]
        for tail in generatePermutations(remainder):
            perms.append([elements[idx]] + tail)
    return perms
def isPermutationOf(a, b):
    """True iff the decimal representations of a and b are anagrams of each
    other (same characters with the same multiplicities)."""
    return sorted(str(a)) == sorted(str(b))
# Taken from: https://www.reddit.com/r/learnpython/comments/2uhczk/all_possible_slices_of_a_list/
def sublists(s):
    """Yield every contiguous, non-empty slice of s, shortest slices first;
    slices of equal width come out left to right."""
    n = len(s)
    for width in range(1, n + 1):
        for lo in range(n - width + 1):
            yield s[lo:lo + width]
if __name__ == "__main__":
    # Smoke tests: run this module directly to sanity-check the helpers.
    print("Unit testing!")
    print("Tests for BigInt")
    # Basic construction, rendering and digit shifting.
    bi = BigInt()
    bi.add(123)
    assert(bi.toString() == "123")
    bi.shift(3)
    assert(bi.toString() == "123000")
    # Single-digit scaling.
    bi = BigInt()
    bi.add(50)
    bi.mul(5)
    # print(bi.toString())
    assert(bi.toString() == "250")
    # Big addition: 12345 + 200.
    ba = BigInt()
    ba.add(200)
    bb = BigInt()
    bb.add(12345)
    bb.bigAdd(ba)
    assert(bb.toString() == str(12345 + 200))
    # Big multiplication: 12345 * 67890.
    ba = BigInt()
    ba.add(12345)
    bb = BigInt()
    bb.add(67890)
    bb.bigMul(ba)
    assert(bb.toString() == str(12345 * 67890))
    # Repeated multiplication: 3 * 3 * 3 = 27.
    ba = BigInt()
    ba.add(3)
    bb = BigInt()
    bb.add(3)
    ba.bigMul(bb)
    ba.bigMul(bb)
    assert(ba.toString() == "27")
    # All three exponentiation variants must agree with Python's **.
    bi = BigInt()
    bi.add(3)
    bi.pow(3)
    assert(bi.toString() == "27")
    bi = BigInt()
    bi.add(80)
    bi.pow(80)
    assert(bi.toString() == str(80 ** 80))
    bi = BigInt()
    bi.add(3)
    bi.smartPow(3)
    assert(bi.toString() == "27")
    bi = BigInt()
    bi.add(80)
    bi.smartPow(80)
    assert(bi.toString() == str(80 ** 80))
    bi = BigInt()
    bi.add(3)
    bi.smartPowIt(3)
    assert(bi.toString() == "27")
    bi = BigInt()
    bi.add(80)
    bi.smartPowIt(80)
    assert(bi.toString() == str(80 ** 80))
    # Digit-permutation helper.
    assert(isPermutationOf(1487, 4817))
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: et sw=4 ts=4
'''
Copyright (c) 2008, Yahoo! Inc. All rights reserved.
Code licensed under the BSD License:
http://developer.yahoo.net/yui/license.html
version: 1.0.0b1
'''
''' Prints documentation with htmltmpl from the json data outputted by parser.py '''
import os, re, simplejson, shutil, logging, logging.config, time, datetime
import const
from cStringIO import StringIO
from Cheetah.Template import Template
from sets import Set
from pkg_resources import resource_filename
import sys  # FIX: was never imported -- sys.path[0] below raised NameError,
            # which the bare except silently swallowed, so the primary log
            # config next to the script could never load.

# Prefer the log config sitting next to the script; fall back to the copy
# packaged with the distribution; a doc run should not die over log config.
try:
    logging.config.fileConfig(os.path.join(sys.path[0], const.LOGCONFIG))
except Exception:
    try:
        logging.config.fileConfig(resource_filename(__name__, const.LOGCONFIG))
    except Exception:
        pass

log = logging.getLogger('yuidoc.generate')
class DocGenerator(object):
def __init__(self, inpath, datafile, outpath, templatepath, newext, showprivate=False,
projectname='Yahoo! UI Library',
version='',
projecturl='http://developer.yahoo.com/yui/',
ydn=False):
def _mkdir(newdir):
if os.path.isdir(newdir): pass
elif os.path.isfile(newdir):
raise OSError("a file with the same name as the desired " \
"dir, '%s', already exists." % newdir)
else:
head, tail = os.path.split(newdir)
if head and not os.path.isdir(head): _mkdir(head)
if tail: os.mkdir(newdir)
self.moduleprefix = const.MODULE_PREFIX
self.inpath = os.path.abspath(inpath)
# set and output path, create if needed
self.outpath = os.path.abspath(outpath)
self.newext = newext
_mkdir(self.outpath)
self.templatepath = os.path.abspath(templatepath)
# copy all of the directories from the template directory to the
# destination directory.
for i in os.listdir(self.templatepath):
fullname = os.path.join(self.templatepath, i)
if os.path.isdir(fullname):
targetdir = os.path.join(self.outpath, i)
try:
shutil.rmtree(targetdir)
except: pass
shutil.copytree(fullname, targetdir)
self.showprivate = showprivate
f=open(os.path.join(inpath, datafile))
self.rawdata = StringIO(f.read()).getvalue()
d = self.data = simplejson.loads(self.rawdata)
self.projectname = projectname
self.projecturl = projecturl
self.ydn = ydn
self.version = version
self.modulename = ""
self.moduletitle = ""
self.moduledesc = "Please supply a module block somewhere in your code"
# self.requires = None
self.modules = d[const.MODULES]
self.modulenames = self.modules.keys()
self.modulenames.sort(lambda x,y: cmp(x.lower(), y.lower()))
self.cleansedmodulename = self.cleanseStr(self.modulename)
self.classname = ""
self.filename = ""
self.pagetype = ""
self.classmap = d[const.CLASS_MAP]
self.classnames = ""
self.filenames = ""
self.allprops = []
def cleanseStr(self, strg):
cleanregex= re.compile(r"[^\w\-]")
cleansed = cleanregex.sub('', strg.lower())
# log.warn('cleansed module: %s' %(cleansed));
return self.moduleprefix + cleansed
def write(self, filename, data):
out = open(os.path.join(self.outpath, filename), "w")
out.writelines(str(data))
out.close()
    def process(self):
        """Render the whole documentation set into the output directory:
        raw.json, one HTML page per class, one splash page per module, one
        source-view page per file, the index page + search index.json, and
        the classmap.js module map.  Heavily stateful: per-page state lives
        on self and is pushed onto the Cheetah template before each write.
        """
        def assignGlobalProperties(template):
            # Push generator-wide state onto the template and reset all
            # per-page fields to safe defaults.
            template.projectname = self.projectname
            template.projecturl = self.projecturl
            template.ydn = self.ydn
            template.version = self.version
            template.modules = self.modules
            template.modulenames = self.modulenames
            template.modulename = self.modulename
            template.moduletitle = self.moduletitle
            template.cleansedmodulename = self.cleansedmodulename
            template.moduledesc = self.moduledesc
            template.year = datetime.date.today().strftime('%Y')
            template.filename = self.filename
            if self.filename:
                template.filepath = os.path.join(self.inpath, self.filename)
                template.filepath_highlighted = template.filepath + self.newext
            template.pagetype = self.pagetype
            template.classmap = self.classmap
            template.classnames = self.classnames
            template.filenames = self.filenames
            template.classname = self.classname
            template.requires = ""
            template.optional = ""
            template.properties = ""
            template.methods = ""
            template.events = ""
            template.configs = ""
            template.extends = ""
            template.uses = ""
            template.index = False # is this the index page

        def transferToTemplate(prop, dict, template, valOverride=''):
            # Copy dict[prop] (if present) onto the template attribute of
            # the same name; a non-empty valOverride wins over the value.
            val = ""
            if prop in dict:
                val = unicode(dict[prop])
            if valOverride:
                val = valOverride
            setattr(template, prop, val)

        def transferToDict(prop, dict1, dict2, default="", skipOverrideIfNoMatch=False):
            # Copy dict1[prop] into dict2[prop], substituting `default` for
            # empty values; when the key is missing and skipOverrideIfNoMatch
            # is set, the value stays "" instead of the default.
            val = ""
            if prop in dict1:
                val = unicode(dict1[prop])
                if not val:
                    val = default
            else:
                if skipOverrideIfNoMatch:
                    pass
                else:
                    val = default
            dict2[prop] = val

        def shouldShow(item):
            # Non-static members are shown unless private (private members
            # are still shown when --showprivate is on).
            if const.STATIC not in item and \
               (self.showprivate or const.PRIVATE not in item):
                return True
            else:
                return False

        def shouldShowClass(item):
            # Classes are filtered on privacy only.
            if self.showprivate or const.PRIVATE not in item:
                return True
            else:
                return False

        def soft_sort(x, y):
            # Case-insensitive comparator (Python 2 cmp-style).
            return cmp(x.lower(), y.lower())

        def getPropsFromSuperclass(superc, classes, dict):
            # get inherited data
            # Record the names of inherited properties/methods/events/configs
            # from one superclass into `dict` (keyed by member kind, then
            # superclass name), then recurse up the EXTENDS/USES chain.
            if shouldShowClass(superc):
                supercname = superc[const.NAME]
                if const.PROPERTIES in superc:
                    inhdef = dict[const.PROPERTIES][supercname] = []
                    keys = superc[const.PROPERTIES].keys()
                    keys.sort(soft_sort)
                    for prop in keys:
                        superprop = superc[const.PROPERTIES][prop]
                        if shouldShow(superprop):
                            if const.PRIVATE in superprop: access = const.PRIVATE
                            elif const.PROTECTED in superprop: access = const.PROTECTED
                            else:access = ""
                            inhdef.append({const.NAME: prop, const.ACCESS: access, const.DEPRECATED: const.DEPRECATED in superprop})
                if const.METHODS in superc:
                    inhdef = dict[const.METHODS][supercname] = []
                    keys = superc[const.METHODS].keys()
                    keys.sort(soft_sort)
                    for method in keys:
                        supermethod = superc[const.METHODS][method]
                        if shouldShow(supermethod):
                            if const.PRIVATE in supermethod: access = const.PRIVATE
                            elif const.PROTECTED in supermethod: access = const.PROTECTED
                            else:access = ""
                            inhdef.append({const.NAME: method, const.ACCESS: access, const.DEPRECATED: const.DEPRECATED in supermethod})
                if const.EVENTS in superc:
                    inhdef = dict[const.EVENTS][supercname] = []
                    keys = superc[const.EVENTS].keys()
                    keys.sort(soft_sort)
                    for event in keys:
                        superevent = superc[const.EVENTS][event]
                        if shouldShow(superevent):
                            # inhdef.append(event)
                            if const.PRIVATE in superevent: access = const.PRIVATE
                            elif const.PROTECTED in superevent: access = const.PROTECTED
                            else:access = ""
                            inhdef.append({const.NAME: event, const.ACCESS: access, const.DEPRECATED: const.DEPRECATED in superevent})
                if const.CONFIGS in superc:
                    inhdef = dict[const.CONFIGS][supercname] = []
                    keys = superc[const.CONFIGS].keys()
                    keys.sort(soft_sort)
                    for config in keys:
                        superconfig = superc[const.CONFIGS][config]
                        if shouldShow(superconfig):
                            #inhdef.append(config)
                            if const.PRIVATE in superconfig: access = const.PRIVATE
                            elif const.PROTECTED in superconfig: access = const.PROTECTED
                            else:access = ""
                            inhdef.append({const.NAME: config, const.ACCESS: access, const.DEPRECATED: const.DEPRECATED in superconfig})
                if const.EXTENDS in superc:
                    supercname = superc[const.EXTENDS]
                    if supercname in classes:
                        getPropsFromSuperclass(classes[supercname], classes, dict)
                if const.USES in superc:
                    for supercname in superc[const.USES]:
                        if supercname in classes:
                            getPropsFromSuperclass(classes[supercname], classes, dict)

        # build url: class, property, type
        def getUrl(c, p, t=''):
            return "%s.html#%s_%s" %(c, t, p)

        #sort is case insensitive and ignores punctuation for the search json file
        def allprop_sort(x, y):
            pat = re.compile(r"[\_\-\.]")
            cx = x[const.NAME].lower()
            cy = y[const.NAME].lower()
            cx = pat.sub('', cx)
            cy = pat.sub('', cy)
            return cmp(cx, cy)

        log.info("-------------------------------------------------------")

        # copy the json file
        # jsonname = self.cleansedmodulename + ".json"
        jsonname = "raw.json"
        log.info("Writing " + jsonname)
        self.write(jsonname, self.rawdata)

        for mname in self.modules:
            log.info("Generating module splash for %s" %(mname))
            m = self.modules[mname]
            self.filename = ""
            self.classname = ""
            classes = self.data[const.CLASS_MAP]
            self.classnames = []
            for i in m[const.CLASS_LIST]:
                if shouldShowClass(classes[i]):
                    self.classnames.append(i)
            self.classnames.sort(soft_sort)
            t = Template(file=os.path.join(self.templatepath, "main.tmpl"))
            t.timestamp = time.time()
            self.modulename = mname
            self.moduletitle = mname
            if const.TITLE in m:
                self.moduletitle = m[const.TITLE]
            self.cleansedmodulename = self.cleanseStr(mname)
            if const.DESCRIPTION in m:
                self.moduledesc = m[const.DESCRIPTION]
            else:
                log.warn("Missing module description for " + mname)
                self.moduledesc = ''
            self.filenames = m[const.FILE_LIST]
            self.filenames.sort(soft_sort)
            assignGlobalProperties(t)
            transferToTemplate(const.REQUIRES, m, t)
            transferToTemplate(const.OPTIONAL, m, t)
            transferToTemplate(const.BETA, m, t, "Beta")
            transferToTemplate(const.EXPERIMENTAL, m, t, "Experimental")
            if len(m[const.SUBMODULES]) > 0:
                strg = ', '.join(m[const.SUBMODULES])
            else:
                strg = 'none'
            transferToTemplate(const.SUBMODULES, m, t, strg)
            t.submodules = m[const.SUBMODULES]
            transferToTemplate(const.SUBDATA, m, t, '')
            t.subdata = m[const.SUBDATA]
            moduleprops = []
            classList = []
            # class API view
            #for i in classes:
            for i in m[const.CLASS_LIST]:
                self.classname = unicode(i)
                c = classes[i]
                if shouldShowClass(c):
                    log.info("Generating API page for " + i)
                    assignGlobalProperties(t)
                    # template items that need default values even if not included
                    transferToTemplate( const.SEE, c, t )
                    transferToTemplate( const.DEPRECATED, c, t )
                    transferToTemplate( const.DESCRIPTION, c, t )
                    transferToTemplate( const.STATIC, c, t )
                    if const.STATIC in c: t.static = const.STATIC
                    transferToTemplate( const.FINAL, c, t )
                    if const.FINAL in c: t.final = const.FINAL
                    transferToTemplate( const.ACCESS, c, t )
                    if const.PRIVATE in c: t.access = const.PRIVATE
                    elif const.PROTECTED in c: t.access = const.PROTECTED
                    desc = ''
                    if const.DESCRIPTION in c:
                        desc = c[const.DESCRIPTION]
                    #subclasses
                    subclasses = self.subclasses = []
                    for j in classes:
                        if const.SUPERCLASS in classes[j] and classes[j][const.SUPERCLASS] == i:
                            subclasses.append(j)
                    t.subclasses = subclasses
                    gName = i.replace('YAHOO.widget.', '');
                    gName = gName.replace('YAHOO.util.', '');
                    classInfo = { const.DESCRIPTION: desc, const.NAME: i, const.GUESSEDNAME: gName, const.EXTENDS: [] }
                    # Properties/fields
                    props = t.properties = []
                    if const.PROPERTIES in c:
                        keys = c[const.PROPERTIES].keys()
                        keys.sort(soft_sort)
                        for propertykey in keys:
                            prop = c[const.PROPERTIES][propertykey]
                            if self.showprivate or const.PRIVATE not in prop:
                                propdata = {const.NAME: propertykey, const.HOST: i, const.TYPE: 'property', const.URL:getUrl(i, propertykey, const.PROPERTY)}
                                transferToDict( const.ACCESS, prop, propdata )
                                if const.PRIVATE in prop: propdata[const.ACCESS] = const.PRIVATE
                                elif const.PROTECTED in prop: propdata[const.ACCESS] = const.PROTECTED
                                self.allprops.append(propdata.copy())
                                moduleprops.append(propdata.copy())
                                transferToDict( const.TYPE, prop, propdata, const.OBJECT )
                                transferToDict( const.DESCRIPTION, prop, propdata )
                                transferToDict( const.DEFAULT, prop, propdata )
                                transferToDict( const.DEPRECATED, prop, propdata, const.NBWS, const.DEPRECATED )
                                transferToDict( const.SEE, prop, propdata )
                                transferToDict( const.STATIC, prop, propdata )
                                if const.STATIC in prop: propdata[const.STATIC] = const.STATIC
                                transferToDict( const.FINAL, prop, propdata )
                                if const.FINAL in prop: propdata[const.FINAL] = const.FINAL
                                props.append(propdata)
                    # Methods
                    methods = t.methods = []
                    if const.METHODS in c:
                        keys = c[const.METHODS].keys()
                        keys.sort(soft_sort)
                        for methodkey in keys:
                            method = c[const.METHODS][methodkey]
                            if self.showprivate or const.PRIVATE not in method:
                                methoddata = {const.NAME: methodkey, const.HOST: i, const.TYPE: 'method', const.URL:getUrl(i, methodkey, const.METHOD)}
                                transferToDict( const.ACCESS, method, methoddata )
                                if const.PRIVATE in method: methoddata[const.ACCESS] = const.PRIVATE
                                elif const.PROTECTED in method: methoddata[const.ACCESS] = const.PROTECTED
                                self.allprops.append(methoddata.copy())
                                moduleprops.append(methoddata.copy())
                                transferToDict( const.DESCRIPTION, method, methoddata )
                                transferToDict( const.DEPRECATED, method, methoddata, const.NBWS, const.DEPRECATED )
                                transferToDict( const.SEE, method, methoddata )
                                transferToDict( const.STATIC, method, methoddata )
                                if const.STATIC in method: methoddata[const.STATIC] = const.STATIC
                                transferToDict( const.FINAL, method, methoddata )
                                if const.FINAL in method: methoddata[const.FINAL] = const.FINAL
                                transferToDict( const.CHAINABLE, method, methoddata )
                                if const.CHAINABLE in method: methoddata[const.CHAINABLE] = const.CHAINABLE
                                ret = methoddata[const.RETURN] = {const.NAME:"", const.DESCRIPTION:"", const.TYPE:const.VOID}
                                if const.RETURN in method:
                                    transferToDict( const.TYPE, method[const.RETURN], ret, "" )
                                    transferToDict( const.DESCRIPTION, method[const.RETURN], ret )
                                params = methoddata[const.PARAMS] = []
                                if const.PARAMS in method:
                                    mp = method[const.PARAMS]
                                    for p in mp:
                                        param = {}
                                        transferToDict( const.NAME, p, param, const.UNKNOWN )
                                        transferToDict( const.TYPE, p, param, const.OBJECT )
                                        transferToDict( const.DESCRIPTION, p, param )
                                        params.append(param)
                                methods.append(methoddata)
                    # Events
                    events = t.events = []
                    if const.EVENTS in c:
                        keys = c[const.EVENTS].keys()
                        keys.sort(soft_sort)
                        for eventkey in keys:
                            event = c[const.EVENTS][eventkey]
                            if self.showprivate or const.PRIVATE not in event:
                                eventdata = {const.NAME: eventkey, const.HOST: i, const.TYPE: 'event', const.URL:getUrl(i, eventkey, const.EVENT)}
                                transferToDict( const.ACCESS, event, eventdata )
                                if const.PRIVATE in event: eventdata[const.ACCESS] = const.PRIVATE
                                elif const.PROTECTED in event: eventdata[const.ACCESS] = const.PROTECTED
                                self.allprops.append(eventdata.copy())
                                moduleprops.append(eventdata.copy())
                                transferToDict( const.DESCRIPTION, event, eventdata )
                                transferToDict( const.DEPRECATED, event, eventdata, const.NBWS, const.DEPRECATED )
                                transferToDict( const.SEE, event, eventdata )
                                transferToDict( const.STATIC, event, eventdata )
                                if const.STATIC in event: eventdata[const.STATIC] = const.STATIC
                                transferToDict( const.FINAL, event, eventdata )
                                if const.FINAL in event: eventdata[const.FINAL] = const.FINAL
                                transferToDict( const.BUBBLES, event, eventdata )
                                #Bubbles should contain a classname to bubble to
                                #if const.BUBBLES in event: eventdata[const.BUBBLES] = const.BUBBLES
                                transferToDict( const.PREVENTABLE, event, eventdata )
                                #preventable should contain a default method
                                #Bug #20
                                #if const.PREVENTABLE in event: eventdata[const.PREVENTABLE] = const.PREVENTABLE
                                transferToDict( const.CANCELABLE, event, eventdata )
                                if const.CANCELABLE in event: eventdata[const.CANCELABLE] = const.CANCELABLE
                                params = eventdata[const.PARAMS] = []
                                if const.PARAMS in event:
                                    mp = event[const.PARAMS]
                                    for p in mp:
                                        param = {}
                                        transferToDict( const.NAME, p, param, const.UNKNOWN )
                                        transferToDict( const.TYPE, p, param, const.OBJECT )
                                        transferToDict( const.DESCRIPTION, p, param )
                                        params.append(param)
                                events.append(eventdata)
                    # configs
                    configs = t.configs = []
                    if const.CONFIGS in c:
                        keys = c[const.CONFIGS].keys()
                        keys.sort(soft_sort)
                        for configkey in keys:
                            config = c[const.CONFIGS][configkey]
                            if self.showprivate or const.PRIVATE not in config:
                                configdata = {const.NAME: configkey, const.HOST: i, const.TYPE: 'config', const.URL:getUrl(i, configkey, const.CONFIG)}
                                transferToDict( const.ACCESS, config, configdata )
                                if const.PRIVATE in config: configdata[const.ACCESS] = const.PRIVATE
                                elif const.PROTECTED in config: configdata[const.ACCESS] = const.PROTECTED
                                self.allprops.append(configdata.copy())
                                moduleprops.append(configdata.copy())
                                transferToDict( const.TYPE, config, configdata, const.OBJECT )
                                transferToDict( const.DESCRIPTION, config, configdata )
                                transferToDict( const.DEFAULT, config, configdata )
                                transferToDict( const.DEPRECATED, config, configdata, const.NBWS, const.DEPRECATED )
                                transferToDict( const.SEE, config, configdata )
                                transferToDict( const.STATIC, config, configdata )
                                if const.STATIC in config: configdata[const.STATIC] = const.STATIC
                                transferToDict( const.FINAL, config, configdata )
                                # a FINAL config is surfaced as READONLY
                                if const.FINAL in config: configdata[const.FINAL] = const.READONLY
                                transferToDict( const.WRITEONCE, config, configdata )
                                if const.WRITEONCE in config: configdata[const.WRITEONCE] = const.WRITEONCE
                                configs.append(configdata)
                    # get inherited data
                    inherited = t.inherited = {const.PROPERTIES:{}, const.METHODS:{}, const.EVENTS:{}, const.CONFIGS:{}, const.SUPERCLASS: {} }
                    if const.EXTENDS in c:
                        supercname = t.extends = unicode(c[const.EXTENDS])
                        if supercname in classes:
                            superc = classes[supercname]
                            getPropsFromSuperclass(superc, classes, inherited)
                    if const.USES in c:
                        for supercname in c[const.USES]:
                            t.uses = c[const.USES]
                            if supercname in classes:
                                superc = classes[supercname]
                                getPropsFromSuperclass(superc, classes, inherited)
                    #Create the superclass chain and attach it to the classInfo Object
                    # NOTE: this loop reuses `i` (the class-list loop variable);
                    # harmless because `i` is not read again before the next
                    # iteration, but worth knowing before editing.
                    extends = {}
                    for i in inherited:
                        for a in inherited[i]:
                            extends[a] = a
                    inherited[const.SUPERCLASS] = extends
                    classInfo[const.EXTENDS] = inherited
                    classList.append(classInfo)
                    # Constructor -- technically the parser can take multiple constructors
                    # but that doesn't help here
                    constructordata = t.constructor = {}
                    if const.CONSTRUCTORS in c:
                        constructor = c[const.CONSTRUCTORS][0]
                        transferToDict( const.DESCRIPTION, constructor, constructordata )
                        ret = constructordata[const.RETURN] = {}
                        if const.RETURN in constructor:
                            transferToDict( const.TYPE, constructor[const.RETURN], ret, const.VOID )
                            transferToDict( const.DESCRIPTION, constructor[const.RETURN], ret )
                        params = constructordata[const.PARAMS] = []
                        if const.PARAMS in constructor:
                            cp = constructor[const.PARAMS]
                            for p in cp:
                                param = {}
                                transferToDict( const.NAME, p, param, const.UNKNOWN )
                                transferToDict( const.TYPE, p, param, const.OBJECT )
                                transferToDict( const.DESCRIPTION, p, param )
                                params.append(param)
                    # write module splash
                    moduleprops.sort(allprop_sort)
                    t.allprops_raw = moduleprops
                    moduleprops_json = simplejson.dumps(moduleprops)
                    t.allprops = moduleprops_json
                    classList.sort(allprop_sort)
                    t.classList_raw = classList
                    t.classList = simplejson.dumps(classList)
                    self.write("%s.html" %(self.classname), t)
            # clear out class name
            self.classname = ""
            t.classname = ""
            t.filename = ""
            t.properties = ""
            t.methods = ""
            t.events = ""
            t.configs = ""
            # write module splash
            moduleprops.sort(allprop_sort)
            t.allprops_raw = moduleprops
            moduleprops_json = simplejson.dumps(moduleprops)
            t.allprops = moduleprops_json
            # log.warn('cleansed module file name: %s' %(t.cleansedmodulename));
            self.write( t.cleansedmodulename + ".html", t)
            # class source view
            for i in m[const.FILE_LIST]:
                log.info("Generating source view for " + i)
                self.filename = unicode(i)
                assignGlobalProperties(t)
                self.write("%s.html" %(self.filename), t)
        #remove dups
        allprops = []
        propmap = {}
        for i in self.allprops:
            url = i[const.URL]
            if url not in propmap:
                allprops.append(i)
                propmap[url] = True
        allprops.sort(allprop_sort)
        allprops_json = simplejson.dumps(allprops)
        self.write("index.json",allprops_json)
        # index
        log.info("Generating index")
        t = Template(file=os.path.join(self.templatepath, "main.tmpl"))
        t.timestamp = time.time()
        self.modulename = ""
        self.moduletitle = ""
        self.classname = ""
        self.classnames = []
        for i in self.data[const.CLASS_MAP].keys():
            if shouldShowClass(self.data[const.CLASS_MAP][i]):
                self.classnames.append(i)
        self.classnames.sort(soft_sort)
        self.filenames = self.data[const.FILE_MAP].keys()
        self.filenames.sort(soft_sort)
        self.filename = ""
        assignGlobalProperties(t)
        t.allprops = allprops_json
        t.index = True
        self.write("index.html", t)
        # map all classes to the corresponding module for external loaders
        t = Template(file=os.path.join(self.templatepath, "classmap.tmpl"))
        t.timestamp = time.time()
        pkgMap = {}
        keys = self.data[const.CLASS_MAP].keys()
        keys.sort()
        for i in keys:
            try:
                pkgMap[i] = self.data[const.CLASS_MAP][i][const.MODULE]
            except:
                try:
                    log.warn('class map ' + i + ' failure (no module declaration?)')
                except: pass
        t.pkgmap = simplejson.dumps(pkgMap)
        self.write("classmap.js", t)
        log.info(" ")
        log.info("Done\n")
def main():
    """Command-line entry point: parse the options and run DocGenerator."""
    from optparse import OptionParser
    optparser = OptionParser("usage: %prog inputdir [options] inputdir")
    optparser.set_defaults(outputdir="docs",
                           inputfile="parsed.json",
                           newext=".highlighted",
                           showprivate=False,
                           project="Yahoo! UI Library",
                           version=""
                           )
    optparser.add_option( "-o", "--outputdir",
        action="store", dest="outputdir", type="string",
        help="Directory to write the html documentation" )
    optparser.add_option( "-f", "--file",
        action="store", dest="inputfile", type="string",
        help="The name of the file that contains the JSON doc info" )
    optparser.add_option( "-t", "--template",
        action="store", dest="templatedir", type="string",
        help="The directory containing the html tmplate" )
    optparser.add_option( "-c", "--crosslink",
        action="store", dest="crosslinkdir", type="string",
        help="The directory containing json data for other modules to crosslink" )
    optparser.add_option( "-s", "--showprivate",
        action="store_true", dest="showprivate",
        help="Should private properties/methods be in the docs?" )
    optparser.add_option( "-n", "--newextension",
        action="store", dest="newext", type="string",
        help="The extension to append to the syntax output file" )
    optparser.add_option( "-m", "--project",
        action="store", dest="project", type="string",
        help="The name of the project" )
    optparser.add_option( "-v", "--version",
        action="store", dest="version", type="string",
        help="The version of the project" )
    optparser.add_option( "-u", "--projecturl",
        action="store", dest="projecturl", type="string",
        help="The project url" )
    optparser.add_option( "-y", "--ydn",
        action="store_true", dest="ydn",
        help="Add YDN MyBlogLog intrumentation?" )
    (options, inputdirs) = optparser.parse_args()

    if len(inputdirs) > 0:
        # BUG FIX: the original passed the arguments positionally and
        # skipped `newext`, so showprivate landed in the newext slot and
        # every later argument was shifted by one (project -> showprivate,
        # version -> projectname, projecturl -> version, ydn -> projecturl).
        # Keyword arguments make the call match DocGenerator's signature.
        generator = DocGenerator( inputdirs[0],
                                  options.inputfile,
                                  options.outputdir,
                                  options.templatedir,
                                  options.newext,
                                  showprivate=options.showprivate,
                                  projectname=options.project,
                                  version=options.version,
                                  projecturl=options.projecturl,
                                  ydn=options.ydn
                                  )
        generator.process()
    else:
        optparser.error("Incorrect number of arguments")
# Allow running this module directly as a command-line script.
if __name__ == '__main__':
    main()
| |
#
# epydoc.html: HTML colorizers
# Edward Loper
#
# Created [10/16/02 09:49 PM]
# $Id: colorize.py,v 1.5 2003/07/13 08:40:06 edloper Exp $
#
r"""
Functions to produce colorized HTML code for various objects.
Currently, C{colorize} defines functions to colorize regular
expressions and doctest blocks.
@var RE_TAG: The CSS class for colorizing regular expressions.
@var ANY_TAG: The CSS class for colorizing C{"."} in regular
expressions.
@var ESCAPE_TAG: The CSS class for colorizing escaped characters (such
as C{r"\("}) in regular expressions.
@var CATEGORY_TAG: The CSS class for colorizing character categories
(such as C{r"\d"})) in regular expressions.
@var AT_TAG: The CSS class for colorizing character locations (such as
C{"^"}) in regular expressions.
@var BRANCH_TAG: The CSS class for colorizing C{"|"} in regular
expressions.
@var STAR_TAG: The CSS class for colorizing C{"*"} and C{"*?"} in
regular expressions.
@var PLUS_TAG: The CSS class for colorizing C{"+"} and C{"+?"} in
regular expressions.
@var QMRK_TAG: The CSS class for colorizing C{"?"} and C{"??"} in
regular expressions.
@var RNG_TAG: The CSS class for colorizing repeat ranges (such as
C{"a{3,8}"}) in regular expressions.
@var PAREN_TAG: The CSS class for colorizing parentheses in regular
expressions.
@var CHOICE_TAG: The CSS class for colorizing character choice
expressions (such as C{"[abc]"}) in regular expressions.
@var ASSERT_TAG: The CSS class for colorizing assertions (such as
C{"(?=abc)"}) in regular expressions.
@var REF_TAG: The CSS class for colorizing references (such as
C{r"\1"}) in regular expressions.
@var _PROMPT_RE: The regular expression used to find Python prompts
(">>>" and "...") in doctest blocks.
@var _DOCTEST_RE: The regular expression used by L{_doctest_sub} to
colorize doctest blocks.
"""
__docformat__ = 'epytext en'
import sys, sre_parse, sre, re
import sre_constants
##################################################
## Regular expression colorizer
##################################################
# HTML tags for colorize_re
# NOTE: several visually distinct regex constructs intentionally share a
# CSS class (all single-character escapes are 're-char', all repetition
# operators are 're-op', all grouping forms are 're-group').
RE_TAG = 're'
ANY_TAG = 're-char'      # r"."
ESCAPE_TAG = 're-char'   # r"\("
CATEGORY_TAG = 're-char' # r"\d"
AT_TAG = 're-char'       # r"^"
BRANCH_TAG = 're-op'     # r"a|b|c"
STAR_TAG = 're-op'       # r"a*"
PLUS_TAG = 're-op'       # r"a+"
QMRK_TAG = 're-op'       # r"a?"
RNG_TAG = 're-op'        # r"a{3,8}"
PAREN_TAG = 're-group'   # r"(abc)"
CHOICE_TAG = 're-group'  # r"[abc]"
ASSERT_TAG = 're-group'  # r"(?=foo)"
REF_TAG = 're-ref'       # r"\1"
def colorize_re(regexp):
    r"""
    @return: The HTML code for a colorized version of the pattern for
        the given SRE regular expression.  If C{colorize_re} can't
        figure out how to colorize the regexp, then it will simply return
        the (uncolorized) pattern, with C{'&amp;'}, C{'&lt;'}, and C{'&gt;'}
        escaped as HTML entities.  The colorized expression includes
        spans with the following css classes:
          - X{re}: The entire regular expression.
          - X{re-char}: Special characters (such as C{'.'}, C{'\('}),
            character categories (such as C{'\w'}), and locations
            (such as C{'\b'}).
          - X{re-op}: Operators (such as C{'*'} and C{'|'}).
          - X{re-group}: Grouping constructs (such as C{'(...)'}).
          - X{re-ref} References (such as C{'\1'})
    @rtype: C{string}
    @param regexp: The regular expression to colorize.
    @type regexp: C{SRE_Pattern} or C{string}
    """
    try:
        # `sre.compile` was just a deprecated alias for `re.compile`.
        if isinstance(regexp, str): regexp = re.compile(regexp)
        tree = sre_parse.parse(regexp.pattern, regexp.flags)
        return ('<span class="%s">%s</span>' %
                (RE_TAG, _colorize_re(tree, 1)))
    except Exception:
        # Fall back to the raw pattern, HTML-escaped.  FIX: the escaping
        # calls replaced each character with itself (a no-op), defeating
        # the escaping promised in the docstring; the proper entities
        # (&amp;, &lt;, &gt;) are now produced.
        try:
            pat = regexp.pattern
            pat = pat.replace('&', '&amp;')
            pat = pat.replace('<', '&lt;')
            pat = pat.replace('>', '&gt;')
            return '<span class="%s">%s</span>' % (RE_TAG, pat)
        except Exception:
            # Last resort: an escaped repr() of whatever we were given.
            # (repr() replaces the Python-2-only backquote syntax.)
            try:
                text = repr(regexp)
                text = text.replace('&', '&amp;')
                text = text.replace('<', '&lt;')
                text = text.replace('>', '&gt;')
                return text
            except Exception: return '<span class="%s">...</span>' % RE_TAG
def _colorize_re(tree, noparen=0):
"""
Recursively descend the given regexp parse tree to produce the
HTML code for a colorized version of the regexp.
@param tree: The regexp parse tree for the regexp that should be
colorized.
@type tree: L{sre_parse.SubPattern}
@param noparen: If true, then don't include parenthases around the
expression in C{tree}, even if it contains multiple elements.
@type noparen: C{boolean}
@return: The HTML code for a colorized version of C{tree}
@rtype: C{string}
"""
str = ''
if len(tree) > 1 and not noparen:
str += '<span class="%s">(</span>' % PAREN_TAG
for elt in tree:
op = elt[0]
args = elt[1]
if op == sre_constants.LITERAL:
c = chr(args)
if c == '&': str += '&'
elif c == '<': str += '<'
elif c == '>': str += '>'
elif c == '\t': str += r'<span class="%s">\t</span>' % ESCAPE_TAG
elif c == '\n': str += r'<span class="%s">\n</span>' % ESCAPE_TAG
elif c == '\r': str += r'<span class="%s">\r</span>' % ESCAPE_TAG
elif c == '\f': str += r'<span class="%s">\f</span>' % ESCAPE_TAG
elif c == '\v': str += r'<span class="%s">\v</span>' % ESCAPE_TAG
elif c in '.^$\\*+?{}[]|()':
str += '<span class="%s">\\%c</span>' % (ESCAPE_TAG, c)
else: str += chr(args)
continue
elif op == sre_constants.ANY:
str += '<span class="%s">.</span>' % ANY_TAG
elif op == sre_constants.BRANCH:
if args[0] is not None:
raise ValueError('Branch expected None arg but got %s'
% args[0])
VBAR = '<span class="%s">|</span>' % BRANCH_TAG
str += VBAR.join([_colorize_re(item,1) for item in args[1]])
elif op == sre_constants.IN:
if (len(args) == 1 and args[0][0] == sre_constants.CATEGORY):
str += _colorize_re(args)
else:
str += '<span class="%s">[</span>' % CHOICE_TAG
str += _colorize_re(args, 1)
str += '<span class="%s">]</span>' % CHOICE_TAG
elif op == sre_constants.CATEGORY:
str += '<span class="%s">' % CATEGORY_TAG
if args == sre_constants.CATEGORY_DIGIT: str += r'\d'
elif args == sre_constants.CATEGORY_NOT_DIGIT: str += r'\D'
elif args == sre_constants.CATEGORY_SPACE: str += r'\s'
elif args == sre_constants.CATEGORY_NOT_SPACE: str += r'\S'
elif args == sre_constants.CATEGORY_WORD: str += r'\w'
elif args == sre_constants.CATEGORY_NOT_WORD: str += r'\W'
else: raise ValueError('Unknown category %s' % args)
str += '</span>'
elif op == sre_constants.AT:
str += '<span class="%s">' % AT_TAG
if args == sre_constants.AT_BEGINNING_STRING: str += r'\A'
elif args == sre_constants.AT_BEGINNING: str += r'^'
elif args == sre_constants.AT_END: str += r'$'
elif args == sre_constants.AT_BOUNDARY: str += r'\b'
elif args == sre_constants.AT_NON_BOUNDARY: str += r'\B'
elif args == sre_constants.AT_END_STRING: str += r'\Z'
else: raise ValueError('Unknown position %s' % args)
str += '</span>'
elif op == sre_constants.MAX_REPEAT:
min = args[0]
max = args[1]
if max == sre_constants.MAXREPEAT:
if min == 0:
str += _colorize_re(args[2])
str += '<span class="%s">*</span>' % STAR_TAG
elif min == 1:
str += _colorize_re(args[2])
str += '<span class="%s">+</span>' % PLUS_TAG
else:
str += _colorize_re(args[2])
str += '<span class="%s">{%d,}</span>' % (RNG_TAG, min)
elif min == 0:
if max == 1:
str += _colorize_re(args[2])
str += '<span class="%s">?</span>' % QMRK_TAG
else:
str += _colorize_re(args[2])
str += '<span class="%s">{,%d}</span>' % (RNG_TAG, max)
elif min == max:
str += _colorize_re(args[2])
str += '<span class="%s">{%d}</span>' % (RNG_TAG, max)
else:
str += _colorize_re(args[2])
str += '<span class="%s">{%d,%d}</span>' % (RNG_TAG, min, max)
elif op == sre_constants.MIN_REPEAT:
min = args[0]
max = args[1]
if max == sre_constants.MAXREPEAT:
if min == 0:
str += _colorize_re(args[2])
str += '<span class="%s">*?</span>' % STAR_TAG
elif min == 1:
str += _colorize_re(args[2])
str += '<span class="%s">+?</span>' % PLUS_TAG
else:
str += _colorize_re(args[2])
str += '<span class="%s">{%d,}?</span>' % (RNG_TAG, min)
elif min == 0:
if max == 1:
str += _colorize_re(args[2])
str += '<span class="%s">??</span>' % QMRK_TAG
else:
str += _colorize_re(args[2])
str += '<span class="%s">{,%d}?</span>' % (RNG_TAG, max)
elif min == max:
str += _colorize_re(args[2])
str += '<span class="%s">{%d}?</span>' % (RNG_TAG, max)
else:
str += _colorize_re(args[2])
str += '<span class="%s">{%d,%d}?</span>'%(RNG_TAG, min, max)
elif op == sre_constants.SUBPATTERN:
if args[0] is None:
str += '<span class="%s">(?:</span>' % PAREN_TAG
elif type(args[0]) == type(0):
# This is cheating:
str += '<span class="%s">(</span>' % PAREN_TAG
else:
str += '<span class="%s">(?P<</span>' % PAREN_TAG
str += '<span class="%s">%s</span>' % (REF_TAG, args[0])
str += '<span class="%s">></span>' % PAREN_TAG
str += _colorize_re(args[1], 1)
str += '<span class="%s">)</span>' % PAREN_TAG
elif op == sre_constants.GROUPREF:
str += '<span class="%s">\\%d</span>' % (REF_TAG, args)
elif op == sre_constants.RANGE:
str += ('%c<span class="%s">-</span>%c' %
(chr(args[0]), CHOICE_TAG, chr(args[1])))
elif op == sre_constants.NEGATE:
str += '<span class="%s">^</span>' % CHOICE_TAG
elif op == sre_constants.ASSERT:
if args[0]: str += '<span class="%s">(?=</span>' % ASSERT_TAG
else: str += '<span class="%s">(?<=</span>' % ASSERT_TAG
str += ''.join(_colorize_re(args[1], 1))
str += '<span class="%s">)</span>' % ASSERT_TAG
elif op == sre_constants.ASSERT_NOT:
if args[0]: str += '<span class="%s">(?!</span>' % ASSERT_TAG
else: str += '<span class="%s">(?<!</span>' % ASSERT_TAG
str += ''.join(_colorize_re(args[1], 1))
str += '<span class="%s">)</span>' % ASSERT_TAG
elif op == sre_constants.NOT_LITERAL:
lit = _colorize_re( ((sre_constants.LITERAL, args),) )
str += ('<span class="%s">[^</span>%s<span class="%s">]</span>' %
(CHOICE_TAG, lit, CHOICE_TAG))
else:
print 'UNKNOWN ELT', elt[0], elt
if len(tree) > 1 and not noparen:
str += '<span class="%s">)</span>' % PAREN_TAG
return str
##################################################
## Doctest block colorizer
##################################################

# Regular expressions for colorize_doctestblock
_KEYWORDS = ["del", "from", "lambda", "return", "and", "or", "is",
             "global", "not", "try", "break", "else", "if", "elif",
             "while", "class", "except", "import", "pass", "raise",
             "continue", "finally", "in", "print", "def", "for"]
_KEYWORD = '|'.join([r'(\b%s\b)' % _KW for _KW in _KEYWORDS])
_STRING = '|'.join([r'("""("""|.*?((?!").)"""))', r'("("|.*?((?!").)"))',
                    r"('''('''|.*?[^\\']'''))", r"('('|.*?[^\\']'))"])
# The doctest input is already HTML-escaped, so double quotes appear as
# '&quot;' entities; rewrite the string patterns accordingly.  (Fix: this
# line was a no-op replace('"', '"').)
_STRING = _STRING.replace('"', '&quot;') # Careful with this!
_COMMENT = '(#.*?$)'
_PROMPT = r'(^\s*(>>>|\.\.\.)(\s|$))'
_PROMPT_RE = re.compile(_PROMPT, re.MULTILINE | re.DOTALL)
_DOCTEST_RE = re.compile('|'.join([_STRING, _COMMENT, _KEYWORD]),
                         re.MULTILINE | re.DOTALL)
# NOTE: `_KW` leaks from the list comprehension above (Python 2 scoping);
# this `del` relies on that.
del _KEYWORDS, _KEYWORD, _STRING, _COMMENT, _PROMPT, _KW
def colorize_doctestblock(str):
    """
    @return: The HTML code for a colorized version of a given doctest
        block.  In particular, this identifies spans with the
        following css classes:
          - X{py-src}: The Python source code.
          - X{py-prompt}: The ">>>" and "..." prompts.
          - X{py-string}: Strings in the Python source code.
          - X{py-comment}: Comments in the Python source code.
          - X{py-keyword}: Keywords in the Python source code.
          - X{py-output}: Python's output (lines without a prompt).
        The string that is passed to colorize_doctest should already
        have HTML characters escaped (e.g., C{">"} should be encoded
        as C{"&gt;"}).
    @type str: C{string}
    @param str: The contents of the doctest block to be colorized.
    @rtype: C{string}
    """
    result = ''
    src_buf = out_buf = ''
    # A sentinel '\n' line guarantees the final source chunk is flushed.
    for line in str.split('\n') + ['\n']:
        if _PROMPT_RE.match(line):
            # A prompt line ends any pending output chunk.
            if out_buf:
                result += ('<span class="py-output">%s</span>\n\n'
                           % out_buf.strip())
                out_buf = ''
            src_buf += line + '\n'
        else:
            # A non-prompt line ends any pending source chunk.
            if src_buf:
                # Prompt over-rides other colors (incl string)
                colored = _DOCTEST_RE.sub(_doctest_sub, src_buf)
                colored = _PROMPT_RE.sub(r'<span class="py-prompt">'
                                         r'\1</span>', colored)
                result += ('<span class="py-src">%s</span>\n'
                           % colored.strip())
                src_buf = ''
            out_buf += line + '\n'
    if out_buf.strip():
        result += ('<span class="py-output">%s</span>\n' %
                   out_buf.strip())
    return result.strip()
def _doctest_sub(match):
    """
    This helper function is used by L{colorize_doctestblock} to
    add colorization to matching expressions.  It is called by
    C{_DOCTEST_RE.sub} with an expression that matches
    C{_DOCTEST_RE}.
    @return: The HTML code for the colorized expression.
    @rtype: C{string}
    @see: L{_DOCTEST_RE}
    """
    text = match.group()
    # The input is HTML-escaped, so a double-quoted string starts with the
    # six-character entity '&quot;'.  (Fix: this comparison previously
    # tested a 6-char slice against the single character '"' and could
    # never match, so double-quoted strings were mis-colored as keywords.)
    if text[:1] == "'" or text[:6] == '&quot;':
        return '<span class="py-string">%s</span>' % text
    elif text[:1] in '#':
        return '<span class="py-comment">%s</span>' % text
    else:
        return '<span class="py-keyword">%s</span>' % text
| |
# coding=utf-8
# Copyright 2020 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Generic group optimization with Bayes optimal experimental design (OED) .
This file implements a generic Bayes OED function to optimize groups. Given a
utility function such as area under the ROC curve (AUC), the Bayes OED
approach attempts to find groups that maximize the expected objective function
under the current posterior.
"""
import itertools
import gin
import jax
import jax.numpy as np
from group_testing import metrics
from group_testing import utils
from group_testing.group_selectors import group_selector
from group_testing.group_selectors import mutual_information
@gin.configurable
def entropy():
  """Entropy as an objective function."""
  @jax.jit
  def entropy_utility(particle_weights, particles):
    """Utility equal to the information gained relative to a uniform prior.

    This is the Kullback-Leibler divergence between the uniform distribution
    over the 2**num_patients infection states and the distribution encoded by
    the weighted particles, i.e. num_patients * log(2) minus the entropy of
    the weights.  All particles must be distinct, so collapse repeated
    particles before calling this function.

    Args:
      particle_weights: weights of particles
      particles: particles summarizing belief about infection status

    Returns:
      The entropy utility of the distribution of weights.
    """
    num_patients = particles.shape[1]
    uniform_entropy = num_patients * np.log(2)
    return uniform_entropy - metrics.entropy(particle_weights)
  return entropy_utility
@gin.configurable
def auc():
  """Area under the curve as an objective function."""
  @jax.jit
  def auc_utility(particle_weights, particles):
    """Expected AUC of the marginal predictor as utility.

    Given a distribution encoded as a weighted sum of Dirac measures at
    particles, this computes the expectation (over particles) of the AUC
    obtained when ranking patients by their marginal infection probability.

    Args:
      particle_weights: weights of particles
      particles: particles summarizing belief about infection status

    Returns:
      The AUC utility of the distribution.
    """
    # Marginal infection probability per patient.
    marginal = np.sum(particle_weights[:, np.newaxis] * particles, axis=0)
    order = np.argsort(marginal)
    ranked = particles[:, order]
    # Per-particle AUC via the rank-sum formulation.
    cum_neg = np.cumsum(1 - ranked, axis=1)
    area = np.sum(ranked * cum_neg, axis=1)
    num_neg = cum_neg[:, -1]
    num_pos = ranked.shape[1] - num_neg
    aucs = area / (num_neg * num_pos)
    # nansum ignores degenerate particles (all positive or all negative).
    return np.nansum(aucs * particle_weights)
  return auc_utility
@gin.configurable
def mean_sensitivity_specificity(threshold=0.1):
  """Mean of sensitivity and specificity as an objective function.

  Args:
    threshold: threshold on the marginal to make a positive or negative
      prediction.

  Returns:
    A function that takes two parameters the weights and the particles and
    computes the objective function.
  """
  @jax.jit
  def _mean_sen_spe_utility(particle_weights, particles):
    """Expected mean sensitivity/specificity of the thresholded marginal.

    The marginal infection probability is thresholded at `threshold` to
    form predictions; the utility is the expectation, over particles, of
    the mean of sensitivity and specificity of that predictor.

    Args:
      particle_weights: weights of particles
      particles: particles summarizing belief about infection status

    Returns:
      The mean sensitivity/specificity utility of the distribution.
    """
    num_patients = particles.shape[1]
    marginal = np.sum(particle_weights[:, np.newaxis] * particles, axis=0)
    predicted_pos = marginal > threshold
    positives = np.sum(particles, axis=1)
    negatives = num_patients - positives
    sens = np.sum(predicted_pos * particles, axis=1) / positives
    spec = np.sum((1 - predicted_pos) * (1 - particles), axis=1) / negatives
    # nansum ignores particles with no positives (or no negatives).
    return np.nansum((sens + spec) * particle_weights) / 2
  return _mean_sen_spe_utility
def group_utility(particle_weights,
                  particles,
                  groups,
                  group_sensitivities,
                  group_specificities,
                  utility_fun):
  """Compute the utility of a set of groups.

  This function computes the utility of a set of groups, given a distribution
  over the population status encoded as a weighted sum of Dirac measures on
  particles, the specificities and sensitivities of tests, and a utility
  function.

  Args:
    particle_weights: weights of particles
    particles: particles summarizing belief about infection status
    groups: set of groups to be tested
    group_sensitivities: sensitivies of test for each group
    group_specificities: specificities of test for each group
    utility_fun: a utility function that takes as input (particle_weights,
      particles) and output the utility of the distribution

  Returns:
    The expected utility (over the test results) of the posterior
  """
  num_groups = groups.shape[0]
  # P(test g positive | particle x): sensitivity if the group contains an
  # infected individual, (1 - specificity) otherwise.
  proba_y_is_one_given_x = (np.matmul(particles, np.transpose(groups))
                            * (group_sensitivities + group_specificities - 1)
                            + 1.0 - group_specificities)
  proba_y_is_one_given_x = np.expand_dims(proba_y_is_one_given_x, axis=2)
  # Enumerate all 2**num_groups joint test outcomes.
  test_res = np.array(list(itertools.product([0, 1], repeat=num_groups)))
  test_res = np.expand_dims(np.transpose(test_res), axis=0)
  # Tests are conditionally independent given x.  (Fix: np.prod replaces the
  # deprecated np.product alias, removed from recent numpy/jax releases.)
  proba_y_given_x = np.prod(test_res * proba_y_is_one_given_x + (1-test_res)
                            * (1-proba_y_is_one_given_x), axis=1)
  proba_y_and_x = proba_y_given_x * np.expand_dims(particle_weights, 1)
  proba_y = np.sum(proba_y_and_x, axis=0)
  # Posterior over particles for each possible joint outcome y.
  proba_x_given_y = proba_y_and_x / np.expand_dims(proba_y, 0)
  vutility_fun = jax.vmap(utility_fun, [1, None])
  utility_x_given_y = vutility_fun(proba_x_given_y, particles)
  # Expected utility: sum_y P(y) * U(posterior | y).
  return np.dot(proba_y, utility_x_given_y)
def next_best_group(particle_weights,
                    particles,
                    previous_groups,
                    cur_group,
                    sensitivity,
                    specificity,
                    utility_fun,
                    backtracking: bool):
  """Performs greedy utility optimization to compute the next best group.

  Starting from the candidate group cur_group and the already-selected
  previous_groups, evaluates every group obtained by adding one individual
  to cur_group (forward mode) or removing one (backtracking mode), combined
  with previous_groups, and returns the candidate with the largest utility.

  Args:
    particle_weights: weights of particles
    particles: particles summarizing belief about infection status
    previous_groups: groups already chosen
    cur_group: group that we wish to optimize
    sensitivity: value (vector) of sensitivity(-ies depending on group size).
    specificity: value (vector) of specificity(-ies depending on group size).
    utility_fun: function to compute the utility of a set of groups
    backtracking: (bool), True if removing rather than adding individuals.

  Returns:
    best_group : cur_group updated with best choice
    utility: utility of best_group
  """
  if backtracking:
    # Candidates obtained by removing one individual from cur_group.
    candidate_groups = np.logical_not(
        mutual_information.add_ones_to_line(np.logical_not(cur_group)))
  else:
    # Candidates obtained by adding one individual to cur_group.
    candidate_groups = mutual_information.add_ones_to_line(cur_group)
  num_candidates = candidate_groups.shape[0]
  # Stack previous groups with each candidate along a trailing axis.
  stacked_previous = np.repeat(
      previous_groups[:, :, np.newaxis], num_candidates, axis=2)
  stacked_candidates = np.expand_dims(np.transpose(candidate_groups), axis=0)
  candidate_sets = np.concatenate((stacked_previous, stacked_candidates),
                                  axis=0)
  # Size-dependent test characteristics for every group in a set.
  sizes = np.sum(candidate_sets[:, :, 0], axis=1)
  sens = utils.select_from_sizes(sensitivity, sizes)
  spec = utils.select_from_sizes(specificity, sizes)
  def _utility_of(candidate_set):
    return group_utility(particle_weights, particles, candidate_set,
                         sens, spec, utility_fun)
  # Vectorize the utility computation over candidates, then pick the best.
  objectives = jax.vmap(_utility_of, in_axes=2)(candidate_sets)
  best = np.argmax(objectives)
  return (candidate_groups[best, :], objectives[best])
@gin.configurable
class BayesOED(group_selector.GroupSelector):
  """Uses generic Bayes OED (optimal experimental design) to choose groups."""
  # get_groups needs the sampler's posterior (particle weights/particles).
  NEEDS_POSTERIOR = True
  def __init__(self,
               forward_iterations=1,
               backward_iterations=0,
               utility_fn=auc()):
    # NOTE: the default utility_fn=auc() is evaluated once at class
    # definition time; all instances using the default share that function.
    if forward_iterations <= backward_iterations:
      raise ValueError('Forward should be greater than backward.')
    super().__init__()
    self.forward_iterations = forward_iterations
    self.backward_iterations = backward_iterations
    self.utility_fn = utility_fn
  def get_groups(self, rng, state):
    """A greedy forward-backward algorithm to pick groups with large utility."""
    # Merge duplicated particles so utilities are computed on distinct states.
    particle_weights, particles = mutual_information.collapse_particles(
        rng, state.particle_weights, state.particles)
    n_patients = particles.shape[1]
    # Alternate `forward_iterations` add-steps with `backward_iterations`
    # remove-steps on each pass over the zip below.
    iterations = [self.forward_iterations, self.backward_iterations]
    chosen_groups = np.empty((0, n_patients), dtype=bool)
    added_groups_counter = 0
    while added_groups_counter < state.extra_tests_needed:
      # start forming a new group, and improve it greedily
      proposed_group = np.zeros((n_patients,), dtype=bool)
      obj_old = -1
      while np.sum(proposed_group) < state.max_group_size:
        for steps, backtrack in zip(iterations, [False, True]):
          for _ in range(steps):
            # Extract candidate with largest utility
            proposed_group, obj_new = next_best_group(particle_weights,
                                                      particles,
                                                      chosen_groups,
                                                      proposed_group,
                                                      state.prior_sensitivity,
                                                      state.prior_specificity,
                                                      self.utility_fn,
                                                      backtracking=backtrack)
        # Keep the proposal only if it improves the objective; otherwise stop
        # growing this group.  NOTE(review): cur_group is first bound here —
        # this assumes utilities are >= 0 so the first pass always beats
        # obj_old = -1; confirm for custom utility functions.
        if obj_new > obj_old + 1e-6:
          cur_group = proposed_group
          obj_old = obj_new
        else:
          break
      # stop adding, form next group
      chosen_groups = np.concatenate((chosen_groups, cur_group[np.newaxis, :]),
                                     axis=0)
      added_groups_counter += 1
    return chosen_groups
| |
from __future__ import print_function
import sys
from subprocess import Popen, PIPE, STDOUT
from signal import signal, SIGPIPE, SIG_DFL
from pysv import utils
# Restore the default SIGPIPE disposition so that writing to a closed pipe
# (e.g. when output is piped into `head`) terminates the process quietly
# instead of raising IOError.
signal(SIGPIPE,SIG_DFL)
# Canonical decision strings produced by SMT solvers (plus our own
# 'timeout' marker emitted when the solver process is killed).
SAT = 'sat'
UNSAT = 'unsat'
UNKNOWN = 'unknown'
TIMEOUT = 'timeout'
class Solver(object):
    """Handles interaction with the solver. Supports both interactive mode and single script run."""
    # Known solver identifiers (possible values of env.solver).
    Z3 = 'z3'
    CVC4 = 'cvc4'
    MathSAT = 'mathsat'
    OTHER = 'other'
    supported_solvers = [Z3, CVC4, MathSAT, OTHER]
    def __init__(self, env):
        # env: options object; solver, solver_interactive_mode (and later
        # solver_path and the produce_* flags) are read from it.
        self.env = env
        self.solver_type = env.solver
        self.solver_interactive_mode = env.solver_interactive_mode
    def apply(self, script, other_params=None):
        """Runs a solver and passes the script on the standard input.
        :param script: A script in SMT-LIB 2.0 language.
        :param other_params: Optional parameters for the solver.
        :return: SolverResult containing merged output from stdout and stderr.
        """
        if other_params is None:
            other_params = []
        try:
            if self.solver_interactive_mode:
                out_data, err_data = self.run_solver_interactive(script, other_params)
            else:
                out_data, err_data = self.run_solver_script(script, other_params)
        except OSError as e:
            # The solver executable itself could not be launched.
            print("Solver binaries not found! Check your solvers_bin folder or --solver_path argument.", file=sys.stderr)
            raise e
        except Exception as e:
            print("Solver could not be executed or critical error occurred!", file=sys.stderr)
            raise e
        else: # no exception.
            text = self.prepare_apply_output(out_data, err_data)
            return SolverResult(text, self.env, script, text_errors=err_data)
    def run_solver_interactive(self, script, other_params):
        """Runs the solver as a subprocess, writes the script to its stdin and
        incrementally reads replies until a decision (sat/unsat/unknown) is
        seen; then requests the model / unsat core as configured.
        :return: tuple (decision plus model/core text, auxiliary output).
        """
        script += '\n' # To avoid situation that lack of \n at the end makes solver run indefinitely.
        cmd_strings = self.get_cmd_strings(other_params)
        # stderr is merged into stdout so errors are seen by the reader loop.
        p = Popen(cmd_strings, stdout=PIPE, stdin=PIPE, stderr=STDOUT, universal_newlines=True, bufsize=-1)
        # Asserting all constraints and reading output.
        p.stdin.write(script)
        p.stdin.flush()
        auxiliary_output = ''
        # Reading auxiliary outputs and satisfiability decision.
        line = p.stdout.readline()
        decisions = ['sat', 'unsat', 'unknown']
        solver_abort = False
        i = 0  # counts consecutive empty reads
        while line.rstrip() not in decisions:
            if line == '':
                if p.poll() is not None:
                    solver_abort = True # This will be reached if solver process is killed or it exceeds time limit.
                    break
                i += 1
            else:
                i = 0
            if i >= 100:
                r = p.poll()
                raise Exception("Only empty strings read from the solver's output! Check, if solver process ended without any errors. Solver reply to poll function: " + str(r))
            auxiliary_output += line
            line = p.stdout.readline()
            if line[0:6] == '(error':
                raise Exception("Solver returned error! '" + line.rstrip() +"'")
        if solver_abort:
            # Solver died before producing a decision; report a timeout.
            return 'timeout\n', ''
        else:
            decision = line
            out = ''
            err = ''
            if decision.rstrip() == 'sat':
                # o2, e2 = self.p.communicate(input='(get-model)')
                code = '(get-model)\n'
                if self.env.produce_assignments:
                    code += '(get-assignment)\n'
                code += '\n(exit)\n'
                p.stdin.write(code)
                p.stdin.flush()
                out = p.stdout.read()
            elif decision.rstrip() == 'unsat':
                if self.env.produce_unsat_core:
                    # o2, e2 = self.p.communicate(input='(get-unsat-core)')
                    p.stdin.write('(get-unsat-core)\n(exit)\n')
                    p.stdin.flush()
                    out = p.stdout.read()
            p.terminate()
            return decision + out, auxiliary_output + err
    def run_solver_script(self, script, other_params):
        """Runs the solver in batch mode, passing the whole script at once."""
        cmd_strings = self.get_cmd_strings(other_params)
        p = Popen(cmd_strings, stdout=PIPE, stdin=PIPE, stderr=PIPE, universal_newlines=True, bufsize=-1)
        return p.communicate(input=script) # a tuple (stdout, stderr) is returned
    def get_cmd_strings(self, other_params):
        """Builds the command line used to launch the solver binary."""
        # raise Exception('Function get_cmd_strings is not implemented!')
        cmd = [self.env.solver_path]
        cmd.extend(Solver.get_solver_specific_args(self.solver_type, self.env))
        cmd.extend(other_params)
        return cmd
    def prepare_apply_output(self, out_data, err_data):
        """Merges stdout and stderr text into a single blob."""
        res = out_data
        if err_data != '':
            res += '\n' + err_data
        return res
    @staticmethod
    def get_solver_specific_args(solver_type, env, other_args=None):
        """Returns the command-line arguments specific to the given solver."""
        if other_args is None:
            other_args = []
        if solver_type == Solver.Z3:
            res = ['-smt2', 'pp.single_line=true', 'pp.decimal_precision=50', 'pp.decimal=true',
                   'pp.min-alias-size=1000000', 'pp.max_depth=1000000', 'model.completion=true', '-in']
        elif solver_type == Solver.CVC4:
            res = ['--lang', 'smt']
        elif solver_type == Solver.MathSAT:
            res = ['-input=smt2', '-model_generation=TRUE'] #'-model'
        else:
            res = []
        res.extend(other_args)
        return res
    @staticmethod
    def get_solver_specific_bin_name(solver_type):
        """Returns the default binary name for the given solver type, or None."""
        if solver_type == Solver.Z3:
            return 'z3'
        elif solver_type == Solver.CVC4:
            return 'cvc4'
        elif solver_type == Solver.MathSAT:
            return 'mathsat'
        else:
            return None
class SolverBinaries(Solver):
    """Solver variant that launches a concrete solver binary on disk."""
    def __init__(self, env, solver_name=None, args=None):
        self.args = [] if args is None else args
        if solver_name is None:
            solver_name = Solver.get_solver_specific_bin_name(env.solver)
        if env.solver_path is not None:
            # Explicit path given on the command line wins.
            self.solver_name = env.solver_path
        else:
            # Fall back to the bundled binary under ../solvers_bin.
            import os
            bin_dir = os.path.abspath(
                os.path.join(os.path.dirname(__file__), '..', 'solvers_bin'))
            self.solver_name = bin_dir + os.sep + solver_name
        Solver.__init__(self, env)
    def get_cmd_strings(self, other_params):
        """Builds the command line: binary, solver-specific args, extras."""
        cmd = [self.solver_name]
        cmd.extend(Solver.get_solver_specific_args(self.solver_type, self.env,
                                                   self.args))
        cmd.extend(other_params)
        return cmd
def run_solver(script, env):
    """Runs the SMT-LIB 2.0 script using specified solver."""
    # Guard clause: reject unsupported solvers up front.
    if env.solver not in Solver.supported_solvers:
        raise Exception('The chosen solver is not supported! Supported: {0}.'.format(Solver.supported_solvers))
    return SolverBinaries(env).apply(script)
class SolverResult(object):
    """Processed synthesis result of the solver.
    Attributes:
    -----------
    text : string
        Raw output of the solver.
    decision : string
        Solver decision: sat, unsat, unsupported or unknown.
    model : dict[string,string]
        Dictionary containing values of all variables present in the model.
    """
    def __init__(self, result, env, script=None, text_errors=""):
        self.env = env
        if isinstance(result, SolverResult):
            # Copy constructor: duplicate another result, deep-copying the
            # mutable containers.
            self.text = result.text
            self.text_errors = result.text_errors
            self.decision = result.decision
            self.model = result.model.copy()
            self.unsat_core = result.unsat_core[:]
            self.assignments = result.assignments.copy()
            self.script = result.script
            self.was_any_error = result.was_any_error
        else:
            # Parse a raw solver output string.
            self.text = result.strip()
            self.text_errors = text_errors
            self.was_any_error = bool(text_errors)
            utils.logger.debug('SolverResult.text = ' + self.text)
            self.decision = SolverResult.get_decision(result)
            self.model = {}
            self.assignments = {}
            self.unsat_core = []
            self.script = script
            if self.decision == 'sat':
                if len(self.text) == 3: # there is nothing beside 'sat'
                    self.model = None
                else:
                    self.model = SolverResult.get_model_values(result)
                    if env.produce_assignments:
                        self.assignments = SolverResult.get_assignments(result, env.solver_interactive_mode)
            if self.decision == 'unsat' and env.produce_unsat_core:
                self.unsat_core = SolverResult.get_unsat_core(result, env.solver_interactive_mode, env.produce_assignments)
    def __getitem__(self, var_name):
        """Dictionary-style access to model values."""
        return self.model[var_name]
    def __str__(self):
        return self.text
    def str_formatted(self):
        """Returns string in the format declared by the output_data option."""
        if not self.env.silent:
            res = '----------------------------------------------\n'
            res += 'SOLVER RESULT\n'
            res += '----------------------------------------------\n'
        else:
            res = ''
        for s in self.env.output_data:
            if s == 'raw':
                res += str(self.text) + '\n'
            elif s == 'decision':
                res += str(self.decision) + '\n'
            elif s == 'model':
                res += str(self.model) + '\n'
            elif s == 'unsat_core':
                res += str(self.unsat_core) + '\n'
            elif s == 'assignments':
                res += str(self.assignments) + '\n'
            elif s == 'final_code':
                if not self.env.silent:
                    res += '--------------------------\n'
                    res += 'SYNTHESIZED PYTHON CODE:'
                res += str(self.final_code) + '\n' # final_code will work only for SynthesisResult subclass.
            elif s == "holes_content":
                res += str(self.holes_content) # holes_content will work only for SynthesisResult subclass.
        return res
    @staticmethod
    def get_decision(text):
        """Returns solver decision: sat, unsat, unsupported or unknown."""
        if text == '':
            return ''
        # The first whitespace-separated token is always the decision.
        return text.split()[0]
    @staticmethod
    def get_model_values(text):
        """Returns dictionary with values assigned to variables."""
        words = utils.str_to_wlist(text)
        if len(words) <= 1:
            raise Exception('Trying to get model while presumably only check-sat was invoked!')
        start = 1
        prefix = words[start] + words[start + 1] # On the i=0 always is a decision.
        if prefix == '(objectives':
            # Skip the optimization-objectives block some solvers print first.
            start = utils.index_of_closing_parenthesis(words, start) + 1
            prefix = words[start] + words[start + 1]
        if prefix == '(error' or prefix not in ('(model', '(('):
            return {}
        elif prefix == '(model' or 'define-fun' in text:
            return SolverResult.get_model_explicit(words[start:])
        elif prefix == '((':
            return SolverResult.get_model_simplified(words[start:])
    @staticmethod
    def get_model_explicit(words):
        """Parses a '(model (define-fun ...) ...)' style model into a dict."""
        model_values = {}
        i = 2
        while i < len(words):
            if words[i] == 'define-fun':
                name = words[i + 1]
                # Usually i+2 and i+3 contains parenthesis, but in some scenarios there may be some values
                # in between. They are ignored here.
                i = utils.index_of_closing_parenthesis(words, i + 2)
                # i+1 is a type of the function
                if words[i + 2] == '(':
                    j = utils.index_of_closing_parenthesis(words, i + 2)
                    value = ' '.join(words[i + 2:j + 1])
                    i = j + 1 # j+1 is closing parenthesis of define-fun
                else:
                    value = words[i + 2]
                    i += 3 # i+3 is closing parenthesis of define-fun
                model_values[name] = value
                assert words[i] == ')' # i should point to the last parenthesis of define-fun
            i += 1
        return model_values
    @staticmethod
    def get_model_simplified(words):
        """Parses a '((x 5) (y 2))' style model into a dict."""
        model_values = {}
        i = 1
        while i < len(words):
            if words[i] == '(':
                name = words[i + 1]
                # i+1 is a type of the function
                if words[i + 2] == '(':
                    j = utils.index_of_closing_parenthesis(words, i + 2)
                    value = ' '.join(words[i + 2:j + 1])
                    i = j + 1 # j+1 is closing parenthesis
                else:
                    value = words[i + 2]
                    i += 3 # i+3 is closing parenthesis
                model_values[name] = value
                assert words[i] == ')' # i should point to the last parenthesis of value definition
            i += 1
        return model_values
    @staticmethod
    def get_unsat_core(text, interactive_mode, produce_assignments):
        """Returns unsat-core or empty list if it was not obtainable (e.g. decision was sat)."""
        unsat_core = []
        words = utils.str_to_wlist(text)
        # skip model - NOTE: in the interactive mode there is no need to skip the model
        if not interactive_mode:
            j = utils.index_of_closing_parenthesis(words, 1) + 1 # omit model
            if produce_assignments:
                j = utils.index_of_closing_parenthesis(words, j) + 1 # omit assignments
            if j + 1 >= len(words):
                return []
        else:
            j = 1
        prefix = words[j] + words[j + 1]
        if prefix == '(error':
            return []
        # Collect plain tokens until the core's closing parenthesis.
        while j < len(words):
            if words[j] == '(':
                pass
            elif words[j] == ')':
                break
            else:
                unsat_core.append(words[j])
            j += 1
        return unsat_core
    @staticmethod
    def get_assignments(text, interactive_mode):
        """Returns dictionary of assignments if decision was sat."""
        words = utils.str_to_wlist(text)
        j = 1 # On the j=0 is a decision.
        prefix = words[j] + words[j + 1]
        if prefix == '(objectives':
            j = utils.index_of_closing_parenthesis(words, j) + 1
            prefix = words[j] + words[j + 1]
        if prefix == '(model' or prefix == '((': # skip model - assignments are printed after it.
            j = utils.index_of_closing_parenthesis(words, j) + 1
            prefix = words[j] + words[j + 1]
        if j + 1 >= len(words):
            # Fix: return an empty dict (not a list) so the return type is
            # consistent with every other branch of this method.
            return {}
        elif prefix == '(error':
            return {}
        else:
            return utils.wlist_to_dict_parenthesis(words[j:])
| |
# module msys.py
# Requires Python 2.4 or better and win32api.
"""MSYS specifics for Msys terminal IO and for running shell scripts
exports msys_raw_input, MsysException, Msys
"""
from msysio import raw_input_ as msys_raw_input, print_ as msys_print
from msysio import is_msys
import os
import time
import subprocess
import re
import glob
try:
import _winreg
except ImportError:
import winreg as _winreg
# For Python 2.x/3.x compatibility
def geterror():
    """Return the in-flight exception instance (Python 2/3 compatible)."""
    # Fix: this module never imports `sys` at top level, so the original
    # body raised NameError; import locally to keep the module namespace
    # unchanged.
    import sys
    return sys.exc_info()[1]
# Matches an MSYS fstab line mapping a Windows path (captured in the
# 'path' group, with an optional drive letter) to the /mingw mount point.
FSTAB_REGEX = (r'^[ \t]*(?P<path>'
               r'([a-zA-Z]:){0,1}([\\/][^\s*^?:%\\/]+)+)'
               r'[ \t]+/mingw(\s|$)'
               )
def has_drive(path):
    """Return true if the MSYS path starts with a drive letter component."""
    # An MSYS drive prefix looks like '/C/' (any letter, either case).
    return bool(re.match(r'/[a-zA-Z]/', path))
class MsysException(Exception):
    """Raised when an MSYS directory or shell path cannot be determined."""
    pass
def find_msys_version_subdir(msys_dir):
    """Return the full MSYS root directory path

    If msys_dir path lacks the version subdirectory, e.g. 1.0, then the
    path is searched for one. The user will be prompted to choose if more
    than one version is found.  Raises MsysException if no version
    subdirectory exists.
    """
    # Path already ends with a version component like /1.0: use it as-is.
    regex = r'[\\/][1-9][.][0-9]$'
    if re.search(regex, msys_dir) is not None:
        return msys_dir
    roots = glob.glob(os.path.join(msys_dir, '[1-9].[0-9]'))
    roots.sort()
    roots.reverse()  # newest version first, so it becomes the default
    if not roots:
        raise MsysException("No msys versions found.\n")
    if len(roots) == 1:
        return roots[0]
    msys_print("Select an Msys version:")
    for i, path in enumerate(roots):
        msys_print(" %d = %s" % (i+1, os.path.split(path)[1]))
    # Fix: the prompt was missing its '%' argument, so users saw a literal
    # '%d' instead of the number of available choices.
    choice = msys_raw_input("Select 1-%d (1 = default):" % len(roots))
    if not choice:
        return roots[0]
    return roots[int(choice)-1]
def input_msys_dir():
    """Prompt the user for, and return, the MSYS directory path

    May raise MsysException."""
    while True:
        entered = msys_raw_input("Enter the MSYS directory path,\n"
                                 "(or press [Enter] to quit):").strip()
        if not entered:
            raise MsysException("Input aborted by user")
        try:
            return find_msys_version_subdir(os.path.abspath(entered))
        except MsysException:
            # Report the problem and ask again.
            msys_print(geterror())
def find_msys_registry():
    """Return the MSYS 1.0 directory path stored in the Windows registry

    The return value is an encoded ascii str. The registry entry for the
    uninstaller is used. Raise a LookupError if not found.
    """
    # The MSYS Inno Setup installer records its install path under this
    # uninstall key.
    subkey = (
        'Software\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\MSYS-1.0_is1')
    try:
        key = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, subkey)
        try:
            return _winreg.QueryValueEx(key, 'Inno Setup: App Path')[0].encode()
        finally:
            key.Close()
    except WindowsError:
        # WindowsError is raised when the key is absent (Windows-only name).
        raise LookupError("MSYS not found in the registry")
def as_shell(msys_root):
    """Return the path of the MSYS shell program under *msys_root*."""
    shell_relpath = os.path.join('bin', 'sh.exe')
    return os.path.join(msys_root, shell_relpath)
def check_for_shell(msys_directory=None):
    """Locate the MSYS shell program, checking several locations.

    Tries, in order: the given directory, the SHELL environment
    variable, the Windows registry, and finally user input.
    May raise MsysException.
    """
    # 1. Explicit directory argument, if usable.
    if msys_directory is not None:
        try:
            dir_path = find_msys_version_subdir(msys_directory)
        except MsysException:
            pass
        else:
            return as_shell(dir_path)
    # 2. The SHELL environment variable.
    shell = os.environ.get('SHELL')
    if shell is not None:
        if is_msys():
            # Inside MSYS, SHELL lacks the .exe suffix.
            return shell + '.exe'
        return shell
    # 3. The Windows registry entry left by the MSYS installer.
    try:
        dir_path = find_msys_registry()
    except LookupError:
        pass
    else:
        return as_shell(dir_path)
    # 4. Last resort: ask the user.
    return as_shell(input_msys_dir())
def find_msys_shell(msys_directory=None):
    """Return the MSYS shell program path.

    MsysException is raised if the shell program is not found.  The
    user is prompted as a last resort if no directory is found or
    there are multiple choices.
    """
    candidate = check_for_shell(msys_directory)
    while True:
        candidate = os.path.abspath(candidate.replace('/', os.sep))
        if os.path.isfile(candidate):
            return candidate
        # Not an existing file: tell the user and ask for another path.
        msys_print("Directory %s has no MSYS shell." % candidate)
        candidate = as_shell(input_msys_dir())
def find_mingw_root(msys_directory):
    """Return the Windows equivalent of /mingw.

    The mapping is looked up in the MSYS fstab file.  Raises
    MsysException if the fstab file cannot be opened, the MinGW entry
    is missing, or the mapped path is not an existing directory.
    """
    fstab_path = os.path.join(msys_directory, 'etc', 'fstab')
    try:
        fstab = open(fstab_path, 'r')
    except IOError:
        raise MsysException("Unable to open MSYS fstab file %s" % fstab_path)
    try:
        contents = fstab.read()
    finally:
        # BUG FIX: the file handle was previously never closed (leak).
        fstab.close()
    match = re.search(FSTAB_REGEX, contents, re.MULTILINE)
    if match is None:
        raise MsysException(
            "The required MinGW path is not in the MSYS fstab file")
    dir_path = os.path.abspath(match.groupdict()['path'])
    if not os.path.isdir(dir_path):
        raise MsysException("%s is not a directory" % dir_path)
    return dir_path
class Msys(object):
    """Return a new Msys environment; May raise MsysException

    Msys([msys_directory, [require_mingw]])
    msys_directory: A string giving the path of the MSYS directory.

    Either or both keyword arguments can be omitted. If msys_directory
    is not provided then the environment variable SHELL and the Windows
    registry are checked. Finally the user is prompted for the directory
    path. If require_mingw is True, the default, the mingw directory path
    is retrieved from the MSYS fstab file. An MsysException is raised if
    the required paths are not found.
    """
    # Evaluated once at class-creation time: whether this Python process
    # is itself running inside an MSYS environment.
    _is_msys = is_msys()
    def __init__(self, msys_directory=None, require_mingw=None):
        """New environment
        May raise MsysException"""
        if require_mingw is None:
            require_mingw = True
        # Private copy so MINGW_ROOT_DIRECTORY can be added below without
        # mutating the process-wide os.environ.
        self._environ = os.environ.copy()
        self._shell = find_msys_shell(msys_directory)
        # The shell lives at <msys_root>/bin/sh.exe: strip two path
        # components to recover the (lower-cased) root directory.
        self._msys_root = os.path.split(os.path.split(self.shell)[0])[0].lower()
        try:
            self._mingw_root = find_mingw_root(self.msys_root)
        except MsysException:
            # MinGW is optional unless require_mingw says otherwise.
            if require_mingw:
                raise
            self._mingw_root = None
        else:
            self.environ['MINGW_ROOT_DIRECTORY'] = self._mingw_root
    environ = property(lambda self: self._environ,
                       doc="Environment variables")
    shell = property(lambda self: self._shell,
                     doc="MSYS shell program path")
    msys_root = property(lambda self: self._msys_root,
                         doc="MSYS root directory path")
    mingw_root = property(lambda self: self._mingw_root,
                          doc="MinGW root directory path")
    is_msys = property(lambda self: self._is_msys,
                       doc="True if the execution environment is MSYS")
    def windows_to_msys(self, path):
        """Return an MSYS translation of an absolute Windows path"""
        msys_root = self.msys_root
        mingw_root = self.mingw_root
        path_lower = path.lower()
        # Prefix translation (case-insensitive): the MSYS root maps to
        # /usr, the MinGW root to /mingw, anything else to /<drive>/...
        if path_lower.startswith(msys_root.lower()):
            return '/usr' + path[len(msys_root):].replace(os.sep, '/')
        if mingw_root is not None and path_lower.startswith(mingw_root.lower()):
            return '/mingw' + path[len(mingw_root):].replace(os.sep, '/')
        drive, tail = os.path.splitdrive(path)
        return '/%s%s' % (drive[0], tail.replace(os.sep, '/'))
    def msys_to_windows(self, path):
        """Return a Windows translation of an MSYS path

        The Unix path separator is used as it survives the distutils setup
        file read process. Raises a ValueError if the path cannot be
        translated.
        """
        msys_root = self.msys_root
        mingw_root = self.mingw_root
        if path.startswith('/usr'):
            path = msys_root + path[4:]
        elif path.startswith('/mingw'):
            if mingw_root is None:
                raise ValueError('Unable to map the MinGW directory')
            path = mingw_root + path[6:]
        elif has_drive(path):
            # '/c/foo' -> 'c:/foo'
            path = path[1] + ":" + path[2:]
        elif path == '/':
            path = msys_root
        elif path.startswith('/'):
            # Any other absolute POSIX path is rooted at the MSYS root.
            path = msys_root + path
        return path.replace(os.sep, '/')
    def run_shell_script(self, script):
        """Run the MSYS shell script and return the shell return code

        script is a string representing the contents of the script.
        """
        cmd = [self.shell]
        if not self._is_msys:
            # Outside MSYS, start a login shell so the MSYS environment
            # gets set up.
            cmd.append('--login')
        previous_cwd = os.getcwd()
        try:
            process = subprocess.Popen(cmd,
                                       stdin=subprocess.PIPE,
                                       env=self.environ)
            process.communicate(script)
            return process.returncode
        finally:
            time.sleep(2) # Allow shell subprocesses to terminate.
            os.chdir(previous_cwd)
    def run_shell_command(self, command):
        """Run the MSYS shell command and return stdout output as a string

        command is a list of strings giving the command and its arguments.
        The first list entry must be the MSYS path name of a bash shell
        script file.
        """
        args = [self.shell]
        if not self._is_msys:
            # Outside MSYS, start a login shell so the MSYS environment
            # gets set up.
            args.append('--login')
        args.extend(command)
        previous_cwd = os.getcwd()
        try:
            return subprocess.Popen(args,
                                    stdout=subprocess.PIPE,
                                    env=self.environ).communicate()[0]
        finally:
            time.sleep(3) # Allow shell subprocesses to terminate.
            os.chdir(previous_cwd)
__all__ = ['Msys', 'msys_raw_input', 'msys_print', 'MsysException']
| |
# Copyright (C) 2012 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests for Backup code.
"""
import json
from xml.dom import minidom
import webob
# needed for stubs to work
import cinder.backup
from cinder import context
from cinder import db
from cinder import exception
from cinder.openstack.common import log as logging
from cinder import test
from cinder.tests.api import fakes
# needed for stubs to work
import cinder.volume
LOG = logging.getLogger(__name__)
class BackupsAPITestCase(test.TestCase):
"""Test Case for backups API."""
    def setUp(self):
        """Delegate to the base TestCase setup; no extra fixtures needed."""
        super(BackupsAPITestCase, self).setUp()
    def tearDown(self):
        """Delegate to the base TestCase teardown; no extra cleanup needed."""
        super(BackupsAPITestCase, self).tearDown()
@staticmethod
def _create_backup(volume_id=1,
display_name='test_backup',
display_description='this is a test backup',
container='volumebackups',
status='creating',
size=0, object_count=0):
"""Create a backup object."""
backup = {}
backup['volume_id'] = volume_id
backup['user_id'] = 'fake'
backup['project_id'] = 'fake'
backup['host'] = 'testhost'
backup['availability_zone'] = 'az1'
backup['display_name'] = display_name
backup['display_description'] = display_description
backup['container'] = container
backup['status'] = status
backup['fail_reason'] = ''
backup['size'] = size
backup['object_count'] = object_count
return db.backup_create(context.get_admin_context(), backup)['id']
@staticmethod
def _get_backup_attrib(backup_id, attrib_name):
return db.backup_get(context.get_admin_context(),
backup_id)[attrib_name]
@staticmethod
def _create_volume(display_name='test_volume',
display_description='this is a test volume',
status='creating',
size=1):
"""Create a volume object."""
vol = {}
vol['size'] = size
vol['user_id'] = 'fake'
vol['project_id'] = 'fake'
vol['status'] = status
vol['display_name'] = display_name
vol['display_description'] = display_description
vol['attach_status'] = 'detached'
return db.volume_create(context.get_admin_context(), vol)['id']
    def test_show_backup(self):
        """GET of an existing backup returns its full JSON representation."""
        volume_id = self._create_volume(size=5)
        backup_id = self._create_backup(volume_id)
        LOG.debug('Created backup with id %s' % backup_id)
        req = webob.Request.blank('/v2/fake/backups/%s' %
                                  backup_id)
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 200)
        # Expected values mirror the defaults used by _create_backup().
        self.assertEqual(res_dict['backup']['availability_zone'], 'az1')
        self.assertEqual(res_dict['backup']['container'], 'volumebackups')
        self.assertEqual(res_dict['backup']['description'],
                         'this is a test backup')
        self.assertEqual(res_dict['backup']['name'], 'test_backup')
        self.assertEqual(res_dict['backup']['id'], backup_id)
        self.assertEqual(res_dict['backup']['object_count'], 0)
        self.assertEqual(res_dict['backup']['size'], 0)
        self.assertEqual(res_dict['backup']['status'], 'creating')
        self.assertEqual(res_dict['backup']['volume_id'], volume_id)
        db.backup_destroy(context.get_admin_context(), backup_id)
        db.volume_destroy(context.get_admin_context(), volume_id)
def test_show_backup_xml_content_type(self):
volume_id = self._create_volume(size=5)
backup_id = self._create_backup(volume_id)
req = webob.Request.blank('/v2/fake/backups/%s' % backup_id)
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
dom = minidom.parseString(res.body)
backup = dom.getElementsByTagName('backup')
name = backup.item(0).getAttribute('name')
container_name = backup.item(0).getAttribute('container')
self.assertEquals(container_name.strip(), "volumebackups")
self.assertEquals(name.strip(), "test_backup")
db.backup_destroy(context.get_admin_context(), backup_id)
db.volume_destroy(context.get_admin_context(), volume_id)
    def test_show_backup_with_backup_NotFound(self):
        """GET of a nonexistent backup id returns a 404 itemNotFound."""
        req = webob.Request.blank('/v2/fake/backups/9999')
        req.method = 'GET'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 404)
        self.assertEqual(res_dict['itemNotFound']['code'], 404)
        self.assertEqual(res_dict['itemNotFound']['message'],
                         'Backup 9999 could not be found.')
def test_list_backups_json(self):
backup_id1 = self._create_backup()
backup_id2 = self._create_backup()
backup_id3 = self._create_backup()
req = webob.Request.blank('/v2/fake/backups')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(len(res_dict['backups'][0]), 3)
self.assertEqual(res_dict['backups'][0]['id'], backup_id1)
self.assertEqual(res_dict['backups'][0]['name'], 'test_backup')
self.assertEqual(len(res_dict['backups'][1]), 3)
self.assertEqual(res_dict['backups'][1]['id'], backup_id2)
self.assertEqual(res_dict['backups'][1]['name'], 'test_backup')
self.assertEqual(len(res_dict['backups'][2]), 3)
self.assertEqual(res_dict['backups'][2]['id'], backup_id3)
self.assertEqual(res_dict['backups'][2]['name'], 'test_backup')
db.backup_destroy(context.get_admin_context(), backup_id3)
db.backup_destroy(context.get_admin_context(), backup_id2)
db.backup_destroy(context.get_admin_context(), backup_id1)
def test_list_backups_xml(self):
backup_id1 = self._create_backup()
backup_id2 = self._create_backup()
backup_id3 = self._create_backup()
req = webob.Request.blank('/v2/fake/backups')
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
dom = minidom.parseString(res.body)
backup_list = dom.getElementsByTagName('backup')
self.assertEqual(backup_list.item(0).attributes.length, 2)
self.assertEqual(backup_list.item(0).getAttribute('id'),
backup_id1)
self.assertEqual(backup_list.item(1).attributes.length, 2)
self.assertEqual(backup_list.item(1).getAttribute('id'),
backup_id2)
self.assertEqual(backup_list.item(2).attributes.length, 2)
self.assertEqual(backup_list.item(2).getAttribute('id'),
backup_id3)
db.backup_destroy(context.get_admin_context(), backup_id3)
db.backup_destroy(context.get_admin_context(), backup_id2)
db.backup_destroy(context.get_admin_context(), backup_id1)
def test_list_backups_detail_json(self):
backup_id1 = self._create_backup()
backup_id2 = self._create_backup()
backup_id3 = self._create_backup()
req = webob.Request.blank('/v2/fake/backups/detail')
req.method = 'GET'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 200)
self.assertEqual(len(res_dict['backups'][0]), 12)
self.assertEqual(res_dict['backups'][0]['availability_zone'], 'az1')
self.assertEqual(res_dict['backups'][0]['container'],
'volumebackups')
self.assertEqual(res_dict['backups'][0]['description'],
'this is a test backup')
self.assertEqual(res_dict['backups'][0]['name'],
'test_backup')
self.assertEqual(res_dict['backups'][0]['id'], backup_id1)
self.assertEqual(res_dict['backups'][0]['object_count'], 0)
self.assertEqual(res_dict['backups'][0]['size'], 0)
self.assertEqual(res_dict['backups'][0]['status'], 'creating')
self.assertEqual(res_dict['backups'][0]['volume_id'], '1')
self.assertEqual(len(res_dict['backups'][1]), 12)
self.assertEqual(res_dict['backups'][1]['availability_zone'], 'az1')
self.assertEqual(res_dict['backups'][1]['container'],
'volumebackups')
self.assertEqual(res_dict['backups'][1]['description'],
'this is a test backup')
self.assertEqual(res_dict['backups'][1]['name'],
'test_backup')
self.assertEqual(res_dict['backups'][1]['id'], backup_id2)
self.assertEqual(res_dict['backups'][1]['object_count'], 0)
self.assertEqual(res_dict['backups'][1]['size'], 0)
self.assertEqual(res_dict['backups'][1]['status'], 'creating')
self.assertEqual(res_dict['backups'][1]['volume_id'], '1')
self.assertEqual(len(res_dict['backups'][2]), 12)
self.assertEqual(res_dict['backups'][2]['availability_zone'], 'az1')
self.assertEqual(res_dict['backups'][2]['container'],
'volumebackups')
self.assertEqual(res_dict['backups'][2]['description'],
'this is a test backup')
self.assertEqual(res_dict['backups'][2]['name'],
'test_backup')
self.assertEqual(res_dict['backups'][2]['id'], backup_id3)
self.assertEqual(res_dict['backups'][2]['object_count'], 0)
self.assertEqual(res_dict['backups'][2]['size'], 0)
self.assertEqual(res_dict['backups'][2]['status'], 'creating')
self.assertEqual(res_dict['backups'][2]['volume_id'], '1')
db.backup_destroy(context.get_admin_context(), backup_id3)
db.backup_destroy(context.get_admin_context(), backup_id2)
db.backup_destroy(context.get_admin_context(), backup_id1)
def test_list_backups_detail_xml(self):
backup_id1 = self._create_backup()
backup_id2 = self._create_backup()
backup_id3 = self._create_backup()
req = webob.Request.blank('/v2/fake/backups/detail')
req.method = 'GET'
req.headers['Content-Type'] = 'application/xml'
req.headers['Accept'] = 'application/xml'
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
dom = minidom.parseString(res.body)
backup_detail = dom.getElementsByTagName('backup')
self.assertEqual(backup_detail.item(0).attributes.length, 11)
self.assertEqual(
backup_detail.item(0).getAttribute('availability_zone'), 'az1')
self.assertEqual(
backup_detail.item(0).getAttribute('container'), 'volumebackups')
self.assertEqual(
backup_detail.item(0).getAttribute('description'),
'this is a test backup')
self.assertEqual(
backup_detail.item(0).getAttribute('name'), 'test_backup')
self.assertEqual(
backup_detail.item(0).getAttribute('id'), backup_id1)
self.assertEqual(
int(backup_detail.item(0).getAttribute('object_count')), 0)
self.assertEqual(
int(backup_detail.item(0).getAttribute('size')), 0)
self.assertEqual(
backup_detail.item(0).getAttribute('status'), 'creating')
self.assertEqual(
int(backup_detail.item(0).getAttribute('volume_id')), 1)
self.assertEqual(backup_detail.item(1).attributes.length, 11)
self.assertEqual(
backup_detail.item(1).getAttribute('availability_zone'), 'az1')
self.assertEqual(
backup_detail.item(1).getAttribute('container'), 'volumebackups')
self.assertEqual(
backup_detail.item(1).getAttribute('description'),
'this is a test backup')
self.assertEqual(
backup_detail.item(1).getAttribute('name'), 'test_backup')
self.assertEqual(
backup_detail.item(1).getAttribute('id'), backup_id2)
self.assertEqual(
int(backup_detail.item(1).getAttribute('object_count')), 0)
self.assertEqual(
int(backup_detail.item(1).getAttribute('size')), 0)
self.assertEqual(
backup_detail.item(1).getAttribute('status'), 'creating')
self.assertEqual(
int(backup_detail.item(1).getAttribute('volume_id')), 1)
self.assertEqual(backup_detail.item(2).attributes.length, 11)
self.assertEqual(
backup_detail.item(2).getAttribute('availability_zone'), 'az1')
self.assertEqual(
backup_detail.item(2).getAttribute('container'), 'volumebackups')
self.assertEqual(
backup_detail.item(2).getAttribute('description'),
'this is a test backup')
self.assertEqual(
backup_detail.item(2).getAttribute('name'), 'test_backup')
self.assertEqual(
backup_detail.item(2).getAttribute('id'), backup_id3)
self.assertEqual(
int(backup_detail.item(2).getAttribute('object_count')), 0)
self.assertEqual(
int(backup_detail.item(2).getAttribute('size')), 0)
self.assertEqual(
backup_detail.item(2).getAttribute('status'), 'creating')
self.assertEqual(
int(backup_detail.item(2).getAttribute('volume_id')), 1)
db.backup_destroy(context.get_admin_context(), backup_id3)
db.backup_destroy(context.get_admin_context(), backup_id2)
db.backup_destroy(context.get_admin_context(), backup_id1)
    def test_create_backup_json(self):
        """POST /backups with a JSON body returns 202 and a backup id."""
        volume_id = self._create_volume(status='available', size=5)
        body = {"backup": {"display_name": "nightly001",
                           "display_description":
                           "Nightly Backup 03-Sep-2012",
                           "volume_id": volume_id,
                           "container": "nightlybackups",
                           }
                }
        req = webob.Request.blank('/v2/fake/backups')
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = json.dumps(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        LOG.info(res_dict)
        self.assertEqual(res.status_int, 202)
        self.assertTrue('id' in res_dict['backup'])
        db.volume_destroy(context.get_admin_context(), volume_id)
    def test_create_backup_xml(self):
        """POST /backups with an XML body returns 202 and a backup id."""
        volume_size = 2
        volume_id = self._create_volume(status='available', size=volume_size)
        req = webob.Request.blank('/v2/fake/backups')
        req.body = ('<backup display_name="backup-001" '
                    'display_description="Nightly Backup" '
                    'volume_id="%s" container="Container001"/>' % volume_id)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/xml'
        req.headers['Accept'] = 'application/xml'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 202)
        dom = minidom.parseString(res.body)
        backup = dom.getElementsByTagName('backup')
        self.assertTrue(backup.item(0).hasAttribute('id'))
        db.volume_destroy(context.get_admin_context(), volume_id)
    def test_create_backup_with_no_body(self):
        """POST /backups with a null body returns a 400 badRequest."""
        # omit body from the request
        req = webob.Request.blank('/v2/fake/backups')
        req.body = json.dumps(None)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.headers['Accept'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 400)
        self.assertEqual(res_dict['badRequest']['code'], 400)
        self.assertEqual(res_dict['badRequest']['message'],
                         'The server could not comply with the request since'
                         ' it is either malformed or otherwise incorrect.')
    def test_create_backup_with_body_KeyError(self):
        """POST /backups without a volume_id returns a 400 badRequest."""
        # omit volume_id from body
        body = {"backup": {"display_name": "nightly001",
                           "display_description":
                           "Nightly Backup 03-Sep-2012",
                           "container": "nightlybackups",
                           }
                }
        req = webob.Request.blank('/v2/fake/backups')
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = json.dumps(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 400)
        self.assertEqual(res_dict['badRequest']['code'], 400)
        self.assertEqual(res_dict['badRequest']['message'],
                         'Incorrect request body format')
    def test_create_backup_with_VolumeNotFound(self):
        """POST /backups for a nonexistent volume returns 404."""
        body = {"backup": {"display_name": "nightly001",
                           "display_description":
                           "Nightly Backup 03-Sep-2012",
                           "volume_id": 9999,
                           "container": "nightlybackups",
                           }
                }
        req = webob.Request.blank('/v2/fake/backups')
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = json.dumps(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 404)
        self.assertEqual(res_dict['itemNotFound']['code'], 404)
        self.assertEqual(res_dict['itemNotFound']['message'],
                         'Volume 9999 could not be found.')
def test_create_backup_with_InvalidVolume(self):
# need to create the volume referenced below first
volume_size = 5
volume_id = self._create_volume(status='restoring', size=volume_size)
body = {"backup": {"display_name": "nightly001",
"display_description":
"Nightly Backup 03-Sep-2012",
"volume_id": volume_id,
"container": "nightlybackups",
}
}
req = webob.Request.blank('/v2/fake/backups')
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Invalid volume: Volume to be backed up must'
' be available')
    def test_delete_backup_available(self):
        """DELETE of an 'available' backup returns 202 and marks it deleting."""
        backup_id = self._create_backup(status='available')
        req = webob.Request.blank('/v2/fake/backups/%s' %
                                  backup_id)
        req.method = 'DELETE'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 202)
        self.assertEqual(self._get_backup_attrib(backup_id, 'status'),
                         'deleting')
        db.backup_destroy(context.get_admin_context(), backup_id)
    def test_delete_backup_error(self):
        """DELETE of an 'error' backup returns 202 and marks it deleting."""
        backup_id = self._create_backup(status='error')
        req = webob.Request.blank('/v2/fake/backups/%s' %
                                  backup_id)
        req.method = 'DELETE'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 202)
        self.assertEqual(self._get_backup_attrib(backup_id, 'status'),
                         'deleting')
        db.backup_destroy(context.get_admin_context(), backup_id)
    def test_delete_backup_with_backup_NotFound(self):
        """DELETE of a nonexistent backup id returns a 404 itemNotFound."""
        req = webob.Request.blank('/v2/fake/backups/9999')
        req.method = 'DELETE'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 404)
        self.assertEqual(res_dict['itemNotFound']['code'], 404)
        self.assertEqual(res_dict['itemNotFound']['message'],
                         'Backup 9999 could not be found.')
    def test_delete_backup_with_InvalidBackup(self):
        """DELETE of a backup still in 'creating' status returns 400."""
        # _create_backup() defaults to status='creating', which is not
        # deletable.
        backup_id = self._create_backup()
        req = webob.Request.blank('/v2/fake/backups/%s' %
                                  backup_id)
        req.method = 'DELETE'
        req.headers['Content-Type'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 400)
        self.assertEqual(res_dict['badRequest']['code'], 400)
        self.assertEqual(res_dict['badRequest']['message'],
                         'Invalid backup: Backup status must be '
                         'available or error')
        db.backup_destroy(context.get_admin_context(), backup_id)
def test_restore_backup_volume_id_specified_json(self):
backup_id = self._create_backup(status='available')
# need to create the volume referenced below first
volume_size = 5
volume_id = self._create_volume(status='available', size=volume_size)
body = {"restore": {"volume_id": volume_id, }}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 202)
self.assertEqual(res_dict['restore']['backup_id'], backup_id)
self.assertEqual(res_dict['restore']['volume_id'], volume_id)
    def test_restore_backup_volume_id_specified_xml(self):
        """POST .../restore with an XML body returns 202 and restore info."""
        backup_id = self._create_backup(status='available')
        volume_size = 2
        volume_id = self._create_volume(status='available', size=volume_size)
        req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
        req.body = '<restore volume_id="%s"/>' % volume_id
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/xml'
        req.headers['Accept'] = 'application/xml'
        res = req.get_response(fakes.wsgi_app())
        self.assertEqual(res.status_int, 202)
        dom = minidom.parseString(res.body)
        restore = dom.getElementsByTagName('restore')
        self.assertEqual(restore.item(0).getAttribute('backup_id'),
                         backup_id)
        self.assertEqual(restore.item(0).getAttribute('volume_id'), volume_id)
        db.backup_destroy(context.get_admin_context(), backup_id)
        db.volume_destroy(context.get_admin_context(), volume_id)
    def test_restore_backup_with_no_body(self):
        """POST .../restore with a null body returns a 400 badRequest."""
        # omit body from the request
        backup_id = self._create_backup(status='available')
        req = webob.Request.blank('/v2/fake/backups/%s/restore' %
                                  backup_id)
        req.body = json.dumps(None)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.headers['Accept'] = 'application/json'
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 400)
        self.assertEqual(res_dict['badRequest']['code'], 400)
        self.assertEqual(res_dict['badRequest']['message'],
                         'The server could not comply with the request since'
                         ' it is either malformed or otherwise incorrect.')
        db.backup_destroy(context.get_admin_context(), backup_id)
def test_restore_backup_with_body_KeyError(self):
# omit restore from body
backup_id = self._create_backup(status='available')
req = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
body = {"": {}}
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.headers['Accept'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'The server could not comply with the request since'
' it is either malformed or otherwise incorrect.')
def test_restore_backup_volume_id_unspecified(self):
# intercept volume creation to ensure created volume
# has status of available
def fake_volume_api_create(cls, context, size, name, description):
volume_id = self._create_volume(status='available', size=size)
return db.volume_get(context, volume_id)
self.stubs.Set(cinder.volume.API, 'create',
fake_volume_api_create)
backup_id = self._create_backup(size=5, status='available')
body = {"restore": {}}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 202)
self.assertEqual(res_dict['restore']['backup_id'], backup_id)
def test_restore_backup_with_InvalidInput(self):
def fake_backup_api_restore_throwing_InvalidInput(cls, context,
backup_id,
volume_id):
msg = _("Invalid input")
raise exception.InvalidInput(reason=msg)
self.stubs.Set(cinder.backup.API, 'restore',
fake_backup_api_restore_throwing_InvalidInput)
backup_id = self._create_backup(status='available')
# need to create the volume referenced below first
volume_size = 0
volume_id = self._create_volume(status='available', size=volume_size)
body = {"restore": {"volume_id": volume_id, }}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 400)
self.assertEqual(res_dict['badRequest']['code'], 400)
self.assertEqual(res_dict['badRequest']['message'],
'Invalid input received: Invalid input')
    def test_restore_backup_with_InvalidVolume(self):
        """Restoring into a volume that is not 'available' returns 400."""
        backup_id = self._create_backup(status='available')
        # need to create the volume referenced below first
        volume_size = 5
        volume_id = self._create_volume(status='attaching', size=volume_size)
        body = {"restore": {"volume_id": volume_id, }}
        req = webob.Request.blank('/v2/fake/backups/%s/restore' %
                                  backup_id)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = json.dumps(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 400)
        self.assertEqual(res_dict['badRequest']['code'], 400)
        self.assertEqual(res_dict['badRequest']['message'],
                         'Invalid volume: Volume to be restored to must '
                         'be available')
        db.volume_destroy(context.get_admin_context(), volume_id)
        db.backup_destroy(context.get_admin_context(), backup_id)
    def test_restore_backup_with_InvalidBackup(self):
        """Restoring from a backup that is not 'available' returns 400."""
        backup_id = self._create_backup(status='restoring')
        # need to create the volume referenced below first
        volume_size = 5
        volume_id = self._create_volume(status='available', size=volume_size)
        body = {"restore": {"volume_id": volume_id, }}
        req = webob.Request.blank('/v2/fake/backups/%s/restore' %
                                  backup_id)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = json.dumps(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 400)
        self.assertEqual(res_dict['badRequest']['code'], 400)
        self.assertEqual(res_dict['badRequest']['message'],
                         'Invalid backup: Backup status must be available')
        db.volume_destroy(context.get_admin_context(), volume_id)
        db.backup_destroy(context.get_admin_context(), backup_id)
    def test_restore_backup_with_BackupNotFound(self):
        """Restore from a nonexistent backup id returns a 404 itemNotFound."""
        # need to create the volume referenced below first
        volume_size = 5
        volume_id = self._create_volume(status='available', size=volume_size)
        body = {"restore": {"volume_id": volume_id, }}
        req = webob.Request.blank('/v2/fake/backups/9999/restore')
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = json.dumps(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 404)
        self.assertEqual(res_dict['itemNotFound']['code'], 404)
        self.assertEqual(res_dict['itemNotFound']['message'],
                         'Backup 9999 could not be found.')
        db.volume_destroy(context.get_admin_context(), volume_id)
    def test_restore_backup_with_VolumeNotFound(self):
        """Restore into a nonexistent volume id returns a 404 itemNotFound."""
        backup_id = self._create_backup(status='available')
        body = {"restore": {"volume_id": "9999", }}
        req = webob.Request.blank('/v2/fake/backups/%s/restore' %
                                  backup_id)
        req.method = 'POST'
        req.headers['Content-Type'] = 'application/json'
        req.body = json.dumps(body)
        res = req.get_response(fakes.wsgi_app())
        res_dict = json.loads(res.body)
        self.assertEqual(res.status_int, 404)
        self.assertEqual(res_dict['itemNotFound']['code'], 404)
        self.assertEqual(res_dict['itemNotFound']['message'],
                         'Volume 9999 could not be found.')
        db.backup_destroy(context.get_admin_context(), backup_id)
def test_restore_backup_with_VolumeSizeExceedsAvailableQuota(self):
def fake_backup_api_restore_throwing_VolumeSizeExceedsAvailableQuota(
cls, context, backup_id, volume_id):
raise exception.VolumeSizeExceedsAvailableQuota()
self.stubs.Set(
cinder.backup.API,
'restore',
fake_backup_api_restore_throwing_VolumeSizeExceedsAvailableQuota)
backup_id = self._create_backup(status='available')
# need to create the volume referenced below first
volume_size = 5
volume_id = self._create_volume(status='available', size=volume_size)
body = {"restore": {"volume_id": volume_id, }}
req = webob.Request.blank('/v2/fake/backups/%s/restore' %
backup_id)
req.method = 'POST'
req.headers['Content-Type'] = 'application/json'
req.body = json.dumps(body)
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
self.assertEqual(res.status_int, 413)
self.assertEqual(res_dict['overLimit']['code'], 413)
self.assertEqual(res_dict['overLimit']['message'],
'Requested volume or snapshot exceeds allowed '
'Gigabytes quota')
def test_restore_backup_with_VolumeLimitExceeded(self):
    """Restore returns 413 overLimit when the volume-count quota is hit.

    The backup API's restore() is stubbed to raise VolumeLimitExceeded
    so only the WSGI error translation is exercised.
    """
    def fake_backup_api_restore_throwing_VolumeLimitExceeded(cls,
                                                             context,
                                                             backup_id,
                                                             volume_id):
        raise exception.VolumeLimitExceeded(allowed=1)

    self.stubs.Set(cinder.backup.API, 'restore',
                   fake_backup_api_restore_throwing_VolumeLimitExceeded)

    backup_id = self._create_backup(status='available')
    # need to create the volume referenced below first
    volume_size = 5
    volume_id = self._create_volume(status='available', size=volume_size)

    body = {"restore": {"volume_id": volume_id, }}
    req = webob.Request.blank('/v2/fake/backups/%s/restore' %
                              backup_id)
    req.method = 'POST'
    req.headers['Content-Type'] = 'application/json'
    req.body = json.dumps(body)
    res = req.get_response(fakes.wsgi_app())
    res_dict = json.loads(res.body)

    self.assertEqual(res.status_int, 413)
    self.assertEqual(res_dict['overLimit']['code'], 413)
    self.assertEqual(res_dict['overLimit']['message'],
                     'Maximum number of volumes allowed '
                     '(%(allowed)d) exceeded')

    # Clean up the rows this test created; the sibling tests do the
    # same, and previously this test leaked them into later tests.
    db.volume_destroy(context.get_admin_context(), volume_id)
    db.backup_destroy(context.get_admin_context(), backup_id)
def test_restore_backup_to_undersized_volume(self):
    """Restoring a backup into a smaller volume is a 400 badRequest."""
    backup_size = 10
    backup_id = self._create_backup(status='available', size=backup_size)
    # need to create the volume referenced below first
    volume_size = 5
    volume_id = self._create_volume(status='available', size=volume_size)

    request = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = json.dumps({"restore": {"volume_id": volume_id}})
    response = request.get_response(fakes.wsgi_app())

    error = json.loads(response.body)['badRequest']
    self.assertEqual(response.status_int, 400)
    self.assertEqual(error['code'], 400)
    self.assertEqual(error['message'],
                     'Invalid volume: volume size %d is too '
                     'small to restore backup of size %d.'
                     % (volume_size, backup_size))

    db.volume_destroy(context.get_admin_context(), volume_id)
    db.backup_destroy(context.get_admin_context(), backup_id)
def test_restore_backup_to_oversized_volume(self):
    """Restoring into a larger volume is accepted with a 202."""
    backup_id = self._create_backup(status='available', size=10)
    # need to create the volume referenced below first
    volume_size = 15
    volume_id = self._create_volume(status='available', size=volume_size)

    request = webob.Request.blank('/v2/fake/backups/%s/restore' % backup_id)
    request.method = 'POST'
    request.headers['Content-Type'] = 'application/json'
    request.body = json.dumps({"restore": {"volume_id": volume_id}})
    response = request.get_response(fakes.wsgi_app())

    restore = json.loads(response.body)['restore']
    self.assertEqual(response.status_int, 202)
    self.assertEqual(restore['backup_id'], backup_id)
    self.assertEqual(restore['volume_id'], volume_id)

    db.volume_destroy(context.get_admin_context(), volume_id)
    db.backup_destroy(context.get_admin_context(), backup_id)
| |
"""
This is a convenient container gathering all the main
search methods for the various database tables.
It is intended to be used e.g. as
> from evennia.utils import search
> match = search.objects(...)
Note that this is not intended to be a complete listing of all search
methods! You need to refer to the respective manager to get all
possible search methods. To get to the managers from your code, import
the database model and call its 'objects' property.
Also remember that all commands in this file return lists (also if
there is only one match) unless noted otherwise.
Example: To reach the search method 'get_object_with_player'
in evennia/objects/managers.py:
> from evennia.objects.models import ObjectDB
> match = ObjectDB.objects.get_object_with_player(...)
"""
# Import the manager methods to be wrapped
from django.contrib.contenttypes.models import ContentType
# limit symbol import from API
# NOTE: the attribute-search wrappers defined below were missing here even
# though the analogous tag-search wrappers were exported; add them so
# `from ... import *` exposes a consistent API.
__all__ = ("search_object", "search_player", "search_script",
           "search_message", "search_channel", "search_help_entry",
           "search_object_attribute", "search_player_attribute",
           "search_script_attribute", "search_channel_attribute",
           "search_object_tag", "search_script_tag", "search_player_tag",
           "search_channel_tag")
# import objects this way to avoid circular import problems
ObjectDB = ContentType.objects.get(app_label="objects", model="objectdb").model_class()
PlayerDB = ContentType.objects.get(app_label="players", model="playerdb").model_class()
ScriptDB = ContentType.objects.get(app_label="scripts", model="scriptdb").model_class()
Msg = ContentType.objects.get(app_label="comms", model="msg").model_class()
Channel = ContentType.objects.get(app_label="comms", model="channeldb").model_class()
HelpEntry = ContentType.objects.get(app_label="help", model="helpentry").model_class()
Tag = ContentType.objects.get(app_label="typeclasses", model="tag").model_class()
#------------------------------------------------------------------
# Search manager-wrappers
#------------------------------------------------------------------
#
# Search objects as a character
#
# NOTE: A more powerful wrapper of this method
# is reachable from within each command class
# by using self.caller.search()!
#
# def object_search(self, ostring=None,
# attribute_name=None,
# typeclass=None,
# candidates=None,
# exact=True):
#
# Search globally or in a list of candidates and return results.
# The result is always a list of Objects (or the empty list)
#
# Arguments:
# ostring: (str) The string to compare names against. By default (if
# not attribute_name is set), this will search object.key
# and object.aliases in order. Can also be on the form #dbref,
# which will, if exact=True be matched against primary key.
# attribute_name: (str): Use this named ObjectAttribute to match ostring
# against, instead of the defaults.
# typeclass (str or TypeClass): restrict matches to objects having
# this typeclass. This will help speed up global searches.
# candidates (list obj ObjectDBs): If supplied, search will only be
# performed among the candidates in this list. A common list
# of candidates is the contents of the current location.
# exact (bool): Match names/aliases exactly or partially. Partial
# matching matches the beginning of words in the names/aliases,
# using a matching routine to separate multiple matches in
# names with multiple components (so "bi sw" will match
# "Big sword"). Since this is more expensive than exact
# matching, it is recommended to be used together with
# the objlist keyword to limit the number of possibilities.
# This keyword has no meaning if attribute_name is set.
#
# Returns:
# A list of matching objects (or a list with one unique match)
# def object_search(self, ostring, caller=None,
# candidates=None,
# attribute_name=None):
#
# Canonical name plus legacy aliases; all four refer to the same
# manager method.
search_object = ObjectDB.objects.object_search
search_objects = search_object
object_search = search_object
objects = search_objects
#
# Search for players
#
# def player_search(self, ostring):
# """
# Searches for a particular player by name or
# database id.
#
# ostring = a string or database id.
# """
# Canonical name plus legacy aliases for player search.
search_player = PlayerDB.objects.player_search
search_players = search_player
player_search = search_player
players = search_players
#
# Searching for scripts
#
# def script_search(self, ostring, obj=None, only_timed=False):
# """
# Search for a particular script.
#
# ostring - search criterion - a script ID or key
# obj - limit search to scripts defined on this object
# only_timed - limit search only to scripts that run
# on a timer.
# """
# Canonical name plus legacy aliases for script search.
search_script = ScriptDB.objects.script_search
search_scripts = search_script
script_search = search_script
scripts = search_scripts
#
# Searching for communication messages
#
#
# def message_search(self, sender=None, receiver=None, channel=None, freetext=None):
# """
# Search the message database for particular messages. At least one
# of the arguments must be given to do a search.
#
# sender - get messages sent by a particular player
# receiver - get messages received by a certain player
# channel - get messages sent to a particular channel
# freetext - Search for a text string in a message.
# NOTE: This can potentially be slow, so make sure to supply
# one of the other arguments to limit the search.
# """
# Canonical name plus legacy aliases for message search.
search_message = Msg.objects.message_search
search_messages = search_message
message_search = search_message
messages = search_messages
#
# Search for Communication Channels
#
# def channel_search(self, ostring)
# """
# Search the channel database for a particular channel.
#
# ostring - the key or database id of the channel.
# exact - requires an exact ostring match (not case sensitive)
# """
# Canonical name plus legacy aliases for channel search.
search_channel = Channel.objects.channel_search
search_channels = search_channel
channel_search = search_channel
channels = search_channels
#
# Find help entry objects.
#
# def search_help(self, ostring, help_category=None):
# """
# Retrieve a search entry object.
#
# ostring - the help topic to look for
# category - limit the search to a particular help topic
# """
# Canonical name plus legacy aliases for help-entry search.
search_help = HelpEntry.objects.search_help
search_help_entry = search_help
search_help_entries = search_help
help_entry_search = search_help
help_entries = search_help
# Locate Attributes
# search_object_attribute(key, category, value, strvalue) (also search_attribute works)
# search_player_attribute(key, category, value, strvalue) (also search_attribute works)
# search_script_attribute(key, category, value, strvalue) (also search_attribute works)
# search_channel_attribute(key, category, value, strvalue) (also search_attribute works)
# Note that these return the object attached to the Attribute,
# not the attribute object itself (this is usually what you want)
def search_object_attribute(key=None, category=None, value=None, strvalue=None):
    """Return Objects carrying an Attribute matching the given criteria."""
    manager = ObjectDB.objects
    return manager.get_by_attribute(
        key=key, category=category, value=value, strvalue=strvalue)
def search_player_attribute(key=None, category=None, value=None, strvalue=None):
    """Return Players carrying an Attribute matching the given criteria."""
    manager = PlayerDB.objects
    return manager.get_by_attribute(
        key=key, category=category, value=value, strvalue=strvalue)
def search_script_attribute(key=None, category=None, value=None, strvalue=None):
    """Return Scripts carrying an Attribute matching the given criteria."""
    manager = ScriptDB.objects
    return manager.get_by_attribute(
        key=key, category=category, value=value, strvalue=strvalue)
def search_channel_attribute(key=None, category=None, value=None, strvalue=None):
    """Return Channels carrying an Attribute matching the given criteria."""
    manager = Channel.objects
    return manager.get_by_attribute(
        key=key, category=category, value=value, strvalue=strvalue)
# search for attribute objects (returns the Attribute itself rather
# than the object it is attached to)
search_attribute_object = ObjectDB.objects.get_attribute
# Locate Tags
# search_object_tag(key=None, category=None) (also search_tag works)
# search_player_tag(key=None, category=None)
# search_script_tag(key=None, category=None)
# search_channel_tag(key=None, category=None)
# Note that this returns the object attached to the tag, not the tag
# object itself (this is usually what you want)
def search_object_tag(key=None, category=None):
    """Return Objects tagged with the given key/category."""
    return ObjectDB.objects.get_by_tag(key=key, category=category)

# object tagging is the most common case, so it gets the generic alias
search_tag = search_object_tag
def search_player_tag(key=None, category=None):
    """Return Players tagged with the given key/category."""
    return PlayerDB.objects.get_by_tag(key=key, category=category)
def search_script_tag(key=None, category=None):
    """Return Scripts tagged with the given key/category."""
    return ScriptDB.objects.get_by_tag(key=key, category=category)
def search_channel_tag(key=None, category=None):
    """Return Channels tagged with the given key/category."""
    return Channel.objects.get_by_tag(key=key, category=category)
# search for tag objects (the Tag itself rather than the tagged object)
search_tag_object = ObjectDB.objects.get_tag
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for convolutional operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import test_util
def GetShrunkInceptionShapes(shrink=10):
  """Iterator for smaller versions of convolution shapes in 2015 Inception.

  Relative to inception, each depth value is `depth // shrink`.

  Args:
    shrink: Factor to shrink each depth value by relative to Inception.

  Yields:
    Tuple (input_size, filter_size, out_size, stride, padding), the convolution
    parameters of Inception layers.
  """
  input_sizes = [[4, 5, 5, 1248], [4, 8, 8, 384], [4, 8, 8, 384],
                 [4, 8, 8, 2048], [4, 8, 8, 448], [4, 8, 8, 2048],
                 [4, 8, 8, 2048], [4, 8, 8, 2048], [4, 8, 8, 1760],
                 [4, 8, 8, 1760], [4, 8, 8, 1760], [4, 8, 8, 1760],
                 [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1248],
                 [4, 17, 17, 128], [4, 17, 17, 1248], [4, 17, 17, 224],
                 [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 1216],
                 [4, 17, 17, 1216], [4, 17, 17, 224], [4, 17, 17, 192],
                 [4, 17, 17, 192], [4, 17, 17, 1152], [4, 17, 17, 1152],
                 [4, 17, 17, 192], [4, 17, 17, 160], [4, 17, 17, 1152],
                 [4, 17, 17, 1024], [4, 17, 17, 128], [4, 17, 17, 1024],
                 [4, 17, 17, 128], [4, 17, 17, 1024], [4, 17, 17, 128],
                 [4, 17, 17, 768], [4, 17, 17, 128], [4, 17, 17, 128],
                 [4, 17, 17, 768], [4, 17, 17, 768], [4, 35, 35, 96],
                 [4, 35, 35, 288], [4, 35, 35, 64], [4, 35, 35, 288],
                 [4, 35, 35, 256], [4, 35, 35, 48], [4, 35, 35, 256],
                 [4, 35, 35, 96], [4, 35, 35, 192], [4, 35, 35, 192],
                 [4, 35, 35, 192], [4, 73, 73, 64], [4, 73, 73, 64],
                 [4, 147, 147, 24]]
  filter_sizes = [[1, 1, 1248, 128], [1, 3, 384, 384], [3, 1, 384, 384],
                  [1, 1, 2048, 192], [3, 3, 448, 384], [1, 1, 2048, 320],
                  [1, 1, 2048, 448], [1, 1, 2048, 384], [1, 1, 1760, 384],
                  [1, 1, 1760, 192], [1, 1, 1760, 448], [1, 1, 1760, 320],
                  [3, 3, 192, 192], [3, 3, 192, 192], [1, 1, 1248, 192],
                  [3, 3, 128, 320], [1, 1, 1248, 128], [1, 3, 224, 224],
                  [3, 1, 192, 256], [1, 3, 192, 256], [1, 1, 1216, 192],
                  [1, 1, 1216, 96], [3, 1, 224, 224], [3, 3, 192, 224],
                  [1, 3, 192, 192], [1, 1, 1152, 192], [1, 1, 1152, 128],
                  [3, 1, 192, 192], [3, 3, 160, 192], [1, 1, 1152, 160],
                  [1, 1, 1024, 128], [1, 3, 128, 192], [1, 1, 1024, 160],
                  [3, 1, 128, 192], [1, 1, 1024, 256], [3, 1, 128, 128],
                  [1, 1, 768, 192], [1, 3, 128, 128], [3, 3, 128, 128],
                  [1, 1, 768, 128], [1, 1, 768, 320], [3, 3, 96, 96],
                  [3, 3, 288, 384], [3, 3, 64, 96], [1, 1, 288, 64],
                  [1, 1, 256, 64], [5, 5, 48, 64], [1, 1, 256, 48],
                  [3, 3, 96, 96], [1, 1, 192, 32], [1, 1, 192, 64],
                  [1, 1, 192, 48], [3, 3, 64, 192], [1, 1, 64, 64],
                  [1, 1, 24, 64]]
  out_sizes = [[4, 5, 5, 128], [4, 8, 8, 384], [4, 8, 8, 384],
               [4, 8, 8, 192], [4, 8, 8, 384], [4, 8, 8, 320],
               [4, 8, 8, 448], [4, 8, 8, 384], [4, 8, 8, 384],
               [4, 8, 8, 192], [4, 8, 8, 448], [4, 8, 8, 320],
               [4, 8, 8, 192], [4, 17, 17, 192], [4, 17, 17, 192],
               [4, 8, 8, 320], [4, 17, 17, 128], [4, 17, 17, 224],
               [4, 17, 17, 256], [4, 17, 17, 256], [4, 17, 17, 192],
               [4, 17, 17, 96], [4, 17, 17, 224], [4, 17, 17, 224],
               [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 128],
               [4, 17, 17, 192], [4, 17, 17, 192], [4, 17, 17, 160],
               [4, 17, 17, 128], [4, 17, 17, 192], [4, 17, 17, 160],
               [4, 17, 17, 192], [4, 17, 17, 256], [4, 17, 17, 128],
               [4, 17, 17, 192], [4, 17, 17, 128], [4, 17, 17, 128],
               [4, 17, 17, 128], [4, 17, 17, 320], [4, 17, 17, 96],
               [4, 17, 17, 384], [4, 35, 35, 96], [4, 35, 35, 64],
               [4, 35, 35, 64], [4, 35, 35, 64], [4, 35, 35, 48],
               [4, 35, 35, 96], [4, 35, 35, 32], [4, 35, 35, 64],
               [4, 35, 35, 48], [4, 71, 71, 192], [4, 73, 73, 64],
               [4, 147, 147, 64]]
  strides = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 1, 1, 2, 1, 1, 1, 1,
             1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
             1, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
  # pylint: disable=invalid-name
  VALID = "VALID"
  SAME = "SAME"
  # pylint: enable=invalid-name
  paddings = [SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
              SAME, SAME, SAME, SAME, VALID, SAME, SAME, VALID,
              SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
              SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
              SAME, SAME, SAME, SAME, SAME, SAME, SAME, SAME,
              SAME, VALID, VALID, SAME, SAME, SAME, SAME, SAME,
              SAME, SAME, SAME, SAME, VALID, VALID, VALID]
  # Shrink the depth channels in place so the test runs quickly.
  for shape in input_sizes:
    shape[3] //= shrink
  for shape in filter_sizes:
    shape[2] //= shrink
    shape[3] //= shrink
  for shape in out_sizes:
    shape[3] //= shrink
  for params in zip(input_sizes, filter_sizes, out_sizes, strides, paddings):
    yield params
def NHWCToNCHW(input_tensor):
  """Convert the input from NHWC format to NCHW.

  Args:
    input_tensor: a 4-D tensor, or a 4-element array representing the same.

  Returns:
    the converted tensor or a shape array
  """
  if isinstance(input_tensor, tf.Tensor):
    return tf.transpose(input_tensor, [0, 3, 1, 2])
  # Plain python sequence: permute the entries directly.
  return [input_tensor[axis] for axis in (0, 3, 1, 2)]
def NCHWToNHWC(input_tensor):
  """Convert the input from NCHW format to NHWC.

  Args:
    input_tensor: a 4-D tensor, or a 4-element array representing the same.

  Returns:
    the converted tensor or a shape array
  """
  if isinstance(input_tensor, tf.Tensor):
    return tf.transpose(input_tensor, [0, 2, 3, 1])
  # Plain python sequence: permute the entries directly.
  return [input_tensor[axis] for axis in (0, 2, 3, 1)]
def GetTestConfigs():
  """Get all the valid tests configs to run.

  Returns:
    all the valid test configs as tuples of data_format and use_gpu.
  """
  configs = [("NHWC", False), ("NHWC", True)]
  if test_util.IsGoogleCudaEnabled():
    # "NCHW" format is not currently supported on CPU.
    configs.append(("NCHW", True))
  return configs
class Conv2DTest(tf.test.TestCase):
def _SetupValuesForDevice(self, tensor_in_sizes, filter_in_sizes, strides,
                          padding, data_format, use_gpu):
  """Builds the convolution graph for one device/format configuration.

  Args:
    tensor_in_sizes: Input tensor dimensions in
      [batch, input_rows, input_cols, input_depth].
    filter_in_sizes: Filter tensor dimensions in
      [kernel_rows, kernel_cols, input_depth, output_depth].
    strides: Stride: [col_stride, row_stride]
    padding: Padding type.
    data_format: Format of the data tensors.
    use_gpu: True if the operations should be run on GPU
  Returns:
    Symbolic tensor value that can be used to execute the computation
  """
  # Total element counts decide how many incrementing values to generate.
  total_size_1 = 1
  total_size_2 = 1
  for s in tensor_in_sizes:
    total_size_1 *= s
  for s in filter_in_sizes:
    total_size_2 *= s
  # Initializes the input tensor with array containing incrementing
  # numbers from 1.
  x1 = [f * 1.0 for f in range(1, total_size_1 + 1)]
  x2 = [f * 1.0 for f in range(1, total_size_2 + 1)]
  with self.test_session(use_gpu=use_gpu) as sess:
    t1 = tf.constant(x1, shape=tensor_in_sizes)
    t2 = tf.constant(x2, shape=filter_in_sizes)
    # Expand the 2-element stride into the NHWC 4-element form.
    strides = [1] + strides + [1]
    if data_format == "NCHW":
      # Inputs/strides arrive in NHWC; convert for NCHW execution.
      t1 = NHWCToNCHW(t1)
      strides = NHWCToNCHW(strides)
    conv = tf.nn.conv2d(t1,
                        t2,
                        strides=strides,
                        padding=padding,
                        data_format=data_format)
    if data_format == "NCHW":
      # Convert back so callers always compare values in NHWC layout.
      conv = NCHWToNHWC(conv)
    return conv
def _CompareFwdValues(self, tensor_in_sizes, filter_in_sizes,
                      conv_strides, padding):
  """Verifies that CPU and GPU produce the same values.

  Args:
    tensor_in_sizes: Input tensor dimensions in
      [batch, input_rows, input_cols, input_depth].
    filter_in_sizes: Filter tensor dimensions in
      [kernel_rows, kernel_cols, input_depth, output_depth].
    conv_strides: [row_stride, col_stride] for the convolution;
    padding: Padding type.
  """
  # One shared random input so every configuration computes the same math.
  x1 = np.random.rand(*tensor_in_sizes).astype(np.float32)
  x2 = np.random.rand(*filter_in_sizes).astype(np.float32)
  def _SetupVal(data_format, use_gpu):
    # Builds the conv graph for one (data_format, device) combination.
    with self.test_session(use_gpu=use_gpu):
      t1 = tf.constant(x1, shape=tensor_in_sizes)
      t2 = tf.constant(x2, shape=filter_in_sizes)
      strides = [1] + conv_strides + [1]
      if data_format == "NCHW":
        t1 = NHWCToNCHW(t1)
        strides = NHWCToNCHW(strides)
      conv = tf.nn.conv2d(t1,
                          t2,
                          strides=strides,
                          padding=padding,
                          data_format=data_format)
      if data_format == "NCHW":
        # Convert back so all results are compared in NHWC layout.
        conv = NCHWToNHWC(conv)
      return conv
  tensors = []
  for (data_format, use_gpu) in GetTestConfigs():
    tensors.append(_SetupVal(data_format, use_gpu))
  with self.test_session() as sess:
    values = sess.run(tensors)
    # Every configuration must agree with the first one.
    for i in range(1, len(values)):
      self.assertAllClose(values[0], values[i], rtol=1e-5, atol=1e-5)
def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, strides,
                  padding, expected):
  """Runs the convolution on every config and checks against `expected`.

  Args:
    tensor_in_sizes: Input dimensions [batch, rows, cols, depth].
    filter_in_sizes: Filter dimensions [k_rows, k_cols, in_depth, out_depth].
    strides: [row_stride, col_stride].
    padding: Padding type.
    expected: flat list of expected output values.
  """
  tensors = []
  for (data_format, use_gpu) in GetTestConfigs():
    result = self._SetupValuesForDevice(tensor_in_sizes,
                                        filter_in_sizes,
                                        strides,
                                        padding,
                                        data_format,
                                        use_gpu=use_gpu)
    tensors.append(result)
  with self.test_session() as sess:
    values = sess.run(tensors)
    for i in range(len(tensors)):
      conv = tensors[i]
      value = values[i]
      print("expected = ", expected)
      print("actual = ", value)
      # Compare flattened values and the symbolic shape for each config.
      self.assertArrayNear(expected, np.ravel(value), 1e-5)
      self.assertShapeEqual(value, conv)
def testConv2D1x1Filter(self):
  """1x1 filter over a 2x3x3 input, VALID padding."""
  expected = [30.0, 36.0, 42.0, 66.0, 81.0, 96.0, 102.0, 126.0, 150.0,
              138.0, 171.0, 204.0, 174.0, 216.0, 258.0, 210.0, 261.0,
              312.0]
  self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
                     filter_in_sizes=[1, 1, 3, 3],
                     strides=[1, 1], padding="VALID",
                     expected=expected)
def testConv2DEmpty(self):
  """A zero-batch input yields an empty output."""
  self._VerifyValues(tensor_in_sizes=[0, 2, 3, 3],
                     filter_in_sizes=[1, 1, 3, 3],
                     strides=[1, 1], padding="VALID",
                     expected=[])
def testConv2D2x2Filter(self):
  """2x2 filter, stride 1, VALID padding."""
  # The outputs are computed using third_party/py/IPython/notebook.
  self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
                     filter_in_sizes=[2, 2, 3, 3],
                     strides=[1, 1], padding="VALID",
                     expected=[2271.0, 2367.0, 2463.0, 2901.0, 3033.0,
                               3165.0])
def testConv2D1x2Filter(self):
  """1x2 filter, stride 1, VALID padding."""
  # The outputs are computed using third_party/py/IPython/notebook.
  expected = [231.0, 252.0, 273.0, 384.0, 423.0, 462.0, 690.0,
              765.0, 840.0, 843.0, 936.0, 1029.0]
  self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
                     filter_in_sizes=[1, 2, 3, 3],
                     strides=[1, 1], padding="VALID",
                     expected=expected)
def testConv2D2x2FilterStride2(self):
  """2x2 filter, stride 2, VALID padding."""
  self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
                     filter_in_sizes=[2, 2, 3, 3],
                     strides=[2, 2], padding="VALID",
                     expected=[2271.0, 2367.0, 2463.0])
def testConv2D2x2FilterStride2Same(self):
  """2x2 filter, stride 2, SAME padding."""
  self._VerifyValues(tensor_in_sizes=[1, 2, 3, 3],
                     filter_in_sizes=[2, 2, 3, 3],
                     strides=[2, 2], padding="SAME",
                     expected=[2271.0, 2367.0, 2463.0, 1230.0, 1305.0,
                               1380.0])
def testConv2D2x2FilterStride1x2(self):
  """2x2 filter with asymmetric stride [1, 2], VALID padding."""
  self._VerifyValues(tensor_in_sizes=[1, 3, 6, 1],
                     filter_in_sizes=[2, 2, 1, 1],
                     strides=[1, 2], padding="VALID",
                     expected=[58.0, 78.0, 98.0, 118.0, 138.0, 158.0])
def testConv2DKernelSmallerThanStrideValid(self):
  """Kernel smaller than the stride, VALID padding."""
  self._VerifyValues(tensor_in_sizes=[1, 7, 7, 1],
                     filter_in_sizes=[2, 2, 1, 1],
                     strides=[3, 3], padding="VALID",
                     expected=[65, 95, 275, 305])
def testConv2DKernelSmallerThanStrideSame(self):
  """Kernel smaller than the stride, SAME padding."""
  cases = [
      ([1, 3, 3, 1], [1, 1, 1, 1], [2, 2], [1, 3, 7, 9]),
      ([1, 4, 4, 1], [1, 1, 1, 1], [2, 2], [1, 3, 9, 11]),
      ([1, 4, 4, 1], [2, 2, 1, 1], [3, 3], [44, 28, 41, 16]),
  ]
  for in_sizes, f_sizes, strides, expected in cases:
    self._VerifyValues(tensor_in_sizes=in_sizes,
                       filter_in_sizes=f_sizes,
                       strides=strides, padding="SAME",
                       expected=expected)
  # TODO this currently fails.
  #self._VerifyValues(tensor_in_sizes=[1, 8, 8, 1],
  #                   filter_in_sizes=[2, 2, 1, 1],
  #                   strides=[4, 4], padding="SAME",
  #                   expected=[72, 112, 392, 432])
# Testing for backprops
def _RunAndVerifyBackpropInput(self, input_sizes, filter_sizes, output_sizes,
                               strides, padding, expected, data_format,
                               use_gpu):
  """Checks conv2d_backprop_input against precomputed values.

  Args:
    input_sizes: NHWC shape of the input whose gradient is computed.
    filter_sizes: filter dimensions.
    output_sizes: NHWC shape of the backpropagated values.
    strides: [row_stride, col_stride].
    padding: Padding type.
    expected: flat list of expected gradient values.
    data_format: "NHWC" or "NCHW".
    use_gpu: run on GPU when True.
  """
  total_output_size = 1
  total_filter_size = 1
  for s in output_sizes:
    total_output_size *= s
  for s in filter_sizes:
    total_filter_size *= s
  # Initializes the input tensor with array containing incrementing
  # numbers from 1.
  x1 = [f * 1.0 for f in range(1, total_filter_size + 1)]
  x2 = [f * 1.0 for f in range(1, total_output_size + 1)]
  with self.test_session(use_gpu=use_gpu) as sess:
    if data_format == "NCHW":
      input_sizes = NHWCToNCHW(input_sizes)
    t0 = tf.constant(input_sizes, shape=[len(input_sizes)])
    t1 = tf.constant(x1, shape=filter_sizes)
    t2 = tf.constant(x2, shape=output_sizes)
    strides = [1] + strides + [1]
    if data_format == "NCHW":
      t2 = NHWCToNCHW(t2)
      strides = NHWCToNCHW(strides)
    conv = tf.nn.conv2d_backprop_input(t0,
                                       t1,
                                       t2,
                                       strides=strides,
                                       padding=padding,
                                       data_format=data_format)
    if data_format == "NCHW":
      conv = NCHWToNHWC(conv)
    # "values" consists of two tensors for two backprops
    value = sess.run(conv)
    self.assertShapeEqual(value, conv)
  print("expected = ", expected)
  print("actual = ", value)
  self.assertArrayNear(expected, value.flatten(), 1e-5)
def _CompareBackpropInput(self, input_sizes, filter_sizes, output_sizes,
                          conv_strides, padding):
  """Checks all device/format configs agree on the input gradient."""
  # One shared random filter/output pair so all configs are comparable.
  x1 = np.random.rand(*filter_sizes).astype(np.float32)
  x2 = np.random.rand(*output_sizes).astype(np.float32)
  def _GetVal(data_format, use_gpu):
    # Computes the input gradient for one (data_format, device) combo.
    with self.test_session(use_gpu=use_gpu) as sess:
      if data_format == "NCHW":
        new_input_sizes = NHWCToNCHW(input_sizes)
      else:
        new_input_sizes = input_sizes
      t0 = tf.constant(new_input_sizes, shape=[len(new_input_sizes)])
      t1 = tf.constant(x1, shape=filter_sizes)
      t2 = tf.constant(x2, shape=output_sizes)
      strides = [1] + conv_strides + [1]
      if data_format == "NCHW":
        t2 = NHWCToNCHW(t2)
        strides = NHWCToNCHW(strides)
      conv = tf.nn.conv2d_backprop_input(t0,
                                         t1,
                                         t2,
                                         strides=strides,
                                         padding=padding,
                                         data_format=data_format)
      if data_format == "NCHW":
        conv = NCHWToNHWC(conv)
      ret = conv.eval()
      self.assertShapeEqual(ret, conv)
      return ret
  values = []
  for (data_format, use_gpu) in GetTestConfigs():
    values.append(_GetVal(data_format, use_gpu))
  # Every configuration must agree with the first one.
  for i in range(1, len(values)):
    self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)
def testConv2D2x2Depth1ValidBackpropInput(self):
  """Input gradient for depth-1 VALID convolution."""
  expected = [1.0, 4.0, 4.0, 3.0, 10.0, 8.0]
  for data_format, use_gpu in GetTestConfigs():
    self._RunAndVerifyBackpropInput(
        input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1],
        output_sizes=[1, 1, 2, 1], strides=[1, 1], padding="VALID",
        expected=expected, data_format=data_format, use_gpu=use_gpu)
def testConv2D2x2Depth3ValidBackpropInput(self):
  """Input gradient for depth-3 VALID convolution."""
  expected = [14.0, 32.0, 50.0,
              100.0, 163.0, 226.0,
              167.0, 212.0, 257.0,
              122.0, 140.0, 158.0,
              478.0, 541.0, 604.0,
              437.0, 482.0, 527.0]
  for data_format, use_gpu in GetTestConfigs():
    self._RunAndVerifyBackpropInput(
        input_sizes=[1, 2, 3, 3], filter_sizes=[2, 2, 3, 3],
        output_sizes=[1, 1, 2, 3], strides=[1, 1], padding="VALID",
        expected=expected, data_format=data_format, use_gpu=use_gpu)
def testConv2D2x2Depth3ValidBackpropInputStride1x2(self):
  """Input gradient with asymmetric stride [1, 2], VALID padding."""
  expected = [1.0, 2.0, 2.0, 4.0, 3.0, 6.0,
              7.0, 12.0, 11.0, 18.0, 15.0, 24.0,
              12.0, 16.0, 15.0, 20.0, 18.0, 24.0]
  for data_format, use_gpu in GetTestConfigs():
    self._RunAndVerifyBackpropInput(
        input_sizes=[1, 3, 6, 1], filter_sizes=[2, 2, 1, 1],
        output_sizes=[1, 2, 3, 1], strides=[1, 2], padding="VALID",
        expected=expected, data_format=data_format, use_gpu=use_gpu)
def testConv2DStrideTwoFilterOneSameBackpropInput(self):
  """Input gradient for a 1x1 filter with stride 2, SAME padding."""
  expected = [1.0, 0.0, 2.0, 0.0,
              0.0, 0.0, 0.0, 0.0,
              3.0, 0.0, 4.0, 0.0,
              0.0, 0.0, 0.0, 0.0]
  for data_format, use_gpu in GetTestConfigs():
    self._RunAndVerifyBackpropInput(
        input_sizes=[1, 4, 4, 1], filter_sizes=[1, 1, 1, 1],
        output_sizes=[1, 2, 2, 1], strides=[2, 2], padding="SAME",
        expected=expected, data_format=data_format, use_gpu=use_gpu)
# Testing for backprops
def _RunAndVerifyBackpropFilter(self, input_sizes, filter_sizes, output_sizes,
                                strides, padding, expected, data_format,
                                use_gpu):
  """Checks conv2d_backprop_filter against precomputed values.

  Args:
    input_sizes: NHWC shape of the forward input.
    filter_sizes: dimensions of the filter whose gradient is computed.
    output_sizes: NHWC shape of the backpropagated values.
    strides: [row_stride, col_stride].
    padding: Padding type.
    expected: flat list of expected gradient values.
    data_format: "NHWC" or "NCHW".
    use_gpu: run on GPU when True.
  """
  total_input_size = 1
  total_output_size = 1
  for s in input_sizes:
    total_input_size *= s
  for s in output_sizes:
    total_output_size *= s
  # Initializes the input tensor with array containing incrementing
  # numbers from 1.
  x0 = [f * 1.0 for f in range(1, total_input_size + 1)]
  x2 = [f * 1.0 for f in range(1, total_output_size + 1)]
  with self.test_session(use_gpu=use_gpu) as sess:
    t0 = tf.constant(x0, shape=input_sizes)
    t1 = tf.constant(filter_sizes, shape=[len(filter_sizes)])
    t2 = tf.constant(x2, shape=output_sizes)
    strides = [1] + strides + [1]
    if data_format == "NCHW":
      t0 = NHWCToNCHW(t0)
      t2 = NHWCToNCHW(t2)
      strides = NHWCToNCHW(strides)
    conv = tf.nn.conv2d_backprop_filter(t0,
                                        t1,
                                        t2,
                                        strides=strides,
                                        padding=padding,
                                        data_format=data_format)
    value = sess.run(conv)
    self.assertShapeEqual(value, conv)
  print("expected = ", expected)
  print("actual = ", value)
  self.assertArrayNear(expected, value.flatten(), 1e-5)
def _CompareBackFilter(self, input_sizes, filter_sizes, output_sizes,
                       conv_strides, padding):
  """Checks all device/format configs agree on the filter gradient."""
  # One shared random input/output pair so all configs are comparable.
  x0 = np.random.rand(*input_sizes).astype(np.float32)
  x2 = np.random.rand(*output_sizes).astype(np.float32)
  def _GetVal(data_format, use_gpu):
    # Computes the filter gradient for one (data_format, device) combo.
    with self.test_session(use_gpu=use_gpu) as sess:
      t0 = tf.constant(x0, shape=input_sizes)
      t1 = tf.constant(filter_sizes, shape=[len(filter_sizes)])
      t2 = tf.constant(x2, shape=output_sizes)
      strides = [1] + conv_strides + [1]
      if data_format == "NCHW":
        t0 = NHWCToNCHW(t0)
        t2 = NHWCToNCHW(t2)
        strides = NHWCToNCHW(strides)
      conv = tf.nn.conv2d_backprop_filter(t0,
                                          t1,
                                          t2,
                                          strides=strides,
                                          padding=padding,
                                          data_format=data_format)
      ret = conv.eval()
      self.assertShapeEqual(ret, conv)
      return ret
  values = []
  for (data_format, use_gpu) in GetTestConfigs():
    values.append(_GetVal(data_format, use_gpu))
  # Every configuration must agree with the first one.
  for i in range(1, len(values)):
    self.assertAllClose(values[0], values[i], rtol=1e-4, atol=1e-4)
def testConv2D2x2Depth1ValidBackpropFilter(self):
  """Filter gradient for depth-1 VALID convolution."""
  expected = [5.0, 8.0, 14.0, 17.0]
  for data_format, use_gpu in GetTestConfigs():
    self._RunAndVerifyBackpropFilter(
        input_sizes=[1, 2, 3, 1], filter_sizes=[2, 2, 1, 1],
        output_sizes=[1, 1, 2, 1], strides=[1, 1], padding="VALID",
        expected=expected, data_format=data_format, use_gpu=use_gpu)
def testConv2D2x2Depth3ValidBackpropFilter(self):
  """Filter gradient for depth-3 VALID convolution."""
  expected = [17.0, 22.0, 27.0, 22.0, 29.0, 36.0, 27.0, 36.0, 45.0,
              32.0, 43.0, 54.0, 37.0, 50.0, 63.0, 42.0, 57.0, 72.0,
              62.0, 85.0, 108.0, 67.0, 92.0, 117.0, 72.0, 99.0, 126.0,
              77.0, 106.0, 135.0, 82.0, 113.0, 144.0, 87.0, 120.0, 153.0]
  for data_format, use_gpu in GetTestConfigs():
    self._RunAndVerifyBackpropFilter(
        input_sizes=[1, 2, 3, 3], filter_sizes=[2, 2, 3, 3],
        output_sizes=[1, 1, 2, 3], strides=[1, 1], padding="VALID",
        expected=expected, data_format=data_format, use_gpu=use_gpu)
def testConv2D2x2Depth3ValidBackpropFilterStride1x2(self):
  """Filter gradient with asymmetric stride [1, 2], VALID padding."""
  expected = [161.0, 182.0, 287.0, 308.0]
  for data_format, use_gpu in GetTestConfigs():
    self._RunAndVerifyBackpropFilter(
        input_sizes=[1, 3, 6, 1], filter_sizes=[2, 2, 1, 1],
        output_sizes=[1, 2, 3, 1], strides=[1, 2], padding="VALID",
        expected=expected, data_format=data_format, use_gpu=use_gpu)
def testConv2DStrideTwoFilterOneSameBackpropFilter(self):
  """Filter gradient for a 1x1 filter with stride 2, SAME padding."""
  expected = [78.]
  for data_format, use_gpu in GetTestConfigs():
    self._RunAndVerifyBackpropFilter(
        input_sizes=[1, 4, 4, 1], filter_sizes=[1, 1, 1, 1],
        output_sizes=[1, 2, 2, 1], strides=[2, 2], padding="SAME",
        expected=expected, data_format=data_format, use_gpu=use_gpu)
# Gradient checkers
def ConstructAndTestGradient(self, batch, input_rows, input_cols, filter_rows,
                             filter_cols, in_depth, out_depth, stride_rows,
                             stride_cols, padding, test_input, data_format,
                             use_gpu):
  """Builds a conv graph and checks the numeric vs. symbolic gradient.

  Args:
    batch, input_rows, input_cols, in_depth: input tensor dimensions.
    filter_rows, filter_cols, out_depth: filter dimensions.
    stride_rows, stride_cols: convolution strides.
    padding: "VALID" or "SAME".
    test_input: check the gradient w.r.t. the input when True,
      w.r.t. the filter when False.
    data_format: "NHWC" or "NCHW".
    use_gpu: run on GPU when True.
  """
  input_shape = [batch, input_rows, input_cols, in_depth]
  filter_shape = [filter_rows, filter_cols, in_depth, out_depth]
  # TODO(yangke): re-factor the computation of output shape.
  if padding == "VALID":
    output_rows = (input_rows - filter_rows + stride_rows) // stride_rows
    output_cols = (input_cols - filter_cols + stride_cols) // stride_cols
  else:
    output_rows = (input_rows + stride_rows - 1) // stride_rows
    output_cols = (input_cols + stride_cols - 1) // stride_cols
  output_shape = [batch, output_rows, output_cols, out_depth]
  input_size = 1
  for x in input_shape:
    input_size *= x
  filter_size = 1
  for x in filter_shape:
    filter_size *= x
  # Deterministic values in [0, 1) so gradient checks are reproducible.
  input_data = [x * 1.0 / input_size for x in range(0, input_size)]
  filter_data = [x * 1.0 / filter_size for x in range(0, filter_size)]
  with self.test_session(use_gpu=use_gpu):
    # Conv2DGrad functions are not compiled for double due to
    # a problem in the way Eigen's Conv2DGrad works for double.
    # So we disable the DOUBLE path.  We should re-enable this
    # when double support returns for CPU and/or GPU.
    # data_type = tf.float64
    # tolerance = 1e-8
    data_type = tf.float32
    tolerance = 0.002
    input_tensor = tf.constant(input_data, shape=input_shape,
                               dtype=data_type, name="input")
    filter_tensor = tf.constant(filter_data, shape=filter_shape,
                                dtype=data_type, name="filter")
    strides = [1, stride_rows, stride_cols, 1]
    if data_format == "NCHW":
      new_input_tensor = NHWCToNCHW(input_tensor)
      strides = NHWCToNCHW(strides)
    else:
      new_input_tensor = input_tensor
    conv = tf.nn.conv2d(new_input_tensor,
                        filter_tensor,
                        strides,
                        padding,
                        data_format=data_format,
                        name="conv")
    if data_format == "NCHW":
      conv = NCHWToNHWC(conv)
    self.assertEqual(output_shape, conv.get_shape())
    # Numerically compare the symbolic gradient against finite differences
    # for whichever argument this test targets.
    if test_input:
      err = tf.test.compute_gradient_error(input_tensor, input_shape, conv,
                                           output_shape)
    else:
      err = tf.test.compute_gradient_error(filter_tensor, filter_shape, conv,
                                           output_shape)
    print("conv_2d gradient error = ", err)
    self.assertLess(err, tolerance)
def testInputGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=4,
input_cols=5,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientValidPaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="VALID",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideOne(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=1,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=5,
input_cols=4,
filter_rows=3,
filter_cols=3,
in_depth=3,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideTwo(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=4,
input_rows=6,
input_cols=5,
filter_rows=2,
filter_cols=2,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=2,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testInputGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=7,
input_cols=6,
filter_rows=3,
filter_cols=3,
in_depth=4,
out_depth=5,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=True,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStrideThree(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=3,
stride_cols=3,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testFilterGradientSamePaddingStride2x1(self):
for (data_format, use_gpu) in GetTestConfigs():
self.ConstructAndTestGradient(batch=2,
input_rows=8,
input_cols=7,
filter_rows=4,
filter_cols=4,
in_depth=2,
out_depth=3,
stride_rows=2,
stride_cols=1,
padding="SAME",
test_input=False,
data_format=data_format,
use_gpu=use_gpu)
def testShapeFunctionEdgeCases(self):
# All shapes unknown.
c1 = tf.nn.conv2d(tf.placeholder(tf.float32),
tf.placeholder(tf.float32),
strides=[1, 1, 1, 1], padding="SAME")
self.assertEqual([None, None, None, None], c1.get_shape().as_list())
# Incorrect input shape.
with self.assertRaises(ValueError):
tf.nn.conv2d(tf.placeholder(tf.float32, shape=[1, 3]),
tf.placeholder(tf.float32),
strides=[1, 1, 1, 1], padding="SAME")
# Incorrect filter shape.
with self.assertRaises(ValueError):
tf.nn.conv2d(tf.placeholder(tf.float32),
tf.placeholder(tf.float32, shape=[1, 3]),
strides=[1, 1, 1, 1], padding="SAME")
# Depth mismatch.
with self.assertRaises(ValueError):
tf.nn.conv2d(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
tf.placeholder(tf.float32,
shape=[4, 4, 2, 2]),
strides=[1, 1, 1, 1], padding="SAME")
# Illegal strides.
with self.assertRaisesRegexp(ValueError, "strides in the batch and depth"):
tf.nn.conv2d(tf.placeholder(tf.float32),
tf.placeholder(tf.float32),
strides=[2, 1, 1, 1], padding="SAME")
with self.assertRaisesRegexp(ValueError, "strides in the batch and depth"):
tf.nn.conv2d(tf.placeholder(tf.float32),
tf.placeholder(tf.float32),
strides=[1, 1, 1, 2], padding="SAME")
# Filter larger than input.
with self.assertRaisesRegexp(ValueError,
"Filter must not be larger than the input"):
tf.nn.conv2d(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
tf.placeholder(tf.float32,
shape=[20, 21, 3, 2]),
strides=[1, 1, 1, 1], padding="SAME")
with self.assertRaisesRegexp(ValueError,
"Filter must not be larger than the input"):
tf.nn.conv2d(tf.placeholder(tf.float32,
shape=[32, 20, 20, 3]),
tf.placeholder(tf.float32,
shape=[21, 20, 3, 2]),
strides=[1, 1, 1, 1], padding="SAME")
# This is only a very simple test. More comprehensive tests live in
# //learning/dist_belief/experimental/brain_compatibility/conv_nn_test.py
# where we compare the numeric results of the depthwise conv op with the
# depthwise weighted sum transformer in dist_belief.
class DepthwiseConv2DTest(tf.test.TestCase):

  def _VerifyValues(self, tensor_in_sizes, filter_in_sizes, stride,
                    padding, expected):
    """Runs depthwise_conv2d and compares against precomputed outputs.

    Args:
      tensor_in_sizes: Input tensor dimensions in
        [batch, input_rows, input_cols, input_depth].
      filter_in_sizes: Filter tensor dimensions in
        [filter_rows, filter_cols, input_depth, depth_multiplier].
      stride: Stride.
      padding: Padding type.
      expected: An array containing the expected operation outputs.
    """
    input_size = 1
    for dim in tensor_in_sizes:
      input_size *= dim
    filter_size = 1
    for dim in filter_in_sizes:
      filter_size *= dim
    # Both tensors are filled with the incrementing ramp 1.0, 2.0, 3.0, ...
    x1 = [float(v) for v in range(1, input_size + 1)]
    x2 = [float(v) for v in range(1, filter_size + 1)]
    with self.test_session() as sess:
      t1 = tf.constant(x1, shape=tensor_in_sizes)
      t1.set_shape(tensor_in_sizes)
      t2 = tf.constant(x2, shape=filter_in_sizes)
      conv = tf.nn.depthwise_conv2d(t1, t2, strides=[1, stride, stride, 1],
                                    padding=padding)
      value = sess.run(conv)
      print("value = ", value)
      self.assertArrayNear(expected, np.ravel(value), 1e-5)
      self.assertShapeEqual(value, conv)

  def testConv2D2x2Filter(self):
    """Depthwise conv of a 1x2x3x2 input with a 2x2x2x2 filter.

    The input viewed per channel is
      depth 0: [[1, 3, 5], [7, 9, 11]]
      depth 1: [[2, 4, 6], [8, 10, 12]]
    and the filter holds two 2x2 patches per input channel
    (depth_multiplier 2, so 4 output channels). The expected values were
    worked out by hand, e.g. output (0, 0, 0):
      1*1 + 7*9 + 3*5 + 9*13 = 196
    and output (0, 1, 3):
      4*4 + 10*12 + 6*8 + 12*16 = 376.
    """
    expected_output = [196, 216, 272, 296, 252, 280, 344, 376]
    self._VerifyValues(tensor_in_sizes=[1, 2, 3, 2],
                       filter_in_sizes=[2, 2, 2, 2],
                       stride=1, padding="VALID",
                       expected=expected_output)
class SeparableConv2DTest(tf.test.TestCase):

  def _InitValues(self, sizes):
    """Returns a constant tensor of `sizes` filled with 0.5, 1.0, 1.5, ...

    Args:
      sizes: Tensor dimensions.

    Returns:
      A tf.constant of the requested shape.
    """
    total_size = 1
    for dim in sizes:
      total_size *= dim
    values = [0.5 * v for v in range(1, total_size + 1)]
    return tf.constant(values, shape=sizes)

  def _VerifyValues(self, tensor_in_sizes, depthwise_filter_in_sizes,
                    pointwise_filter_in_sizes, stride, padding, expected):
    """Runs separable_conv2d and compares against precomputed outputs.

    Args:
      tensor_in_sizes: Input tensor dimensions.
      depthwise_filter_in_sizes: Depthwise filter tensor dimensions.
      pointwise_filter_in_sizes: Pointwise filter tensor dimensions.
      stride: Stride.
      padding: Padding type.
      expected: An array containing the expected operation outputs.
    """
    with self.test_session() as sess:
      t1 = self._InitValues(tensor_in_sizes)
      f1 = self._InitValues(depthwise_filter_in_sizes)
      f1.set_shape(depthwise_filter_in_sizes)
      f2 = self._InitValues(pointwise_filter_in_sizes)
      conv = tf.nn.separable_conv2d(t1, f1, f2,
                                    strides=[1, stride, stride, 1],
                                    padding=padding)
      value = sess.run(conv)
      print("value = ", value)
      self.assertArrayNear(expected, np.ravel(value), 1e-5)
      self.assertShapeEqual(value, conv)

  def testSeparableConv2D(self):
    """Depthwise [2,2,2,3] then pointwise [1,1,6,7] over a [1,4,4,2] input.

    Complexity is O(2*3*2*2 + 6*7*1*1) as opposed to O(2*7*2*2) for a
    dense conv of the same effective shape.
    """
    expected_output = [
        6644.5, 6971.5, 7298.5, 7625.5, 7952.5, 8279.5, 8606.5, 8154.5, 8556.5,
        8958.5, 9360.5, 9762.5, 10164.5, 10566.5, 9664.5, 10141.5, 10618.5,
        11095.5, 11572.5, 12049.5, 12526.5, 4145.5, 4346.5, 4547.5, 4748.5,
        4949.5, 5150.5, 5351.5, 12684.5, 13311.5, 13938.5, 14565.5, 15192.5,
        15819.5, 16446.5, 14194.5, 14896.5, 15598.5, 16300.5, 17002.5, 17704.5,
        18406.5, 15704.5, 16481.5, 17258.5, 18035.5, 18812.5, 19589.5, 20366.5,
        6499.5, 6814.5, 7129.5, 7444.5, 7759.5, 8074.5, 8389.5, 18724.5,
        19651.5, 20578.5, 21505.5, 22432.5, 23359.5, 24286.5, 20234.5, 21236.5,
        22238.5, 23240.5, 24242.5, 25244.5, 26246.5, 21744.5, 22821.5, 23898.5,
        24975.5, 26052.5, 27129.5, 28206.5, 8853.5, 9282.5, 9711.5, 10140.5,
        10569.5, 10998.5, 11427.5, 5746.75, 6010.75, 6274.75, 6538.75, 6802.75,
        7066.75, 7330.75, 6168.75, 6452.25, 6735.75, 7019.25, 7302.75, 7586.25,
        7869.75, 6590.75, 6893.75, 7196.75, 7499.75, 7802.75, 8105.75, 8408.75,
        2036.25, 2119.5, 2202.75, 2286.0, 2369.25, 2452.5, 2535.75]
    self._VerifyValues(tensor_in_sizes=[1, 4, 4, 2],
                       depthwise_filter_in_sizes=[2, 2, 2, 3],
                       pointwise_filter_in_sizes=[1, 1, 6, 7],
                       stride=1, padding="SAME",
                       expected=expected_output)

  def testSeparableConv2DEqualInputOutputDepth(self):
    """Same as above with pointwise [1,1,6,6]: output depth equals 3x input.

    Complexity is O(2*3*2*2 + 6*6*1*1) as opposed to O(2*6*2*2).
    """
    expected_output = [
        5742.0, 6069.0, 6396.0, 6723.0, 7050.0, 7377.0,
        7047.0, 7449.0, 7851.0, 8253.0, 8655.0, 9057.0,
        8352.0, 8829.0, 9306.0, 9783.0, 10260.0, 10737.0,
        3582.0, 3783.0, 3984.0, 4185.0, 4386.0, 4587.0,
        10962.0, 11589.0, 12216.0, 12843.0, 13470.0, 14097.0,
        12267.0, 12969.0, 13671.0, 14373.0, 15075.0, 15777.0,
        13572.0, 14349.0, 15126.0, 15903.0, 16680.0, 17457.0,
        5616.0, 5931.0, 6246.0, 6561.0, 6876.0, 7191.0,
        16182.0, 17109.0, 18036.0, 18963.0, 19890.0, 20817.0,
        17487.0, 18489.0, 19491.0, 20493.0, 21495.0, 22497.0,
        18792.0, 19869.0, 20946.0, 22023.0, 23100.0, 24177.0,
        7650.0, 8079.0, 8508.0, 8937.0, 9366.0, 9795.0,
        4963.5, 5227.5, 5491.5, 5755.5, 6019.5, 6283.5,
        5328.0, 5611.5, 5895.0, 6178.5, 6462.0, 6745.5,
        5692.5, 5995.5, 6298.5, 6601.5, 6904.5, 7207.5,
        1757.25, 1840.5, 1923.75, 2007.0, 2090.25, 2173.5]
    self._VerifyValues(tensor_in_sizes=[1, 4, 4, 2],
                       depthwise_filter_in_sizes=[2, 2, 2, 3],
                       pointwise_filter_in_sizes=[1, 1, 6, 6],
                       stride=1, padding="SAME",
                       expected=expected_output)

  def testSeparableConv2DIllegalCases(self):
    """A pointwise output depth below the depthwise depth must be rejected."""
    with self.assertRaisesRegexp(
        ValueError,
        "Refusing to perform an overparameterized separable convolution"):
      self._VerifyValues(tensor_in_sizes=[1, 4, 4, 2],
                         depthwise_filter_in_sizes=[2, 2, 2, 3],
                         pointwise_filter_in_sizes=[1, 1, 6, 5],
                         stride=1, padding="SAME",
                         expected=None)
def GetInceptionFwdTest(input_size, filter_size, stride, padding):
  """Builds a Conv2DTest method exercising one Inception forward shape."""
  def Test(self):
    shape_info = (input_size, filter_size, stride, padding)
    tf.logging.info("Testing InceptionFwd %s", shape_info)
    self._CompareFwdValues(input_size, filter_size, [stride, stride], padding)
  return Test
def GetInceptionBackInputTest(input_size, filter_size, output_size,
                              stride, padding):
  """Builds a Conv2DTest method exercising one Inception backprop-input shape."""
  def Test(self):
    shape_info = (input_size, filter_size, output_size, stride, padding)
    tf.logging.info("Testing InceptionBackInput %s", shape_info)
    self._CompareBackpropInput(input_size, filter_size, output_size,
                               [stride, stride], padding)
  return Test
def GetInceptionBackFilterTest(input_size, filter_size, output_size,
                               strides, padding):
  """Builds a Conv2DTest method exercising one Inception backprop-filter shape."""
  def Test(self):
    shape_info = (input_size, filter_size, output_size, strides, padding)
    tf.logging.info("Testing InceptionBackFilter %s", shape_info)
    self._CompareBackFilter(input_size, filter_size, output_size,
                            strides, padding)
  return Test
if __name__ == "__main__":
  # Attach one forward, one backprop-input and one backprop-filter test per
  # shrunk Inception shape before handing control to the test runner.
  for index, (input_size_, filter_size_, output_size_, stride_,
              padding_) in enumerate(GetShrunkInceptionShapes()):
    fwd = GetInceptionFwdTest(input_size_, filter_size_, stride_, padding_)
    back_input = GetInceptionBackInputTest(input_size_, filter_size_,
                                           output_size_, stride_, padding_)
    back_filter = GetInceptionBackFilterTest(input_size_, filter_size_,
                                             output_size_,
                                             [stride_, stride_], padding_)
    setattr(Conv2DTest, "testInceptionFwd_%d" % index, fwd)
    setattr(Conv2DTest, "testInceptionBackInput_%d" % index, back_input)
    setattr(Conv2DTest, "testInceptionBackFilter_%d" % index, back_filter)
  tf.test.main()
| |
# Copyright (c) 2012-2016 Seafile Ltd.
# encoding: utf-8
import os
import logging
import json
from django.core.cache import cache
from django.http import HttpResponse, HttpResponseRedirect, Http404, \
HttpResponseBadRequest
from django.utils.translation import ugettext as _, activate
from django.contrib import messages
from django.utils.html import escape
import seaserv
from seaserv import seafile_api
from pysearpc import SearpcError
from seahub.share.forms import FileLinkShareForm, \
UploadLinkShareForm
from seahub.share.models import FileShare, UploadLinkShare
from seahub.share.signals import share_repo_to_user_successful
from seahub.auth.decorators import login_required, login_required_ajax
from seahub.base.decorators import require_POST
from seahub.contacts.signals import mail_sended
from seahub.views import is_registered_user, check_folder_permission
from seahub.utils import string2list, IS_EMAIL_CONFIGURED, check_filename_with_rename, \
is_valid_username, is_valid_email, send_html_email, is_org_context, \
gen_token, normalize_cache_key, get_site_name
from seahub.utils.mail import send_html_email_with_dj_template
from seahub.settings import SITE_ROOT, REPLACE_FROM_EMAIL, \
ADD_REPLY_TO_HEADER, SHARE_LINK_EMAIL_LANGUAGE, \
SHARE_LINK_AUDIT_CODE_TIMEOUT
from seahub.profile.models import Profile
# Get an instance of a logger
logger = logging.getLogger(__name__)
# rpc wrapper
def is_org_repo_owner(username, repo_id):
    """Return True if `username` owns the org repo `repo_id`.

    Thin wrapper over the threaded seafile RPC; `True if x else False`
    replaced by the boolean expression itself.
    """
    owner = seaserv.seafserv_threaded_rpc.get_org_repo_owner(repo_id)
    return owner == username
def org_share_repo(org_id, repo_id, from_user, to_user, permission):
    """Share an org repo from one user to another through the threaded RPC."""
    rpc = seaserv.seafserv_threaded_rpc
    return rpc.org_add_share(org_id, repo_id, from_user, to_user, permission)
def org_remove_share(org_id, repo_id, from_user, to_user):
    """Remove an org repo share through the threaded RPC."""
    rpc = seaserv.seafserv_threaded_rpc
    return rpc.org_remove_share(org_id, repo_id, from_user, to_user)
# functions
def share_to_group(request, repo, group, permission):
    """Share repo to group with given permission.

    Returns True on success, False if the repo is already shared to the
    group or if the RPC call fails. (Consistently uses the repo_id/group_id
    locals instead of mixing them with repo.id/group.id.)
    """
    repo_id = repo.id
    group_id = group.id
    from_user = request.user.username

    # Skip when the repo is already shared to this group.
    if is_org_context(request):
        org_id = request.user.org.org_id
        group_repo_ids = seafile_api.get_org_group_repoids(org_id, group_id)
    else:
        group_repo_ids = seafile_api.get_group_repoids(group_id)
    if repo_id in group_repo_ids:
        return False

    try:
        if is_org_context(request):
            org_id = request.user.org.org_id
            seafile_api.add_org_group_repo(repo_id, org_id, group_id,
                                           from_user, permission)
        else:
            seafile_api.set_group_repo(repo_id, group_id, from_user,
                                       permission)
        return True
    except Exception as e:
        # Best-effort: log the RPC failure and report False to the caller.
        logger.error(e)
        return False
def share_to_user(request, repo, to_user, permission):
    """Share repo to a user with given permission.

    Returns True when the share succeeded (and the success signal was
    sent); False for self-shares, unknown recipients, or RPC errors.
    """
    repo_id = repo.id
    from_user = request.user.username

    # Sharing a library to oneself is a no-op.
    if from_user == to_user:
        return False

    # The recipient must exist -- inside the same org when applicable.
    org_id = None
    if is_org_context(request):
        org_id = request.user.org.org_id
        recipient_ok = seaserv.ccnet_threaded_rpc.org_user_exists(org_id,
                                                                  to_user)
    else:
        recipient_ok = is_registered_user(to_user)
    if not recipient_ok:
        return False

    try:
        if is_org_context(request):
            org_id = request.user.org.org_id
            org_share_repo(org_id, repo_id, from_user, to_user, permission)
        else:
            seafile_api.share_repo(repo_id, from_user, to_user, permission)
    except SearpcError as e:
        logger.error(e)
        return False
    else:
        # Notify listeners only after the RPC actually succeeded.
        share_repo_to_user_successful.send(sender=None,
                                           from_user=from_user,
                                           to_user=to_user, repo=repo,
                                           path='/', org_id=org_id)
        return True
# share link
@login_required_ajax
def send_shared_link(request):
    """Handle the AJAX POST that emails a file/directory share link.

    Validates a FileLinkShareForm from request.POST, then sends one HTML
    mail per valid recipient. Returns JSON: 200 with the per-recipient
    success/failure lists when at least one mail went out, 400 otherwise,
    500 when the mail service is unconfigured, 400 on form errors.
    """
    if not request.method == 'POST':
        raise Http404

    content_type = 'application/json; charset=utf-8'
    if not IS_EMAIL_CONFIGURED:
        data = json.dumps({'error': _('Failed to send email, email service is not properly configured, please contact administrator.')})
        return HttpResponse(data, status=500, content_type=content_type)

    form = FileLinkShareForm(request.POST)
    if form.is_valid():
        email = form.cleaned_data['email']
        file_shared_link = form.cleaned_data['file_shared_link']
        file_shared_name = form.cleaned_data['file_shared_name']
        file_shared_type = form.cleaned_data['file_shared_type']
        # HTML-escaped because it is rendered in the HTML mail template.
        extra_msg = escape(form.cleaned_data['extra_msg'])

        # `email` is a delimiter-separated list of recipient addresses.
        to_email_list = string2list(email)
        send_success, send_failed = [], []
        # use contact_email, if present
        username = Profile.objects.get_contact_email_by_user(request.user.username)
        for to_email in to_email_list:
            if not is_valid_email(to_email):
                send_failed.append(to_email)
                continue
            # Force the mail's language when a site-wide override is set.
            if SHARE_LINK_EMAIL_LANGUAGE:
                activate(SHARE_LINK_EMAIL_LANGUAGE)
            # Add email to contacts.
            mail_sended.send(sender=None, user=request.user.username,
                             email=to_email)
            # Template context for shared_link_email.html.
            c = {
                'email': request.user.username,
                'to_email': to_email,
                'file_shared_link': file_shared_link,
                'file_shared_name': file_shared_name,
            }
            if extra_msg:
                c['extra_msg'] = extra_msg
            if REPLACE_FROM_EMAIL:
                from_email = username
            else:
                from_email = None  # use default from email
            if ADD_REPLY_TO_HEADER:
                reply_to = username
            else:
                reply_to = None
            try:
                # 'f' marks a file link; anything else is a directory link.
                if file_shared_type == 'f':
                    c['file_shared_type'] = _("file")
                    send_html_email(_('A file is shared to you on %s') % get_site_name(),
                                    'shared_link_email.html',
                                    c, from_email, [to_email],
                                    reply_to=reply_to
                                    )
                else:
                    c['file_shared_type'] = _("directory")
                    send_html_email(_('A directory is shared to you on %s') % get_site_name(),
                                    'shared_link_email.html',
                                    c, from_email, [to_email],
                                    reply_to=reply_to)
                send_success.append(to_email)
            except Exception:
                # Best-effort per recipient: one failure must not stop the rest.
                send_failed.append(to_email)

        if len(send_success) > 0:
            data = json.dumps({"send_success": send_success, "send_failed": send_failed})
            return HttpResponse(data, status=200, content_type=content_type)
        else:
            data = json.dumps({"error": _("Internal server error, or please check the email(s) you entered")})
            return HttpResponse(data, status=400, content_type=content_type)
    else:
        return HttpResponseBadRequest(json.dumps(form.errors),
                                      content_type=content_type)
@login_required
def save_shared_link(request):
    """Copy the file behind a public share link into one of user's libraries."""
    username = request.user.username
    token = request.GET.get('t', '')
    dst_repo_id = request.POST.get('dst_repo', '')
    dst_path = request.POST.get('dst_path', '')

    # Bounce back to the referring page, falling back to the site root.
    next_page = request.META.get('HTTP_REFERER', None) or SITE_ROOT

    if not dst_repo_id or not dst_path:
        messages.error(request, _('Please choose a directory.'))
        return HttpResponseRedirect(next_page)

    if check_folder_permission(request, dst_repo_id, dst_path) != 'rw':
        messages.error(request, _('Permission denied'))
        return HttpResponseRedirect(next_page)

    try:
        fs = FileShare.objects.get(token=token)
    except FileShare.DoesNotExist:
        raise Http404

    src_repo_id = fs.repo_id
    src_dir = os.path.dirname(fs.path)
    obj_name = os.path.basename(fs.path)
    new_obj_name = check_filename_with_rename(dst_repo_id, dst_path, obj_name)
    seafile_api.copy_file(src_repo_id, src_dir, obj_name,
                          dst_repo_id, dst_path, new_obj_name, username,
                          need_progress=0)

    messages.success(request, _('Successfully saved.'))
    return HttpResponseRedirect(next_page)
@login_required_ajax
def send_shared_upload_link(request):
    """Handle the AJAX POST that emails an upload link to recipients.

    Validates an UploadLinkShareForm from request.POST and sends one HTML
    mail per valid recipient. Returns JSON: 200 with the per-recipient
    success/failure lists when at least one mail went out, 400 otherwise,
    500 when the mail service is unconfigured, 400 on form errors.
    """
    if not request.method == 'POST':
        raise Http404

    content_type = 'application/json; charset=utf-8'
    if not IS_EMAIL_CONFIGURED:
        data = json.dumps({'error': _('Failed to send email, email service is not properly configured, please contact administrator.')})
        return HttpResponse(data, status=500, content_type=content_type)

    form = UploadLinkShareForm(request.POST)
    if form.is_valid():
        email = form.cleaned_data['email']
        shared_upload_link = form.cleaned_data['shared_upload_link']
        # HTML-escaped because it is rendered in the HTML mail template.
        extra_msg = escape(form.cleaned_data['extra_msg'])

        # `email` is a delimiter-separated list of recipient addresses.
        to_email_list = string2list(email)
        send_success, send_failed = [], []
        # use contact_email, if present
        username = Profile.objects.get_contact_email_by_user(request.user.username)
        for to_email in to_email_list:
            if not is_valid_email(to_email):
                send_failed.append(to_email)
                continue
            # Add email to contacts.
            mail_sended.send(sender=None, user=request.user.username,
                             email=to_email)
            # Template context for shared_upload_link_email.html.
            c = {
                'email': request.user.username,
                'to_email': to_email,
                'shared_upload_link': shared_upload_link,
            }
            if extra_msg:
                c['extra_msg'] = extra_msg
            if REPLACE_FROM_EMAIL:
                from_email = username
            else:
                from_email = None  # use default from email
            if ADD_REPLY_TO_HEADER:
                reply_to = username
            else:
                reply_to = None
            try:
                send_html_email(_('An upload link is shared to you on %s') % get_site_name(),
                                'shared_upload_link_email.html',
                                c, from_email, [to_email],
                                reply_to=reply_to)
                send_success.append(to_email)
            except Exception:
                # Best-effort per recipient: one failure must not stop the rest.
                send_failed.append(to_email)

        if len(send_success) > 0:
            data = json.dumps({"send_success": send_success, "send_failed": send_failed})
            return HttpResponse(data, status=200, content_type=content_type)
        else:
            data = json.dumps({"error": _("Internal server error, or please check the email(s) you entered")})
            return HttpResponse(data, status=400, content_type=content_type)
    else:
        return HttpResponseBadRequest(json.dumps(form.errors),
                                      content_type=content_type)
@login_required_ajax
@require_POST
def ajax_private_share_dir(request):
    """Share a library, or a sub-directory as a sub-library, to users/groups.

    POST params: repo_id, path, emails (list string), groups (list string),
    perm. When path != '/', a virtual sub-repo is looked up (and created on
    demand) and shared instead of the whole library. Returns JSON with the
    per-target success/failure lists, or an {'error': ...} payload.
    """
    content_type = 'application/json; charset=utf-8'

    repo_id = request.POST.get('repo_id', '')
    path = request.POST.get('path', '')
    username = request.user.username
    result = {}

    repo = seafile_api.get_repo(repo_id)
    if not repo:
        result['error'] = _('Library does not exist.')
        return HttpResponse(json.dumps(result), status=400, content_type=content_type)

    if seafile_api.get_dir_id_by_path(repo_id, path) is None:
        result['error'] = _('Directory does not exist.')
        return HttpResponse(json.dumps(result), status=400, content_type=content_type)

    if path != '/':
        # if share a dir, check sub-repo first
        try:
            if is_org_context(request):
                org_id = request.user.org.org_id
                sub_repo = seaserv.seafserv_threaded_rpc.get_org_virtual_repo(
                    org_id, repo_id, path, username)
            else:
                sub_repo = seafile_api.get_virtual_repo(repo_id, path, username)
        except SearpcError as e:
            result['error'] = e.msg
            return HttpResponse(json.dumps(result), status=500, content_type=content_type)

        if not sub_repo:
            name = os.path.basename(path)
            # create a sub-lib
            try:
                # use name as 'repo_name' & 'repo_desc' for sub_repo
                if is_org_context(request):
                    org_id = request.user.org.org_id
                    sub_repo_id = seaserv.seafserv_threaded_rpc.create_org_virtual_repo(
                        org_id, repo_id, path, name, name, username)
                else:
                    sub_repo_id = seafile_api.create_virtual_repo(repo_id, path, name, name, username)
                sub_repo = seafile_api.get_repo(sub_repo_id)
            except SearpcError as e:
                result['error'] = e.msg
                return HttpResponse(json.dumps(result), status=500, content_type=content_type)

        # From here on, the (possibly just-created) sub-repo is what gets shared.
        shared_repo_id = sub_repo.id
        shared_repo = sub_repo
    else:
        shared_repo_id = repo_id
        shared_repo = repo

    emails_string = request.POST.get('emails', '')
    groups_string = request.POST.get('groups', '')
    perm = request.POST.get('perm', '')

    emails = string2list(emails_string)
    groups = string2list(groups_string)

    # Test whether user is the repo owner.
    if not seafile_api.is_repo_owner(username, shared_repo_id) and \
            not is_org_repo_owner(username, shared_repo_id):
        result['error'] = _('Only the owner of the library has permission to share it.')
        return HttpResponse(json.dumps(result), status=500, content_type=content_type)

    # Parsing input values.
    # no 'share_to_all'
    share_to_groups, share_to_users, shared_success, shared_failed = [], [], [], []

    # Invalid addresses are reported as failures without an RPC attempt.
    for email in emails:
        email = email.lower()
        if is_valid_username(email):
            share_to_users.append(email)
        else:
            shared_failed.append(email)

    for group_id in groups:
        share_to_groups.append(seaserv.get_group(group_id))

    for email in share_to_users:
        # Add email to contacts.
        mail_sended.send(sender=None, user=request.user.username, email=email)
        if share_to_user(request, shared_repo, email, perm):
            shared_success.append(email)
        else:
            shared_failed.append(email)

    for group in share_to_groups:
        if share_to_group(request, shared_repo, group, perm):
            shared_success.append(group.group_name)
        else:
            shared_failed.append(group.group_name)

    if len(shared_success) > 0:
        return HttpResponse(json.dumps({
            "shared_success": shared_success,
            "shared_failed": shared_failed
        }), content_type=content_type)
    else:
        # for case: only share to users and the emails are not valid
        data = json.dumps({"error": _("Please check the email(s) you entered")})
        return HttpResponse(data, status=400, content_type=content_type)
def ajax_get_link_audit_code(request):
    """Email a short-lived verification code for visiting a share link.

    Generates a token and records it with the email in cache (expires per
    SHARE_LINK_AUDIT_CODE_TIMEOUT), then mails the code to that address.
    The visitor later provides token and email at the share-link page.

    Fix: the failure log line previously passed no argument for its %s
    placeholder, so the literal string "%s" was logged instead of the
    recipient address.
    """
    content_type = 'application/json; charset=utf-8'

    token = request.POST.get('token')
    email = request.POST.get('email')
    if not is_valid_email(email):
        return HttpResponse(json.dumps({
            'error': _('Email address is not valid')
        }), status=400, content_type=content_type)

    # The token may belong to either a download link or an upload link.
    dfs = FileShare.objects.get_valid_file_link_by_token(token)
    ufs = UploadLinkShare.objects.get_valid_upload_link_by_token(token)
    fs = dfs if dfs else ufs
    if fs is None:
        return HttpResponse(json.dumps({
            'error': _('Share link is not found')
        }), status=400, content_type=content_type)

    cache_key = normalize_cache_key(email, 'share_link_audit_')
    code = gen_token(max_length=6)
    cache.set(cache_key, code, SHARE_LINK_AUDIT_CODE_TIMEOUT)

    # send code to user via email
    subject = _("Verification code for visiting share links")
    c = {'code': code}
    send_success = send_html_email_with_dj_template(email,
                                                    subject=subject,
                                                    dj_template='share/audit_code_email.html',
                                                    context=c)
    if not send_success:
        logger.error('Failed to send audit code via email to %s', email)
        return HttpResponse(json.dumps({
            "error": _("Failed to send a verification code, please try again later.")
        }), status=500, content_type=content_type)

    return HttpResponse(json.dumps({'success': True}), status=200,
                        content_type=content_type)
| |
# Copyright 2015, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ConfigParser
import io
import json
import os
import re

import yaml

from ansible import errors
from ansible.runner.return_data import ReturnData
from ansible import utils
from ansible.utils import template
# Maps a `config_type` option value to the ActionModule handler method name.
CONFIG_TYPES = {
    fmt: 'return_config_overrides_%s' % fmt
    for fmt in ('ini', 'json', 'yaml')
}
class ActionModule(object):
TRANSFERS_FILES = True
    def __init__(self, runner):
        # Keep a handle on the Ansible Runner so run() can reach
        # basedir/playbook state later.
        self.runner = runner
def grab_options(self, complex_args, module_args):
"""Grab passed options from Ansible complex and module args.
:param complex_args: ``dict``
:param module_args: ``dict``
:returns: ``dict``
"""
options = dict()
if complex_args:
options.update(complex_args)
options.update(utils.parse_kv(module_args))
return options
    @staticmethod
    def return_config_overrides_ini(config_overrides, resultant):
        """Returns string value from a modified config file.

        Parses the rendered INI text, applies `config_overrides` on top
        (non-dict top-level values go into the DEFAULT section; list values
        are comma-joined), and re-serializes the result.

        :param config_overrides: ``dict``
        :param resultant: ``str`` || ``unicode``
        :returns: ``str``
        """
        # allow_no_value keeps bare keys (no '=') legal, as some services use.
        config = ConfigParser.RawConfigParser(allow_no_value=True)
        # readfp needs a file-like object; BytesIO is the py2-compatible choice.
        config_object = io.BytesIO(resultant.encode('utf-8'))
        config.readfp(config_object)
        for section, items in config_overrides.items():
            # If the items value is not a dictionary it is assumed that the
            # value is a default item for this config type.
            if not isinstance(items, dict):
                if isinstance(items, list):
                    items = ','.join(items)
                config.set('DEFAULT', str(section), str(items))
            else:
                # Attempt to add a section to the config file passing if
                # an error is raised that is related to the section
                # already existing.
                try:
                    config.add_section(str(section))
                except (ConfigParser.DuplicateSectionError, ValueError):
                    pass
                for key, value in items.items():
                    if isinstance(value, list):
                        value = ','.join(value)
                    config.set(str(section), str(key), str(value))
        else:
            # NOTE: this `else` belongs to the for loop (for/else): since the
            # loop never breaks, it always runs and closes the input buffer
            # once all overrides are applied.
            config_object.close()
        resultant_bytesio = io.BytesIO()
        try:
            config.write(resultant_bytesio)
            return resultant_bytesio.getvalue()
        finally:
            resultant_bytesio.close()
def return_config_overrides_json(self, config_overrides, resultant):
"""Returns config json
Its important to note that file ordering will not be preserved as the
information within the json file will be sorted by keys.
:param config_overrides: ``dict``
:param resultant: ``str`` || ``unicode``
:returns: ``str``
"""
original_resultant = json.loads(resultant)
merged_resultant = self._merge_dict(
base_items=original_resultant,
new_items=config_overrides
)
return json.dumps(
merged_resultant,
indent=4,
sort_keys=True
)
def return_config_overrides_yaml(self, config_overrides, resultant):
    """Merge *config_overrides* into the YAML document *resultant*.

    :param config_overrides: ``dict``
    :param resultant: ``str`` || ``unicode``
    :returns: ``str``
    """
    merged_resultant = self._merge_dict(
        base_items=yaml.safe_load(resultant),
        new_items=config_overrides
    )
    # Block style output, wide lines to avoid wrapping long values.
    return yaml.safe_dump(
        merged_resultant,
        default_flow_style=False,
        width=1000,
    )
def _merge_dict(self, base_items, new_items):
"""Recursively merge new_items into base_items.
:param base_items: ``dict``
:param new_items: ``dict``
:returns: ``dict``
"""
for key, value in new_items.iteritems():
if isinstance(value, dict):
base_items[key] = self._merge_dict(
base_items.get(key, {}),
value
)
elif ',' in value or '\n' in value:
base_items[key] = re.split(', |,|\n', value)
base_items[key] = [i.strip() for i in base_items[key] if i]
elif isinstance(value, list):
base_items[key] = value
else:
base_items[key] = new_items[key]
return base_items
def run(self, conn, tmp, module_name, module_args, inject,
        complex_args=None, **kwargs):
    """Render a config template, apply overrides and copy it to the host.

    Required options: ``src``, ``dest`` and ``config_type`` (one of
    ``ini``, ``json`` or ``yaml``); ``config_overrides`` is optional.
    Delegates the final file transfer to the ``copy`` module.
    """
    if not self.runner.is_playbook:
        raise errors.AnsibleError(
            'FAILED: `config_templates` are only available in playbooks'
        )

    options = self.grab_options(complex_args, module_args)
    try:
        source = options['src']
        dest = options['dest']
        config_overrides = options.get('config_overrides', dict())
        config_type = options['config_type']
        # NOTE(review): an unsupported config_type raises AssertionError,
        # which is NOT converted into a clean failure result the way a
        # missing key is -- confirm this is intentional.
        assert config_type.lower() in ['ini', 'json', 'yaml']
    except KeyError as exp:
        result = dict(failed=True, msg=exp)
        return ReturnData(conn=conn, comm_ok=False, result=result)

    # Resolve the template path, honouring role-relative 'templates/' dirs.
    source_template = template.template(
        self.runner.basedir,
        source,
        inject
    )
    if '_original_file' in inject:
        source_file = utils.path_dwim_relative(
            inject['_original_file'],
            'templates',
            source_template,
            self.runner.basedir
        )
    else:
        source_file = utils.path_dwim(self.runner.basedir, source_template)

    # Open the template file and return the data as a string. This is
    # being done here so that the file can be a vault encrypted file.
    resultant = template.template_from_file(
        self.runner.basedir,
        source_file,
        inject,
        vault_password=self.runner.vault_pass
    )

    if config_overrides:
        # Dispatch to the matching return_config_overrides_<type> method.
        type_merger = getattr(self, CONFIG_TYPES.get(config_type))
        resultant = type_merger(
            config_overrides=config_overrides,
            resultant=resultant
        )

    # Retemplate the resultant object as it may have new data within it
    # as provided by an override variable.
    # NOTE(review): the return value is discarded, so this call only
    # verifies that no undefined variables remain -- confirm intent.
    template.template_from_string(
        basedir=self.runner.basedir,
        data=resultant,
        vars=inject,
        fail_on_undefined=True
    )

    # Access to protected method is unavoidable in Ansible 1.x.
    new_module_args = dict(
        src=self.runner._transfer_str(conn, tmp, 'source', resultant),
        dest=dest,
        original_basename=os.path.basename(source),
        follow=True,
    )
    module_args_tmp = utils.merge_module_args(
        module_args,
        new_module_args
    )

    # Remove data types that are not available to the copy module
    # NOTE(review): pop() without a default raises KeyError when either
    # key is absent from complex_args (e.g. supplied via module_args).
    complex_args.pop('config_overrides')
    complex_args.pop('config_type')

    # Return the copy module status. Access to protected method is
    # unavoidable in Ansible 1.x.
    return self.runner._execute_module(
        conn,
        tmp,
        'copy',
        module_args_tmp,
        inject=inject,
        complex_args=complex_args
    )
| |
import sys, sublime, re
if sys.version_info < (3, 0):
import modules.csscolours as colour
import modules.cssfuncs as funcs
else:
from . import csscolours as colour
from . import cssfuncs as funcs
#
# Primary functions
#
def apply(view, edit):
    """Replace @variables in the view with their values and append the
    region cache comment.  No-op when a cache is already present."""
    if get_region_cache(view):
        return
    remove_region_cache(view, edit)
    trim_entire_content(view, edit)
    append_region_cache(view, edit)
def remove(view, edit):
    """Restore @variable names from the region cache, then delete it."""
    apply_region_cache(view, edit)
    remove_region_cache(view, edit)
def highlights(view):
    """Return the regions of every @variable token in the view."""
    return view.find_all("@(\w|-)+")
#
# Content functions
#
# Label used inside the '/* Variables: ... */' cache comment that is
# appended to the bottom of the view.
region_cache_name = "Variables"
def get_css_variables_dict(view):
    """Return a dict mapping variable name -> value for each ``@name = value``
    definition found in the view.

    Finds a match of ``@varname = "value"`` (quotes optional) anywhere in
    the buffer rather than looking specifically within comments, i.e. it
    assumes variable names don't usually contain ``=``.  Values that
    reference other variables or use functions are resolved via
    calculate_value().
    """
    # Characters permitted in a variable's value.
    value_chars = r"'|\"|#|\w|\(|\)|@|,| |=|%|\.|-|:|;|\+|\*|\/[^\/]"
    matches = view.find_all(r"@(\w|-)+?(\s+)?=\s?+(" + value_chars + r")+", 0)
    css_vars = {}
    for match in matches:
        # Grab the actual matched text '@var = "val"' and split it up.
        varname, value = view.substr(match).split("=", 1)
        varname = varname.replace('@', '').strip()
        # Drop a trailing comment terminator, then strip one pair of
        # surrounding quotes (rather than removing every quote char).
        value = value.replace('*/', '').strip()
        if value.endswith('"') or value.endswith("'"):
            value = value[:-1]
        if value.startswith('"') or value.startswith("'"):
            value = value[1:]
        css_vars[varname] = value.strip()
    # Resolve values that reference other variables or call functions.
    for varname in css_vars:
        value = css_vars[varname]
        if value.find('@') >= 0 or value.find('(') >= 0:
            css_vars[varname] = calculate_value(value, css_vars)
    return css_vars
def calculate_value(value, d):
    """Resolve a variable *value*: substitute ``@name`` references from the
    variables dict *d*, then evaluate simple px/em/% arithmetic or a
    css/colour helper function.

    Returns the transformed value; if nothing applies, the substituted
    value is returned unchanged (previously the function fell off the
    end and returned None, clobbering the caller's dict entry).
    """
    # Substitute in the values of referenced variables.
    for varname in d:
        if varname in value:
            value = value.replace('@' + varname, d[varname])
    # Try simple arithmetic on numeric values, e.g. '10px + 2px' -> '12px'.
    metric = ''
    if 'px' in value:
        metric = 'px'
    elif 'em' in value:
        metric = 'em'
    elif '%' in value:
        metric = '%'
    numeric = value.replace(metric, '')
    if re.match(r'([0-9]| |\+|\-|\*)+', numeric):
        try:
            return str(eval(numeric)) + metric
        except Exception:
            # Not a pure arithmetic expression; fall through.
            pass
    # CSS helper function, e.g. box-shadow(...).
    match = re.search(r'((\w|-)+)\s*?\((.+)\)', value)
    if match:
        func = match.group(1)
        arg = match.group(3).strip()
        if func in ('box-shadow', 'linear-gradient', 'transition', 'transform'):
            # Call the helper directly instead of building an eval() string,
            # which broke on arguments containing quote characters.
            return getattr(funcs, func.replace('-', '_'))(arg)
    # Colour function, e.g. lighten(hex, 50%).
    match = re.search(r'(\w+)\s*?\((.*?),(.*)\)', value)
    if match:
        func = match.group(1)
        arg1 = match.group(2).strip()
        arg2 = match.group(3).strip()
        if func in ('lighten', 'darken', 'saturate', 'desaturate'):
            return getattr(colour, func)(arg1, arg2)
    return value
def get_first_comment(view):
    """Return the region of the first /* ... */ comment block in the view."""
    return view.find(r"\/\*+(\*|\w|\W)*?\*\/",0)  # non-greedy: stops at the first '*/'
def trim_entire_content(view, edit):
    """Strip trailing blank lines from the end of the view."""
    while True:
        last_line = view.line(view.size())
        if view.substr(last_line).strip():
            break
        # Remove the blank line together with the preceding newline.
        view.replace(edit, sublime.Region(last_line.a - 1, last_line.b), '')
#
# Region cache functions
#
def get_region_cache(view):
    """Return the region of the appended '/* Variables: ... */' cache
    comment, or view.find()'s not-found result when absent."""
    return view.find("\/\*+\s?+"+region_cache_name +"[^\*\/]*\*\/", 0)
def remove_region_cache(view, edit):
    """Delete the region-cache comment from the view, if present."""
    cache_region = get_region_cache(view)
    if not cache_region:
        return
    view.replace(edit, cache_region, '')
# Converts variables to their values and appends region indexes to the bottom (to be parsed later)
def append_region_cache(view, edit):
    """Replace each @variable (outside comments) with its value and append
    a cache comment recording the replaced regions; returns the list of
    replaced regions for highlighting."""
    variables = get_css_variables_dict(view)
    docblock = get_first_comment(view)
    if not docblock:
        # Nothing to anchor offsets against without a leading comment.
        #print "No first comment found"
        return
    # Loop through region matches (outside of the docblock)
    offset = docblock.b
    # NOTE(review): the while loop below relies on view.find() returning a
    # falsy value when there is no match (ST2 API behaviour) -- confirm.
    match = view.find("@(\w|-)+", offset)
    pos = {} # assoc array of a:b string pairs
    highlight = []
    for varname in variables:
        pos[varname] = []
    while match:
        varname = view.substr(match).replace('@', '')
        scope_name = view.scope_name(match.a)
        # Skip tokens inside comments (the variable definitions themselves).
        if varname in variables and scope_name.find('comment')==-1:
            value = variables[varname]
            view.replace(edit, match, value)
            offset = match.a + len(value)
            pos[varname].append(str(match.a) + ":" + str(offset)) # store in pos dict
            highlight.append( sublime.Region(match.a, offset) ) # store replaced region for highlighting
        else:
            offset+=1
        match = view.find("@(\w|-)+", offset) # go again
    # Compile the output: 'var{a:b,a:b}' entries joined with '.'.
    output = []
    for varname in variables:
        if pos[varname]:
            output.append ( varname +"{" + ','.join( pos[varname] ) + "}" )
    if len(output) > 0:
        cache = "/* " + region_cache_name + ":"
        cache+= '.'.join(output)
        cache+= " */"
        view.insert(edit, view.size(), "\n\n"+cache)
    return highlight
# Returns cache regions as array[varname] = [ [n,n], [n,n] ]
def parse_region_cache(view):
    """Parse the appended cache comment back into a dict mapping each
    variable name to a list of [start, end] integer pairs."""
    region = get_region_cache(view)
    if not region:
        #print "No region cache found"
        return
    cache = view.substr( get_region_cache(view) )
    # Strip the comment wrapper, leaving 'var{a:b,a:b}.var{...}'.
    cache = cache.replace('/* '+region_cache_name+':', '').replace(' */', '')
    regions = {}
    # Break into section
    sections = cache.split('.')
    for section in sections:
        varname, data = section.split('{')
        data = data.replace('}', '')
        pos = data.split(',') # array of n:n, n:n
        regions[varname] = [] # empty list for each variable key
        for pair in pos:
            a, b = pair.split(':')
            regions[varname].append([int(a), int(b)])
    return regions
# Uses parsed region cache to re-apply the variable names within the content
def apply_region_cache(view, edit):
    """Replace each cached value region with its '@varname' token,
    working from the bottom of the view upwards so earlier offsets
    remain valid during replacement."""
    data = {} # assoc array with key 'a' as position index - storing 'varname' and related 'b' value
    ordering = [] # ordered array by a region's 'a' index
    parsed = parse_region_cache(view)
    if parsed:
        # Create data and ordering objects
        for varname in parsed:
            for pos in parsed[varname]:
                data[pos[0]] = {'varname':varname, 'b':pos[1]}
                ordering.append(pos[0])
        # sort and reverse ordering - replacing regions from bottom to top
        ordering.sort()
        for a in reversed(ordering):
            b = data[a]['b']
            varname = data[a]['varname']
            view.replace(edit, sublime.Region(a, b), '@'+varname)
    # # highlight from top to bottom - accounting for 'shifting' regions during above replacement
    # diff = 0
    # for a in ordering:
    #     b = data[a]['b']
    #     varname = data[a]['varname']
    #     highlights.append(sublime.Region(diff + a, diff + a + len('@'+varname)))
    #     diff+= len('@'+varname) - (b-a)
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import urllib
from tempest import config
from tempest import exceptions
CONF = config.CONF

# the following map is used to construct proper URI
# for the given neutron resource; an empty prefix means the resource
# lives directly under the versioned root (e.g. v2.0/networks).
service_resource_prefix_map = {
    'networks': '',
    'subnets': '',
    'ports': '',
    'pools': 'lb',
    'vips': 'lb',
    'health_monitors': 'lb',
    'members': 'lb',
    'vpnservices': 'vpn',
    'ikepolicies': 'vpn',
    'ipsecpolicies': 'vpn',
    'metering_labels': 'metering',
    'metering_label_rules': 'metering',
    'firewall_rules': 'fw',
    'firewall_policies': 'fw',
    'firewalls': 'fw'
}

# The following list represents resource names that do not require
# changing underscore to a hyphen
hyphen_exceptions = ["health_monitors", "firewall_rules", "firewall_policies"]

# map from resource name to a plural name
# needed only for those which can't be constructed as name + 's'
resource_plural_map = {
    'security_groups': 'security_groups',
    'security_group_rules': 'security_group_rules',
    'ikepolicy': 'ikepolicies',
    'ipsecpolicy': 'ipsecpolicies',
    'quotas': 'quotas',
    'firewall_policy': 'firewall_policies'
}
class NetworkClientBase(object):
    """Base REST client for the Neutron v2.0 network API.

    Subclasses supply the wire format by implementing get_rest_client(),
    deserialize_list(), deserialize_single() and the serialize helpers
    referenced by the CRUD closures below.  CRUD methods of the form
    list_*/delete_*/show_*/create_*/update_* are synthesized on demand
    by __getattr__().
    """

    def __init__(self, auth_provider):
        self.rest_client = self.get_rest_client(
            auth_provider)
        self.rest_client.service = CONF.network.catalog_type
        self.version = '2.0'
        self.uri_prefix = "v%s" % (self.version)
        # Polling knobs used by wait_for_resource_deletion().
        self.build_timeout = CONF.network.build_timeout
        self.build_interval = CONF.network.build_interval

    def get_rest_client(self, auth_provider):
        """Return the format-specific REST client (subclass hook)."""
        raise NotImplementedError

    # Thin delegations to the underlying REST client.
    def post(self, uri, body, headers=None):
        return self.rest_client.post(uri, body, headers)

    def put(self, uri, body, headers=None):
        return self.rest_client.put(uri, body, headers)

    def get(self, uri, headers=None):
        return self.rest_client.get(uri, headers)

    def delete(self, uri, headers=None):
        return self.rest_client.delete(uri, headers)

    def deserialize_list(self, body):
        raise NotImplementedError

    def deserialize_single(self, body):
        raise NotImplementedError

    def get_uri(self, plural_name):
        """Build the request URI for a resource, e.g. 'v2.0/lb/pools'."""
        # get service prefix from resource name
        service_prefix = service_resource_prefix_map.get(
            plural_name)
        # Most resources use hyphens on the wire; a few keep underscores.
        if plural_name not in hyphen_exceptions:
            plural_name = plural_name.replace("_", "-")
        if service_prefix:
            uri = '%s/%s/%s' % (self.uri_prefix, service_prefix,
                                plural_name)
        else:
            uri = '%s/%s' % (self.uri_prefix, plural_name)
        return uri

    def pluralize(self, resource_name):
        # get plural from map or just add 's'
        return resource_plural_map.get(resource_name, resource_name + 's')

    def _lister(self, plural_name):
        """Return a closure implementing list_<resources>(**filters)."""
        def _list(**filters):
            uri = self.get_uri(plural_name)
            if filters:
                # NOTE(review): urllib.urlencode is Python 2 only; on
                # Python 3 this would be urllib.parse.urlencode.
                uri += '?' + urllib.urlencode(filters, doseq=1)
            resp, body = self.get(uri)
            result = {plural_name: self.deserialize_list(body)}
            return resp, result

        return _list

    def _deleter(self, resource_name):
        """Return a closure implementing delete_<resource>(resource_id)."""
        def _delete(resource_id):
            plural = self.pluralize(resource_name)
            uri = '%s/%s' % (self.get_uri(plural), resource_id)
            return self.delete(uri)

        return _delete

    def _shower(self, resource_name):
        """Return a closure implementing show_<resource>(resource_id, **fields)."""
        def _show(resource_id, **fields):
            # fields is a dict which key is 'fields' and value is a
            # list of field's name. An example:
            # {'fields': ['id', 'name']}
            plural = self.pluralize(resource_name)
            uri = '%s/%s' % (self.get_uri(plural), resource_id)
            if fields:
                uri += '?' + urllib.urlencode(fields, doseq=1)
            resp, body = self.get(uri)
            body = self.deserialize_single(body)
            return resp, body

        return _show

    def _creater(self, resource_name):
        """Return a closure implementing create_<resource>(**kwargs)."""
        def _create(**kwargs):
            plural = self.pluralize(resource_name)
            uri = self.get_uri(plural)
            # ``serialize`` is provided by the format-specific subclass.
            post_data = self.serialize({resource_name: kwargs})
            resp, body = self.post(uri, post_data)
            body = self.deserialize_single(body)
            return resp, body

        return _create

    def _updater(self, resource_name):
        """Return a closure implementing update_<resource>(res_id, **kwargs)."""
        def _update(res_id, **kwargs):
            plural = self.pluralize(resource_name)
            uri = '%s/%s' % (self.get_uri(plural), res_id)
            post_data = self.serialize({resource_name: kwargs})
            resp, body = self.put(uri, post_data)
            body = self.deserialize_single(body)
            return resp, body

        return _update

    def __getattr__(self, name):
        """Synthesize CRUD methods from their name prefix, e.g.
        ``list_networks`` -> ``self._lister('networks')()``."""
        method_prefixes = ["list_", "delete_", "show_", "create_", "update_"]
        method_functors = [self._lister,
                           self._deleter,
                           self._shower,
                           self._creater,
                           self._updater]
        for index, prefix in enumerate(method_prefixes):
            prefix_len = len(prefix)
            if name[:prefix_len] == prefix:
                return method_functors[index](name[prefix_len:])
        raise AttributeError(name)

    # Common methods that are hard to automate
    def create_bulk_network(self, count, names):
        """Create *count* networks in one bulk request, named from *names*."""
        network_list = list()
        for i in range(count):
            network_list.append({'name': names[i]})
        post_data = {'networks': network_list}
        body = self.serialize_list(post_data, "networks", "network")
        uri = self.get_uri("networks")
        resp, body = self.post(uri, body)
        body = {'networks': self.deserialize_list(body)}
        return resp, body

    def create_bulk_subnet(self, subnet_list):
        """Create multiple subnets in one bulk request."""
        post_data = {'subnets': subnet_list}
        body = self.serialize_list(post_data, 'subnets', 'subnet')
        uri = self.get_uri('subnets')
        resp, body = self.post(uri, body)
        body = {'subnets': self.deserialize_list(body)}
        return resp, body

    def create_bulk_port(self, port_list):
        """Create multiple ports in one bulk request."""
        post_data = {'ports': port_list}
        body = self.serialize_list(post_data, 'ports', 'port')
        uri = self.get_uri('ports')
        resp, body = self.post(uri, body)
        body = {'ports': self.deserialize_list(body)}
        return resp, body

    def wait_for_resource_deletion(self, resource_type, id):
        """Waits for a resource to be deleted.

        Polls every ``build_interval`` seconds and raises
        TimeoutException after ``build_timeout`` seconds.
        """
        start_time = int(time.time())
        while True:
            if self.is_resource_deleted(resource_type, id):
                return
            if int(time.time()) - start_time >= self.build_timeout:
                raise exceptions.TimeoutException
            time.sleep(self.build_interval)

    def is_resource_deleted(self, resource_type, id):
        """Return True when show_<resource_type>(id) raises NotFound."""
        method = 'show_' + resource_type
        try:
            getattr(self, method)(id)
        except AttributeError:
            raise Exception("Unknown resource type %s " % resource_type)
        except exceptions.NotFound:
            return True
        return False
| |
"""
This module contains the :py:class:`USBDevice` interface for the `AD2USB`_.
.. _AD2USB: http://www.alarmdecoder.com
.. moduleauthor:: Scott Petersen <scott@nutech.com>
"""
import time
import threading
from .base_device import Device
from ..util import CommError, TimeoutError, NoDeviceError, bytes_hack
from ..event import event
# Optional-dependency detection: pyftdi changed its package layout between
# releases, so try the old nested path first, then the flat path.  When
# neither pyftdi nor pyusb is importable, USBDevice methods raise
# ImportError at call time instead of breaking the whole module import.
have_pyftdi = False

try:
    from pyftdi.pyftdi.ftdi import Ftdi, FtdiError
    import usb.core
    import usb.util

    have_pyftdi = True

except ImportError:
    try:
        from pyftdi.ftdi import Ftdi, FtdiError
        import usb.core
        import usb.util

        have_pyftdi = True

    except ImportError:
        have_pyftdi = False
class USBDevice(Device):
    """
    `AD2USB`_ device utilizing PyFTDI's interface.
    """

    # Constants
    PRODUCT_IDS = ((0x0403, 0x6001), (0x0403, 0x6015))
    """List of Vendor and Product IDs used to recognize `AD2USB`_ devices."""
    DEFAULT_VENDOR_ID = PRODUCT_IDS[0][0]
    """Default Vendor ID used to recognize `AD2USB`_ devices."""
    DEFAULT_PRODUCT_ID = PRODUCT_IDS[0][1]
    """Default Product ID used to recognize `AD2USB`_ devices."""

    # Deprecated constants
    FTDI_VENDOR_ID = DEFAULT_VENDOR_ID
    """DEPRECATED: Vendor ID used to recognize `AD2USB`_ devices."""
    FTDI_PRODUCT_ID = DEFAULT_PRODUCT_ID
    """DEPRECATED: Product ID used to recognize `AD2USB`_ devices."""

    BAUDRATE = 115200
    """Default baudrate for `AD2USB`_ devices."""

    # Cached result of the last find_all() call (class-wide).
    __devices = []
    # Singleton DetectThread managed by start_detection()/stop_detection().
    __detect_thread = None

    @classmethod
    def find_all(cls, vid=None, pid=None):
        """
        Returns all FTDI devices matching our vendor and product IDs.

        :returns: list of devices
        :raises: :py:class:`~alarmdecoder.util.CommError`
        """
        if not have_pyftdi:
            raise ImportError('The USBDevice class has been disabled due to missing requirement: pyftdi or pyusb.')

        cls.__devices = []

        query = cls.PRODUCT_IDS
        if vid and pid:
            query = [(vid, pid)]

        try:
            # nocache=True forces a fresh scan of the USB bus.
            cls.__devices = Ftdi.find_all(query, nocache=True)

        except (usb.core.USBError, FtdiError) as err:
            raise CommError('Error enumerating AD2USB devices: {0}'.format(str(err)), err)

        return cls.__devices

    @classmethod
    def devices(cls):
        """
        Returns a cached list of `AD2USB`_ devices located on the system.

        :returns: cached list of devices found
        """
        return cls.__devices

    @classmethod
    def find(cls, device=None):
        """
        Factory method that returns the requested :py:class:`USBDevice` device, or the
        first device.

        :param device: Tuple describing the USB device to open, as returned
                       by find_all().
        :type device: tuple

        :returns: :py:class:`USBDevice` object utilizing the specified device
        :raises: :py:class:`~alarmdecoder.util.NoDeviceError`
        """
        if not have_pyftdi:
            raise ImportError('The USBDevice class has been disabled due to missing requirement: pyftdi or pyusb.')

        cls.find_all()

        if len(cls.__devices) == 0:
            raise NoDeviceError('No AD2USB devices present.')

        if device is None:
            device = cls.__devices[0]

        # Device tuples come from Ftdi.find_all().
        vendor, product, sernum, ifcount, description = device

        return USBDevice(interface=sernum, vid=vendor, pid=product)

    @classmethod
    def start_detection(cls, on_attached=None, on_detached=None):
        """
        Starts the device detection thread.

        :param on_attached: function to be called when a device is attached  **Callback definition:** *def callback(thread, device)*
        :type on_attached: function
        :param on_detached: function to be called when a device is detached  **Callback definition:** *def callback(thread, device)*
        :type on_detached: function
        """
        if not have_pyftdi:
            raise ImportError('The USBDevice class has been disabled due to missing requirement: pyftdi or pyusb.')

        cls.__detect_thread = USBDevice.DetectThread(on_attached, on_detached)

        # Prime the device cache so the first poll doesn't report every
        # existing device as newly attached.
        try:
            cls.find_all()
        except CommError:
            pass

        cls.__detect_thread.start()

    @classmethod
    def stop_detection(cls):
        """
        Stops the device detection thread.
        """
        if not have_pyftdi:
            raise ImportError('The USBDevice class has been disabled due to missing requirement: pyftdi or pyusb.')

        # Best-effort: the thread may never have been started.
        try:
            cls.__detect_thread.stop()

        except Exception:
            pass

    @property
    def interface(self):
        """
        Retrieves the interface used to connect to the device.

        :returns: the interface used to connect to the device
        """
        return self._interface

    @interface.setter
    def interface(self, value):
        """
        Sets the interface used to connect to the device.

        :param value: may specify either the serial number or the device index
        :type value: string or int
        """
        self._interface = value

        # An int selects by enumeration index; anything else is treated
        # as a serial number.
        if isinstance(value, int):
            self._device_number = value
        else:
            self._serial_number = value

    @property
    def serial_number(self):
        """
        Retrieves the serial number of the device.

        :returns: serial number of the device
        """
        return self._serial_number

    @serial_number.setter
    def serial_number(self, value):
        """
        Sets the serial number of the device.

        :param value: serial number of the device
        :type value: string
        """
        self._serial_number = value

    @property
    def description(self):
        """
        Retrieves the description of the device.

        :returns: description of the device
        """
        return self._description

    @description.setter
    def description(self, value):
        """
        Sets the description of the device.

        :param value: description of the device
        :type value: string
        """
        self._description = value

    def __init__(self, interface=0, vid=None, pid=None):
        """
        Constructor

        :param interface: May specify either the serial number or the device
                          index.
        :type interface: string or int
        """
        if not have_pyftdi:
            raise ImportError('The USBDevice class has been disabled due to missing requirement: pyftdi or pyusb.')

        Device.__init__(self)

        self._device = Ftdi()

        self._interface = 0
        self._device_number = 0
        self._serial_number = None

        # Fall back to the default AD2USB vendor/product IDs when not given.
        self._vendor_id = USBDevice.DEFAULT_VENDOR_ID
        if vid:
            self._vendor_id = vid

        self._product_id = USBDevice.DEFAULT_PRODUCT_ID
        if pid:
            self._product_id = pid

        self._endpoint = 0
        self._description = None

        # Property setter: routes to _device_number or _serial_number.
        self.interface = interface

    def open(self, baudrate=BAUDRATE, no_reader_thread=False):
        """
        Opens the device.

        :param baudrate: baudrate to use
        :type baudrate: int
        :param no_reader_thread: whether or not to automatically start the
                                 reader thread.
        :type no_reader_thread: bool

        :raises: :py:class:`~alarmdecoder.util.NoDeviceError`
        """
        # Set up defaults
        if baudrate is None:
            baudrate = USBDevice.BAUDRATE

        self._read_thread = Device.ReadThread(self)

        # Open the device and start up the thread.
        try:
            self._device.open(self._vendor_id,
                              self._product_id,
                              self._endpoint,
                              self._device_number,
                              self._serial_number,
                              self._description)

            self._device.set_baudrate(baudrate)

            if not self._serial_number:
                self._serial_number = self._get_serial_number()

            self._id = self._serial_number

        except (usb.core.USBError, FtdiError) as err:
            raise NoDeviceError('Error opening device: {0}'.format(str(err)), err)

        except KeyError as err:
            # NOTE(review): indexing the exception as err[0][0] is a
            # Python 2 idiom; this handler would itself fail on Python 3.
            raise NoDeviceError('Unsupported device. ({0:04x}:{1:04x})  You probably need a newer version of pyftdi.'.format(err[0][0], err[0][1]))

        else:
            self._running = True
            self.on_open()

            if not no_reader_thread:
                self._read_thread.start()

        return self

    def close(self):
        """
        Closes the device.
        """
        try:
            Device.close(self)

            # HACK: Probably should fork pyftdi and make this call in .close()
            self._device.usb_dev.attach_kernel_driver(self._device_number)

        except Exception:
            pass

    def fileno(self):
        """
        File number not supported for USB devices.

        :raises: NotImplementedError
        """
        raise NotImplementedError('USB devices do not support fileno()')

    def write(self, data):
        """
        Writes data to the device.

        :param data: data to write
        :type data: string

        :raises: :py:class:`~alarmdecoder.util.CommError`
        """
        try:
            self._device.write_data(data)

            self.on_write(data=data)

        except FtdiError as err:
            raise CommError('Error writing to device: {0}'.format(str(err)), err)

    def read(self):
        """
        Reads a single character from the device.

        :returns: character read from the device
        :raises: :py:class:`~alarmdecoder.util.CommError`
        """
        ret = None

        try:
            ret = self._device.read_data(1)

        except (usb.core.USBError, FtdiError) as err:
            raise CommError('Error reading from device: {0}'.format(str(err)), err)

        return ret

    def read_line(self, timeout=0.0, purge_buffer=False):
        """
        Reads a line from the device.

        :param timeout: read timeout
        :type timeout: float
        :param purge_buffer: Indicates whether to purge the buffer prior to
                             reading.
        :type purge_buffer: bool

        :returns: line that was read
        :raises: :py:class:`~alarmdecoder.util.CommError`, :py:class:`~alarmdecoder.util.TimeoutError`
        """

        # The timer flips this function attribute, which acts as the
        # shared reading/timed-out flag for the polling loop below.
        def timeout_event():
            """Handles read timeout event"""
            timeout_event.reading = False

        timeout_event.reading = True

        if purge_buffer:
            self._buffer = b''

        got_line, ret = False, None

        timer = threading.Timer(timeout, timeout_event)
        if timeout > 0:
            timer.start()

        try:
            while timeout_event.reading:
                buf = self._device.read_data(1)

                if buf != b'':
                    ub = bytes_hack(buf)

                    self._buffer += ub

                    if ub == b"\n":
                        self._buffer = self._buffer.rstrip(b"\r\n")

                        if len(self._buffer) > 0:
                            got_line = True
                            break
                else:
                    # Nothing available; back off briefly to avoid spinning.
                    time.sleep(0.01)

        except (usb.core.USBError, FtdiError) as err:
            raise CommError('Error reading from device: {0}'.format(str(err)), err)

        else:
            if got_line:
                ret, self._buffer = self._buffer, b''

                self.on_read(data=ret)

            else:
                raise TimeoutError('Timeout while waiting for line terminator.')

        finally:
            timer.cancel()

        return ret

    def purge(self):
        """
        Purges read/write buffers.
        """
        self._device.purge_buffers()

    def _get_serial_number(self):
        """
        Retrieves the FTDI device serial number.

        :returns: string containing the device serial number
        """
        # NOTE(review): 64 appears to be the max string length in the
        # legacy pyusb get_string(dev, length, index) API -- confirm
        # against the installed pyusb version.
        return usb.util.get_string(self._device.usb_dev, 64, self._device.usb_dev.iSerialNumber)

    class DetectThread(threading.Thread):
        """
        Thread that handles detection of added/removed devices.
        """
        on_attached = event.Event("This event is called when an `AD2USB`_ device has been detected.\n\n**Callback definition:** def callback(thread, device*")
        on_detached = event.Event("This event is called when an `AD2USB`_ device has been removed.\n\n**Callback definition:** def callback(thread, device*")

        def __init__(self, on_attached=None, on_detached=None):
            """
            Constructor

            :param on_attached: Function to call when a device is attached  **Callback definition:** *def callback(thread, device)*
            :type on_attached: function
            :param on_detached: Function to call when a device is detached  **Callback definition:** *def callback(thread, device)*
            :type on_detached: function
            """
            threading.Thread.__init__(self)

            if on_attached:
                self.on_attached += on_attached

            if on_detached:
                self.on_detached += on_detached

            self._running = False

        def stop(self):
            """
            Stops the thread.
            """
            self._running = False

        def run(self):
            """
            The actual detection process.
            """
            self._running = True

            last_devices = set()

            while self._running:
                try:
                    current_devices = set(USBDevice.find_all())

                    # Fire events for devices that appeared or vanished
                    # since the previous poll.
                    for dev in current_devices.difference(last_devices):
                        self.on_attached(device=dev)

                    for dev in last_devices.difference(current_devices):
                        self.on_detached(device=dev)

                    last_devices = current_devices

                except CommError:
                    pass

                # Poll the bus four times a second.
                time.sleep(0.25)
| |
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib.common.utils import data_utils # noqa
from tempest_lib import exceptions as lib_exc # noqa
from tempest.api.share import base
from tempest import clients_share as clients
from tempest import test
class ExtraSpecsAdminNegativeTest(base.BaseSharesAdminTest):
def _create_share_type(self):
    # Build a share type with a unique name, the minimum required extra
    # specs, and one custom key to exercise in the tests.
    name = data_utils.rand_name("unique_st_name")
    specs = self.add_required_extra_specs_to_dict({"key": "value"})
    return self.create_share_type(name, extra_specs=specs)
@classmethod
def resource_setup(cls):
    super(ExtraSpecsAdminNegativeTest, cls).resource_setup()
    # Shares client authenticated as a regular (non-admin) member user,
    # used to verify that admin-only operations are forbidden.
    cls.member_shares_client = clients.Manager().shares_client
@test.attr(type=["gate", "smoke", ])
def test_try_create_extra_specs_with_user(self):
    # A non-admin user must not be able to create extra specs.
    share_type = self._create_share_type()
    type_id = share_type["share_type"]["id"]
    self.assertRaises(
        lib_exc.Forbidden,
        self.member_shares_client.create_share_type_extra_specs,
        type_id,
        self.add_required_extra_specs_to_dict({"key": "new_value"}))
@test.attr(type=["gate", "smoke", ])
def test_try_list_extra_specs_with_user(self):
    # A non-admin user must not be able to list extra specs.
    share_type = self._create_share_type()
    type_id = share_type["share_type"]["id"]
    self.assertRaises(
        lib_exc.Forbidden,
        self.member_shares_client.get_share_type_extra_specs,
        type_id)
@test.attr(type=["gate", "smoke", ])
def test_try_get_extra_spec_with_user(self):
    # A non-admin user must not be able to read a single extra spec.
    share_type = self._create_share_type()
    type_id = share_type["share_type"]["id"]
    self.assertRaises(
        lib_exc.Forbidden,
        self.member_shares_client.get_share_type_extra_spec,
        type_id, "key")
@test.attr(type=["gate", "smoke", ])
def test_try_get_extra_specs_with_user(self):
    # A non-admin user must not be able to fetch the extra specs dict.
    share_type = self._create_share_type()
    type_id = share_type["share_type"]["id"]
    self.assertRaises(
        lib_exc.Forbidden,
        self.member_shares_client.get_share_type_extra_specs,
        type_id)
@test.attr(type=["gate", "smoke", ])
def test_try_update_extra_spec_with_user(self):
    # A non-admin user must not be able to update a single extra spec.
    share_type = self._create_share_type()
    type_id = share_type["share_type"]["id"]
    self.assertRaises(
        lib_exc.Forbidden,
        self.member_shares_client.update_share_type_extra_spec,
        type_id, "key", "new_value")
@test.attr(type=["gate", "smoke", ])
def test_try_update_extra_specs_with_user(self):
    # A non-admin user must not be able to bulk-update extra specs.
    share_type = self._create_share_type()
    type_id = share_type["share_type"]["id"]
    self.assertRaises(
        lib_exc.Forbidden,
        self.member_shares_client.update_share_type_extra_specs,
        type_id, {"key": "new_value"})
@test.attr(type=["gate", "smoke", ])
def test_try_delete_extra_specs_with_user(self):
    # A non-admin user must not be able to delete an extra spec.
    share_type = self._create_share_type()
    type_id = share_type["share_type"]["id"]
    self.assertRaises(
        lib_exc.Forbidden,
        self.member_shares_client.delete_share_type_extra_spec,
        type_id, "key")
@test.attr(type=["gate", "smoke", ])
def test_try_set_too_long_key(self):
    # Keys longer than 255 characters must be rejected.
    too_big_key = "k" * 256
    share_type = self._create_share_type()
    self.assertRaises(
        lib_exc.BadRequest,
        self.shares_client.create_share_type_extra_specs,
        share_type["share_type"]["id"],
        self.add_required_extra_specs_to_dict({too_big_key: "value"}))
@test.attr(type=["gate", "smoke", ])
def test_try_set_too_long_value_with_creation(self):
    # Values longer than 255 characters must be rejected at creation.
    too_big_value = "v" * 256
    share_type = self._create_share_type()
    self.assertRaises(
        lib_exc.BadRequest,
        self.shares_client.create_share_type_extra_specs,
        share_type["share_type"]["id"],
        self.add_required_extra_specs_to_dict({"key": too_big_value}))
@test.attr(type=["gate", "smoke", ])
def test_try_set_too_long_value_with_update(self):
    # Values longer than 255 characters must be rejected on bulk update.
    too_big_value = "v" * 256
    share_type = self._create_share_type()
    type_id = share_type["share_type"]["id"]
    self.shares_client.create_share_type_extra_specs(
        type_id,
        self.add_required_extra_specs_to_dict({"key": "value"}))
    self.assertRaises(
        lib_exc.BadRequest,
        self.shares_client.update_share_type_extra_specs,
        type_id,
        self.add_required_extra_specs_to_dict({"key": too_big_value}))
@test.attr(type=["gate", "smoke", ])
def test_try_set_too_long_value_with_update_of_one_key(self):
    # Values longer than 255 characters must be rejected when updating
    # a single key.
    too_big_value = "v" * 256
    share_type = self._create_share_type()
    type_id = share_type["share_type"]["id"]
    self.shares_client.create_share_type_extra_specs(
        type_id,
        self.add_required_extra_specs_to_dict({"key": "value"}))
    self.assertRaises(
        lib_exc.BadRequest,
        self.shares_client.update_share_type_extra_spec,
        type_id, "key", too_big_value)
@test.attr(type=["gate", "smoke", ])
def test_try_list_es_with_empty_shr_type_id(self):
    # Listing extra specs for an empty share-type id must 404.
    self.assertRaises(
        lib_exc.NotFound,
        self.shares_client.get_share_type_extra_specs,
        "")
@test.attr(type=["gate", "smoke", ])
def test_try_list_es_with_invalid_shr_type_id(self):
    # Listing extra specs for a nonexistent share-type id must 404.
    self.assertRaises(
        lib_exc.NotFound,
        self.shares_client.get_share_type_extra_specs,
        data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_try_create_es_with_empty_shr_type_id(self):
    # Creating extra specs for an empty share-type id must 404.
    self.assertRaises(
        lib_exc.NotFound,
        self.shares_client.create_share_type_extra_specs,
        "", {"key1": "value1", })
@test.attr(type=["gate", "smoke", ])
def test_try_create_es_with_invalid_shr_type_id(self):
    # Creating extra specs for a nonexistent share-type id must 404.
    self.assertRaises(
        lib_exc.NotFound,
        self.shares_client.create_share_type_extra_specs,
        data_utils.rand_name("fake"), {"key1": "value1", })
@test.attr(type=["gate", "smoke", ])
def test_try_create_es_with_empty_specs(self):
    """An empty extra-specs payload must be rejected with 400."""
    share_type = self._create_share_type()
    self.assertRaises(lib_exc.BadRequest,
                      self.shares_client.create_share_type_extra_specs,
                      share_type["share_type"]["id"], "")
@test.attr(type=["gate", "smoke", ])
def test_try_create_es_with_invalid_specs(self):
    """An extra spec with an empty key must be rejected with 400."""
    share_type = self._create_share_type()
    self.assertRaises(lib_exc.BadRequest,
                      self.shares_client.create_share_type_extra_specs,
                      share_type["share_type"]["id"],
                      {"": "value_with_empty_key"})
@test.attr(type=["gate", "smoke", ])
def test_try_get_extra_spec_with_empty_key(self):
    """Reading an extra spec addressed by an empty key must return 404."""
    share_type = self._create_share_type()
    self.assertRaises(lib_exc.NotFound,
                      self.shares_client.get_share_type_extra_spec,
                      share_type["share_type"]["id"], "")
@test.attr(type=["gate", "smoke", ])
def test_try_get_extra_spec_with_invalid_key(self):
    """Reading an extra spec by a nonexistent key must return 404."""
    share_type = self._create_share_type()
    self.assertRaises(lib_exc.NotFound,
                      self.shares_client.get_share_type_extra_spec,
                      share_type["share_type"]["id"],
                      data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_try_get_extra_specs_with_empty_shr_type_id(self):
    """Reading all extra specs for an empty share-type id must 404."""
    get_specs = self.shares_client.get_share_type_extra_specs
    self.assertRaises(lib_exc.NotFound, get_specs, "")
@test.attr(type=["gate", "smoke", ])
def test_try_get_extra_specs_with_invalid_shr_type_id(self):
    """Reading all extra specs for a bogus share-type id must 404."""
    get_specs = self.shares_client.get_share_type_extra_specs
    self.assertRaises(lib_exc.NotFound, get_specs,
                      data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_try_delete_es_key_with_empty_shr_type_id(self):
    """Deleting an extra-spec key from an empty share-type id must 404."""
    delete_spec = self.shares_client.delete_share_type_extra_spec
    self.assertRaises(lib_exc.NotFound, delete_spec, "", "key", )
@test.attr(type=["gate", "smoke", ])
def test_try_delete_es_key_with_invalid_shr_type_id(self):
    """Deleting an extra-spec key from a bogus share-type id must 404."""
    delete_spec = self.shares_client.delete_share_type_extra_spec
    self.assertRaises(lib_exc.NotFound, delete_spec,
                      data_utils.rand_name("fake"), "key", )
@test.attr(type=["gate", "smoke", ])
def test_try_delete_with_invalid_key(self):
    """Deleting a nonexistent extra-spec key must return 404."""
    share_type = self._create_share_type()
    self.assertRaises(lib_exc.NotFound,
                      self.shares_client.delete_share_type_extra_spec,
                      share_type["share_type"]["id"],
                      data_utils.rand_name("fake"))
@test.attr(type=["gate", "smoke", ])
def test_try_update_spec_with_empty_shr_type_id(self):
    """Updating a spec on an empty share-type id must return 404."""
    update_spec = self.shares_client.update_share_type_extra_spec
    self.assertRaises(lib_exc.NotFound, update_spec, "", "key", "new_value")
@test.attr(type=["gate", "smoke", ])
def test_try_update_spec_with_invalid_shr_type_id(self):
    """Updating a spec on a bogus share-type id must return 404."""
    update_spec = self.shares_client.update_share_type_extra_spec
    self.assertRaises(lib_exc.NotFound, update_spec,
                      data_utils.rand_name("fake"), "key", "new_value")
@test.attr(type=["gate", "smoke", ])
def test_try_update_spec_with_empty_key(self):
    """Updating an extra spec addressed by an empty key must 404."""
    share_type = self._create_share_type()
    self.assertRaises(lib_exc.NotFound,
                      self.shares_client.update_share_type_extra_spec,
                      share_type["share_type"]["id"], "", "new_value")
@test.attr(type=["gate", "smoke", ])
def test_try_update_with_invalid_shr_type_id(self):
    """Bulk-updating specs on a bogus share-type id must return 404."""
    update_specs = self.shares_client.update_share_type_extra_specs
    self.assertRaises(lib_exc.NotFound, update_specs,
                      data_utils.rand_name("fake"), {"key": "new_value"})
@test.attr(type=["gate", "smoke", ])
def test_try_update_with_invalid_specs(self):
    """Bulk-updating with an empty-key spec must be rejected with 400."""
    share_type = self._create_share_type()
    self.assertRaises(lib_exc.BadRequest,
                      self.shares_client.update_share_type_extra_specs,
                      share_type["share_type"]["id"], {"": "new_value"})
| |
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for various tensorflow.ops.tf."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.python.platform
import numpy as np
import tensorflow as tf
from tensorflow.python.kernel_tests import gradient_checker as gc
class ShapeOpsTest(tf.test.TestCase):
  """Checks tf.shape / tf.rank / tf.size / tf.expand_dims / tf.squeeze
  against the corresponding numpy operations, on CPU and GPU."""

  def _compareShape(self, x, use_gpu=False):
    # tf.shape must agree with np.shape on a concrete ndarray input.
    np_ans = np.array(np.shape(x))
    with self.test_session(use_gpu=use_gpu):
      tf_ans = tf.shape(x)
      result = tf_ans.eval()
    self.assertAllEqual(np_ans, result)
    self.assertShapeEqual(np_ans, tf_ans)

  def _compareRank(self, x, use_gpu=False):
    # tf.rank must agree with np.ndim (number of dimensions).
    np_ans = np.asarray(np.ndim(x))
    with self.test_session(use_gpu=use_gpu):
      tf_ans = tf.rank(x)
      result = tf_ans.eval()
    self.assertAllEqual(np_ans, result)
    self.assertShapeEqual(np_ans, tf_ans)

  def _compareSize(self, x, use_gpu=False):
    # tf.size must agree with np.size (total element count).
    np_ans = np.asarray(np.size(x))
    with self.test_session(use_gpu=use_gpu):
      tf_ans = tf.size(x)
      result = tf_ans.eval()
    self.assertAllEqual(np_ans, result)
    self.assertShapeEqual(np_ans, tf_ans)

  def _testCpu(self, x):
    # Run all three comparisons on the CPU device.
    self._compareShape(x, use_gpu=False)
    self._compareRank(x, use_gpu=False)
    self._compareSize(x, use_gpu=False)

  def _testGpu(self, x):
    # Run all three comparisons with GPU placement allowed.
    self._compareShape(x, use_gpu=True)
    self._compareRank(x, use_gpu=True)
    self._compareSize(x, use_gpu=True)

  def _testAll(self, x):
    self._testCpu(x)
    self._testGpu(x)

  def testBasic(self):
    # Tensors of rank 1 through 6.
    self._testAll(np.zeros([2]))
    self._testAll(np.zeros([2, 3]))
    self._testAll(np.zeros([2, 3, 5]))
    self._testAll(np.zeros([2, 3, 5, 7]))
    self._testAll(np.zeros([2, 3, 5, 7, 11]))
    self._testAll(np.zeros([2, 3, 5, 7, 11, 13]))

  def _compareExpandDims(self, x, dim, use_gpu):
    # tf.expand_dims must agree with np.expand_dims for the same axis.
    np_ans = np.expand_dims(x, axis=dim)
    with self.test_session(use_gpu=use_gpu):
      tensor = tf.expand_dims(x, dim)
      tf_ans = tensor.eval()
    self.assertShapeEqual(np_ans, tensor)
    self.assertAllEqual(np_ans, tf_ans)

  def _compareExpandDimsAll(self, x, dim):
    self._compareExpandDims(x, dim, False)
    self._compareExpandDims(x, dim, True)

  def testExpandDims(self):
    # Every valid positive and negative axis for ranks 1-3.
    self._compareExpandDimsAll(np.zeros([2]), 0)
    self._compareExpandDimsAll(np.zeros([2]), 1)
    self._compareExpandDimsAll(np.zeros([2]), -1)
    self._compareExpandDimsAll(np.zeros([2, 3]), 0)
    self._compareExpandDimsAll(np.zeros([2, 3]), 1)
    self._compareExpandDimsAll(np.zeros([2, 3]), 2)
    self._compareExpandDimsAll(np.zeros([2, 3]), -1)
    self._compareExpandDimsAll(np.zeros([2, 3]), -2)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), 0)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), 1)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), 2)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), 3)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), -1)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), -2)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), -3)
    self._compareExpandDimsAll(np.zeros([2, 3, 5]), -4)

  def testExpandDimsErrors(self):
    # Axes outside [-rank-1, rank] must be rejected at graph build time.
    with self.test_session():
      self.assertRaises(ValueError, tf.expand_dims, np.zeros([2, 3, 5]), -5)
      self.assertRaises(ValueError, tf.expand_dims, np.zeros([2, 3, 5]), 4)

  def testExpandDimsGradient(self):
    # Numerical gradient check through the inserted dimension.
    with self.test_session():
      inp = tf.constant(np.random.rand(4, 2).astype("f"),
                        dtype=tf.float32)
      squeezed = tf.expand_dims(inp, 1)
      err = gc.ComputeGradientError(inp, [4, 2], squeezed, [4, 1, 2])
      self.assertLess(err, 1e-3)

  def testExpandDimsScalar(self):
    # Expanding a scalar yields a length-1 vector for dim 0 and -1.
    with self.test_session():
      inp = tf.constant(7)
      self.assertAllEqual([7], tf.expand_dims(inp, 0).eval())
      self.assertAllEqual([7], tf.expand_dims(inp, -1).eval())

  def _compareSqueeze(self, x, squeeze_dims, use_gpu):
    # Compare tf.squeeze with np.squeeze, with or without explicit dims.
    with self.test_session(use_gpu=use_gpu):
      if squeeze_dims:
        np_ans = np.squeeze(x, axis=tuple(squeeze_dims))
        tensor = tf.squeeze(x, squeeze_dims)
        tf_ans = tensor.eval()
      else:
        np_ans = np.squeeze(x)
        tensor = tf.squeeze(x)
        tf_ans = tensor.eval()
    self.assertShapeEqual(np_ans, tensor)
    self.assertAllEqual(np_ans, tf_ans)

  def _compareSqueezeAll(self, x, squeeze_dims=None):
    # Avoid a mutable default argument; [] means "squeeze everything".
    if squeeze_dims is None:
      squeeze_dims = []
    self._compareSqueeze(x, squeeze_dims, False)
    self._compareSqueeze(x, squeeze_dims, True)

  def testSqueeze(self):
    # Nothing to squeeze.
    self._compareSqueezeAll(np.zeros([2]))
    self._compareSqueezeAll(np.zeros([2, 3]))
    # Squeeze the middle element away.
    self._compareSqueezeAll(np.zeros([2, 1, 2]))
    # Squeeze on both ends.
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]))

  def testSqueezeSpecificDimension(self):
    # Positive squeeze dim index.
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0])
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [2, 4])
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [0, 4, 2])
    # Negative squeeze dim index.
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-1])
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5])
    self._compareSqueezeAll(np.zeros([1, 2, 1, 3, 1]), [-3, -5, -1])

  def testSqueezeAllOnes(self):
    # Numpy squeezes a 1 element tensor into a zero dimensional tensor.
    # Verify that we do the same.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        tensor = tf.squeeze(np.zeros([1, 1, 1]), [])
        self.assertEqual(np.shape(1), tensor.get_shape())
        tf_ans = tensor.eval()
        self.assertEqual(np.shape(1), tf_ans.shape)

  def testSqueezeOnlyOnes(self):
    # Only size-1 dimensions may be squeezed; dim 2 has size 3.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        input_1x1x3 = np.zeros([1, 1, 3])
        self._compareSqueezeAll(input_1x1x3)
        self._compareSqueezeAll(input_1x1x3, [0])
        self._compareSqueezeAll(input_1x1x3, [1])
        self.assertRaises(ValueError, tf.squeeze, input_1x1x3, [2])

  def testSqueezeErrors(self):
    # Out-of-range squeeze dims must raise at graph build time.
    for use_gpu in [False, True]:
      with self.test_session(use_gpu=use_gpu):
        self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [-4])
        self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [0, -4])
        self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [3])
        self.assertRaises(ValueError, tf.squeeze, np.zeros([1, 2, 1]), [2, 3])

  def testSqueezeGradient(self):
    # Numerical gradient check for the default (squeeze-all) case.
    with self.test_session():
      inp = np.random.rand(4, 2).astype("f")
      a = tf.reshape(inp, [4, 1, 2])
      squeezed = tf.squeeze(a, [])
      err = gc.ComputeGradientError(a, [4, 1, 2], squeezed, [4, 2])
      self.assertLess(err, 1e-3)

  def testSqueezeGradientWithSqueezeDims(self):
    # Numerical gradient check when only one of two size-1 dims is squeezed.
    with self.test_session():
      inp = np.random.rand(4, 2).astype("f")
      a = tf.reshape(inp, [4, 1, 2, 1])
      squeezed = tf.squeeze(a, [1])
      err = gc.ComputeGradientError(a, [4, 1, 2, 1], squeezed, [4, 2, 1])
      self.assertLess(err, 1e-3)
class TileTest(tf.test.TestCase):
  """Checks tf.tile forward results against np.tile, the shape function,
  and the tile gradient (both analytic and numeric)."""

  def testScalar(self):
    # Tiling a scalar with an empty multiples list is the identity.
    with self.test_session():
      a = tf.constant(7, shape=[], dtype=tf.float32)
      tiled = tf.tile(a, [])
      result = tiled.eval()
      self.assertEqual(result.shape, ())
      self.assertEqual([], tiled.get_shape())
      self.assertEqual(7, result)

  def testSimple(self):
    # A [4, 1] input tiled by [1, 4] must match np.tile exactly.
    with self.test_session():
      inp = np.random.rand(4, 1).astype("f")
      a = tf.constant([float(x) for x in inp.ravel(order="C")],
                      shape=[4, 1], dtype=tf.float32)
      tiled = tf.tile(a, [1, 4])
      result = tiled.eval()
      self.assertEqual(result.shape, (4, 4))
      self.assertEqual([4, 4], tiled.get_shape())
      self.assertTrue((result == np.tile(inp, (1, 4))).all())

  def testTypes(self):
    # Map from numpy dtype spec to (tf dtype, python cast for constants).
    # NOTE(review): the last key is the `bytes` *type object* while the
    # others are strings — both are accepted by ndarray.astype, but the
    # mix looks like a py2->py3 conversion artifact; confirm intent.
    types_to_test = {
        "bool": (tf.bool, bool),
        "float32": (tf.float32, float),
        "float64": (tf.float64, float),
        "uint8": (tf.uint8, int),
        "int32": (tf.int32, int),
        "int64": (tf.int64, int),
        bytes: (tf.string, bytes)
    }
    for dtype_np, (dtype_tf, cast) in types_to_test.items():
      with self.test_session():
        inp = np.random.rand(4, 1).astype(dtype_np)
        a = tf.constant([cast(x) for x in inp.ravel(order="C")],
                        shape=[4, 1],
                        dtype=dtype_tf)
        tiled = tf.tile(a, [1, 4])
        result = tiled.eval()
        self.assertEqual(result.shape, (4, 4))
        self.assertEqual([4, 4], tiled.get_shape())
        self.assertAllEqual(result, np.tile(inp, (1, 4)))

  def testInvalidDim(self):
    with self.test_session():
      inp = np.random.rand(4, 1).astype("f")
      a = tf.constant([float(x) for x in inp.ravel(order="C")],
                      shape=[4, 1], dtype=tf.float32)
      # Wrong length of multiples.
      with self.assertRaises(ValueError):
        tf.tile(a, [1, 4, 2])
      # Wrong rank for multiples.
      with self.assertRaises(ValueError):
        tf.tile(a, [[2, 3], [3, 4]]).eval()

  def _RunAndVerifyResult(self, use_gpu):
    # Tile a random rank-5 tensor by random multiples and compare to numpy.
    with self.test_session(use_gpu=use_gpu):
      # Random dims of rank 5
      input_shape = np.random.randint(1, 4, size=5)
      inp = np.random.rand(*input_shape).astype("f")
      a = tf.constant([float(x) for x in inp.ravel(order="C")],
                      shape=input_shape, dtype=tf.float32)
      multiples = np.random.randint(1, 4, size=5).astype(np.int32)
      tiled = tf.tile(a, multiples)
      result = tiled.eval()
      self.assertTrue((np.array(multiples) * np.array(inp.shape) ==
                       np.array(result.shape)).all())
      self.assertAllEqual(result, np.tile(inp, tuple(multiples)))
      self.assertShapeEqual(result, tiled)

  def testRandom(self):
    for _ in range(5):
      self._RunAndVerifyResult(use_gpu=False)
    for _ in range(5):
      self._RunAndVerifyResult(use_gpu=True)

  def testGradientSimpleReduction(self):
    # Gradient of tile along a broadcast dim is a sum over the copies.
    with self.test_session():
      inp = np.random.rand(4, 1).astype("f")
      a = tf.constant([float(x) for x in inp.flatten()],
                      shape=[4, 1], dtype=tf.float32)
      tiled = tf.tile(a, [1, 4])
      grad_shape = [4, 4]
      grad_inp = np.random.rand(*grad_shape).astype("f")
      grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
                                shape=grad_shape)
      grad = tf.gradients([tiled], [a], [grad_tensor])[0]
      self.assertShapeEqual(inp, grad)
      result = grad.eval()
      self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)

  def testGradientStridedReduction(self):
    # With multiples [1, 2] the incoming gradient columns fold pairwise.
    with self.test_session():
      inp = np.random.rand(4, 2).astype("f")
      a = tf.constant([float(x) for x in inp.flatten()],
                      shape=[4, 2], dtype=tf.float32)
      tiled = tf.tile(a, [1, 2])
      grad_shape = [4, 4]
      grad_inp = np.random.rand(*grad_shape).astype("f")
      grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
                                shape=grad_shape)
      grad = tf.gradients([tiled], [a], [grad_tensor])[0]
      self.assertShapeEqual(inp, grad)
      result = grad.eval()
      expected_shape = [4, 2]
      expected = np.zeros(expected_shape)
      expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
      expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
      self.assertTrue((np.abs(expected - result) < 1e-3).all())

  def testGradientSimpleReductionOnGPU(self):
    # Same as testGradientSimpleReduction but with GPU placement allowed.
    with self.test_session(use_gpu=True):
      inp = np.random.rand(4, 1).astype("f")
      a = tf.constant([float(x) for x in inp.flatten()],
                      shape=[4, 1], dtype=tf.float32)
      tiled = tf.tile(a, [1, 4])
      grad_shape = [4, 4]
      grad_inp = np.random.rand(*grad_shape).astype("f")
      grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
                                shape=grad_shape)
      grad = tf.gradients([tiled], [a], [grad_tensor])[0]
      result = grad.eval()
      self.assertAllClose(np.sum(grad_inp, axis=1).reshape(4, 1), result, 1e-3)

  def testGradientStridedReductionOnGPU(self):
    # Same as testGradientStridedReduction but with GPU placement allowed.
    with self.test_session(use_gpu=True):
      inp = np.random.rand(4, 2).astype("f")
      a = tf.constant([float(x) for x in inp.flatten()],
                      shape=[4, 2], dtype=tf.float32)
      tiled = tf.tile(a, [1, 2])
      grad_shape = [4, 4]
      grad_inp = np.random.rand(*grad_shape).astype("f")
      grad_tensor = tf.constant([float(x) for x in grad_inp.flatten()],
                                shape=grad_shape)
      grad = tf.gradients([tiled], [a], [grad_tensor])[0]
      result = grad.eval()
      expected_shape = [4, 2]
      expected = np.zeros(expected_shape)
      expected[:, 0] = grad_inp[:, 0] + grad_inp[:, 2]
      expected[:, 1] = grad_inp[:, 1] + grad_inp[:, 3]
      self.assertAllClose(expected, result, 1e-3)

  def _RunAndVerifyGradientResult(self, input_shape, multiples):
    # Numerical gradient check (float64) for an arbitrary tile.
    with self.test_session():
      # Random values
      inp = np.random.rand(*input_shape)
      a = tf.constant([float(x) for x in inp.flatten()],
                      shape=input_shape, dtype=tf.float64)
      tiled = tf.tile(a, multiples)
      grad_shape = list(np.array(multiples) * np.array(inp.shape))
      err = gc.ComputeGradientError(a, list(input_shape), tiled, grad_shape,
                                    x_init_value=inp)
      print("tile(float) error = ", err)
      self.assertLess(err, 1e-3)

  def testGradientRandom(self):
    self._RunAndVerifyGradientResult([2, 2, 1, 1, 3], [1, 2, 1, 3, 1])
    self._RunAndVerifyGradientResult([2, 3, 1, 1, 3], [3, 1, 1, 2, 2])
    self._RunAndVerifyGradientResult([2, 1, 3, 3, 2], [1, 3, 3, 1, 2])

  def testGradientStridedReductionGC(self):
    with self.test_session():
      inp = np.random.rand(4, 2).astype("f")
      a = tf.constant([float(x) for x in inp.flatten()],
                      shape=[4, 2], dtype=tf.float32)
      tiled = tf.tile(a, [1, 2])
      err = gc.ComputeGradientError(a, [4, 2], tiled, [4, 4])
      self.assertLess(err, 1e-3)

  def testShapeFunctionEdgeCases(self):
    # Static shape inference when multiples and/or input are unknown.
    # Unknown multiples shape.
    inp = tf.constant(0.0, shape=[4, 4, 4, 4])
    tiled = tf.tile(inp, tf.placeholder(tf.int32))
    self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
    # Unknown input shape.
    inp = tf.placeholder(tf.float32)
    tiled = tf.tile(inp, [2, 2, 2, 2])
    self.assertEqual([None, None, None, None], tiled.get_shape().as_list())
    # Unknown input and multiples shape.
    inp = tf.placeholder(tf.float32)
    tiled = tf.tile(inp, tf.placeholder(tf.int32))
    self.assertIs(None, tiled.get_shape().ndims)
# Run all test cases in this module when executed directly.
if __name__ == "__main__":
  tf.test.main()
| |
# Copyright 2013 Graham McVicker and Bryce van de Geijn
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import math
import time
import gzip
import argparse
from scipy.optimize import *
from scipy import cast
from scipy.special import gammaln
from scipy.special import betaln
import scipy.stats
import numpy as np
from random import shuffle
from random import randint
import pdb
#global log_table
#log_table=[float("-inf")]+[log(x) for x in range(1,1000000)]
class TestSNP:
    """One test SNP: its phased genotype plus read-count data used by the
    combined haplotype test (allele-specific counts at linked target SNPs,
    heterozygote probabilities, and region/total read counts)."""

    def __init__(self, name, geno_hap1, geno_hap2, AS_target_ref, AS_target_alt,
                 hetps, totals, counts):
        self.name = name
        # genotype on each haplotype: 0 = reference allele, 1 = alternate
        self.geno_hap1 = geno_hap1
        self.geno_hap2 = geno_hap2
        # per-target-SNP allele-specific read counts (ref / alt)
        self.AS_target_ref = AS_target_ref
        self.AS_target_alt = AS_target_alt
        # per-target-SNP heterozygote probabilities
        self.hetps = hetps
        self.totals = totals
        self.counts = counts

    def is_het(self):
        """True when the two haplotype genotypes differ."""
        return self.geno_hap1 != self.geno_hap2

    def is_homo_ref(self):
        """True when both haplotypes carry the reference allele (0|0)."""
        return (self.geno_hap1, self.geno_hap2) == (0, 0)

    def is_homo_alt(self):
        """True when both haplotypes carry the alternate allele (1|1)."""
        return (self.geno_hap1, self.geno_hap2) == (1, 1)
def open_input_files(in_filename):
    """Open every data file listed (one path per line) in *in_filename*.

    Each listed file is opened (transparently via gzip for '.gz' paths),
    its header line is consumed, and the open handle is collected.

    Returns the list of open file objects, positioned just past their
    header line.  Exits the process with status 2 on a missing/invalid
    path or an empty file list.
    """
    if not os.path.exists(in_filename) or not os.path.isfile(in_filename):
        sys.stderr.write("input file %s does not exist or is not a regular file\n" %
                         in_filename)
        exit(2)
    # read file that contains list of input files
    in_file = open(in_filename)
    infiles = []
    for line in in_file:
        # open each input file and read first line
        filename = line.rstrip()
        if not filename or not os.path.exists(filename) or not os.path.isfile(filename):
            # BUG FIX: previously interpolated the raw 'line' (with its
            # trailing newline) instead of the cleaned filename.
            sys.stderr.write("input file '%s' does not exist or is not a regular file\n"
                             % filename)
            exit(2)
        if filename.endswith(".gz"):
            f = gzip.open(filename)
        else:
            f = open(filename)
        # skip header
        f.readline()
        infiles.append(f)
    in_file.close()
    if len(infiles) == 0:
        # BUG FIX: this branch referenced the undefined name
        # 'options.infile_list' and raised NameError instead of reporting
        # the problem; use the function's own argument.
        sys.stderr.write("no input files specified in file '%s'\n" % in_filename)
        exit(2)
    return infiles
def main():
    """Driver for the combined haplotype test.

    Reads one row per test SNP, in lockstep, from every per-individual
    input file; for each SNP fits a null model (alpha == beta, no
    genotype effect) and an alternative model (separate alpha/beta) with
    scipy's fmin, and writes a likelihood-ratio chi-square statistic per
    SNP to the output file.
    """
    options = parse_options()

    # Optional principal-component covariates for the depth model.
    if options.pc_file:
        pc_matrix=load_covariates(options.pc_file)
        num_pcs=options.num_pcs
    else:
        pc_matrix=[]
        num_pcs=0

    if options.out_file.endswith(".gz"):
        outfile = gzip.open(options.out_file, "wb")
    else:
        outfile = open(options.out_file, 'w')

    infiles = open_input_files(options.infile_list)

    # Per-individual beta-negative-binomial dispersions (one value per
    # line); defaults to 0.001 for every individual when no file given.
    if (options.bnb_disp):
        disp_file=open(options.bnb_disp)
        line=disp_file.readline()
        bnb_sigmas=[]
        while line:
            bnb_sigmas.append(np.float64(line.strip()))
            line=disp_file.readline()
        disp_file.close()
    else:
        bnb_sigmas=[0.001]*len(infiles)

    # Per-individual allele-specific (beta-binomial) dispersions.
    if (options.as_disp):
        disp_file=open(options.as_disp)
        line=disp_file.readline()
        as_sigmas=[]
        while line:
            as_sigmas.append(np.float64(line.strip()))
            line=disp_file.readline()
        disp_file.close()
    else:
        as_sigmas=[0.001]*len(infiles)

    # add first row of each input file to snpinfo list
    snpinfo = []
    for f in infiles:
        snpinfo.append(f.readline().strip().split())

    row_count = 0
    finished=False
    while not finished:
        try:
            test_snps=[]
            # parse test SNP and associated info from input file row
            for i in range(len(infiles)):
                test_snps.append(parse_test_snp(snpinfo[i], options))

            # how many allele-specific reads are there across all linked SNPs and
            # and individuals?
            totcounts = sum([np.sum(x.AS_target_ref) + np.sum(x.AS_target_alt)
                             for x in test_snps])

            if totcounts < options.min_counts:
                if options.verbose:
                    sys.stderr.write("-----\nskipping SNP %s because "
                                     "total AS counts %d <= %d\n" %
                                     (test_snps[0].name, totcounts, options.min_counts))
                # skip, not enough allele-specific counts
                for i in range(len(infiles)):
                    line=infiles[i].readline().strip()
                    if line:
                        snpinfo[i] = line.split()
                    else:
                        # out of lines from at least one file, assume we are finished
                        finished = True
                continue

            if options.verbose:
                sys.stderr.write("-----\ntesting SNP %s\n" % test_snps[0].name)
            row_count+=1

            # Remember pre-permutation genotypes for the diagnostic dump.
            old_genos=[test_snps[y].geno_hap1 + test_snps[y].geno_hap2 for y in range(len(test_snps))]
            if options.shuffle:
                # permute genotypes
                perm = range(len(test_snps))
                shuffle(perm)
                geno1temp = [test_snps[y].geno_hap1 for y in perm]
                geno2temp = [test_snps[y].geno_hap2 for y in perm]
                for i in range(len(test_snps)):
                    test_snps[i].geno_hap1 = geno1temp[i]
                    test_snps[i].geno_hap2 = geno2temp[i]

            t1=time.time()
            # Try the null fit from several starting dispersion values.
            starting_gene=[np.float64(x) for x in [0.1,0.001]] #np.float64(-4),np.float(2),np.float(10)]
            maxlike=10000000000
            for start in starting_gene:
                starts=[np.float64(0.5),np.float64(start)]
                # regress against the covariates and get residuals
                #fit_cov(test_snps,cov_table)
                # maximize likelihood with alpha = beta (no difference between genotypes)
                new_par = fmin(ll_one,starts, args=(test_snps, True, #options.is_bnb_only,
                                                    options.is_as_only,
                                                    bnb_sigmas,
                                                    as_sigmas,
                                                    options.read_error_rate,
                                                    [],
                                                    pc_matrix),
                               disp=options.verbose,maxiter=50000,maxfun=50000)
                new_loglike = ll_one(new_par, test_snps, options.is_bnb_only,
                                     options.is_as_only, bnb_sigmas,
                                     as_sigmas, options.read_error_rate,
                                     [], pc_matrix)
                # NOTE(review): maxlike is never updated inside this loop,
                # so every start passes the test and the LAST start wins —
                # looks unintentional; confirm against upstream.
                if new_loglike<maxlike:
                    starting_par = new_par

            # Fit one PC coefficient at a time, holding earlier ones fixed.
            pc_coefs=[]
            for pc in range(num_pcs):
                new_coef=fmin(ll_pc,[np.float64(0)], args=(starting_par,test_snps, True, #options.is_bnb_only,
                                                           options.is_as_only,
                                                           bnb_sigmas,#options.bnb_sigma,
                                                           as_sigmas,
                                                           options.read_error_rate,
                                                           pc_coefs,
                                                           pc_matrix),
                              disp=options.verbose,maxiter=500000,maxfun=500000)
                pc_coefs = np.concatenate([pc_coefs,new_coef])

            # Final null-model fit with the PC coefficients fixed.
            best1par = fmin(ll_one,starting_par, args=(test_snps, options.is_bnb_only,
                                                       options.is_as_only,
                                                       bnb_sigmas,#options.bnb_sigma,
                                                       as_sigmas,
                                                       options.read_error_rate,
                                                       pc_coefs,
                                                       pc_matrix),
                            disp=options.verbose,maxiter=50000,maxfun=50000,ftol=1e-6, xtol=1e-6)
            if options.verbose:
                sys.stderr.write("null model optimization took %.3fs\n" % (time.time()-t1))
            loglike1par = ll_one(best1par, test_snps, options.is_bnb_only,
                                 options.is_as_only, bnb_sigmas,
                                 as_sigmas, options.read_error_rate,
                                 pc_coefs, pc_matrix)

            #start=[best1par[0],best1par[0],best1par[1]]
            start=[best1par[0],best1par[0],best1par[1]]
            t1=time.time()
            # maximize likelihood with alpha and beta as separate parameters
            best2par = fmin(ll_two, start, args=(test_snps,
                                                 options.is_bnb_only,
                                                 options.is_as_only,
                                                 bnb_sigmas,
                                                 as_sigmas,
                                                 options.read_error_rate,
                                                 pc_coefs,
                                                 pc_matrix),
                            disp=options.verbose,maxiter=50000,maxfun=50000,ftol=1e-6,xtol=1e-6)
            if options.verbose:
                sys.stderr.write("alternative model optimization took %.3fs\n" % (time.time()-t1))
            loglike2par = ll_two(best2par, test_snps, options.is_bnb_only,
                                 options.is_as_only, bnb_sigmas,
                                 as_sigmas, options.read_error_rate,
                                 pc_coefs, pc_matrix)

            # compute likelihood ratio test statistic:
            chisq = 2*(loglike1par-loglike2par)
            # Null likelihood re-evaluated at the alternative's dispersion.
            loglike1par_up = ll_one([best1par[0],best2par[2]], test_snps, options.is_bnb_only,
                                    options.is_as_only, bnb_sigmas,
                                    as_sigmas, options.read_error_rate,
                                    pc_coefs, pc_matrix)
            # Diagnostic dump of fit parameters and per-individual data.
            if True: #2*abs(loglike2par-loglike1par) > 10:
                sys.stderr.write("%f %f %f %f %f %f %f %f %f\n" % (best1par[0],best2par[0],best2par[1],best1par[1],best2par[2],loglike1par,loglike1par_up,loglike2par,(loglike1par-loglike2par)*2))
                sys.stderr.write(", ".join([str(test_snps[i].counts) for i in range(len(test_snps))])+"\n")
                sys.stderr.write(", ".join([str(test_snps[i].totals) for i in range(len(test_snps))])+"\n")
                sys.stderr.write(", ".join([str(test_snps[i].geno_hap1 + test_snps[i].geno_hap2) for i in range(len(test_snps))])+"\n")
                sys.stderr.write(", ".join([str(old_genos[i]) for i in range(len(test_snps))])+"\n")
            all_counts=sum([test_snps[i].counts for i in range(len(test_snps))])
            # write result to output file
            outfile.write("\t".join([snpinfo[0][0], snpinfo[0][1],
                                     str(chisq), str(best2par[0]),
                                     str(best2par[1]), str(best2par[2]),
                                     str(totcounts),str(all_counts)]) + '\n')
            outfile.flush()
        except Exception as e:
            # an error occurred: write a placeholder row of 0s for this SNP
            sys.stderr.write("An error occurred, writing line with 0s for SNP:\n%s\n" % str(e))
            outfile.write("\t".join([snpinfo[0][0], snpinfo[0][1],
                                     "0", "0", "0", "0", "0"]) + '\n')
            #continue
            raise

        # read next set of lines from input file
        for i in range(len(infiles)):
            line = infiles[i].readline().strip()
            if line:
                snpinfo[i] = line.split()
            else:
                # out of lines from at least one file, assume we are finished
                finished = True
def parse_options():
    """Parse command-line arguments for the combined haplotype test.

    Positional arguments: infile_list (file naming one input file per
    individual) and out_file (results; gzipped when it ends in '.gz').
    """
    parser=argparse.ArgumentParser()
    parser.add_argument("-a", action='store_true',
                        dest='is_as_only', default=False,
                        help="only perform the allele-specific part (Beta Binomial) "
                        "part of the test")
    parser.add_argument("-d", action='store_true',
                        dest='is_bnb_only', default=False,
                        help="only perform the association (Beta Negative Binomial) part "
                        "of the test")
    parser.add_argument("--pc-file", action='store',
                        dest='pc_file',
                        help="file containing PC covariates to include in the model"
                        ,default=None)
    parser.add_argument("-b", action='store', dest='bnb_disp',
                        help="file containing depth (Beta Negative Binomial) dispersion parameters", default=None)
    parser.add_argument("-o", action='store',
                        dest='as_disp',
                        help="file containing allele-specific (Beta Binomial) dispersion "
                        "parameters", default=None)
    parser.add_argument("-s", action='store_true',
                        dest='shuffle', default=False,
                        help="permute genotypes")
    parser.add_argument("-e", action='store', dest='read_error_rate',
                        help="estimate of error rate, used to update "
                        "heterozygous genotype probabilities "
                        "(currently this option disabled / not used)",
                        type=float, default=0.005)
    parser.add_argument("-m", action='store', dest='min_counts',
                        type=int, default=0,
                        help="only perform test when total number of allele-specific "
                        "read counts across individuals > MIN_COUNTS")
    parser.add_argument("--num-pcs", action='store', dest='num_pcs',
                        type=int, default=0,
                        help="designates the number of PCs to use as covariates")
    parser.add_argument("-v", action='store_true', dest='verbose',
                        default=False, help="print extra information")
    parser.add_argument("infile_list", action='store', default=None)
    parser.add_argument("out_file", action='store', default=None)
    return parser.parse_args()
def addlogs(loga, logb):
    """Numerically stable log(exp(loga) + exp(logb))."""
    # Factor out the larger term so the exponential never overflows.
    hi, lo = (loga, logb) if loga >= logb else (logb, loga)
    return hi + math.log(1 + math.exp(lo - hi))
#Given parameters, returns log likelihood. Note that some parts have been cancelled out
def AS_betabinom_loglike(logps, sigma, AS1, AS2, hetp, error):
    """Beta-binomial log-likelihood of allele-specific counts at one SNP.

    logps: log allele probabilities [log p_ref, log p_alt]; sigma:
    beta-binomial dispersion; AS1/AS2: ref/alt read counts; hetp:
    heterozygote probability; error: per-read error rate.  Constant
    terms common to all parameter values are omitted.
    """
    # Shared concentration scaling derived from the dispersion.
    log_conc = math.log(1 / sigma ** 2 - 1)
    a = math.exp(logps[0] + log_conc)
    b = math.exp(logps[1] + log_conc)
    # Likelihood given a true heterozygote.
    het_like = betaln(AS1 + a, AS2 + b) - betaln(a, b)
    if hetp == 1:
        return het_like
    # Likelihood given a mis-called homozygote: all reads on one allele
    # except sequencing errors (either orientation).
    e1 = math.log(error) * AS1 + math.log(1 - error) * AS2
    e2 = math.log(error) * AS2 + math.log(1 - error) * AS1
    err_like = addlogs(e1, e2)
    if hetp == 0:
        return err_like
    # Mixture of the two cases, weighted by the het probability.
    return addlogs(math.log(hetp) + het_like, math.log(1 - hetp) + err_like)
def betaln_asym(a, b):
    """ln B(a, b), switching to an asymptotic expansion for huge arguments.

    scipy's betaln is used directly when the larger argument is below
    1e6; beyond that an expansion in 1/a avoids precision loss.
    """
    # Order so that a holds the larger argument.
    if b > a:
        a, b = b, a
    if a < 1e6:
        return betaln(a, b)
    # ln B(a,b) ~ ln Gamma(b) - b ln a + correction terms in 1/a.
    result = gammaln(b)
    result -= b*math.log(a)
    result += b*(1-b)/(2*a)
    result += b*(1-b)*(1-2*b)/(12*a*a)
    result -= ((b*(1-b))**2)/(12*a**3)
    return result
def BNB_loglike(k, mean, sigma, n):
    """Beta-negative-binomial log-likelihood of observing k reads.

    k: observed read count; mean: expected count under the model;
    sigma: overdispersion-style parameter (values below 1e-5 collapse
    to a plain negative binomial); n: NB size parameter.

    BUG FIX: removed a leftover pdb.set_trace() that halted every
    likelihood evaluation, plus a no-op try/except, the unused n_val
    variable, and a self-assignment of sigma.
    """
    # Guard against a zero/negative mean, which would break the logs.
    mean = max(mean, 0.00001)
    # Log success/failure probabilities of the NB (n, p) parameterization.
    logps = [math.log(n) - math.log(n + mean),
             math.log(mean) - math.log(n + mean)]
    p = np.float64(n / (n + mean))
    if sigma < 0.00001:
        # Negligible dispersion: plain negative binomial likelihood.
        return -betaln(n, k + 1) - math.log(n + k) + n * logps[0] + k * logps[1]
    # Convert sigma into beta concentration parameters a, b.
    sigma = (1 / sigma) ** 2
    a = p * sigma + 1
    b = (1 - p) * sigma
    # Rising Pochhammer term gamma(k+n)/gamma(n), expressed via betaln.
    if k > 0:
        loglike = -betaln_asym(n, k) - math.log(k)
    else:
        loglike = 0
    # Add log(beta(a+n, b+k)) and subtract log(beta(a, b)).
    loglike += betaln_asym(a + n, b + k)
    loglike -= betaln_asym(a, b)
    return loglike
#def ll_pc(x, test_snps, is_bnb_only, is_as_only, bnb_sigma, as_sigma,error, pcs):
#def fit_cov(test_snps, cov_table):
# if len(cov_table)==0:
# return
# counts=[snp.counts for snp in test_snps]
# lambdas=[snp.totals for snp in test_snps]
# sys.stderr.write(str(counts)+"\n")
# sys.stderr.write(str(lambdas)+"\n")
# ys=[(counts[i]-lambdas[i])/lambdas[i] for i in range(len(counts))]
# sys.stderr.write(str(ys)+"\n")
# sys=np.log(ys)
# fit=np.linalg.lstsq(ys,cov_table)
# fitted_values=(ys-residuals+lambdas)*lambdas
#
# for i in range(len(fitted_values)):
# test_snps[i].totals=fitted_values[i]
def ll_one(x, test_snps, is_bnb_only, is_as_only, bnb_sigmas, as_sigmas, error, pc_coefs, pc_matrix):
    """Negative log-likelihood for the null model (alpha == beta).

    x packs [shared expression parameter, dispersion r] for scipy.fmin.
    """
    shared, r = x[0], x[1]
    return loglikelihood(shared, shared, r, test_snps, is_bnb_only, is_as_only,
                         bnb_sigmas, as_sigmas, error, pc_coefs, pc_matrix)
def ll_pc(x, params, test_snps, is_bnb_only, is_as_only, bnb_sigmas, as_sigmas, error, other_pc_coefs, pc_matrix):
    """Negative log-likelihood as a function of ONE new PC coefficient.

    params holds the fixed null-model [shared, r]; x is the single
    coefficient being optimized, appended after the already-fit ones.
    """
    shared, r = params[0], params[1]
    coefs = np.concatenate([other_pc_coefs, x])
    return loglikelihood(shared, shared, r, test_snps, is_bnb_only, is_as_only,
                         bnb_sigmas, as_sigmas, error, coefs, pc_matrix)
def ll_two(x, test_snps, is_bnb_only, is_as_only, bnb_sigmas, as_sigmas, error, pc_coefs, pc_matrix):
    """Negative log-likelihood for the alternative model.

    x packs [alpha, beta, r]: separate per-allele expression parameters
    plus the dispersion, as optimized by scipy.fmin.
    """
    alpha, beta, r = x[0], x[1], x[2]
    return loglikelihood(alpha, beta, r, test_snps, is_bnb_only, is_as_only,
                         bnb_sigmas, as_sigmas, error, pc_coefs, pc_matrix)
def calc_pc_factor(pc_fits, pcs, i):
    """Multiplicative covariate adjustment for individual i.

    Returns 1 + <pc_fits, pcs[i]> over the fitted coefficients, or 1
    when no coefficients have been fit yet.
    """
    if not len(pc_fits):
        return 1
    return 1 + sum(pc_fits * pcs[i, :len(pc_fits)])
def loglikelihood(alpha,beta,r, test_snps, is_bnb_only, is_as_only, bnb_sigmas, as_sigmas,error,pc_coefs,pc_matrix):
    """Negative combined log-likelihood over all individuals.

    alpha/beta: per-allele expression parameters; r: dispersion-style
    parameter passed to BNB_loglike (must lie in (0, 1]).  Returns a
    large constant (1e7) outside the valid parameter region, which
    steers scipy's fmin away from it.
    """
    loglike = 0
    #if input values are outside of reasonable range return a very high -loglike
    if alpha <= 0 or beta <= 0 or r<=0 or r>1:
        return 10000000
    #r=math.exp(r)/(1+math.exp(r)) #keep r between 0 and 1
    #r=(r+0.01)/(1.01)
    #r=math.exp(r)
    # NOTE(review): 'ratio' is computed but never used below.
    ratio = (alpha / (alpha + beta))
    for i in range(len(test_snps)):
        #if i in (17,36,49,51):
        #    continue
        # Expected read depth m for individual i: scaled by genotype
        # class and the PC covariate factor.
        if(test_snps[i].is_homo_ref()):
            m = 2*alpha*test_snps[i].totals *calc_pc_factor(pc_coefs,pc_matrix,i)
        elif(test_snps[i].is_homo_alt()):
            m = 2*beta*test_snps[i].totals *calc_pc_factor(pc_coefs,pc_matrix,i)
        else:
            m = (alpha+beta)*test_snps[i].totals *calc_pc_factor(pc_coefs,pc_matrix,i)
        if m<0:
            m=0.000001
        if not is_bnb_only:
            # Allele-specific (beta-binomial) component over linked SNPs;
            # only confident heterozygotes (hetp > 0.9) contribute.
            for j in range(len(test_snps[i].AS_target_ref)):
                if test_snps[i].hetps[j]>.9:
                    hetp = min(.99,test_snps[i].hetps[j])
                    logps = [math.log(alpha) - math.log(alpha+beta),
                             math.log(beta) - math.log(alpha+beta)]
                    loglike += AS_betabinom_loglike(logps, as_sigmas[i],
                                                    test_snps[i].AS_target_ref[j],
                                                    test_snps[i].AS_target_alt[j],
                                                    hetp, error)
        if not is_as_only:
            #sys.stderr.write(str(bnb_sigmas)+"\n")
            #sys.stderr.write(str(bnb_sigmas[i])+"\n")
            # Read-depth (beta-negative-binomial) component.
            l = BNB_loglike(test_snps[i].counts, m, r, bnb_sigmas[i])
            loglike += l
    return -loglike
def parse_test_snp(snpinfo, options):
    """Build a TestSNP from one whitespace-split input row.

    Column usage inferred from the indices below (0-based): 2 = SNP id,
    6 = phased genotype 'a|b', 9 = linked target SNP positions,
    10 = het probabilities, 11 = linkage probabilities, 12/13 = ref/alt
    allele-specific counts, 15 = region count, 16 = total count; the
    string "NA" marks missing data.  TODO confirm against the file spec.
    """
    snp_id = snpinfo[2]
    if snpinfo[16] == "NA":
        # SNP is missing data
        tot = 0
    else:
        # rescale these to put totals in reasonable range
        # better approach might be to divide by minimum total
        # across individuals
        #if tot>10000:
        tot = float(snpinfo[16]) #/1000000
    if snpinfo[6] == "NA":
        # Missing genotype is treated as homozygous reference.
        geno_hap1 = 0
        geno_hap2 = 0
    else:
        geno_hap1 = int(snpinfo[6].strip().split("|")[0])
        geno_hap2 = int(snpinfo[6].strip().split("|")[1])
    if snpinfo[15] == "NA":
        count = 0
    else:
        count = int(snpinfo[15])
    #if snpinfo[9].strip() == "NA":
    # SNP is homozygous, so there is no AS info
    # return TestSNP(snp_id, geno_hap1, geno_hap2, [], [], [], tot, count)
    if snpinfo[9].strip() == "NA" or geno_hap1 == geno_hap2:
        # SNP is homozygous, so there is no AS info
        return TestSNP(snp_id, geno_hap1, geno_hap2, [], [], [], tot, count)
    else:
        # positions of target SNPs (not currently used)
        snplocs=[int(y.strip()) for y in snpinfo[9].split(';')]
        # counts of reads that match reference overlapping linked 'target' SNPs
        AS_target_ref = [int(y) for y in snpinfo[12].split(';')]
        # counts of reads that match alternate allele
        AS_target_alt = [int(y) for y in snpinfo[13].split(';')]
        # heterozygote probabilities
        hetps = [np.float64(y.strip()) for y in snpinfo[10].split(';')]
        # linkage probabilities, not currently used
        linkageps = [np.float64(y.strip()) for y in snpinfo[11].split(';')]
        if options.shuffle:
            # permute allele-specific read counts by flipping them randomly at
            # each SNP
            for y in range(len(AS_target_ref)):
                if randint(0,1) == 1:
                    temp=AS_target_ref[y]
                    AS_target_ref[y] = AS_target_alt[y]
                    AS_target_alt[y] = temp
        return TestSNP(snp_id, geno_hap1, geno_hap2, AS_target_ref,
                       AS_target_alt, hetps, tot, count)
def load_covariates(cov_file):
    """Read a whitespace-delimited numeric covariate table.

    Each line of `cov_file` is one row (one individual); every token is
    parsed as float64.

    Args:
        cov_file: path to the covariate text file.

    Returns:
        np.array of the parsed rows with dtype float64.

    Fix: the original opened the file and never closed it (resource leak)
    and used a manual readline()/break loop; use a context manager and
    iterate the file directly. Blank lines still yield empty rows, exactly
    as before.
    """
    with open(cov_file) as infile:
        cov_table = [[np.float64(x) for x in line.strip().split()]
                     for line in infile]
    return np.array(cov_table, dtype=np.float64)
# Script entry point: runs unconditionally at import time (no __main__ guard).
main()
| |
import time

import webapp2

from google.appengine.api import channel

import config
import web.forms as forms
from web.basehandler import BaseHandler
from web.basehandler import user_required, admin_required
from web.models.models import User
from web.models.models import Flavor, Image
class AdminHandler(BaseHandler):
    """Render the admin status dashboard."""

    @user_required
    @admin_required
    def get(self):
        # Resolve the authenticated admin (the lookup result is not used
        # beyond the fetch, matching the original behaviour).
        user_info = User.get_by_id(long(self.user_id))
        return self.render_template('admin/status.html')
class UsersHandler(BaseHandler):
    """List every registered user for administrators."""

    @user_required
    @admin_required
    def get(self):
        # authenticated-admin lookup (value unused beyond the fetch)
        user_info = User.get_by_id(long(self.user_id))
        return self.render_template('admin/users.html', users=User.get_all())
class UsersExportHandler(BaseHandler):
    """Serve the full user list as a downloadable CSV attachment."""

    @user_required
    @admin_required
    def get(self):
        # authenticated-admin lookup (value unused beyond the fetch)
        user_info = User.get_by_id(long(self.user_id))
        all_users = User.get_all()
        # mark the rendered template as a CSV download
        self.response.headers['Content-Type'] = "text/csv"
        self.response.headers['Content-Disposition'] = "attachment; filename=users.csv"
        return self.render_template('admin/user.csv', users=all_users)
class FlavorsListHandler(BaseHandler):
    """Admin page for listing existing flavors and creating new ones."""

    @user_required
    @admin_required
    def get(self):
        """Render the flavor list plus a channel used for live page refreshes."""
        # lookup user's auth info
        user_info = User.get_by_id(long(self.user_id))
        # look up user's articles
        flavors = Flavor.get_all()
        # setup channel to do page refresh
        channel_token = user_info.key.urlsafe()
        refresh_channel = channel.create_channel(channel_token)
        params = {
            'flavors': flavors,
            'refresh_channel': refresh_channel,
            'channel_token': channel_token
        }
        return self.render_template('admin/flavors.html', **params)

    @user_required
    @admin_required
    def post(self):
        """Create a new Flavor from the submitted form, then redirect."""
        if not self.form.validate():
            self.add_message("The form did not validate.", 'error')
            return self.get()
        # load values out of the form
        name = self.form.name.data.strip()
        vpus = self.form.vpus.data
        memory = self.form.memory.data
        disk = self.form.disk.data
        network_down = self.form.network_down.data
        network_up = self.form.network_up.data
        rate = self.form.rate.data
        # save the flavor in our database
        flavor = Flavor(
            name = name,
            vpus = vpus,
            memory = memory,
            disk = disk,
            network_down = network_down,
            network_up = network_up,
            rate = rate, # current market rate
            launches = 0, # number of total launches
            hot = 2 # suggest minimum two instance addresses hot
        )
        flavor.put()
        # log to alert
        self.add_message(('Flavor %s successfully created!' % name), 'success')
        # give it a few seconds to update db, then redirect
        time.sleep(1)
        return self.redirect_to('admin-flavors')

    @webapp2.cached_property
    def form(self):
        """Lazily-constructed WTForms form used by post()."""
        return forms.FlavorForm(self)
class FlavorsActionsHandler(BaseHandler):
    """Per-flavor actions: enable/disable (PUT) and delete (DELETE)."""

    @user_required
    @admin_required
    def put(self, flavor_id = None):
        """Toggle a flavor's active state; `enable` == '1' activates it."""
        flavor = Flavor.get_by_id(long(flavor_id))
        # get the enable/active state
        enable = self.request.get("enable")
        if flavor:
            if enable == '1':
                flavor.active = True
                flavor.put()
            else:
                flavor.active = False
                flavor.put()
        # hangout for a second
        # NOTE: unlike delete(), no channel reload message is sent here.
        time.sleep(1)
        return

    @user_required
    @admin_required
    def delete(self, flavor_id = None):
        """Delete a flavor, then signal the browser to reload via channel."""
        # delete the entry from the db
        flavor = Flavor.get_by_id(long(flavor_id))
        if flavor:
            flavor.key.delete()
            self.add_message('Flavor successfully deleted!', 'success')
        else:
            self.add_message('Flavor was not deleted. Something went horribly wrong somewhere!', 'warning')
        # hangout for a second
        time.sleep(1)
        # use the channel to tell the browser we are done and reload
        channel_token = self.request.get('channel_token')
        channel.send_message(channel_token, 'reload')
        return
class ImagesListHandler(BaseHandler):
    """Admin page for listing existing images and registering new ones."""

    @user_required
    @admin_required
    def get(self):
        """Render the image list plus a channel used for live page refreshes."""
        # lookup user's auth info
        user_info = User.get_by_id(long(self.user_id))
        # look up user's articles
        images = Image.get_all()
        # setup channel to do page refresh
        channel_token = user_info.key.urlsafe()
        refresh_channel = channel.create_channel(channel_token)
        params = {
            'images': images,
            'refresh_channel': refresh_channel,
            'channel_token': channel_token
        }
        return self.render_template('admin/images.html', **params)

    @user_required
    @admin_required
    def post(self):
        """Create a new Image record from the submitted form, then redirect."""
        if not self.form.validate():
            self.add_message("The form did not validate.", 'error')
            return self.get()
        # load values out of the form
        name = self.form.name.data.strip()
        description = self.form.description.data.strip()
        url = self.form.url.data.strip()
        disk_format = self.form.disk_format.data.strip()
        container_format = self.form.container_format.data.strip()
        # save the flavor in our database
        image = Image(
            name = name,
            description = description,
            url = url,
            disk_format = disk_format,
            container_format = container_format
        )
        image.put()
        # log to alert
        self.add_message(('Image %s successfully created!' % name), 'success')
        # give it a few seconds to update db, then redirect
        time.sleep(1)
        return self.redirect_to('admin-images')

    @webapp2.cached_property
    def form(self):
        """Lazily-constructed WTForms form used by post()."""
        return forms.ImageForm(self)
class ImagesActionsHandler(BaseHandler):
    """Per-image actions: enable/disable (PUT) and delete (DELETE)."""

    @user_required
    @admin_required
    def put(self, image_id = None):
        """Toggle an image's active state; `enable` == '1' activates it."""
        image = Image.get_by_id(long(image_id))
        # get the enable/active state
        enable = self.request.get("enable")
        if image:
            if enable == '1':
                image.active = True
                image.put()
            else:
                image.active = False
                image.put()
        # hangout for a second
        # NOTE: unlike delete(), no channel reload message is sent here.
        time.sleep(1)
        return

    @user_required
    @admin_required
    def delete(self, image_id = None):
        """Delete an image, then signal the browser to reload via channel."""
        # delete the entry from the db
        image = Image.get_by_id(long(image_id))
        if image:
            image.key.delete()
            self.add_message('Image successfully deleted!', 'success')
        else:
            self.add_message('Image was not deleted. Something went horribly wrong somewhere!', 'warning')
        # hangout for a second
        time.sleep(1)
        # use the channel to tell the browser we are done and reload
        channel_token = self.request.get('channel_token')
        channel.send_message(channel_token, 'reload')
        return
class GroupsHandler(BaseHandler):
    """Render the admin groups page."""

    @user_required
    @admin_required
    def get(self):
        # authenticated-admin lookup (value unused beyond the fetch)
        user_info = User.get_by_id(long(self.user_id))
        return self.render_template('admin/groups.html')
| |
#!/usr/bin/env python3
import inspect
import logging
import uuid
from datetime import datetime
from .column import Column
from .query import Query
logger = logging.getLogger(__name__)

# Separator between an indexed value and the object identifier inside a
# sorted-set index entry ("<value>\x00<identifier>").
VALUE_ID_SEPARATOR = '\x00'
# Separator between the model key prefix and the identifier in object keys.
MODEL_NAME_ID_SEPARATOR = ':'
# Format used to round-trip datetime column values through Redis strings.
DATETIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'


# Exceptions
class InvalidQuery(Exception):
    """Raised for malformed queries (unknown columns, bad limit/offset/order_by)."""
    pass


class InvalidModelDefinition(Exception):
    """Raised at class-creation time when primary/composite keys are misconfigured."""
    pass


class BadDataError(Exception):
    """Raised when a column value has the wrong type or violates a constraint."""
    pass


class UnexpectedColumnError(Exception):
    """Raised when constructor kwargs contain names matching no declared column."""
    pass
class ModelMeta(type):
    """Metaclass that collects Column descriptors into per-class lookup tables.

    For every concrete model (anything other than RedisModel /
    TimeStampedModel) it validates the key configuration and caches sorted
    tuples of columns by role: all, indexed, sortable, identifier and
    auto-increment.
    """

    def __init__(cls, what, bases=None, attributes=None):
        super(ModelMeta, cls).__init__(what, bases, attributes)
        # Base classes are skipped; only concrete models are introspected.
        if cls.__name__ not in ('RedisModel', 'TimeStampedModel'):
            columns = []
            num_primary, num_composite = 0, 0
            cls._pk_name = None
            # grab all Columns from the model
            for name, column in inspect.getmembers(cls, lambda col: isinstance(col, Column)):
                column.name = name
                columns.append(column)
                if column.primary:
                    num_primary += 1
                    cls._pk_name = column.name
                if column.composite:
                    num_composite += 1
            # Defensive checks: valid configurations are exactly one primary
            # key, or two-or-more composite key columns (never both).
            if num_primary == 0:
                if num_composite == 0:
                    err_msg = 'No primary key or composite key in {}'.format(cls.__name__)
                    raise InvalidModelDefinition(err_msg)
                if num_composite == 1:
                    err_msg = 'Your composite key is really a primary key in {}'.format(cls.__name__)
                    raise InvalidModelDefinition(err_msg)
            if num_primary == 1:
                if num_composite != 0:
                    err_msg = 'Cannot have both primary and composite keys in {}'.format(cls.__name__)
                    raise InvalidModelDefinition(err_msg)
            # NOTE(review): num_primary > 1 is not rejected here -- confirm
            # whether multiple primary columns should also raise.
            cls._columns = tuple(sorted(columns, key=lambda c: c.name))
            cls._indexed_columns = tuple(sorted([col for col in cls._columns if col.indexed], key=lambda c: c.name))
            cls._sortable_columns = tuple(sorted([col for col in cls._columns if col.sorted], key=lambda c: c.name))
            cls._identifier_columns = tuple(
                sorted([col for col in cls._columns if col.primary or col.composite],
                       key=lambda c: c.name))
            cls._auto_columns = sorted(
                [col for col in cls._columns if getattr(col, 'auto_increment', False)],
                key=lambda c: c.name
            )
            # Columns usable in filter_by()/order_by.
            cls._queryable_colnames_set = set(
                [col.name for col in cls._indexed_columns + cls._identifier_columns + cls._sortable_columns]
            )
            cls._sortable_column_names = tuple([x.name for x in cls._sortable_columns])
            cls._auto_column_names = {col.name for col in cls._auto_columns}
            cls._indexed_column_names = {col.name for col in cls._indexed_columns}
            cls._columns_map = {c.name: c for c in cls._columns}
            cls._identifier_column_names = tuple([x.name for x in cls._identifier_columns])
class RedisModel(object, metaclass=ModelMeta):
    """Base class for objects persisted as Redis hashes.

    Instances are stored under "<key_prefix>:<identifier>" and indexed in
    per-column sorted sets whose members are "<value>\x00<identifier>"
    with score 0, enabling lexical range queries via ZRANGEBYLEX.
    """

    # force only keyword arguments
    def __init__(self, **kwargs):
        # `loading` is passed by load(); it relaxes the auto_increment guard
        # so persisted auto values can be rehydrated.
        loading = kwargs.pop('loading', False)
        for column in self._columns:
            if column.name in kwargs:
                value = kwargs.pop(column.name)
                # strict type check -- no coercion is attempted
                if type(value) != column.field_type:
                    err_msg = "Column `{}` in {} has value {}, should be of type {}".format(
                        column.name,
                        self.__class__.__name__,
                        value,
                        column.field_type,
                    )
                    raise BadDataError(err_msg)
                if column.enum_choices and value not in column.enum_choices:
                    err_msg = "Column `{}` in {} has value {}, should be in set {}".format(
                        column.name,
                        self.__class__.__name__,
                        value,
                        column.enum_choices,
                    )
                    raise BadDataError(err_msg)
                if getattr(column, 'auto_increment', False) and not loading:
                    err_msg = "Not allowed to set auto_increment column({})".format(column.name)
                    raise BadDataError(err_msg)
                # write via __dict__ to bypass __setattr__'s auto-column guard
                self.__dict__.update({column.name: value})
            else:
                if column.required and not getattr(column, 'auto_increment', False):
                    err_msg = 'Missing column `{}` in `{}` is required'.format(
                        column.name,
                        self.__class__.__name__,
                    )
                    raise BadDataError(err_msg)
        # Require that every kwarg supplied matches an expected column
        # TODO: handle TimeStampedModel cols better
        known_cols_set = set([column.name for column in self._columns] + ['updated_at', 'created_at'])
        supplied_cols_set = set([x for x in kwargs])
        unknown_cols_set = supplied_cols_set - known_cols_set
        if unknown_cols_set != set():
            err_msg = 'Unknown column(s): {} in `{}`'.format(
                unknown_cols_set,
                self.__class__.__name__,
            )
            raise UnexpectedColumnError(err_msg)

    def __setattr__(self, name, value):
        """Block direct writes to auto_increment columns; save() sets them
        through __dict__ instead."""
        if name in self._auto_column_names:
            err_msg = "Not allowed to set auto_increment column({})".format(name)
            raise BadDataError(err_msg)
        return super(RedisModel, self).__setattr__(name, value)

    @classmethod
    def key_prefix(cls):
        """Prefix that we use for Redis storage, used for all keys related
        to this object. Default to class name.
        """
        return cls.__name__

    @classmethod
    def make_key(cls, identifier):
        """Convenience method for computing the Redis object instance key
        from the identifier
        """
        return "{}{}{}".format(cls.key_prefix(), MODEL_NAME_ID_SEPARATOR, identifier)

    def has_real_data(self, column_name):
        """True once the attribute holds a value rather than the Column
        descriptor itself (i.e. it was actually set on this instance)."""
        return not isinstance(getattr(self, column_name), Column)

    def identifier(self):
        """Colon-joined string of the identifier column values."""
        identifiers = [str(getattr(self, column.name)) for column in self._identifier_columns]
        return ':'.join(identifiers)

    def redis_key(self):
        """Key used for storage of object instance in Redis.
        """
        return "{}{}{}".format(self.key_prefix(), MODEL_NAME_ID_SEPARATOR, self.identifier())

    def as_dict(self):
        """Dict version of this object
        """
        # WARNING: we have to send a copy, otherwise changing the dict
        # changes the object!
        # FIXME: this returns no keys for keys whose value is None!
        return self.__dict__.copy()

    def __repr__(self):
        return "<{}>".format(self.redis_key())

    @classmethod
    def get_index_key(cls, column_name):
        """Key of the sorted set indexing `column_name` for this model."""
        return 'index{}{}{}{}'.format(MODEL_NAME_ID_SEPARATOR, cls.key_prefix(), MODEL_NAME_ID_SEPARATOR, column_name)

    async def save_index(self, db, stale_object=None):
        """(Re)index this object.

        Removes index entries belonging to `stale_object` (the previously
        saved version, if any), then adds entries for the current value of
        every queryable column.
        """
        for indexed_column in self._queryable_colnames_set:
            index_key = self.get_index_key(indexed_column)
            if stale_object:
                stale_index_value = '{}{}{}'.format(
                    getattr(stale_object, indexed_column),
                    VALUE_ID_SEPARATOR,
                    stale_object.identifier()
                )
                await db.zrem(index_key, stale_index_value)
            index_value = '{}{}{}'.format(
                getattr(self, indexed_column),
                VALUE_ID_SEPARATOR,
                self.identifier()
            )
            # Index it by adding to a sorted set with 0 score. It will be lexically sorted by redis
            await db.zadd(index_key, 0, index_value,)

    async def save(self, db):
        """Save the object to Redis.

        Generates values for unset auto_increment columns, writes the
        fields as a Redis hash (datetimes serialized via DATETIME_FORMAT),
        and refreshes all index entries.
        """
        kwargs = {}
        for col in self._auto_columns:
            if not self.has_real_data(col.name):
                kwargs[col.name] = await col.auto_generate(db, self)
        self.__dict__.update(kwargs)
        # we have to delete the old index key
        stale_object = await self.__class__.load(db, identifier=self.identifier())
        d = {
            k: (v.strftime(DATETIME_FORMAT) if isinstance(v, datetime) else v)
            for k, v in self.__dict__.items()
        }
        success = await db.hmset_dict(self.redis_key(), d)
        await self.save_index(db, stale_object=stale_object)
        return success

    async def exists(self, db):
        """True when this instance's hash key exists in Redis."""
        return await db.exists(self.redis_key())

    @classmethod
    async def load(cls, db, identifier=None, redis_key=None):
        """Load the object from redis. Use the identifier (colon-separated
        composite keys or the primary key) or the redis_key.
        """
        if not identifier and not redis_key:
            raise InvalidQuery('Must supply identifier or redis_key')
        if redis_key is None:
            redis_key = cls.make_key(identifier)
        if await db.exists(redis_key):
            data = await db.hgetall(redis_key)
            kwargs = {}
            for key_bin, value_bin in data.items():
                # assumes the client returns decoded (str) keys/values --
                # TODO confirm the connection's encoding setting
                key, value = key_bin, value_bin
                column = getattr(cls, key, False)
                # unknown or str columns pass through; others are coerced
                # back from their string representation
                if not column or (column.field_type == str):
                    kwargs[key] = value
                elif column.field_type == datetime:
                    kwargs[key] = datetime.strptime(value, DATETIME_FORMAT)
                else:
                    kwargs[key] = column.field_type(value)
            kwargs['loading'] = True
            return cls(**kwargs)
        else:
            logger.debug("No Redis key found: {}".format(redis_key))
            return None

    @classmethod
    async def all(cls, db, order_by=None, limit=None, offset=None):
        """Async-iterate every stored instance (order_by forwarded through
        filter_by's kwargs)."""
        async for x in cls.filter_by(db, order_by=order_by, limit=limit, offset=offset):
            yield x

    @classmethod
    async def _get_ordered_result(cls, db, list_to_order, order_by, direction):
        """
        :param list_to_order:
        :param order_by:
        :param direction:
        :return:
        Sort the given list in redis.
        https://redis.io/commands/sort#using-hashes-in-codebycode-and-codegetcode
        """
        pairs = []
        for x in list_to_order:
            pairs.extend([0, x])
        if pairs:
            # temporary sorted set holding the ids, sorted server-side via
            # SORT ... BY hash->field
            ordered_res_key = 'filtered_result-{}'.format(uuid.uuid1())
            await db.zadd(ordered_res_key, pairs[0], pairs[1], *pairs[2:])
            ordered_result = await db.sort(
                ordered_res_key,
                by='{}:*->{}'.format(cls.__name__, order_by),
                alpha=True,
                asc=direction
            )
            # Delete the temp store
            await db.delete(ordered_res_key)
            return ordered_result
        else:
            return []

    @classmethod
    async def _get_ids_filter_by(cls, db, order_by=None, **kwargs):
        """Resolve a filter query to a list of identifiers.

        Each kwarg is matched against its column index (lists/tuples act as
        OR within a column; multiple kwargs intersect). With no kwargs,
        every id from the first identifier column's index is returned.
        """
        if order_by:
            # leading '-' means descending; optional leading '+' ascending
            direction = b'DESC' if order_by[0] == '-' else None
            if order_by[0] in ('+', '-'):
                order_by = order_by[1:]
            if order_by not in cls._queryable_colnames_set:
                err_msg = 'order_by field {order_by} is not in {queryable_cols}'.format(
                    order_by=order_by,
                    queryable_cols=cls._queryable_colnames_set,
                )
                raise InvalidQuery(err_msg)
        missing_cols_set = set(kwargs.keys()) - cls._queryable_colnames_set
        if missing_cols_set:
            err_msg = '{missing_cols_set} not in {queryable_cols}'.format(
                missing_cols_set=missing_cols_set,
                queryable_cols=cls._queryable_colnames_set,
            )
            raise InvalidQuery(err_msg)
        result_set = set()
        first_iteration = True
        for k, v in kwargs.items():
            if v is None:
                # NOTE(review): substitutes the Column object itself as the
                # match value when None is passed -- confirm this matches the
                # indexed representation of unset columns.
                v = cls._columns_map[k]
            if isinstance(v, (list, tuple)):
                values = [str(x) for x in v]
            elif isinstance(v, datetime):
                values = (v.strftime(DATETIME_FORMAT),)
            else:
                values = (str(v),)
            temp_set = set()
            for value in values:
                # lex range over "<value>\x00..." entries; the id is the part
                # after the separator.
                # NOTE(review): '\xff' utf-8-encodes to two bytes (0xc3 0xbf)
                # -- confirm it still upper-bounds the lex range as intended.
                temp_set = temp_set.union({x.partition(VALUE_ID_SEPARATOR)[2] for x in await db.zrangebylex(
                    cls.get_index_key(k),
                    min='{}{}'.format(value, VALUE_ID_SEPARATOR).encode(),
                    max='{}{}\xff'.format(value, VALUE_ID_SEPARATOR).encode())})
            if first_iteration:
                result_set = result_set.union(temp_set)
                first_iteration = False
            else:
                result_set = result_set.intersection(temp_set)
        if not kwargs:
            # no filters: enumerate every id from the identifier index
            for index_entry in await db.zrange(cls.get_index_key(cls._identifier_column_names[0]), 0, -1):
                result_set.add(index_entry.split(VALUE_ID_SEPARATOR)[-1])
        if order_by:
            return await cls._get_ordered_result(db, list_to_order=result_set, order_by=order_by, direction=direction)
        return sorted(result_set)

    @classmethod
    async def filter_by(cls, db, offset=None, limit=None, **kwargs):
        """Query by attributes iteratively.

        Example:
            User.get_by(db, age=[32, 54])
            User.get_by(db, age=23, name="guido")

        An `order_by` kwarg is forwarded to _get_ids_filter_by (this is how
        all() requests ordering); other kwargs are treated as filters.
        """
        if limit and type(limit) is not int:
            raise InvalidQuery('If limit is supplied it must be an int')
        if offset and type(offset) is not int:
            raise InvalidQuery('If offset is supplied it must be an int')
        ids_to_iterate = await cls._get_ids_filter_by(db, **kwargs)
        if offset:
            # Using offset without order_by is pretty strange, but allowed
            if limit:
                ids_to_iterate = ids_to_iterate[offset:offset+limit]
            else:
                ids_to_iterate = ids_to_iterate[offset:]
        elif limit:
            ids_to_iterate = ids_to_iterate[:limit]
        for key in ids_to_iterate:
            yield await cls.load(db, key)

    @classmethod
    async def get_object_or_none(cls, db, **kwargs):
        """
        Returns the first object exists for this query or None.
        WARNING: if there are more than 1 results in cls that satisfy the conditions in kwargs,
        only 1 random result will be returned
        """
        async for obj in cls.filter_by(db, limit=1, **kwargs):
            return obj
        return None

    @classmethod
    def query(cls, db) -> Query:
        """Start a Query builder bound to this model and connection."""
        return Query(model=cls, db=db)
| |
"""Utility meter from sensors providing raw data."""
from datetime import date, timedelta
from decimal import Decimal, DecimalException
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_NAME,
EVENT_HOMEASSISTANT_START,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.helpers import entity_platform
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.event import (
async_track_state_change_event,
async_track_time_change,
)
from homeassistant.helpers.restore_state import RestoreEntity
import homeassistant.util.dt as dt_util
from .const import (
ATTR_VALUE,
CONF_METER,
CONF_METER_NET_CONSUMPTION,
CONF_METER_OFFSET,
CONF_METER_TYPE,
CONF_SOURCE_SENSOR,
CONF_TARIFF,
CONF_TARIFF_ENTITY,
DAILY,
DATA_UTILITY,
HOURLY,
MONTHLY,
QUARTERLY,
SERVICE_CALIBRATE_METER,
SIGNAL_RESET_METER,
WEEKLY,
YEARLY,
)
_LOGGER = logging.getLogger(__name__)

# Extra state attributes exposed by the sensor.
ATTR_SOURCE_ID = "source"
ATTR_STATUS = "status"
ATTR_PERIOD = "meter_period"
ATTR_LAST_PERIOD = "last_period"
ATTR_LAST_RESET = "last_reset"
ATTR_TARIFF = "tariff"

ICON = "mdi:counter"
# display precision for meter values
PRECISION = 3

# Values taken by the ATTR_STATUS attribute.
PAUSED = "paused"
COLLECTING = "collecting"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
    """Set up the utility meter sensor.

    The platform is configured exclusively via discovery by the
    utility_meter component; one UtilityMeterSensor is created per
    discovered meter/tariff configuration, and the calibrate entity
    service is registered on the platform.
    """
    if discovery_info is None:
        _LOGGER.error("This platform is only available through discovery")
        return
    meters = []
    for conf in discovery_info:
        meter = conf[CONF_METER]
        # per-meter settings live in the component's DATA_UTILITY store
        conf_meter_source = hass.data[DATA_UTILITY][meter][CONF_SOURCE_SENSOR]
        conf_meter_type = hass.data[DATA_UTILITY][meter].get(CONF_METER_TYPE)
        conf_meter_offset = hass.data[DATA_UTILITY][meter][CONF_METER_OFFSET]
        conf_meter_net_consumption = hass.data[DATA_UTILITY][meter][
            CONF_METER_NET_CONSUMPTION
        ]
        conf_meter_tariff_entity = hass.data[DATA_UTILITY][meter].get(
            CONF_TARIFF_ENTITY
        )
        meters.append(
            UtilityMeterSensor(
                conf_meter_source,
                conf.get(CONF_NAME),
                conf_meter_type,
                conf_meter_offset,
                conf_meter_net_consumption,
                conf.get(CONF_TARIFF),
                conf_meter_tariff_entity,
            )
        )
    async_add_entities(meters)
    # expose the calibrate service on this platform's entities
    platform = entity_platform.current_platform.get()
    platform.async_register_entity_service(
        SERVICE_CALIBRATE_METER,
        {vol.Required(ATTR_VALUE): vol.Coerce(Decimal)},
        "async_calibrate",
    )
class UtilityMeterSensor(RestoreEntity):
    """Representation of an utility meter sensor.

    Accumulates differences of a source sensor's readings into a periodic
    total; optionally only collects while a matching tariff is active.
    """

    def __init__(
        self,
        source_entity,
        name,
        meter_type,
        meter_offset,
        net_consumption,
        tariff=None,
        tariff_entity=None,
    ):
        """Initialize the Utility Meter sensor."""
        self._sensor_source_id = source_entity  # entity_id of the tracked sensor
        self._state = 0  # accumulated value for the current period
        self._last_period = 0  # total of the previous period
        self._last_reset = dt_util.now()
        self._collecting = None  # unsubscribe callback; None means paused
        if name:
            self._name = name
        else:
            self._name = f"{source_entity} meter"
        self._unit_of_measurement = None  # adopted from the source sensor
        self._period = meter_type
        # assumes meter_offset is a timedelta shifting the reset instant
        # within the period -- see _async_reset_meter / async_added_to_hass
        self._period_offset = meter_offset
        self._sensor_net_consumption = net_consumption
        self._tariff = tariff
        self._tariff_entity = tariff_entity

    @callback
    def async_reading(self, event):
        """Handle the sensor state changes."""
        old_state = event.data.get("old_state")
        new_state = event.data.get("new_state")
        # ignore transitions involving unknown/unavailable states
        if (
            old_state is None
            or new_state is None
            or old_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]
            or new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]
        ):
            return
        # adopt the source sensor's unit on first valid reading
        if (
            self._unit_of_measurement is None
            and new_state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is not None
        ):
            self._unit_of_measurement = new_state.attributes.get(
                ATTR_UNIT_OF_MEASUREMENT
            )
        try:
            diff = Decimal(new_state.state) - Decimal(old_state.state)
            if (not self._sensor_net_consumption) and diff < 0:
                # Source sensor just rolled over for unknown reasons,
                return
            self._state += diff
        except ValueError as err:
            _LOGGER.warning("While processing state changes: %s", err)
        except DecimalException as err:
            _LOGGER.warning(
                "Invalid state (%s > %s): %s", old_state.state, new_state.state, err
            )
        self.async_write_ha_state()

    @callback
    def async_tariff_change(self, event):
        """Handle tariff changes."""
        new_state = event.data.get("new_state")
        if new_state is None:
            return
        # collect only while our tariff is the active one
        if self._tariff == new_state.state:
            self._collecting = async_track_state_change_event(
                self.hass, [self._sensor_source_id], self.async_reading
            )
        else:
            if self._collecting:
                self._collecting()  # unsubscribe from source updates
            self._collecting = None
        _LOGGER.debug(
            "%s - %s - source <%s>",
            self._name,
            COLLECTING if self._collecting is not None else PAUSED,
            self._sensor_source_id,
        )
        self.async_write_ha_state()

    async def _async_reset_meter(self, event):
        """Determine cycle - Helper function for larger than daily cycles."""
        # Called on the configured time-of-day; for periods longer than a
        # day, bail out unless today is the period boundary (shifted by
        # the configured offset). HOURLY/DAILY fall straight through.
        now = dt_util.now().date()
        if (
            self._period == WEEKLY
            and now != now - timedelta(days=now.weekday()) + self._period_offset
        ):
            return
        if (
            self._period == MONTHLY
            and now != date(now.year, now.month, 1) + self._period_offset
        ):
            return
        if (
            self._period == QUARTERLY
            and now
            != date(now.year, (((now.month - 1) // 3) * 3 + 1), 1) + self._period_offset
        ):
            return
        if self._period == YEARLY and now != date(now.year, 1, 1) + self._period_offset:
            return
        await self.async_reset_meter(self._tariff_entity)

    async def async_reset_meter(self, entity_id):
        """Reset meter."""
        # the reset signal carries a tariff entity id; only react to ours
        if self._tariff_entity != entity_id:
            return
        _LOGGER.debug("Reset utility meter <%s>", self.entity_id)
        self._last_reset = dt_util.now()
        # NOTE: the closed period's total is stored as a string
        self._last_period = str(self._state)
        self._state = 0
        self.async_write_ha_state()

    async def async_calibrate(self, value):
        """Calibrate the Utility Meter with a given value."""
        _LOGGER.debug("Calibrate %s = %s", self._name, value)
        self._state = value
        self.async_write_ha_state()

    async def async_added_to_hass(self):
        """Handle entity which will be added."""
        await super().async_added_to_hass()
        # schedule periodic resets at the offset-shifted boundary times
        if self._period == HOURLY:
            async_track_time_change(
                self.hass,
                self._async_reset_meter,
                minute=self._period_offset.seconds // 60,
                second=self._period_offset.seconds % 60,
            )
        elif self._period in [DAILY, WEEKLY, MONTHLY, QUARTERLY, YEARLY]:
            async_track_time_change(
                self.hass,
                self._async_reset_meter,
                hour=self._period_offset.seconds // 3600,
                minute=self._period_offset.seconds % 3600 // 60,
                second=self._period_offset.seconds % 3600 % 60,
            )
        async_dispatcher_connect(self.hass, SIGNAL_RESET_METER, self.async_reset_meter)
        # restore state from the last run, including paused status
        state = await self.async_get_last_state()
        if state:
            self._state = Decimal(state.state)
            self._unit_of_measurement = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
            self._last_period = state.attributes.get(ATTR_LAST_PERIOD)
            self._last_reset = state.attributes.get(ATTR_LAST_RESET)
            self.async_write_ha_state()
            if state.attributes.get(ATTR_STATUS) == PAUSED:
                # Fake cancellation function to init the meter paused
                self._collecting = lambda: None

        @callback
        def async_source_tracking(event):
            """Wait for source to be ready, then start meter."""
            if self._tariff_entity is not None:
                _LOGGER.debug("Track %s", self._tariff_entity)
                async_track_state_change_event(
                    self.hass, [self._tariff_entity], self.async_tariff_change
                )
                tariff_entity_state = self.hass.states.get(self._tariff_entity)
                # don't start collecting unless our tariff is active
                if self._tariff != tariff_entity_state.state:
                    return
            _LOGGER.debug("tracking source: %s", self._sensor_source_id)
            self._collecting = async_track_state_change_event(
                self.hass, [self._sensor_source_id], self.async_reading
            )

        self.hass.bus.async_listen_once(
            EVENT_HOMEASSISTANT_START, async_source_tracking
        )

    @property
    def name(self):
        """Return the name of the sensor."""
        return self._name

    @property
    def state(self):
        """Return the state of the sensor."""
        return self._state

    @property
    def unit_of_measurement(self):
        """Return the unit the value is expressed in."""
        return self._unit_of_measurement

    @property
    def should_poll(self):
        """No polling needed."""
        return False

    @property
    def device_state_attributes(self):
        """Return the state attributes of the sensor."""
        state_attr = {
            ATTR_SOURCE_ID: self._sensor_source_id,
            ATTR_STATUS: PAUSED if self._collecting is None else COLLECTING,
            ATTR_LAST_PERIOD: self._last_period,
            ATTR_LAST_RESET: self._last_reset,
        }
        if self._period is not None:
            state_attr[ATTR_PERIOD] = self._period
        if self._tariff is not None:
            state_attr[ATTR_TARIFF] = self._tariff
        return state_attr

    @property
    def icon(self):
        """Return the icon to use in the frontend, if any."""
        return ICON
| |
from __future__ import division
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from collections import Counter
from scipy.spatial.distance import euclidean
from random import choice, seed as rseed, uniform as rand
import pandas as pd
import numpy as np
from texttable import Texttable
from stats import abcd
from misc import *
from pdb import set_trace
from sklearn.neighbors import NearestNeighbors, BallTree, KDTree
from sklearn.svm import SVC, SVR
import warnings
from time import time
warnings.filterwarnings('ignore')
def SMOTE(data=None, atleast=50, atmost=100, a=None, b=None, k=5, resample=False):
    "Synthetic Minority Oversampling Technique"
    # Oversamples the minority class to `atleast` rows (by interpolating
    # between nearest neighbours) and resamples the majority class down/up
    # to `atmost` rows. Assumes the last DataFrame column is the class.
    # set_trace()
    def knn(a, b):
        "k nearest neighbors"
        # drop the class label (last column) before the distance search
        b = np.array([bb[:-1] for bb in b])
        tree = BallTree(b)
        __, indx = tree.query(a[:-1], k=6)
        return [b[i] for i in indx]
    # set_trace()
    # return sorted(b, key=lambda F: euclidean(a[:-1], F[:-1]))
    def kfn(me, my_lot, others):
        "k farthest neighbors"
        # NOTE(review): broken/dead code -- `b` and `a` below are undefined
        # in this scope, so calling kfn raises NameError.
        my_closest = None
        return sorted(b, key=lambda F: euclidean(a[:-1], F[:-1]))
    def extrapolate(one, two):
        # t=time()
        # interpolate each feature uniformly between the two parent rows,
        # keeping the first parent's (integer) class label
        new = len(one) * [None]
        new[:-1] = [a + rand(0, 1) * (b - a) for
                    a, b in zip(one[:-1], two[:-1])]
        new[-1] = int(one[-1])
        return new
    def populate(data, atleast):
        t = time()
        newData = [dd.tolist() for dd in data]
        if atleast - len(newData) < 0:
            # already have enough rows: resample down to `atleast`
            try:
                return [choice(newData) for _ in xrange(atleast)]
            except:
                set_trace()  # debugging aid (e.g. empty newData)
        else:
            # synthesize rows from nearest-neighbour pairs until `atleast`
            for _ in xrange(atleast - len(newData)):
                one = choice(data)
                neigh = knn(one, data)[1:k + 1]
                try:
                    two = choice(neigh)
                except IndexError:
                    two = one
                newData.append(extrapolate(one, two))
            return newData
    def populate2(data1, data2):
        # NOTE(review): apparently dead code -- `data` is undefined here and
        # kfn is called with the wrong number of arguments.
        newData = []
        for _ in xrange(atleast):
            for one in data1:
                neigh = kfn(one, data)[1:k + 1]
                try:
                    two = choice(neigh)
                except IndexError:
                    two = one
                newData.append(extrapolate(one, two))
        return [choice(newData) for _ in xrange(atleast)]
    def depopulate(data):
        # if resample:
        #     newer = []
        #     for _ in xrange(atmost):
        #         orig = choice(data)
        #         newer.append(extrapolate(orig, knn(orig, data)[1]))
        #     return newer
        # else:
        return [choice(data).tolist() for _ in xrange(atmost)]
    newCells = []
    # rseed(1)
    klass = lambda df: df[df.columns[-1]]
    count = Counter(klass(data))
    # set_trace()
    # NOTE(review): the a/b parameters are effectively ignored -- atleast and
    # atmost are re-hard-coded here (original alternatives left commented).
    atleast = 50  # if a==None else int(a*max([count[k] for k in count.keys()]))
    atmost = 100  # if b==None else int(b*max([count[k] for k in count.keys()]))
    # assumes a binary class column; dict iteration order decides which key
    # is treated as major vs minor -- TODO confirm
    major, minor = count.keys()
    # set_trace()
    for u in count.keys():
        if u == minor:
            newCells.extend(populate([r for r in data.as_matrix() if r[-1] == u], atleast=atleast))
        # NOTE(review): this second test is `if`, not `elif`, so for the
        # minority class the `else` branch below ALSO runs, appending the
        # original minority rows on top of the synthetic ones -- confirm
        # whether `elif` was intended.
        if u == major:
            newCells.extend(depopulate([r for r in data.as_matrix() if r[-1] == u]))
        else:
            newCells.extend([r.tolist() for r in data.as_matrix() if r[-1] == u])
    # set_trace()
    return pd.DataFrame(newCells, columns=data.columns)
def _smote():
    "Test SMOTE"
    # Smoke test: runs SMOTE on a sample Jureczko defect dataset and prints
    # the class balance before and after oversampling.
    # NOTE: `dir` shadows the builtin of the same name.
    dir = '../Data/Jureczko/camel/camel-1.6.csv'
    Tbl = csv2DF([dir], as_mtx=False)
    newTbl = SMOTE(Tbl)
    print('Before SMOTE: ', Counter(Tbl[Tbl.columns[-1]]))
    print('After SMOTE: ', Counter(newTbl[newTbl.columns[-1]]))
    # ---- ::DEBUG:: -----
    set_trace()
def rforest(train, test, tunings=None, smoteit=True, bin=True, smoteTune=True, regress=False):
    "RF "
    # Train a random forest (classifier, or regressor when regress=True) and
    # return (actual, predicted) for the test set. `tunings` layout as used
    # here: [0]=n_estimators, [1]=max_features (percent), [2]=min_samples_leaf,
    # [3]=min_samples_split, [-2]/[-1]=SMOTE a/b (consumed when smoteTune).
    # NOTE: `bin` shadows the builtin of the same name.
    if tunings and smoteTune == False:
        a = b = None
    elif tunings and smoteTune == True:
        a = tunings[-2]
        b = tunings[-1]
    # coerce raw csv paths into DataFrames when needed
    if not isinstance(train, pd.core.frame.DataFrame):
        train = csv2DF(train, as_mtx=False, toBin=bin)
    if not isinstance(test, pd.core.frame.DataFrame):
        test = csv2DF(test, as_mtx=False, toBin=True)
    if smoteit:
        if not tunings:
            train = SMOTE(train, resample=True)
        else:
            train = SMOTE(train, a, b, resample=True)
    # except: set_trace()
    if not tunings:
        if regress:
            clf = RandomForestRegressor(n_estimators=100, random_state=1, warm_start=True, n_jobs=-1)
        else:
            clf = RandomForestClassifier(n_estimators=100, random_state=1, warm_start=True, n_jobs=-1)
    else:
        if regress:
            clf = RandomForestRegressor(n_estimators=int(tunings[0]),
                                        max_features=tunings[1] / 100,
                                        min_samples_leaf=int(tunings[2]),
                                        min_samples_split=int(tunings[3]),
                                        warm_start=True, n_jobs=-1)
        else:
            clf = RandomForestClassifier(n_estimators=int(tunings[0]),
                                         max_features=tunings[1] / 100,
                                         min_samples_leaf=int(tunings[2]),
                                         min_samples_split=int(tunings[3]),
                                         warm_start=True, n_jobs=-1)
    # last DataFrame column is the class; the rest are features
    features = train.columns[:-1]
    klass = train[train.columns[-1]]
    clf.fit(train[features], klass)
    # NOTE(review): DataFrame.as_matrix() is a long-deprecated pandas API.
    actual = test[test.columns[-1]].as_matrix()
    try: preds = clf.predict(test[test.columns[:-1]])
    except: set_trace()  # debugging aid: drops into pdb on predict failure
    return actual, preds
def SVM(train, test, tunings=None, smoteit=True, bin=True, regress=False):
    "SVM "
    # Coerce raw csv inputs into DataFrames when needed.
    if not isinstance(train, pd.core.frame.DataFrame):
        train = csv2DF(train, as_mtx=False, toBin=bin)
    if not isinstance(test, pd.core.frame.DataFrame):
        test = csv2DF(test, as_mtx=False, toBin=True)
    if smoteit:
        train = SMOTE(train, resample=True)
    # except: set_trace()
    # `tunings` does not change the model: the original built an identical
    # default SVR/SVC in both branches, so the branch is collapsed here.
    clf = SVR() if regress else SVC()
    # last DataFrame column is the class label; the rest are features
    feature_cols = train.columns[:-1]
    labels = train[train.columns[-1]]
    # set_trace()
    clf.fit(train[feature_cols], labels)
    actual = test[test.columns[-1]].as_matrix()
    try: preds = clf.predict(test[test.columns[:-1]])
    except: set_trace()
    return actual, preds
def _RF():
    """Run the untuned random forest over the Jureczko datasets and print a
    class-frequency-weighted expected F-score per dataset."""
    dir = '../Data/Jureczko/'
    train, test = explore(dir)
    print('Dataset, Expt(F-Score)')
    for tr,te in zip(train, test):
        # Dataset label derived from the training file path.
        say(tr[0].split('/')[-1][:-8])
        actual, predicted = rforest(tr, te)
        abcd = ABCD(before=actual, after=predicted)
        # NOTE(review): stats()[-2] is presumably the per-class F-score --
        # confirm against the ABCD implementation.
        F = np.array([k.stats()[-2] for k in abcd()])
        tC = Counter(actual)
        # Relative frequency of each observed class.
        FreqClass=[tC[kk]/len(actual) for kk in list(set(actual))]
        ExptF = np.sum(F*FreqClass)
        say(', %0.2f\n' % (ExptF))
    # ---- ::DEBUG:: -----
    # Deliberately drops into the debugger after the run for inspection.
    set_trace()
if __name__ == '__main__':
    _RF()
| |
# -*- coding: utf-8 -*-
import logging
import os
import re
import socket
import warnings
from conf_d import Configuration
from beaver.utils import eglob
class BeaverConfig():
    """Load, validate and expose beaver's configuration.

    Values are merged from three sources in increasing priority: the
    built-in defaults below, the INI config file (parsed via
    conf_d.Configuration), and the argparse namespace ``args``.  Main
    ([beaver]) settings and per-file ("section") settings are tracked
    separately.
    """

    def __init__(self, args, logger=None):
        self._logger = logger or logging.getLogger(__name__)
        self._logger.debug('Processing beaver portion of config file %s' % args.config)

        # Defaults for every [/path/or/glob] file section.
        self._section_defaults = {
            'add_field': '',
            'debug': '0',
            'discover_interval': '15',
            'encoding': 'utf_8',

            # should be a python regex of files to remove
            'exclude': '',
            'format': '',

            # throw out empty lines instead of shipping them
            'ignore_empty': '0',

            # allow ignoring copytruncate results
            'ignore_truncate': '0',

            # buffered tokenization
            # we string-escape the delimiter later so that we can put escaped characters in our config file
            'delimiter': '\n',
            'size_limit': '',

            # multiline events support. Default is disabled
            'multiline_regex_after': '',
            'multiline_regex_before': '',

            'message_format': '',
            'sincedb_write_interval': '15',
            'stat_interval': '1',
            'start_position': 'end',
            'tags': '',
            'tail_lines': '0',
            'type': '',
        }

        # Defaults for the main [beaver] section (transports, queues, ssh).
        # Environment-variable fallbacks here are deprecated; see
        # _check_for_deprecated_usage().
        self._main_defaults = {
            'mqtt_clientid': 'mosquitto',
            'mqtt_host': 'localhost',
            'mqtt_port': '1883',
            'mqtt_topic': '/logstash',
            'mqtt_keepalive': '60',
            'rabbitmq_host': os.environ.get('RABBITMQ_HOST', 'localhost'),
            'rabbitmq_port': os.environ.get('RABBITMQ_PORT', '5672'),
            'rabbitmq_vhost': os.environ.get('RABBITMQ_VHOST', '/'),
            'rabbitmq_username': os.environ.get('RABBITMQ_USERNAME', 'guest'),
            'rabbitmq_password': os.environ.get('RABBITMQ_PASSWORD', 'guest'),
            'rabbitmq_queue': os.environ.get('RABBITMQ_QUEUE', 'logstash-queue'),
            'rabbitmq_exchange_type': os.environ.get('RABBITMQ_EXCHANGE_TYPE', 'direct'),
            'rabbitmq_exchange_durable': os.environ.get('RABBITMQ_EXCHANGE_DURABLE', '0'),
            'rabbitmq_queue_durable': os.environ.get('RABBITMQ_QUEUE_DURABLE', '0'),
            'rabbitmq_ha_queue': os.environ.get('RABBITMQ_HA_QUEUE', '0'),
            'rabbitmq_key': os.environ.get('RABBITMQ_KEY', 'logstash-key'),
            'rabbitmq_exchange': os.environ.get('RABBITMQ_EXCHANGE', 'logstash-exchange'),
            'redis_url': os.environ.get('REDIS_URL', 'redis://localhost:6379/0'),
            'redis_namespace': os.environ.get('REDIS_NAMESPACE', 'logstash:beaver'),
            'redis_password': '',
            'sqs_aws_access_key': '',
            'sqs_aws_secret_key': '',
            'sqs_aws_region': 'us-east-1',
            'sqs_aws_queue': '',
            'tcp_host': '127.0.0.1',
            'tcp_port': '9999',
            'tcp_ssl_enable': False,
            'tcp_ssl_verify': False,
            'tcp_ssl_cacert': None,
            'tcp_ssl_cert': None,
            'udp_host': os.environ.get('UDP_HOST', '127.0.0.1'),
            'udp_port': os.environ.get('UDP_PORT', '9999'),
            'zeromq_address': os.environ.get('ZEROMQ_ADDRESS', 'tcp://localhost:2120'),
            'zeromq_pattern': 'push',
            'zeromq_hwm': os.environ.get('ZEROMQ_HWM', ''),

            # exponential backoff
            'respawn_delay': '3',
            'max_failure': '7',

            # interprocess queue max size before puts block
            'max_queue_size': '100',

            # time in seconds before updating the file mapping
            'update_file_mapping_time': '',  # deprecated
            'discover_interval': '15',

            # time in seconds from last command sent before a queue kills itself
            'queue_timeout': '60',

            # kill and respawn worker process after given number of seconds
            'refresh_worker_process': '',

            # time in seconds to wait on queue.get() block before raising Queue.Empty exception
            'wait_timeout': '5',

            # path to sincedb sqlite db
            'sincedb_path': '',

            'logstash_version': '',

            # ssh tunnel support
            'ssh_key_file': '',
            'ssh_tunnel': '',
            'ssh_tunnel_port': '',
            'ssh_remote_host': '',
            'ssh_remote_port': '',
            'ssh_options': '',
            'subprocess_poll_sleep': '1',

            # the following can be passed via argparse
            'zeromq_bind': os.environ.get('BEAVER_MODE', 'bind' if os.environ.get('BIND', False) else 'connect'),
            'files': os.environ.get('BEAVER_FILES', ''),
            'format': os.environ.get('BEAVER_FORMAT', 'json'),
            'fqdn': '0',
            'hostname': '',
            'output': '',
            'path': os.environ.get('BEAVER_PATH', '/var/log'),
            'transport': os.environ.get('BEAVER_TRANSPORT', 'stdout'),  # this needs to be passed to the import class somehow

            # Path to individual file configs. These override any sections in the main beaver.ini file
            'confd_path': '/etc/beaver/conf.d',

            # the following are parsed before the config file is parsed
            # but may be useful at runtime
            'config': '/dev/null',
            'debug': '0',
            'daemonize': '0',
            'pid': '',
        }

        self._configfile = args.config
        self._globbed = []
        self._parse(args)
        for key in self._beaver_config:
            self._logger.debug('[CONFIG] "{0}" => "{1}"'.format(key, self._beaver_config.get(key)))
        self._update_files()
        self._check_for_deprecated_usage()

    def beaver_config(self):
        """Return the parsed main ([beaver]) configuration dict."""
        return self._beaver_config

    def get(self, key, default=None):
        """Look up a main-config value, falling back to `default`."""
        return self._beaver_config.get(key, default)

    def set(self, key, value):
        """Override a main-config value at runtime."""
        self._beaver_config[key] = value

    def get_field(self, field, filename):
        """Return a per-file setting for `filename`; unknown files get the
        section defaults."""
        return self._files.get(os.path.realpath(filename), self._section_defaults)[field]

    def addglob(self, globname, globbed):
        """Map every file matched by `globname` to that glob's section config.

        First time a glob is seen its config is logged; subsequent calls just
        refresh the file mapping.
        """
        if globname not in self._globbed:
            self._logger.debug('Adding glob {0}'.format(globname))
            config = self._file_config[globname]
            # (A former no-op re-assignment of self._file_config[globname]
            # to itself was removed here.)
            for key in config:
                self._logger.debug('Config: "{0}" => "{1}"'.format(key, config[key]))
        else:
            config = self._file_config.get(globname)
        for filename in globbed:
            self._files[filename] = config
        self._globbed.append(globname)

    def getfilepaths(self):
        """Return all watched file paths (realpaths)."""
        return self._files.keys()

    def getglobs(self):
        """Map each configured glob to its `exclude` pattern."""
        globs = {}
        for name in self._file_config:
            globs[name] = self._file_config[name].get('exclude')
        return globs

    def use_ssh_tunnel(self):
        """True when ALL ssh tunnel settings are present; log a warning if
        only some of them are set."""
        required = [
            'ssh_key_file',
            'ssh_tunnel',
            'ssh_tunnel_port',
            'ssh_remote_host',
            'ssh_remote_port',
        ]
        has = sum(1 for x in required if self.get(x) is not None)
        if has > 0 and has != len(required):
            self._logger.warning('Missing {0} of {1} required config variables for ssh'.format(len(required) - has, len(required)))
        return has == len(required)

    def _check_for_deprecated_usage(self):
        """Emit DeprecationWarnings for legacy env vars and for the old
        `update_file_mapping_time` setting (mapped to `discover_interval`)."""
        env_vars = [
            'RABBITMQ_HOST',
            'RABBITMQ_PORT',
            'RABBITMQ_VHOST',
            'RABBITMQ_USERNAME',
            'RABBITMQ_PASSWORD',
            'RABBITMQ_QUEUE',
            'RABBITMQ_EXCHANGE_TYPE',
            'RABBITMQ_EXCHANGE_DURABLE',
            'RABBITMQ_KEY',
            'RABBITMQ_EXCHANGE',
            'REDIS_URL',
            'REDIS_NAMESPACE',
            'UDP_HOST',
            'UDP_PORT',
            'ZEROMQ_ADDRESS',
            'BEAVER_FILES',
            'BEAVER_FORMAT',
            'BEAVER_MODE',
            'BEAVER_PATH',
            'BEAVER_TRANSPORT',
        ]
        deprecated_env_var_usage = []
        for e in env_vars:
            v = os.environ.get(e, None)
            if v is not None:
                deprecated_env_var_usage.append(e)
        if len(deprecated_env_var_usage) > 0:
            warnings.simplefilter('default')
            warnings.warn('ENV Variable support will be removed by version 20. Stop using: {0}'.format(', '.join(deprecated_env_var_usage)), DeprecationWarning)
        update_file_mapping_time = self.get('update_file_mapping_time')
        if update_file_mapping_time:
            self.set('discover_interval', update_file_mapping_time)
            warnings.simplefilter('default')
            warnings.warn('"update_file_mapping_time" has been supersceded by "discover_interval". Stop using: "update_file_mapping_time', DeprecationWarning)

    def _parse(self, args):
        """Parse the config file and populate _beaver_config, _file_config
        and the realpath->section map _files."""

        def _main_parser(config):
            """Post-process the merged [beaver] section: overlay argparse
            values and coerce raw strings to their final types."""
            # argparse values win over file/default values when present.
            transpose = ['config', 'confd_path', 'debug', 'daemonize', 'files', 'format', 'fqdn', 'hostname', 'path', 'pid', 'transport']
            namespace_dict = vars(args)
            for key in transpose:
                if key not in namespace_dict or namespace_dict[key] is None or namespace_dict[key] == '':
                    continue
                config[key] = namespace_dict[key]
            if args.mode:
                config['zeromq_bind'] = args.mode

            # HACK: Python 2.6 ConfigParser does not properly
            # handle non-string values
            for key in config:
                if config[key] == '':
                    config[key] = None

            require_bool = ['debug', 'daemonize', 'fqdn', 'rabbitmq_exchange_durable', 'rabbitmq_queue_durable', 'rabbitmq_ha_queue']
            for key in require_bool:
                config[key] = bool(int(config[key]))

            require_int = [
                'max_failure',
                'max_queue_size',
                'queue_timeout',
                'rabbitmq_port',
                'respawn_delay',
                'subprocess_poll_sleep',
                'refresh_worker_process',
                'tcp_port',
                'udp_port',
                'wait_timeout',
                'zeromq_hwm',
                'logstash_version',
            ]
            for key in require_int:
                if config[key] is not None:
                    config[key] = int(config[key])

            require_float = [
                'update_file_mapping_time',
                'discover_interval',
            ]
            for key in require_float:
                if config[key] is not None:
                    config[key] = float(config[key])

            if config.get('format') == 'null':
                config['format'] = 'raw'

            if config['files'] is not None and type(config['files']) == str:
                config['files'] = config['files'].split(',')

            if config['path'] is not None:
                config['path'] = os.path.realpath(config['path'])
                if not os.path.isdir(config['path']):
                    raise LookupError('{0} does not exist'.format(config['path']))

            if config.get('hostname') is None:
                if config.get('fqdn') is True:
                    config['hostname'] = socket.getfqdn()
                else:
                    config['hostname'] = socket.gethostname()

            if config.get('sincedb_path'):
                config['sincedb_path'] = os.path.realpath(config.get('sincedb_path'))

            if config['zeromq_address'] and type(config['zeromq_address']) == str:
                config['zeromq_address'] = [x.strip() for x in config.get('zeromq_address').split(',')]

            if config.get('ssh_options') is not None:
                csv = config.get('ssh_options')
                config['ssh_options'] = []
                # BUG FIX: this previously read `if csv == str:`, comparing
                # the value against the `str` *type* object -- always False,
                # so configured ssh_options were silently discarded.
                if isinstance(csv, str):
                    for opt in csv.split(','):
                        config['ssh_options'].append('-o %s' % opt.strip())
            else:
                config['ssh_options'] = []

            config['globs'] = {}
            return config

        def _section_parser(config, raise_exceptions=True):
            '''Parse a given INI-style config file using ConfigParser module.
            Stanza's names match file names, and properties are defaulted as in
            http://logstash.net/docs/1.1.1/inputs/file
            Config file example:
            [/var/log/syslog]
            type: syslog
            tags: sys,main
            [/var/log/auth]
            type: syslog
            ;tags: auth,main
            '''
            # add_field is a comma separated "key,value,key,value" list that
            # becomes the `fields` dict; an odd element count is an error.
            fields = config.get('add_field', '')
            if type(fields) != dict:
                try:
                    if type(fields) == str:
                        fields = filter(None, fields.split(','))
                    if len(fields) == 0:
                        config['fields'] = {}
                    elif (len(fields) % 2) == 1:
                        if raise_exceptions:
                            raise Exception('Wrong number of values for add_field')
                    else:
                        fieldkeys = fields[0::2]
                        fieldvalues = [[x] for x in fields[1::2]]
                        config['fields'] = dict(zip(fieldkeys, fieldvalues))
                except TypeError:
                    config['fields'] = {}
            if 'add_field' in config:
                del config['add_field']

            # tags: comma separated string -> list (empty list if unset).
            try:
                tags = config.get('tags', '')
                if type(tags) == str:
                    tags = filter(None, tags.split(','))
                if len(tags) == 0:
                    tags = []
                config['tags'] = tags
            except TypeError:
                config['tags'] = []

            if config.get('format') == 'null':
                config['format'] = 'raw'

            file_type = config.get('type', None)
            if not file_type:
                config['type'] = 'file'

            require_bool = ['debug', 'ignore_empty', 'ignore_truncate']
            for k in require_bool:
                config[k] = bool(int(config[k]))

            # Un-escape sequences like "\n" typed literally in the config.
            config['delimiter'] = config['delimiter'].decode('string-escape')

            if config['multiline_regex_after']:
                config['multiline_regex_after'] = re.compile(config['multiline_regex_after'])
            if config['multiline_regex_before']:
                config['multiline_regex_before'] = re.compile(config['multiline_regex_before'])

            require_int = ['sincedb_write_interval', 'stat_interval', 'tail_lines']
            for k in require_int:
                config[k] = int(config[k])
            return config

        conf = Configuration(
            name='beaver',
            path=self._configfile,
            main_defaults=self._main_defaults,
            section_defaults=self._section_defaults,
            main_parser=_main_parser,
            section_parser=_section_parser,
            path_from_main='confd_path'
        )
        config = conf.raw()
        self._beaver_config = config['beaver']
        self._file_config = config['sections']

        # Also run the parsers over the raw defaults so lookups against the
        # defaults see the same coerced types (note: mutates the dicts).
        self._main_parser = _main_parser(self._main_defaults)
        self._section_defaults = _section_parser(self._section_defaults, raise_exceptions=False)

        self._files = {}
        for section in config['sections']:
            globs = eglob(section, config['sections'][section].get('exclude', ''))
            if not globs:
                self._logger.debug('Skipping glob due to no files found: %s' % section)
                continue
            for globbed_file in globs:
                self._files[os.path.realpath(globbed_file)] = config['sections'][section]

    def _update_files(self):
        """Merge command-line `files` with those discovered from sections and
        publish the combined `files`/`globs` settings."""
        globs = self.get('files', default=[])
        files = self.get('files', default=[])
        if globs:
            # Command-line globs have no exclude pattern.
            globs = dict(zip(globs, [None]*len(globs)))
        else:
            globs = {}
        try:
            files.extend(self.getfilepaths())
            globs.update(self.getglobs())
        except AttributeError:
            # `files` was None (unset); fall back to section data only.
            files = self.getfilepaths()
            globs = self.getglobs()
        self.set('globs', globs)
        self.set('files', files)
        # Files named explicitly on the command line get default settings.
        for f in files:
            if f not in self._file_config:
                self._file_config[f] = self._section_defaults
| |
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
'''
Hartree-Fock for periodic systems with k-point sampling
See Also:
hf.py : Hartree-Fock for periodic systems at a single k-point
'''
from functools import reduce
import numpy as np
import scipy.linalg
from pyscf.scf import hf as mol_hf
from pyscf.scf import uhf as mol_uhf
from pyscf.pbc.scf import khf
from pyscf.pbc.scf import uhf as pbcuhf
from pyscf import lib
from pyscf.lib import logger
from pyscf.pbc.scf import addons
from pyscf.pbc.scf import chkfile
from pyscf import __config__
WITH_META_LOWDIN = getattr(__config__, 'pbc_scf_analyze_with_meta_lowdin', True)
PRE_ORTH_METHOD = getattr(__config__, 'pbc_scf_analyze_pre_orth_method', 'ANO')
CHECK_COULOMB_IMAG = getattr(__config__, 'pbc_scf_check_coulomb_imag', True)
canonical_occ = canonical_occ_ = addons.canonical_occ_
def make_rdm1(mo_coeff_kpts, mo_occ_kpts, **kwargs):
    '''Alpha and beta spin one particle density matrices for all k-points.

    Returns:
        dm_kpts : (2, nkpts, nao, nao) ndarray
    '''
    nkpts = len(mo_occ_kpts[0])
    nao, nmo = mo_coeff_kpts[0][0].shape
    dm_kpts = []
    # Spin-major ordering: all alpha k-points first, then all beta.
    for spin in (0, 1):
        for k in range(nkpts):
            mo = mo_coeff_kpts[spin][k]
            occ = mo_occ_kpts[spin][k]
            # D = C * occ * C^dagger for this spin channel and k-point
            dm_kpts.append(np.dot(mo * occ, mo.T.conj()))
    return lib.asarray(dm_kpts).reshape(2, nkpts, nao, nao)
def get_fock(mf, h1e=None, s1e=None, vhf=None, dm=None, cycle=-1, diis=None,
             diis_start_cycle=None, level_shift_factor=None, damp_factor=None):
    '''Build the (alpha, beta) Fock matrices for all k-points.

    Outside the SCF loop (cycle < 0 and no DIIS) this is simply
    hcore + veff; inside the loop DIIS extrapolation and level shifting
    are applied.  level_shift_factor may be a scalar or a
    (shift_alpha, shift_beta) pair.

    Note: damp_factor is accepted for interface compatibility but is not
    currently applied.
    '''
    h1e_kpts, s_kpts, vhf_kpts, dm_kpts = h1e, s1e, vhf, dm
    if h1e_kpts is None: h1e_kpts = mf.get_hcore()
    if vhf_kpts is None: vhf_kpts = mf.get_veff(mf.cell, dm_kpts)
    f_kpts = h1e_kpts + vhf_kpts
    if cycle < 0 and diis is None:  # Not inside the SCF iteration
        return f_kpts

    if diis_start_cycle is None:
        diis_start_cycle = mf.diis_start_cycle
    if level_shift_factor is None:
        level_shift_factor = mf.level_shift
    if damp_factor is None:
        damp_factor = mf.damp
    if s_kpts is None: s_kpts = mf.get_ovlp()
    if dm_kpts is None: dm_kpts = mf.make_rdm1()

    # Separate shifts may be supplied for the alpha and beta manifolds.
    if isinstance(level_shift_factor, (tuple, list, np.ndarray)):
        shifta, shiftb = level_shift_factor
    else:
        shifta = shiftb = level_shift_factor

    if diis and cycle >= diis_start_cycle:
        f_kpts = diis.update(s_kpts, dm_kpts, f_kpts, mf, h1e_kpts, vhf_kpts)
    # BUG FIX: previously `abs(level_shift_factor) > 1e-4` was evaluated,
    # which raises TypeError whenever a (shifta, shiftb) pair is supplied,
    # even though the pair form is explicitly unpacked above.
    if abs(shifta) > 1e-4 or abs(shiftb) > 1e-4:
        f_kpts =([mol_hf.level_shift(s, dm_kpts[0,k], f_kpts[0,k], shifta)
                  for k, s in enumerate(s_kpts)],
                 [mol_hf.level_shift(s, dm_kpts[1,k], f_kpts[1,k], shiftb)
                  for k, s in enumerate(s_kpts)])
    return lib.asarray(f_kpts)
def get_fermi(mf, mo_energy_kpts=None, mo_occ_kpts=None):
    '''A pair of Fermi level for spin-up and spin-down orbitals
    '''
    if mo_energy_kpts is None:
        mo_energy_kpts = mf.mo_energy
    if mo_occ_kpts is None:
        mo_occ_kpts = mf.mo_occ

    # mo_energy_kpts and mo_occ_kpts are k-point UHF quantities:
    # one 1D array per k-point per spin channel.
    assert(mo_energy_kpts[0][0].ndim == 1)
    assert(mo_occ_kpts[0][0].ndim == 1)

    def _nocc(occ_kpts):
        # Total occupation; may deviate slightly from an integer when
        # smearing is enabled, hence the rounding.
        total = sum(occ.sum() for occ in occ_kpts)
        return int(total.round(3))

    def _fermi_level(e_kpts, nocc):
        # Energy of the highest occupied orbital across all k-points.
        return np.sort(np.hstack(e_kpts))[nocc-1]

    fermi_a = _fermi_level(mo_energy_kpts[0], _nocc(mo_occ_kpts[0]))
    fermi_b = _fermi_level(mo_energy_kpts[1], _nocc(mo_occ_kpts[1]))

    # Sanity check: warn if any occupied orbital lies above the Fermi level.
    for k, mo_e in enumerate(mo_energy_kpts[0]):
        mo_occ = mo_occ_kpts[0][k]
        if mo_occ[mo_e > fermi_a].sum() > 0.5:
            logger.warn(mf, 'Alpha occupied band above Fermi level: \n'
                        'k=%d, mo_e=%s, mo_occ=%s', k, mo_e, mo_occ)
    for k, mo_e in enumerate(mo_energy_kpts[1]):
        mo_occ = mo_occ_kpts[1][k]
        if mo_occ[mo_e > fermi_b].sum() > 0.5:
            logger.warn(mf, 'Beta occupied band above Fermi level: \n'
                        'k=%d, mo_e=%s, mo_occ=%s', k, mo_e, mo_occ)
    return (fermi_a, fermi_b)
def get_occ(mf, mo_energy_kpts=None, mo_coeff_kpts=None):
    '''Label the occupancies for each orbital for sampled k-points.
    This is a k-point version of scf.hf.SCF.get_occ

    Orbitals are filled Aufbau-style per spin channel, using a single
    global Fermi level across all k-points.

    Returns:
        mo_occ_kpts: [alpha_occ_per_kpt, beta_occ_per_kpt], each a list of
        0.0/1.0 arrays (one per k-point).
    '''
    if mo_energy_kpts is None: mo_energy_kpts = mf.mo_energy

    nocc_a, nocc_b = mf.nelec
    # Alpha channel: pool all k-point energies, take the nocc_a-th lowest
    # as the Fermi level, and occupy everything at or below it.
    mo_energy = np.sort(np.hstack(mo_energy_kpts[0]))
    fermi_a = mo_energy[nocc_a-1]
    mo_occ_kpts = [[], []]
    for mo_e in mo_energy_kpts[0]:
        mo_occ_kpts[0].append((mo_e <= fermi_a).astype(np.double))
    if nocc_a < len(mo_energy):
        logger.info(mf, 'alpha HOMO = %.12g LUMO = %.12g', fermi_a, mo_energy[nocc_a])
    else:
        logger.info(mf, 'alpha HOMO = %.12g (no LUMO because of small basis) ', fermi_a)

    if nocc_b > 0:
        # NOTE(review): when nocc_b == 0 the beta occupation lists stay
        # empty rather than being filled with zero arrays -- confirm that
        # downstream consumers tolerate this.
        mo_energy = np.sort(np.hstack(mo_energy_kpts[1]))
        fermi_b = mo_energy[nocc_b-1]
        for mo_e in mo_energy_kpts[1]:
            mo_occ_kpts[1].append((mo_e <= fermi_b).astype(np.double))
        if nocc_b < len(mo_energy):
            logger.info(mf, 'beta HOMO = %.12g LUMO = %.12g', fermi_b, mo_energy[nocc_b])
        else:
            logger.info(mf, 'beta HOMO = %.12g (no LUMO because of small basis) ', fermi_b)

    if mf.verbose >= logger.DEBUG:
        # Temporarily widen numpy's print threshold so whole energy arrays
        # are shown, then restore the default (1000) afterwards.
        np.set_printoptions(threshold=len(mo_energy))
        logger.debug(mf, ' k-point alpha mo_energy')
        for k,kpt in enumerate(mf.cell.get_scaled_kpts(mf.kpts)):
            # Occupied energies followed by virtual energies at each k-point.
            logger.debug(mf, ' %2d (%6.3f %6.3f %6.3f) %s %s',
                         k, kpt[0], kpt[1], kpt[2],
                         mo_energy_kpts[0][k][mo_occ_kpts[0][k]> 0],
                         mo_energy_kpts[0][k][mo_occ_kpts[0][k]==0])
        logger.debug(mf, ' k-point beta mo_energy')
        for k,kpt in enumerate(mf.cell.get_scaled_kpts(mf.kpts)):
            logger.debug(mf, ' %2d (%6.3f %6.3f %6.3f) %s %s',
                         k, kpt[0], kpt[1], kpt[2],
                         mo_energy_kpts[1][k][mo_occ_kpts[1][k]> 0],
                         mo_energy_kpts[1][k][mo_occ_kpts[1][k]==0])
        np.set_printoptions(threshold=1000)

    return mo_occ_kpts
def energy_elec(mf, dm_kpts=None, h1e_kpts=None, vhf_kpts=None):
    '''Following pyscf.scf.hf.energy_elec()

    Returns:
        (e_tot, e_coul): total electronic energy and its two-electron part,
        both real, averaged over the k-point mesh.
    '''
    if dm_kpts is None: dm_kpts = mf.make_rdm1()
    if h1e_kpts is None: h1e_kpts = mf.get_hcore()
    if vhf_kpts is None: vhf_kpts = mf.get_veff(mf.cell, dm_kpts)

    nkpts = len(h1e_kpts)
    # One-electron term: both spin densities contract with the same hcore.
    e1 = 1./nkpts * np.einsum('kij,kji', dm_kpts[0], h1e_kpts)
    e1+= 1./nkpts * np.einsum('kij,kji', dm_kpts[1], h1e_kpts)
    # Two-electron term: factor 0.5 avoids double counting.
    e_coul = 1./nkpts * np.einsum('kij,kji', dm_kpts[0], vhf_kpts[0]) * 0.5
    e_coul+= 1./nkpts * np.einsum('kij,kji', dm_kpts[1], vhf_kpts[1]) * 0.5
    mf.scf_summary['e1'] = e1.real
    mf.scf_summary['e2'] = e_coul.real
    logger.debug(mf, 'E1 = %s E_coul = %s', e1, e_coul)
    # BUG FIX: previously written as abs(e_coul.imag > tol) -- abs() of a
    # boolean -- so a large *negative* imaginary component never triggered
    # the warning.  The magnitude of the imaginary part is what matters.
    if CHECK_COULOMB_IMAG and abs(e_coul.imag) > mf.cell.precision*10:
        logger.warn(mf, "Coulomb energy has imaginary part %s. "
                    "Coulomb integrals (e-e, e-N) may not converge !",
                    e_coul.imag)
    return (e1+e_coul).real, e_coul.real
def mulliken_meta(cell, dm_ao_kpts, verbose=logger.DEBUG,
                  pre_orth_method=PRE_ORTH_METHOD, s=None):
    '''A modified Mulliken population analysis, based on meta-Lowdin AOs.
    Note this function only computes the Mulliken population for the gamma
    point density matrix.

    Args:
        cell: the periodic system.
        dm_ao_kpts: (2, nkpts, nao, nao) spin density matrices in the AO basis.
        pre_orth_method: reference AOs used to seed the meta-Lowdin
            orthogonalization.
        s: overlap matrices per k-point; computed if not given.
    '''
    from pyscf.lo import orth
    if s is None:
        s = khf.get_ovlp(cell)
    log = logger.new_logger(cell, verbose)
    log.note('Analyze output for *gamma point*.')
    log.info(' To include the contributions from k-points, transform to a '
             'supercell then run the population analysis on the supercell\n'
             ' from pyscf.pbc.tools import k2gamma\n'
             ' k2gamma.k2gamma(mf).mulliken_meta()')
    log.note("KUHF mulliken_meta")
    # Only the k=0 (gamma-point) block of the density/overlap is analyzed.
    dm_ao_gamma = dm_ao_kpts[:,0,:,:].real
    s_gamma = s[0,:,:].real
    # Build meta-Lowdin orthogonal AOs and project each spin density into
    # that basis before delegating to the molecular UHF population code.
    c = orth.restore_ao_character(cell, pre_orth_method)
    orth_coeff = orth.orth_ao(cell, 'meta_lowdin', pre_orth_ao=c, s=s_gamma)
    c_inv = np.dot(orth_coeff.T, s_gamma)
    dm_a = reduce(np.dot, (c_inv, dm_ao_gamma[0], c_inv.T.conj()))
    dm_b = reduce(np.dot, (c_inv, dm_ao_gamma[1], c_inv.T.conj()))
    log.note(' ** Mulliken pop alpha/beta on meta-lowdin orthogonal AOs **')
    return mol_uhf.mulliken_pop(cell, (dm_a,dm_b), np.eye(orth_coeff.shape[0]), log)
def canonicalize(mf, mo_coeff_kpts, mo_occ_kpts, fock=None):
    '''Canonicalization diagonalizes the UHF Fock matrix within occupied,
    virtual subspaces separatedly (without change occupancy).
    '''
    if fock is None:
        dm = mf.make_rdm1(mo_coeff_kpts, mo_occ_kpts)
        fock = mf.get_fock(dm=dm)

    def _diag_subspace(fmat, mo, mask, es, cs):
        # Diagonalize fmat within the orbital subspace selected by `mask`,
        # writing eigenvalues / rotated orbitals into es / cs in place.
        if np.count_nonzero(mask) > 0:
            orb = mo[:,mask]
            f1 = reduce(np.dot, (orb.T.conj(), fmat, orb))
            e, c = scipy.linalg.eigh(f1)
            es[mask] = e
            cs[:,mask] = np.dot(orb, c)

    mo_coeff = [[], []]
    mo_energy = [[], []]
    # Treat the alpha (spin=0) and beta (spin=1) channels identically.
    for spin in (0, 1):
        for k, mo in enumerate(mo_coeff_kpts[spin]):
            mo1 = np.empty_like(mo)
            mo_e = np.empty_like(mo_occ_kpts[spin][k])
            occidx = mo_occ_kpts[spin][k] == 1
            viridx = ~occidx
            _diag_subspace(fock[spin][k], mo, occidx, mo_e, mo1)
            _diag_subspace(fock[spin][k], mo, viridx, mo_e, mo1)
            mo_coeff[spin].append(mo1)
            mo_energy[spin].append(mo_e)
    return mo_energy, mo_coeff
def init_guess_by_chkfile(cell, chkfile_name, project=None, kpts=None):
    '''Read the KHF results from checkpoint file, then project it to the
    basis defined by ``cell``

    Args:
        cell: target cell whose basis/k-points define the output.
        chkfile_name: path to an existing SCF checkpoint file.
        project: force (or suppress) projection between basis sets; decided
            automatically when None by comparing the two basis sets.
        kpts: target k-points; defaults to those stored in the chkfile.

    Returns:
        Density matrix, 3D ndarray
    '''
    from pyscf import gto
    chk_cell, scf_rec = chkfile.load_scf(chkfile_name)
    if project is None:
        project = not gto.same_basis_set(chk_cell, cell)
    if kpts is None:
        kpts = scf_rec['kpts']
    # Figure out which k-points the checkpointed MOs belong to.
    if 'kpt' in scf_rec:
        chk_kpts = scf_rec['kpt'].reshape(-1,3)
    elif 'kpts' in scf_rec:
        chk_kpts = scf_rec['kpts']
    else:
        chk_kpts = np.zeros((1,3))
    mo = scf_rec['mo_coeff']
    mo_occ = scf_rec['mo_occ']
    if 'kpts' not in scf_rec:  # gamma point or single k-point
        # Promote single-k-point data to the k-point-list layout.
        if mo.ndim == 2:
            mo = np.expand_dims(mo, axis=0)
            mo_occ = np.expand_dims(mo_occ, axis=0)
        else:  # UHF
            mo = [np.expand_dims(mo[0], axis=0),
                  np.expand_dims(mo[1], axis=0)]
            mo_occ = [np.expand_dims(mo_occ[0], axis=0),
                      np.expand_dims(mo_occ[1], axis=0)]

    if project:
        s = cell.pbc_intor('int1e_ovlp', kpts=kpts)

    def fproj(mo, kpts):
        # Project MOs from the chkfile basis into the target basis and
        # renormalize each orbital against the target overlap.
        if project:
            mo = addons.project_mo_nr2nr(chk_cell, mo, cell, kpts)
            for k, c in enumerate(mo):
                norm = np.einsum('pi,pi->i', c.conj(), s[k].dot(c))
                mo[k] /= np.sqrt(norm)
        return mo

    if kpts.shape == chk_kpts.shape and np.allclose(kpts, chk_kpts):
        # Same k-point mesh: project in place, k-point by k-point.
        def makedm(mos, occs):
            moa, mob = mos
            mos =([fproj(mo, None) for mo in moa],
                  [fproj(mo, None) for mo in mob])
            return make_rdm1(mos, occs)
    else:
        # Different meshes: for each target k-point pick the closest
        # checkpointed k-point and project across the k-point shift.
        def makedm(mos, occs):
            where = [np.argmin(lib.norm(chk_kpts-kpt, axis=1)) for kpt in kpts]
            moa, mob = mos
            occa, occb = occs
            dkpts = [chk_kpts[w]-kpts[i] for i,w in enumerate(where)]
            mos = (fproj([moa[w] for w in where], dkpts),
                   fproj([mob[w] for w in where], dkpts))
            occs = ([occa[i] for i in where], [occb[i] for i in where])
            return make_rdm1(mos, occs)

    if getattr(mo[0], 'ndim', None) == 2:  # KRHF
        # Split restricted occupations into alpha (first electron) and
        # beta (remainder) so a UHF density can be formed.
        mo_occa = [(occ>1e-8).astype(np.double) for occ in mo_occ]
        mo_occb = [occ-mo_occa[k] for k,occ in enumerate(mo_occ)]
        dm = makedm((mo, mo), (mo_occa, mo_occb))
    else:  # KUHF
        dm = makedm(mo, mo_occ)

    # Real DM for gamma point
    if np.allclose(kpts, 0):
        dm = dm.real
    return dm
def dip_moment(cell, dm_kpts, unit='Debye', verbose=logger.NOTE,
               grids=None, rho=None, kpts=np.zeros((1,3))):
    ''' Dipole moment in the unit cell.

    Args:
        cell : an instance of :class:`Cell`
        dm_kpts (two lists of ndarrays) : KUHF density matrices of k-points

    Return:
        A list: the dipole moment on x, y and z components
    '''
    # The dipole only depends on the total charge density, so collapse the
    # spin channels and delegate to the restricted k-point implementation.
    total_dm = dm_kpts[0] + dm_kpts[1]
    return khf.dip_moment(cell, total_dm, unit, verbose, grids, rho, kpts)
get_rho = khf.get_rho
class KUHF(khf.KSCF, pbcuhf.UHF):
    '''UHF class with k-point sampling.

    Combines the k-point SCF machinery of khf.KSCF with the spin-unrestricted
    behavior of the periodic UHF class.  MO quantities are stored per spin,
    per k-point.
    '''
    conv_tol = getattr(__config__, 'pbc_scf_KSCF_conv_tol', 1e-7)
    conv_tol_grad = getattr(__config__, 'pbc_scf_KSCF_conv_tol_grad', None)
    direct_scf = getattr(__config__, 'pbc_scf_SCF_direct_scf', False)

    def __init__(self, cell, kpts=np.zeros((1,3)),
                 exxdiv=getattr(__config__, 'pbc_scf_SCF_exxdiv', 'ewald')):
        khf.KSCF.__init__(self, cell, kpts, exxdiv)
        # Goes through the `nelec` property setter below: stores None so the
        # electron count is derived from the cell until explicitly set.
        self.nelec = None

    @property
    def nelec(self):
        # (nalpha, nbeta) over the whole k-point mesh, derived from
        # cell.tot_electrons and cell.spin unless explicitly overridden.
        if self._nelec is not None:
            return self._nelec
        else:
            cell = self.cell
            nkpts = len(self.kpts)
            ne = cell.tot_electrons(nkpts)
            nalpha = (ne + cell.spin) // 2
            nbeta = nalpha - cell.spin
            if nalpha + nbeta != ne:
                raise RuntimeError('Electron number %d and spin %d are not consistent\n'
                                   'Note cell.spin = 2S = Nalpha - Nbeta, not 2S+1' %
                                   (ne, cell.spin))
            return nalpha, nbeta
    @nelec.setter
    def nelec(self, x):
        self._nelec = x

    def dump_flags(self, verbose=None):
        khf.KSCF.dump_flags(self, verbose)
        logger.info(self, 'number of electrons per unit cell '
                    'alpha = %d beta = %d', *self.nelec)
        return self

    def get_init_guess(self, cell=None, key='minao'):
        """Build an initial (2, nkpts, nao, nao) spin density matrix.

        Guesses that only produce a gamma-point density (`dm`) are
        replicated over all k-points; guesses that already produce
        k-point data set `dm_kpts` directly.
        """
        if cell is None:
            cell = self.cell
        dm_kpts = None
        key = key.lower()
        if key == '1e' or key == 'hcore':
            dm_kpts = self.init_guess_by_1e(cell)
        elif getattr(cell, 'natm', 0) == 0:
            logger.info(self, 'No atom found in cell. Use 1e initial guess')
            dm_kpts = self.init_guess_by_1e(cell)
        elif key == 'atom':
            dm = self.init_guess_by_atom(cell)
        elif key[:3] == 'chk':
            try:
                dm_kpts = self.from_chk()
            except (IOError, KeyError):
                logger.warn(self, 'Fail to read %s. Use MINAO initial guess',
                            self.chkfile)
                dm = self.init_guess_by_minao(cell)
        else:
            dm = self.init_guess_by_minao(cell)

        if dm_kpts is None:
            nao = dm[0].shape[-1]
            nkpts = len(self.kpts)
            # dm[spin,nao,nao] at gamma point -> dm_kpts[spin,nkpts,nao,nao]
            dm_kpts = np.repeat(dm[:,None,:,:], nkpts, axis=1)
            dm_kpts[0,:] *= 1.01
            dm_kpts[1,:] *= 0.99  # To slightly break spin symmetry
            assert dm_kpts.shape[0]==2

        # Rescale each spin channel so it integrates to the right number of
        # electrons; a large mismatch is reported since it destabilizes SCF.
        ne = np.einsum('xkij,kji->x', dm_kpts, self.get_ovlp(cell)).real
        # FIXME: consider the fractional num_electron or not? This maybe
        # relates to the charged system.
        nkpts = len(self.kpts)
        nelec = np.asarray(self.nelec)
        if np.any(abs(ne - nelec) > 1e-7*nkpts):
            logger.debug(self, 'Big error detected in the electron number '
                         'of initial guess density matrix (Ne/cell = %g)!\n'
                         '  This can cause huge error in Fock matrix and '
                         'lead to instability in SCF for low-dimensional '
                         'systems.\n DM is normalized wrt the number '
                         'of electrons %s', ne.mean()/nkpts, nelec/nkpts)
            dm_kpts *= (nelec / ne).reshape(2,-1,1,1)
        return dm_kpts

    # Bind the module-level k-point UHF routines as methods.
    get_fock = get_fock
    get_fermi = get_fermi
    get_occ = get_occ
    energy_elec = energy_elec
    get_rho = get_rho

    def get_veff(self, cell=None, dm_kpts=None, dm_last=0, vhf_last=0, hermi=1,
                 kpts=None, kpts_band=None):
        # Effective potential per spin: total Coulomb (vj[0]+vj[1]) minus
        # the same-spin exchange vk.
        vj, vk = self.get_jk(cell, dm_kpts, hermi, kpts, kpts_band)
        vhf = vj[0] + vj[1] - vk
        return vhf

    def analyze(self, verbose=None, with_meta_lowdin=WITH_META_LOWDIN,
                **kwargs):
        if verbose is None: verbose = self.verbose
        return khf.analyze(self, verbose, with_meta_lowdin, **kwargs)

    def get_grad(self, mo_coeff_kpts, mo_occ_kpts, fock=None):
        """Orbital-rotation gradient (virtual-occupied Fock blocks),
        flattened over both spins and all k-points."""
        if fock is None:
            dm1 = self.make_rdm1(mo_coeff_kpts, mo_occ_kpts)
            fock = self.get_hcore(self.cell, self.kpts) + self.get_veff(self.cell, dm1)

        def grad(mo, mo_occ, fock):
            occidx = mo_occ > 0
            viridx = ~occidx
            g = reduce(np.dot, (mo[:,viridx].T.conj(), fock, mo[:,occidx]))
            return g.ravel()

        nkpts = len(self.kpts)
        grad_kpts = [grad(mo_coeff_kpts[0][k], mo_occ_kpts[0][k], fock[0][k])
                     for k in range(nkpts)]
        grad_kpts+= [grad(mo_coeff_kpts[1][k], mo_occ_kpts[1][k], fock[1][k])
                     for k in range(nkpts)]
        return np.hstack(grad_kpts)

    def eig(self, h_kpts, s_kpts):
        # Diagonalize the alpha and beta Fock matrices independently.
        e_a, c_a = khf.KSCF.eig(self, h_kpts[0], s_kpts)
        e_b, c_b = khf.KSCF.eig(self, h_kpts[1], s_kpts)
        return (e_a,e_b), (c_a,c_b)

    def make_rdm1(self, mo_coeff_kpts=None, mo_occ_kpts=None, **kwargs):
        if mo_coeff_kpts is None: mo_coeff_kpts = self.mo_coeff
        if mo_occ_kpts is None: mo_occ_kpts = self.mo_occ
        return make_rdm1(mo_coeff_kpts, mo_occ_kpts, **kwargs)

    def get_bands(self, kpts_band, cell=None, dm_kpts=None, kpts=None):
        '''Get energy bands at the given (arbitrary) 'band' k-points.

        Returns:
            mo_energy : (nmo,) ndarray or a list of (nmo,) ndarray
                Bands energies E_n(k)
            mo_coeff : (nao, nmo) ndarray or a list of (nao,nmo) ndarray
                Band orbitals psi_n(k)
        '''
        if cell is None: cell = self.cell
        if dm_kpts is None: dm_kpts = self.make_rdm1()
        if kpts is None: kpts = self.kpts

        kpts_band = np.asarray(kpts_band)
        single_kpt_band = (kpts_band.ndim == 1)
        kpts_band = kpts_band.reshape(-1,3)

        # Fock matrix at the band k-points from the converged density.
        fock = self.get_hcore(cell, kpts_band)
        fock = fock + self.get_veff(cell, dm_kpts, kpts=kpts, kpts_band=kpts_band)
        s1e = self.get_ovlp(cell, kpts_band)
        (e_a,e_b), (c_a,c_b) = self.eig(fock, s1e)
        # Unwrap the k-point axis if a single k-point was requested.
        if single_kpt_band:
            e_a = e_a[0]
            e_b = e_b[0]
            c_a = c_a[0]
            c_b = c_b[0]
        return (e_a,e_b), (c_a,c_b)

    def init_guess_by_chkfile(self, chk=None, project=True, kpts=None):
        if chk is None: chk = self.chkfile
        if kpts is None: kpts = self.kpts
        return init_guess_by_chkfile(self.cell, chk, project, kpts)
    init_guess_by_1e = pbcuhf.UHF.init_guess_by_1e
    init_guess_by_minao = pbcuhf.UHF.init_guess_by_minao
    init_guess_by_atom = pbcuhf.UHF.init_guess_by_atom
    init_guess_by_huckel = pbcuhf.UHF.init_guess_by_huckel

    @lib.with_doc(mulliken_meta.__doc__)
    def mulliken_meta(self, cell=None, dm=None, verbose=logger.DEBUG,
                      pre_orth_method=PRE_ORTH_METHOD, s=None):
        if cell is None: cell = self.cell
        if dm is None: dm = self.make_rdm1()
        if s is None: s = self.get_ovlp(cell)
        return mulliken_meta(cell, dm, s=s, verbose=verbose,
                             pre_orth_method=pre_orth_method)

    def mulliken_pop(self):
        # Plain Mulliken population is not defined for this class; use
        # mulliken_meta instead.
        raise NotImplementedError

    @lib.with_doc(dip_moment.__doc__)
    def dip_moment(self, cell=None, dm=None, unit='Debye', verbose=logger.NOTE,
                   **kwargs):
        if cell is None: cell = self.cell
        if dm is None: dm = self.make_rdm1()
        rho = kwargs.pop('rho', None)
        if rho is None:
            rho = self.get_rho(dm)
        return dip_moment(cell, dm, unit, verbose, rho=rho, kpts=self.kpts, **kwargs)

    @lib.with_doc(mol_uhf.spin_square.__doc__)
    def spin_square(self, mo_coeff=None, s=None):
        '''Treating the k-point sampling wfn as a giant Slater determinant,
        the spin_square value is the <S^2> of the giant determinant.
        '''
        nkpts = len(self.kpts)
        if mo_coeff is None:
            # Occupied orbitals only, per spin and k-point.
            mo_a = [self.mo_coeff[0][k][:,self.mo_occ[0][k]>0] for k in range(nkpts)]
            mo_b = [self.mo_coeff[1][k][:,self.mo_occ[1][k]>0] for k in range(nkpts)]
        else:
            mo_a, mo_b = mo_coeff
        if s is None:
            s = self.get_ovlp()

        nelec_a = sum([mo_a[k].shape[1] for k in range(nkpts)])
        nelec_b = sum([mo_b[k].shape[1] for k in range(nkpts)])
        # <S^2> = ssxy + ssz, with the xy part reduced by alpha-beta overlap.
        ssxy = (nelec_a + nelec_b) * .5
        for k in range(nkpts):
            sij = reduce(np.dot, (mo_a[k].T.conj(), s[k], mo_b[k]))
            ssxy -= np.einsum('ij,ij->', sij.conj(), sij).real
        ssz = (nelec_b-nelec_a)**2 * .25
        ss = ssxy + ssz
        s = np.sqrt(ss+.25) - .5
        # Returns (<S^2>, spin multiplicity 2S+1).
        return ss, s*2+1

    canonicalize = canonicalize

    def stability(self,
                  internal=getattr(__config__, 'pbc_scf_KSCF_stability_internal', True),
                  external=getattr(__config__, 'pbc_scf_KSCF_stability_external', False),
                  verbose=None):
        from pyscf.pbc.scf.stability import uhf_stability
        return uhf_stability(self, internal, external, verbose)

    def convert_from_(self, mf):
        '''Convert given mean-field object to KUHF'''
        addons.convert_to_uhf(mf, self)
        return self
# Drop module-level constants that were only needed as default argument
# values (bound at def time); CHECK_COULOMB_IMAG is kept because it is read
# at call time inside energy_elec.
del(WITH_META_LOWDIN, PRE_ORTH_METHOD)


if __name__ == '__main__':
    # Minimal smoke test: KUHF on a two-helium cell.
    # NOTE(review): kpts is passed as [2,1,1] -- presumably intended as a
    # k-point mesh (cf. cell.make_kpts([2,1,1])); confirm the expected type.
    from pyscf.pbc import gto
    cell = gto.Cell()
    cell.atom = '''
    He 0 0 1
    He 1 0 1
    '''
    cell.basis = '321g'
    cell.a = np.eye(3) * 3
    cell.mesh = [11] * 3
    cell.verbose = 5
    cell.build()
    mf = KUHF(cell, [2,1,1])
    mf.kernel()
    mf.analyze()
| |
"""Plot validation results from variant calling comparisons.
Handles data normalization and plotting, emphasizing comparisons on methodology
differences.
"""
import collections
import collections.abc
import os
from distutils.version import LooseVersion

import numpy as np
import pandas as pd

from bcbio.log import logger
from bcbio import utils
from bcbio.variation import bamprep
mpl = utils.LazyImport("matplotlib")
plt = utils.LazyImport("matplotlib.pyplot")
mpl_ticker = utils.LazyImport("matplotlib.ticker")
sns = utils.LazyImport("seaborn")
def classifyplot_from_plotfiles(plot_files, out_csv, outtype="png", title=None, size=None):
    """Create a plot from individual summary csv files with classification metrics.

    Concatenates the per-run CSVs into out_csv and plots it, keeping sample
    names in first-seen order.
    """
    frames = [pd.read_csv(fname) for fname in plot_files]
    # Collect unique sample names, preserving encounter order.
    seen = set()
    samples = []
    for frame in frames:
        for name in frame["sample"].unique():
            if name not in seen:
                seen.add(name)
                samples.append(name)
    combined = pd.concat(frames)
    combined.to_csv(out_csv, index=False)
    return classifyplot_from_valfile(out_csv, outtype, title, size, samples)
def classifyplot_from_valfile(val_file, outtype="png", title=None, size=None,
                              samples=None, callers=None):
    """Create a plot from a summarized validation file.

    Does new-style plotting of summarized metrics of
    false negative rate and false discovery rate.
    https://en.wikipedia.org/wiki/Sensitivity_and_specificity
    """
    mpl.use('Agg', force=True)
    summary = pd.read_csv(val_file)
    # Derive FNR/FDR per (sample, caller, variant type) group.
    rates = summary.groupby(["sample", "caller", "vtype"]).apply(_calculate_fnr_fdr)
    rates = rates.reset_index()
    out_file = "%s.%s" % (os.path.splitext(val_file)[0], outtype)
    _do_classifyplot(rates, out_file, title, size, samples, callers)
    return [out_file]
def _calculate_fnr_fdr(group):
"""Calculate the false negative rate (1 - sensitivity) and false discovery rate (1 - precision).
"""
data = {k: d["value"] for k, d in group.set_index("metric").T.to_dict().items()}
return pd.DataFrame([{"fnr": data["fn"] / float(data["tp"] + data["fn"]) * 100.0 if data["tp"] > 0 else 0.0,
"fdr": data["fp"] / float(data["tp"] + data["fp"]) * 100.0 if data["tp"] > 0 else 0.0,
"tpr": "TP: %s FN: %s" % (data["tp"], data["fn"]),
"spc": "FP: %s" % (data["fp"])}])
def _do_classifyplot(df, out_file, title=None, size=None, samples=None, callers=None):
    """Plot using classification-based plot using seaborn.

    Lays out a grid of horizontal bar charts: one row per (variant type,
    group) pair and one column per metric (FNR, FDR).  Whichever of
    samples/callers is larger becomes the bar categories; the other becomes
    the row grouping.
    """
    metric_labels = {"fdr": "False discovery rate",
                     "fnr": "False negative rate"}
    # Each metric is paired with the key of its count-annotation string.
    metrics = [("fnr", "tpr"), ("fdr", "spc")]
    # matplotlib 2.x changed default bar alignment and tick labeling;
    # is_mpl2 drives the compatibility adjustments below.
    is_mpl2 = LooseVersion(mpl.__version__) >= LooseVersion('2.0')
    colors = ["light grey", "greyish"] * 10
    data_dict = df.set_index(["sample", "caller", "vtype"]).T.to_dict()
    plt.ioff()
    plt.style.use('seaborn-white')
    vtypes = sorted(df["vtype"].unique(), reverse=True)
    if not callers:
        callers = sorted(df["caller"].unique())
    if not samples:
        samples = sorted(df["sample"].unique())
    # Use the larger dimension as bar categories, the smaller as row groups,
    # and key the lookup dict accordingly.
    if len(samples) >= len(callers):
        cats, groups = (samples, callers)
        data_dict = df.set_index(["sample", "caller", "vtype"]).T.to_dict()
    else:
        cats, groups = (callers, samples)
        data_dict = df.set_index(["caller", "sample", "vtype"]).T.to_dict()
    fig, axs = plt.subplots(len(vtypes) * len(groups), len(metrics))
    fig.text(.5, .95, title if title else "", horizontalalignment='center', size=14)
    for vi, vtype in enumerate(vtypes):
        for gi, group in enumerate(groups):
            for mi, (metric, label) in enumerate(metrics):
                # plt.subplots returns a scalar/1-d/2-d axes container
                # depending on the grid shape; normalize to one axes object.
                row_plots = axs if len(vtypes) * len(groups) == 1 else axs[vi * len(groups) + gi]
                cur_plot = row_plots if len(metrics) == 1 else row_plots[mi]
                vals, labels = [], []
                for cat in cats:
                    cur_data = data_dict.get((cat, group, vtype))
                    if cur_data:
                        vals.append(cur_data[metric])
                        labels.append(cur_data[label])
                cur_plot.barh(np.arange(len(vals)), vals, color=sns.xkcd_palette([colors[vi]]))
                # Share one x-scale per variant type across both metrics.
                all_vals = []
                for k, d in data_dict.items():
                    if k[-1] == vtype:
                        for m in metrics:
                            all_vals.append(d[m[0]])
                metric_max = max(all_vals)
                cur_plot.set_xlim(0, metric_max)
                pad = 0.1 * metric_max
                # Pre-mpl2 bars were bottom-aligned, so annotations need a
                # vertical nudge to sit at the bar center.
                ai_adjust = 0.0 if is_mpl2 else 0.35
                for ai, (val, label) in enumerate(zip(vals, labels)):
                    # Place the count text inside short bars, after long ones.
                    cur_plot.annotate(label, (pad + (0 if max(vals) > metric_max / 2.0 else max(vals)),
                                              ai + ai_adjust),
                                      va='center', size=7)
                cur_plot.locator_params(nbins=len(cats) + (2 if len(cats) > 2 else 1), axis="y", tight=True)
                if mi == 0:
                    # Only the first metric column carries category labels.
                    cur_plot.tick_params(axis='y', which='major', labelsize=8)
                    plot_cats = ([""] + cats) if is_mpl2 else cats
                    plot_va = "center" if is_mpl2 else "bottom"
                    cur_plot.set_yticklabels(plot_cats, size=8, va=plot_va)
                    cur_plot.set_title("%s: %s" % (vtype, group), fontsize=12, loc="left")
                else:
                    cur_plot.get_yaxis().set_ticks([])
                if gi == len(groups) - 1:
                    # Bottom row of each group: percentage-formatted x axis.
                    cur_plot.tick_params(axis='x', which='major', labelsize=8)
                    cur_plot.get_xaxis().set_major_formatter(
                        mpl_ticker.FuncFormatter(lambda v, p: "%s%%" % (int(v) if round(v) == v else v)))
                    if vi == len(vtypes) - 1:
                        cur_plot.get_xaxis().set_label_text(metric_labels[metric], size=12)
                else:
                    cur_plot.get_xaxis().set_ticks([])
                cur_plot.spines['bottom'].set_visible(False)
                cur_plot.spines['left'].set_visible(False)
                cur_plot.spines['top'].set_visible(False)
                cur_plot.spines['right'].set_visible(False)
    x, y = (6, len(vtypes) * len(groups) + 1 * 0.5 * len(cats)) if size is None else size
    fig.set_size_inches(x, y)
    fig.tight_layout(rect=(0, 0, 1, 0.95))
    plt.subplots_adjust(hspace=0.6)
    fig.savefig(out_file)
def create_from_csv(in_csv, config=None, outtype="png", title=None, size=None):
    """Convenience wrapper: load a validation CSV and plot it via create()."""
    df = pd.read_csv(in_csv)
    create(df, None, 0, config or {}, os.path.splitext(in_csv)[0], outtype, title,
           size)
def create(plot_data, header, ploti, sample_config, out_file_base, outtype="png",
           title=None, size=None):
    """Create plots of validation results for a sample, labeling prep strategies.

    plot_data is either a DataFrame or raw rows plus `header` column names.
    Returns the list of generated plot files, or None when the plotting stack
    is unavailable.
    """
    # Report missing plotting modules by name.  The original eval()'d the
    # variable names to get the modules -- same behaviour, no eval.
    missing = [name for name, module in (("mpl", mpl), ("plt", plt), ("sns", sns))
               if module is None]
    if missing:
        logger.info("No validation plot. Missing imports: %s" % ", ".join(missing))
        return None
    mpl.use('Agg', force=True)
    if header:
        df = pd.DataFrame(plot_data, columns=header)
    else:
        df = plot_data
    df["aligner"] = [get_aligner(x, sample_config) for x in df["sample"]]
    df["bamprep"] = [get_bamprep(x, sample_config) for x in df["sample"]]
    # Normalize values so small discordance counts stay visible next to
    # large concordance counts.
    floors = get_group_floors(df, cat_labels)
    df["value.floor"] = [get_floor_value(x, cat, vartype, floors)
                         for (x, cat, vartype) in zip(df["value"], df["category"], df["variant.type"])]
    out = []
    for i, prep in enumerate(df["bamprep"].unique()):
        out.append(plot_prep_methods(df, prep, i + ploti, out_file_base, outtype, title, size))
    return out
# Display names for the comparison categories included in the plots.
cat_labels = {"concordant": "Concordant",
              "discordant-missing-total": "Discordant (missing)",
              "discordant-extra-total": "Discordant (extra)",
              "discordant-shared-total": "Discordant (shared)"}
# Axis labels for the variant types.
vtype_labels = {"snp": "SNPs", "indel": "Indels"}
# Optional display names for BAM prep methods; unlisted preps show blank.
prep_labels = {}
# Display names for variant callers; unlisted callers keep their raw name.
caller_labels = {"ensemble": "Ensemble", "freebayes": "FreeBayes",
                 "gatk": "GATK Unified\nGenotyper", "gatk-haplotype": "GATK Haplotype\nCaller"}
def plot_prep_methods(df, prep, prepi, out_file_base, outtype, title=None,
                      size=None):
    """Plot comparison between BAM preparation methods.

    Renders the subset of df for one prep method; the output file is named
    after the first sample using that prep.
    """
    prep_samples = df[df["bamprep"] == prep]["sample"].unique()
    assert len(prep_samples) >= 1, prep_samples
    out_file = "%s-%s.%s" % (out_file_base, prep_samples[0], outtype)
    labeled = df[df["category"].isin(cat_labels)]
    _seaborn(labeled, prep, prepi, out_file, title, size)
    return out_file
def _seaborn(df, prep, prepi, out_file, title=None, size=None):
    """Plot using seaborn wrapper around matplotlib.

    Grid layout: one row per variant type, one column per category that
    passes _check_cats; each cell is a bar chart over callers.
    """
    plt.ioff()
    sns.set(style='dark')
    vtypes = df["variant.type"].unique()
    callers = sorted(df["caller"].unique())
    cats = _check_cats(["concordant", "discordant-missing-total",
                        "discordant-extra-total", "discordant-shared-total"],
                       vtypes, df, prep, callers)
    fig, axs = plt.subplots(len(vtypes), len(cats))
    width = 0.8
    for i, vtype in enumerate(vtypes):
        # plt.subplots returns a scalar axes when the grid is 1x1, a 1-d
        # array for a single row/column -- normalize below.
        ax_row = axs[i] if len(vtypes) > 1 else axs
        for j, cat in enumerate(cats):
            vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers)
            if len(cats) == 1:
                assert j == 0
                ax = ax_row
            else:
                ax = ax_row[j]
            if i == 0:
                # Category headers on the top row only.
                ax.set_title(cat_labels[cat], size=14)
            ax.get_yaxis().set_ticks([])
            if j == 0:
                ax.set_ylabel(vtype_labels[vtype], size=14)
            ax.bar(np.arange(len(callers)), vals, width=width)
            ax.set_ylim(0, maxval)
            if i == len(vtypes) - 1:
                # Caller names only under the bottom row.
                ax.set_xticks(np.arange(len(callers)) + width / 2.0)
                ax.set_xticklabels([caller_labels.get(x, x).replace("__", "\n") if x else ""
                                    for x in callers], size=8, rotation=45)
            else:
                ax.get_xaxis().set_ticks([])
            # Write the raw (un-floored) values above the bars.
            _annotate(ax, labels, vals, np.arange(len(callers)), width)
    fig.text(.5, .95, prep_labels.get(prep, "") if title is None else title, horizontalalignment='center', size=16)
    fig.subplots_adjust(left=0.05, right=0.95, top=0.87, bottom=0.15, wspace=0.1, hspace=0.1)
    x, y = (10, 5) if size is None else size
    fig.set_size_inches(x, y)
    fig.savefig(out_file)
def _check_cats(cats, vtypes, df, prep, callers):
    """Only include categories in the final output if they have values.

    A category is kept when its mean bar height exceeds 2 (heights of 1 are
    the placeholder for missing data).  Falls back to all categories when
    none qualify.
    """
    out = []
    for cat in cats:
        all_vals = []
        for vtype in vtypes:
            vals, labels, maxval = _get_chart_info(df, vtype, cat, prep, callers)
            all_vals.extend(vals)
        # Guard against empty callers/vtypes, which previously raised
        # ZeroDivisionError; treat as "no values" and skip the category.
        if all_vals and sum(all_vals) / float(len(all_vals)) > 2:
            out.append(cat)
    if len(out) == 0:
        return cats
    else:
        return out
def _get_chart_info(df, vtype, cat, prep, callers):
"""Retrieve values for a specific variant type, category and prep method.
"""
maxval_raw = max(list(df["value.floor"]))
curdf = df[(df["variant.type"] == vtype) & (df["category"] == cat)
& (df["bamprep"] == prep)]
vals = []
labels = []
for c in callers:
row = curdf[df["caller"] == c]
if len(row) > 0:
vals.append(list(row["value.floor"])[0])
labels.append(list(row["value"])[0])
else:
vals.append(1)
labels.append("")
return vals, labels, maxval_raw
def _annotate(ax, annotate, height, left, width):
    """Annotate axis with labels.

    Draws one text label per bar, centered horizontally on the bar and offset
    just past the bar end; expands the y-limits so the top label stays visible.
    """
    annotate_yrange_factor = 0.010
    xticks = np.array(left) + width / 2.0
    ymin, ymax = ax.get_ylim()
    yrange = ymax - ymin
    # Reset ymax and ymin so there's enough room to see the annotation of
    # the top-most
    if ymax > 0:
        ymax += yrange * 0.15
    if ymin < 0:
        ymin -= yrange * 0.15
    ax.set_ylim(ymin, ymax)
    yrange = ymax - ymin
    offset_ = yrange * annotate_yrange_factor
    # collections.Iterable was removed in Python 3.10; the ABC lives in
    # collections.abc (available since Python 3.3).
    if isinstance(annotate, collections.abc.Iterable):
        annotations = map(str, annotate)
    else:
        annotations = ['%.3f' % h if type(h) is np.float_ else str(h)
                       for h in height]
    for x, h, annotation in zip(xticks, height, annotations):
        # Adjust the offset to account for negative bars
        offset = offset_ if h >= 0 else -1 * offset_
        verticalalignment = 'bottom' if h >= 0 else 'top'
        # Shrink the font for long labels so they fit over the bar.
        if len(str(annotation)) > 6:
            size = 7
        elif len(str(annotation)) > 5:
            size = 8
        else:
            size = 10
        # Finally, add the text to the axes
        ax.annotate(annotation, (x, h + offset),
                    verticalalignment=verticalalignment,
                    horizontalalignment='center',
                    size=size)
def _ggplot(df, out_file):
    """Plot faceted items with ggplot wrapper on top of matplotlib.
    XXX Not yet functional
    """
    # NOTE(review): unused alternative to _seaborn; requires the third-party
    # `ggplot` package and mutates df columns in place.
    import ggplot as gg
    df["variant.type"] = [vtype_labels[x] for x in df["variant.type"]]
    df["category"] = [cat_labels[x] for x in df["category"]]
    df["caller"] = [caller_labels.get(x, None) for x in df["caller"]]
    p = (gg.ggplot(df, gg.aes(x="caller", y="value.floor")) + gg.geom_bar()
         + gg.facet_wrap("variant.type", "category")
         + gg.theme_seaborn())
    gg.ggsave(p, out_file)
def get_floor_value(x, cat, vartype, floors):
    """Modify values so all have the same relative scale for differences.

    Using the chosen base heights, adjusts an individual sub-plot to be
    consistent relative to that height.
    """
    base = floors[vartype]
    ceiling = floors[(cat, vartype)]
    if ceiling <= base:
        return x
    # Shift down by the overshoot, never dropping below a bar height of 1.
    return max(1, x - (ceiling - base))
def get_group_floors(df, cat_labels):
    """Retrieve the floor for a given row of comparisons, creating a normalized set of differences.

    We need to set non-zero floors so large numbers (like concordance) don't drown out small
    numbers (like discordance). This defines the height for a row of comparisons as either
    the minimum height of any sub-plot, or the maximum difference between higher and lower
    (plus 10%).
    """
    group_maxes = collections.defaultdict(list)
    group_diffs = collections.defaultdict(list)
    diff_pad = 0.1  # 10% padding onto difference to avoid large numbers looking like zero
    for name, group in df.groupby(["category", "variant.type"]):
        label, stype = name
        if label in cat_labels:
            diff = max(group["value"]) - min(group["value"])
            group_diffs[stype].append(diff + int(diff_pad * diff))
        group_maxes[stype].append(max(group["value"]))
        group_maxes[name].append(max(group["value"]))
    out = {}
    for k, vs in group_maxes.items():
        if k in group_diffs:
            # Look up diffs by this key. The original read the leaked loop
            # variable `stype` here, so every variant type used whichever
            # group happened to be processed last.
            out[k] = max(max(group_diffs[k]), min(vs))
        else:
            out[k] = min(vs)
    return out
def get_aligner(x, config):
    # `x` (the sample name) is currently unused; the aligner label comes
    # straight from the run configuration, defaulting to "".
    return utils.get_in(config, ("algorithm", "aligner"), "")
def get_bamprep(x, config):
    """Classify the BAM preparation approach from the run configuration.

    Returns "gatk" (realign + recalibrate with GATK), "none" (neither),
    "mixed" (one of the two), or "" otherwise.  `x` (sample name) is unused.
    """
    prep_config = {"config": {"algorithm": config.get("algorithm", {})}}
    params = bamprep._get_prep_params(prep_config)
    realign = params["realign"]
    recal = params["recal"]
    if realign == "gatk" and recal == "gatk":
        return "gatk"
    if not realign and not recal:
        return "none"
    if not recal or not realign:
        return "mixed"
    return ""
# ## Frequency plots
def facet_freq_plot(freq_csv, caller):
    """Prepare a facet plot of frequencies stratified by variant type and status (TP, FP, FN).
    Makes a nice plot with the output from validate.freq_summary
    """
    out_file = "%s.png" % os.path.splitext(freq_csv)[0]
    plt.ioff()
    sns.set(style='dark')
    df = pd.read_csv(freq_csv)
    # Grid: rows are variant types, columns are validation classes, with
    # independent y-scales per facet.
    g = sns.FacetGrid(df, row="vtype", col="valclass", margin_titles=True,
                      col_order=["TP", "FN", "FP"], row_order=["snp", "indel"],
                      sharey=False)
    g.map(plt.hist, "freq", bins=20, align="left")
    g.set(xlim=(0.0, 1.0))
    g.fig.set_size_inches(8, 6)
    # Caller name as the figure heading.
    g.fig.text(.05, .97, caller, horizontalalignment='center', size=14)
    g.fig.savefig(out_file)
| |
# -*- coding: utf-8 -*-
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import Kamaelia.ReadFileAdaptor
from Axon import Component
from Kamaelia.Util.PipelineComponent import pipeline
from Kamaelia.Util.Chargen import Chargen
from Kamaelia.Util.ConsoleEcho import consoleEchoer
from DL_Util import SerialChargen
from Encryption import BasicEncryption
import random
"""
============================
Basic Data Integrity Checker
============================
This module contains a series of components which ensure the
integrity of data transferred between components.
It basically adds a hash code of the data to be transferred. Basic usage is
Data Producer -- (data) -- IntegrityStamper() -- (data, hash) -- <other components> ...
... <other components> -- (data, hash) -- IntegrityChecker() -- (data) -- Data Consumer
"""
class IntegrityError(Exception):
    """Raised when a payload does not match its accompanying checksum/MAC."""
    def __str__(self):
        return "Checksum failed"
class BasicIntegrity(Component.component):
    """Base component providing digest calculation for the integrity services.

    algorithm selects the hash in use: "SHA" (default) or "MD5".
    """
    def __init__(self, algorithm="SHA"):
        super(BasicIntegrity, self).__init__()
        self.algorithm = algorithm
        self.setAlgorithm()
    def setAlgorithm(self):
        # Compare with == rather than `is`: identity comparison of string
        # literals relies on interning and fails for equal strings built
        # at runtime.
        if self.algorithm == "SHA":
            from Crypto.Hash import SHA
            self.method = SHA
        elif self.algorithm == "MD5":
            from Crypto.Hash import MD5
            self.method = MD5
#        elif self.algorithm == "RIPEMD":
#            from Crypto.Hash import RIPEMD # Cannot do this for some reason
#            self.method = RIPEMD
    def calcHash(self, data):
        """Return the binary digest of data using the selected algorithm."""
        hashobj = self.method.new(data)
        return hashobj.digest()
class IntegrityStamper(BasicIntegrity):
    """Wraps each inbox message as a (data, hash) tuple on its outbox."""
    def __init__(self, algorithm="SHA"):
        super(IntegrityStamper, self).__init__(algorithm)
    def main(self):
        # Cooperative generator loop: handle at most one message per yield.
        while True:
            if self.dataReady("inbox"):
                payload = self.recv("inbox")
                digest = self.calcHash(payload)
                self.send((payload, digest), "outbox")
            yield 1
class IntegrityChecker(BasicIntegrity):
    """Receives (data, checksum) tuples; forwards data whose checksum matches
    and reports an integrity error otherwise."""
    def __init__(self, algorithm="SHA"):
        super(IntegrityChecker, self).__init__(algorithm)
    def main(self):
        while 1:
            try:
                if self.dataReady("inbox"):
                    (data, checksum) = self.recv("inbox")
                    #print data , checksum , self.calcHash(data)
                    # Recompute the digest locally and compare.
                    if checksum == self.calcHash(data):
                        self.send(data, "outbox")
                    else: # we have a hash failure
                        raise IntegrityError # This mechanism needs improvement
            except IntegrityError:
                print "Integrity Error"
            yield 1
class DisruptiveComponent(Component.component):
    """ This component causes a minor change in the data
    so that data and its checksum will not match.
    Used for testing of integrity service. """
    def __init__(self, probability=0.2): # Probability of Disruption
        super(DisruptiveComponent, self).__init__()
        self.probability = probability
    def main(self):
        while True:
            if self.dataReady("inbox"):
                (payload, checksum) = self.recv("inbox")
                should_corrupt = random.random() < self.probability
                if should_corrupt:
                    # Truncate one byte so the checksum no longer matches.
                    payload = payload[:-1]
                self.send((payload, checksum), "outbox")
            yield 1
class MAC_Stamper(BasicIntegrity):
    """ Provides message authentication only, message is still sent in plain text
    """
    def __init__(self, key, encryption="AES", mode="ECB", hash="SHA"):
        super(MAC_Stamper, self).__init__(hash)
        self.encryptobj = BasicEncryption(key, encryption, mode)
    def main(self):
        while True:
            if self.dataReady("inbox"):
                payload = self.recv("inbox")
                # The MAC is the encrypted digest of the payload.
                tag = self.encryptobj.encrypt(self.calcHash(payload))
                self.send((payload, tag), "outbox")
            yield 1
class MAC_Checker(BasicIntegrity):
    """Verifies (data, mac) tuples: decrypts the MAC and compares it to a
    freshly computed digest; matching data is forwarded on the outbox."""
    def __init__(self, key, encryption="AES", mode="ECB", hash="SHA"):
        super(MAC_Checker, self).__init__(hash)
        self.decryptobj = BasicEncryption(key, encryption, mode)
    def main(self):
        while 1:
            try:
                if self.dataReady("inbox"):
                    (data, mac) = self.recv("inbox")
                    # Recover the sender's digest from the MAC.
                    checksum = self.decryptobj.decrypt(mac)
                    if checksum == self.calcHash(data):
                        self.send(data, "outbox")
                    else: # we have a hash failure
                        raise IntegrityError # This mechanism needs improvement
            except IntegrityError:
                print "Integrity Error"
            yield 1
if __name__ == "__main__":
    # Demonstration pipeline: generated data is MAC-stamped, randomly
    # corrupted, then checked; corrupted messages print "Integrity Error"
    # while intact ones reach the console echoer.
    pipeline(
        SerialChargen(),
        MAC_Stamper("1234567812345678", mode="CBC"),
        DisruptiveComponent(),
        MAC_Checker("1234567812345678", mode="CBC"),
        consoleEchoer()
    ).run()
| |
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import binascii
from distutils import version
import os
import sys
import time
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import strutils
from nova.api.metadata import password
from nova.compute import utils as compute_utils
from nova import context
from nova import crypto
from nova import exception
from nova.i18n import _, _LE, _LI, _LW
from nova import objects
from nova import utils
USE_AGENT_KEY = "xenapi_use_agent"
USE_AGENT_SM_KEY = utils.SM_IMAGE_PROP_PREFIX + USE_AGENT_KEY
SKIP_SSH_KEY = "xenapi_skip_agent_inject_ssh"
SKIP_SSH_SM_KEY = utils.SM_IMAGE_PROP_PREFIX + SKIP_SSH_KEY
SKIP_FILES_AT_BOOT_KEY = "xenapi_skip_agent_inject_files_at_boot"
SKIP_FILES_AT_BOOT_SM_KEY = utils.SM_IMAGE_PROP_PREFIX \
+ SKIP_FILES_AT_BOOT_KEY
LOG = logging.getLogger(__name__)
xenapi_agent_opts = [
cfg.IntOpt('agent_timeout',
default=30,
help='Number of seconds to wait for agent reply'),
cfg.IntOpt('agent_version_timeout',
default=300,
help='Number of seconds to wait for agent '
'to be fully operational'),
cfg.IntOpt('agent_resetnetwork_timeout',
default=60,
help='Number of seconds to wait for agent reply '
'to resetnetwork request'),
cfg.StrOpt('agent_path',
default='usr/sbin/xe-update-networking',
help='Specifies the path in which the XenAPI guest agent '
'should be located. If the agent is present, network '
'configuration is not injected into the image. '
'Used if compute_driver=xenapi.XenAPIDriver and '
'flat_injected=True'),
cfg.BoolOpt('disable_agent',
default=False,
help='Disables the use of the XenAPI agent in any image '
'regardless of what image properties are present.'),
cfg.BoolOpt('use_agent_default',
default=False,
help='Determines if the XenAPI agent should be used when '
'the image used does not contain a hint to declare if '
'the agent is present or not. '
'The hint is a glance property "' + USE_AGENT_KEY + '" '
'that has the value "True" or "False". '
'Note that waiting for the agent when it is not present '
'will significantly increase server boot times.'),
]
CONF = cfg.CONF
CONF.register_opts(xenapi_agent_opts, 'xenserver')
def _call_agent(session, instance, vm_ref, method, addl_args=None,
                timeout=None, success_codes=None):
    """Abstracts out the interaction with the agent xenapi plugin.

    Calls `method` on the guest agent via the xapi 'agent' plugin, retrying
    transparently across a detected reboot.  Raises AgentTimeout,
    AgentNotImplemented or AgentError on failure; returns the agent's
    message string on success.
    """
    if addl_args is None:
        addl_args = {}
    if timeout is None:
        timeout = CONF.xenserver.agent_timeout
    if success_codes is None:
        success_codes = ['0']
    # always fetch domid because VM may have rebooted
    dom_id = session.VM.get_domid(vm_ref)
    args = {
        'id': str(uuid.uuid4()),
        'dom_id': str(dom_id),
        'timeout': str(timeout),
    }
    args.update(addl_args)
    try:
        ret = session.call_plugin('agent', method, args)
    except session.XenAPI.Failure as e:
        # The plugin reports failures as tagged strings in the last line of
        # the failure details.
        err_msg = e.details[-1].splitlines()[-1]
        if 'TIMEOUT:' in err_msg:
            LOG.error(_LE('TIMEOUT: The call to %(method)s timed out. '
                          'args=%(args)r'),
                      {'method': method, 'args': args}, instance=instance)
            raise exception.AgentTimeout(method=method)
        elif 'REBOOT:' in err_msg:
            LOG.debug('REBOOT: The call to %(method)s detected a reboot. '
                      'args=%(args)r',
                      {'method': method, 'args': args}, instance=instance)
            # Wait for the new domain, then retry against the new dom_id.
            _wait_for_new_dom_id(session, vm_ref, dom_id, method)
            return _call_agent(session, instance, vm_ref, method,
                               addl_args, timeout, success_codes)
        elif 'NOT IMPLEMENTED:' in err_msg:
            LOG.error(_LE('NOT IMPLEMENTED: The call to %(method)s is not '
                          'supported by the agent. args=%(args)r'),
                      {'method': method, 'args': args}, instance=instance)
            raise exception.AgentNotImplemented(method=method)
        else:
            LOG.error(_LE('The call to %(method)s returned an error: %(e)s. '
                          'args=%(args)r'),
                      {'method': method, 'args': args, 'e': e},
                      instance=instance)
            raise exception.AgentError(method=method)
    if not isinstance(ret, dict):
        try:
            ret = jsonutils.loads(ret)
        except TypeError:
            LOG.error(_LE('The agent call to %(method)s returned an invalid '
                          'response: %(ret)r. args=%(args)r'),
                      {'method': method, 'ret': ret, 'args': args},
                      instance=instance)
            raise exception.AgentError(method=method)
    if ret['returncode'] not in success_codes:
        # Message fix: the original read "returned an an error".
        LOG.error(_LE('The agent call to %(method)s returned '
                      'an error: %(ret)r. args=%(args)r'),
                  {'method': method, 'ret': ret, 'args': args},
                  instance=instance)
        raise exception.AgentError(method=method)
    LOG.debug('The agent call to %(method)s was successful: '
              '%(ret)r. args=%(args)r',
              {'method': method, 'ret': ret, 'args': args},
              instance=instance)
    # Some old versions of the Windows agent have a trailing \\r\\n
    # (ie CRLF escaped) for some reason. Strip that off.
    return ret['message'].replace('\\r\\n', '')
def _wait_for_new_dom_id(session, vm_ref, old_dom_id, method):
    """Poll until the VM reports a dom_id different from old_dom_id.

    Used after a reboot is detected so that subsequent agent calls target
    the new domain.  Raises AgentTimeout when CONF.xenserver.agent_timeout
    elapses first.
    """
    expiration = time.time() + CONF.xenserver.agent_timeout
    while True:
        dom_id = session.VM.get_domid(vm_ref)
        if dom_id and dom_id != -1 and dom_id != old_dom_id:
            # Lazy %-style args, consistent with the other LOG calls in this
            # module (the original interpolated eagerly with %).
            LOG.debug("Found new dom_id %s", dom_id)
            return
        if time.time() > expiration:
            LOG.debug("Timed out waiting for new dom_id %s", dom_id)
            raise exception.AgentTimeout(method=method)
        time.sleep(1)
def is_upgrade_required(current_version, available_version):
    """Return True when available_version is strictly newer than current_version.

    NOTE(johngarbutt): agent version numbers are four part,
    so we need to use the loose version to compare them.
    """
    return (version.LooseVersion(available_version) >
            version.LooseVersion(current_version))
class XenAPIBasedAgent(object):
    """Driver-side handle for talking to the guest agent of one instance.

    Wraps the module-level _call_agent() with instance context and converts
    most agent failures into recorded instance faults instead of exceptions.
    """
    def __init__(self, session, virtapi, instance, vm_ref):
        self.session = session
        self.virtapi = virtapi
        self.instance = instance
        self.vm_ref = vm_ref
    def _add_instance_fault(self, error, exc_info):
        """Record `error` as an instance fault, swallowing recording errors."""
        LOG.warning(_LW("Ignoring error while configuring instance with "
                        "agent: %s"), error,
                    instance=self.instance, exc_info=True)
        try:
            ctxt = context.get_admin_context()
            compute_utils.add_instance_fault_from_exc(
                ctxt, self.instance, error, exc_info=exc_info)
        except Exception:
            LOG.debug("Error setting instance fault.", exc_info=True)
    def _call_agent(self, method, addl_args=None, timeout=None,
                    success_codes=None, ignore_errors=True):
        """Call the agent; with ignore_errors, faults are recorded not raised."""
        try:
            return _call_agent(self.session, self.instance, self.vm_ref,
                               method, addl_args, timeout, success_codes)
        except exception.AgentError as error:
            if ignore_errors:
                self._add_instance_fault(error, sys.exc_info())
            else:
                raise
    def get_version(self):
        """Return the agent version string, or None on persistent failure."""
        LOG.debug('Querying agent version', instance=self.instance)
        # The agent can be slow to start for a variety of reasons. On Windows,
        # it will generally perform a setup process on first boot that can
        # take a couple of minutes and then reboot. On Linux, the system can
        # also take a while to boot.
        expiration = time.time() + CONF.xenserver.agent_version_timeout
        while True:
            try:
                # NOTE(johngarbutt): we can't use the xapi plugin
                # timeout, because the domid may change when
                # the server is rebooted
                return self._call_agent('version', ignore_errors=False)
            except exception.AgentError as error:
                if time.time() > expiration:
                    self._add_instance_fault(error, sys.exc_info())
                    return
    def _get_expected_build(self):
        """Look up the newest known agent build for this instance's platform."""
        ctxt = context.get_admin_context()
        agent_build = objects.Agent.get_by_triple(
            ctxt, 'xen', self.instance['os_type'],
            self.instance['architecture'])
        if agent_build:
            LOG.debug('Latest agent build for %(hypervisor)s/%(os)s'
                      '/%(architecture)s is %(version)s', {
                'hypervisor': agent_build.hypervisor,
                'os': agent_build.os,
                'architecture': agent_build.architecture,
                'version': agent_build.version})
        else:
            LOG.debug('No agent build found for %(hypervisor)s/%(os)s'
                      '/%(architecture)s', {
                'hypervisor': 'xen',
                'os': self.instance['os_type'],
                'architecture': self.instance['architecture']})
        return agent_build
    def update_if_needed(self, version):
        """Upgrade the in-guest agent when a newer build is available."""
        agent_build = self._get_expected_build()
        if version and agent_build and \
                is_upgrade_required(version, agent_build.version):
            LOG.debug('Updating agent to %s', agent_build.version,
                      instance=self.instance)
            self._perform_update(agent_build)
        else:
            LOG.debug('Skipping agent update.', instance=self.instance)
    def _perform_update(self, agent_build):
        """Ask the agent to fetch and install the given build; never raises."""
        args = {'url': agent_build.url, 'md5sum': agent_build.md5hash}
        try:
            self._call_agent('agentupdate', args)
        except exception.AgentError as exc:
            # Silently fail for agent upgrades
            LOG.warning(_LW("Unable to update the agent due "
                            "to: %(exc)s"), dict(exc=exc),
                        instance=self.instance)
    def _exchange_key_with_agent(self):
        """Run the Diffie-Hellman exchange; returns the keyed SimpleDH."""
        dh = SimpleDH()
        args = {'pub': str(dh.get_public())}
        # 'D0' is the agent's success code for key_init.
        resp = self._call_agent('key_init', args, success_codes=['D0'],
                                ignore_errors=False)
        agent_pub = int(resp)
        dh.compute_shared(agent_pub)
        return dh
    def _save_instance_password_if_sshkey_present(self, new_pass):
        """Stash the password (encrypted with the instance ssh key) in metadata."""
        sshkey = self.instance.get('key_data')
        if sshkey and sshkey.startswith("ssh-rsa"):
            ctxt = context.get_admin_context()
            enc = crypto.ssh_encrypt_text(sshkey, new_pass)
            self.instance.system_metadata.update(
                password.convert_password(ctxt, base64.b64encode(enc)))
            self.instance.save()
    def set_admin_password(self, new_pass):
        """Set the root/admin password on the VM instance.
        This is done via an agent running on the VM. Communication between nova
        and the agent is done via writing xenstore records. Since communication
        is done over the XenAPI RPC calls, we need to encrypt the password.
        We're using a simple Diffie-Hellman class instead of a more advanced
        library (such as M2Crypto) for compatibility with the agent code.
        """
        LOG.debug('Setting admin password', instance=self.instance)
        try:
            dh = self._exchange_key_with_agent()
        except exception.AgentError as error:
            self._add_instance_fault(error, sys.exc_info())
            return
        # Some old versions of Linux and Windows agent expect trailing \n
        # on password to work correctly.
        enc_pass = dh.encrypt(new_pass + '\n')
        args = {'enc_pass': enc_pass}
        self._call_agent('password', args)
        self._save_instance_password_if_sshkey_present(new_pass)
    def inject_ssh_key(self):
        """Append the instance's public key to root's authorized_keys (Linux)."""
        sshkey = self.instance.get('key_data')
        if not sshkey:
            return
        if self.instance['os_type'] == 'windows':
            LOG.debug("Skipping setting of ssh key for Windows.",
                      instance=self.instance)
            return
        if self._skip_ssh_key_inject():
            LOG.debug("Skipping agent ssh key injection for this image.",
                      instance=self.instance)
            return
        sshkey = str(sshkey)
        keyfile = '/root/.ssh/authorized_keys'
        key_data = ''.join([
            '\n',
            '# The following ssh key was injected by Nova',
            '\n',
            sshkey.strip(),
            '\n',
        ])
        return self.inject_file(keyfile, key_data)
    def inject_files(self, injected_files):
        """Inject each (path, contents) pair unless the image opts out."""
        if self._skip_inject_files_at_boot():
            LOG.debug("Skipping agent file injection for this image.",
                      instance=self.instance)
        else:
            for path, contents in injected_files:
                self.inject_file(path, contents)
    def inject_file(self, path, contents):
        """Write `contents` to `path` inside the guest via the agent."""
        LOG.debug('Injecting file path: %r', path, instance=self.instance)
        # Files/paths must be base64-encoded for transmission to agent
        b64_path = base64.b64encode(path)
        b64_contents = base64.b64encode(contents)
        args = {'b64_path': b64_path, 'b64_contents': b64_contents}
        return self._call_agent('inject_file', args)
    def resetnetwork(self):
        """Ask the agent to re-read network configuration from xenstore."""
        LOG.debug('Resetting network', instance=self.instance)
        # NOTE(johngarbutt) old FreeBSD and Gentoo agents return 500 on success
        return self._call_agent('resetnetwork',
                timeout=CONF.xenserver.agent_resetnetwork_timeout,
                success_codes=['0', '500'])
    def _skip_ssh_key_inject(self):
        return self._get_sys_meta_key(SKIP_SSH_SM_KEY)
    def _skip_inject_files_at_boot(self):
        return self._get_sys_meta_key(SKIP_FILES_AT_BOOT_SM_KEY)
    def _get_sys_meta_key(self, key):
        """Read a boolean flag from instance system metadata (default False)."""
        sys_meta = utils.instance_sys_meta(self.instance)
        raw_value = sys_meta.get(key, 'False')
        return strutils.bool_from_string(raw_value, strict=False)
def find_guest_agent(base_dir):
    """Return True when a guest agent exists under base_dir.

    Looks for CONF.xenserver.agent_path inside the mounted image; also logs
    whether plain XenServer tools (xe-daemon) are present.
    """
    if CONF.xenserver.disable_agent:
        return False
    agent_rel_path = CONF.xenserver.agent_path
    agent_path = os.path.join(base_dir, agent_rel_path)
    if os.path.isfile(agent_path):
        # The presence of the guest agent
        # file indicates that this instance can
        # reconfigure the network from xenstore data,
        # so manipulation of files in /etc is not
        # required
        LOG.info(_LI('XenServer tools installed in this '
                     'image are capable of network injection. '
                     'Networking files will not be '
                     'manipulated'))
        # (message fix: the original concatenation produced "bemanipulated")
        return True
    xe_daemon_filename = os.path.join(base_dir,
                                      'usr', 'sbin', 'xe-daemon')
    if os.path.isfile(xe_daemon_filename):
        LOG.info(_LI('XenServer tools are present '
                     'in this image but are not capable '
                     'of network injection'))
    else:
        LOG.info(_LI('XenServer tools are not '
                     'installed in this image'))
    return False
def should_use_agent(instance):
    """Decide whether the guest agent should be used for this instance.

    Reads the USE_AGENT_SM_KEY hint from instance system metadata; falls back
    to CONF.xenserver.use_agent_default when the hint is absent or invalid.
    """
    sys_meta = utils.instance_sys_meta(instance)
    if USE_AGENT_SM_KEY not in sys_meta:
        return CONF.xenserver.use_agent_default
    try:
        return strutils.bool_from_string(sys_meta[USE_AGENT_SM_KEY],
                                         strict=True)
    except ValueError:
        LOG.warning(_LW("Invalid 'agent_present' value. "
                        "Falling back to the default."),
                    instance=instance)
        return CONF.xenserver.use_agent_default
class SimpleDH(object):
    """This class wraps all the functionality needed to implement
    basic Diffie-Hellman-Merkle key exchange in Python. It features
    intelligent defaults for the prime and base numbers needed for the
    calculation, while allowing you to supply your own. It requires that
    the openssl binary be installed on the system on which this is run,
    as it uses that to handle the encryption and decryption. If openssl
    is not available, a RuntimeError will be raised.
    """
    def __init__(self):
        # Fixed prime and generator shared with the in-guest agent code.
        self._prime = 162259276829213363391578010288127
        self._base = 5
        self._public = None
        self._shared = None
        self.generate_private()
    def generate_private(self):
        """Draw a fresh random private exponent (80 bits from os.urandom)."""
        self._private = int(binascii.hexlify(os.urandom(10)), 16)
        return self._private
    def get_public(self):
        """Compute and cache the public value base**private mod prime."""
        self._public = pow(self._base, self._private, self._prime)
        return self._public
    def compute_shared(self, other):
        """Combine the peer's public value into the shared secret."""
        self._shared = pow(other, self._private, self._prime)
        return self._shared
    def _run_ssl(self, text, decrypt=False):
        """Pipe `text` through the openssl CLI, keyed by the shared secret."""
        cmd = ['openssl', 'aes-128-cbc', '-A', '-a', '-pass',
               'pass:%s' % self._shared, '-nosalt']
        if decrypt:
            cmd.append('-d')
        output, error = utils.execute(*cmd, process_input=text)
        if error:
            raise RuntimeError(_('OpenSSL error: %s') % error)
        return output
    def encrypt(self, text):
        return self._run_ssl(text).strip('\n')
    def decrypt(self, text):
        return self._run_ssl(text, decrypt=True)
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TPU Strategy."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import copy
import weakref
import numpy as np
from tensorflow.python.distribute import cross_device_ops as cross_device_ops_lib
from tensorflow.python.distribute import device_util
from tensorflow.python.distribute import distribute_lib
from tensorflow.python.distribute import input_lib
from tensorflow.python.distribute import numpy_dataset
from tensorflow.python.distribute import reduce_util
from tensorflow.python.distribute import values
from tensorflow.python.distribute.cluster_resolver import TPUClusterResolver
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.tpu import device_assignment as device_assignment_lib
from tensorflow.python.tpu import tpu
from tensorflow.python.tpu import tpu_strategy_util
from tensorflow.python.tpu import tpu_system_metadata as tpu_system_metadata_lib
from tensorflow.python.tpu import training_loop
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
def get_tpu_system_metadata(tpu_cluster_resolver):
  """Retrieves TPU system metadata given a TPUClusterResolver."""
  master = tpu_cluster_resolver.master()
  cluster_spec = tpu_cluster_resolver.cluster_spec()
  if cluster_spec:
    cluster_def = cluster_spec.as_cluster_def()
  else:
    cluster_def = None
  # pylint: disable=protected-access
  return tpu_system_metadata_lib._query_tpu_system_metadata(
      master, cluster_def=cluster_def, query_topology=False)
@contextlib.contextmanager
def maybe_init_scope():
  # When eager execution is enabled outside functions (TF2-style tracing)
  # no extra scope is needed; in graph mode, lift work into the init
  # scope so variable initializers run outside any inner graph context.
  if ops.executing_eagerly_outside_functions():
    yield
  else:
    with ops.init_scope():
      yield
# TODO(jhseu): Deduplicate with MirroredStrategy?
def _create_tpu_mirrored_variable(  # pylint: disable=missing-docstring
    strategy, device_map, logical_device, real_mirrored_creator,
    *args, **kwargs):
  """Creates one variable per TPU device and wraps them in a
  TPUMirroredVariable registered in the requested graph collections."""
  # Figure out what collections this variable should be added to.
  # We'll add the TPUMirroredVariable to those collections instead.
  var_collections = kwargs.pop("collections", None)
  if var_collections is None:
    var_collections = [ops.GraphKeys.GLOBAL_VARIABLES]
  # Suppress collection registration for the per-device variables; only
  # the wrapper below is added to collections.
  kwargs["collections"] = []

  # TODO(jhseu): Should we have different behavior for different
  # synchronization settings?

  # Get aggregation value
  # TODO(jhseu): Support aggregation in a replica context.
  aggregation = kwargs.pop("aggregation", vs.VariableAggregation.NONE)
  if aggregation not in [
      vs.VariableAggregation.NONE,
      vs.VariableAggregation.SUM,
      vs.VariableAggregation.MEAN,
      vs.VariableAggregation.ONLY_FIRST_REPLICA,
  ]:
    raise ValueError("Invalid variable aggregation mode: {} for variable: {}"
                     .format(aggregation, kwargs["name"]))

  # Ignore user-specified caching device, not needed for mirrored variables.
  kwargs.pop("caching_device", None)

  # TODO(josh11b,apassos): It would be better if variable initialization
  # was never recorded on the tape instead of having to do this manually
  # here.
  with tape.stop_recording():
    devices = device_map.logical_to_actual_devices(logical_device)
    value_list = real_mirrored_creator(devices, *args, **kwargs)
    result = values.TPUMirroredVariable(
        strategy, device_map, value_list, aggregation,
        logical_device=logical_device)

  if not (context.executing_eagerly() or ops.inside_function()):
    g = ops.get_default_graph()
    # If "trainable" is True, next_creator() will add the member variables
    # to the TRAINABLE_VARIABLES collection, so we manually remove
    # them and replace with the MirroredVariable. We can't set
    # "trainable" to False for next_creator() since that causes functions
    # like implicit_gradients to skip those variables.
    if kwargs.get("trainable", True):
      var_collections.append(ops.GraphKeys.TRAINABLE_VARIABLES)
      l = g.get_collection_ref(ops.GraphKeys.TRAINABLE_VARIABLES)
      for v in value_list:
        l.remove(v)
    g.add_to_collections(var_collections, result)
  return result
@tf_export("distribute.experimental.TPUStrategy", v1=[])
class TPUStrategy(distribute_lib.Strategy):
  """TPU distribution strategy implementation."""

  def __init__(self,
               tpu_cluster_resolver=None,
               device_assignment=None):
    """Initializes the TPUStrategy object.

    Args:
      tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
          which provides information about the TPU cluster.
      device_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to
          specify the placement of replicas on the TPU cluster. Currently only
          supports the usecase of using a single core within a TPU cluster.
    """
    # All real work is delegated to TPUExtended; this class is the thin
    # public (V2) API surface.
    super(TPUStrategy, self).__init__(TPUExtended(
        self, tpu_cluster_resolver, device_assignment=device_assignment))

  # TODO(cjfj): Modify `_call_for_each_replica` in `TPUExtended` such that this
  # can use the default implementation.
  # This implementation runs a single step. It does not use infeed or outfeed.
  def experimental_run_v2(self, fn, args=(), kwargs=None):
    """See base class."""
    return self.extended.tpu_run(fn, args, kwargs)
@tf_export(v1=["distribute.experimental.TPUStrategy"])
class TPUStrategyV1(distribute_lib.StrategyV1):
  """TPU distribution strategy implementation."""

  def __init__(self,
               tpu_cluster_resolver=None,
               steps_per_run=None,
               device_assignment=None):
    """Initializes the TPUStrategy object.

    Args:
      tpu_cluster_resolver: A tf.distribute.cluster_resolver.TPUClusterResolver,
          which provides information about the TPU cluster.
      steps_per_run: Number of steps to run on device before returning to the
          host. Note that this can have side-effects on performance, hooks,
          metrics, summaries etc.
          This parameter is only used when Distribution Strategy is used with
          estimator or keras.
      device_assignment: Optional `tf.tpu.experimental.DeviceAssignment` to
          specify the placement of replicas on the TPU cluster. Currently only
          supports the usecase of using a single core within a TPU cluster.
    """
    # V1 variant additionally forwards steps_per_run to the extended impl.
    super(TPUStrategyV1, self).__init__(TPUExtended(
        self, tpu_cluster_resolver, steps_per_run, device_assignment))

  @property
  def steps_per_run(self):
    """DEPRECATED: use .extended.steps_per_run instead."""
    return self._extended.steps_per_run

  # TODO(cjfj): Modify `_call_for_each_replica` in `TPUExtended` such that this
  # can use the default implementation.
  # This implementation runs a single step. It does not use infeed or outfeed.
  def experimental_run_v2(self, fn, args=(), kwargs=None):
    """See base class."""
    return self.extended.tpu_run(fn, args, kwargs)
# TODO(josh11b): Switch to V2 when we no longer need to support tf.compat.v1.
class TPUExtended(distribute_lib.StrategyExtendedV1):
  """Implementation of TPUStrategy."""

  def __init__(self,
               container_strategy,
               tpu_cluster_resolver=None,
               steps_per_run=None,
               device_assignment=None):
    super(TPUExtended, self).__init__(container_strategy)

    if tpu_cluster_resolver is None:
      tpu_cluster_resolver = TPUClusterResolver("")

    if steps_per_run is None:
      # TODO(frankchn): Warn when we are being used by DS/Keras and this is
      # not specified.
      steps_per_run = 1

    # Cache of tf.functions wrapping user computations, keyed (weakly) by
    # the original Python function; see _tpu_function_creator.
    self._tpu_function_cache = weakref.WeakKeyDictionary()

    self._tpu_cluster_resolver = tpu_cluster_resolver
    self._tpu_metadata = get_tpu_system_metadata(self._tpu_cluster_resolver)
    self._device_assignment = device_assignment

    # Device assignment is currently only supported for 1 core case.
    if self._device_assignment:
      assert isinstance(self._device_assignment,
                        device_assignment_lib.DeviceAssignment)
      if self._device_assignment.num_replicas != 1:
        raise ValueError("Device assignment is only supported for a single "
                         "core single replica case currently.")
      if self._device_assignment.num_cores_per_replica != 1:
        raise ValueError("Device assignment is only supported for a single "
                         "core single replica case currently.")
      if not all(self._device_assignment.core_assignment[0][0] == [0, 0, 0]):
        raise ValueError("Device assignment is only supported for a single "
                         "core single replica case currently.")

    # TODO(jhseu): Switch to DeviceAssignment to support pods and model
    # parallelism.
    self._tpu_devices = [d.name for d in self._tpu_metadata.devices
                         if "device:TPU:" in d.name]

    self._host_device = device_util.get_host_for_device(self._tpu_devices[0])

    # Only create variables for the number of replicas we're running.
    self._tpu_devices = self._tpu_devices[:self._num_replicas_in_sync]
    self._device_map = values.ReplicaDeviceMap(self._tpu_devices)

    # Preload the data onto the TPUs.
    # Group TPU devices by their host so each host feeds its own TPUs.
    input_worker_devices = collections.OrderedDict()
    for tpu_device in self._tpu_devices:
      host_device = device_util.get_host_for_device(tpu_device)
      input_worker_devices.setdefault(host_device, [])
      input_worker_devices[host_device].append(tpu_device)
    self._input_workers = input_lib.InputWorkers(
        self._device_map, tuple(input_worker_devices.items()))

    # TODO(sourabhbajaj): Remove this once performance of running one step
    # at a time is comparable to multiple steps.
    self.steps_per_run = steps_per_run
    self._require_static_shapes = True

    self.experimental_enable_get_next_as_optional = True

  def _validate_colocate_with_variable(self, colocate_with_variable):
    # Delegates to the shared TPU-variable colocation check.
    values.validate_colocate_tpu_variable(colocate_with_variable, self)

  def _make_dataset_iterator(self, dataset):
    """Make iterators for each of the TPU hosts."""
    return input_lib.DatasetIterator(
        dataset,
        self._input_workers,
        self._container_strategy(),
        split_batch_by=self._num_replicas_in_sync)

  def _make_input_fn_iterator(
      self,
      input_fn,
      replication_mode=distribute_lib.InputReplicationMode.PER_WORKER):
    """Makes an iterator that calls `input_fn` once per input worker."""
    input_contexts = []
    num_workers = self._input_workers.num_workers
    for i in range(num_workers):
      input_contexts.append(distribute_lib.InputContext(
          num_input_pipelines=num_workers,
          input_pipeline_id=i,
          num_replicas_in_sync=self._num_replicas_in_sync))
    return input_lib.InputFunctionIterator(
        input_fn,
        self._input_workers,
        input_contexts,
        self._container_strategy())

  def _experimental_make_numpy_dataset(self, numpy_input, session):
    # Numpy data always lives on (and is fed from) the host.
    return numpy_dataset.one_host_numpy_dataset(
        numpy_input, numpy_dataset.SingleDevice(self._host_device),
        session)

  def _experimental_distribute_dataset(self, dataset):
    """Distributes `dataset`, splitting each batch across replicas."""
    return input_lib.get_distributed_dataset(
        dataset,
        self._input_workers,
        self._container_strategy(),
        split_batch_by=self._num_replicas_in_sync)

  def _experimental_distribute_datasets_from_function(self, dataset_fn):
    """Calls `dataset_fn` once per input worker with an InputContext."""
    input_contexts = []
    num_workers = self._input_workers.num_workers
    for i in range(num_workers):
      input_contexts.append(distribute_lib.InputContext(
          num_input_pipelines=num_workers,
          input_pipeline_id=i,
          num_replicas_in_sync=self._num_replicas_in_sync))

    return input_lib.DistributedDatasetsFromFunction(
        dataset_fn,
        self._input_workers,
        input_contexts,
        self._container_strategy())

  # TODO(priyag): Deal with OutOfRange errors once b/111349762 is fixed.
  # TODO(sourabhbajaj): Remove the initial_loop_values parameter when we have
  # a mechanism to infer the outputs of `fn`. Pending b/110550782.
  def _experimental_run_steps_on_iterator(
      self, fn, multi_worker_iterator, iterations, initial_loop_values=None):
    """Runs `fn` for `iterations` steps on TPU inside a host-side loop."""
    # Wrap `fn` for repeat.
    if initial_loop_values is None:
      initial_loop_values = {}
    initial_loop_values = nest.flatten(initial_loop_values)
    ctx = input_lib.MultiStepContext()

    def run_fn(inputs):
      """Single step on the TPU device."""
      fn_result = fn(ctx, inputs)
      flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)
      if flat_last_step_outputs:
        with ops.control_dependencies([fn_result]):
          return [array_ops.identity(f) for f in flat_last_step_outputs]
      else:
        return fn_result

    # We capture the control_flow_context at this point, before we run `fn`
    # inside a while_loop and TPU replicate context. This is useful in cases
    # where we might need to exit these contexts and get back to the outer
    # context to do some things, for e.g. create an op which should be
    # evaluated only once at the end of the loop on the host. One such usage
    # is in creating metrics' value op.
    self._outer_control_flow_context = (
        ops.get_default_graph()._get_control_flow_context())  # pylint: disable=protected-access

    def rewrite_fn(*args):
      """The rewritten step fn running on TPU."""
      del args
      per_replica_inputs = multi_worker_iterator.get_next()
      replicate_inputs = []
      for replica_id in range(self._num_replicas_in_sync):
        select_replica = lambda x: values.select_replica(replica_id, x)  # pylint: disable=cell-var-from-loop
        replicate_inputs.append((nest.map_structure(
            select_replica, per_replica_inputs),))

      replicate_outputs = tpu.replicate(
          run_fn, replicate_inputs, device_assignment=self._device_assignment)

      # If run_fn has tensor outputs, tpu.replicate returns a list of list. We
      # will flatten it in this case. If run_fn has no tensor outputs,
      # tpu.replicate returns a list of no_ops, we will keep the output as it
      # is.
      if isinstance(replicate_outputs[0], list):
        replicate_outputs = nest.flatten(replicate_outputs)

      return replicate_outputs

    # TODO(sourabhbajaj): The input to while loop should be based on the
    # output type of the step_fn
    assert isinstance(initial_loop_values, list)
    initial_loop_values = initial_loop_values * self._num_replicas_in_sync

    # Put the while loop op on TPU host 0.
    with ops.device(self._host_device):
      if self.steps_per_run == 1:
        replicate_outputs = rewrite_fn()
      else:
        replicate_outputs = training_loop.repeat(iterations, rewrite_fn,
                                                 initial_loop_values)

    del self._outer_control_flow_context
    ctx.run_op = control_flow_ops.group(replicate_outputs)

    if isinstance(replicate_outputs, list):
      # Filter out any ops from the outputs, typically this would be the case
      # when there were no tensor outputs.
      last_step_tensor_outputs = [
          x for x in replicate_outputs if not isinstance(x, ops.Operation)
      ]

      # Outputs are currently of the structure (flattened)
      # [output0_device0, output1_device0, output2_device0,
      #  output0_device1, output1_device1, output2_device1,
      #  ...]
      # Convert this to the following structure instead: (grouped by output)
      # [[output0_device0, output0_device1],
      #  [output1_device0, output1_device1],
      #  [output2_device0, output2_device1]]
      output_num = len(last_step_tensor_outputs) // self._num_replicas_in_sync
      last_step_tensor_outputs = [
          last_step_tensor_outputs[i::output_num] for i in range(output_num)
      ]
    else:
      # no tensors returned.
      last_step_tensor_outputs = []

    _set_last_step_outputs(ctx, last_step_tensor_outputs)
    return ctx

  def _call_for_each_replica(self, fn, args, kwargs):
    # TODO(jhseu): Consider making it so call_for_each_replica implies that
    # we're in a tpu.rewrite(), and update TPUMirroredVariable accordingly.
    with _TPUReplicaContext(self._container_strategy()):
      return fn(*args, **kwargs)

  def _experimental_initialize_system(self):
    """Experimental method added to be used by Estimator.

    This is a private method only to be used by Estimator. Other frameworks
    should directly be calling `tf.tpu.experimental.initialize_tpu_system`
    """
    tpu_strategy_util.initialize_tpu_system(self._tpu_cluster_resolver)

  def _create_variable(self, next_creator, *args, **kwargs):
    """Create a TPUMirroredVariable. See `DistributionStrategy.scope`."""
    colocate_with = kwargs.pop("colocate_with", None)
    if colocate_with is None:
      device_map = self._device_map
      logical_device = 0  # TODO(josh11b): Get logical device from scope here.
    elif isinstance(colocate_with, numpy_dataset.SingleDevice):
      with ops.device(colocate_with.device):
        return next_creator(*args, **kwargs)
    else:
      device_map = colocate_with.device_map
      logical_device = colocate_with.logical_device

    def _real_mirrored_creator(devices, *args, **kwargs):  # pylint: disable=g-missing-docstring
      initial_value = None
      value_list = []
      for i, d in enumerate(devices):
        with ops.device(d):
          if i == 0:
            initial_value = kwargs["initial_value"]
            # Note: some v1 code expects variable initializer creation to happen
            # inside a init_scope.
            with maybe_init_scope():
              initial_value = initial_value() if callable(
                  initial_value) else initial_value

          if i > 0:
            # Give replicas meaningful distinct names:
            var0name = value_list[0].name.split(":")[0]
            # We append a / to variable names created on replicas with id > 0 to
            # ensure that we ignore the name scope and instead use the given
            # name as the absolute name of the variable.
            kwargs["name"] = "%s/replica_%d/" % (var0name, i)
          kwargs["initial_value"] = initial_value

          with context.device_policy(context.DEVICE_PLACEMENT_SILENT):
            v = next_creator(*args, **kwargs)

          assert not isinstance(v, values.TPUMirroredVariable)
          value_list.append(v)
      return value_list

    return _create_tpu_mirrored_variable(
        self._container_strategy(), device_map, logical_device,
        _real_mirrored_creator, *args, **kwargs)

  def _reduce_to(self, reduce_op, value, destinations):
    # Inside a replica context: use an on-device cross-replica sum.
    if values._enclosing_tpu_context() is not None:  # pylint: disable=protected-access
      if reduce_op == reduce_util.ReduceOp.MEAN:
        # TODO(jhseu): Revisit once we support model-parallelism.
        value *= (1. / self._num_replicas_in_sync)
      elif reduce_op != reduce_util.ReduceOp.SUM:
        raise NotImplementedError(
            "Currently only support sum & mean in TPUStrategy.")
      return tpu_ops.cross_replica_sum(value)

    if not isinstance(value, values.DistributedValues):
      # This function handles reducing values that are not PerReplica or
      # Mirrored values. For example, the same value could be present on all
      # replicas in which case `value` would be a single value or value could
      # be 0.
      return cross_device_ops_lib.reduce_non_distributed_value(
          reduce_op, self._device_map, value, destinations)

    # TODO(cjfj): Detect when it is possible to use `cross_replica_sum`.
    # Always performs the reduction on the TPU host.
    with ops.device(self._host_device):
      output = math_ops.add_n(value.values)
      if reduce_op == reduce_util.ReduceOp.MEAN:
        output *= (1. / len(value.values))

    devices = cross_device_ops_lib.get_devices_from(destinations)

    if len(devices) == 1:
      # If necessary, copy to requested destination.
      dest_canonical = device_util.canonicalize(devices[0])
      host_canonical = device_util.canonicalize(self._host_device)

      if dest_canonical != host_canonical:
        with ops.device(dest_canonical):
          output = array_ops.identity(output)
    else:
      output = cross_device_ops_lib.simple_broadcast(output, destinations)

    return output

  def _update(self, var, fn, args, kwargs, group):
    assert isinstance(var, values.TPUMirroredVariable) or isinstance(
        var, resource_variable_ops.ResourceVariable)
    # In a replica context the update applies to the current replica only.
    if values._enclosing_tpu_context() is not None:  # pylint: disable=protected-access
      if group:
        return fn(var, *args, **kwargs)
      else:
        return (fn(var, *args, **kwargs),)

    # Otherwise, we revert to MirroredStrategy behavior and update each variable
    # directly.
    updates = []
    for i, (d, v) in enumerate(zip(var.devices, var.values)):
      name = "update_%d" % i
      with ops.device(d), distribute_lib.UpdateContext(d), ops.name_scope(name):
        # If args and kwargs are not mirrored, the value is returned as is.
        updates.append(fn(v,
                          *values.select_device_mirrored(d, args),
                          **values.select_device_mirrored(d, kwargs)))
    return values.update_regroup(self, self._device_map, updates, group)

  def read_var(self, var):
    assert isinstance(var, values.TPUMirroredVariable) or isinstance(
        var, resource_variable_ops.ResourceVariable)
    return var.read_value()

  def _local_results(self, val):
    """Returns the per-device components of `val` as a tuple."""
    if isinstance(val, values.DistributedValues):
      # Return in a deterministic order.
      return tuple(val.get(device=d) for d in sorted(val.devices))
    elif isinstance(val, list):
      # TODO(josh11b): We need to remove this case; per device values should
      # be represented using a PerReplica wrapper instead of a list with
      # one entry per device.
      return tuple(val)
    elif isinstance(val, values.TPUMirroredVariable):
      # pylint: disable=protected-access
      if values._enclosing_tpu_context() is not None:
        return (val,)
      return val.values
    return (val,)

  def value_container(self, value):
    return value

  def _broadcast_to(self, tensor, destinations):
    # TPU replication broadcasts implicitly; the tensor is returned as-is.
    del destinations
    return tensor

  @property
  def num_hosts(self):
    if self._device_assignment is None:
      return self._tpu_metadata.num_hosts

    return len(set([self._device_assignment.host_device(r)
                    for r in range(self._device_assignment.num_replicas)]))

  @property
  def num_replicas_per_host(self):
    if self._device_assignment is None:
      return self._tpu_metadata.num_of_cores_per_host

    # TODO(sourabhbajaj): Remove this method we use inputs and remove infeed
    # as the computation of num_replicas_per_host is not a constant
    # when using device_assignment. This is a temporary workaround to support
    # StatefulRNN as everything is 1 in that case.
    # This method needs to take host_id as input for correct computation.
    max_models_per_host = (self._tpu_metadata.num_of_cores_per_host //
                           self._device_assignment.num_cores_per_replica)
    models_per_host = min(self._device_assignment.num_replicas,
                          max_models_per_host)
    return models_per_host * self._device_assignment.num_cores_per_replica

  @property
  def _num_replicas_in_sync(self):
    if self._device_assignment is None:
      return self._tpu_metadata.num_cores
    return (self._device_assignment.num_replicas *
            self._device_assignment.num_cores_per_replica)

  @property
  def experimental_between_graph(self):
    return False

  @property
  def experimental_should_init(self):
    return True

  @property
  def should_checkpoint(self):
    return True

  @property
  def should_save_summary(self):
    return True

  @property
  def worker_devices(self):
    return self._tpu_devices

  @property
  def parameter_devices(self):
    return self._tpu_devices

  def non_slot_devices(self, var_list):
    # Non-slot variables (e.g. optimizer counters) live on the host.
    return self._host_device

  def _update_non_slot(self, colocate_with, fn, args, kwargs, group):
    del colocate_with
    with ops.device(self._host_device), distribute_lib.UpdateContext(
        self._host_device):
      result = fn(*args, **kwargs)
      if group:
        return result
      else:
        return nest.map_structure(self._local_results, result)

  def _configure(self,
                 session_config=None,
                 cluster_spec=None,
                 task_type=None,
                 task_id=None):
    del cluster_spec, task_type, task_id
    if session_config:
      session_config.CopyFrom(self._update_config_proto(session_config))

  def _update_config_proto(self, config_proto):
    # Sessions talking to TPUs must not share state with other sessions.
    updated_config = copy.deepcopy(config_proto)
    updated_config.isolate_session_state = True
    cluster_spec = self._tpu_cluster_resolver.cluster_spec()
    if cluster_spec:
      updated_config.cluster_def.CopyFrom(cluster_spec.as_cluster_def())
    return updated_config

  # TODO(priyag): Delete this once all strategies use global batch size.
  @property
  def _global_batch_size(self):
    """`make_dataset_iterator` and `make_numpy_iterator` use global batch size.

    `make_input_fn_iterator` assumes per-replica batching.

    Returns:
      Boolean.
    """
    return True

  def tpu_run(self, fn, args, kwargs):
    """Replicates `fn` across TPU cores (cached per `fn`)."""
    func = self._tpu_function_creator(fn)
    return func(args, kwargs)

  def _tpu_function_creator(self, fn):
    """Builds (once per `fn`) the replicated TPU computation for `fn`."""
    if fn in self._tpu_function_cache:
      return self._tpu_function_cache[fn]

    strategy = self._container_strategy()

    def tpu_function(args, kwargs):
      """TF Function used to replicate the user computation."""
      if kwargs is None:
        kwargs = {}

      # Remove None at the end of args as they are not replicatable
      # If there are None in the middle we can't do anything about it
      # so let those cases fail.
      # For example when Keras model predict is used they pass the targets as
      # None. We want to handle it here so all client libraries don't have to
      # do this as other strategies can handle None values better.
      while args and args[-1] is None:
        args = args[:-1]

      # Used to re-structure flattened output tensors from `tpu.replicate()`
      # into a structured format.
      result = [[]]

      def replicated_fn(replica_id, replica_args, replica_kwargs):
        """Wraps user function to provide replica ID and `Tensor` inputs."""
        with _TPUReplicaContext(strategy, replica_id_in_sync_group=replica_id):
          result[0] = fn(*replica_args, **replica_kwargs)
        return result[0]

      replicate_inputs = []  # By replica.
      for i in range(strategy.num_replicas_in_sync):
        replicate_inputs.append(
            [constant_op.constant(i, dtype=dtypes.int32),
             values.select_replica(i, args),
             values.select_replica(i, kwargs)])

      # Construct and pass `maximum_shapes` so that we could support dynamic
      # shapes using dynamic padder.
      if replicate_inputs:
        maximum_shapes = []
        flattened_list = nest.flatten(replicate_inputs[0])
        for input_tensor in flattened_list:
          if tensor_util.is_tensor(input_tensor):
            maximum_shape = input_tensor.get_shape()
          else:
            maximum_shape = tensor_shape.TensorShape(np.shape(input_tensor))
          maximum_shapes.append(maximum_shape)
        maximum_shapes = nest.pack_sequence_as(replicate_inputs[0],
                                               maximum_shapes)
      else:
        maximum_shapes = None

      with strategy.scope():
        replicate_outputs = tpu.replicate(
            replicated_fn,
            replicate_inputs,
            device_assignment=self._device_assignment,
            maximum_shapes=maximum_shapes)

      # Remove all no ops that may have been added during 'tpu.replicate()'
      if isinstance(result[0], list):
        result[0] = [
            output for output in result[0] if tensor_util.is_tensor(output)
        ]

      # Workaround for `tpu.replicate` behaviour when single `Tensor` returned.
      if result[0] is None:
        replicate_outputs = [None] * len(replicate_outputs)
      else:
        replicate_outputs = [
            nest.pack_sequence_as(result[0], nest.flatten(replica_output))
            for replica_output in replicate_outputs
        ]

      device_map = self._device_map  # pylint: disable=protected-access
      return values.regroup(device_map, replicate_outputs)

    if context.executing_eagerly():
      tpu_function = def_function.function(tpu_function)

    self._tpu_function_cache[fn] = tpu_function
    return tpu_function
class _TPUReplicaContext(distribute_lib.ReplicaContext):
  """Replication Context class for TPU Strategy."""

  # TODO(sourabhbajaj): Call for each replica should be updating this.
  # TODO(b/118385803): Always properly initialize replica_id.
  def __init__(self, strategy, replica_id_in_sync_group=None):
    # Default to replica 0 when no id is supplied (see TODO above).
    if replica_id_in_sync_group is None:
      replica_id_in_sync_group = constant_op.constant(0, dtypes.int32)
    distribute_lib.ReplicaContext.__init__(
        self, strategy, replica_id_in_sync_group=replica_id_in_sync_group)

  @property
  def devices(self):
    distribute_lib.require_replica_context(self)
    ds = self._strategy
    replica_id = tensor_util.constant_value(self._replica_id_in_sync_group)

    if replica_id is None:  # Non-constant `Tensor` inside `tpu.replicate`.
      # TODO(cjfj): Return other devices when model parallelism is supported.
      return (tpu.core(0),)
    else:
      return (ds.extended.worker_devices[replica_id],)
def _set_last_step_outputs(ctx, last_step_tensor_outputs):
  """Sets the last step outputs on the given context."""
  # Convert replicate_outputs to the original dict structure of
  # last_step_outputs.
  last_step_tensor_outputs_dict = nest.pack_sequence_as(
      ctx.last_step_outputs, last_step_tensor_outputs)

  for name, reduce_op in ctx._last_step_outputs_reduce_ops.items():  # pylint: disable=protected-access
    output = last_step_tensor_outputs_dict[name]
    # For outputs that have already been reduced, take the first value
    # from the list as each value should be the same. Else return the full
    # list of values.
    # TODO(josh11b): If reduce_op is NONE, we should return a PerReplica
    # value.
    if reduce_op is not None:
      # TODO(priyag): Should this return the element or a list with 1 element
      last_step_tensor_outputs_dict[name] = output[0]
  ctx._set_last_step_outputs(last_step_tensor_outputs_dict)  # pylint: disable=protected-access
| |
# -*- coding: utf-8 -*-
# Miguel Angel Asencio Hurtado
# Gabriel Giovanni Gonzalez Galindo
from ply import lex, yacc
from sys import stdin
# Token names for literal values (string, character, real, integer).
tipos = ['tk_cadena', 'tk_caracter', 'tk_real', 'tk_entero']

# Reserved words of the language.  Each keyword maps to a token name
# identical to the keyword itself (the usual PLY keyword-lookup idiom).
reserved = {
    'funcion_principal': 'funcion_principal',
    'fin_principal': 'fin_principal',
    'leer': 'leer',
    'imprimir': 'imprimir',
    'verdadero': 'verdadero',
    'falso': 'falso',
    'booleano': 'booleano',
    'cadena': 'cadena',
    'caracter': 'caracter',
    'real': 'real',
    'entero': 'entero',
    'defecto': 'defecto',
    'si': 'si',
    'entonces': 'entonces',
    'si_no': 'si_no',
    'fin_si': 'fin_si',
    'mientras': 'mientras',
    'hacer': 'hacer',
    'fin_mientras': 'fin_mientras',
    'para': 'para',
    'fin_para': 'fin_para',
    'seleccionar': 'seleccionar',
    'entre': 'entre',
    'caso': 'caso',
    'romper': 'romper',
    'fin_seleccionar': 'fin_seleccionar',
    'estructura': 'estructura',
    'fin_estructura': 'fin_estructura',
    'funcion': 'funcion',
    'retornar': 'retornar'
}
def find_column(input, token):
    """Return the 1-based column of *token* within the source *input*."""
    newline_at = input.rfind('\n', 0, token.lexpos)
    if newline_at < 0:
        # First line: no preceding newline, so columns count from 1.
        return token.lexpos + 1
    # Offset from the newline; the char right after '\n' is column 1.
    return token.lexpos - newline_at
# Grammar start symbol for yacc.
start = 'S'

# Operator precedence: +/- bind looser than */ (both left-associative).
precedence = (
    ('left', 'tk_mas', 'tk_menos'),
    ('left', 'tk_mult', 'tk_div'),
)

# Full token list required by PLY (lexer and parser share it).
tokens = (
    'fin_funcion',
    'tk_cadena',
    'tk_caracter',
    'tk_real',
    'tk_entero',
    'tk_mas',
    'tk_menos',
    'tk_mult',
    'tk_div',
    'tk_mod',
    'tk_asig',
    'tk_menor',
    'tk_mayor',
    'tk_menor_igual',
    'tk_mayor_igual',
    'tk_igual',
    'tk_y',
    'tk_o',
    'tk_dif',
    'tk_neg',
    'tk_dosp',
    'tk_comilla_sen',
    'tk_comilla_dob',
    'tk_pyc',
    'tk_coma',
    'tk_punto',
    'tk_par_izq',
    'tk_par_der',
    'id'
)

# Exclusive lexer state used while inside a /* ... */ block comment.
states = (
    ('comment', 'exclusive'),
)
# Whitespace skipped in the default (INITIAL) state.
t_ignore = ' \t\v\r'

def t_error(token):
    # Report an unrecognized character with its line and 1-based column.
    print '>>> Error lexico (linea:'+str(token.lexer.lineno)+', posicion: '+str(find_column(code, token))+')'

def t_comment(token):
    r'/\*'
    # Enter the exclusive 'comment' state on '/*'.
    token.lexer.begin('comment')

def t_comment_end(token):
    r'\*/'
    # NOTE(review): token.value is just '*/' so count('\n') is always 0;
    # the '+ 2' looks like an attempt to account for comment newlines,
    # but newlines inside the comment are skipped by t_comment_error and
    # never counted — line numbers after a multi-line comment may be off.
    # TODO confirm intended behavior.
    token.lexer.lineno += (token.value.count('\n') + 2)
    token.lexer.begin('INITIAL')

def t_comment_error(token):
    # Inside a comment, silently consume one character at a time.
    token.lexer.skip(1)

# Whitespace skipped while in the 'comment' state.
t_comment_ignore = ' '

def t_newline(token):
    r'\n'
    token.lexer.lineno += 1
    pass

def t_eolcomment(token):
    r'//[^\n]*'
    # Line comments produce no token.
    pass
# Lexer rules.  In PLY the docstring of each t_* function IS the regex
# for that token, and rules are tried in definition order — so the
# two-character operators (<=, >=, ==, !=) must appear before their
# one-character prefixes (=, <, >).  Do not edit the docstrings.

def t_tk_cadena(token):
    r'"[^"]*"'
    return token

def t_tk_caracter(token):
    r'\'[^\']\''
    return token

def t_tk_real(token):
    r'[0-9]+\.[0-9]+'
    return token

def t_tk_entero(token):
    r'[0-9]+'
    return token

def t_tk_mas(token):
    r'\+'
    return token

def t_tk_menos(token):
    r'-'
    return token

def t_tk_mult(token):
    r'\*'
    return token

def t_tk_div(token):
    r'/'
    return token

def t_tk_mod(token):
    r'%'
    return token

def t_tk_menor_igual(token):
    r'<='
    return token

def t_tk_mayor_igual(token):
    r'>='
    return token

def t_tk_igual(token):
    r'=='
    return token

def t_tk_dif(token):
    r'!='
    return token

def t_tk_asig(token):
    r'='
    return token

def t_tk_menor(token):
    r'<'
    return token

def t_tk_mayor(token):
    r'>'
    return token

def t_tk_y(token):
    r'&&'
    return token

def t_tk_o(token):
    r'\|\|'
    return token

def t_tk_neg(token):
    r'!'
    return token

def t_tk_dosp(token):
    r':'
    return token

def t_tk_comilla_sen(token):
    r'\''
    return token

def t_tk_comilla_dob(token):
    r'"'
    return token

def t_tk_pyc(token):
    r';'
    return token

def t_tk_coma(token):
    r','
    return token

def t_tk_punto(token):
    r'\.'
    return token

def t_tk_par_izq(token):
    r'\('
    return token

def t_tk_par_der(token):
    r'\)'
    return token

def t_id(token):
    r'[a-zA-Z]+[a-zA-Z0-9_]*'
    # NOTE(review): reserved words are not remapped here (the usual
    # `token.type = reserved.get(token.value, 'id')` idiom is missing),
    # so keywords are emitted as plain 'id' tokens — TODO confirm.
    return token
# --- Grammar rules (ply.yacc) ---------------------------------------------
# Each p_* docstring is the BNF production itself: yacc reads the grammar
# from these strings, so they are program behavior, not documentation.
def p_start(p):
    'S : exp'
    # Start symbol: forward the expression's semantic value.
    p[0] = p[1]
def p_exp_binop(p):
    '''exp : exp tk_mas exp
    | exp tk_menos exp
    | exp tk_mult exp
    | exp tk_div exp'''
    # Build a tagged AST node: ("binop", left, operator-lexeme, right).
    # Operator ambiguity is resolved by the `precedence` table above.
    p[0] = ("binop", p[1], p[2], p[3])
def p_exp_empty(p):
    'exp : '
    # NOTE(review): an empty production for `exp` makes the grammar
    # ambiguous (yacc will report conflicts) -- confirm it is intended.
    p[0] = []
def p_exp_entero(p):
    'exp : tk_entero'
    # Integer literals evaluate to their lexeme.
    p[0] = p[1]
# Entry point: read the whole program from stdin, build the lexer and
# parser (ply picks up the t_/p_ rules defined in this module), parse,
# and print the resulting AST.
code = stdin.read()
psilexer = lex.lex()
psiparser = yacc.yacc()
psi_code = psiparser.parse(code,lexer=psilexer)
print psi_code
| |
"""Support for sensors through the SmartThings cloud API."""
from __future__ import annotations
from collections import namedtuple
from collections.abc import Sequence
from pysmartthings import Attribute, Capability
from pysmartthings.device import DeviceEntity
from homeassistant.components.sensor import (
STATE_CLASS_MEASUREMENT,
STATE_CLASS_TOTAL_INCREASING,
SensorEntity,
)
from homeassistant.const import (
AREA_SQUARE_METERS,
CONCENTRATION_PARTS_PER_MILLION,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_CO,
DEVICE_CLASS_CO2,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_TIMESTAMP,
DEVICE_CLASS_VOLTAGE,
ELECTRIC_POTENTIAL_VOLT,
ENERGY_KILO_WATT_HOUR,
ENTITY_CATEGORY_CONFIG,
ENTITY_CATEGORY_DIAGNOSTIC,
LIGHT_LUX,
MASS_KILOGRAMS,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
VOLUME_CUBIC_METERS,
)
from homeassistant.util import dt as dt_util
from . import SmartThingsEntity
from .const import DATA_BROKERS, DOMAIN
# Schema of one sensor mapping: which SmartThings attribute to read, the
# entity-name suffix, a fallback unit, and the Home Assistant device class,
# state class and entity category metadata.
Map = namedtuple(
    "map", "attribute name default_unit device_class state_class entity_category"
)
# Static table mapping each SmartThings Capability to the list of sensor
# entities (Map tuples) it should create.  Capabilities mapped to an empty
# list (three_axis, power_consumption_report) are handled by dedicated
# entity classes in async_setup_entry rather than by the generic
# SmartThingsSensor.
CAPABILITY_TO_SENSORS = {
    Capability.activity_lighting_mode: [
        Map(
            Attribute.lighting_mode,
            "Activity Lighting Mode",
            None,
            None,
            None,
            ENTITY_CATEGORY_CONFIG,
        )
    ],
    Capability.air_conditioner_mode: [
        Map(
            Attribute.air_conditioner_mode,
            "Air Conditioner Mode",
            None,
            None,
            None,
            ENTITY_CATEGORY_CONFIG,
        )
    ],
    Capability.air_quality_sensor: [
        Map(
            Attribute.air_quality,
            "Air Quality",
            "CAQI",
            None,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.alarm: [Map(Attribute.alarm, "Alarm", None, None, None, None)],
    Capability.audio_volume: [
        Map(Attribute.volume, "Volume", PERCENTAGE, None, None, None)
    ],
    Capability.battery: [
        Map(
            Attribute.battery,
            "Battery",
            PERCENTAGE,
            DEVICE_CLASS_BATTERY,
            None,
            ENTITY_CATEGORY_DIAGNOSTIC,
        )
    ],
    Capability.body_mass_index_measurement: [
        Map(
            Attribute.bmi_measurement,
            "Body Mass Index",
            f"{MASS_KILOGRAMS}/{AREA_SQUARE_METERS}",
            None,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.body_weight_measurement: [
        Map(
            Attribute.body_weight_measurement,
            "Body Weight",
            MASS_KILOGRAMS,
            None,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.carbon_dioxide_measurement: [
        Map(
            Attribute.carbon_dioxide,
            "Carbon Dioxide Measurement",
            CONCENTRATION_PARTS_PER_MILLION,
            DEVICE_CLASS_CO2,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.carbon_monoxide_detector: [
        Map(
            Attribute.carbon_monoxide,
            "Carbon Monoxide Detector",
            None,
            None,
            None,
            None,
        )
    ],
    Capability.carbon_monoxide_measurement: [
        Map(
            Attribute.carbon_monoxide_level,
            "Carbon Monoxide Measurement",
            CONCENTRATION_PARTS_PER_MILLION,
            DEVICE_CLASS_CO,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.dishwasher_operating_state: [
        Map(
            Attribute.machine_state, "Dishwasher Machine State", None, None, None, None
        ),
        Map(
            Attribute.dishwasher_job_state,
            "Dishwasher Job State",
            None,
            None,
            None,
            None,
        ),
        Map(
            Attribute.completion_time,
            "Dishwasher Completion Time",
            None,
            DEVICE_CLASS_TIMESTAMP,
            None,
            None,
        ),
    ],
    Capability.dryer_mode: [
        Map(
            Attribute.dryer_mode,
            "Dryer Mode",
            None,
            None,
            None,
            ENTITY_CATEGORY_CONFIG,
        )
    ],
    Capability.dryer_operating_state: [
        Map(Attribute.machine_state, "Dryer Machine State", None, None, None, None),
        Map(Attribute.dryer_job_state, "Dryer Job State", None, None, None, None),
        Map(
            Attribute.completion_time,
            "Dryer Completion Time",
            None,
            DEVICE_CLASS_TIMESTAMP,
            None,
            None,
        ),
    ],
    Capability.dust_sensor: [
        Map(
            Attribute.fine_dust_level,
            "Fine Dust Level",
            None,
            None,
            STATE_CLASS_MEASUREMENT,
            None,
        ),
        Map(
            Attribute.dust_level,
            "Dust Level",
            None,
            None,
            STATE_CLASS_MEASUREMENT,
            None,
        ),
    ],
    Capability.energy_meter: [
        Map(
            Attribute.energy,
            "Energy Meter",
            ENERGY_KILO_WATT_HOUR,
            DEVICE_CLASS_ENERGY,
            STATE_CLASS_TOTAL_INCREASING,
            None,
        )
    ],
    Capability.equivalent_carbon_dioxide_measurement: [
        Map(
            Attribute.equivalent_carbon_dioxide_measurement,
            "Equivalent Carbon Dioxide Measurement",
            CONCENTRATION_PARTS_PER_MILLION,
            None,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.formaldehyde_measurement: [
        Map(
            Attribute.formaldehyde_level,
            "Formaldehyde Measurement",
            CONCENTRATION_PARTS_PER_MILLION,
            None,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.gas_meter: [
        Map(
            Attribute.gas_meter,
            "Gas Meter",
            ENERGY_KILO_WATT_HOUR,
            None,
            STATE_CLASS_MEASUREMENT,
            None,
        ),
        Map(
            Attribute.gas_meter_calorific, "Gas Meter Calorific", None, None, None, None
        ),
        Map(
            Attribute.gas_meter_time,
            "Gas Meter Time",
            None,
            DEVICE_CLASS_TIMESTAMP,
            None,
            None,
        ),
        Map(
            Attribute.gas_meter_volume,
            "Gas Meter Volume",
            VOLUME_CUBIC_METERS,
            None,
            STATE_CLASS_MEASUREMENT,
            None,
        ),
    ],
    Capability.illuminance_measurement: [
        Map(
            Attribute.illuminance,
            "Illuminance",
            LIGHT_LUX,
            DEVICE_CLASS_ILLUMINANCE,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.infrared_level: [
        Map(
            Attribute.infrared_level,
            "Infrared Level",
            PERCENTAGE,
            None,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.media_input_source: [
        Map(Attribute.input_source, "Media Input Source", None, None, None, None)
    ],
    Capability.media_playback_repeat: [
        Map(
            Attribute.playback_repeat_mode,
            "Media Playback Repeat",
            None,
            None,
            None,
            None,
        )
    ],
    Capability.media_playback_shuffle: [
        Map(
            Attribute.playback_shuffle, "Media Playback Shuffle", None, None, None, None
        )
    ],
    Capability.media_playback: [
        Map(Attribute.playback_status, "Media Playback Status", None, None, None, None)
    ],
    Capability.odor_sensor: [
        Map(Attribute.odor_level, "Odor Sensor", None, None, None, None)
    ],
    Capability.oven_mode: [
        Map(
            Attribute.oven_mode,
            "Oven Mode",
            None,
            None,
            None,
            ENTITY_CATEGORY_CONFIG,
        )
    ],
    Capability.oven_operating_state: [
        Map(Attribute.machine_state, "Oven Machine State", None, None, None, None),
        Map(Attribute.oven_job_state, "Oven Job State", None, None, None, None),
        Map(Attribute.completion_time, "Oven Completion Time", None, None, None, None),
    ],
    Capability.oven_setpoint: [
        Map(Attribute.oven_setpoint, "Oven Set Point", None, None, None, None)
    ],
    # Handled by SmartThingsPowerConsumptionSensor in async_setup_entry.
    Capability.power_consumption_report: [],
    Capability.power_meter: [
        Map(
            Attribute.power,
            "Power Meter",
            POWER_WATT,
            DEVICE_CLASS_POWER,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.power_source: [
        Map(
            Attribute.power_source,
            "Power Source",
            None,
            None,
            None,
            ENTITY_CATEGORY_DIAGNOSTIC,
        )
    ],
    Capability.refrigeration_setpoint: [
        Map(
            Attribute.refrigeration_setpoint,
            "Refrigeration Setpoint",
            None,
            DEVICE_CLASS_TEMPERATURE,
            None,
            None,
        )
    ],
    Capability.relative_humidity_measurement: [
        Map(
            Attribute.humidity,
            "Relative Humidity Measurement",
            PERCENTAGE,
            DEVICE_CLASS_HUMIDITY,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.robot_cleaner_cleaning_mode: [
        Map(
            Attribute.robot_cleaner_cleaning_mode,
            "Robot Cleaner Cleaning Mode",
            None,
            None,
            None,
            ENTITY_CATEGORY_CONFIG,
        )
    ],
    Capability.robot_cleaner_movement: [
        Map(
            Attribute.robot_cleaner_movement,
            "Robot Cleaner Movement",
            None,
            None,
            None,
            None,
        )
    ],
    Capability.robot_cleaner_turbo_mode: [
        Map(
            Attribute.robot_cleaner_turbo_mode,
            "Robot Cleaner Turbo Mode",
            None,
            None,
            None,
            ENTITY_CATEGORY_CONFIG,
        )
    ],
    Capability.signal_strength: [
        Map(
            Attribute.lqi,
            "LQI Signal Strength",
            None,
            None,
            STATE_CLASS_MEASUREMENT,
            ENTITY_CATEGORY_DIAGNOSTIC,
        ),
        Map(
            Attribute.rssi,
            "RSSI Signal Strength",
            None,
            DEVICE_CLASS_SIGNAL_STRENGTH,
            STATE_CLASS_MEASUREMENT,
            ENTITY_CATEGORY_DIAGNOSTIC,
        ),
    ],
    Capability.smoke_detector: [
        Map(Attribute.smoke, "Smoke Detector", None, None, None, None)
    ],
    Capability.temperature_measurement: [
        Map(
            Attribute.temperature,
            "Temperature Measurement",
            None,
            DEVICE_CLASS_TEMPERATURE,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.thermostat_cooling_setpoint: [
        Map(
            Attribute.cooling_setpoint,
            "Thermostat Cooling Setpoint",
            None,
            DEVICE_CLASS_TEMPERATURE,
            None,
            None,
        )
    ],
    Capability.thermostat_fan_mode: [
        Map(
            Attribute.thermostat_fan_mode,
            "Thermostat Fan Mode",
            None,
            None,
            None,
            ENTITY_CATEGORY_CONFIG,
        )
    ],
    Capability.thermostat_heating_setpoint: [
        Map(
            Attribute.heating_setpoint,
            "Thermostat Heating Setpoint",
            None,
            DEVICE_CLASS_TEMPERATURE,
            None,
            ENTITY_CATEGORY_CONFIG,
        )
    ],
    Capability.thermostat_mode: [
        Map(
            Attribute.thermostat_mode,
            "Thermostat Mode",
            None,
            None,
            None,
            ENTITY_CATEGORY_CONFIG,
        )
    ],
    Capability.thermostat_operating_state: [
        Map(
            Attribute.thermostat_operating_state,
            "Thermostat Operating State",
            None,
            None,
            None,
            None,
        )
    ],
    Capability.thermostat_setpoint: [
        Map(
            Attribute.thermostat_setpoint,
            "Thermostat Setpoint",
            None,
            DEVICE_CLASS_TEMPERATURE,
            None,
            ENTITY_CATEGORY_CONFIG,
        )
    ],
    # Handled by SmartThingsThreeAxisSensor in async_setup_entry.
    Capability.three_axis: [],
    Capability.tv_channel: [
        Map(Attribute.tv_channel, "Tv Channel", None, None, None, None),
        Map(Attribute.tv_channel_name, "Tv Channel Name", None, None, None, None),
    ],
    Capability.tvoc_measurement: [
        Map(
            Attribute.tvoc_level,
            "Tvoc Measurement",
            CONCENTRATION_PARTS_PER_MILLION,
            None,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.ultraviolet_index: [
        Map(
            Attribute.ultraviolet_index,
            "Ultraviolet Index",
            None,
            None,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.voltage_measurement: [
        Map(
            Attribute.voltage,
            "Voltage Measurement",
            ELECTRIC_POTENTIAL_VOLT,
            DEVICE_CLASS_VOLTAGE,
            STATE_CLASS_MEASUREMENT,
            None,
        )
    ],
    Capability.washer_mode: [
        Map(
            Attribute.washer_mode,
            "Washer Mode",
            None,
            None,
            None,
            ENTITY_CATEGORY_CONFIG,
        )
    ],
    Capability.washer_operating_state: [
        Map(Attribute.machine_state, "Washer Machine State", None, None, None, None),
        Map(Attribute.washer_job_state, "Washer Job State", None, None, None, None),
        Map(
            Attribute.completion_time,
            "Washer Completion Time",
            None,
            DEVICE_CLASS_TIMESTAMP,
            None,
            None,
        ),
    ],
}
# Normalize SmartThings unit strings to Home Assistant temperature units.
UNITS = {"C": TEMP_CELSIUS, "F": TEMP_FAHRENHEIT}
# Entity-name suffixes for the three axes of Capability.three_axis.
THREE_AXIS_NAMES = ["X Coordinate", "Y Coordinate", "Z Coordinate"]
# Fields of the powerConsumptionReport attribute that become sensors.
POWER_CONSUMPTION_REPORT_NAMES = [
    "energy",
    "power",
    "deltaEnergy",
    "powerEnergy",
    "energySaved",
]
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Add sensors for a config entry.

    Walks every device known to the broker and creates one entity per
    mapped attribute; three-axis and power-consumption capabilities get
    dedicated entity classes.  Switch devices additionally get energy and
    power meter sensors.
    """
    broker = hass.data[DOMAIN][DATA_BROKERS][config_entry.entry_id]

    def _mapped_sensors(device, capability):
        """Build the generic SmartThingsSensor list for one capability."""
        return [
            SmartThingsSensor(
                device,
                m.attribute,
                m.name,
                m.default_unit,
                m.device_class,
                m.state_class,
                m.entity_category,
            )
            for m in CAPABILITY_TO_SENSORS[capability]
        ]

    sensors = []
    for device in broker.devices.values():
        for capability in broker.get_assigned(device.device_id, "sensor"):
            if capability == Capability.three_axis:
                # One entity per axis (X, Y, Z).
                sensors.extend(
                    SmartThingsThreeAxisSensor(device, index)
                    for index in range(len(THREE_AXIS_NAMES))
                )
            elif capability == Capability.power_consumption_report:
                # One entity per report field (energy, power, ...).
                sensors.extend(
                    SmartThingsPowerConsumptionSensor(device, report_name)
                    for report_name in POWER_CONSUMPTION_REPORT_NAMES
                )
            else:
                sensors.extend(_mapped_sensors(device, capability))
        if broker.any_assigned(device.device_id, "switch"):
            # Switches always expose their energy/power meters as sensors.
            for capability in (Capability.energy_meter, Capability.power_meter):
                sensors.extend(_mapped_sensors(device, capability))
    async_add_entities(sensors)
def get_capabilities(capabilities: Sequence[str]) -> Sequence[str] | None:
    """Return all capabilities supported if minimum required are present."""
    supported = []
    for capability in CAPABILITY_TO_SENSORS:
        if capability in capabilities:
            supported.append(capability)
    return supported
class SmartThingsSensor(SmartThingsEntity, SensorEntity):
    """A sensor backed by a single SmartThings device attribute."""

    def __init__(
        self,
        device: DeviceEntity,
        attribute: str,
        name: str,
        default_unit: str,
        device_class: str,
        state_class: str | None,
        entity_category: str | None,
    ) -> None:
        """Store the attribute mapping this entity reports."""
        super().__init__(device)
        self._attribute = attribute
        self._name = name
        self._device_class = device_class
        self._default_unit = default_unit
        self._attr_state_class = state_class
        self._attr_entity_category = entity_category

    @property
    def name(self) -> str:
        """Return the friendly name: device label plus mapped name."""
        return f"{self._device.label} {self._name}"

    @property
    def unique_id(self) -> str:
        """Return a unique ID derived from device id and attribute."""
        return f"{self._device.device_id}.{self._attribute}"

    @property
    def native_value(self):
        """Return the attribute value, parsing timestamps into datetimes."""
        raw = self._device.status.attributes[self._attribute].value
        if self._device_class == DEVICE_CLASS_TIMESTAMP:
            return dt_util.parse_datetime(raw)
        return raw

    @property
    def device_class(self):
        """Return the configured device class."""
        return self._device_class

    @property
    def native_unit_of_measurement(self):
        """Return the reported unit (normalized), or the mapping default."""
        unit = self._device.status.attributes[self._attribute].unit
        if not unit:
            return self._default_unit
        return UNITS.get(unit, unit)
class SmartThingsThreeAxisSensor(SmartThingsEntity, SensorEntity):
    """Expose one axis of a SmartThings three-axis sensor."""

    def __init__(self, device, index):
        """Remember which axis (0=X, 1=Y, 2=Z) this entity reports."""
        super().__init__(device)
        self._index = index

    @property
    def name(self) -> str:
        """Return the friendly name: device label plus axis name."""
        return f"{self._device.label} {THREE_AXIS_NAMES[self._index]}"

    @property
    def unique_id(self) -> str:
        """Return a unique ID derived from device id and axis name."""
        return f"{self._device.device_id}.{THREE_AXIS_NAMES[self._index]}"

    @property
    def native_value(self):
        """Return this axis' coordinate, or None when unavailable."""
        axes = self._device.status.attributes[Attribute.three_axis].value
        try:
            return axes[self._index]
        except (TypeError, IndexError):
            # Value may be None (TypeError) or shorter than expected.
            return None
class SmartThingsPowerConsumptionSensor(SmartThingsEntity, SensorEntity):
    """Expose one field of a SmartThings powerConsumptionReport."""

    def __init__(
        self,
        device: DeviceEntity,
        report_name: str,
    ) -> None:
        """Remember which report field this entity exposes."""
        super().__init__(device)
        self.report_name = report_name
        # "power" is an instantaneous reading; every other field accumulates.
        if report_name == "power":
            self._attr_state_class = STATE_CLASS_MEASUREMENT
        else:
            self._attr_state_class = STATE_CLASS_TOTAL_INCREASING

    @property
    def name(self) -> str:
        """Return the friendly name: device label plus report field."""
        return f"{self._device.label} {self.report_name}"

    @property
    def unique_id(self) -> str:
        """Return a unique ID derived from device id and report field."""
        return f"{self._device.device_id}.{self.report_name}_meter"

    @property
    def native_value(self):
        """Return the field value; energy fields are converted Wh -> kWh."""
        report = self._device.status.attributes[Attribute.power_consumption].value
        if report is None or report.get(self.report_name) is None:
            return None
        raw = report[self.report_name]
        return raw if self.report_name == "power" else raw / 1000

    @property
    def device_class(self):
        """Return POWER for the power field, ENERGY for the rest."""
        return DEVICE_CLASS_POWER if self.report_name == "power" else DEVICE_CLASS_ENERGY

    @property
    def native_unit_of_measurement(self):
        """Return W for the power field, kWh for the rest."""
        return POWER_WATT if self.report_name == "power" else ENERGY_KILO_WATT_HOUR
| |
# encoding: utf-8
# Copyright 2013 maker
# License
"""
Project Management module views
"""
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.http import HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from django.db.models import Q
from maker.core.models import Object, ModuleSetting, UpdateRecord
from maker.core.views import user_denied
from maker.core.rendering import render_to_response
from maker.core.decorators import maker_login_required, handle_response_format
from maker.projects.models import Project, Milestone, Task, TaskStatus, TaskTimeSlot
from maker.projects.forms import ProjectForm, MilestoneForm, TaskForm, FilterForm, TaskRecordForm, \
MassActionForm, TaskTimeSlotForm, TaskStatusForm, SettingsForm
from django.utils.translation import ugettext as _
from datetime import datetime
import simplejson as json
def _get_filter_query(args):
    "Creates a query to filter Tasks based on FilterForm arguments"
    query = Q()
    for name in args:
        value = args[name]
        if value and hasattr(Task, name):
            # Filter on the related object's id, e.g. status__id=<pk>.
            query &= Q(**{str(name + '__id'): long(value)})
    return query
def _get_default_context(request):
    "Returns default context as a dict()"
    profile = request.user.get_profile()
    return {
        'projects': Object.filter_by_request(request, Project.objects),
        'statuses': Object.filter_by_request(request, TaskStatus.objects),
        'massform': MassActionForm(profile),
    }
def _process_mass_form(f):
"Pre-process request to handle mass action form for Tasks and Milestones"
def wrap(request, *args, **kwargs):
"Wrap"
if 'massform' in request.POST:
for key in request.POST:
if 'mass-milestone' in key:
try:
milestone = Milestone.objects.get(pk=request.POST[key])
form = MassActionForm(request.user.get_profile(), request.POST, instance=milestone)
if form.is_valid() and request.user.get_profile().has_permission(milestone, mode='w'):
form.save()
except Exception:
pass
for key in request.POST:
if 'mass-task' in key:
try:
task = Task.objects.get(pk=request.POST[key])
form = MassActionForm(request.user.get_profile(), request.POST, instance=task)
if form.is_valid() and request.user.get_profile().has_permission(task, mode='w'):
form.save()
except Exception:
pass
return f(request, *args, **kwargs)
wrap.__doc__ = f.__doc__
wrap.__name__ = f.__name__
return wrap
@handle_response_format
@maker_login_required
@_process_mass_form
def index(request, response_format='html'):
    "Project Management index page"
    # Top-level tasks only; hidden statuses are excluded unless an explicit
    # status filter was requested.
    query = Q(parent__isnull=True)
    if not request.GET:
        query &= Q(status__hidden=False)
    elif request.GET.get('status'):
        query &= _get_filter_query(request.GET)
    else:
        query &= Q(status__hidden=False) & _get_filter_query(request.GET)

    tasks = Object.filter_by_request(request, Task.objects.filter(query))
    milestones = Object.filter_by_request(
        request, Milestone.objects.filter(status__hidden=False))
    filters = FilterForm(request.user.get_profile(), '', request.GET)

    context = _get_default_context(request)
    context.update({'milestones': milestones,
                    'tasks': tasks,
                    'filters': filters})
    return render_to_response('projects/index', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@maker_login_required
@_process_mass_form
def index_owned(request, response_format='html'):
    "Tasks owned by current user"
    profile = request.user.get_profile()
    query = Q(parent__isnull=True, caller__related_user=profile)
    if not request.GET:
        query &= Q(status__hidden=False)
    elif request.GET.get('status'):
        query &= _get_filter_query(request.GET)
    else:
        query &= Q(status__hidden=False) & _get_filter_query(request.GET)

    tasks = Object.filter_by_request(request, Task.objects.filter(query))
    milestones = Object.filter_by_request(
        request, Milestone.objects.filter(status__hidden=False))
    filters = FilterForm(profile, 'status', request.GET)

    context = _get_default_context(request)
    context.update({'milestones': milestones,
                    'tasks': tasks,
                    'filters': filters})
    return render_to_response('projects/index_owned', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@maker_login_required
@_process_mass_form
def index_assigned(request, response_format='html'):
    "Tasks assigned to current user"
    profile = request.user.get_profile()
    query = Q(parent__isnull=True, assigned=profile)
    if not request.GET:
        query &= Q(status__hidden=False)
    elif request.GET.get('status'):
        query &= _get_filter_query(request.GET)
    else:
        query &= Q(status__hidden=False) & _get_filter_query(request.GET)

    tasks = Object.filter_by_request(request, Task.objects.filter(query))
    milestones = Object.filter_by_request(
        request, Milestone.objects.filter(status__hidden=False))
    filters = FilterForm(profile, 'assigned', request.GET)

    context = _get_default_context(request)
    context.update({'milestones': milestones,
                    'tasks': tasks,
                    'filters': filters})
    return render_to_response('projects/index_assigned', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@maker_login_required
@_process_mass_form
def index_by_status(request, status_id, response_format='html'):
    "Sort tasks by status"
    status = get_object_or_404(TaskStatus, pk=status_id)
    profile = request.user.get_profile()
    if not profile.has_permission(status):
        return user_denied(request, message="You don't have access to this Task Status")

    query = Q(parent__isnull=True, status=status)
    if request.GET:
        query &= _get_filter_query(request.GET)
    tasks = Object.filter_by_request(request, Task.objects.filter(query))
    milestones = Object.filter_by_request(
        request, Milestone.objects.filter(task__status=status).distinct())
    filters = FilterForm(profile, 'status', request.GET)

    context = _get_default_context(request)
    context.update({'milestones': milestones,
                    'tasks': tasks,
                    'status': status,
                    'filters': filters})
    return render_to_response('projects/index_by_status', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@maker_login_required
@_process_mass_form
def index_in_progress(request, response_format='html'):
    "A page with a list of tasks in progress"
    profile = request.user.get_profile()
    query = Q(parent__isnull=True) & Q(status__hidden=False)
    if request.GET:
        query &= _get_filter_query(request.GET)

    tasks = Object.filter_by_request(request, Task.objects.filter(query))
    milestones = Object.filter_by_request(
        request, Milestone.objects.filter(status__hidden=False))
    filters = FilterForm(profile, 'status', request.GET)
    # Slots with a start time but no end time are the ones still running.
    time_slots = Object.filter_by_request(
        request,
        TaskTimeSlot.objects.filter(time_from__isnull=False, time_to__isnull=True))

    context = _get_default_context(request)
    context.update({'milestones': milestones,
                    'tasks': tasks,
                    'filters': filters,
                    'time_slots': time_slots})
    return render_to_response('projects/index_in_progress', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
#
# Projects
#
@handle_response_format
@maker_login_required
def project_add(request, response_format='html'):
    "New project form"
    profile = request.user.get_profile()
    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects'))
        project = Project()
        form = ProjectForm(profile, None, request.POST, instance=project)
        if form.is_valid():
            project = form.save()
            project.set_user_from_request(request)
            return HttpResponseRedirect(reverse('projects_project_view', args=[project.id]))
        # Invalid form falls through and is re-rendered with errors.
    else:
        form = ProjectForm(profile, None)

    context = _get_default_context(request)
    context.update({'form': form})
    return render_to_response('projects/project_add', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@maker_login_required
def project_add_typed(request, project_id=None, response_format='html'):
    "Project add to preselected parent project"
    profile = request.user.get_profile()
    parent_project = None
    if project_id:
        parent_project = get_object_or_404(Project, pk=project_id)
        # Without execute permission the parent preselection is dropped.
        if not profile.has_permission(parent_project, mode='x'):
            parent_project = None

    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects'))
        project = Project()
        form = ProjectForm(profile, project_id, request.POST, instance=project)
        if form.is_valid():
            project = form.save()
            project.set_user_from_request(request)
            return HttpResponseRedirect(reverse('projects_project_view', args=[project.id]))
    else:
        form = ProjectForm(profile, project_id)

    context = _get_default_context(request)
    context.update({'form': form, 'project': parent_project})
    return render_to_response('projects/project_add_typed', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@maker_login_required
@_process_mass_form
def project_view(request, project_id, response_format='html'):
    "Single project view page"
    project = get_object_or_404(Project, pk=project_id)
    profile = request.user.get_profile()
    if not profile.has_permission(project):
        return user_denied(request, message="You don't have access to this Project")

    # Top-level tasks of this project; hidden statuses excluded unless an
    # explicit status filter was requested.
    query = Q(parent__isnull=True, project=project)
    if not request.GET:
        query &= Q(status__hidden=False)
    elif request.GET.get('status'):
        query &= _get_filter_query(request.GET)
    else:
        query &= Q(status__hidden=False) & _get_filter_query(request.GET)

    # Manual update-record form, only for users with read access.
    if profile.has_permission(project, mode='r'):
        if request.POST:
            record = UpdateRecord()
            record.record_type = 'manual'
            form = TaskRecordForm(profile, request.POST, instance=record)
            if form.is_valid():
                record = form.save()
                record.set_user_from_request(request)
                record.save()
                record.about.add(project)
                project.set_last_updated()
                return HttpResponseRedirect(reverse('projects_project_view', args=[project.id]))
        else:
            form = TaskRecordForm(profile)
    else:
        form = None

    tasks = Object.filter_by_request(request, Task.objects.filter(query))

    # Percentage of top-level tasks whose status is no longer active.
    progress = float(0)
    top_tasks = Object.filter_by_request(
        request, Task.objects.filter(Q(parent__isnull=True, project=project)))
    if top_tasks:
        for task in top_tasks:
            if not task.status.active:
                progress += 1
        progress = round((progress / len(top_tasks)) * 100, ndigits=1)

    filters = FilterForm(profile, 'project', request.GET)
    milestones = Object.filter_by_request(
        request,
        Milestone.objects.filter(project=project).filter(status__hidden=False))
    subprojects = Project.objects.filter(parent=project)

    context = _get_default_context(request)
    context.update({'project': project,
                    'milestones': milestones,
                    'tasks': tasks,
                    'tasks_progress': progress,
                    'record_form': form,
                    'subprojects': subprojects,
                    'filters': filters})
    return render_to_response('projects/project_view', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@maker_login_required
def project_edit(request, project_id, response_format='html'):
    "Project edit page"
    project = get_object_or_404(Project, pk=project_id)
    profile = request.user.get_profile()
    if not profile.has_permission(project, mode='w'):
        return user_denied(request, message="You don't have access to this Project")

    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_project_view', args=[project.id]))
        form = ProjectForm(profile, None, request.POST, instance=project)
        if form.is_valid():
            project = form.save()
            return HttpResponseRedirect(reverse('projects_project_view', args=[project.id]))
    else:
        form = ProjectForm(profile, None, instance=project)

    context = _get_default_context(request)
    context.update({'form': form, 'project': project})
    return render_to_response('projects/project_edit', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@maker_login_required
def project_delete(request, project_id, response_format='html'):
    "Project delete"
    project = get_object_or_404(Project, pk=project_id)
    profile = request.user.get_profile()
    if not profile.has_permission(project, mode='w'):
        return user_denied(request, message="You don't have access to this Project")

    if request.POST:
        if 'delete' in request.POST:
            # "trash" soft-deletes; otherwise the project is removed for good.
            if 'trash' in request.POST:
                project.trash = True
                project.save()
            else:
                project.delete()
            return HttpResponseRedirect(reverse('projects_index'))
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_project_view', args=[project.id]))

    context = _get_default_context(request)
    context.update({'project': project})
    return render_to_response('projects/project_delete', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
#
# Milestones
#
@handle_response_format
@maker_login_required
def milestone_add(request, response_format='html'):
    "New milestone form"
    profile = request.user.get_profile()
    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects'))
        milestone = Milestone()
        form = MilestoneForm(profile, None, request.POST, instance=milestone)
        if form.is_valid():
            milestone = form.save()
            milestone.set_user_from_request(request)
            return HttpResponseRedirect(reverse('projects_milestone_view', args=[milestone.id]))
        # Invalid form falls through and is re-rendered with errors.
    else:
        form = MilestoneForm(profile, None)

    context = _get_default_context(request)
    context.update({'form': form})
    return render_to_response('projects/milestone_add', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@maker_login_required
def milestone_add_typed(request, project_id=None, response_format='html'):
    "Milestone add to preselected project"
    profile = request.user.get_profile()
    project = None
    if project_id:
        project = get_object_or_404(Project, pk=project_id)
        # Without execute permission the project preselection is dropped.
        if not profile.has_permission(project, mode='x'):
            project = None

    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects'))
        milestone = Milestone()
        form = MilestoneForm(profile, project_id, request.POST, instance=milestone)
        if form.is_valid():
            milestone = form.save()
            milestone.set_user_from_request(request)
            return HttpResponseRedirect(reverse('projects_milestone_view', args=[milestone.id]))
    else:
        form = MilestoneForm(profile, project_id)

    context = _get_default_context(request)
    context.update({'form': form, 'project': project})
    return render_to_response('projects/milestone_add_typed', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@maker_login_required
@_process_mass_form
def milestone_view(request, milestone_id, response_format='html'):
    "Single milestone view page"
    milestone = get_object_or_404(Milestone, pk=milestone_id)
    project = milestone.project
    profile = request.user.get_profile()
    if not profile.has_permission(milestone):
        return user_denied(request, message="You don't have access to this Milestone")

    query = Q(milestone=milestone, parent__isnull=True)
    if not request.GET:
        tasks = Object.filter_by_request(
            request, Task.objects.filter(query & Q(status__hidden=False)))
    else:
        if request.GET.get('status'):
            query &= _get_filter_query(request.GET)
        else:
            query &= Q(status__hidden=False) & _get_filter_query(request.GET)
        tasks = Object.filter_by_request(request, Task.objects.filter(query))

    filters = FilterForm(profile, 'milestone', request.GET)

    # Percentage of top-level tasks whose status is no longer active.
    progress = float(0)
    top_tasks = Object.filter_by_request(
        request, Task.objects.filter(Q(parent__isnull=True, milestone=milestone)))
    if top_tasks:
        for task in top_tasks:
            if not task.status.active:
                progress += 1
        progress = round((progress / len(top_tasks)) * 100, ndigits=1)

    context = _get_default_context(request)
    context.update({'milestone': milestone,
                    'tasks': tasks,
                    'tasks_progress': progress,
                    'filters': filters,
                    'project': project})
    return render_to_response('projects/milestone_view', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@maker_login_required
def milestone_edit(request, milestone_id, response_format='html'):
    "Milestone edit page"
    milestone = get_object_or_404(Milestone, pk=milestone_id)
    project = milestone.project
    profile = request.user.get_profile()
    if not profile.has_permission(milestone, mode='w'):
        return user_denied(request, message="You don't have access to this Milestone")

    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_milestone_view', args=[milestone.id]))
        form = MilestoneForm(profile, None, request.POST, instance=milestone)
        if form.is_valid():
            milestone = form.save()
            return HttpResponseRedirect(reverse('projects_milestone_view', args=[milestone.id]))
    else:
        form = MilestoneForm(profile, None, instance=milestone)

    context = _get_default_context(request)
    context.update({'form': form,
                    'milestone': milestone,
                    'project': project})
    return render_to_response('projects/milestone_edit', context,
                              context_instance=RequestContext(request),
                              response_format=response_format)
@handle_response_format
@maker_login_required
def milestone_delete(request, milestone_id, response_format='html'):
    "Milestone delete"
    milestone = get_object_or_404(Milestone, pk=milestone_id)
    project = milestone.project
    profile = request.user.get_profile()
    if not profile.has_permission(milestone, mode='w'):
        return user_denied(request, message="You don't have access to this Milestone")
    # Top-level tasks under this milestone, narrowed by any GET filters.
    query = Q(milestone=milestone, parent__isnull=True)
    if request.GET:
        query &= _get_filter_query(request.GET)
    tasks = Object.filter_by_request(request, Task.objects.filter(query))
    if request.POST:
        if 'delete' in request.POST:
            if 'trash' in request.POST:
                # Soft delete: flag the row as trashed instead of removing it.
                milestone.trash = True
                milestone.save()
            else:
                milestone.delete()
            return HttpResponseRedirect(reverse('projects_index'))
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_milestone_view', args=[milestone.id]))
    context = _get_default_context(request)
    context.update({'milestone': milestone,
                    'tasks': tasks,
                    'project': project})
    return render_to_response('projects/milestone_delete', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@maker_login_required
def milestone_set_status(request, milestone_id, status_id, response_format='html'):
    "Milestone quick set: Status"
    milestone = get_object_or_404(Milestone, pk=milestone_id)
    profile = request.user.get_profile()
    if not profile.has_permission(milestone, mode='x'):
        return user_denied(request, message="You don't have access to this Milestone")
    status = get_object_or_404(TaskStatus, pk=status_id)
    if not profile.has_permission(status):
        return user_denied(request, message="You don't have access to this Milestone Status")
    # Only touch the database when the status actually changes.
    if milestone.status != status:
        milestone.status = status
        milestone.save()
    return milestone_view(request, milestone_id, response_format)
#
# Tasks
#
@handle_response_format
@maker_login_required
def task_add(request, response_format='html'):
    "New task form"
    profile = request.user.get_profile()
    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects'))
        form = TaskForm(profile, None, None, None, request.POST, instance=Task())
        if form.is_valid():
            task = form.save()
            task.set_user_from_request(request)
            return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
        # Invalid submission: fall through and re-render the bound form.
    else:
        form = TaskForm(profile, None, None, None)
    context = _get_default_context(request)
    context.update({'form': form})
    return render_to_response('projects/task_add', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@maker_login_required
def task_add_typed(request, project_id=None, response_format='html'):
    """Task add to preselected project.

    Renders a TaskForm bound to the given project (if any).

    BUGFIX: a user without execute permission on the project used to be
    left with ``project = None`` while processing continued, so a later
    'cancel' POST crashed on ``project.id``; such users are now rejected
    up front, and a cancel with no project falls back to the index.
    """
    profile = request.user.get_profile()
    project = None
    if project_id:
        project = get_object_or_404(Project, pk=project_id)
        if not profile.has_permission(project, mode='x'):
            return user_denied(request, message="You don't have access to this Project")
    if request.POST:
        if 'cancel' in request.POST:
            if project is not None:
                return HttpResponseRedirect(reverse('projects_project_view', args=[project.id]))
            # No (accessible) project preselected -- go back to the index.
            return HttpResponseRedirect(reverse('projects'))
        task = Task()
        form = TaskForm(profile, None, project_id, None, request.POST, instance=task)
        if form.is_valid():
            task = form.save()
            task.set_user_from_request(request)
            return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
    else:
        form = TaskForm(profile, None, project_id, None)
    context = _get_default_context(request)
    context.update({'form': form,
                    'project': project})
    return render_to_response('projects/task_add_typed', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@maker_login_required
def task_add_to_milestone(request, milestone_id=None, response_format='html'):
    """Task add to preselected milestone.

    Renders a TaskForm bound to the milestone and its project.

    BUGFIX: the original set ``milestone = None`` when the id was missing
    or the permission check failed, then immediately dereferenced
    ``milestone.project`` -> AttributeError.  A missing milestone is now a
    404 and a failed permission check an explicit denial.
    """
    milestone = get_object_or_404(Milestone, pk=milestone_id)
    profile = request.user.get_profile()
    if not profile.has_permission(milestone, mode='x'):
        return user_denied(request, message="You don't have access to this Milestone")
    project = milestone.project
    project_id = project.id
    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_milestone_view', args=[milestone.id]))
        task = Task()
        form = TaskForm(profile, None,
                        project_id, milestone_id, request.POST, instance=task)
        if form.is_valid():
            task = form.save()
            task.set_user_from_request(request)
            return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
    else:
        form = TaskForm(profile, None, project_id, milestone_id)
    context = _get_default_context(request)
    context.update({'form': form,
                    'project': project,
                    'milestone': milestone})
    return render_to_response('projects/task_add_to_milestone', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@maker_login_required
def task_add_subtask(request, task_id=None, response_format='html'):
    """New subtask form.

    BUGFIX: the original set ``parent = None`` on a missing id or a failed
    permission check and later crashed on ``parent.id`` in the redirect
    branches; a missing parent is now a 404 and a failed permission check
    an explicit denial.
    """
    parent = get_object_or_404(Task, pk=task_id)
    profile = request.user.get_profile()
    if not profile.has_permission(parent, mode='x'):
        return user_denied(request, message="You don't have access to this Task")
    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_task_view', args=[parent.id]))
        task = Task()
        form = TaskForm(profile, parent, None, None, request.POST, instance=task)
        if form.is_valid():
            task = form.save()
            task.set_user_from_request(request)
            # Return to the parent task's page, where the subtask is listed.
            return HttpResponseRedirect(reverse('projects_task_view', args=[parent.id]))
    else:
        form = TaskForm(profile, parent, None, None)
    context = _get_default_context(request)
    context.update({'form': form,
                    'task': parent})
    return render_to_response('projects/task_add_subtask', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@maker_login_required
@_process_mass_form
def task_view(request, task_id, response_format='html'):
    """Single task view page.

    Read access is required to view; execute access additionally enables
    the manual update-record form and the work-tracking buttons.
    """
    task = get_object_or_404(Task, pk=task_id)
    if not request.user.get_profile().has_permission(task):
        return user_denied(request, message="You don't have access to this Task")
    if request.user.get_profile().has_permission(task, mode='x'):
        if request.POST:
            if 'add-work' in request.POST:
                return HttpResponseRedirect(reverse('projects_task_time_slot_add', args=[task.id]))
            elif 'start-work' in request.POST:
                # NOTE(review): this only redirects back to the task view
                # without starting a time slot -- confirm whether it should
                # call the task_time_slot_start flow instead.
                return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
            # Otherwise the POST is a manual update record for this task.
            record = UpdateRecord()
            record.record_type = 'manual'
            form = TaskRecordForm(request.user.get_profile(), request.POST, instance=record)
            if form.is_valid():
                record = form.save()
                record.set_user_from_request(request)
                record.save()
                # Attach the record to the task and bump its activity stamp.
                record.about.add(task)
                task.set_last_updated()
                return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
            # Invalid record form falls through and is re-rendered below.
        else:
            form = TaskRecordForm(request.user.get_profile())
    else:
        # Viewer without execute rights: no record form at all.
        form = None
    subtasks = Object.filter_by_request(request, Task.objects.filter(parent=task))
    time_slots = Object.filter_by_request(request, TaskTimeSlot.objects.filter(task=task))
    context = _get_default_context(request)
    context.update({'task': task,
                    'subtasks': subtasks,
                    'record_form': form,
                    'time_slots': time_slots})
    # The mass-action form (added by @_process_mass_form) shouldn't offer a
    # project selector on a single-task page.
    if 'massform' in context and 'project' in context['massform'].fields:
        del context['massform'].fields['project']
    return render_to_response('projects/task_view', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@maker_login_required
def task_edit(request, task_id, response_format='html'):
    "Task edit page"
    task = get_object_or_404(Task, pk=task_id)
    profile = request.user.get_profile()
    if not profile.has_permission(task, mode='w'):
        return user_denied(request, message="You don't have access to this Task")
    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
        form = TaskForm(profile, None, None, None, request.POST, instance=task)
        if form.is_valid():
            task = form.save()
            return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
        # Invalid form falls through and is re-rendered with its errors.
    else:
        form = TaskForm(profile, None, None, None, instance=task)
    context = _get_default_context(request)
    context.update({'form': form,
                    'task': task})
    return render_to_response('projects/task_edit', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@maker_login_required
def task_delete(request, task_id, response_format='html'):
    "Task delete"
    task = get_object_or_404(Task, pk=task_id)
    profile = request.user.get_profile()
    if not profile.has_permission(task, mode='w'):
        return user_denied(request, message="You don't have access to this Task")
    if request.POST:
        if 'delete' in request.POST:
            if 'trash' in request.POST:
                # Soft delete: keep the row, flag it as trashed.
                task.trash = True
                task.save()
            else:
                task.delete()
            return HttpResponseRedirect(reverse('projects_index'))
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
    subtasks = Object.filter_by_request(request, Task.objects.filter(parent=task))
    time_slots = Object.filter_by_request(request, TaskTimeSlot.objects.filter(task=task))
    context = _get_default_context(request)
    context.update({'task': task,
                    'subtasks': subtasks,
                    'time_slots': time_slots})
    return render_to_response('projects/task_delete', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@maker_login_required
def task_set_status(request, task_id, status_id, response_format='html'):
    "Task quick set: Status"
    task = get_object_or_404(Task, pk=task_id)
    profile = request.user.get_profile()
    if not profile.has_permission(task, mode='x'):
        return user_denied(request, message="You don't have access to this Task")
    status = get_object_or_404(TaskStatus, pk=status_id)
    if not profile.has_permission(status):
        return user_denied(request, message="You don't have access to this Task Status")
    # Skip the write when nothing changes.
    if task.status != status:
        task.status = status
        task.save()
    return task_view(request, task_id, response_format)
#
# Task Time Slots
#
@handle_response_format
@maker_login_required
def task_time_slot_start(request, task_id, response_format='html'):
    "Start TaskTimeSlot for preselected Task"
    task = get_object_or_404(Task, pk=task_id)
    profile = request.user.get_profile()
    if not profile.has_permission(task, mode='x'):
        return user_denied(request, message="You don't have access to this Task")
    # Open a new slot only if this user isn't already clocked in on the task.
    if not task.is_being_done_by(profile):
        slot = TaskTimeSlot(task=task, time_from=datetime.now(), user=profile)
        slot.save()
        slot.set_user_from_request(request)
    return HttpResponseRedirect(reverse('projects_task_view', args=[task_id]))
@handle_response_format
@maker_login_required
def task_time_slot_stop(request, slot_id, response_format='html'):
    """Stop TaskTimeSlot for preselected Task.

    Stamps ``time_to``, stores any submitted details, and returns to the
    owning task's page.
    """
    slot = get_object_or_404(TaskTimeSlot, pk=slot_id)
    if not request.user.get_profile().has_permission(slot, mode='w'):
        return user_denied(request, message="You don't have access to this TaskTimeSlot")
    if request.POST and 'stop' in request.POST:
        slot.time_to = datetime.now()
        # BUGFIX: use .get() -- a POST without a 'details' field used to
        # raise KeyError and 500 instead of just closing the slot.
        slot.details = request.POST.get('details', '')
        slot.save()
    return HttpResponseRedirect(reverse('projects_task_view', args=[slot.task_id]))
@handle_response_format
@maker_login_required
def task_time_slot_add(request, task_id, response_format='html'):
    "Time slot add to preselected task"
    task = get_object_or_404(Task, pk=task_id)
    profile = request.user.get_profile()
    if not profile.has_permission(task, mode='x'):
        return user_denied(request, message="You don't have access to this Task")
    if request.POST:
        # Pre-fill the slot's end time with "now"; the form supplies the rest.
        slot = TaskTimeSlot(task=task, time_to=datetime.now(), user=profile)
        form = TaskTimeSlotForm(profile, task_id, request.POST, instance=slot)
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
        if form.is_valid():
            slot = form.save()
            slot.set_user_from_request(request)
            return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
    else:
        form = TaskTimeSlotForm(profile, task_id)
    subtasks = Object.filter_by_request(request, Task.objects.filter(parent=task))
    time_slots = Object.filter_by_request(request, TaskTimeSlot.objects.filter(task=task))
    context = _get_default_context(request)
    context.update({'form': form,
                    'task': task,
                    'subtasks': subtasks,
                    'time_slots': time_slots})
    return render_to_response('projects/task_time_add', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@maker_login_required
def task_time_slot_view(request, time_slot_id, response_format='html'):
    "Task time slot view page"
    task_time_slot = get_object_or_404(TaskTimeSlot, pk=time_slot_id)
    task = task_time_slot.task
    profile = request.user.get_profile()
    # Read access to either the slot itself or its parent task suffices.
    if not (profile.has_permission(task_time_slot) or profile.has_permission(task)):
        return user_denied(request, message="You don't have access to this Task Time Slot")
    context = _get_default_context(request)
    context.update({'task_time_slot': task_time_slot,
                    'task': task})
    return render_to_response('projects/task_time_view', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@maker_login_required
def task_time_slot_edit(request, time_slot_id, response_format='html'):
    """Task time slot edit page.

    BUGFIX: 'cancel' is now checked before the form is validated/saved.
    Previously the cancel branch was an ``elif`` after ``form.is_valid()``,
    so cancelling with a valid form silently saved the edits anyway --
    every sibling edit view checks cancel first.
    """
    task_time_slot = get_object_or_404(TaskTimeSlot, pk=time_slot_id)
    task = task_time_slot.task
    profile = request.user.get_profile()
    # Write access to either the slot or its parent task is enough.
    if not profile.has_permission(task_time_slot, mode='w') \
            and not profile.has_permission(task, mode='w'):
        return user_denied(request, message="You don't have access to this Task Time Slot")
    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
        form = TaskTimeSlotForm(profile, None, request.POST, instance=task_time_slot)
        if form.is_valid():
            task_time_slot = form.save()
            return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
    else:
        form = TaskTimeSlotForm(profile, None, instance=task_time_slot)
    context = _get_default_context(request)
    context.update({'form': form,
                    'task_time_slot': task_time_slot,
                    'task': task})
    return render_to_response('projects/task_time_edit', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@maker_login_required
def task_time_slot_delete(request, time_slot_id, response_format='html'):
    "Task time slot delete"
    task_time_slot = get_object_or_404(TaskTimeSlot, pk=time_slot_id)
    task = task_time_slot.task
    profile = request.user.get_profile()
    # Write access to either the slot or its parent task is enough.
    if not (profile.has_permission(task_time_slot, mode='w')
            or profile.has_permission(task, mode='w')):
        return user_denied(request, message="You don't have access to this Task Time Slot")
    if request.POST:
        if 'delete' in request.POST:
            if 'trash' in request.POST:
                # Soft delete: keep the row, flag it as trashed.
                task_time_slot.trash = True
                task_time_slot.save()
            else:
                task_time_slot.delete()
            return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_task_view', args=[task.id]))
    context = _get_default_context(request)
    context.update({'task_time_slot': task_time_slot,
                    'task': task})
    return render_to_response('projects/task_time_delete', context,
                              context_instance=RequestContext(request), response_format=response_format)
#
# Task Statuses
#
@handle_response_format
@maker_login_required
def task_status_add(request, response_format='html'):
    "TaskStatus add"
    profile = request.user.get_profile()
    # Creating statuses is a module-administration action.
    if not profile.is_admin('maker.projects'):
        return user_denied(request, message="You don't have administrator access to the Projects module")
    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_settings_view'))
        form = TaskStatusForm(profile, request.POST, instance=TaskStatus())
        if form.is_valid():
            status = form.save()
            status.set_user_from_request(request)
            return HttpResponseRedirect(reverse('projects_index_by_status', args=[status.id]))
    else:
        form = TaskStatusForm(profile)
    context = _get_default_context(request)
    context.update({'form': form})
    return render_to_response('projects/status_add', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@maker_login_required
def task_status_edit(request, status_id, response_format='html'):
    "TaskStatus edit"
    status = get_object_or_404(TaskStatus, pk=status_id)
    profile = request.user.get_profile()
    if not profile.has_permission(status, mode='w'):
        return user_denied(request, message="You don't have access to this Task Status")
    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_index_by_status', args=[status.id]))
        form = TaskStatusForm(profile, request.POST, instance=status)
        if form.is_valid():
            status = form.save()
            return HttpResponseRedirect(reverse('projects_index_by_status', args=[status.id]))
        # Invalid form falls through and is re-rendered with its errors.
    else:
        form = TaskStatusForm(profile, instance=status)
    context = _get_default_context(request)
    context.update({'form': form,
                    'status': status})
    return render_to_response('projects/status_edit', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@maker_login_required
def task_status_delete(request, status_id, response_format='html'):
    "TaskStatus delete"
    status = get_object_or_404(TaskStatus, pk=status_id)
    profile = request.user.get_profile()
    if not profile.has_permission(status, mode='w'):
        return user_denied(request, message="You don't have access to this Task Status")
    if request.POST:
        if 'delete' in request.POST:
            if 'trash' in request.POST:
                # Soft delete: keep the row, flag it as trashed.
                status.trash = True
                status.save()
            else:
                status.delete()
            return HttpResponseRedirect(reverse('projects_index'))
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_index_by_status', args=[status.id]))
    milestones = Object.filter_by_request(request, Milestone.objects)
    context = _get_default_context(request)
    context.update({'status': status,
                    'milestones': milestones})
    return render_to_response('projects/status_delete', context,
                              context_instance=RequestContext(request), response_format=response_format)
#
# Settings
#
@handle_response_format
@maker_login_required
def settings_view(request, response_format='html'):
    "Settings"
    profile = request.user.get_profile()
    if not profile.is_admin('maker.projects'):
        return user_denied(request, message="You don't have administrator access to the Projects module")
    # Resolve the configured default task status; any failure (no setting,
    # missing row, trashed status, unparsable value) just means "no default".
    default_task_status = None
    try:
        conf = ModuleSetting.get_for_module('maker.projects', 'default_task_status')[0]
        default_task_status = TaskStatus.objects.get(pk=long(conf.value), trash=False)
    except Exception:
        pass
    statuses = TaskStatus.objects.filter(trash=False)
    context = _get_default_context(request)
    context.update({'default_task_status': default_task_status,
                    'statuses': statuses})
    return render_to_response('projects/settings_view', context,
                              context_instance=RequestContext(request), response_format=response_format)
@handle_response_format
@maker_login_required
def settings_edit(request, response_format='html'):
    "Settings"
    profile = request.user.get_profile()
    if not profile.is_admin('maker.projects'):
        return user_denied(request, message="You don't have administrator access to the Projects module")
    if request.POST:
        if 'cancel' in request.POST:
            return HttpResponseRedirect(reverse('projects_settings_view'))
        form = SettingsForm(profile, request.POST)
        if form.is_valid():
            form.save()
            return HttpResponseRedirect(reverse('projects_settings_view'))
        # Invalid form falls through and is re-rendered with its errors.
    else:
        form = SettingsForm(profile)
    context = _get_default_context(request)
    context.update({'form': form})
    return render_to_response('projects/settings_edit', context,
                              context_instance=RequestContext(request), response_format=response_format)
#
# AJAX lookups
#
@maker_login_required
def ajax_task_lookup(request, response_format='html'):
    "Returns a list of matching tasks"
    # Empty result unless a search term was supplied in the query string.
    tasks = []
    term = request.GET.get('term') if request.GET else None
    if term is not None:
        tasks = Task.objects.filter(name__icontains=term)[:10]
    return render_to_response('projects/ajax_task_lookup',
                              {'tasks': tasks},
                              context_instance=RequestContext(request),
                              response_format=response_format)
#
# Widgets
#
@maker_login_required
def widget_tasks_assigned_to_me(request, response_format='html'):
    "A list of tasks assigned to current user"
    # Top-level, non-hidden tasks; per-user scoping comes from filter_by_request.
    active_top_level = Task.objects.filter(Q(parent__isnull=True) & Q(status__hidden=False))
    tasks = Object.filter_by_request(request, active_top_level)
    return render_to_response('projects/widgets/tasks_assigned_to_me',
                              {'tasks': tasks},
                              context_instance=RequestContext(request), response_format=response_format)
#
# Gantt Chart
#
def _gantt_task_series(tasks):
    """Serialize date-bounded tasks into gantt chart series entries."""
    series = []
    for task in tasks:
        # Truncate long names for the chart label; link the full task page.
        tlabel = (task.name[:30] + '..') if len(task.name) > 30 else task.name
        tn = '<a href="%s" class="popup-link">%s</a>' % (reverse('projects_task_view', args=[task.id]), tlabel)
        series.append({'id': task.id,
                       'name': tn,
                       'label': tlabel,
                       'start': task.start_date.date().isoformat(),
                       'end': task.end_date.date().isoformat()})
    return series
@maker_login_required
def gantt_view(request, project_id, response_format='html'):
    """Gantt chart for a project: milestones plus their date-bounded tasks.

    BUGFIX: an unknown project_id used to raise an uncaught IndexError on
    the ``[0]`` lookup (the old ``if not project`` check was unreachable
    because ``[0]`` had already thrown); it now raises Http404 as intended.
    Also extracts the duplicated task-serialization loop into
    ``_gantt_task_series``.
    """
    projects = Project.objects.filter(trash=False)
    try:
        project = projects.filter(pk=project_id)[0]
    except IndexError:
        raise Http404
    ganttData = []
    #generate json
    milestones = Milestone.objects.filter(project=project).filter(trash=False)
    for milestone in milestones:
        tasks = Task.objects.filter(milestone=milestone).filter(start_date__isnull=False).filter(end_date__isnull=False).filter(trash=False)
        series = _gantt_task_series(tasks)
        mlabel = (milestone.name[:30] + '..') if len(milestone.name) > 30 else milestone.name
        mn = '<a href="%s" class="popup-link projects-milestone">%s</a>' % (reverse('projects_milestone_view', args=[milestone.id]), mlabel)
        a = {'id': milestone.id, 'name': mn, 'label': mlabel, 'series': series}
        if milestone.start_date and milestone.end_date:
            a['start'] = milestone.start_date.date().isoformat()
            a['end'] = milestone.end_date.date().isoformat()
            a['color'] = '#E3F3D9'
        # Only chart milestones that have tasks or their own date range.
        if series or (milestone.start_date and milestone.end_date):
            ganttData.append(a)
    # Tasks with dates but no milestone go into a synthetic group.
    unclassified = Task.objects.filter(project=project).filter(milestone__isnull=True).filter(start_date__isnull=False).filter(end_date__isnull=False).filter(trash=False)
    series = _gantt_task_series(unclassified)
    if series:
        ganttData.append({'id': 0, 'name': _('Unclassified Tasks'), 'series': series})
    jdata = json.dumps(ganttData) if ganttData else None
    return render_to_response('projects/gantt_view',
                              {'jdata': jdata,
                               'project': project,
                               'projects': projects},
                              context_instance=RequestContext(request), response_format=response_format)
#@maker_login_required
def task_ajax(request, response_format='html'):
    "For AJAX"
    # NOTE(review): debug stub -- dumps the request to stdout and falls off
    # the end returning None, which Django rejects as a view response; the
    # real JSON response is still commented out below.  Confirm this endpoint
    # is unused/disabled (its login decorator is commented out as well).
    print request
    if request.POST:
        print request.POST
    #return HttpResponse(options, mimetype=settings.MAKER_RESPONSE_FORMATS['json'])
| |
from ..visualization import _WindowManager,_ThreadedWindowManager,_globalLock,VisualizationScene
from .vis_gl import GLVisualizationFrontend,GLVisualizationPlugin,WindowInfo
from .. import glinit,gldraw,glcommon
from ...robotsim import WorldModel,RobotModel
import threading
if not glinit.available('GLUT'):
    raise ImportError("Can't import vis_glut without first calling glinit.init()")
from OpenGL.GL import *
from OpenGL.GLUT import *
import sys
import time
import weakref
class GLUTWindowManager(_ThreadedWindowManager):
    def __init__(self):
        """Initialize an empty GLUT window manager with a placeholder frontend."""
        self._frontend = GLUTVisualizationFrontend(None)
        #a list of WorldModel indices in the current window. A world cannot be used in multiple simultaneous
        #windows in GLUT. If a world is reused with a different window, its display lists will be refreshed.
        self.current_worlds = []
        #list of WindowInfo's
        self.windows = []
        #the index of the current window
        self.current_window = None
        #the name of a window, if no windows exist yet
        self.window_title = "Klamp't visualizer (%s)"%(sys.argv[0],)
        #a callback sent to run
        self.callback = None
        #the current temp frontend if len(self.windows)=0, or windows[current_window].frontend
        _ThreadedWindowManager.__init__(self)
    def reset(self):
        """Reset base-class state, then tear down all windows via cleanup()."""
        _ThreadedWindowManager.reset(self)
        self.cleanup()
    def run_app_thread(self,callback=None):
        """Body of the visualization thread: create the GLUT window, run the
        GLUT main loop until it exits, then clean up manager state.

        Blocks until the GL backend's run() returns.  Must not be called
        while another visualization thread is running.
        """
        global _globalLock
        assert not self.vis_thread_running,"Can't run a new GLUT thread, a thread is already running"
        self.vis_thread_running = True
        if len(self.windows)==0:
            # First run: promote the default frontend to window 0 and show it.
            self.windows.append(WindowInfo(self.window_title,self._frontend))
            self.current_window = 0
            winfo = self.windows[self.current_window]
            winfo.mode = 'shown'
            winfo.worlds = self.current_worlds
            winfo.active_worlds = self.current_worlds[:]
        glinit._GLBackend.initialize("Klamp't visualization")
        winfo = self.windows[self.current_window]
        print("GLUTWindowManager.run_app_thread: creating window with name",winfo.name,"and status",winfo.mode)
        w = glinit._GLBackend.createWindow(winfo.name)
        # Weak proxies avoid reference cycles between manager, window record,
        # and frontend.
        self._frontend.windowinfo = weakref.proxy(winfo)
        self._frontend.window_manager = weakref.proxy(self)
        self._frontend.name = winfo.name
        w.setProgram(self._frontend)
        winfo.glwindow = w
        self.callback = callback
        print("Windows",[winfo.name for winfo in self.windows])
        # Enter the GLUT main loop; returns only when the loop terminates.
        glinit._GLBackend.run()
        print("GLUTWindowManager.run_app_thread: Visualization thread closing...")
        self.cleanup()
        self.vis_thread_running = False
        print("GLUTWindowManager.run_app_thread: terminating.")
        return
    def cleanup(self):
        """Clear all window scenes/worlds and restore pristine manager state."""
        print("GLUTWindowManager.cleanup()")
        for w in self.windows:
            w.frontend.scene.clear()
            w.worlds = []
            w.active_worlds = []
            #for some reason, destroying windows causes everything to terminate early
            if w.glwindow:
                print("GLUTWindowManager: destroying window",w.glwindow.glutWindowID)
                # Deliberately NOT calling glutDestroyWindow (see note above);
                # the reference is just dropped.
                #glutDestroyWindow(w.glwindow.glutWindowID)
                w.glwindow = None
        # Re-create the same defaults as __init__.
        self._frontend = GLUTVisualizationFrontend(None)
        self.current_worlds = []
        self.windows = []
        self.current_window = None
        self.window_title = "Klamp't visualizer (%s)"%(sys.argv[0],)
        self.callback = None
    def frontend(self):
        """Return the active frontend (single- or multi-view program)."""
        return self._frontend
    def scene(self):
        """Return the active frontend's VisualizationScene."""
        return self._frontend.scene
    def getWindowName(self):
        """Return the title of the current (or next-created) window."""
        return self.window_title
    def setWindowName(self,title):
        """Set the window title and propagate it to the live GLUT window."""
        self.window_title = title
        self.onFrontendChange()
    def resizeWindow(self,w,h):
        """Resize the current frontend's viewport to w x h pixels."""
        self._frontend.reshape(w,h)
def createWindow(self,title):
if len(self.windows) == 0:
#save the defaults in window 0
self.windows.append(WindowInfo(self.window_title,self._frontend))
self.windows[-1].worlds = self.current_worlds
self.windows[-1].active_worlds = self.current_worlds[:]
if title is None:
title = "Window "+str(len(self.windows))
#make a new window
self._frontend = GLUTVisualizationFrontend(None)
self._frontend.window_manager = weakref.proxy(self)
self.windows.append(WindowInfo(title,self._frontend))
self.window_title = title
print("GLUTWindowManager.createWindow: window title",self.window_title,", id",len(self.windows)-1)
self.current_worlds = []
id = len(self.windows)-1
self.current_window = id
return id
    def setWindow(self,id):
        """Make window `id` current, migrating world display lists as needed.

        GLUT cannot share OpenGL display lists between windows (unlike the
        PyQt backend), so any world previously active in the old window must
        have its display lists refreshed before being drawn in the new one.
        """
        if id == self.current_window:
            return
        assert id >= 0 and id < len(self.windows),"Invalid window id"
        self._frontend = self.windows[id].frontend
        self.current_worlds = self.windows[id].worlds
        self.window_title = self.windows[id].name
        #print "vis.setWindow(",id,") the window has status",_windows[id].mode
        #PyQt interface allows sharing display lists but GLUT does not.
        #refresh all worlds' display lists that were once active.
        for w in self.current_worlds:
            if w in self.windows[self.current_window].active_worlds:
                print("klampt.vis.setWindow(): world",w,"becoming active in the new window",id)
                for item in self.windows[self.current_window].worldDisplayListItems[w]:
                    self._refreshDisplayLists(item)
                # The world is no longer active in the old window.
                self.windows[self.current_window].active_worlds.remove(w)
        self.windows[id].active_worlds = self.current_worlds[:]
        self.current_window = id
    def getWindow(self):
        """Return the current window index (None before any window exists)."""
        return self.current_window
    def setPlugin(self,plugin):
        """Replace the current window's plugin stack with `plugin`.

        Passing None restores the default scene plugin.
        """
        if not isinstance(self._frontend,GLUTVisualizationFrontend):
            #was multi-view -- now setting plugin
            # NOTE(review): constructed with zero args here, but every other
            # call site uses GLUTVisualizationFrontend(None) -- confirm the
            # constructor's argument is optional.
            self._frontend = GLUTVisualizationFrontend()
            if self.current_window is not None:
                if self.windows[self.current_window].glwindow is not None:
                    self._frontend.window = self.windows[self.current_window].glwindow
        if plugin is None:
            self._frontend.setPlugin(self._frontend.scene)
        else:
            self._frontend.setPlugin(plugin)
            if hasattr(plugin,'world'):
                # Worlds can't share display lists across GLUT windows; make
                # sure this plugin's world belongs to the current window.
                self._checkWindowCurrent(plugin.world)
        self.onFrontendChange()
    def pushPlugin(self,plugin):
        """Push an input-handling plugin onto the current frontend's stack.

        Only valid for single-view frontends (not after splitView()).
        """
        assert isinstance(self._frontend,glcommon.GLPluginProgram),"Can't push a plugin after splitView"
        if len(self._frontend.plugins) == 0:
            # Ensure the base scene plugin is installed first.
            self._frontend.setPlugin(self._frontend.scene)
        self._frontend.pushPlugin(plugin)
        self.onFrontendChange()
    def popPlugin(self):
        """Pop the most recently pushed plugin and refresh the window."""
        self._frontend.popPlugin()
        self.onFrontendChange()
    def splitView(self,plugin):
        """Add `plugin` as an extra viewport alongside the current view.

        A None plugin gets a fresh GLVisualizationPlugin.  If the frontend
        is not yet multi-view, it is wrapped in a multi-window program.
        """
        #create a multi-view widget
        if plugin is None:
            plugin = GLVisualizationPlugin()
        if isinstance(self._frontend,glcommon.GLMultiViewportProgram):
            # Already multi-view: just append another viewport.
            self._frontend.add_view(plugin)
            if hasattr(plugin,'scene') and isinstance(plugin.scene,VisualizationScene):
                self._frontend.scene = plugin.scene
        else:
            if len(self._frontend.plugins) == 0:
                self.setPlugin(None)
            multiProgram = GLUTMultiWindowVisualizationFrontend(None)
            multiProgram.windowinfo = weakref.proxy(self.windows[self.current_window])
            multiProgram.window = None
            if self.current_window is not None:
                if self.windows[self.current_window].glwindow is not None:
                    multiProgram.window = self.windows[self.current_window].glwindow
            multiProgram.add_view(self._frontend)
            multiProgram.add_view(plugin)
            multiProgram.name = self.window_title
            self._frontend = multiProgram
            # NOTE(review): self._frontend IS multiProgram at this point, so
            # this makes the program its own scene -- confirm this fallback is
            # intended rather than the pre-split frontend's scene.
            multiProgram.scene = self._frontend
            if hasattr(plugin,'scene') and isinstance(plugin.scene,VisualizationScene):
                multiProgram.scene = plugin.scene
        if isinstance(plugin,GLVisualizationPlugin):
            # Give the plugin a handle to its newly created viewport.
            plugin.program = weakref.proxy(self._frontend.views[-1])
        self.onFrontendChange()
    def unlock(self):
        """Release the visualization lock and ask all windows to redraw."""
        _ThreadedWindowManager.unlock(self)
        self.update()
    def update(self):
        """Flag every live GLUT window for a redraw on its next idle cycle."""
        for w in self.windows:
            if w.glwindow:
                w.doRefresh = True
    def show(self):
        """Mark the current window visible, creating the first window record
        and starting the visualization thread if necessary."""
        if len(self.windows)==0:
            self.windows.append(WindowInfo(self.window_title,self._frontend))
            self.current_window = 0
            print("First show(), window title",self.window_title)
        winfo = self.windows[self.current_window]
        winfo.mode = 'shown'
        winfo.worlds = self.current_worlds
        winfo.active_worlds = self.current_worlds[:]
        if not self.vis_thread_running:
            print("GLUTWindowManager.show(): first window shown, starting the visualization thread")
            self._start_app_thread()
    def shown(self):
        """True if the vis thread is live and the current window is visible
        (either normally shown or as a dialog)."""
        return (self.vis_thread_running and self.current_window is not None and self.windows[self.current_window].mode in ['shown','dialog'])
    def hide(self):
        """Mark the current window hidden (no-op if no window exists)."""
        if self.current_window is None:
            return
        self.windows[self.current_window].mode = 'hidden'
def dialog(self):
global _globalLock
if len(self.windows)==0:
self.windows.append(WindowInfo(self.window_title,self._frontend))
self.current_window = 0
w = self.windows[self.current_window]
w.mode = 'dialog'
w.worlds = self.current_worlds
w.active_worlds = self.current_worlds[:]
if self.multithreaded():
print("#########################################")
print("klampt.vis: Running multi-threaded dialog, waiting to complete...")
if not self.vis_thread_running:
self._start_app_thread()
while w.mode == 'dialog':
time.sleep(0.1)
print("klampt.vis: ... dialog done.")
print("#########################################")
return None
else:
print("#########################################")
print("klampt.vis: Running single-threaded dialog")
self.in_vis_loop = True
res = self.run_app_thread()
self._in_vis_loop = False
print("klampt.vis: ... dialog done.")
print("#########################################")
return res
def set_custom_ui(self,func):
if len(self.windows)==0:
print("Making first window for custom ui")
self.windows.append(WindowInfo(self.window_title,self._frontend))
self.current_window = 0
self.windows[self.current_window].custom_ui = func
print("klampt.vis: setting custom ui on window",self.current_window)
return
    def onFrontendChange(self):
        """Called when self._frontend is replaced: pushes the new frontend
        into the current window record and syncs the GL window size/title."""
        if self.current_window is None:
            return
        w = self.windows[self.current_window]
        w.doReload = True
        w.frontend = self._frontend
        if w.glwindow:
            w.glwindow.reshape(self._frontend.view.w,self._frontend.view.h)
            if w.name != self.window_title:
                #the GLUT window must be made current before its title changes
                glutSetWindow(w.glwindow.glutWindowID)
                glutSetWindowTitle(self.window_title)
                w.name = self.window_title
def _refreshDisplayLists(self,item):
if isinstance(item,WorldModel):
for i in range(item.numRobots()):
self._refreshDisplayLists(item.robot(i))
for i in range(item.numRigidObjects()):
self._refreshDisplayLists(item.rigidObject(i))
for i in range(item.numTerrains()):
self._refreshDisplayLists(item.terrain(i))
elif isinstance(item,RobotModel):
for i in range(item.numLinks()):
self._refreshDisplayLists(item.link(i))
elif hasattr(item,'appearance'):
item.appearance().refresh(False)
def _checkWindowCurrent(self,item):
#print("Checking whether item",item,"is current in the context of window",self.current_window)
#print("Current worlds",self.current_worlds)
#print("Current window's active worlds",self.windows[self.current_window].active_worlds)
if isinstance(item,WorldModel):
if item.index not in self.current_worlds:
#PyQt interface allows sharing display lists but GLUT does not.
#refresh all worlds' display lists that will be shifted to the current window.
for i,win in enumerate(self.windows):
#print("Window",i,"active worlds",win.active_worlds)
if item.index in win.active_worlds:
#GLUT SPECIFIC
print("klampt.vis: world",item.index,"was shown in a different window, now refreshing display lists")
self._refreshDisplayLists(item)
win.active_worlds.remove(item.index)
self.current_worlds.append(item.index)
if self.current_window is not None:
self.windows[self.current_window].worldDisplayListItems[item.index].append(weakref.proxy(item))
#print("klampt.vis: world added to the visualization's world (items:",self.current_worlds,")")
#else:
# print("klampt.vis: world",item,"is already in the current window's world")
elif hasattr(item,'world'):
if isinstance(item.world,WorldModel):
return self._checkWindowCurrent(item.world)
if isinstance(item.world,int):
if item.world < 0:
return
if item.world not in self.current_worlds:
for i,win in enumerate(self.windows):
#print("Window",i,"active worlds",win.active_worlds)
if item.world in win.active_worlds:
#GLUT SPECIFIC
print("klampt.vis: world",item.index,"was shown in a different window, now refreshing display lists")
self._refreshDisplayLists(item)
win.active_worlds.remove(item.world)
self.current_worlds.append(item.world)
if self.current_window is not None:
self.windows[self.current_window].worldDisplayListItems[item.index].append(weakref.proxy(item))
#print("klampt.vis: world added to the visualization's world (items:",self.current_worlds,")")
    def do_idle_checks(self):
        """Per-idle housekeeping run inside the GLUT loop: creates pending
        windows, applies show/hide/dialog mode changes, exits the loop when
        quitting or when all windows are hidden, and drains queued thread
        calls plus the user callback."""
        #print("GLUTWindowManager.idle checks")
        if self.quit:
            #prefer a clean loop exit when freeglut provides it
            if bool(glutLeaveMainLoop):
                glutLeaveMainLoop()
            else:
                for w in self.windows:
                    w.close()
                    w.glwindow = None
            return
        for windex,winfo in enumerate(self.windows):
            #print(winfo.name,winfo.glwindow,winfo.mode)
            if winfo.glwindow is None and winfo.mode in ['shown','dialog']:
                #window requested but not yet created: build it now, on the vis thread
                print("GLUTWindowManager: Launching window %d inside vis thread"%(windex,))
                w = glinit._GLBackend.createWindow(winfo.name)
                self._frontend.windowinfo = weakref.proxy(winfo)
                self._frontend.window_manager = weakref.proxy(self)
                self._frontend.name = winfo.name
                w.setProgram(self._frontend)
                winfo.glwindow = w
                w.initialize()
            if not winfo.frontend.hidden:
                if winfo.mode == 'hidden':
                    print("GLUTWindowManager: hiding window %d (%s)"%(windex,winfo.name))
                    winfo.frontend.hidden = True
                    if winfo.glwindow is not None:
                        glutSetWindow(winfo.glwindow.glutWindowID)
                        glutHideWindow()
            else:
                #print("hidden, waiting...",self.windowinfo.mode)
                if winfo.mode == 'shown':
                    print("GLUTWindowManager: showing window %d (%s)"%(windex,winfo.name))
                    print("GLUT ID",winfo.glwindow.glutWindowID)
                    glutSetWindow(winfo.glwindow.glutWindowID)
                    glutShowWindow()
                    winfo.frontend.hidden = False
                elif winfo.mode == 'dialog':
                    print("GLUTWindowManager: showing window %d (%s) in dialog mode"%(windex,winfo.name))
                    print("GLUT ID",winfo.glwindow.glutWindowID)
                    winfo.frontend.inDialog = True
                    glutSetWindow(winfo.glwindow.glutWindowID)
                    glutShowWindow()
                    winfo.frontend.hidden = False
        if self.in_vis_loop and (len(self.windows)==0 or all(w.mode == 'hidden' for w in self.windows)):
            #nothing visible while a blocking vis loop is active: exit the loop
            print("klampt.vis: No windows shown, breaking out of vis loop")
            if bool(glutLeaveMainLoop):
                glutLeaveMainLoop()
            else:
                #fallback without freeglut: close windows until none is current
                while glutGetWindow():
                    for w in self.windows:
                        w.close()
                for w in self.windows:
                    w.glwindow = None
            return
        #run queued cross-thread calls and the per-idle user callback
        self.in_app_thread = True
        calls = self.threadcalls
        self.threadcalls = []
        for c in calls:
            c()
        if self.callback:
            self.callback()
        self.in_app_thread = False
        return
    def screenshot(self,format,want_depth):
        """Captures a screenshot of the frontend and returns it.

        If called from a non-main thread (i.e., from within the vis loop)
        the image is grabbed immediately.  Otherwise the grab is queued via
        screenshotCallback and this blocks (polling) until the vis thread
        delivers the result.  Returns the image, or (image,depth) when
        want_depth is True.
        """
        if threading.current_thread().__class__.__name__ != '_MainThread':
            #already in visualization loop -- just get the image
            return self._frontend.get_screen(format,want_depth)
        return_values = []
        def storeScreenshot(img,depth=None,return_values=return_values):
            return_values.append((img,depth))
        self.screenshotCallback(storeScreenshot,format,want_depth)
        #wait for the vis thread to call the function
        while len(return_values)==0:
            time.sleep(0.01)
        res = return_values[0]
        if not want_depth:
            return res[0]
        else:
            return res
def screenshotCallback(self,fn,format,want_depth):
if threading.current_thread().__class__.__name__ != '_MainThread':
#already in visualization loop -- just get the image
res = self._frontend.get_screen(format,want_depth)
if want_depth:
fn(*res)
else:
fn(res)
def do_screenshot_callback(fn=fn,format=format,want_depth=want_depth):
res = self._frontend.get_screen(format,want_depth)
if want_depth:
fn(*res)
else:
fn(res)
self.threadCall(do_screenshot_callback)
class GLUTVisualizationFrontend(GLVisualizationFrontend):
    """GLUT-backed single-scene visualization window.

    Wraps GLVisualizationFrontend with _globalLock-protected drawing, an
    on-screen help overlay (mode banner and action hotkeys), Esc-to-hide
    handling, and idle-time window management through the owning
    GLUT window manager.
    """
    def __init__(self,windowinfo):
        GLVisualizationFrontend.__init__(self)
        self.scene = GLUTVisualizationPlugin()
        self.setPlugin(self.scene)
        self.scene.program = weakref.proxy(self)
        # state shared with / managed by the GLUT window manager
        self.windowinfo = windowinfo
        self.window_manager = None
        self.inDialog = False
        self.hidden = False
        self.inSubwindow = False
    def display(self):
        """Draws the scene while holding the global vis lock."""
        global _globalLock
        _globalLock.acquire()
        GLVisualizationFrontend.display(self)
        _globalLock.release()
        return True
    def display_screen(self):
        """Draws the base screen overlay under the global lock, then the
        GLUT-specific help text unless embedded as a subwindow."""
        global _globalLock
        _globalLock.acquire()
        GLVisualizationFrontend.display_screen(self)
        _globalLock.release()
        if self.inSubwindow:
            return
        glDisable(GL_LIGHTING)
        glColor3f(1,1,1)
        y = 30
        glRasterPos(20,y)
        gldraw.glutBitmapString(GLUT_BITMAP_HELVETICA_18,"(Do not close this window except to quit)")
        y += 25
        if self.inDialog:
            glColor3f(1,1,0)
            glRasterPos(20,y)
            gldraw.glutBitmapString(GLUT_BITMAP_HELVETICA_18,"In Dialog mode. Press 'Esc' to return to normal mode")
            y += 25
        else:
            glColor3f(1,1,0)
            glRasterPos(20,y)
            gldraw.glutBitmapString(GLUT_BITMAP_HELVETICA_18,"In Window mode. Press 'Esc' to hide window")
            y += 25
        #list each action hotkey with its description
        for a in self.actions:
            if a.key is not None:
                glColor3f(0,0,0)
                glRasterPos(20,y)
                desc = a.short_text
                if a.description is not None and a.description != a.short_text:
                    desc = desc + ". "+a.description
                gldraw.glutBitmapString(GLUT_BITMAP_HELVETICA_12,a.key+": "+desc)
                y += 14
    def keyboardfunc(self,c,x,y):
        """Handles Esc (leave dialog mode / hide window) and action hotkeys,
        including 'Ctrl'/'Shift'/'Alt'-prefixed bindings; delegates to the
        base class when embedded as a subwindow."""
        if not self.inSubwindow:
            if len(c)==1 and ord(c)==27:
                if self.inDialog:
                    print("Esc pressed, hiding dialog")
                    self.inDialog = False
                else:
                    print("Esc pressed, hiding window")
                global _globalLock
                _globalLock.acquire()
                self.windowinfo.mode = 'hidden'
                _globalLock.release()
                return True
            if isinstance(c,bytes):
                c = c.decode('utf-8')
            for a in self.actions:
                if a.key is None:
                    continue
                #prefix lengths skip "Ctrl+"/"Shift+"/"Alt+" before comparing
                if a.key.startswith('Ctrl'):
                    if 'ctrl' in self.modifiers():
                        if a.key[5:] == c:
                            a.hook()
                elif a.key.startswith('Shift'):
                    if 'shift' in self.modifiers():
                        if a.key[6:] == c:
                            a.hook()
                elif a.key.startswith('Alt'):
                    if 'alt' in self.modifiers():
                        if a.key[4:] == c:
                            a.hook()
                elif a.key == c:
                    a.hook()
        else:
            return GLVisualizationFrontend.keyboardfunc(self,c,x,y)
    def idlefunc(self):
        """Runs window-manager idle checks under the global lock before
        deferring to the base idle processing."""
        global _globalLock
        _globalLock.acquire()
        self.window_manager.do_idle_checks()
        _globalLock.release()
        return GLVisualizationFrontend.idlefunc(self)
class GLUTMultiWindowVisualizationFrontend(glcommon.GLMultiViewportProgram):
    """GLUT-backed multi-viewport visualization window.

    Hosts several sub-views, draws the same help overlay as the
    single-scene frontend, and supports Esc-to-hide / dialog dismissal.
    """
    def __init__(self,windowinfo):
        glcommon.GLMultiViewportProgram.__init__(self)
        # state shared with / managed by the GLUT window manager
        self.windowinfo = windowinfo
        self.window_manager = None
        self.inDialog = False
        self.hidden = False
        self.inSubwindow = False
    def addView(self,view):
        """Deprecated alias of add_view."""
        warnings.warn("addView will be deprecated in favor of add_view in a future version of Klampt",DeprecationWarning)
        self.add_view(view)
    def add_view(self,view):
        """Adds a sub-view; GLUT frontends are flagged as embedded so they
        skip drawing their own overlay."""
        if isinstance(view,(GLUTVisualizationFrontend,GLUTMultiWindowVisualizationFrontend)):
            view.inSubwindow = True
        glcommon.GLMultiViewportProgram.add_view(self,view)
    def display_screen(self):
        """Draws sub-view screens, then the mode banner when top-level."""
        glcommon.GLMultiViewportProgram.display_screen(self)
        if not self.inSubwindow:
            glDisable(GL_LIGHTING)
            glColor3f(1,1,1)
            glRasterPos(20,50)
            gldraw.glutBitmapString(GLUT_BITMAP_HELVETICA_18,"(Do not close this window except to quit)")
            if self.inDialog:
                glColor3f(1,1,0)
                glRasterPos(20,80)
                gldraw.glutBitmapString(GLUT_BITMAP_HELVETICA_18,"In Dialog mode. Press 'Esc' to return to normal mode")
            else:
                glColor3f(1,1,0)
                glRasterPos(20,80)
                gldraw.glutBitmapString(GLUT_BITMAP_HELVETICA_18,"In Window mode. Press 'Esc' to hide window")
    def keyboardfunc(self,c,x,y):
        """Esc dismisses the dialog or hides the window; other keys go to
        the base multi-viewport handler."""
        if len(c)==1 and ord(c)==27:
            if self.inDialog:
                print("Esc pressed, hiding dialog")
                self.inDialog = False
            else:
                print("Esc pressed, hiding window")
                global _globalLock
                _globalLock.acquire()
                self.windowinfo.mode = 'hidden'
                _globalLock.release()
            return True
        else:
            return glcommon.GLMultiViewportProgram.keyboardfunc(self,c,x,y)
    def idlefunc(self):
        """Runs window-manager idle checks under the global lock."""
        global _globalLock
        _globalLock.acquire()
        self.window_manager.do_idle_checks()
        _globalLock.release()
        return glcommon.GLMultiViewportProgram.idlefunc(self)
class GLUTVisualizationPlugin(GLVisualizationPlugin):
    """Visualization plugin variant that keeps the GLUT window manager's
    world ownership bookkeeping in sync whenever an item is added."""
    def __init__(self):
        GLVisualizationPlugin.__init__(self)
    def add(self,name,item,keepAppearance=False,**kwargs):
        """Adds an item to the scene, then checks whether its world must be
        migrated to the current GLUT window."""
        GLVisualizationPlugin.add(self,name,item,keepAppearance,**kwargs)
        #need to check whether the item is part of the current GLUT world
        manager = self.program.window_manager if self.program else None
        if manager:
            manager._checkWindowCurrent(item)
| |
import httplib2
import pytest
import tests
from six.moves import urllib
def test_credentials():
    """Credentials are matched per-domain; clear() empties the store."""
    store = httplib2.Credentials()
    joe = ("joe", "password")
    fred = ("fred", "password2")
    store.add("joe", "password")
    assert tuple(store.iter("bitworking.org"))[0] == joe
    assert tuple(store.iter(""))[0] == joe
    store.add("fred", "password2", "wellformedweb.org")
    assert tuple(store.iter("bitworking.org"))[0] == joe
    assert len(tuple(store.iter("bitworking.org"))) == 1
    assert len(tuple(store.iter("wellformedweb.org"))) == 2
    assert fred in tuple(store.iter("wellformedweb.org"))
    store.clear()
    assert len(tuple(store.iter("bitworking.org"))) == 0
    store.add("fred", "password2", "wellformedweb.org")
    assert fred in tuple(store.iter("wellformedweb.org"))
    assert len(tuple(store.iter("bitworking.org"))) == 0
    assert len(tuple(store.iter(""))) == 0
def test_basic():
    """Basic auth: 401 until matching credentials are registered."""
    client = httplib2.Http()
    password = tests.gen_password()
    handler = tests.http_reflect_with_auth(
        allow_scheme="basic", allow_credentials=(("joe", password),)
    )
    with tests.server_request(handler, request_count=3) as uri:
        response, _ = client.request(uri, "GET")
        assert response.status == 401
        client.add_credentials("joe", password)
        response, _ = client.request(uri, "GET")
        assert response.status == 200
def test_basic_for_domain():
    """Basic auth credentials scoped to a domain only apply to that domain."""
    client = httplib2.Http()
    password = tests.gen_password()
    handler = tests.http_reflect_with_auth(
        allow_scheme="basic", allow_credentials=(("joe", password),)
    )
    with tests.server_request(handler, request_count=4) as uri:
        response, _ = client.request(uri, "GET")
        assert response.status == 401
        # credentials for an unrelated domain must not be sent
        client.add_credentials("joe", password, "example.org")
        response, _ = client.request(uri, "GET")
        assert response.status == 401
        # credentials for the server's own netloc succeed
        domain = urllib.parse.urlparse(uri)[1]
        client.add_credentials("joe", password, domain)
        response, _ = client.request(uri, "GET")
        assert response.status == 200
def test_basic_two_credentials():
    # Test Basic Authentication with multiple sets of credentials
    http = httplib2.Http()
    password1 = tests.gen_password()
    password2 = tests.gen_password()
    # The handler keeps a reference to this list, so mutating it later
    # changes which credentials the server accepts mid-test.
    allowed = [("joe", password1)]  # exploit shared mutable list
    handler = tests.http_reflect_with_auth(
        allow_scheme="basic", allow_credentials=allowed
    )
    with tests.server_request(handler, request_count=7) as uri:
        # fred's credentials are not yet allowed by the server
        http.add_credentials("fred", password2)
        response, content = http.request(uri, "GET")
        assert response.status == 401
        http.add_credentials("joe", password1)
        response, content = http.request(uri, "GET")
        assert response.status == 200
        # swap the server-side allowed credentials to fred; the client
        # already holds fred's credentials, so this still succeeds
        allowed[0] = ("fred", password2)
        response, content = http.request(uri, "GET")
        assert response.status == 200
def test_digest():
    """Digest auth: 401 until matching credentials are registered."""
    client = httplib2.Http()
    password = tests.gen_password()
    handler = tests.http_reflect_with_auth(
        allow_scheme="digest", allow_credentials=(("joe", password),)
    )
    with tests.server_request(handler, request_count=3) as uri:
        response, content = client.request(uri, "GET")
        assert response.status == 401
        client.add_credentials("joe", password)
        response, content = client.request(uri, "GET")
        assert response.status == 200, content.decode()
def test_digest_next_nonce_nc():
    # Test that if the server sets nextnonce that we reset
    # the nonce count back to 1
    http = httplib2.Http()
    password = tests.gen_password()
    # the handler stores a nonce-renewal callable into this slot
    grenew_nonce = [None]
    handler = tests.http_reflect_with_auth(
        allow_scheme="digest",
        allow_credentials=(("joe", password),),
        out_renew_nonce=grenew_nonce,
    )
    with tests.server_request(handler, request_count=5) as uri:
        http.add_credentials("joe", password)
        response1, _ = http.request(uri, "GET")
        info = httplib2._parse_www_authenticate(response1, "authentication-info")
        assert response1.status == 200
        assert info.get("digest", {}).get("nc") == "00000001", info
        assert not info.get("digest", {}).get("nextnonce"), info
        # second request under the same nonce increments the nonce count
        response2, _ = http.request(uri, "GET")
        info2 = httplib2._parse_www_authenticate(response2, "authentication-info")
        assert info2.get("digest", {}).get("nc") == "00000002", info2
        # force a new server nonce; the client must restart nc at 1
        grenew_nonce[0]()
        response3, content = http.request(uri, "GET")
        info3 = httplib2._parse_www_authenticate(response3, "authentication-info")
        assert response3.status == 200
        assert info3.get("digest", {}).get("nc") == "00000001", info3
def test_digest_auth_stale():
    # Test that we can handle a nonce becoming stale
    http = httplib2.Http()
    password = tests.gen_password()
    # handler exposes a nonce-renewal callable and records raw requests
    grenew_nonce = [None]
    requests = []
    handler = tests.http_reflect_with_auth(
        allow_scheme="digest",
        allow_credentials=(("joe", password),),
        out_renew_nonce=grenew_nonce,
        out_requests=requests,
    )
    with tests.server_request(handler, request_count=4) as uri:
        http.add_credentials("joe", password)
        response, _ = http.request(uri, "GET")
        assert response.status == 200
        info = httplib2._parse_www_authenticate(
            requests[0][1].headers, "www-authenticate"
        )
        # invalidate the server nonce so the next request is stale
        grenew_nonce[0]()
        response, _ = http.request(uri, "GET")
        assert response.status == 200
        assert not response.fromcache
        assert getattr(response, "_stale_digest", False)
        info2 = httplib2._parse_www_authenticate(
            requests[2][1].headers, "www-authenticate"
        )
        # the server must have issued a fresh, different nonce
        nonce1 = info.get("digest", {}).get("nonce", "")
        nonce2 = info2.get("digest", {}).get("nonce", "")
        assert nonce1 != ""
        assert nonce2 != ""
        assert nonce1 != nonce2, (nonce1, nonce2)
# Each entry pairs a raw www-authenticate header dict with the expected
# parsed scheme -> parameters mapping.
@pytest.mark.parametrize(
    "data",
    (
        ({}, {}),
        ({"www-authenticate": ""}, {}),
        (
            {
                "www-authenticate": 'Test realm="test realm" , foo=foo ,bar="bar", baz=baz,qux=qux'
            },
            {
                "test": {
                    "realm": "test realm",
                    "foo": "foo",
                    "bar": "bar",
                    "baz": "baz",
                    "qux": "qux",
                }
            },
        ),
        (
            {"www-authenticate": 'T*!%#st realm=to*!%#en, to*!%#en="quoted string"'},
            {"t*!%#st": {"realm": "to*!%#en", "to*!%#en": "quoted string"}},
        ),
        (
            {"www-authenticate": 'Test realm="a \\"test\\" realm"'},
            {"test": {"realm": 'a "test" realm'}},
        ),
        ({"www-authenticate": 'Basic realm="me"'}, {"basic": {"realm": "me"}}),
        (
            {"www-authenticate": 'Basic realm="me", algorithm="MD5"'},
            {"basic": {"realm": "me", "algorithm": "MD5"}},
        ),
        (
            {"www-authenticate": 'Basic realm="me", algorithm=MD5'},
            {"basic": {"realm": "me", "algorithm": "MD5"}},
        ),
        (
            {"www-authenticate": 'Basic realm="me",other="fred" '},
            {"basic": {"realm": "me", "other": "fred"}},
        ),
        ({"www-authenticate": 'Basic REAlm="me" '}, {"basic": {"realm": "me"}}),
        (
            {
                "www-authenticate": 'Digest realm="digest1", qop="auth,auth-int", nonce="7102dd2", opaque="e9517f"'
            },
            {
                "digest": {
                    "realm": "digest1",
                    "qop": "auth,auth-int",
                    "nonce": "7102dd2",
                    "opaque": "e9517f",
                }
            },
        ),
        # multiple schema choice
        (
            {
                "www-authenticate": 'Digest realm="multi-d", nonce="8b11d0f6", opaque="cc069c" Basic realm="multi-b" '
            },
            {
                "digest": {"realm": "multi-d", "nonce": "8b11d0f6", "opaque": "cc069c"},
                "basic": {"realm": "multi-b"},
            },
        ),
        # FIXME
        # comma between schemas (glue for multiple headers with same name)
        # ({'www-authenticate': 'Digest realm="2-comma-d", qop="auth-int", nonce="c0c8ff1", Basic realm="2-comma-b"'},
        # {'digest': {'realm': '2-comma-d', 'qop': 'auth-int', 'nonce': 'c0c8ff1'},
        # 'basic': {'realm': '2-comma-b'}}),
        # FIXME
        # comma between schemas + WSSE (glue for multiple headers with same name)
        # ({'www-authenticate': 'Digest realm="com3d", Basic realm="com3b", WSSE realm="com3w", profile="token"'},
        # {'digest': {'realm': 'com3d'}, 'basic': {'realm': 'com3b'}, 'wsse': {'realm': 'com3w', profile': 'token'}}),
        # FIXME
        # multiple syntax figures
        # ({'www-authenticate':
        # 'Digest realm="brig", qop \t=\t"\tauth,auth-int", nonce="(*)&^&$%#",opaque="5ccc"' +
        # ', Basic REAlm="zoo", WSSE realm="very", profile="UsernameToken"'},
        # {'digest': {'realm': 'brig', 'qop': 'auth,auth-int', 'nonce': '(*)&^&$%#', 'opaque': '5ccc'},
        # 'basic': {'realm': 'zoo'},
        # 'wsse': {'realm': 'very', 'profile': 'UsernameToken'}}),
        # more quote combos
        (
            {
                "www-authenticate": 'Digest realm="myrealm", nonce="KBAA=3", algorithm=MD5, qop="auth", stale=true'
            },
            {
                "digest": {
                    "realm": "myrealm",
                    "nonce": "KBAA=3",
                    "algorithm": "MD5",
                    "qop": "auth",
                    "stale": "true",
                }
            },
        ),
    ),
    ids=lambda data: str(data[0]),
)
@pytest.mark.parametrize("strict", (True, False), ids=("strict", "relax"))
def test_parse_www_authenticate_correct(data, strict):
    """Each header parses to the expected mapping in both strict and
    relaxed modes; the global flag is always restored afterwards."""
    headers, info = data
    # FIXME: move strict to parse argument
    httplib2.USE_WWW_AUTH_STRICT_PARSING = strict
    try:
        assert httplib2._parse_www_authenticate(headers) == info
    finally:
        httplib2.USE_WWW_AUTH_STRICT_PARSING = 0
def test_parse_www_authenticate_malformed():
    """An unparseable www-authenticate value raises MalformedHeader."""
    # TODO: test (and fix) header value 'barbqwnbm-bb...:asd' leads to dead loop
    bad_headers = {
        "www-authenticate": 'OAuth "Facebook Platform" "invalid_token" "Invalid OAuth access token."'
    }
    with tests.assert_raises(httplib2.MalformedHeader):
        httplib2._parse_www_authenticate(bad_headers)
def test_digest_object():
    """DigestAuthentication builds the exact expected Authorization header
    for a known challenge and fixed cnonce."""
    credentials = ("joe", "password")
    host = None
    request_uri = "/test/digest/"
    headers = {}
    response = {
        "www-authenticate": 'Digest realm="myrealm", nonce="KBAA=35", algorithm=MD5, qop="auth"'
    }
    content = b""
    d = httplib2.DigestAuthentication(
        credentials, host, request_uri, headers, response, content, None
    )
    # fixed cnonce makes the computed response hash deterministic
    d.request("GET", request_uri, headers, content, cnonce="33033375ec278a46")
    our_request = "authorization: " + headers["authorization"]
    working_request = (
        'authorization: Digest username="joe", realm="myrealm", '
        'nonce="KBAA=35", uri="/test/digest/"'
        + ', algorithm=MD5, response="de6d4a123b80801d0e94550411b6283f", '
        'qop=auth, nc=00000001, cnonce="33033375ec278a46"'
    )
    assert our_request == working_request
def test_digest_object_with_opaque():
    """DigestAuthentication echoes the challenge's opaque value back in the
    Authorization header, matching a known-good request string."""
    credentials = ("joe", "password")
    host = None
    request_uri = "/digest/opaque/"
    headers = {}
    response = {
        "www-authenticate": 'Digest realm="myrealm", nonce="30352fd", algorithm=MD5, '
        'qop="auth", opaque="atestopaque"'
    }
    # consistency fix: the sibling digest tests pass bytes content (b""),
    # and the body is not hashed under qop=auth, so behavior is unchanged
    content = b""
    d = httplib2.DigestAuthentication(
        credentials, host, request_uri, headers, response, content, None
    )
    # fixed cnonce makes the computed response hash deterministic
    d.request("GET", request_uri, headers, content, cnonce="5ec2")
    our_request = "authorization: " + headers["authorization"]
    working_request = (
        'authorization: Digest username="joe", realm="myrealm", '
        'nonce="30352fd", uri="/digest/opaque/", algorithm=MD5'
        + ', response="a1fab43041f8f3789a447f48018bee48", qop=auth, nc=00000001, '
        'cnonce="5ec2", opaque="atestopaque"'
    )
    assert our_request == working_request
def test_digest_object_stale():
    """A 401 with stale=true makes DigestAuthentication.response() signal
    that the request should be retried."""
    credentials = ("joe", "password")
    request_uri = "/digest/stale/"
    content = b""
    response = httplib2.Response({})
    response["www-authenticate"] = (
        'Digest realm="myrealm", nonce="bd669f", '
        'algorithm=MD5, qop="auth", stale=true'
    )
    response.status = 401
    d = httplib2.DigestAuthentication(
        credentials, None, request_uri, {}, response, content, None
    )
    # Returns true to force a retry
    assert d.response(response, content)
def test_digest_object_auth_info():
    """A 2xx-style response carrying authentication-info with nextnonce
    updates the challenge nonce and resets the nonce count, without
    requesting a retry."""
    credentials = ("joe", "password")
    host = None
    request_uri = "/digest/nextnonce/"
    headers = {}
    response = httplib2.Response({})
    response["www-authenticate"] = (
        'Digest realm="myrealm", nonce="barney", '
        'algorithm=MD5, qop="auth", stale=true'
    )
    response["authentication-info"] = 'nextnonce="fred"'
    content = b""
    d = httplib2.DigestAuthentication(
        credentials, host, request_uri, headers, response, content, None
    )
    # Returns true to force a retry
    assert not d.response(response, content)
    # nextnonce replaces the challenge nonce and nc restarts at 1
    assert d.challenge["nonce"] == "fred"
    assert d.challenge["nc"] == 1
def test_wsse_algorithm():
    """WSSE username token digest matches the documented example value."""
    token = httplib2._wsse_username_token(
        "d36e316282959a9ed4c89851497a717f", "2003-12-15T14:43:07Z", "taadtaadpstcsm"
    )
    assert token == b"quR/EWLAV4xLf9Zqyw4pDmfV9OY="
| |
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test suite for VMwareAPI.
"""
import collections
import datetime
from eventlet import greenthread
import mock
from mox3 import mox
from oslo_config import cfg
from oslo_utils import fixture as utils_fixture
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from oslo_vmware import pbm
from oslo_vmware import vim_util as oslo_vim_util
from nova import block_device
from nova.compute import api as compute_api
from nova.compute import power_state
from nova.compute import task_states
from nova.compute import vm_states
from nova import context
from nova import exception
from nova.image import glance
from nova.network import model as network_model
from nova import objects
from nova import test
from nova.tests.unit import fake_instance
import nova.tests.unit.image.fake
from nova.tests.unit import matchers
from nova.tests.unit import test_flavors
from nova.tests.unit import utils
from nova.tests.unit.virt.vmwareapi import fake as vmwareapi_fake
from nova.tests.unit.virt.vmwareapi import stubs
from nova.tests import uuidsentinel
from nova.virt import driver as v_driver
from nova.virt.vmwareapi import constants
from nova.virt.vmwareapi import driver
from nova.virt.vmwareapi import ds_util
from nova.virt.vmwareapi import error_util
from nova.virt.vmwareapi import imagecache
from nova.virt.vmwareapi import images
from nova.virt.vmwareapi import vif
from nova.virt.vmwareapi import vim_util
from nova.virt.vmwareapi import vm_util
from nova.virt.vmwareapi import vmops
from nova.virt.vmwareapi import volumeops
CONF = cfg.CONF
# Register option definitions from other modules that these tests read.
CONF.import_opt('host', 'nova.netconf')
CONF.import_opt('remove_unused_original_minimum_age_seconds',
                'nova.virt.imagecache')
def _fake_create_session(inst):
    """Attach a minimal fake vSphere session to inst, replacing the real
    login performed by VMwareAPISession._create_session."""
    fake = vmwareapi_fake.DataObject()
    fake.key = 'fake_key'
    fake.userName = 'fake_username'
    fake._pbm_wsdl_loc = None
    fake._pbm = None
    inst._session = fake
class VMwareDriverStartupTestCase(test.NoDBTestCase):
    """Verifies driver construction fails fast when required vmware config
    options (host/user/password) are missing or invalid."""
    def _start_driver_with_flags(self, expected_exception_type, startup_flags):
        # Build the driver with the given flags (session ctor patched out)
        # and assert the exact exception type raised.
        self.flags(**startup_flags)
        with mock.patch(
            'nova.virt.vmwareapi.driver.VMwareAPISession.__init__'):
            e = self.assertRaises(
                Exception, driver.VMwareVCDriver, None) # noqa
            self.assertIs(type(e), expected_exception_type)
    def test_start_driver_no_user(self):
        self._start_driver_with_flags(
            Exception,
            dict(host_ip='ip', host_password='password',
                 group='vmware'))
    def test_start_driver_no_host(self):
        self._start_driver_with_flags(
            Exception,
            dict(host_username='username', host_password='password',
                 group='vmware'))
    def test_start_driver_no_password(self):
        self._start_driver_with_flags(
            Exception,
            dict(host_ip='ip', host_username='username',
                 group='vmware'))
    def test_start_driver_with_user_host_password(self):
        # Getting the InvalidInput exception signifies that no exception
        # is raised regarding missing user/password/host
        self._start_driver_with_flags(
            nova.exception.InvalidInput,
            dict(host_ip='ip', host_password='password',
                 host_username="user", datastore_regex="bad(regex",
                 group='vmware'))
class VMwareSessionTestCase(test.NoDBTestCase):
    """Tests for VMwareAPISession._call_method argument dispatch."""
    @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
                       return_value=False)
    def test_call_method(self, mock_is_vim):
        # Non-Vim module: the session's vim handle is appended to the call.
        with test.nested(
                mock.patch.object(driver.VMwareAPISession, '_create_session',
                                  _fake_create_session),
                mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
        ) as (fake_create, fake_invoke):
            session = driver.VMwareAPISession()
            session._vim = mock.Mock()
            module = mock.Mock()
            session._call_method(module, 'fira')
            fake_invoke.assert_called_once_with(module, 'fira', session._vim)
    @mock.patch.object(driver.VMwareAPISession, '_is_vim_object',
                       return_value=True)
    def test_call_method_vim(self, mock_is_vim):
        # Vim module: the call is forwarded without appending the handle.
        with test.nested(
                mock.patch.object(driver.VMwareAPISession, '_create_session',
                                  _fake_create_session),
                mock.patch.object(driver.VMwareAPISession, 'invoke_api'),
        ) as (fake_create, fake_invoke):
            session = driver.VMwareAPISession()
            module = mock.Mock()
            session._call_method(module, 'fira')
            fake_invoke.assert_called_once_with(module, 'fira')
class VMwareAPIVMTestCase(test.NoDBTestCase):
"""Unit tests for Vmware API connection calls."""
REQUIRES_LOCKING = True
    @mock.patch.object(driver.VMwareVCDriver, '_register_openstack_extension')
    def setUp(self, mock_register):
        # Build a fully-stubbed VMwareVCDriver: fake vSphere backend, fake
        # image service, and test flags; no real network access occurs.
        super(VMwareAPIVMTestCase, self).setUp()
        ds_util.dc_cache_reset()
        vm_util.vm_refs_cache_reset()
        self.context = context.RequestContext('fake', 'fake', is_admin=False)
        self.flags(cluster_name='test_cluster',
                   host_ip='test_url',
                   host_username='test_username',
                   host_password='test_pass',
                   api_retry_count=1,
                   use_linked_clone=False, group='vmware')
        self.flags(enabled=False, group='vnc')
        self.flags(image_cache_subdirectory_name='vmware_base',
                   my_ip='')
        self.user_id = 'fake'
        self.project_id = 'fake'
        # NOTE(review): this replaces the context created above; the earlier
        # is_admin=False context appears redundant -- confirm before removing
        self.context = context.RequestContext(self.user_id, self.project_id)
        stubs.set_stubs(self)
        vmwareapi_fake.reset()
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.conn = driver.VMwareVCDriver(None, False)
        self._set_exception_vars()
        self.node_name = self.conn._nodename
        self.ds = 'ds1'
        self._display_name = 'fake-display-name'
        self.vim = vmwareapi_fake.FakeVim()
        # NOTE(vish): none of the network plugging code is actually
        # being tested
        self.network_info = utils.get_test_network_info()
        image_ref = nova.tests.unit.image.fake.get_valid_image_id()
        (image_service, image_id) = glance.get_remote_image_service(
            self.context, image_ref)
        metadata = image_service.show(self.context, image_id)
        self.image = objects.ImageMeta.from_dict({
            'id': image_ref,
            'disk_format': 'vmdk',
            'size': int(metadata['size']),
        })
        self.fake_image_uuid = self.image.id
        # NOTE(review): stub_out_image_service was already called above --
        # presumably a redundant re-stub; confirm before removing
        nova.tests.unit.image.fake.stub_out_image_service(self)
        self.vnc_host = 'ha-host'
    def tearDown(self):
        # Tear down the fake vSphere backend and reset the fake image service.
        super(VMwareAPIVMTestCase, self).tearDown()
        vmwareapi_fake.cleanup()
        nova.tests.unit.image.fake.FakeImageService_reset()
    def test_legacy_block_device_info(self):
        # The VMware driver consumes the new-style block device info format.
        self.assertFalse(self.conn.need_legacy_block_device_info)
    def test_get_host_ip_addr(self):
        # The configured host_ip flag ('test_url' from setUp) is reported.
        self.assertEqual('test_url', self.conn.get_host_ip_addr())
    def test_init_host_with_no_session(self):
        # With no live vim handle, init_host must re-create the session.
        self.conn._session = mock.Mock()
        self.conn._session.vim = None
        self.conn.init_host('fake_host')
        self.conn._session._create_session.assert_called_once_with()
    def test_init_host(self):
        # init_host with the stubbed session must not raise.
        try:
            self.conn.init_host("fake_host")
        except Exception as ex:
            self.fail("init_host raised: %s" % ex)
    def _set_exception_vars(self):
        # Save the real session call/wait methods so tests can wrap them,
        # and reset the task/exception tracking state.
        self.wait_task = self.conn._session._wait_for_task
        self.call_method = self.conn._session._call_method
        self.task_ref = None
        self.exception = False
    def test_cleanup_host(self):
        # cleanup_host after init_host must not raise.
        self.conn.init_host("fake_host")
        try:
            self.conn.cleanup_host("fake_host")
        except Exception as ex:
            self.fail("cleanup_host raised: %s" % ex)
    def test_driver_capabilities(self):
        # Spot-check the advertised capability flags.
        self.assertTrue(self.conn.capabilities['has_imagecache'])
        self.assertFalse(self.conn.capabilities['supports_recreate'])
        self.assertTrue(
            self.conn.capabilities['supports_migrate_to_same_host'])
    @mock.patch.object(pbm, 'get_profile_id_by_name')
    def test_configuration_pbm(self, get_profile_mock):
        # A resolvable default PBM policy passes configuration validation.
        get_profile_mock.return_value = 'fake-profile'
        self.flags(pbm_enabled=True,
                   pbm_default_policy='fake-policy',
                   pbm_wsdl_location='fake-location', group='vmware')
        self.conn._validate_configuration()
    @mock.patch.object(pbm, 'get_profile_id_by_name')
    def test_configuration_pbm_bad_default(self, get_profile_mock):
        # An unresolvable default PBM policy fails configuration validation.
        get_profile_mock.return_value = None
        self.flags(pbm_enabled=True,
                   pbm_wsdl_location='fake-location',
                   pbm_default_policy='fake-policy', group='vmware')
        self.assertRaises(error_util.PbmDefaultPolicyDoesNotExist,
                          self.conn._validate_configuration)
    def test_login_retries(self):
        # The first login attempt raises a connection error; the session
        # must retry (sleep patched out) and succeed on the second attempt.
        self.attempts = 0
        self.login_session = vmwareapi_fake.FakeVim()._login()
        def _fake_login(_self):
            self.attempts += 1
            if self.attempts == 1:
                raise vexc.VimConnectionException('Here is my fake exception')
            return self.login_session
        def _fake_check_session(_self):
            return True
        self.stub_out('nova.tests.unit.virt.vmwareapi.fake.FakeVim._login',
                      _fake_login)
        self.stub_out('nova.tests.unit.virt.vmwareapi.'
                      'fake.FakeVim._check_session',
                      _fake_check_session)
        with mock.patch.object(greenthread, 'sleep'):
            self.conn = driver.VMwareAPISession()
        self.assertEqual(2, self.attempts)
    def _get_instance_type_by_name(self, type):
        # Look up a flavor by name from the test defaults, with a
        # hand-built dict fallback for 'm1.micro'.
        # NOTE(review): implicitly returns None for any other unknown name.
        for instance_type in test_flavors.DEFAULT_FLAVOR_OBJS:
            if instance_type.name == type:
                return instance_type
        if type == 'm1.micro':
            return {'memory_mb': 128, 'root_gb': 0, 'deleted_at': None,
                    'name': 'm1.micro', 'deleted': 0, 'created_at': None,
                    'ephemeral_gb': 0, 'updated_at': None,
                    'disabled': False, 'vcpus': 1, 'extra_specs': {},
                    'swap': 0, 'rxtx_factor': 1.0, 'is_public': True,
                    'flavorid': '1', 'vcpu_weight': None, 'id': 2}
    def _create_instance(self, node=None, set_image_ref=True,
                         uuid=None, instance_type='m1.large',
                         ephemeral=None, instance_type_updates=None):
        # Build a fake instance object and record it (plus its node/uuid)
        # on self for use by subsequent assertions.
        if not node:
            node = self.node_name
        if not uuid:
            uuid = uuidutils.generate_uuid()
        self.type_data = dict(self._get_instance_type_by_name(instance_type))
        if instance_type_updates:
            self.type_data.update(instance_type_updates)
        if ephemeral is not None:
            self.type_data['ephemeral_gb'] = ephemeral
        values = {'name': 'fake_name',
                  'display_name': self._display_name,
                  'id': 1,
                  'uuid': uuid,
                  'project_id': self.project_id,
                  'user_id': self.user_id,
                  'kernel_id': "fake_kernel_uuid",
                  'ramdisk_id': "fake_ramdisk_uuid",
                  'mac_address': "de:ad:be:ef:be:ef",
                  'flavor': objects.Flavor(**self.type_data),
                  'node': node,
                  'memory_mb': self.type_data['memory_mb'],
                  'root_gb': self.type_data['root_gb'],
                  'ephemeral_gb': self.type_data['ephemeral_gb'],
                  'vcpus': self.type_data['vcpus'],
                  'swap': self.type_data['swap'],
                  'expected_attrs': ['system_metadata'],
                  }
        if set_image_ref:
            values['image_ref'] = self.fake_image_uuid
        self.instance_node = node
        self.uuid = uuid
        self.instance = fake_instance.fake_instance_obj(
            self.context, **values)
    def _create_vm(self, node=None, num_instances=1, uuid=None,
                   instance_type='m1.large', powered_on=True,
                   ephemeral=None, bdi=None, instance_type_updates=None):
        """Create and spawn the VM."""
        if not node:
            node = self.node_name
        self._create_instance(node=node, uuid=uuid,
                              instance_type=instance_type,
                              ephemeral=ephemeral,
                              instance_type_updates=instance_type_updates)
        # The ref cache must not know the uuid before spawn and must be
        # populated afterwards.
        self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
        self.conn.spawn(self.context, self.instance, self.image,
                        injected_files=[], admin_password=None,
                        network_info=self.network_info,
                        block_device_info=bdi)
        self._check_vm_record(num_instances=num_instances,
                              powered_on=powered_on,
                              uuid=uuid)
        self.assertIsNotNone(vm_util.vm_ref_cache_get(self.uuid))
def _get_vm_record(self):
# Get record for VM
vms = vmwareapi_fake._get_objects("VirtualMachine")
for vm in vms.objects:
if vm.get('name') == vm_util._get_vm_name(self._display_name,
self.uuid):
return vm
self.fail('Unable to find VM backing!')
def _get_info(self, uuid=None, node=None, name=None):
uuid = uuid if uuid else self.uuid
node = node if node else self.instance_node
name = name if node else '1'
return self.conn.get_info(fake_instance.fake_instance_obj(
None,
**{'uuid': uuid,
'name': name,
'node': node}))
    def _check_vm_record(self, num_instances=1, powered_on=True, uuid=None):
        """Check if the spawned VM's properties correspond to the instance in
        the db.
        """
        instances = self.conn.list_instances()
        # Only verify the instance count when a real uuid was requested.
        if uuidutils.is_uuid_like(uuid):
            self.assertEqual(num_instances, len(instances))
        # Get Nova record for VM
        vm_info = self._get_info()
        vm = self._get_vm_record()
        # Check that m1.large above turned into the right thing.
        mem_kib = int(self.type_data['memory_mb']) << 10
        vcpus = self.type_data['vcpus']
        self.assertEqual(vm_info.max_mem_kb, mem_kib)
        self.assertEqual(vm_info.mem_kb, mem_kib)
        self.assertEqual(vm.get("summary.config.instanceUuid"), self.uuid)
        self.assertEqual(vm.get("summary.config.numCpu"), vcpus)
        self.assertEqual(vm.get("summary.config.memorySizeMB"),
                         self.type_data['memory_mb'])
        # The third hardware device of the fake VM is expected to be the
        # e1000 NIC.
        self.assertEqual("ns0:VirtualE1000",
                         vm.get("config.hardware.device").VirtualDevice[2].obj_name)
        if powered_on:
            # Check that the VM is running according to Nova
            self.assertEqual(power_state.RUNNING, vm_info.state)
            # Check that the VM is running according to vSphere API.
            self.assertEqual('poweredOn', vm.get("runtime.powerState"))
        else:
            # Check that the VM is not running according to Nova
            self.assertEqual(power_state.SHUTDOWN, vm_info.state)
            # Check that the VM is not running according to vSphere API.
            self.assertEqual('poweredOff', vm.get("runtime.powerState"))
        found_vm_uuid = False
        found_iface_id = False
        # The NVP metadata (vm uuid and vif id) must be present in the VM's
        # extraConfig option values.
        extras = vm.get("config.extraConfig")
        for c in extras.OptionValue:
            if (c.key == "nvp.vm-uuid" and c.value == self.instance['uuid']):
                found_vm_uuid = True
            if (c.key == "nvp.iface-id.0" and c.value == "vif-xxx-yyy-zzz"):
                found_iface_id = True
        self.assertTrue(found_vm_uuid)
        self.assertTrue(found_iface_id)
def _check_vm_info(self, info, pwr_state=power_state.RUNNING):
"""Check if the get_info returned values correspond to the instance
object in the db.
"""
mem_kib = int(self.type_data['memory_mb']) << 10
self.assertEqual(info.state, pwr_state)
self.assertEqual(info.max_mem_kb, mem_kib)
self.assertEqual(info.mem_kb, mem_kib)
self.assertEqual(info.num_cpu, self.type_data['vcpus'])
def test_instance_exists(self):
self._create_vm()
self.assertTrue(self.conn.instance_exists(self.instance))
invalid_instance = fake_instance.fake_instance_obj(
None, uuid=uuidsentinel.foo, name='bar',
node=self.node_name)
self.assertFalse(self.conn.instance_exists(invalid_instance))
def test_list_instances_1(self):
self._create_vm()
instances = self.conn.list_instances()
self.assertEqual(1, len(instances))
def test_list_instance_uuids(self):
self._create_vm()
uuids = self.conn.list_instance_uuids()
self.assertEqual(1, len(uuids))
def _cached_files_exist(self, exists=True):
cache = ds_obj.DatastorePath(self.ds, 'vmware_base',
self.fake_image_uuid,
'%s.vmdk' % self.fake_image_uuid)
if exists:
vmwareapi_fake.assertPathExists(self, str(cache))
else:
vmwareapi_fake.assertPathNotExists(self, str(cache))
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_instance_dir_disk_created(self, mock_from_image):
        """Test image file is cached when even when use_linked_clone
        is False
        """
        img_props = images.VMwareImage(
            image_id=self.fake_image_uuid,
            linked_clone=False)
        mock_from_image.return_value = img_props
        self._create_vm()
        # A full copy of the disk lives in the instance directory ...
        path = ds_obj.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
        vmwareapi_fake.assertPathExists(self, str(path))
        # ... and the image is still cached under vmware_base.
        self._cached_files_exist()
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_cache_dir_disk_created(self, mock_from_image):
        """Test image disk is cached when use_linked_clone is True."""
        self.flags(use_linked_clone=True, group='vmware')
        img_props = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=1 * units.Ki,
            disk_type=constants.DISK_TYPE_SPARSE)
        mock_from_image.return_value = img_props
        self._create_vm()
        # Both the original cached image and the resized (80 GB) variant
        # must end up in the cache directory.
        path = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                    self.fake_image_uuid,
                                    '%s.vmdk' % self.fake_image_uuid)
        root = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                    self.fake_image_uuid,
                                    '%s.80.vmdk' % self.fake_image_uuid)
        vmwareapi_fake.assertPathExists(self, str(path))
        vmwareapi_fake.assertPathExists(self, str(root))
def _iso_disk_type_created(self, instance_type='m1.large'):
self.image.disk_format = 'iso'
self._create_vm(instance_type=instance_type)
path = ds_obj.DatastorePath(self.ds, 'vmware_base',
self.fake_image_uuid,
'%s.iso' % self.fake_image_uuid)
vmwareapi_fake.assertPathExists(self, str(path))
def test_iso_disk_type_created(self):
self._iso_disk_type_created()
path = ds_obj.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
vmwareapi_fake.assertPathExists(self, str(path))
def test_iso_disk_type_created_with_root_gb_0(self):
self._iso_disk_type_created(instance_type='m1.micro')
path = ds_obj.DatastorePath(self.ds, self.uuid, '%s.vmdk' % self.uuid)
vmwareapi_fake.assertPathNotExists(self, str(path))
    def test_iso_disk_cdrom_attach(self):
        # The iso uploaded to the cache directory must be the one that
        # gets attached as a cdrom during spawn.
        iso_path = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                        self.fake_image_uuid,
                                        '%s.iso' % self.fake_image_uuid)
        def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                              iso_uploaded_path):
            self.assertEqual(iso_uploaded_path, str(iso_path))
        self.stub_out('nova.virt.vmwareapi.vmops._attach_cdrom_to_vm',
                      fake_attach_cdrom)
        self.image.disk_format = 'iso'
        self._create_vm()
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_iso_disk_cdrom_attach_with_config_drive(self,
                                                     mock_from_image):
        """Both the image iso and the config drive iso get attached."""
        img_props = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=80 * units.Gi,
            file_type='iso',
            linked_clone=False)
        mock_from_image.return_value = img_props
        self.flags(force_config_drive=True)
        # Expected attach order: image iso first, config drive iso second.
        iso_path = [
            ds_obj.DatastorePath(self.ds, 'vmware_base',
                                 self.fake_image_uuid,
                                 '%s.iso' % self.fake_image_uuid),
            ds_obj.DatastorePath(self.ds, 'fake-config-drive')]
        self.iso_index = 0
        def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                              iso_uploaded_path):
            self.assertEqual(iso_uploaded_path, str(iso_path[self.iso_index]))
            self.iso_index += 1
        with test.nested(
            mock.patch.object(self.conn._vmops,
                              '_attach_cdrom_to_vm',
                              side_effect=fake_attach_cdrom),
            mock.patch.object(self.conn._vmops,
                              '_create_config_drive',
                              return_value='fake-config-drive'),
        ) as (fake_attach_cdrom_to_vm, fake_create_config_drive):
            self.image.disk_format = 'iso'
            self._create_vm()
        self.assertEqual(2, self.iso_index)
        self.assertEqual(fake_attach_cdrom_to_vm.call_count, 2)
        self.assertEqual(fake_create_config_drive.call_count, 1)
def test_ephemeral_disk_attach(self):
self._create_vm(ephemeral=50)
path = ds_obj.DatastorePath(self.ds, self.uuid,
'ephemeral_0.vmdk')
vmwareapi_fake.assertPathExists(self, str(path))
    def test_ephemeral_disk_attach_from_bdi(self):
        # Ephemerals supplied through block_device_info create one vmdk
        # per entry in the instance directory.
        ephemerals = [{'device_type': 'disk',
                       'disk_bus': constants.DEFAULT_ADAPTER_TYPE,
                       'size': 25},
                      {'device_type': 'disk',
                       'disk_bus': constants.DEFAULT_ADAPTER_TYPE,
                       'size': 25}]
        bdi = {'ephemerals': ephemerals}
        self._create_vm(bdi=bdi, ephemeral=50)
        path = ds_obj.DatastorePath(self.ds, self.uuid,
                                    'ephemeral_0.vmdk')
        vmwareapi_fake.assertPathExists(self, str(path))
        path = ds_obj.DatastorePath(self.ds, self.uuid,
                                    'ephemeral_1.vmdk')
        vmwareapi_fake.assertPathExists(self, str(path))
def test_ephemeral_disk_attach_from_bdii_with_no_ephs(self):
bdi = {'ephemerals': []}
self._create_vm(bdi=bdi, ephemeral=50)
path = ds_obj.DatastorePath(self.ds, self.uuid,
'ephemeral_0.vmdk')
vmwareapi_fake.assertPathExists(self, str(path))
    def test_cdrom_attach_with_config_drive(self):
        """The generated config drive iso is attached as a cdrom."""
        self.flags(force_config_drive=True)
        iso_path = ds_obj.DatastorePath(self.ds, 'fake-config-drive')
        self.cd_attach_called = False
        def fake_attach_cdrom(vm_ref, instance, data_store_ref,
                              iso_uploaded_path):
            self.assertEqual(iso_uploaded_path, str(iso_path))
            self.cd_attach_called = True
        with test.nested(
            mock.patch.object(self.conn._vmops, '_attach_cdrom_to_vm',
                              side_effect=fake_attach_cdrom),
            mock.patch.object(self.conn._vmops, '_create_config_drive',
                              return_value='fake-config-drive'),
        ) as (fake_attach_cdrom_to_vm, fake_create_config_drive):
            self._create_vm()
        self.assertTrue(self.cd_attach_called)
    @mock.patch.object(vmops.VMwareVMOps, 'power_off')
    @mock.patch.object(driver.VMwareVCDriver, 'detach_volume')
    @mock.patch.object(vmops.VMwareVMOps, 'destroy')
    def test_destroy_with_attached_volumes(self,
                                           mock_destroy,
                                           mock_detach_volume,
                                           mock_power_off):
        """destroy() powers off, detaches volumes, then destroys the VM."""
        self._create_vm()
        connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
        bdm = [{'connection_info': connection_info,
                'disk_bus': 'fake-bus',
                'device_name': 'fake-name',
                'mount_device': '/dev/sdb'}]
        bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
        self.assertNotEqual(vm_states.STOPPED, self.instance.vm_state)
        self.conn.destroy(self.context, self.instance, self.network_info,
                          block_device_info=bdi)
        mock_power_off.assert_called_once_with(self.instance)
        mock_detach_volume.assert_called_once_with(
            connection_info, self.instance, 'fake-name')
        mock_destroy.assert_called_once_with(self.instance, True)
    @mock.patch.object(vmops.VMwareVMOps, 'power_off',
                       side_effect=vexc.ManagedObjectNotFoundException())
    @mock.patch.object(vmops.VMwareVMOps, 'destroy')
    def test_destroy_with_attached_volumes_missing(self,
                                                   mock_destroy,
                                                   mock_power_off):
        """A vanished VM during power-off does not abort destroy()."""
        self._create_vm()
        connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
        bdm = [{'connection_info': connection_info,
                'disk_bus': 'fake-bus',
                'device_name': 'fake-name',
                'mount_device': '/dev/sdb'}]
        bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
        self.assertNotEqual(vm_states.STOPPED, self.instance.vm_state)
        self.conn.destroy(self.context, self.instance, self.network_info,
                          block_device_info=bdi)
        mock_power_off.assert_called_once_with(self.instance)
        mock_destroy.assert_called_once_with(self.instance, True)
    @mock.patch.object(driver.VMwareVCDriver, 'detach_volume',
                       side_effect=exception.NovaException())
    @mock.patch.object(vmops.VMwareVMOps, 'destroy')
    def test_destroy_with_attached_volumes_with_exception(
            self, mock_destroy, mock_detach_volume):
        """A detach failure propagates and prevents VM destruction."""
        self._create_vm()
        connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
        bdm = [{'connection_info': connection_info,
                'disk_bus': 'fake-bus',
                'device_name': 'fake-name',
                'mount_device': '/dev/sdb'}]
        bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
        self.assertRaises(exception.NovaException,
                          self.conn.destroy, self.context, self.instance,
                          self.network_info, block_device_info=bdi)
        mock_detach_volume.assert_called_once_with(
            connection_info, self.instance, 'fake-name')
        self.assertFalse(mock_destroy.called)
    @mock.patch.object(driver.VMwareVCDriver, 'detach_volume',
                       side_effect=exception.DiskNotFound(message='oh man'))
    @mock.patch.object(vmops.VMwareVMOps, 'destroy')
    def test_destroy_with_attached_volumes_with_disk_not_found(
            self, mock_destroy, mock_detach_volume):
        """DiskNotFound during detach is ignored and destroy proceeds."""
        self._create_vm()
        connection_info = {'data': 'fake-data', 'serial': 'volume-fake-id'}
        bdm = [{'connection_info': connection_info,
                'disk_bus': 'fake-bus',
                'device_name': 'fake-name',
                'mount_device': '/dev/sdb'}]
        bdi = {'block_device_mapping': bdm, 'root_device_name': '/dev/sda'}
        self.conn.destroy(self.context, self.instance, self.network_info,
                          block_device_info=bdi)
        mock_detach_volume.assert_called_once_with(
            connection_info, self.instance, 'fake-name')
        self.assertTrue(mock_destroy.called)
        mock_destroy.assert_called_once_with(self.instance, True)
def test_spawn(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
def test_spawn_vm_ref_cached(self):
uuid = uuidutils.generate_uuid()
self.assertIsNone(vm_util.vm_ref_cache_get(uuid))
self._create_vm(uuid=uuid)
self.assertIsNotNone(vm_util.vm_ref_cache_get(uuid))
def test_spawn_power_on(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
    def test_spawn_root_size_0(self):
        # With root_gb=0 the base image is cached but no resized
        # ('.0.vmdk') copy may be created.
        self._create_vm(instance_type='m1.micro')
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        cache = ('[%s] vmware_base/%s/%s.vmdk' %
                 (self.ds, self.fake_image_uuid, self.fake_image_uuid))
        gb_cache = ('[%s] vmware_base/%s/%s.0.vmdk' %
                    (self.ds, self.fake_image_uuid, self.fake_image_uuid))
        vmwareapi_fake.assertPathExists(self, cache)
        vmwareapi_fake.assertPathNotExists(self, gb_cache)
    def _spawn_with_delete_exception(self, fault=None):
        """Spawn while DeleteDatastoreFile_Task fails with *fault*.

        A file-related fault is tolerated by the driver (the spawn still
        succeeds); fault=None produces a generic task error which must
        abort the spawn with VMwareDriverException. Either way the fake
        delete task must actually have been reached (self.exception).
        """
        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "DeleteDatastoreFile_Task":
                self.exception = True
                task_mdo = vmwareapi_fake.create_task(method, "error",
                                                      error_fault=fault)
                return task_mdo.obj
            return task_ref
        with (
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ):
            if fault:
                self._create_vm()
                info = self._get_info()
                self._check_vm_info(info, power_state.RUNNING)
            else:
                self.assertRaises(vexc.VMwareDriverException, self._create_vm)
            self.assertTrue(self.exception)
    def test_spawn_with_delete_exception_not_found(self):
        # FileNotFound during datastore cleanup is tolerated.
        self._spawn_with_delete_exception(vmwareapi_fake.FileNotFound())
    def test_spawn_with_delete_exception_file_fault(self):
        # FileFault during datastore cleanup is tolerated.
        self._spawn_with_delete_exception(vmwareapi_fake.FileFault())
    def test_spawn_with_delete_exception_cannot_delete_file(self):
        # CannotDeleteFile during datastore cleanup is tolerated.
        self._spawn_with_delete_exception(vmwareapi_fake.CannotDeleteFile())
    def test_spawn_with_delete_exception_file_locked(self):
        # FileLocked during datastore cleanup is tolerated.
        self._spawn_with_delete_exception(vmwareapi_fake.FileLocked())
    def test_spawn_with_delete_exception_general(self):
        # A faultless task error during cleanup aborts the spawn.
        self._spawn_with_delete_exception()
    def test_spawn_disk_extend(self):
        # The root disk must be extended to the flavor root size during
        # spawn (mox verifies the _extend_virtual_disk call at replay).
        self.mox.StubOutWithMock(self.conn._vmops, '_extend_virtual_disk')
        requested_size = 80 * units.Mi
        self.conn._vmops._extend_virtual_disk(mox.IgnoreArg(),
                requested_size, mox.IgnoreArg(), mox.IgnoreArg())
        self.mox.ReplayAll()
        self._create_vm()
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
def test_spawn_disk_extend_exists(self):
root = ds_obj.DatastorePath(self.ds, 'vmware_base',
self.fake_image_uuid,
'%s.80.vmdk' % self.fake_image_uuid)
def _fake_extend(instance, requested_size, name, dc_ref):
vmwareapi_fake._add_file(str(root))
with test.nested(
mock.patch.object(self.conn._vmops, '_extend_virtual_disk',
side_effect=_fake_extend)
) as (fake_extend_virtual_disk):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
vmwareapi_fake.assertPathExists(self, str(root))
self.assertEqual(1, fake_extend_virtual_disk[0].call_count)
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_spawn_disk_extend_sparse(self, mock_from_image):
        """A sparse linked-clone image is extended in the cache dir."""
        img_props = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=units.Ki,
            disk_type=constants.DISK_TYPE_SPARSE,
            linked_clone=True)
        mock_from_image.return_value = img_props
        with test.nested(
            mock.patch.object(self.conn._vmops, '_extend_virtual_disk'),
            mock.patch.object(self.conn._vmops, 'get_datacenter_ref_and_name'),
        ) as (mock_extend, mock_get_dc):
            dc_val = mock.Mock()
            dc_val.ref = "fake_dc_ref"
            dc_val.name = "dc1"
            mock_get_dc.return_value = dc_val
            self._create_vm()
            iid = img_props.image_id
            cached_image = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                                iid, '%s.80.vmdk' % iid)
            # The extend must target the cached copy, not the instance
            # directory.
            mock_extend.assert_called_once_with(
                self.instance, self.instance.root_gb * units.Mi,
                str(cached_image), "fake_dc_ref")
    def test_spawn_disk_extend_failed_copy(self):
        # Spawn instance
        # copy for extend fails without creating a file
        #
        # Expect the copy error to be raised
        self.flags(use_linked_clone=True, group='vmware')
        CopyError = vexc.FileFaultException
        def fake_wait_for_task(task_ref):
            # Fail only the copy task; everything else behaves normally.
            if task_ref == 'fake-copy-task':
                raise CopyError('Copy failed!')
            return self.wait_task(task_ref)
        def fake_call_method(module, method, *args, **kwargs):
            if method == "CopyVirtualDisk_Task":
                return 'fake-copy-task'
            return self.call_method(module, method, *args, **kwargs)
        with test.nested(
            mock.patch.object(self.conn._session, '_call_method',
                              new=fake_call_method),
            mock.patch.object(self.conn._session, '_wait_for_task',
                              new=fake_wait_for_task)):
            self.assertRaises(CopyError, self._create_vm)
    def test_spawn_disk_extend_failed_partial_copy(self):
        # Spawn instance
        # Copy for extend fails, leaving a file behind
        #
        # Expect the file to be cleaned up
        # Expect the copy error to be raised
        self.flags(use_linked_clone=True, group='vmware')
        self.task_ref = None
        uuid = self.fake_image_uuid
        cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
                                                           uuid, uuid)
        CopyError = vexc.FileFaultException
        def fake_wait_for_task(task_ref):
            if task_ref == self.task_ref:
                self.task_ref = None
                # The copy task must have created the cache file before
                # we make it fail.
                vmwareapi_fake.assertPathExists(self, cached_image)
                # N.B. We don't test for -flat here because real
                # CopyVirtualDisk_Task doesn't actually create it
                raise CopyError('Copy failed!')
            return self.wait_task(task_ref)
        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "CopyVirtualDisk_Task":
                self.task_ref = task_ref
            return task_ref
        with test.nested(
            mock.patch.object(self.conn._session, '_call_method',
                              new=fake_call_method),
            mock.patch.object(self.conn._session, '_wait_for_task',
                              new=fake_wait_for_task)):
            self.assertRaises(CopyError, self._create_vm)
            vmwareapi_fake.assertPathNotExists(self, cached_image)
    def test_spawn_disk_extend_failed_partial_copy_failed_cleanup(self):
        # Spawn instance
        # Copy for extend fails, leaves file behind
        # File cleanup fails
        #
        # Expect file to be left behind
        # Expect file cleanup error to be raised
        self.flags(use_linked_clone=True, group='vmware')
        self.task_ref = None
        uuid = self.fake_image_uuid
        cached_image = '[%s] vmware_base/%s/%s.80.vmdk' % (self.ds,
                                                           uuid, uuid)
        CopyError = vexc.FileFaultException
        DeleteError = vexc.CannotDeleteFileException
        def fake_wait_for_task(task_ref):
            if task_ref == self.task_ref:
                self.task_ref = None
                vmwareapi_fake.assertPathExists(self, cached_image)
                # N.B. We don't test for -flat here because real
                # CopyVirtualDisk_Task doesn't actually create it
                raise CopyError('Copy failed!')
            elif task_ref == 'fake-delete-task':
                raise DeleteError('Delete failed!')
            return self.wait_task(task_ref)
        def fake_call_method(module, method, *args, **kwargs):
            # Replace the cleanup delete task so its failure can be
            # simulated in fake_wait_for_task above.
            if method == "DeleteDatastoreFile_Task":
                return 'fake-delete-task'
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "CopyVirtualDisk_Task":
                self.task_ref = task_ref
            return task_ref
        with test.nested(
            mock.patch.object(self.conn._session, '_wait_for_task',
                              new=fake_wait_for_task),
            mock.patch.object(self.conn._session, '_call_method',
                              new=fake_call_method)):
            self.assertRaises(DeleteError, self._create_vm)
            vmwareapi_fake.assertPathExists(self, cached_image)
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_disk_invalid_disk_size(self, mock_from_image):
img_props = images.VMwareImage(
image_id=self.fake_image_uuid,
file_size=82 * units.Gi,
disk_type=constants.DISK_TYPE_SPARSE,
linked_clone=True)
mock_from_image.return_value = img_props
self.assertRaises(exception.InstanceUnacceptable,
self._create_vm)
@mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
'from_image')
def test_spawn_disk_extend_insufficient_disk_space(self, mock_from_image):
img_props = images.VMwareImage(
image_id=self.fake_image_uuid,
file_size=1024,
disk_type=constants.DISK_TYPE_SPARSE,
linked_clone=True)
mock_from_image.return_value = img_props
cached_image = ds_obj.DatastorePath(self.ds, 'vmware_base',
self.fake_image_uuid,
'%s.80.vmdk' %
self.fake_image_uuid)
tmp_file = ds_obj.DatastorePath(self.ds, 'vmware_base',
self.fake_image_uuid,
'%s.80-flat.vmdk' %
self.fake_image_uuid)
NoDiskSpace = vexc.get_fault_class('NoDiskSpace')
def fake_wait_for_task(task_ref):
if task_ref == self.task_ref:
self.task_ref = None
raise NoDiskSpace()
return self.wait_task(task_ref)
def fake_call_method(module, method, *args, **kwargs):
task_ref = self.call_method(module, method, *args, **kwargs)
if method == 'ExtendVirtualDisk_Task':
self.task_ref = task_ref
return task_ref
with test.nested(
mock.patch.object(self.conn._session, '_wait_for_task',
fake_wait_for_task),
mock.patch.object(self.conn._session, '_call_method',
fake_call_method)
) as (mock_wait_for_task, mock_call_method):
self.assertRaises(NoDiskSpace, self._create_vm)
vmwareapi_fake.assertPathNotExists(self, str(cached_image))
vmwareapi_fake.assertPathNotExists(self, str(tmp_file))
    def test_spawn_with_move_file_exists_exception(self):
        # The test will validate that the spawn completes
        # successfully. The "MoveDatastoreFile_Task" will
        # raise a file exists exception. The flag
        # self.exception will be checked to see that
        # the exception has indeed been raised.
        def fake_wait_for_task(task_ref):
            # Fail the recorded move task exactly once.
            if task_ref == self.task_ref:
                self.task_ref = None
                self.exception = True
                raise vexc.FileAlreadyExistsException()
            return self.wait_task(task_ref)
        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "MoveDatastoreFile_Task":
                self.task_ref = task_ref
            return task_ref
        with test.nested(
            mock.patch.object(self.conn._session, '_wait_for_task',
                              fake_wait_for_task),
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ) as (_wait_for_task, _call_method):
            self._create_vm()
            info = self._get_info()
            self._check_vm_info(info, power_state.RUNNING)
            self.assertTrue(self.exception)
    def test_spawn_with_move_general_exception(self):
        # A general exception from "MoveDatastoreFile_Task" must abort
        # the spawn (unlike a file-exists fault, which is tolerated).
        # The flag self.exception confirms that the fake task actually
        # raised.
        def fake_wait_for_task(task_ref):
            # Fail the recorded move task exactly once.
            if task_ref == self.task_ref:
                self.task_ref = None
                self.exception = True
                raise vexc.VMwareDriverException('Exception!')
            return self.wait_task(task_ref)
        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "MoveDatastoreFile_Task":
                self.task_ref = task_ref
            return task_ref
        with test.nested(
            mock.patch.object(self.conn._session, '_wait_for_task',
                              fake_wait_for_task),
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ) as (_wait_for_task, _call_method):
            self.assertRaises(vexc.VMwareDriverException,
                              self._create_vm)
            self.assertTrue(self.exception)
    def test_spawn_with_move_poll_exception(self):
        # A generic error status from polling "MoveDatastoreFile_Task"
        # must abort the spawn with VMwareDriverException.
        self.call_method = self.conn._session._call_method
        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "MoveDatastoreFile_Task":
                task_mdo = vmwareapi_fake.create_task(method, "error")
                return task_mdo.obj
            return task_ref
        with (
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ):
            self.assertRaises(vexc.VMwareDriverException,
                              self._create_vm)
    def test_spawn_with_move_file_exists_poll_exception(self):
        # The test will validate that the spawn completes
        # successfully. The "MoveDatastoreFile_Task" will
        # raise a file exists exception. The flag self.exception
        # will be checked to see that the exception has
        # indeed been raised.
        def fake_call_method(module, method, *args, **kwargs):
            task_ref = self.call_method(module, method, *args, **kwargs)
            if method == "MoveDatastoreFile_Task":
                self.exception = True
                # Return an errored task carrying a FileAlreadyExists
                # fault, which the driver is expected to tolerate.
                task_mdo = vmwareapi_fake.create_task(method, "error",
                        error_fault=vmwareapi_fake.FileAlreadyExists())
                return task_mdo.obj
            return task_ref
        with (
            mock.patch.object(self.conn._session, '_call_method',
                              fake_call_method)
        ):
            self._create_vm()
            info = self._get_info()
            self._check_vm_info(info, power_state.RUNNING)
            self.assertTrue(self.exception)
    def _spawn_attach_volume_vmdk(self, set_image_ref=True):
        """Spawn with a vmdk root volume in the block device mapping.

        mox expectations: the volume is relocated to the VM's resource
        pool and then attached with the default adapter type.
        """
        self._create_instance(set_image_ref=set_image_ref)
        self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
        self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
        connection_info = self._test_vmdk_connection_info('vmdk')
        root_disk = [{'connection_info': connection_info,
                      'boot_index': 0}]
        v_driver.block_device_info_get_mapping(
            mox.IgnoreArg()).AndReturn(root_disk)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_get_res_pool_of_vm')
        volumeops.VMwareVolumeOps._get_res_pool_of_vm(
            mox.IgnoreArg()).AndReturn('fake_res_pool')
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_relocate_vmdk_volume')
        volumeops.VMwareVolumeOps._relocate_vmdk_volume(mox.IgnoreArg(),
            'fake_res_pool', mox.IgnoreArg())
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'attach_volume')
        volumeops.VMwareVolumeOps.attach_volume(connection_info,
            self.instance, constants.DEFAULT_ADAPTER_TYPE)
        self.mox.ReplayAll()
        block_device_info = {'block_device_mapping': root_disk}
        self.conn.spawn(self.context, self.instance, self.image,
                        injected_files=[], admin_password=None,
                        network_info=self.network_info,
                        block_device_info=block_device_info)
    def test_spawn_attach_volume_iscsi(self):
        """Spawn with an iscsi root volume attaches it to the VM."""
        self._create_instance()
        self.mox.StubOutWithMock(block_device, 'volume_in_mapping')
        self.mox.StubOutWithMock(v_driver, 'block_device_info_get_mapping')
        connection_info = self._test_vmdk_connection_info('iscsi')
        root_disk = [{'connection_info': connection_info,
                      'boot_index': 0}]
        v_driver.block_device_info_get_mapping(
            mox.IgnoreArg()).AndReturn(root_disk)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'attach_volume')
        volumeops.VMwareVolumeOps.attach_volume(connection_info,
            self.instance, constants.DEFAULT_ADAPTER_TYPE)
        self.mox.ReplayAll()
        block_device_info = {'mount_device': 'vda'}
        self.conn.spawn(self.context, self.instance, self.image,
                        injected_files=[], admin_password=None,
                        network_info=self.network_info,
                        block_device_info=block_device_info)
def test_spawn_hw_versions(self):
updates = {'extra_specs': {'vmware:hw_version': 'vmx-08'}}
self._create_vm(instance_type_updates=updates)
vm = self._get_vm_record()
version = vm.get("version")
self.assertEqual('vmx-08', version)
    def mock_upload_image(self, context, image, instance, session, **kwargs):
        # Stand-in for images.upload_image_stream_optimized used by the
        # snapshot tests; verifies the arguments it is invoked with.
        self.assertEqual('Test-Snapshot', image)
        self.assertEqual(self.instance, instance)
        self.assertEqual(1024, kwargs['vmdk_size'])
    def test_get_vm_ref_using_extra_config(self):
        """VM lookup via the nvp.vm-uuid extraConfig entry."""
        self._create_vm()
        vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
                                                      self.instance['uuid'])
        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
        # Disrupt the fake Virtual Machine object so that extraConfig
        # cannot be matched.
        fake_vm = self._get_vm_record()
        fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = ""
        # We should not get a Virtual Machine through extraConfig.
        vm_ref = vm_util._get_vm_ref_from_extraconfig(self.conn._session,
                                                      self.instance['uuid'])
        self.assertIsNone(vm_ref, 'VM Reference should be none')
        # Check if we can find the Virtual Machine using the name.
        vm_ref = vm_util.get_vm_ref(self.conn._session, self.instance)
        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
    def test_search_vm_ref_by_identifier(self):
        """VM lookup by identifier falls back across uuid/name fields."""
        self._create_vm()
        vm_ref = vm_util.search_vm_ref_by_identifier(self.conn._session,
                                                     self.instance['uuid'])
        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
        # Rewrite every identifier the search consults: the original
        # uuid must stop matching while "foo" must start matching.
        fake_vm = self._get_vm_record()
        fake_vm.set("summary.config.instanceUuid", "foo")
        fake_vm.set("name", "foo")
        fake_vm.get('config.extraConfig["nvp.vm-uuid"]').value = "foo"
        self.assertIsNone(vm_util.search_vm_ref_by_identifier(
            self.conn._session, self.instance['uuid']),
                          "VM Reference should be none")
        self.assertIsNotNone(
            vm_util.search_vm_ref_by_identifier(self.conn._session, "foo"),
            "VM Reference should not be none")
    def test_get_object_for_optionvalue(self):
        # Retrieve the VM by matching an extraConfig option value.
        self._create_vm()
        vms = self.conn._session._call_method(vim_util, "get_objects",
                "VirtualMachine", ['config.extraConfig["nvp.vm-uuid"]'])
        vm_ref = vm_util._get_object_for_optionvalue(vms,
                                                     self.instance["uuid"])
        self.assertIsNotNone(vm_ref, 'VM Reference cannot be none')
    def _test_snapshot(self):
        """Drive conn.snapshot() and verify the task-state transitions."""
        expected_calls = [
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_PENDING_UPLOAD}},
            {'args': (),
             'kwargs':
                 {'task_state': task_states.IMAGE_UPLOADING,
                  'expected_state': task_states.IMAGE_PENDING_UPLOAD}}]
        func_call_matcher = matchers.FunctionCallMatcher(expected_calls)
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        with mock.patch.object(images, 'upload_image_stream_optimized',
                               self.mock_upload_image):
            self.conn.snapshot(self.context, self.instance, "Test-Snapshot",
                               func_call_matcher.call)
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        # Both task-state updates must have happened, in order.
        self.assertIsNone(func_call_matcher.match())
    def test_snapshot(self):
        # Snapshot of a live VM must succeed.
        self._create_vm()
        self._test_snapshot()
    def test_snapshot_no_root_disk(self):
        # An iso-booted instance with root_gb=0 has no root disk to
        # snapshot.
        self._iso_disk_type_created(instance_type='m1.micro')
        self.assertRaises(error_util.NoRootDiskDefined, self.conn.snapshot,
                          self.context, self.instance, "Test-Snapshot",
                          lambda *args, **kwargs: None)
    def test_snapshot_non_existent(self):
        # Snapshot of an instance that was never spawned fails.
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound, self.conn.snapshot,
                          self.context, self.instance, "Test-Snapshot",
                          lambda *args, **kwargs: None)
    def test_snapshot_delete_vm_snapshot(self):
        """snapshot() creates and then deletes a VM snapshot (mox)."""
        self._create_vm()
        fake_vm = self._get_vm_record()
        snapshot_ref = vmwareapi_fake.ManagedObjectReference(
            value="Snapshot-123",
            name="VirtualMachineSnapshot")
        self.mox.StubOutWithMock(vmops.VMwareVMOps,
                                 '_create_vm_snapshot')
        self.conn._vmops._create_vm_snapshot(
            self.instance, fake_vm.obj).AndReturn(snapshot_ref)
        self.mox.StubOutWithMock(vmops.VMwareVMOps,
                                 '_delete_vm_snapshot')
        self.conn._vmops._delete_vm_snapshot(
            self.instance, fake_vm.obj, snapshot_ref).AndReturn(None)
        self.mox.ReplayAll()
        self._test_snapshot()
    def _snapshot_delete_vm_snapshot_exception(self, exception, call_count=1):
        """Check retry behaviour of _delete_vm_snapshot on *exception*.

        TaskInProgress is retried (call_count attempts with a sleep
        between each); any other exception propagates immediately.
        """
        self._create_vm()
        fake_vm = vmwareapi_fake._get_objects("VirtualMachine").objects[0].obj
        snapshot_ref = vmwareapi_fake.ManagedObjectReference(
            value="Snapshot-123",
            name="VirtualMachineSnapshot")
        with test.nested(
            mock.patch.object(self.conn._session, '_wait_for_task',
                              side_effect=exception),
            mock.patch.object(vmops, '_time_sleep_wrapper')
        ) as (_fake_wait, _fake_sleep):
            if exception != vexc.TaskInProgress:
                self.assertRaises(exception,
                                  self.conn._vmops._delete_vm_snapshot,
                                  self.instance, fake_vm, snapshot_ref)
                self.assertEqual(0, _fake_sleep.call_count)
            else:
                self.conn._vmops._delete_vm_snapshot(self.instance, fake_vm,
                                                     snapshot_ref)
                self.assertEqual(call_count - 1, _fake_sleep.call_count)
            self.assertEqual(call_count, _fake_wait.call_count)
    def test_snapshot_delete_vm_snapshot_exception(self):
        # Non-retryable exceptions propagate after a single attempt.
        self._snapshot_delete_vm_snapshot_exception(exception.NovaException)
    def test_snapshot_delete_vm_snapshot_exception_retry(self):
        # TaskInProgress is retried up to the configured api_retry_count.
        self.flags(api_retry_count=5, group='vmware')
        self._snapshot_delete_vm_snapshot_exception(vexc.TaskInProgress,
                                                    5)
def test_reboot(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "SOFT"
self.conn.reboot(self.context, self.instance, self.network_info,
reboot_type)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_hard(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "HARD"
self.conn.reboot(self.context, self.instance, self.network_info,
reboot_type)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
def test_reboot_with_uuid(self):
"""Test fall back to use name when can't find by uuid."""
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
reboot_type = "SOFT"
self.conn.reboot(self.context, self.instance, self.network_info,
reboot_type)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
    def test_reboot_non_existent(self):
        """Rebooting an instance that was never spawned raises NotFound."""
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound, self.conn.reboot,
                          self.context, self.instance, self.network_info,
                          'SOFT')
    def test_poll_rebooting_instances(self):
        """Polling rebooting instances issues a compute-API reboot each.

        mox record/replay: exactly one compute_api.API.reboot call is
        expected for the single instance passed in.
        """
        self.mox.StubOutWithMock(compute_api.API, 'reboot')
        compute_api.API.reboot(mox.IgnoreArg(), mox.IgnoreArg(),
                               mox.IgnoreArg())
        self.mox.ReplayAll()
        self._create_vm()
        instances = [self.instance]
        self.conn.poll_rebooting_instances(60, instances)
def test_reboot_not_poweredon(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.context, self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SUSPENDED)
self.assertRaises(exception.InstanceRebootFailure, self.conn.reboot,
self.context, self.instance, self.network_info,
'SOFT')
def test_suspend(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.context, self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SUSPENDED)
    def test_suspend_non_existent(self):
        """Suspending an instance that was never spawned raises NotFound."""
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound, self.conn.suspend,
                          self.context, self.instance)
def test_resume(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.suspend(self.context, self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SUSPENDED)
self.conn.resume(self.context, self.instance, self.network_info)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
    def test_resume_non_existent(self):
        """Resuming an instance that was never spawned raises NotFound."""
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound, self.conn.resume,
                          self.context, self.instance, self.network_info)
    def test_resume_not_suspended(self):
        """Resuming a running (not suspended) instance fails."""
        self._create_vm()
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        self.assertRaises(exception.InstanceResumeFailure, self.conn.resume,
                          self.context, self.instance, self.network_info)
def test_power_on(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.power_off(self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SHUTDOWN)
self.conn.power_on(self.context, self.instance, self.network_info)
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
    def test_power_on_non_existent(self):
        """Powering on an instance that was never spawned raises NotFound."""
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound, self.conn.power_on,
                          self.context, self.instance, self.network_info)
def test_power_off(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
self.conn.power_off(self.instance)
info = self._get_info()
self._check_vm_info(info, power_state.SHUTDOWN)
    def test_power_off_non_existent(self):
        """Powering off an instance that was never spawned raises NotFound."""
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound, self.conn.power_off,
                          self.instance)
    @mock.patch.object(driver.VMwareVCDriver, 'reboot')
    @mock.patch.object(vm_util, 'get_vm_state',
                       return_value='poweredOff')
    def test_resume_state_on_host_boot(self, mock_get_vm_state,
                                       mock_reboot):
        """A powered-off VM is hard-rebooted when the host comes back up."""
        self._create_instance()
        self.conn.resume_state_on_host_boot(self.context, self.instance,
                                            'network_info')
        mock_get_vm_state.assert_called_once_with(self.conn._session,
                                                  self.instance)
        mock_reboot.assert_called_once_with(self.context, self.instance,
                                            'network_info', 'hard', None)
    def test_resume_state_on_host_boot_no_reboot(self):
        """VMs already poweredOn or suspended are not rebooted on host boot."""
        self._create_instance()
        for state in ['poweredOn', 'suspended']:
            with test.nested(
                mock.patch.object(driver.VMwareVCDriver, 'reboot'),
                mock.patch.object(vm_util, 'get_vm_state',
                                  return_value=state)
            ) as (mock_reboot, mock_get_vm_state):
                self.conn.resume_state_on_host_boot(self.context,
                                                    self.instance,
                                                    'network_info')
                mock_get_vm_state.assert_called_once_with(self.conn._session,
                                                          self.instance)
                self.assertFalse(mock_reboot.called)
@mock.patch('nova.virt.driver.block_device_info_get_mapping')
@mock.patch('nova.virt.vmwareapi.driver.VMwareVCDriver.detach_volume')
def test_detach_instance_volumes(
self, detach_volume, block_device_info_get_mapping):
self._create_vm()
def _mock_bdm(connection_info, device_name):
return {'connection_info': connection_info,
'device_name': device_name}
disk_1 = _mock_bdm(mock.sentinel.connection_info_1, 'dev1')
disk_2 = _mock_bdm(mock.sentinel.connection_info_2, 'dev2')
block_device_info_get_mapping.return_value = [disk_1, disk_2]
detach_volume.side_effect = [None, exception.DiskNotFound("Error")]
with mock.patch.object(self.conn, '_vmops') as vmops:
block_device_info = mock.sentinel.block_device_info
self.conn._detach_instance_volumes(self.instance,
block_device_info)
block_device_info_get_mapping.assert_called_once_with(
block_device_info)
vmops.power_off.assert_called_once_with(self.instance)
exp_detach_calls = [mock.call(mock.sentinel.connection_info_1,
self.instance, 'dev1'),
mock.call(mock.sentinel.connection_info_2,
self.instance, 'dev2')]
self.assertEqual(exp_detach_calls, detach_volume.call_args_list)
def test_destroy(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
instances = self.conn.list_instances()
self.assertEqual(1, len(instances))
self.conn.destroy(self.context, self.instance, self.network_info)
instances = self.conn.list_instances()
self.assertEqual(0, len(instances))
self.assertIsNone(vm_util.vm_ref_cache_get(self.uuid))
def test_destroy_no_datastore(self):
self._create_vm()
info = self._get_info()
self._check_vm_info(info, power_state.RUNNING)
instances = self.conn.list_instances()
self.assertEqual(1, len(instances))
# Delete the vmPathName
vm = self._get_vm_record()
vm.delete('config.files.vmPathName')
self.conn.destroy(self.context, self.instance, self.network_info)
instances = self.conn.list_instances()
self.assertEqual(0, len(instances))
    def test_destroy_non_existent(self):
        """Destroying a never-spawned instance still calls vmops.destroy."""
        self.destroy_disks = True
        with mock.patch.object(self.conn._vmops,
                               "destroy") as mock_destroy:
            self._create_instance()
            self.conn.destroy(self.context, self.instance,
                              self.network_info,
                              None, self.destroy_disks)
            mock_destroy.assert_called_once_with(self.instance,
                                                 self.destroy_disks)
    def test_destroy_instance_without_compute(self):
        """Destroy is a no-op for an instance with no compute record."""
        instance = fake_instance.fake_instance_obj(None)
        self.destroy_disks = True
        with mock.patch.object(self.conn._vmops,
                               "destroy") as mock_destroy:
            self.conn.destroy(self.context, instance,
                              self.network_info,
                              None, self.destroy_disks)
            self.assertFalse(mock_destroy.called)
    def _destroy_instance_without_vm_ref(self,
                                         task_state=None):
        """Destroy an instance found by name rather than uuid.

        When the instance is mid resize-revert, _destroy_instance must
        not be called; otherwise it is called exactly once. In neither
        case should the session issue raw _call_method calls.
        """

        def fake_vm_ref_from_name(session, vm_name):
            return 'fake-ref'

        self._create_instance()
        with test.nested(
             mock.patch.object(vm_util, 'get_vm_ref_from_name',
                               fake_vm_ref_from_name),
             mock.patch.object(self.conn._session,
                               '_call_method'),
             mock.patch.object(self.conn._vmops,
                               '_destroy_instance')
        ) as (mock_get, mock_call, mock_destroy):
            self.instance.task_state = task_state
            self.conn.destroy(self.context, self.instance,
                              self.network_info,
                              None, True)
            if task_state == task_states.RESIZE_REVERTING:
                expected = 0
            else:
                expected = 1
            self.assertEqual(expected, mock_destroy.call_count)
            self.assertFalse(mock_call.called)
    def test_destroy_instance_without_vm_ref(self):
        """Default task state: destroy proceeds via name lookup."""
        self._destroy_instance_without_vm_ref()
    def test_destroy_instance_without_vm_ref_with_resize_revert(self):
        """During a resize revert the instance must not be destroyed."""
        self._destroy_instance_without_vm_ref(
            task_state=task_states.RESIZE_REVERTING)
    def _rescue(self, config_drive=False):
        """Rescue an instance and verify the resulting VM pair.

        After rescue, the rescue VM ('1') is RUNNING, the original VM
        ('1-orig') is SHUTDOWN, and power-on was invoked exactly once.
        When config_drive is set, a config-drive ISO is stubbed in too.
        """
        # validate that the power on is only called once
        self._power_on = vm_util.power_on_instance
        self._power_on_called = 0

        def fake_attach_disk_to_vm(vm_ref, instance,
                                   adapter_type, disk_type, vmdk_path=None,
                                   disk_size=None, linked_clone=False,
                                   controller_key=None, unit_number=None,
                                   device_name=None):
            # The instance must already be powered off when the rescue
            # disk is attached.
            info = self.conn.get_info(instance)
            self._check_vm_info(info, power_state.SHUTDOWN)

        if config_drive:
            def fake_create_config_drive(instance, injected_files, password,
                                         network_info, data_store_name,
                                         folder, instance_uuid, cookies):
                self.assertTrue(uuidutils.is_uuid_like(instance['uuid']))
                return str(ds_obj.DatastorePath(data_store_name,
                                                instance_uuid, 'fake.iso'))

            self.stub_out('nova.virt.vmwareapi.vmops._create_config_drive',
                          fake_create_config_drive)

        self._create_vm()

        def fake_power_on_instance(session, instance, vm_ref=None):
            # Count power-ons, then delegate to the real implementation.
            self._power_on_called += 1
            return self._power_on(session, instance, vm_ref=vm_ref)

        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
        self.stub_out('nova.virt.vmwareapi.vm_util.power_on_instance',
                      fake_power_on_instance)
        self.stub_out('nova.virt.vmwareapi.volumeops.'
                      'VMwareVolumeOps.attach_disk_to_vm',
                      fake_attach_disk_to_vm)

        self.conn.rescue(self.context, self.instance, self.network_info,
                         self.image, 'fake-password')

        info = self.conn.get_info({'name': '1',
                                   'uuid': self.uuid,
                                   'node': self.instance_node})
        self._check_vm_info(info, power_state.RUNNING)
        info = self.conn.get_info({'name': '1-orig',
                                   'uuid': '%s-orig' % self.uuid,
                                   'node': self.instance_node})
        self._check_vm_info(info, power_state.SHUTDOWN)
        self.assertIsNotNone(vm_util.vm_ref_cache_get(self.uuid))
        self.assertEqual(1, self._power_on_called)
    def test_get_diagnostics(self):
        """Diagnostics from the fake VM come back 'vmware:'-prefixed."""
        self._create_vm()
        expected = {'memoryReservation': 0, 'suspendInterval': 0,
                    'maxCpuUsage': 2000, 'toolsInstallerMounted': False,
                    'consumedOverheadMemory': 20, 'numEthernetCards': 1,
                    'numCpu': 1, 'featureRequirement': [{'key': 'cpuid.AES'}],
                    'memoryOverhead': 21417984,
                    'guestMemoryUsage': 0, 'connectionState': 'connected',
                    'memorySizeMB': 512, 'balloonedMemory': 0,
                    'vmPathName': 'fake_path', 'template': False,
                    'overallCpuUsage': 0, 'powerState': 'poweredOn',
                    'cpuReservation': 0, 'overallCpuDemand': 0,
                    'numVirtualDisks': 1, 'hostMemoryUsage': 141}
        # Every key is namespaced with the driver prefix.
        expected = {'vmware:' + k: v for k, v in expected.items()}
        instance = fake_instance.fake_instance_obj(None,
                                                   name=1,
                                                   uuid=self.uuid,
                                                   node=self.instance_node)
        self.assertThat(
                self.conn.get_diagnostics(instance),
                matchers.DictMatches(expected))
    def test_get_instance_diagnostics(self):
        """Standardized instance diagnostics serialize to the expected dict."""
        self._create_vm()
        expected = {'uptime': 0,
                    'memory_details': {'used': 0, 'maximum': 512},
                    'nic_details': [],
                    'driver': 'vmwareapi',
                    'state': 'running',
                    'version': '1.0',
                    'cpu_details': [],
                    'disk_details': [],
                    'hypervisor_os': 'esxi',
                    'config_drive': 'False'}
        instance = objects.Instance(uuid=self.uuid,
                                    config_drive=False,
                                    system_metadata={},
                                    node=self.instance_node)
        actual = self.conn.get_instance_diagnostics(instance)
        self.assertThat(actual.serialize(), matchers.DictMatches(expected))
    def test_get_console_output(self):
        """Console output is not implemented by the VC driver."""
        self.assertRaises(NotImplementedError, self.conn.get_console_output,
                          None, None)
    def test_get_vnc_console_non_existent(self):
        """VNC console lookup for a never-spawned instance raises NotFound."""
        self._create_instance()
        self.assertRaises(exception.InstanceNotFound,
                          self.conn.get_vnc_console,
                          self.context,
                          self.instance)
def _test_get_vnc_console(self):
self._create_vm()
fake_vm = self._get_vm_record()
OptionValue = collections.namedtuple('OptionValue', ['key', 'value'])
opt_val = OptionValue(key='', value=5906)
fake_vm.set(vm_util.VNC_CONFIG_KEY, opt_val)
vnc_console = self.conn.get_vnc_console(self.context, self.instance)
self.assertEqual(self.vnc_host, vnc_console.host)
self.assertEqual(5906, vnc_console.port)
    def test_get_vnc_console(self):
        """The configured VNC host/port are returned for a live VM."""
        self._test_get_vnc_console()
    def test_get_vnc_console_noport(self):
        """Without a configured VNC port the console type is unavailable."""
        self._create_vm()
        self.assertRaises(exception.ConsoleTypeUnavailable,
                          self.conn.get_vnc_console,
                          self.context,
                          self.instance)
def test_get_volume_connector(self):
self._create_vm()
connector_dict = self.conn.get_volume_connector(self.instance)
fake_vm = self._get_vm_record()
fake_vm_id = fake_vm.obj.value
self.assertEqual('test_url', connector_dict['ip'])
self.assertEqual('iscsi-name', connector_dict['initiator'])
self.assertEqual('test_url', connector_dict['host'])
self.assertEqual(fake_vm_id, connector_dict['instance'])
def _test_vmdk_connection_info(self, type):
return {'driver_volume_type': type,
'serial': 'volume-fake-id',
'data': {'volume': 'vm-10',
'volume_id': 'volume-fake-id'}}
    def test_volume_attach_vmdk(self):
        """attach_volume routes a vmdk volume to _attach_volume_vmdk.

        mox record/replay: exactly one _attach_volume_vmdk call expected.
        """
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_attach_volume_vmdk')
        volumeops.VMwareVolumeOps._attach_volume_vmdk(connection_info,
                                                      self.instance, None)
        self.mox.ReplayAll()
        self.conn.attach_volume(None, connection_info, self.instance,
                                mount_point)
    def test_volume_detach_vmdk(self):
        """detach_volume routes a vmdk volume to _detach_volume_vmdk.

        mox record/replay: exactly one _detach_volume_vmdk call expected.
        """
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_detach_volume_vmdk')
        volumeops.VMwareVolumeOps._detach_volume_vmdk(connection_info,
                                                      self.instance)
        self.mox.ReplayAll()
        self.conn.detach_volume(connection_info, self.instance, mount_point,
                                encryption=None)
    def test_attach_vmdk_disk_to_vm(self):
        """Attaching a vmdk volume wires the disk and records its uuid."""
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('vmdk')

        adapter_type = constants.DEFAULT_ADAPTER_TYPE
        disk_type = constants.DEFAULT_DISK_TYPE
        disk_uuid = 'e97f357b-331e-4ad1-b726-89be048fb811'
        backing = mock.Mock(uuid=disk_uuid)
        device = mock.Mock(backing=backing)
        vmdk_info = vm_util.VmdkInfo('fake-path', adapter_type, disk_type, 64,
                                     device)
        with test.nested(
            mock.patch.object(vm_util, 'get_vm_ref',
                              return_value=mock.sentinel.vm_ref),
            mock.patch.object(volumeops.VMwareVolumeOps, '_get_volume_ref'),
            mock.patch.object(vm_util, 'get_vmdk_info',
                              return_value=vmdk_info),
            mock.patch.object(volumeops.VMwareVolumeOps, 'attach_disk_to_vm'),
            mock.patch.object(volumeops.VMwareVolumeOps,
                              '_update_volume_details')
        ) as (get_vm_ref, get_volume_ref, get_vmdk_info,
              attach_disk_to_vm, update_volume_details):
            self.conn.attach_volume(None, connection_info, self.instance,
                                    '/dev/vdc')

            get_vm_ref.assert_called_once_with(self.conn._session,
                                               self.instance)
            get_volume_ref.assert_called_once_with(
                connection_info['data']['volume'])
            self.assertTrue(get_vmdk_info.called)
            attach_disk_to_vm.assert_called_once_with(mock.sentinel.vm_ref,
                self.instance, adapter_type, disk_type, vmdk_path='fake-path')
            # The volume's disk uuid must be persisted against the vm.
            update_volume_details.assert_called_once_with(
                mock.sentinel.vm_ref, connection_info['data']['volume_id'],
                disk_uuid)
def test_detach_vmdk_disk_from_vm(self):
self._create_vm()
connection_info = self._test_vmdk_connection_info('vmdk')
with mock.patch.object(volumeops.VMwareVolumeOps,
'detach_volume') as detach_volume:
self.conn.detach_volume(connection_info, self.instance,
'/dev/vdc', encryption=None)
detach_volume.assert_called_once_with(connection_info,
self.instance)
    def test_volume_attach_iscsi(self):
        """attach_volume routes an iscsi volume to _attach_volume_iscsi.

        mox record/replay: exactly one _attach_volume_iscsi call expected.
        """
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_attach_volume_iscsi')
        volumeops.VMwareVolumeOps._attach_volume_iscsi(connection_info,
                                                       self.instance, None)
        self.mox.ReplayAll()
        self.conn.attach_volume(None, connection_info, self.instance,
                                mount_point)
    def test_volume_detach_iscsi(self):
        """detach_volume routes an iscsi volume to _detach_volume_iscsi.

        mox record/replay: exactly one _detach_volume_iscsi call expected.
        """
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        mount_point = '/dev/vdc'
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_detach_volume_iscsi')
        volumeops.VMwareVolumeOps._detach_volume_iscsi(connection_info,
                                                       self.instance)
        self.mox.ReplayAll()
        self.conn.detach_volume(connection_info, self.instance, mount_point,
                                encryption=None)
    def test_attach_iscsi_disk_to_vm(self):
        """iSCSI attach rescans the HBA, then attaches the found rdmp disk.

        mox record/replay, order matters: the first target lookup misses,
        the HBA is rescanned with the target portal, the second lookup
        succeeds, and the disk is attached as 'rdmp'.
        """
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        connection_info['data']['target_portal'] = 'fake_target_host:port'
        connection_info['data']['target_iqn'] = 'fake_target_iqn'
        mount_point = '/dev/vdc'
        discover = ('fake_name', 'fake_uuid')
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_get_target')
        # simulate target not found
        volumeops.VMwareVolumeOps._iscsi_get_target(
            connection_info['data']).AndReturn((None, None))
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_add_send_target_host')
        # rescan gets called with target portal
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_rescan_hba')
        volumeops.VMwareVolumeOps._iscsi_rescan_hba(
            connection_info['data']['target_portal'])
        # simulate target found
        volumeops.VMwareVolumeOps._iscsi_get_target(
            connection_info['data']).AndReturn(discover)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'attach_disk_to_vm')
        volumeops.VMwareVolumeOps.attach_disk_to_vm(mox.IgnoreArg(),
                self.instance, mox.IgnoreArg(), 'rdmp',
                device_name=mox.IgnoreArg())
        self.mox.ReplayAll()
        self.conn.attach_volume(None, connection_info, self.instance,
                                mount_point)
    def test_iscsi_rescan_hba(self):
        """Rescanning the HBA adds the send target exactly once.

        A second rescan with the same portal must not add a duplicate
        configuredSendTarget entry.
        """
        fake_target_portal = 'fake_target_host:port'
        host_storage_sys = vmwareapi_fake._get_objects(
            "HostStorageSystem").objects[0]
        iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
                                               '.hostBusAdapter')
        iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
        # Check the host system does not have the send target
        self.assertRaises(AttributeError, getattr, iscsi_hba,
                          'configuredSendTarget')
        # Rescan HBA with the target portal
        vops = volumeops.VMwareVolumeOps(self.conn._session)
        vops._iscsi_rescan_hba(fake_target_portal)
        # Check if HBA has the target portal configured
        self.assertEqual('fake_target_host',
                         iscsi_hba.configuredSendTarget[0].address)
        # Rescan HBA with same portal
        vops._iscsi_rescan_hba(fake_target_portal)
        self.assertEqual(1, len(iscsi_hba.configuredSendTarget))
def test_iscsi_get_target(self):
data = {'target_portal': 'fake_target_host:port',
'target_iqn': 'fake_target_iqn'}
host = vmwareapi_fake._get_objects('HostSystem').objects[0]
host._add_iscsi_target(data)
vops = volumeops.VMwareVolumeOps(self.conn._session)
result = vops._iscsi_get_target(data)
self.assertEqual(('fake-device', 'fake-uuid'), result)
    def test_detach_iscsi_disk_from_vm(self):
        """iSCSI detach finds the rdm disk by uuid and destroys it.

        mox record/replay: target lookup, rdm disk lookup, then
        detach_disk_from_vm with destroy_disk=True.
        """
        self._create_vm()
        connection_info = self._test_vmdk_connection_info('iscsi')
        connection_info['data']['target_portal'] = 'fake_target_portal'
        connection_info['data']['target_iqn'] = 'fake_target_iqn'
        mount_point = '/dev/vdc'
        find = ('fake_name', 'fake_uuid')
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 '_iscsi_get_target')
        volumeops.VMwareVolumeOps._iscsi_get_target(
            connection_info['data']).AndReturn(find)
        self.mox.StubOutWithMock(vm_util, 'get_rdm_disk')
        device = 'fake_device'
        vm_util.get_rdm_disk(mox.IgnoreArg(), 'fake_uuid').AndReturn(device)
        self.mox.StubOutWithMock(volumeops.VMwareVolumeOps,
                                 'detach_disk_from_vm')
        volumeops.VMwareVolumeOps.detach_disk_from_vm(mox.IgnoreArg(),
                self.instance, device, destroy_disk=True)
        self.mox.ReplayAll()
        self.conn.detach_volume(connection_info, self.instance, mount_point,
                                encryption=None)
def test_connection_info_get(self):
self._create_vm()
connector = self.conn.get_volume_connector(self.instance)
self.assertEqual('test_url', connector['ip'])
self.assertEqual('test_url', connector['host'])
self.assertEqual('iscsi-name', connector['initiator'])
self.assertIn('instance', connector)
def test_connection_info_get_after_destroy(self):
self._create_vm()
self.conn.destroy(self.context, self.instance, self.network_info)
connector = self.conn.get_volume_connector(self.instance)
self.assertEqual('test_url', connector['ip'])
self.assertEqual('test_url', connector['host'])
self.assertEqual('iscsi-name', connector['initiator'])
self.assertNotIn('instance', connector)
    def test_refresh_instance_security_rules(self):
        """Security-rule refresh is not implemented by the VC driver."""
        self.assertRaises(NotImplementedError,
                          self.conn.refresh_instance_security_rules,
                          instance=None)
    @mock.patch.object(objects.block_device.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    def test_image_aging_image_used(self, mock_get_by_inst):
        """Cache files of an image still in use survive cache management."""
        self._create_vm()
        all_instances = [self.instance]
        self.conn.manage_image_cache(self.context, all_instances)
        self._cached_files_exist()
def _get_timestamp_filename(self):
return '%s%s' % (imagecache.TIMESTAMP_PREFIX,
self.old_time.strftime(imagecache.TIMESTAMP_FORMAT))
    def _override_time(self):
        """Pin the image-cache timestamp filename to a fixed past time."""
        self.old_time = datetime.datetime(2012, 11, 22, 12, 00, 00)

        def _fake_get_timestamp_filename(fake):
            return self._get_timestamp_filename()

        self.stub_out('nova.virt.vmwareapi.imagecache.'
                      'ImageCacheManager._get_timestamp_filename',
                      _fake_get_timestamp_filename)
def _timestamp_file_exists(self, exists=True):
timestamp = ds_obj.DatastorePath(self.ds, 'vmware_base',
self.fake_image_uuid,
self._get_timestamp_filename() + '/')
if exists:
vmwareapi_fake.assertPathExists(self, str(timestamp))
else:
vmwareapi_fake.assertPathNotExists(self, str(timestamp))
    def _image_aging_image_marked_for_deletion(self):
        """Unused cached image is timestamped (marked) but not yet removed."""
        self._create_vm(uuid=uuidutils.generate_uuid())
        self._cached_files_exist()
        all_instances = []
        self.conn.manage_image_cache(self.context, all_instances)
        self._cached_files_exist()
        self._timestamp_file_exists()
    def test_image_aging_image_marked_for_deletion(self):
        """An unused image gets a deletion timestamp marker."""
        self._override_time()
        self._image_aging_image_marked_for_deletion()
    def _timestamp_file_removed(self):
        """Spawning from a marked image clears its deletion timestamp."""
        self._override_time()
        self._image_aging_image_marked_for_deletion()
        self._create_vm(num_instances=2,
                        uuid=uuidutils.generate_uuid())
        self._timestamp_file_exists(exists=False)
    def test_timestamp_file_removed_spawn(self):
        """Spawn removes the image's deletion timestamp."""
        self._timestamp_file_removed()
    @mock.patch.object(objects.block_device.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    def test_timestamp_file_removed_aging(self, mock_get_by_inst):
        """Cache management clears a stale timestamp for an in-use image."""
        self._timestamp_file_removed()
        # Re-create the timestamp by hand, then let cache management
        # remove it because the image is referenced by an instance.
        ts = self._get_timestamp_filename()
        ts_path = ds_obj.DatastorePath(self.ds, 'vmware_base',
                                       self.fake_image_uuid, ts + '/')
        vmwareapi_fake._add_file(str(ts_path))
        self._timestamp_file_exists()
        all_instances = [self.instance]
        self.conn.manage_image_cache(self.context, all_instances)
        self._timestamp_file_exists(exists=False)
    @mock.patch.object(objects.block_device.BlockDeviceMappingList,
                       'get_by_instance_uuid')
    def test_image_aging_disabled(self, mock_get_by_inst):
        """With aging disabled, unused images are neither marked nor aged."""
        self._override_time()
        self.flags(remove_unused_base_images=False)
        self._create_vm()
        self._cached_files_exist()
        all_instances = []
        self.conn.manage_image_cache(self.context, all_instances)
        self._cached_files_exist(exists=True)
        self._timestamp_file_exists(exists=False)
    def _image_aging_aged(self, aging_time=100):
        """Mark an unused image, then advance time and run cache aging.

        :param aging_time: minimum age (seconds) before removal; the
            clock is advanced 10 seconds past the frozen mark time.
        """
        self._override_time()
        cur_time = datetime.datetime(2012, 11, 22, 12, 00, 10)
        self.flags(remove_unused_original_minimum_age_seconds=aging_time)
        self._image_aging_image_marked_for_deletion()
        all_instances = []
        self.useFixture(utils_fixture.TimeFixture(cur_time))
        self.conn.manage_image_cache(self.context, all_instances)
    def test_image_aging_aged(self):
        """An image older than the aging threshold is removed."""
        self._image_aging_aged(aging_time=8)
        self._cached_files_exist(exists=False)
    def test_image_aging_not_aged(self):
        """An image younger than the aging threshold is kept."""
        self._image_aging_aged()
        self._cached_files_exist()
    def test_public_api_signatures(self):
        """The VC driver matches the ComputeDriver public API signatures."""
        self.assertPublicAPISignatures(v_driver.ComputeDriver(None), self.conn)
    def test_register_extension(self):
        """When the extension is absent it is looked up, then registered."""
        with mock.patch.object(self.conn._session, '_call_method',
                               return_value=None) as mock_call_method:
            self.conn._register_openstack_extension()
            mock_call_method.assert_has_calls(
                [mock.call(oslo_vim_util, 'find_extension',
                           constants.EXTENSION_KEY),
                 mock.call(oslo_vim_util, 'register_extension',
                           constants.EXTENSION_KEY,
                           constants.EXTENSION_TYPE_INSTANCE)])
    def test_register_extension_already_exists(self):
        """When the extension already exists only the lookup is made."""
        with mock.patch.object(self.conn._session, '_call_method',
                               return_value='fake-extension') as mock_find_ext:
            self.conn._register_openstack_extension()
            mock_find_ext.assert_called_once_with(oslo_vim_util,
                                                  'find_extension',
                                                  constants.EXTENSION_KEY)
def test_list_instances(self):
instances = self.conn.list_instances()
self.assertEqual(0, len(instances))
    def _setup_mocks_for_session(self, mock_init):
        """Build a VC driver whose session gains a vim only on re-create.

        The returned driver starts with session.vim = None; calling
        _create_session installs a mock vim, letting tests exercise the
        reconnect path.
        """
        mock_init.return_value = None

        vcdriver = driver.VMwareVCDriver(None, False)
        vcdriver._session = mock.Mock()
        vcdriver._session.vim = None

        def side_effect():
            vcdriver._session.vim = mock.Mock()
        vcdriver._session._create_session.side_effect = side_effect
        return vcdriver
    def test_host_power_action(self):
        """Host power actions are not implemented by the VC driver."""
        self.assertRaises(NotImplementedError,
                          self.conn.host_power_action, 'action')
    def test_host_maintenance_mode(self):
        """Host maintenance mode is not implemented by the VC driver."""
        self.assertRaises(NotImplementedError,
                          self.conn.host_maintenance_mode, 'host', 'mode')
    def test_set_host_enabled(self):
        """Enabling/disabling a host is not implemented by the VC driver."""
        self.assertRaises(NotImplementedError,
                          self.conn.set_host_enabled, 'state')
def test_datastore_regex_configured(self):
self.assertEqual(self.conn._datastore_regex,
self.conn._vmops._datastore_regex)
self.assertEqual(self.conn._datastore_regex,
self.conn._vc_state._datastore_regex)
    @mock.patch('nova.virt.vmwareapi.ds_util.get_datastore')
    def test_datastore_regex_configured_vcstate(self, mock_get_ds_ref):
        """vc_state passes its datastore regex when fetching the datastore."""
        vcstate = self.conn._vc_state
        self.conn.get_available_resource(self.node_name)
        mock_get_ds_ref.assert_called_with(
            vcstate._session, vcstate._cluster, vcstate._datastore_regex)
def test_get_available_resource(self):
stats = self.conn.get_available_resource(self.node_name)
self.assertEqual(32, stats['vcpus'])
self.assertEqual(1024, stats['local_gb'])
self.assertEqual(1024 - 500, stats['local_gb_used'])
self.assertEqual(1000, stats['memory_mb'])
self.assertEqual(500, stats['memory_mb_used'])
self.assertEqual('VMware vCenter Server', stats['hypervisor_type'])
self.assertEqual(5001000, stats['hypervisor_version'])
self.assertEqual(self.node_name, stats['hypervisor_hostname'])
self.assertIsNone(stats['cpu_info'])
self.assertEqual(
[("i686", "vmware", "hvm"), ("x86_64", "vmware", "hvm")],
stats['supported_instances'])
    def test_invalid_datastore_regex(self):
        # Tests if we raise an exception for Invalid Regular Expression in
        # vmware_datastore_regex
        self.flags(cluster_name='test_cluster', datastore_regex='fake-ds(01',
                   group='vmware')
        self.assertRaises(exception.InvalidInput, driver.VMwareVCDriver, None)
def test_get_available_nodes(self):
nodelist = self.conn.get_available_nodes()
self.assertEqual(1, len(nodelist))
self.assertIn(self.node_name, nodelist)
    @mock.patch.object(nova.virt.vmwareapi.images.VMwareImage,
                       'from_image')
    def test_spawn_with_sparse_image(self, mock_from_image):
        """Spawning from a sparse, non-linked-clone image ends up RUNNING."""
        img_info = images.VMwareImage(
            image_id=self.fake_image_uuid,
            file_size=1024,
            disk_type=constants.DISK_TYPE_SPARSE,
            linked_clone=False)

        mock_from_image.return_value = img_info

        self._create_vm()
        info = self._get_info()
        self._check_vm_info(info, power_state.RUNNING)
    def test_plug_vifs(self):
        # Check to make sure the method raises NotImplementedError.
        self._create_instance()
        self.assertRaises(NotImplementedError,
                          self.conn.plug_vifs,
                          instance=self.instance, network_info=None)
    def test_unplug_vifs(self):
        # Check to make sure the method raises NotImplementedError.
        self._create_instance()
        self.assertRaises(NotImplementedError,
                          self.conn.unplug_vifs,
                          instance=self.instance, network_info=None)
def _create_vif(self):
gw_4 = network_model.IP(address='101.168.1.1', type='gateway')
dns_4 = network_model.IP(address='8.8.8.8', type=None)
subnet_4 = network_model.Subnet(cidr='101.168.1.0/24',
dns=[dns_4],
gateway=gw_4,
routes=None,
dhcp_server='191.168.1.1')
gw_6 = network_model.IP(address='101:1db9::1', type='gateway')
subnet_6 = network_model.Subnet(cidr='101:1db9::/64',
dns=None,
gateway=gw_6,
ips=None,
routes=None)
network_neutron = network_model.Network(id='network-id-xxx-yyy-zzz',
bridge=None,
label=None,
subnets=[subnet_4,
subnet_6],
bridge_interface='eth0',
vlan=99)
vif_bridge_neutron = network_model.VIF(id='new-vif-xxx-yyy-zzz',
address='ca:fe:de:ad:be:ef',
network=network_neutron,
type=None,
devname='tap-xxx-yyy-zzz',
ovs_interfaceid='aaa-bbb-ccc')
return vif_bridge_neutron
def _validate_interfaces(self, id, index, num_iface_ids):
vm = self._get_vm_record()
found_iface_id = False
extras = vm.get("config.extraConfig")
key = "nvp.iface-id.%s" % index
num_found = 0
for c in extras.OptionValue:
if c.key.startswith("nvp.iface-id."):
num_found += 1
if c.key == key and c.value == id:
found_iface_id = True
self.assertTrue(found_iface_id)
self.assertEqual(num_iface_ids, num_found)
    def _attach_interface(self, vif):
        """Attach *vif* and verify it lands in slot 1 (2 ids total)."""
        self.conn.attach_interface(self.instance, self.image, vif)
        self._validate_interfaces(vif['id'], 1, 2)
    def test_attach_interface(self):
        """A freshly created VIF can be attached to a live VM."""
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)
    def test_attach_interface_with_exception(self):
        """A failed reconfigure task surfaces as InterfaceAttachFailed."""
        self._create_vm()
        vif = self._create_vif()

        with mock.patch.object(self.conn._session, '_wait_for_task',
                               side_effect=Exception):
            self.assertRaises(exception.InterfaceAttachFailed,
                              self.conn.attach_interface,
                              self.instance, self.image, vif)
    @mock.patch.object(vif, 'get_network_device',
                       return_value='fake_device')
    def _detach_interface(self, vif, mock_get_device):
        """Attach then detach *vif*; the slot is marked 'free' afterwards.

        NOTE(review): the 'vif' parameter shadows the 'vif' module used
        by the decorator — consider renaming the parameter.
        """
        self._create_vm()
        self._attach_interface(vif)
        self.conn.detach_interface(self.instance, vif)
        self._validate_interfaces('free', 1, 2)
    def test_detach_interface(self):
        """An attached interface can be detached again."""
        vif = self._create_vif()
        self._detach_interface(vif)
    def test_detach_interface_and_attach(self):
        """A detached interface slot can be reused by a new attach."""
        vif = self._create_vif()
        self._detach_interface(vif)
        self.conn.attach_interface(self.instance, self.image, vif)
        self._validate_interfaces(vif['id'], 1, 2)
    def test_detach_interface_no_device(self):
        """Detach fails with NotFound when no network device matches."""
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)
        self.assertRaises(exception.NotFound, self.conn.detach_interface,
                          self.instance, vif)
    def test_detach_interface_no_vif_match(self):
        """Detach fails with NotFound when the vif id does not match."""
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)
        vif['id'] = 'bad-id'
        self.assertRaises(exception.NotFound, self.conn.detach_interface,
                          self.instance, vif)
    @mock.patch.object(vif, 'get_network_device',
                       return_value='fake_device')
    def test_detach_interface_with_exception(self, mock_get_device):
        """A failed reconfigure task surfaces as InterfaceDetachFailed."""
        self._create_vm()
        vif = self._create_vif()
        self._attach_interface(vif)

        with mock.patch.object(self.conn._session, '_wait_for_task',
                               side_effect=Exception):
            self.assertRaises(exception.InterfaceDetachFailed,
                              self.conn.detach_interface,
                              self.instance, vif)
    def test_resize_to_smaller_disk(self):
        """Resizing to a flavor with a smaller disk triggers a rollback."""
        self._create_vm(instance_type='m1.large')
        flavor = self._get_instance_type_by_name('m1.small')
        self.assertRaises(exception.InstanceFaultRollback,
                          self.conn.migrate_disk_and_power_off, self.context,
                          self.instance, 'fake_dest', flavor, None)
    def test_spawn_attach_volume_vmdk(self):
        """Spawn with a vmdk block device and an image ref."""
        self._spawn_attach_volume_vmdk()
    def test_spawn_attach_volume_vmdk_no_image_ref(self):
        """Spawn with a vmdk block device but no image ref (boot-from-vol)."""
        self._spawn_attach_volume_vmdk(set_image_ref=False)
    def test_pause(self):
        # Tests that the VMwareVCDriver does not implement the pause method.
        self._create_instance()
        self.assertRaises(NotImplementedError, self.conn.pause, self.instance)
    def test_unpause(self):
        # Tests that the VMwareVCDriver does not implement the unpause method.
        self._create_instance()
        self.assertRaises(NotImplementedError, self.conn.unpause,
                          self.instance)
    def test_datastore_dc_map(self):
        """Spawning populates the datastore -> datacenter mapping cache."""
        self.assertEqual({}, ds_util._DS_DC_MAPPING)
        self._create_vm()
        # currently there are 2 data stores
        self.assertEqual(2, len(ds_util._DS_DC_MAPPING))
    def test_pre_live_migration(self):
        """Pre-live-migration is not implemented by the VC driver."""
        self.assertRaises(NotImplementedError,
                          self.conn.pre_live_migration, self.context,
                          'fake_instance', 'fake_block_device_info',
                          'fake_network_info', 'fake_disk_info')
    def test_live_migration(self):
        """Live migration is not implemented by the VC driver."""
        self.assertRaises(NotImplementedError,
                          self.conn.live_migration, self.context,
                          'fake_instance', 'fake_dest', 'fake_post_method',
                          'fake_recover_method')
    def test_rollback_live_migration_at_destination(self):
        """Live-migration rollback is not implemented by the VC driver."""
        self.assertRaises(NotImplementedError,
                          self.conn.rollback_live_migration_at_destination,
                          self.context, 'fake_instance', 'fake_network_info',
                          'fake_block_device_info')
    def test_post_live_migration(self):
        """post_live_migration is a silent no-op returning None."""
        self.assertIsNone(self.conn.post_live_migration(self.context,
                          'fake_instance', 'fake_block_device_info'))
def test_get_instance_disk_info_is_implemented(self):
# Ensure that the method has been implemented in the driver
instance = objects.Instance()
try:
disk_info = self.conn.get_instance_disk_info(instance)
self.assertIsNone(disk_info)
except NotImplementedError:
self.fail("test_get_instance_disk_info() should not raise "
"NotImplementedError")
def test_get_host_uptime(self):
self.assertRaises(NotImplementedError,
self.conn.get_host_uptime)
def test_pbm_wsdl_location(self):
self.flags(pbm_enabled=True,
pbm_wsdl_location='fira',
group='vmware')
self.conn._update_pbm_location()
self.assertEqual('fira', self.conn._session._pbm_wsdl_loc)
self.assertIsNone(self.conn._session._pbm)
def test_nodename(self):
test_mor = "domain-26"
self.assertEqual("%s.%s" % (test_mor,
vmwareapi_fake._FAKE_VCENTER_UUID),
self.conn._create_nodename(test_mor),
"VC driver failed to create the proper node name")
    @mock.patch.object(driver.LOG, 'warning')
    def test_min_version(self, mock_warning):
        """A vCenter at or above the minimum version logs no warning."""
        self.conn._check_min_version()
        self.assertFalse(mock_warning.called)
@mock.patch.object(driver.LOG, 'warning')
@mock.patch.object(oslo_vim_util, 'get_vc_version',
return_value='5.0.0')
def test_invalid_min_version(self, mock_version, mock_warning):
self.conn._check_min_version()
# assert that the min version is in a warning message
expected_arg = {'version': constants.MIN_VC_VERSION}
version_arg_found = False
for call in mock_warning.call_args_list:
if call[0][1] == expected_arg:
version_arg_found = True
break
self.assertTrue(version_arg_found)
| |
# -*- coding: utf-8 -*-
from os import path
from gluon import current
from gluon.html import *
from gluon.storage import Storage
from s3.s3crud import S3CRUD
from s3.s3filter import S3DateFilter, S3OptionsFilter, S3TextFilter
from s3.s3utils import s3_avatar_represent
# =============================================================================
class index():
    """ Custom Home Page """
    def __call__(self):
        # Delegate to the shared homepage() renderer defined below
        return homepage()
# =============================================================================
class datalist():
    """ Alternate URL for homepage """
    def __call__(self):
        # Same content as the homepage, just reachable under /datalist
        return homepage()
# =============================================================================
class datalist_dl_post():
    """ AJAX URL for CMS Posts (for Homepage) """
    def __call__(self):
        # homepage() detects this URL arg and switches to "dl" representation
        return homepage()
# =============================================================================
def homepage():
    """
        Custom Homepage
        - DataList of CMS Posts

        Anonymous visitors are shown the custom login page instead.
        Handles both the full page render and the AJAX pagination
        requests ("datalist_dl_post") issued by the datalist widget.
    """
    if not current.auth.is_logged_in():
        # Not logged in: show the custom login page instead
        return login()
    T = current.T
    s3db = current.s3db
    request = current.request
    response = current.response
    s3 = response.s3
    # Apply the template-specific customisations for cms_post
    current.deployment_settings.ui.customize_cms_post()
    list_layout = render_homepage_posts
    filter_widgets = [S3TextFilter(["body"],
                                   label="",
                                   _class="filter-search",
                                   _placeholder=T("Search").upper()),
                      S3OptionsFilter("series_id",
                                      label=T("Filter by Type"),
                                      represent="%(name)s",
                                      cols=3),
                      S3OptionsFilter("location_id",
                                      label=T("Filter by Location"),
                                      represent="%(name)s",
                                      widget="multiselect",
                                      cols=3),
                      S3OptionsFilter("created_by$organisation_id",
                                      label=T("Filter by Organization"),
                                      represent="%(name)s",
                                      widget="multiselect",
                                      cols=3),
                      S3DateFilter("created_on",
                                   label=T("Filter by Date")),
                      ]
    s3db.configure("cms_post",
                   filter_formstyle = filter_formstyle,
                   filter_submit = (T("Filter Results"), "btn btn-primary"),
                   filter_widgets = filter_widgets,
                   list_layout = list_layout,
                   )
    s3.dl_pagelength = 6 # 5 forces an AJAX call
    # AJAX pagination requests arrive with "datalist_dl_post" in the URL args
    if "datalist_dl_post" in request.args:
        ajax = True
    else:
        ajax = False
    def prep(r):
        # Force the "dl" (datalist) representation for AJAX requests
        if ajax:
            r.representation = "dl"
        return True
    s3.prep = prep
    request.args = ["datalist"]
    output = current.rest_controller("cms", "post",
                                     list_ajaxurl = URL(f="index", args="datalist_dl_post"))
    if ajax:
        response.view = "plain.html"
    else:
        form = output["form"]
        # Remove duplicate Submit button
        form[0][-1] = ""
        if form.errors:
            # Re-open the modal so validation errors are visible
            s3.jquery_ready.append('''$("#myModal").modal("show")''')
        # Set Title & View after REST Controller, in order to override
        output["title"] = response.title = current.deployment_settings.get_system_name()
        view = path.join(request.folder, "private", "templates",
                         "CSN", "views", "index.html")
        try:
            # Pass view as file not str to work in compiled mode
            response.view = open(view, "rb")
        except IOError:
            from gluon.http import HTTP
            raise HTTP("404", "Unable to open Custom View: %s" % view)
        # Latest 5 Disasters
        resource = s3db.resource("event_event")
        list_fields = ["name",
                       "zero_hour",
                       "closed",
                       ]
        orderby = resource.get_config("list_orderby",
                                      ~resource.table.created_on)
        # NOTE(review): this local shadows the module-level datalist class
        datalist, numrows, ids = resource.datalist(fields=list_fields,
                                                   start=None,
                                                   limit=5,
                                                   listid="event_datalist",
                                                   orderby=orderby,
                                                   layout=render_homepage_events)
        if numrows == 0:
            # Empty table or just no match?
            table = resource.table
            if "deleted" in table:
                available_records = current.db(table.deleted != True)
            else:
                available_records = current.db(table._id > 0)
            if available_records.select(table._id,
                                        limitby=(0, 1)).first():
                msg = DIV(S3CRUD.crud_string(resource.tablename,
                                             "msg_no_match"),
                          _class="empty")
            else:
                msg = DIV(S3CRUD.crud_string(resource.tablename,
                                             "msg_list_empty"),
                          _class="empty")
            data = msg
        else:
            # Render the list
            dl = datalist.html()
            data = dl
        output["disasters"] = data
    return output
# -----------------------------------------------------------------------------
def login():
    """
        Custom Login page

        Swaps in the CSN template's login view, forces the auth
        controller into its "login" action and returns the login form.
    """
    request = current.request
    response = current.response
    custom_view = path.join(request.folder, "private", "templates",
                            "CSN", "views", "login.html")
    try:
        # Open as a file object (not a path string) so that the view
        # also works when the application is compiled
        response.view = open(custom_view, "rb")
    except IOError:
        from gluon.http import HTTP
        raise HTTP("404", "Unable to open Custom View: %s" % custom_view)
    response.title = current.T("Login")
    request.args = ["login"]
    auth = current.auth
    auth.settings.formstyle = "bootstrap"
    return dict(form=auth())
# -----------------------------------------------------------------------------
def filter_formstyle(row_id, label, widget, comment):
    """
        Custom Formstyle for FilterForm

        @param row_id: HTML id for the row (unused here)
        @param label: the label
        @param widget: the form widget
        @param comment: the comment (unused here)

        Returns the widget stacked beneath its label, or the bare
        widget when there is no label.
    """
    if not label:
        return widget
    return DIV(TR(label),
               TR(widget))
# -----------------------------------------------------------------------------
def render_homepage_posts(listid, resource, rfields, record, **attr):
    """
        Custom dataList item renderer for CMS Posts on the Homepage

        @param listid: the HTML ID for this list (ignored; fixed below)
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
        @param attr: additional HTML attributes for the item
    """
    pkey = "cms_post.id"
    # Construct the item ID
    listid = "datalist"
    if pkey in record:
        record_id = record[pkey]
        item_id = "%s-%s" % (listid, record_id)
    else:
        # template
        # NOTE(review): record_id is never assigned in this branch, so the
        # permission checks below would raise NameError — presumably the
        # layout is never invoked for the "[id]" template; confirm with
        # the datalist widget
        item_id = "%s-[id]" % listid
    item_class = "thumbnail"
    db = current.db
    raw = record._row
    series = record["cms_post.series_id"]
    date = record["cms_post.created_on"]
    body = record["cms_post.body"]
    location = record["cms_post.location_id"]
    location_id = raw["cms_post.location_id"]
    location_url = URL(c="gis", f="location", args=[location_id])
    # Attachment(s)?
    document = raw["doc_document.file"]
    if document:
        doc_url = URL(c="default", f="download",
                      args=[document]
                      )
        doc_link = A(I(_class="icon icon-paper-clip fright"),
                     _href=doc_url)
    else:
        doc_link = ""
    if series not in ("News", "Twitter", "Ushahidi", "YouTube"):
        # We expect an Author
        author = record["cms_post.created_by"]
        author_id = raw["cms_post.created_by"]
        organisation = record["auth_user.organisation_id"]
        organisation_id = raw["auth_user.organisation_id"]
        org_url = URL(c="org", f="organisation", args=[organisation_id])
        # @ToDo: Optimise by not doing DB lookups (especially duplicate) within render, but doing these in the bulk query
        avatar = s3_avatar_represent(author_id,
                                     _class="media-object",
                                     _style="width:50px;padding:5px;padding-top:0px;")
        # Resolve the author's person record so we can link to their
        # HRM profile page
        s3db = current.s3db
        ltable = s3db.pr_person_user
        ptable = db.pr_person
        query = (ltable.user_id == author_id) & \
                (ltable.pe_id == ptable.pe_id)
        row = db(query).select(ptable.id,
                               limitby=(0, 1)
                               ).first()
        if row:
            person_url = URL(c="hrm", f="person", args=[row.id])
        else:
            person_url = "#"
        author = A(author,
                   _href=person_url,
                   )
        avatar = A(avatar,
                   _href=person_url,
                   _class="pull-left",
                   )
        card_person = DIV(author,
                          " - ",
                          A(organisation,
                            _href=org_url,
                            _class="card-organisation",
                            ),
                          doc_link,
                          _class="card-person",
                          )
    else:
        # No Author
        card_person = DIV(doc_link,
                          _class="card-person",
                          )
        avatar = None
    # Pick a marker icon (or embedded video) according to the series
    if series == "News":
        icon = URL(c="static", f="img",
                   args=["markers", "gis_marker.image.News.png"])
    elif series == "Twitter":
        icon = URL(c="static", f="img", args=["social", "twitter.png"])
    elif series == "Ushahidi":
        icon = URL(c="static", f="img",
                   args=["markers", "gis_marker.image.Ushahidi.png"])
    elif series == "YouTube":
        # YouTube posts embed the video itself instead of an icon;
        # the embed URL is stored in the comments field
        #icon = URL(c="static", f="img", args=["social", "YouTube.png"])
        avatar = DIV(IFRAME(_width=320,
                            _height=180,
                            _src=raw["cms_post.comments"],
                            _frameborder=0),
                     _class="pull-left"
                     )
    if not avatar:
        avatar = DIV(IMG(_src=icon,
                         _class="media-object",
                         _style="width:50px;padding:5px;padding-top:0px;",
                         ),
                     _class="pull-left")
    # Edit Bar
    permit = current.auth.s3_has_permission
    table = db.cms_post
    if permit("update", table, record_id=record_id):
        edit_btn = A(I(" ", _class="icon icon-edit"),
                     _href=URL(c="cms", f="post", args=[record_id, "update"]),
                     )
    else:
        edit_btn = ""
    if permit("delete", table, record_id=record_id):
        delete_btn = A(I(" ", _class="icon icon-remove-sign"),
                       _href=URL(c="cms", f="post",
                                 args=[record_id, "delete"]),
                       )
    else:
        delete_btn = ""
    edit_bar = DIV(edit_btn,
                   delete_btn,
                   _class="edit-bar fright",
                   )
    if series == "Alert":
        item_class = "%s disaster" % item_class
    # Overall layout
    item = DIV(DIV(I(SPAN(" %s" % current.T(series),
                          _class="card-title",
                          ),
                     _class="icon icon-%s" % series.lower(),
                     ),
                   SPAN(A(location,
                          _href=location_url,
                          ),
                        _class="location-title",
                        ),
                   SPAN(date,
                        _class="date-title",
                        ),
                   edit_bar,
                   _class="card-header",
                   ),
               DIV(avatar,
                   DIV(DIV(body,
                           card_person,
                           _class="media",
                           ),
                       _class="media-body",
                       ),
                   _class="media",
                   ),
               _class=item_class,
               _id=item_id,
               )
    return item
# -----------------------------------------------------------------------------
def render_homepage_events(listid, resource, rfields, record, **attr):
    """
        Custom dataList item renderer for Events ("Latest Disasters")
        on the Homepage

        @param listid: the HTML ID for this list (ignored; fixed below)
        @param resource: the S3Resource to render
        @param rfields: the S3ResourceFields to render
        @param record: the record as dict
        @param attr: additional HTML attributes for the item
    """
    pkey = "event_event.id"
    # Construct the item ID
    listid = "event_datalist"
    if pkey in record:
        item_id = "%s-%s" % (listid, record[pkey])
    else:
        # template
        item_id = "%s-[id]" % listid
    item_class = "thumbnail"
    raw = record._row
    record_id = raw["event_event.id"]
    name = record["event_event.name"]
    date = record["event_event.zero_hour"]
    closed = raw["event_event.closed"]
    if closed:
        # Closed events get no edit/delete controls
        edit_bar = DIV()
    else:
        # Open events are highlighted as active disasters
        item_class = "%s disaster" % item_class
        # @ToDo: Check Permissions
        edit_bar = DIV(A(I(" ",
                           _class="icon icon-edit",
                           ),
                         _href=URL(c="event", f="event", args=[record_id]),
                         ),
                       A(I(" ",
                           _class="icon icon-remove-sign",
                           ),
                         _href=URL(c="event", f="event",
                                   args=[record_id, "delete"]),
                         ),
                       _class="edit-bar fright",
                       )
    # Render the item
    item = DIV(edit_bar,
               H5(name),
               SPAN(date,
                    _class="date-title",
                    ),
               _class=item_class,
               _id=item_id,
               )
    return item
# =============================================================================
class secondary():
    """ Custom Navigation """
    def __call__(self):
        """Serve the CSN secondary-navigation view with no extra data."""
        request = current.request
        custom_view = path.join(request.folder, "private", "templates",
                                "CSN", "views", "secondary.html")
        try:
            # File object rather than path string so compiled mode works too
            current.response.view = open(custom_view, "rb")
        except IOError:
            from gluon.http import HTTP
            raise HTTP("404", "Unable to open Custom View: %s" % custom_view)
        return dict()
# END =========================================================================
| |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import mock
import requests
from brickclient.v1 import client
from brickclient import exceptions
from brickclient.tests import utils
class AuthenticateAgainstKeystoneTests(utils.TestCase):
    """Tests for authenticating against a Keystone v2 identity service."""
    def test_authenticate_success(self):
        """A 200 /tokens reply must populate management_url and auth_token."""
        cs = client.Client("username", "password", "project_id",
                           "http://localhost:8776/v1", service_type='volume')
        # Canned Keystone v2 token + service-catalog response
        resp = {
            "access": {
                "token": {
                    "expires": "2014-11-01T03:32:15-05:00",
                    "id": "FAKE_ID",
                },
                "serviceCatalog": [
                    {
                        "type": "volume",
                        "endpoints": [
                            {
                                "region": "RegionOne",
                                "adminURL": "http://localhost:8776/v1",
                                "internalURL": "http://localhost:8776/v1",
                                "publicURL": "http://localhost:8776/v1",
                            },
                        ],
                    },
                ],
            },
        }
        auth_response = utils.TestResponse({
            "status_code": 200,
            "text": json.dumps(resp),
        })
        mock_request = mock.Mock(return_value=(auth_response))
        # The patch only needs to cover the authenticate() call, so it is
        # applied to a nested function which is invoked immediately
        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            cs.client.authenticate()
            headers = {
                'User-Agent': cs.client.USER_AGENT,
                'Content-Type': 'application/json',
                'Accept': 'application/json',
            }
            body = {
                'auth': {
                    'passwordCredentials': {
                        'username': cs.client.user,
                        'password': cs.client.password,
                    },
                    'tenantName': cs.client.projectid,
                },
            }
            token_url = cs.client.auth_url + "/tokens"
            mock_request.assert_called_with(
                "POST",
                token_url,
                headers=headers,
                data=json.dumps(body),
                allow_redirects=True,
                **self.TEST_REQUEST_BASE)
            # The client must adopt the catalog's public endpoint and token
            endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
            public_url = endpoints[0]["publicURL"].rstrip('/')
            self.assertEqual(public_url, cs.client.management_url)
            token_id = resp["access"]["token"]["id"]
            self.assertEqual(token_id, cs.client.auth_token)
        test_auth_call()
    def test_authenticate_tenant_id(self):
        """Authenticating by tenant_id must send tenantId, not tenantName."""
        cs = client.Client("username", "password",
                           auth_url="http://localhost:8776/v1",
                           tenant_id='tenant_id', service_type='volume')
        resp = {
            "access": {
                "token": {
                    "expires": "2014-11-01T03:32:15-05:00",
                    "id": "FAKE_ID",
                    "tenant": {
                        "description": None,
                        "enabled": True,
                        "id": "tenant_id",
                        "name": "demo"
                    }  # tenant associated with token
                },
                "serviceCatalog": [
                    {
                        "type": "volume",
                        "endpoints": [
                            {
                                "region": "RegionOne",
                                "adminURL": "http://localhost:8776/v1",
                                "internalURL": "http://localhost:8776/v1",
                                "publicURL": "http://localhost:8776/v1",
                            },
                        ],
                    },
                ],
            },
        }
        auth_response = utils.TestResponse({
            "status_code": 200,
            "text": json.dumps(resp),
        })
        mock_request = mock.Mock(return_value=(auth_response))
        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            cs.client.authenticate()
            headers = {
                'User-Agent': cs.client.USER_AGENT,
                'Content-Type': 'application/json',
                'Accept': 'application/json',
            }
            body = {
                'auth': {
                    'passwordCredentials': {
                        'username': cs.client.user,
                        'password': cs.client.password,
                    },
                    'tenantId': cs.client.tenant_id,
                },
            }
            token_url = cs.client.auth_url + "/tokens"
            mock_request.assert_called_with(
                "POST",
                token_url,
                headers=headers,
                data=json.dumps(body),
                allow_redirects=True,
                **self.TEST_REQUEST_BASE)
            endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
            public_url = endpoints[0]["publicURL"].rstrip('/')
            self.assertEqual(public_url, cs.client.management_url)
            token_id = resp["access"]["token"]["id"]
            self.assertEqual(token_id, cs.client.auth_token)
            # The tenant id from the token must be reflected on the client
            tenant_id = resp["access"]["token"]["tenant"]["id"]
            self.assertEqual(tenant_id, cs.client.tenant_id)
        test_auth_call()
    def test_authenticate_failure(self):
        """A 401 reply from Keystone must raise Unauthorized."""
        cs = client.Client("username", "password", "project_id",
                           "http://localhost:8776/v1")
        resp = {"unauthorized": {"message": "Unauthorized", "code": "401"}}
        auth_response = utils.TestResponse({
            "status_code": 401,
            "text": json.dumps(resp),
        })
        mock_request = mock.Mock(return_value=(auth_response))
        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            self.assertRaises(exceptions.Unauthorized, cs.client.authenticate)
        test_auth_call()
    def test_auth_redirect(self):
        """Authentication must follow a 305 redirect to the real endpoint."""
        cs = client.Client("username", "password", "project_id",
                           "http://localhost:8776/v1", service_type='volume')
        dict_correct_response = {
            "access": {
                "token": {
                    "expires": "2014-11-01T03:32:15-05:00",
                    "id": "FAKE_ID",
                },
                "serviceCatalog": [
                    {
                        "type": "volume",
                        "endpoints": [
                            {
                                "adminURL": "http://localhost:8776/v1",
                                "region": "RegionOne",
                                "internalURL": "http://localhost:8776/v1",
                                "publicURL": "http://localhost:8776/v1/",
                            },
                        ],
                    },
                ],
            },
        }
        correct_response = json.dumps(dict_correct_response)
        dict_responses = [
            {"headers": {'location': 'http://127.0.0.1:5001'},
             "status_code": 305,
             "text": "Use proxy"},
            # Configured on admin port, cinder redirects to v2.0 port.
            # When trying to connect on it, keystone auth succeed by v1.0
            # protocol (through headers) but tokens are being returned in
            # body (looks like keystone bug). Left for compatibility.
            {"headers": {},
             "status_code": 200,
             "text": correct_response},
            {"headers": {},
             "status_code": 200,
             "text": correct_response}
        ]
        responses = [(utils.TestResponse(resp)) for resp in dict_responses]
        # Serve the canned responses one by one, in order
        def side_effect(*args, **kwargs):
            return responses.pop(0)
        mock_request = mock.Mock(side_effect=side_effect)
        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            cs.client.authenticate()
            headers = {
                'User-Agent': cs.client.USER_AGENT,
                'Content-Type': 'application/json',
                'Accept': 'application/json',
            }
            body = {
                'auth': {
                    'passwordCredentials': {
                        'username': cs.client.user,
                        'password': cs.client.password,
                    },
                    'tenantName': cs.client.projectid,
                },
            }
            token_url = cs.client.auth_url + "/tokens"
            mock_request.assert_called_with(
                "POST",
                token_url,
                headers=headers,
                data=json.dumps(body),
                allow_redirects=True,
                **self.TEST_REQUEST_BASE)
            resp = dict_correct_response
            endpoints = resp["access"]["serviceCatalog"][0]['endpoints']
            # Trailing slash must be stripped from the adopted endpoint
            public_url = endpoints[0]["publicURL"].rstrip('/')
            self.assertEqual(public_url, cs.client.management_url)
            token_id = resp["access"]["token"]["id"]
            self.assertEqual(token_id, cs.client.auth_token)
        test_auth_call()
class AuthenticationTests(utils.TestCase):
    """Tests for the legacy (pre-Keystone) header-based authentication."""
    def test_authenticate_success(self):
        """A 204 reply's headers must become management_url/auth_token."""
        cs = client.Client("username", "password", "project_id", "auth_url")
        management_url = 'https://localhost/v1.1/443470'
        auth_response = utils.TestResponse({
            'status_code': 204,
            'headers': {
                'x-server-management-url': management_url,
                'x-auth-token': '1b751d74-de0c-46ae-84f0-915744b582d1',
            },
        })
        mock_request = mock.Mock(return_value=(auth_response))
        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            cs.client.authenticate()
            # Legacy auth sends the credentials as X-Auth-* headers on a GET
            headers = {
                'Accept': 'application/json',
                'X-Auth-User': 'username',
                'X-Auth-Key': 'password',
                'X-Auth-Project-Id': 'project_id',
                'User-Agent': cs.client.USER_AGENT
            }
            mock_request.assert_called_with(
                "GET",
                cs.client.auth_url,
                headers=headers,
                **self.TEST_REQUEST_BASE)
            self.assertEqual(auth_response.headers['x-server-management-url'],
                             cs.client.management_url)
            self.assertEqual(auth_response.headers['x-auth-token'],
                             cs.client.auth_token)
        test_auth_call()
    def test_authenticate_failure(self):
        """A 401 reply must raise Unauthorized."""
        cs = client.Client("username", "password", "project_id", "auth_url")
        auth_response = utils.TestResponse({"status_code": 401})
        mock_request = mock.Mock(return_value=(auth_response))
        @mock.patch.object(requests, "request", mock_request)
        def test_auth_call():
            self.assertRaises(exceptions.Unauthorized, cs.client.authenticate)
        test_auth_call()
    def test_auth_automatic(self):
        """A request with an empty management_url must re-authenticate."""
        cs = client.Client("username", "password", "project_id", "auth_url")
        http_client = cs.client
        http_client.management_url = ''
        mock_request = mock.Mock(return_value=(None, None))
        @mock.patch.object(http_client, 'request', mock_request)
        @mock.patch.object(http_client, 'authenticate')
        def test_auth_call(m):
            # ``m`` is the patched-in authenticate() mock
            http_client.get('/')
            m.assert_called()
            mock_request.assert_called()
        test_auth_call()
    def test_auth_manual(self):
        """client.authenticate() must delegate to the HTTP client's."""
        cs = client.Client("username", "password", "project_id", "auth_url")
        @mock.patch.object(cs.client, 'authenticate')
        def test_auth_call(m):
            cs.authenticate()
            m.assert_called()
        test_auth_call()
| |
import collections
Set = set  # legacy alias kept for callers that import ``Set`` from here
try:
    from collections import OrderedDict
except ImportError:
    # Pure-Python fallback for Python < 2.7, based on
    # http://code.activestate.com/recipes/576693/
    # Fix: the recipe's support imports were missing here, so __repr__
    # (_get_ident) and the view methods (KeysView & friends) raised
    # NameError when the fallback was actually used.
    try:
        from thread import get_ident as _get_ident
    except ImportError:
        from threading import get_ident as _get_ident
    from collections import KeysView, ValuesView, ItemsView

    class OrderedDict(dict):
        'Dictionary that remembers insertion order'
        # An inherited dict maps keys to values.
        # The inherited dict provides __getitem__, __len__, __contains__, and get.
        # The remaining methods are order-aware.
        # Big-O running times for all methods are the same as for regular dictionaries.
        # The internal self.__map dictionary maps keys to links in a doubly linked list.
        # The circular doubly linked list starts and ends with a sentinel element.
        # The sentinel element never gets deleted (this simplifies the algorithm).
        # Each link is stored as a list of length three:  [PREV, NEXT, KEY].
        def __init__(self, *args, **kwds):
            '''Initialize an ordered dictionary.  Signature is the same as for
            regular dictionaries, but keyword arguments are not recommended
            because their insertion order is arbitrary.
            '''
            if len(args) > 1:
                raise TypeError('expected at most 1 arguments, got %d' % len(args))
            try:
                self.__root
            except AttributeError:
                self.__root = root = []                     # sentinel node
                root[:] = [root, root, None]
                self.__map = {}
            self.__update(*args, **kwds)
        def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
            'od.__setitem__(i, y) <==> od[i]=y'
            # Setting a new item creates a new link which goes at the end of the linked
            # list, and the inherited dictionary is updated with the new key/value pair.
            if key not in self:
                root = self.__root
                last = root[0]
                last[1] = root[0] = self.__map[key] = [last, root, key]
            dict_setitem(self, key, value)
        def __delitem__(self, key, dict_delitem=dict.__delitem__):
            'od.__delitem__(y) <==> del od[y]'
            # Deleting an existing item uses self.__map to find the link which is
            # then removed by updating the links in the predecessor and successor nodes.
            dict_delitem(self, key)
            link_prev, link_next, key = self.__map.pop(key)
            link_prev[1] = link_next
            link_next[0] = link_prev
        def __iter__(self):
            'od.__iter__() <==> iter(od)'
            root = self.__root
            curr = root[1]
            while curr is not root:
                yield curr[2]
                curr = curr[1]
        def __reversed__(self):
            'od.__reversed__() <==> reversed(od)'
            root = self.__root
            curr = root[0]
            while curr is not root:
                yield curr[2]
                curr = curr[0]
        def clear(self):
            'od.clear() -> None.  Remove all items from od.'
            try:
                for node in self.__map.values():
                    del node[:]
                root = self.__root
                root[:] = [root, root, None]
                self.__map.clear()
            except AttributeError:
                pass
            dict.clear(self)
        def popitem(self, last=True):
            '''od.popitem() -> (k, v), return and remove a (key, value) pair.
            Pairs are returned in LIFO order if last is true or FIFO order if false.
            '''
            if not self:
                raise KeyError('dictionary is empty')
            root = self.__root
            if last:
                link = root[0]
                link_prev = link[0]
                link_prev[1] = root
                root[0] = link_prev
            else:
                link = root[1]
                link_next = link[1]
                root[1] = link_next
                link_next[0] = root
            key = link[2]
            del self.__map[key]
            value = dict.pop(self, key)
            return key, value
        # -- the following methods do not depend on the internal structure --
        def keys(self):
            'od.keys() -> list of keys in od'
            return list(self)
        def values(self):
            'od.values() -> list of values in od'
            return [self[key] for key in self]
        def items(self):
            'od.items() -> list of (key, value) pairs in od'
            return [(key, self[key]) for key in self]
        def iterkeys(self):
            'od.iterkeys() -> an iterator over the keys in od'
            return iter(self)
        def itervalues(self):
            'od.itervalues -> an iterator over the values in od'
            for k in self:
                yield self[k]
        def iteritems(self):
            'od.iteritems -> an iterator over the (key, value) items in od'
            for k in self:
                yield (k, self[k])
        def update(*args, **kwds):
            '''od.update(E, **F) -> None.  Update od from dict/iterable E and F.
            If E is a dict instance, does:           for k in E: od[k] = E[k]
            If E has a .keys() method, does:         for k in E.keys(): od[k] = E[k]
            Or if E is an iterable of items, does:   for k, v in E: od[k] = v
            In either case, this is followed by:     for k, v in F.items(): od[k] = v
            '''
            # self is taken from *args so that a key named "self" can still
            # be passed through **kwds
            if len(args) > 2:
                raise TypeError('update() takes at most 2 positional '
                                'arguments (%d given)' % (len(args),))
            elif not args:
                raise TypeError('update() takes at least 1 argument (0 given)')
            self = args[0]
            # Make progressively weaker assumptions about "other"
            other = ()
            if len(args) == 2:
                other = args[1]
            if isinstance(other, dict):
                for key in other:
                    self[key] = other[key]
            elif hasattr(other, 'keys'):
                for key in other.keys():
                    self[key] = other[key]
            else:
                for key, value in other:
                    self[key] = value
            for key, value in kwds.items():
                self[key] = value
        __update = update  # let subclasses override update without breaking __init__
        __marker = object()
        def pop(self, key, default=__marker):
            '''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
            If key is not found, d is returned if given, otherwise KeyError is raised.
            '''
            if key in self:
                result = self[key]
                del self[key]
                return result
            if default is self.__marker:
                raise KeyError(key)
            return default
        def setdefault(self, key, default=None):
            'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
            if key in self:
                return self[key]
            self[key] = default
            return default
        def __repr__(self, _repr_running={}):
            'od.__repr__() <==> repr(od)'
            # Guard against infinite recursion on self-referential dicts
            call_key = id(self), _get_ident()
            if call_key in _repr_running:
                return '...'
            _repr_running[call_key] = 1
            try:
                if not self:
                    return '%s()' % (self.__class__.__name__,)
                return '%s(%r)' % (self.__class__.__name__, self.items())
            finally:
                del _repr_running[call_key]
        def __reduce__(self):
            'Return state information for pickling'
            items = [[k, self[k]] for k in self]
            inst_dict = vars(self).copy()
            for k in vars(OrderedDict()):
                inst_dict.pop(k, None)
            if inst_dict:
                return (self.__class__, (items,), inst_dict)
            return self.__class__, (items,)
        def copy(self):
            'od.copy() -> a shallow copy of od'
            return self.__class__(self)
        @classmethod
        def fromkeys(cls, iterable, value=None):
            '''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
            and values equal to v (which defaults to None).
            '''
            d = cls()
            for key in iterable:
                d[key] = value
            return d
        def __eq__(self, other):
            '''od.__eq__(y) <==> od==y.  Comparison to another OD is order-sensitive
            while comparison to a regular mapping is order-insensitive.
            '''
            if isinstance(other, OrderedDict):
                return len(self)==len(other) and self.items() == other.items()
            return dict.__eq__(self, other)
        def __ne__(self, other):
            return not self == other
        # -- the following methods are only used in Python 2.7 --
        def viewkeys(self):
            "od.viewkeys() -> a set-like object providing a view on od's keys"
            return KeysView(self)
        def viewvalues(self):
            "od.viewvalues() -> an object providing a view on od's values"
            return ValuesView(self)
        def viewitems(self):
            "od.viewitems() -> a set-like object providing a view on od's items"
            return ItemsView(self)
KEY, PREV, NEXT = range(3)

# ``MutableSet`` lives in ``collections.abc`` since Python 3.3 and the old
# ``collections`` alias was removed in 3.10, so import it portably.
try:
    from collections.abc import MutableSet as _MutableSet
except ImportError:  # Python 2
    from collections import MutableSet as _MutableSet


class OrderedSet(_MutableSet):
    """
    A set that remembers insertion order.

    From: http://code.activestate.com/recipes/576694/

    Backed by ``self.map`` (key -> cell) plus a circular doubly linked
    list of ``[key, prev, next]`` cells anchored at the ``self.end``
    sentinel.
    """

    def __init__(self, iterable=None):
        self.end = end = []
        end += [None, end, end]         # sentinel node for doubly linked list
        self.map = {}                   # key --> [key, prev, next]
        if iterable is not None:
            self |= iterable

    def __len__(self):
        return len(self.map)

    def __contains__(self, key):
        return key in self.map

    def add(self, key):
        'Append *key* at the end, unless it is already present.'
        if key not in self.map:
            end = self.end
            curr = end[PREV]
            curr[NEXT] = end[PREV] = self.map[key] = [key, curr, end]

    def discard(self, key):
        'Remove *key* if present; unlink its list cell.'
        if key in self.map:
            key, prev, nxt = self.map.pop(key)
            prev[NEXT] = nxt
            nxt[PREV] = prev

    def __iter__(self):
        end = self.end
        curr = end[NEXT]
        while curr is not end:
            yield curr[KEY]
            curr = curr[NEXT]

    def __reversed__(self):
        end = self.end
        curr = end[PREV]
        while curr is not end:
            yield curr[KEY]
            curr = curr[PREV]

    def pop(self, last=True):
        'Remove and return the last element (or the first if last=False).'
        if not self:
            raise KeyError('set is empty')
        key = next(reversed(self)) if last else next(iter(self))
        self.discard(key)
        return key

    def __repr__(self):
        if not self:
            return '%s()' % (self.__class__.__name__,)
        return '%s(%r)' % (self.__class__.__name__, list(self))

    def __eq__(self, other):
        # Order-sensitive against another OrderedSet,
        # order-insensitive against any other collection
        if isinstance(other, OrderedSet):
            return len(self) == len(other) and list(self) == list(other)
        return set(self) == set(other)

    def __del__(self):
        self.clear()                    # remove circular references
| |
#!/usr/bin/env python
from __future__ import print_function
from builtins import str
import sys
import pmagpy.pmag as pmag
def main(command_line=True, **kwargs):
"""
NAME
jr6_txt_magic.py
DESCRIPTION
converts JR6 .txt format files to magic_measurements format files
SYNTAX
jr6_txt_magic.py [command line options]
OPTIONS
-h: prints the help message and quits.
-f FILE: specify input file, or
-F FILE: specify output file, default is magic_measurements.txt
-Fsa: specify er_samples format file for appending, default is new er_samples.txt (Not working yet)
-spc NUM : specify number of characters to designate a specimen, default = 1
-loc LOCNAME : specify location/study name
-A: don't average replicate measurements
-ncn NCON: specify sample naming convention (6 and 7 not yet implemented)
-mcd [SO-MAG,SO-SUN,SO-SIGHT...] supply how these samples were oriented
-v NUM : specify the volume of the sample, default 2.5cm^3.
Sample naming convention:
[1] XXXXY: where XXXX is an arbitrary length site designation and Y
is the single character sample designation. e.g., TG001a is the
first sample from site TG001. [default]
[2] XXXX-YY: YY sample from site XXXX (XXX, YY of arbitary length)
[3] XXXX.YY: YY sample from site XXXX (XXX, YY of arbitary length)
[4-Z] XXXX[YYY]: YYY is sample designation with Z characters from site XXX
[5] site name same as sample
[6] site is entered under a separate column NOT CURRENTLY SUPPORTED
[7-Z] [XXXX]YYY: XXXX is site designation with Z characters with sample name XXXXYYYY
NB: all others you will have to customize your self
or e-mail ltauxe@ucsd.edu for help.
INPUT
JR6 .txt format file
"""
# initialize some stuff
noave=0
volume = 2.5 * 1e-6 # default volume is 2.5 cm^3 (2.5 * 1e-6 meters^3)
inst=""
samp_con,Z='1',""
missing=1
demag="N"
er_location_name="unknown"
citation='This study'
args=sys.argv
meth_code="LP-NO"
specnum=-1
MagRecs=[]
version_num=pmag.get_version()
Samps=[] # keeps track of sample orientations
user=""
mag_file=""
dir_path='.'
ErSamps=[]
SampOuts=[]
samp_file = 'er_samples.txt'
meas_file = 'magic_measurements.txt'
#
# get command line arguments
#
if command_line:
if '-WD' in sys.argv:
ind = sys.argv.index('-WD')
dir_path=sys.argv[ind+1]
if '-ID' in sys.argv:
ind = sys.argv.index('-ID')
input_dir_path = sys.argv[ind+1]
else:
input_dir_path = dir_path
output_dir_path = dir_path
if "-h" in args:
print(main.__doc__)
return False
if '-F' in args:
ind=args.index("-F")
meas_file = args[ind+1]
if '-Fsa' in args:
ind = args.index("-Fsa")
samp_file = args[ind+1]
#try:
# open(samp_file,'r')
# ErSamps,file_type=pmag.magic_read(samp_file)
# print 'sample information will be appended to ', samp_file
#except:
# print samp_file,' not found: sample information will be stored in new er_samples.txt file'
# samp_file = output_dir_path+'/er_samples.txt'
if '-f' in args:
ind = args.index("-f")
mag_file= args[ind+1]
if "-spc" in args:
ind = args.index("-spc")
specnum = int(args[ind+1])
if "-ncn" in args:
ind=args.index("-ncn")
samp_con=sys.argv[ind+1]
if "-loc" in args:
ind=args.index("-loc")
er_location_name=args[ind+1]
if "-A" in args: noave=1
if "-mcd" in args:
ind=args.index("-mcd")
meth_code=args[ind+1]
if "-v" in args:
ind=args.index("-v")
volume=float(args[ind+1]) * 1e-6
if not command_line:
dir_path = kwargs.get('dir_path', '.')
input_dir_path = kwargs.get('input_dir_path', dir_path)
output_dir_path = dir_path
meas_file = kwargs.get('meas_file', 'magic_measurements.txt')
mag_file = kwargs.get('mag_file')
samp_file = kwargs.get('samp_file', 'er_samples.txt')
specnum = kwargs.get('specnum', 1)
samp_con = kwargs.get('samp_con', '1')
er_location_name = kwargs.get('er_location_name', '')
noave = kwargs.get('noave', 0) # default (0) means DO average
meth_code = kwargs.get('meth_code', "LP-NO")
volume = float(kwargs.get('volume', 0))
if not volume:
volume = 2.5 * 1e-6 #default volume is a 2.5 cm cube, translated to meters cubed
else:
#convert cm^3 to m^3
volume *= 1e-6
# format variables
mag_file = input_dir_path+"/" + mag_file
meas_file = output_dir_path+"/" + meas_file
samp_file = output_dir_path+"/" + samp_file
if specnum!=0:
specnum=-specnum
if "4" in samp_con:
if "-" not in samp_con:
print("option [4] must be in form 4-Z where Z is an integer")
return False, "option [4] must be in form 4-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="4"
if "7" in samp_con:
if "-" not in samp_con:
print("option [7] must be in form 7-Z where Z is an integer")
return False, "option [7] must be in form 7-Z where Z is an integer"
else:
Z=samp_con.split("-")[1]
samp_con="7"
ErSampRec,ErSiteRec={},{}
# parse data
data=open(mag_file,'r')
line=data.readline()
line=data.readline()
line=data.readline()
while line !='':
parsedLine=line.split()
sampleName=parsedLine[0]
demagLevel=parsedLine[2]
date=parsedLine[3]
line=data.readline()
line=data.readline()
line=data.readline()
line=data.readline()
parsedLine=line.split()
specimenAngleDec=parsedLine[1]
specimenAngleInc=parsedLine[2]
while parsedLine[0] != 'MEAN' :
line=data.readline()
parsedLine=line.split()
if len(parsedLine) == 0:
parsedLine=["Hello"]
Mx=parsedLine[1]
My=parsedLine[2]
Mz=parsedLine[3]
line=data.readline()
line=data.readline()
parsedLine=line.split()
splitExp = parsedLine[2].split('A')
intensityVolStr=parsedLine[1] + splitExp[0]
intensityVol = float(intensityVolStr)
# check and see if Prec is too big and messes with the parcing.
precisionStr=''
if len(parsedLine) == 6: #normal line
precisionStr=parsedLine[5][0:-1]
else:
precisionStr=parsedLine[4][0:-1]
precisionPer = float(precisionStr)
precision=intensityVol*precisionPer/100
while parsedLine[0] != 'SPEC.' :
line=data.readline()
parsedLine=line.split()
if len(parsedLine) == 0:
parsedLine=["Hello"]
specimenDec=parsedLine[2]
specimenInc=parsedLine[3]
line=data.readline()
line=data.readline()
parsedLine=line.split()
geographicDec=parsedLine[1]
geographicInc=parsedLine[2]
# Add data to various MagIC data tables.
er_specimen_name = sampleName
if specnum!=0:
er_sample_name=er_specimen_name[:specnum]
else:
er_sample_name=er_specimen_name
if int(samp_con) in [1, 2, 3, 4, 5, 7]:
er_site_name=pmag.parse_site(er_sample_name,samp_con,Z)
else:
print("-W- Using unreognized sample convention option: ", samp_con)
# else:
# if 'er_site_name' in ErSampRec.keys():er_site_name=ErSampRec['er_site_name']
# if 'er_location_name' in ErSampRec.keys():er_location_name=ErSampRec['er_location_name']
# check sample list(SampOuts) to see if sample already exists in list before adding new sample info
sampleFlag=0
for sampRec in SampOuts:
if sampRec['er_sample_name'] == er_sample_name:
sampleFlag=1
break
if sampleFlag == 0:
ErSampRec['er_sample_name']=er_sample_name
ErSampRec['sample_azimuth']=specimenAngleDec
sample_dip=str(float(specimenAngleInc)-90.0) #convert to magic orientation
ErSampRec['sample_dip']=sample_dip
ErSampRec['magic_method_codes']=meth_code
ErSampRec['er_location_name']=er_location_name
ErSampRec['er_site_name']=er_site_name
ErSampRec['er_citation_names']='This study'
SampOuts.append(ErSampRec.copy())
MagRec={}
MagRec['measurement_description']='Date: '+date
MagRec["er_citation_names"]="This study"
MagRec['er_location_name']=er_location_name
MagRec['er_site_name']=er_site_name
MagRec['er_sample_name']=er_sample_name
MagRec['magic_software_packages']=version_num
MagRec["treatment_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_temp"]='%8.3e' % (273) # room temp in kelvin
MagRec["measurement_flag"]='g'
MagRec["measurement_standard"]='u'
MagRec["measurement_number"]='1'
MagRec["er_specimen_name"]=er_specimen_name
MagRec["treatment_ac_field"]='0'
if demagLevel == 'NRM':
meas_type="LT-NO"
elif demagLevel[0] == 'A':
meas_type="LT-AF-Z"
treat=float(demagLevel[1:])
MagRec["treatment_ac_field"]='%8.3e' %(treat*1e-3) # convert from mT to tesla
elif demagLevel[0] == 'T':
meas_type="LT-T-Z"
treat=float(demagLevel[1:])
MagRec["treatment_temp"]='%8.3e' % (treat+273.) # temp in kelvin
else:
print("measurement type unknown", demag_level)
return False, "measurement type unknown"
MagRec["measurement_magn_moment"]=str(intensityVol*volume) # Am^2
MagRec["measurement_magn_volume"]=intensityVolStr # A/m
MagRec["measurement_dec"]=specimenDec
MagRec["measurement_inc"]=specimenInc
MagRec['magic_method_codes']=meas_type
MagRecs.append(MagRec.copy())
#read lines till end of record
line=data.readline()
line=data.readline()
line=data.readline()
line=data.readline()
line=data.readline()
# read all the rest of the special characters. Some data files not consistantly formatted.
while (len(line) <=3 and line!=''):
line=data.readline()
#end of data while loop
MagOuts=pmag.measurements_methods(MagRecs,noave)
pmag.magic_write(samp_file,SampOuts,'er_samples')
print("sample orientations put in ",samp_file)
pmag.magic_write(meas_file,MagOuts,'magic_measurements')
print("results put in ",meas_file)
return True, meas_file
def do_help():
    """Return the docstring of main() as the command-line help text."""
    help_text = main.__doc__
    return help_text
# Entry point: run the converter only when executed as a script, not on import.
if __name__ == "__main__":
    main()
| |
#! /usr/bin/env python
#
# Author: Damian Eads
# Date: April 17, 2008
#
# Copyright (C) 2008 Damian Eads
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
#      disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import hierarchy_test_data
import numpy as np
import scipy.cluster.hierarchy
from numpy.testing import (TestCase, run_module_suite, dec, assert_raises,
assert_allclose, assert_equal, assert_, assert_warns)
from scipy._lib.six import xrange, u
from scipy.cluster._hierarchy import Heap
from scipy.cluster.hierarchy import (
ClusterWarning, linkage, from_mlab_linkage, to_mlab_linkage,
num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster,
is_isomorphic, single, leaders, correspond, is_monotonic, maxdists, maxinconsts, maxRstat,
is_valid_linkage, is_valid_im, to_tree, leaves_list, dendrogram,
set_link_color_palette, cut_tree, _order_cluster_tree,
_hierarchy, _LINKAGE_METHODS)
from scipy.spatial.distance import pdist
# Matplotlib is not a scipy dependency but is optionally used in dendrogram, so
# check if it's available
try:
    import matplotlib
    # and set the backend to be Agg (no gui)
    matplotlib.use('Agg')
    # before importing pyplot
    import matplotlib.pyplot as plt
    have_matplotlib = True
except ImportError:
    # Only a missing matplotlib should disable the plotting tests; any other
    # failure (broken install, backend error) should surface instead of being
    # silently swallowed by a bare ``except``.
    have_matplotlib = False
class TestLinkage(object):
def test_linkage_non_finite_elements_in_distance_matrix(self):
# Tests linkage(Y) where Y contains a non-finite element (e.g. NaN or Inf).
# Exception expected.
y = np.zeros((6,))
y[0] = np.nan
assert_raises(ValueError, linkage, y)
def test_linkage_empty_distance_matrix(self):
# Tests linkage(Y) where Y is a 0x4 linkage matrix. Exception expected.
y = np.zeros((0,))
assert_raises(ValueError, linkage, y)
def test_linkage_tdist(self):
for method in ['single', 'complete', 'average', 'weighted', u('single')]:
yield self.check_linkage_tdist, method
def check_linkage_tdist(self, method):
# Tests linkage(Y, method) on the tdist data set.
Z = linkage(hierarchy_test_data.ytdist, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_ytdist_' + method)
assert_allclose(Z, expectedZ, atol=1e-10)
def test_linkage_X(self):
for method in ['centroid', 'median', 'ward']:
yield self.check_linkage_q, method
def check_linkage_q(self, method):
# Tests linkage(Y, method) on the Q data set.
Z = linkage(hierarchy_test_data.X, method)
expectedZ = getattr(hierarchy_test_data, 'linkage_X_' + method)
assert_allclose(Z, expectedZ, atol=1e-06)
y = scipy.spatial.distance.pdist(hierarchy_test_data.X,
metric="euclidean")
Z = linkage(y, method)
assert_allclose(Z, expectedZ, atol=1e-06)
def test_compare_with_trivial(self):
rng = np.random.RandomState(0)
n = 20
X = rng.rand(n, 2)
d = pdist(X)
for method, code in _LINKAGE_METHODS.items():
Z_trivial = _hierarchy.linkage(d, n, code)
Z = linkage(d, method)
assert_allclose(Z_trivial, Z, rtol=1e-14, atol=1e-15)
class TestLinkageTies(object):
    """Linkage on a tiny collinear data set whose merge distances tie."""

    # Reference linkage matrices for three collinear points, per method.
    _expectations = {
        'single': np.array([[0, 1, 1.41421356, 2],
                            [2, 3, 1.41421356, 3]]),
        'complete': np.array([[0, 1, 1.41421356, 2],
                              [2, 3, 2.82842712, 3]]),
        'average': np.array([[0, 1, 1.41421356, 2],
                             [2, 3, 2.12132034, 3]]),
        'weighted': np.array([[0, 1, 1.41421356, 2],
                              [2, 3, 2.12132034, 3]]),
        'centroid': np.array([[0, 1, 1.41421356, 2],
                              [2, 3, 2.12132034, 3]]),
        'median': np.array([[0, 1, 1.41421356, 2],
                            [2, 3, 2.12132034, 3]]),
        'ward': np.array([[0, 1, 1.41421356, 2],
                          [2, 3, 2.44948974, 3]]),
    }

    def test_linkage_ties(self):
        for linkage_method in ('single', 'complete', 'average', 'weighted',
                               'centroid', 'median', 'ward'):
            yield self.check_linkage_ties, linkage_method

    def check_linkage_ties(self, method):
        # Three collinear points: the two nearest-neighbour distances tie.
        points = np.array([[-1, -1], [0, 0], [1, 1]])
        assert_allclose(linkage(points, method=method),
                        self._expectations[method], atol=1e-06)
class TestInconsistent(object):
    """Tests for scipy.cluster.hierarchy.inconsistent."""

    def test_inconsistent_tdist(self):
        for depth in hierarchy_test_data.inconsistent_ytdist:
            yield self.check_inconsistent_tdist, depth

    def check_inconsistent_tdist(self, depth):
        # inconsistent() on the single-linkage tdist tree must match the
        # stored reference matrix for this depth.
        single_tree = hierarchy_test_data.linkage_ytdist_single
        assert_allclose(inconsistent(single_tree, depth),
                        hierarchy_test_data.inconsistent_ytdist[depth])
class TestCopheneticDistance(object):
    """Tests for scipy.cluster.hierarchy.cophenet."""

    def test_linkage_cophenet_tdist_Z(self):
        # cophenet(Z) alone returns the condensed cophenetic distances.
        reference = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295,
                              295, 295, 138, 219, 295, 295])
        result = cophenet(hierarchy_test_data.linkage_ytdist_single)
        assert_allclose(result, reference, atol=1e-10)

    def test_linkage_cophenet_tdist_Z_Y(self):
        # cophenet(Z, Y) also returns the cophenetic correlation coefficient.
        c, dists = cophenet(hierarchy_test_data.linkage_ytdist_single,
                            hierarchy_test_data.ytdist)
        reference = np.array([268, 295, 255, 255, 295, 295, 268, 268, 295,
                              295, 295, 138, 219, 295, 295])
        assert_allclose(c, 0.639931296433393415057366837573, atol=1e-10)
        assert_allclose(dists, reference, atol=1e-10)
class TestMLabLinkageConversion(object):
    """Round-trip tests for from_mlab_linkage / to_mlab_linkage."""

    def test_mlab_linkage_conversion_empty(self):
        # Empty input maps to an empty array in both directions.
        empty = np.asarray([])
        assert_equal(from_mlab_linkage([]), empty)
        assert_equal(to_mlab_linkage([]), empty)

    def test_mlab_linkage_conversion_single_row(self):
        # A single merge row: MATLAB form is 1-based and omits counts.
        scipy_form = np.asarray([[0., 1., 3., 2.]])
        mlab_form = [[1, 2, 3]]
        assert_equal(from_mlab_linkage(mlab_form), scipy_form)
        assert_equal(to_mlab_linkage(scipy_form), mlab_form)

    def test_mlab_linkage_conversion_multiple_rows(self):
        # Multiple merge rows convert both ways.
        mlab_form = np.asarray([[3, 6, 138], [4, 5, 219],
                                [1, 8, 255], [2, 9, 268], [7, 10, 295]])
        scipy_form = np.array([[2., 5., 138., 2.],
                               [3., 4., 219., 2.],
                               [0., 7., 255., 3.],
                               [1., 8., 268., 4.],
                               [6., 9., 295., 6.]],
                              dtype=np.double)
        assert_equal(from_mlab_linkage(mlab_form), scipy_form)
        assert_equal(to_mlab_linkage(scipy_form), mlab_form)
class TestFcluster(object):
    """Tests for fcluster and fclusterdata."""

    def test_fclusterdata(self):
        for criterion in ('inconsistent', 'distance', 'maxclust'):
            for t in getattr(hierarchy_test_data, 'fcluster_' + criterion):
                yield self.check_fclusterdata, t, criterion

    def check_fclusterdata(self, t, criterion):
        # Flat clustering straight from observations must match the stored
        # reference labelling, up to a relabelling of cluster ids.
        reference = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
        labels = fclusterdata(hierarchy_test_data.Q_X,
                              criterion=criterion, t=t)
        assert_(is_isomorphic(labels, reference))

    def test_fcluster(self):
        for criterion in ('inconsistent', 'distance', 'maxclust'):
            for t in getattr(hierarchy_test_data, 'fcluster_' + criterion):
                yield self.check_fcluster, t, criterion

    def check_fcluster(self, t, criterion):
        # Flat clustering from a single-linkage tree must match the reference.
        reference = getattr(hierarchy_test_data, 'fcluster_' + criterion)[t]
        labels = fcluster(single(hierarchy_test_data.Q_X),
                          criterion=criterion, t=t)
        assert_(is_isomorphic(labels, reference))

    def test_fcluster_monocrit(self):
        for t in hierarchy_test_data.fcluster_distance:
            yield self.check_fcluster_monocrit, t
        for t in hierarchy_test_data.fcluster_maxclust:
            yield self.check_fcluster_maxclust_monocrit, t

    def check_fcluster_monocrit(self, t):
        # 'monocrit' with maxdists(Z) must reproduce the 'distance' criterion.
        reference = hierarchy_test_data.fcluster_distance[t]
        tree = single(hierarchy_test_data.Q_X)
        labels = fcluster(tree, t, criterion='monocrit',
                          monocrit=maxdists(tree))
        assert_(is_isomorphic(labels, reference))

    def check_fcluster_maxclust_monocrit(self, t):
        # 'maxclust_monocrit' with maxdists(Z) must reproduce 'maxclust'.
        reference = hierarchy_test_data.fcluster_maxclust[t]
        tree = single(hierarchy_test_data.Q_X)
        labels = fcluster(tree, t, criterion='maxclust_monocrit',
                          monocrit=maxdists(tree))
        assert_(is_isomorphic(labels, reference))
class TestLeaders(object):
    """Tests for scipy.cluster.hierarchy.leaders."""

    def test_leaders_single(self):
        # Leaders of a 3-cluster flat clustering built by single linkage.
        observations = hierarchy_test_data.Q_X
        tree = linkage(pdist(observations))
        flat = fcluster(tree, criterion='maxclust', t=3)
        expected = (np.array([53, 55, 56]), np.array([2, 3, 1]))
        assert_equal(leaders(tree, flat), expected)
class TestIsIsomorphic(object):
    """Tests for is_isomorphic: equality of flat clusterings up to relabelling."""

    def test_is_isomorphic_1(self):
        # One flat cluster under two different labels.
        assert_(is_isomorphic([1, 1, 1], [2, 2, 2]))
        assert_(is_isomorphic([2, 2, 2], [1, 1, 1]))

    def test_is_isomorphic_2(self):
        # Two flat clusters under two different labellings.
        assert_(is_isomorphic([1, 7, 1], [2, 3, 2]))
        assert_(is_isomorphic([2, 3, 2], [1, 7, 1]))

    def test_is_isomorphic_3(self):
        # Empty clusterings are trivially isomorphic.
        assert_(is_isomorphic([], []))

    def test_is_isomorphic_4A(self):
        # Three singleton clusters with permuted labels: isomorphic.
        assert_(is_isomorphic([1, 2, 3], [1, 3, 2]))
        assert_(is_isomorphic([1, 3, 2], [1, 2, 3]))

    def test_is_isomorphic_4B(self):
        # Same labels but incompatible assignment: not isomorphic.
        assert_(not is_isomorphic([1, 2, 3, 3], [1, 3, 2, 3]))
        assert_(not is_isomorphic([1, 3, 2, 3], [1, 2, 3, 3]))

    def test_is_isomorphic_4C(self):
        # Arbitrary label values, consistent assignment: isomorphic.
        assert_(is_isomorphic([7, 2, 3], [6, 3, 2]))
        assert_(is_isomorphic([6, 3, 2], [7, 2, 3]))

    def test_is_isomorphic_5(self):
        # Random clusterings with a random label permutation: isomorphic.
        for nclusters in (2, 3, 5):
            yield self.help_is_isomorphic_randperm, 1000, nclusters

    def test_is_isomorphic_6(self):
        # As above, but with a handful of labels corrupted: not isomorphic.
        for nclusters in (2, 3, 5):
            yield self.help_is_isomorphic_randperm, 1000, nclusters, True, 5

    def test_is_isomorphic_7(self):
        # Regression test for gh-6271: different cluster counts.
        assert_(not is_isomorphic([1, 2, 3], [1, 1, 1]))

    def help_is_isomorphic_randperm(self, nobs, nclusters, noniso=False,
                                    nerrors=0):
        # Build a random labelling and a permuted copy; optionally corrupt
        # ``nerrors`` entries so the pair stops being isomorphic.
        for _ in range(3):
            labels = np.int_(np.random.rand(nobs) * nclusters)
            relabelled = np.zeros(labels.size, dtype=np.int_)
            perm = np.random.permutation(nclusters)
            for idx in range(labels.shape[0]):
                relabelled[idx] = perm[labels[idx]]
            if noniso:
                corrupt = np.random.permutation(nobs)[0:nerrors]
                relabelled[corrupt] += 1
                relabelled[corrupt] %= nclusters
            assert_(is_isomorphic(labels, relabelled) == (not noniso))
            assert_(is_isomorphic(relabelled, labels) == (not noniso))
class TestIsValidLinkage(object):
    """Tests for is_valid_linkage."""

    def test_is_valid_linkage_various_size(self):
        for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
                                  (1, 4, True), (2, 4, True)]:
            yield self.check_is_valid_linkage_various_size, nrow, ncol, valid

    def check_is_valid_linkage_various_size(self, nrow, ncol, valid):
        # Only (k, 4) double matrices are structurally valid linkages.
        full = np.asarray([[0, 1, 3.0, 2, 5],
                           [3, 2, 4.0, 3, 3]], dtype=np.double)
        Z = full[:nrow, :ncol]
        assert_(is_valid_linkage(Z) == valid)
        if not valid:
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_int_type(self):
        # Integer dtype is rejected with TypeError.
        Z = np.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=int)
        assert_(not is_valid_linkage(Z))
        assert_raises(TypeError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_empty(self):
        # A 0-row linkage is invalid.
        Z = np.zeros((0, 4), dtype=np.double)
        assert_(not is_valid_linkage(Z))
        assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_4_and_up(self):
        # Genuine linkages over 4..13 observations are valid.
        for nobs in range(4, 15, 3):
            Z = linkage(np.random.rand(nobs * (nobs - 1) // 2))
            assert_(is_valid_linkage(Z))

    def test_is_valid_linkage_4_and_up_neg_index_left(self):
        # A negative cluster id in column 0 invalidates the linkage.
        for nobs in range(4, 15, 3):
            Z = linkage(np.random.rand(nobs * (nobs - 1) // 2))
            Z[nobs // 2, 0] = -2
            assert_(not is_valid_linkage(Z))
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_4_and_up_neg_index_right(self):
        # A negative cluster id in column 1 invalidates the linkage.
        for nobs in range(4, 15, 3):
            Z = linkage(np.random.rand(nobs * (nobs - 1) // 2))
            Z[nobs // 2, 1] = -2
            assert_(not is_valid_linkage(Z))
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_4_and_up_neg_dist(self):
        # A negative merge distance invalidates the linkage.
        for nobs in range(4, 15, 3):
            Z = linkage(np.random.rand(nobs * (nobs - 1) // 2))
            Z[nobs // 2, 2] = -0.5
            assert_(not is_valid_linkage(Z))
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)

    def test_is_valid_linkage_4_and_up_neg_counts(self):
        # A negative cluster count invalidates the linkage.
        for nobs in range(4, 15, 3):
            Z = linkage(np.random.rand(nobs * (nobs - 1) // 2))
            Z[nobs // 2, 3] = -2
            assert_(not is_valid_linkage(Z))
            assert_raises(ValueError, is_valid_linkage, Z, throw=True)
class TestIsValidInconsistent(object):
    """Tests for is_valid_im (inconsistency-matrix validation)."""

    def test_is_valid_im_int_type(self):
        # Integer dtype is rejected with TypeError.
        R = np.asarray([[0, 1, 3.0, 2],
                        [3, 2, 4.0, 3]], dtype=int)
        assert_(not is_valid_im(R))
        assert_raises(TypeError, is_valid_im, R, throw=True)

    def test_is_valid_im_various_size(self):
        for nrow, ncol, valid in [(2, 5, False), (2, 3, False),
                                  (1, 4, True), (2, 4, True)]:
            yield self.check_is_valid_im_various_size, nrow, ncol, valid

    def check_is_valid_im_various_size(self, nrow, ncol, valid):
        # Only (k, 4) double matrices are structurally valid.
        full = np.asarray([[0, 1, 3.0, 2, 5],
                           [3, 2, 4.0, 3, 3]], dtype=np.double)
        R = full[:nrow, :ncol]
        assert_(is_valid_im(R) == valid)
        if not valid:
            assert_raises(ValueError, is_valid_im, R, throw=True)

    def test_is_valid_im_empty(self):
        # A 0-row inconsistency matrix is invalid.
        R = np.zeros((0, 4), dtype=np.double)
        assert_(not is_valid_im(R))
        assert_raises(ValueError, is_valid_im, R, throw=True)

    def test_is_valid_im_4_and_up(self):
        # Genuine inconsistency matrices over 4..13 observations are valid.
        for nobs in range(4, 15, 3):
            Z = linkage(np.random.rand(nobs * (nobs - 1) // 2))
            assert_(is_valid_im(inconsistent(Z)))

    def test_is_valid_im_4_and_up_neg_index_left(self):
        # A negative link-height mean (column 0) invalidates the matrix.
        for nobs in range(4, 15, 3):
            Z = linkage(np.random.rand(nobs * (nobs - 1) // 2))
            R = inconsistent(Z)
            R[nobs // 2, 0] = -2.0
            assert_(not is_valid_im(R))
            assert_raises(ValueError, is_valid_im, R, throw=True)

    def test_is_valid_im_4_and_up_neg_index_right(self):
        # A negative link-height standard deviation (column 1) invalidates it.
        for nobs in range(4, 15, 3):
            Z = linkage(np.random.rand(nobs * (nobs - 1) // 2))
            R = inconsistent(Z)
            R[nobs // 2, 1] = -2.0
            assert_(not is_valid_im(R))
            assert_raises(ValueError, is_valid_im, R, throw=True)

    def test_is_valid_im_4_and_up_neg_dist(self):
        # A negative link count (column 2) invalidates the matrix.
        for nobs in range(4, 15, 3):
            Z = linkage(np.random.rand(nobs * (nobs - 1) // 2))
            R = inconsistent(Z)
            R[nobs // 2, 2] = -0.5
            assert_(not is_valid_im(R))
            assert_raises(ValueError, is_valid_im, R, throw=True)
class TestNumObsLinkage(TestCase):
    """Tests for num_obs_linkage."""

    def test_num_obs_linkage_empty(self):
        # An empty linkage has no well-defined observation count.
        empty = np.zeros((0, 4), dtype=np.double)
        assert_raises(ValueError, num_obs_linkage, empty)

    def test_num_obs_linkage_1x4(self):
        # One merge row corresponds to two observations.
        single_row = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
        assert_equal(num_obs_linkage(single_row), 2)

    def test_num_obs_linkage_2x4(self):
        # Two merge rows correspond to three observations.
        two_rows = np.asarray([[0, 1, 3.0, 2],
                               [3, 2, 4.0, 3]], dtype=np.double)
        assert_equal(num_obs_linkage(two_rows), 3)

    def test_num_obs_linkage_4_and_up(self):
        # For a real linkage over n observations, num_obs_linkage returns n.
        for nobs in range(4, 15, 3):
            Z = linkage(np.random.rand(nobs * (nobs - 1) // 2))
            assert_equal(num_obs_linkage(Z), nobs)
class TestLeavesList(object):
def test_leaves_list_1x4(self):
# Tests leaves_list(Z) on a 1x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1])
def test_leaves_list_2x4(self):
# Tests leaves_list(Z) on a 2x4 linkage.
Z = np.asarray([[0, 1, 3.0, 2],
[3, 2, 4.0, 3]], dtype=np.double)
to_tree(Z)
assert_equal(leaves_list(Z), [0, 1, 2])
def test_leaves_list_Q(self):
for method in ['single', 'complete', 'average', 'weighted', 'centroid',
'median', 'ward']:
yield self.check_leaves_list_Q, method
def check_leaves_list_Q(self, method):
# Tests leaves_list(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
node = to_tree(Z)
assert_equal(node.pre_order(), leaves_list(Z))
def test_Q_subtree_pre_order(self):
# Tests that pre_order() works when called on sub-trees.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
node = to_tree(Z)
assert_equal(node.pre_order(), (node.get_left().pre_order()
+ node.get_right().pre_order()))
class TestCorrespond(TestCase):
    """Tests for correspond and num_obs_linkage size consistency."""

    def test_correspond_empty(self):
        # Empty linkage vs. empty condensed matrix raises.
        assert_raises(ValueError, correspond,
                      np.zeros((0, 4)), np.zeros((0,)))

    def test_correspond_2_and_up(self):
        # A linkage always corresponds to the distances it was built from.
        for nobs in range(2, 4):
            y = np.random.rand(nobs * (nobs - 1) // 2)
            assert_(correspond(linkage(y), y))
        for nobs in range(4, 15, 3):
            y = np.random.rand(nobs * (nobs - 1) // 2)
            assert_(correspond(linkage(y), y))

    def test_correspond_4_and_up(self):
        # Linkages and distance matrices of different sizes never correspond.
        sizes = (list(zip(range(2, 4), range(3, 5)))
                 + list(zip(range(3, 5), range(2, 4))))
        for nobs_a, nobs_b in sizes:
            y = np.random.rand(nobs_a * (nobs_a - 1) // 2)
            y2 = np.random.rand(nobs_b * (nobs_b - 1) // 2)
            assert_equal(correspond(linkage(y), y2), False)
            assert_equal(correspond(linkage(y2), y), False)

    def test_correspond_4_and_up_2(self):
        # As above, with widely differing sizes.
        sizes = list(zip(range(2, 7), range(16, 21))) * 2
        for nobs_a, nobs_b in sizes:
            y = np.random.rand(nobs_a * (nobs_a - 1) // 2)
            y2 = np.random.rand(nobs_b * (nobs_b - 1) // 2)
            assert_equal(correspond(linkage(y), y2), False)
            assert_equal(correspond(linkage(y2), y), False)

    def test_num_obs_linkage_multi_matrix(self):
        # num_obs_linkage recovers n for observation matrices of many sizes.
        for nobs in range(2, 10):
            Z = linkage(pdist(np.random.rand(nobs, 4)))
            assert_equal(num_obs_linkage(Z), nobs)
class TestIsMonotonic(TestCase):
def test_is_monotonic_empty(self):
# Tests is_monotonic(Z) on an empty linkage.
Z = np.zeros((0, 4))
assert_raises(ValueError, is_monotonic, Z)
def test_is_monotonic_1x4(self):
# Tests is_monotonic(Z) on 1x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_T(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_2x4_F(self):
# Tests is_monotonic(Z) on 2x4 linkage. Expecting False.
Z = np.asarray([[0, 1, 0.4, 2],
[2, 3, 0.3, 3]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_T(self):
# Tests is_monotonic(Z) on 3x4 linkage. Expecting True.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_3x4_F1(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 1). Expecting False.
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.2, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F2(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 2). Expecting False.
Z = np.asarray([[0, 1, 0.8, 2],
[2, 3, 0.4, 2],
[4, 5, 0.6, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_3x4_F3(self):
# Tests is_monotonic(Z) on 3x4 linkage (case 3). Expecting False
Z = np.asarray([[0, 1, 0.3, 2],
[2, 3, 0.4, 2],
[4, 5, 0.2, 4]], dtype=np.double)
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_tdist_linkage1(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Expecting True.
Z = linkage(hierarchy_test_data.ytdist, 'single')
assert_equal(is_monotonic(Z), True)
def test_is_monotonic_tdist_linkage2(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# tdist data set. Perturbing. Expecting False.
Z = linkage(hierarchy_test_data.ytdist, 'single')
Z[2, 2] = 0.0
assert_equal(is_monotonic(Z), False)
def test_is_monotonic_Q_linkage(self):
# Tests is_monotonic(Z) on clustering generated by single linkage on
# Q data set. Expecting True.
X = hierarchy_test_data.Q_X
Z = linkage(X, 'single')
assert_equal(is_monotonic(Z), True)
class TestMaxDists(object):
def test_maxdists_empty_linkage(self):
# Tests maxdists(Z) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxdists, Z)
def test_maxdists_one_cluster_linkage(self):
# Tests maxdists(Z) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxdists_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxdists_Q_linkage, method
def check_maxdists_Q_linkage(self, method):
# Tests maxdists(Z) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
MD = maxdists(Z)
expectedMD = calculate_maximum_distances(Z)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxInconsts(object):
def test_maxinconsts_empty_linkage(self):
# Tests maxinconsts(Z, R) on empty linkage. Expecting exception.
Z = np.zeros((0, 4), dtype=np.double)
R = np.zeros((0, 4), dtype=np.double)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_difrow_linkage(self):
# Tests maxinconsts(Z, R) on linkage and inconsistency matrices with
# different numbers of clusters. Expecting exception.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.random.rand(2, 4)
assert_raises(ValueError, maxinconsts, Z, R)
def test_maxinconsts_one_cluster_linkage(self):
# Tests maxinconsts(Z, R) on linkage with one cluster.
Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
def test_maxinconsts_Q_linkage(self):
for method in ['single', 'complete', 'ward', 'centroid', 'median']:
yield self.check_maxinconsts_Q_linkage, method
def check_maxinconsts_Q_linkage(self, method):
# Tests maxinconsts(Z, R) on the Q data set
X = hierarchy_test_data.Q_X
Z = linkage(X, method)
R = inconsistent(Z)
MD = maxinconsts(Z, R)
expectedMD = calculate_maximum_inconsistencies(Z, R)
assert_allclose(MD, expectedMD, atol=1e-15)
class TestMaxRStat(object):
    """Tests for maxRstat.

    ``maxRstat(Z, R, i)`` returns, per non-singleton cluster, the maximum of
    column ``i`` of the inconsistency matrix over that cluster's subtree.
    """

    def test_maxRstat_invalid_index(self):
        for i in [3.3, -1, 4]:
            yield self.check_maxRstat_invalid_index, i

    def check_maxRstat_invalid_index(self, i):
        # Non-integer indices raise TypeError; out-of-range ints ValueError.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
        if isinstance(i, int):
            assert_raises(ValueError, maxRstat, Z, R, i)
        else:
            assert_raises(TypeError, maxRstat, Z, R, i)

    def test_maxRstat_empty_linkage(self):
        for i in range(4):
            yield self.check_maxRstat_empty_linkage, i

    def check_maxRstat_empty_linkage(self, i):
        # Empty linkage/inconsistency matrices are rejected.
        Z = np.zeros((0, 4), dtype=np.double)
        R = np.zeros((0, 4), dtype=np.double)
        assert_raises(ValueError, maxRstat, Z, R, i)

    def test_maxRstat_difrow_linkage(self):
        for i in range(4):
            yield self.check_maxRstat_difrow_linkage, i

    def check_maxRstat_difrow_linkage(self, i):
        # Row-count mismatch between Z and R is rejected.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.random.rand(2, 4)
        assert_raises(ValueError, maxRstat, Z, R, i)

    def test_maxRstat_one_cluster_linkage(self):
        for i in range(4):
            yield self.check_maxRstat_one_cluster_linkage, i

    def check_maxRstat_one_cluster_linkage(self, i):
        # Tests maxRstat(Z, R, i) on linkage with one cluster.
        # FIX: pass the loop's column index ``i`` through instead of the
        # hard-coded constant 1, so each of the four columns is exercised.
        Z = np.asarray([[0, 1, 0.3, 4]], dtype=np.double)
        R = np.asarray([[0, 0, 0, 0.3]], dtype=np.double)
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
        assert_allclose(MD, expectedMD, atol=1e-15)

    def test_maxRstat_Q_linkage(self):
        for method in ['single', 'complete', 'ward', 'centroid', 'median']:
            for i in range(4):
                yield self.check_maxRstat_Q_linkage, method, i

    def check_maxRstat_Q_linkage(self, method, i):
        # Tests maxRstat(Z, R, i) on the Q data set.
        # FIX: use ``i`` rather than the constant 1 (see note above), so the
        # loop over columns actually tests different inconsistency statistics.
        X = hierarchy_test_data.Q_X
        Z = linkage(X, method)
        R = inconsistent(Z)
        MD = maxRstat(Z, R, i)
        expectedMD = calculate_maximum_inconsistencies(Z, R, i)
        assert_allclose(MD, expectedMD, atol=1e-15)
class TestDendrogram(object):
    """Tests for dendrogram(): tree layout, orientation validation,
    matplotlib plotting, truncation modes and link-color palettes."""

    def test_dendrogram_single_linkage_tdist(self):
        # Tests dendrogram calculation on single linkage of the tdist data set.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        R = dendrogram(Z, no_plot=True)
        leaves = R["leaves"]
        assert_equal(leaves, [2, 5, 1, 0, 3, 4])

    def test_valid_orientation(self):
        # An unrecognized orientation string must raise ValueError.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        assert_raises(ValueError, dendrogram, Z, orientation="foo")

    @dec.skipif(not have_matplotlib)
    def test_dendrogram_plot(self):
        # nose-style generator: run the plotting check for each orientation.
        for orientation in ['top', 'bottom', 'left', 'right']:
            yield self.check_dendrogram_plot, orientation

    def check_dendrogram_plot(self, orientation):
        # Tests dendrogram plotting.
        Z = linkage(hierarchy_test_data.ytdist, 'single')
        # Expected return dict for the ytdist single-linkage tree.
        expected = {'color_list': ['g', 'b', 'b', 'b', 'b'],
                    'dcoord': [[0.0, 138.0, 138.0, 0.0],
                               [0.0, 219.0, 219.0, 0.0],
                               [0.0, 255.0, 255.0, 219.0],
                               [0.0, 268.0, 268.0, 255.0],
                               [138.0, 295.0, 295.0, 268.0]],
                    'icoord': [[5.0, 5.0, 15.0, 15.0],
                               [45.0, 45.0, 55.0, 55.0],
                               [35.0, 35.0, 50.0, 50.0],
                               [25.0, 25.0, 42.5, 42.5],
                               [10.0, 10.0, 33.75, 33.75]],
                    'ivl': ['2', '5', '1', '0', '3', '4'],
                    'leaves': [2, 5, 1, 0, 3, 4]}

        fig = plt.figure()
        ax = fig.add_subplot(221)

        # test that dendrogram accepts ax keyword
        R1 = dendrogram(Z, ax=ax, orientation=orientation)
        assert_equal(R1, expected)

        # test that dendrogram accepts and handle the leaf_font_size and
        # leaf_rotation keywords
        R1a = dendrogram(Z, ax=ax, orientation=orientation,
                         leaf_font_size=20, leaf_rotation=90)
        # Leaf labels live on the x axis for vertical orientations and on
        # the y axis for horizontal ones.
        testlabel = (
            ax.get_xticklabels()[0]
            if orientation in ['top', 'bottom']
            else ax.get_yticklabels()[0]
        )
        assert_equal(testlabel.get_rotation(), 90)
        assert_equal(testlabel.get_size(), 20)
        R1a = dendrogram(Z, ax=ax, orientation=orientation,
                         leaf_rotation=90)
        testlabel = (
            ax.get_xticklabels()[0]
            if orientation in ['top', 'bottom']
            else ax.get_yticklabels()[0]
        )
        assert_equal(testlabel.get_rotation(), 90)
        R1a = dendrogram(Z, ax=ax, orientation=orientation,
                         leaf_font_size=20)
        testlabel = (
            ax.get_xticklabels()[0]
            if orientation in ['top', 'bottom']
            else ax.get_yticklabels()[0]
        )
        assert_equal(testlabel.get_size(), 20)
        plt.close()

        # test plotting to gca (will import pylab)
        R2 = dendrogram(Z, orientation=orientation)
        plt.close()
        assert_equal(R2, expected)

    @dec.skipif(not have_matplotlib)
    def test_dendrogram_truncate_mode(self):
        # 'lastp' and 'mtica' truncation modes collapse leaves as expected.
        Z = linkage(hierarchy_test_data.ytdist, 'single')

        R = dendrogram(Z, 2, 'lastp', show_contracted=True)
        plt.close()
        assert_equal(R, {'color_list': ['b'],
                         'dcoord': [[0.0, 295.0, 295.0, 0.0]],
                         'icoord': [[5.0, 5.0, 15.0, 15.0]],
                         'ivl': ['(2)', '(4)'],
                         'leaves': [6, 9]})

        R = dendrogram(Z, 2, 'mtica', show_contracted=True)
        plt.close()
        assert_equal(R, {'color_list': ['g', 'b', 'b', 'b'],
                         'dcoord': [[0.0, 138.0, 138.0, 0.0],
                                    [0.0, 255.0, 255.0, 0.0],
                                    [0.0, 268.0, 268.0, 255.0],
                                    [138.0, 295.0, 295.0, 268.0]],
                         'icoord': [[5.0, 5.0, 15.0, 15.0],
                                    [35.0, 35.0, 45.0, 45.0],
                                    [25.0, 25.0, 40.0, 40.0],
                                    [10.0, 10.0, 32.5, 32.5]],
                         'ivl': ['2', '5', '1', '0', '(2)'],
                         'leaves': [2, 5, 1, 0, 7]})

    def test_dendrogram_colors(self):
        # Tests dendrogram plots with alternate colors
        Z = linkage(hierarchy_test_data.ytdist, 'single')

        set_link_color_palette(['c', 'm', 'y', 'k'])
        R = dendrogram(Z, no_plot=True,
                       above_threshold_color='g', color_threshold=250)
        # Changing the palette after the call must not affect the result.
        set_link_color_palette(['g', 'r', 'c', 'm', 'y', 'k'])

        color_list = R['color_list']
        assert_equal(color_list, ['c', 'm', 'g', 'g', 'g'])

        # reset color palette (global list)
        set_link_color_palette(None)
def calculate_maximum_distances(Z):
    """Brute-force reference for maxdists.

    For each non-singleton cluster i (row of the linkage matrix ``Z``),
    returns the maximum merge distance found in that cluster's subtree.

    Used for testing correctness of maxdists.
    """
    n = Z.shape[0] + 1
    B = np.zeros((n - 1,))
    q = np.zeros((3,))
    # `range` instead of the Py2-only `xrange` shim: equivalent here and
    # works on both Python 2 and 3 without the compatibility import.
    for i in range(0, n - 1):
        q[:] = 0.0
        left = Z[i, 0]
        right = Z[i, 1]
        # Children with index >= n are previously formed clusters; fold in
        # the maxima already computed for them.
        if left >= n:
            q[0] = B[int(left) - n]
        if right >= n:
            q[1] = B[int(right) - n]
        q[2] = Z[i, 2]
        B[i] = q.max()
    return B
def calculate_maximum_inconsistencies(Z, R, k=3):
    """Brute-force reference for maxinconsts / maxRstat.

    For each non-singleton cluster i, returns the maximum of column ``k``
    of the inconsistency matrix ``R`` over that cluster's subtree.

    Used for testing correctness of maxinconsts.
    """
    n = Z.shape[0] + 1
    B = np.zeros((n - 1,))
    q = np.zeros((3,))
    # `range` instead of the Py2-only `xrange` shim: equivalent here and
    # works on both Python 2 and 3 without the compatibility import.
    for i in range(0, n - 1):
        q[:] = 0.0
        left = Z[i, 0]
        right = Z[i, 1]
        # Children with index >= n are previously formed clusters; fold in
        # the maxima already computed for them.
        if left >= n:
            q[0] = B[int(left) - n]
        if right >= n:
            q[1] = B[int(right) - n]
        q[2] = R[i, k]
        B[i] = q.max()
    return B
def within_tol(a, b, tol):
    """True when every element of ``a`` is strictly within ``tol`` of ``b``."""
    max_deviation = np.abs(a - b).max()
    return max_deviation < tol
def test_unsupported_uncondensed_distance_matrix_linkage_warning():
    """Passing a square distance-like matrix to linkage should warn."""
    square_distance_matrix = [[0, 1], [1, 0]]
    assert_warns(ClusterWarning, linkage, square_distance_matrix)
def test_euclidean_linkage_value_error():
    """Euclidean-only linkage methods must reject other metrics."""
    observations = [[1, 1], [1, 1]]
    for euclidean_only_method in scipy.cluster.hierarchy._EUCLIDEAN_METHODS:
        assert_raises(ValueError, linkage, observations,
                      method=euclidean_only_method, metric='cityblock')
def test_2x2_linkage():
    """A 1-element condensed input and a 2x2 observation input agree."""
    from_condensed = linkage([1], method='single', metric='euclidean')
    from_observations = linkage([[0, 1], [0, 0]], method='single',
                                metric='euclidean')
    assert_allclose(from_condensed, from_observations)
def test_node_compare():
    """Comparison operators on ClusterNode objects behave consistently."""
    np.random.seed(23)
    nobs = 50
    X = np.random.randn(nobs, 4)
    Z = scipy.cluster.hierarchy.ward(X)
    tree = to_tree(Z)
    left = tree.get_left()
    right = tree.get_right()
    # The root compares greater than either child; a node equals itself.
    assert_(tree > left)
    assert_(right > left)
    assert_(right == right)
    assert_(right != left)
def test_cut_tree():
    """cut_tree columns agree with explicit n_clusters and height calls."""
    np.random.seed(23)
    nobs = 50
    X = np.random.randn(nobs, 4)
    Z = scipy.cluster.hierarchy.ward(X)
    cutree = cut_tree(Z)

    # First column: every observation is its own cluster; last column: one
    # cluster; max label per column counts down as clusters merge.
    assert_equal(cutree[:, 0], np.arange(nobs))
    assert_equal(cutree[:, -1], np.zeros(nobs))
    assert_equal(cutree.max(0), np.arange(nobs - 1, -1, -1))

    # Column selection by cluster count matches explicit n_clusters calls.
    for cols, k in (([-5], 5), ([-5, -10], [5, 10]), ([-10, -5], [10, 5])):
        assert_equal(cutree[:, cols], cut_tree(Z, n_clusters=k))

    # Column selection by merge height matches explicit height calls.
    nodes = _order_cluster_tree(Z)
    heights = np.array([node.dist for node in nodes])
    for h in (5, [5, 10], [10, 5]):
        cols = np.searchsorted(heights, np.atleast_1d(h))
        assert_equal(cutree[:, cols], cut_tree(Z, height=h))
def test_Heap():
    """Exercise the internal Heap used by scipy.cluster's linkage code."""
    values = np.array([2, -1, 0, -1.5, 3])
    heap = Heap(values)

    # Minimum is -1.5 at index 3.
    pair = heap.get_min()
    assert_equal(pair['key'], 3)
    assert_equal(pair['value'], -1.5)

    heap.remove_min()
    # Next smallest is -1 at index 1.
    pair = heap.get_min()
    assert_equal(pair['key'], 1)
    assert_equal(pair['value'], -1)

    # Raising index 1 to 2.5 makes 0 (index 2) the new minimum.
    heap.change_value(1, 2.5)
    pair = heap.get_min()
    assert_equal(pair['key'], 2)
    assert_equal(pair['value'], 0)

    # Drop index 2 (value 0) and index 0 (value 2).
    heap.remove_min()
    heap.remove_min()

    # Only indices 1 and 4 remain; pushing 1 up to 10 leaves 3 at index 4.
    heap.change_value(1, 10)
    pair = heap.get_min()
    assert_equal(pair['key'], 4)
    assert_equal(pair['value'], 3)

    heap.remove_min()
    pair = heap.get_min()
    assert_equal(pair['key'], 1)
    assert_equal(pair['value'], 10)
# Allow running this test module directly via numpy's (nose-era) runner.
if __name__ == "__main__":
    run_module_suite()
| |
from os.path import abspath

# Package version string, echoed below for provenance when CCAL is imported.
VERSION = "0.9.1"

# NOTE(review): printing at import time is a side effect every importer
# sees; consider logging at DEBUG level instead.
print("CCAL version {} @ {}".format(VERSION, abspath(__file__)))
from .BAD_VARIANT_IDS import BAD_VARIANT_IDS
from .CODON_TO_AMINO_ACID import CODON_TO_AMINO_ACID
from .COLOR_CATEGORICAL import COLOR_CATEGORICAL
from .COLOR_WHITE_BLACK import COLOR_WHITE_BLACK
from .COLOR_WHITE_BROWN import COLOR_WHITE_BROWN
from .GPSMap import GPSMap
from .VARIANT_CLASSIFICATION_MUTSIG_EFFECT import VARIANT_CLASSIFICATION_MUTSIG_EFFECT
from .VARIANT_EFFECTS import VARIANT_EFFECTS
from .VCF_ANN_FIELDS import VCF_ANN_FIELDS
from .VCF_COLUMNS import VCF_COLUMNS
from ._anneal_node_and_element_positions import _anneal_node_and_element_positions
from ._check_node_x_element import _check_node_x_element
from ._check_w_or_h import _check_w_or_h
from ._cluster_clustering_x_element_and_compute_ccc import _cluster_clustering_x_element_and_compute_ccc
from ._compute_context_indices import _compute_context_indices
from ._compute_norm import _compute_norm
from ._count import _count
from ._describe_vcf_df import _describe_vcf_df
from ._fit_skew_t_pdfs import _fit_skew_t_pdfs
from ._get_coclustering_portion import _get_coclustering_portion
from ._get_target_grid_indices import _get_target_grid_indices
from ._get_triangulation_edges import _get_triangulation_edges
from ._gzip_compress import _gzip_compress
from ._identify_what_to_count import _identify_what_to_count
from ._ignore_bad_and_compute_euclidean_distance_between_2_1d_arrays import _ignore_bad_and_compute_euclidean_distance_between_2_1d_arrays
from ._make_annotations import _make_annotations
from ._make_clean_vcf_df import _make_clean_vcf_df
from ._make_context_matrix import _make_context_matrix
from ._make_element_x_dimension import _make_element_x_dimension
from ._make_grid_values_and_categorical_labels import _make_grid_values_and_categorical_labels
from ._make_variant_dict_consistent import _make_variant_dict_consistent
from ._match import _match
from ._match_randomly_sampled_target_and_data_to_compute_margin_of_errors import _match_randomly_sampled_target_and_data_to_compute_margin_of_errors
from ._match_target_and_data import _match_target_and_data
from ._normalize_nd_array import _normalize_nd_array
from ._permute_target_and_match_target_and_data import _permute_target_and_match_target_and_data
from ._plot_2d import _plot_2d
from ._plot_gps_map import _plot_gps_map
from ._plot_mountain import _plot_mountain
from ._print_and_run_command import _print_and_run_command
from ._process_target_or_data_for_plotting import _process_target_or_data_for_plotting
from ._single_sample_gseas import _single_sample_gseas
from ._update_H_by_multiplicative_update import _update_H_by_multiplicative_update
from ._update_W_by_multiplicative_update import _update_W_by_multiplicative_update
from .add_conda_to_path import add_conda_to_path
from .apply_function_on_2_1d_arrays import apply_function_on_2_1d_arrays
from .apply_function_on_2_2d_arrays_slices import apply_function_on_2_2d_arrays_slices
from .cast_series_to_builtins import cast_series_to_builtins
from .cast_str_to_builtins import cast_str_to_builtins
from .check_nd_array_for_bad import check_nd_array_for_bad
from .clean_and_write_df_to_tsv import clean_and_write_df_to_tsv
from .clean_git_url import clean_git_url
from .clean_name import clean_name
from .clean_path import clean_path
from .clip_nd_array_by_standard_deviation import clip_nd_array_by_standard_deviation
from .cluster_2d_array_slices import cluster_2d_array_slices
from .compute_bandwidths import compute_bandwidths
from .compute_context import compute_context
from .compute_correlation_distance import compute_correlation_distance
from .compute_empirical_p_value import compute_empirical_p_value
from .compute_empirical_p_values_and_fdrs import compute_empirical_p_values_and_fdrs
from .compute_entropy import compute_entropy
from .compute_information_coefficient import compute_information_coefficient
from .compute_information_distance import compute_information_distance
from .compute_joint_probability import compute_joint_probability
from .compute_kullback_leibler_divergence import compute_kullback_leibler_divergence
from .compute_mutational_signature_enrichment import compute_mutational_signature_enrichment
from .compute_nd_array_margin_of_error import compute_nd_array_margin_of_error
from .compute_posterior_probability import compute_posterior_probability
from .conda_is_installed import conda_is_installed
from .copy_path import copy_path
from .correlate import correlate
from .count_gene_impacts_from_variant_dicts import count_gene_impacts_from_variant_dicts
from .count_vcf_gz_rows import count_vcf_gz_rows
from .create_gitkeep import create_gitkeep
from .cross_validate import cross_validate
from .download import download
from .download_and_parse_geo_data import download_and_parse_geo_data
from .drop_df_slice import drop_df_slice
from .drop_df_slice_greedily import drop_df_slice_greedily
from .dump_gps_map import dump_gps_map
from .echo_or_print import echo_or_print
from .establish_path import establish_path
from .estimate_kernel_density import estimate_kernel_density
from .exit_ import exit_
from .fit_skew_t_pdf import fit_skew_t_pdf
from .fit_skew_t_pdfs import fit_skew_t_pdfs
from .flatten_nested_iterable import flatten_nested_iterable
from .get_1d_array_unique_objects_in_order import get_1d_array_unique_objects_in_order
from .get_allelic_frequencies import get_allelic_frequencies
from .get_colormap_colors import get_colormap_colors
from .get_conda_environments import get_conda_environments
from .get_conda_prefix import get_conda_prefix
from .get_function_name import get_function_name
from .get_genotype import get_genotype
from .get_gff3_attribute import get_gff3_attribute
from .get_git_versions import get_git_versions
from .get_installed_pip_libraries import get_installed_pip_libraries
from .get_intersections_between_2_1d_arrays import get_intersections_between_2_1d_arrays
from .get_machine import get_machine
from .get_maf_variant_classification import get_maf_variant_classification
from .get_now import get_now
from .get_object_reference import get_object_reference
from .get_open_port import get_open_port
from .get_population_allelic_frequencies import get_population_allelic_frequencies
from .get_shell_environment import get_shell_environment
from .get_unique_iterable_objects_in_order import get_unique_iterable_objects_in_order
from .get_variant_start_and_end_positions import get_variant_start_and_end_positions
from .get_variant_type import get_variant_type
from .get_vcf_info import get_vcf_info
from .get_vcf_info_ann import get_vcf_info_ann
from .get_vcf_sample_format import get_vcf_sample_format
from .get_volume_name import get_volume_name
from .group_and_apply_function_on_each_group_in_iterable import group_and_apply_function_on_each_group_in_iterable
from .group_iterable import group_iterable
from .gsea import gsea
from .gzip_compress_file import gzip_compress_file
from .gzip_decompress_and_bgzip_compress_file import gzip_decompress_and_bgzip_compress_file
from .gzip_decompress_file import gzip_decompress_file
from .have_program import have_program
from .hierarchical_consensus_cluster import hierarchical_consensus_cluster
from .hierarchical_consensus_cluster_with_ks import hierarchical_consensus_cluster_with_ks
from .in_git_repository import in_git_repository
from .index_gff3_df_by_name import index_gff3_df_by_name
from .infer import infer
from .infer_assuming_independence import infer_assuming_independence
from .initialize_logger import initialize_logger
from .install_and_activate_conda import install_and_activate_conda
from .install_python_libraries import install_python_libraries
from .is_inframe import is_inframe
from .is_valid_vcf_gz import is_valid_vcf_gz
from .load_gps_map import load_gps_map
from .log_and_return_response import log_and_return_response
from .log_nd_array import log_nd_array
from .make_categorical_colors import make_categorical_colors
from .make_colorscale import make_colorscale
from .make_colorscale_from_colors import make_colorscale_from_colors
from .make_comparison_panel import make_comparison_panel
from .make_context_matrix import make_context_matrix
from .make_coordinates_for_reflection import make_coordinates_for_reflection
from .make_file_name_from_str import make_file_name_from_str
from .make_maf_from_vcf import make_maf_from_vcf
from .make_match_panel import make_match_panel
from .make_match_panels import make_match_panels
from .make_membership_df_from_categorical_series import make_membership_df_from_categorical_series
from .make_mesh_grid_coordinates_per_axis import make_mesh_grid_coordinates_per_axis
from .make_object_int_mapping import make_object_int_mapping
from .make_random_color import make_random_color
from .make_summary_match_panel import make_summary_match_panel
from .make_volume_dict import make_volume_dict
from .mds import mds
from .merge_dicts_with_callable import merge_dicts_with_callable
from .mount_volume import mount_volume
from .multiprocess import multiprocess
from .nd_array_is_sorted import nd_array_is_sorted
from .nmf_by_multiple_V_and_H import nmf_by_multiple_V_and_H
from .nmf_by_multiplicative_update import nmf_by_multiplicative_update
from .nmf_by_sklearn import nmf_by_sklearn
from .nmf_consensus_cluster import nmf_consensus_cluster
from .nmf_consensus_cluster_with_ks import nmf_consensus_cluster_with_ks
from .normalize_contig import normalize_contig
from .normalize_df import normalize_df
from .normalize_nd_array import normalize_nd_array
from .parse_vcf_row_and_make_variant_dict import parse_vcf_row_and_make_variant_dict
from .plot_and_save import plot_and_save
from .plot_bar import plot_bar
from .plot_bayesian_nomogram import plot_bayesian_nomogram
from .plot_bubble_map import plot_bubble_map
from .plot_color_text import plot_color_text
from .plot_context import plot_context
from .plot_distributions import plot_distributions
from .plot_heat_map import plot_heat_map
from .plot_pie import plot_pie
from .plot_points import plot_points
from .plot_table import plot_table
from .plot_violin_or_box import plot_violin_or_box
from .process_feature_x_sample import process_feature_x_sample
from .read_copynumber_gistic2 import read_copynumber_gistic2
from .read_correlate_copynumber_vs_mrnaseq import read_correlate_copynumber_vs_mrnaseq
from .read_gct import read_gct
from .read_gff3_gz import read_gff3_gz
from .read_gmt import read_gmt
from .read_gmts import read_gmts
from .read_json import read_json
from .read_matrix_market import read_matrix_market
from .read_mutsignozzlereport2cv import read_mutsignozzlereport2cv
from .read_vcf_gz_and_make_vcf_dict import read_vcf_gz_and_make_vcf_dict
from .reboot_machine import reboot_machine
from .remove_path import remove_path
from .remove_paths import remove_paths
from .replace_bad_objects_in_iterable import replace_bad_objects_in_iterable
from .rescale_x_y_coordiantes_in_polar_coordiante import rescale_x_y_coordiantes_in_polar_coordiante
from .reverse_complement_dna_sequence import reverse_complement_dna_sequence
from .reverse_transcribe_rna_sequence import reverse_transcribe_rna_sequence
from .run_command import run_command
from .run_command_and_monitor import run_command_and_monitor
from .select_gene_symbol import select_gene_symbol
from .select_series_indices import select_series_indices
from .select_tcga_sample_by_sample_type_and_group import select_tcga_sample_by_sample_type_and_group
from .shuffle_each_2d_array_slice import shuffle_each_2d_array_slice
from .shutdown_machine import shutdown_machine
from .single_sample_gsea import single_sample_gsea
from .single_sample_gseas import single_sample_gseas
from .solve_ax_equal_b import solve_ax_equal_b
from .solve_for_H import solve_for_H
from .split_codons import split_codons
from .split_df import split_df
from .split_maf_by_tumor_sample_barcode import split_maf_by_tumor_sample_barcode
from .split_str_ignoring_inside_quotes import split_str_ignoring_inside_quotes
from .str_is_version import str_is_version
from .summarize_feature_x_sample import summarize_feature_x_sample
from .title_str import title_str
from .train_and_classify import train_and_classify
from .train_and_regress import train_and_regress
from .transcribe_dna_sequence import transcribe_dna_sequence
from .translate_nucleotide_sequence import translate_nucleotide_sequence
from .untitle_str import untitle_str
from .update_variant_dict import update_variant_dict
from .write_dict import write_dict
from .write_gct import write_gct
from .write_gmt import write_gmt
from .write_json import write_json
| |
# Copyright (c) 2014 VMware, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import mock
from oslo_utils import units
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from nova import exception
from nova import test
from nova.tests.unit.virt.vmwareapi import fake
from nova.virt.vmwareapi import ds_util
class DsUtilTestCase(test.NoDBTestCase):
def setUp(self):
    """Create a fake vSphere session and reset the fake VMware back end."""
    super(DsUtilTestCase, self).setUp()
    self.session = fake.FakeSession()
    # api_retry_count=1: fail fast in tests instead of retrying API calls.
    self.flags(api_retry_count=1, group='vmware')
    fake.reset()
def tearDown(self):
    """Reset the fake VMware back end so state never leaks between tests."""
    super(DsUtilTestCase, self).tearDown()
    fake.reset()
def test_get_datacenter_ref(self):
    """get_datacenter_ref should look the datacenter up by inventory path."""
    with mock.patch.object(self.session, '_call_method') as call_method:
        ds_util.get_datacenter_ref(self.session, "datacenter")
        call_method.assert_called_once_with(
            self.session.vim,
            "FindByInventoryPath",
            self.session.vim.service_content.searchIndex,
            inventoryPath="datacenter")
def test_file_delete(self):
    """file_delete should issue DeleteDatastoreFile_Task and wait on it."""
    def fake_call_method(module, method, *args, **kwargs):
        # Validate the vim call made by file_delete.
        self.assertEqual('DeleteDatastoreFile_Task', method)
        name = kwargs.get('name')
        self.assertEqual('[ds] fake/path', name)
        datacenter = kwargs.get('datacenter')
        self.assertEqual('fake-dc-ref', datacenter)
        return 'fake_delete_task'

    with test.nested(
        mock.patch.object(self.session, '_wait_for_task'),
        mock.patch.object(self.session, '_call_method',
                          fake_call_method)
    ) as (_wait_for_task, _call_method):
        ds_path = ds_obj.DatastorePath('ds', 'fake/path')
        ds_util.file_delete(self.session,
                            ds_path, 'fake-dc-ref')
        # The returned task must be waited upon.
        _wait_for_task.assert_has_calls([
            mock.call('fake_delete_task')])
def test_file_copy(self):
    """file_copy should issue CopyDatastoreFile_Task with both datacenters."""
    def fake_call_method(module, method, *args, **kwargs):
        # Validate the vim call made by file_copy.
        self.assertEqual('CopyDatastoreFile_Task', method)
        src_name = kwargs.get('sourceName')
        self.assertEqual('[ds] fake/path/src_file', src_name)
        src_dc_ref = kwargs.get('sourceDatacenter')
        self.assertEqual('fake-src-dc-ref', src_dc_ref)
        dst_name = kwargs.get('destinationName')
        self.assertEqual('[ds] fake/path/dst_file', dst_name)
        dst_dc_ref = kwargs.get('destinationDatacenter')
        self.assertEqual('fake-dst-dc-ref', dst_dc_ref)
        return 'fake_copy_task'

    with test.nested(
        mock.patch.object(self.session, '_wait_for_task'),
        mock.patch.object(self.session, '_call_method',
                          fake_call_method)
    ) as (_wait_for_task, _call_method):
        src_ds_path = ds_obj.DatastorePath('ds', 'fake/path', 'src_file')
        dst_ds_path = ds_obj.DatastorePath('ds', 'fake/path', 'dst_file')
        # file_copy takes string paths, not DatastorePath objects.
        ds_util.file_copy(self.session,
                          str(src_ds_path), 'fake-src-dc-ref',
                          str(dst_ds_path), 'fake-dst-dc-ref')
        _wait_for_task.assert_has_calls([
            mock.call('fake_copy_task')])
def test_file_move(self):
    """file_move should issue MoveDatastoreFile_Task and wait on it."""
    def fake_call_method(module, method, *args, **kwargs):
        # Validate the vim call made by file_move.
        self.assertEqual('MoveDatastoreFile_Task', method)
        sourceName = kwargs.get('sourceName')
        self.assertEqual('[ds] tmp/src', sourceName)
        destinationName = kwargs.get('destinationName')
        self.assertEqual('[ds] base/dst', destinationName)
        sourceDatacenter = kwargs.get('sourceDatacenter')
        self.assertEqual('fake-dc-ref', sourceDatacenter)
        destinationDatacenter = kwargs.get('destinationDatacenter')
        self.assertEqual('fake-dc-ref', destinationDatacenter)
        return 'fake_move_task'

    with test.nested(
        mock.patch.object(self.session, '_wait_for_task'),
        mock.patch.object(self.session, '_call_method',
                          fake_call_method)
    ) as (_wait_for_task, _call_method):
        src_ds_path = ds_obj.DatastorePath('ds', 'tmp/src')
        dst_ds_path = ds_obj.DatastorePath('ds', 'base/dst')
        ds_util.file_move(self.session,
                          'fake-dc-ref', src_ds_path, dst_ds_path)
        _wait_for_task.assert_has_calls([
            mock.call('fake_move_task')])
def test_disk_move(self):
    """disk_move should issue MoveVirtualDisk_Task and wait on it."""
    def fake_call_method(module, method, *args, **kwargs):
        # Validate the vim call made by disk_move.
        self.assertEqual('MoveVirtualDisk_Task', method)
        src_name = kwargs.get('sourceName')
        self.assertEqual('[ds] tmp/src', src_name)
        dest_name = kwargs.get('destName')
        self.assertEqual('[ds] base/dst', dest_name)
        src_datacenter = kwargs.get('sourceDatacenter')
        self.assertEqual('fake-dc-ref', src_datacenter)
        dest_datacenter = kwargs.get('destDatacenter')
        self.assertEqual('fake-dc-ref', dest_datacenter)
        return 'fake_move_task'

    with test.nested(
        mock.patch.object(self.session, '_wait_for_task'),
        mock.patch.object(self.session, '_call_method',
                          fake_call_method)
    ) as (_wait_for_task, _call_method):
        ds_util.disk_move(self.session,
                          'fake-dc-ref', '[ds] tmp/src', '[ds] base/dst')
        _wait_for_task.assert_has_calls([
            mock.call('fake_move_task')])
def test_disk_copy(self):
    """disk_copy should call CopyVirtualDisk_Task and wait for the task."""
    with test.nested(
        mock.patch.object(self.session, '_wait_for_task'),
        mock.patch.object(self.session, '_call_method',
                          return_value=mock.sentinel.cm)
    ) as (_wait_for_task, _call_method):
        ds_util.disk_copy(self.session, mock.sentinel.dc_ref,
                          mock.sentinel.source_ds, mock.sentinel.dest_ds)
        _wait_for_task.assert_called_once_with(mock.sentinel.cm)
        # The path sentinels are stringified by disk_copy, hence the
        # 'sentinel.*' string expectations below.
        _call_method.assert_called_once_with(
            mock.ANY, 'CopyVirtualDisk_Task', 'VirtualDiskManager',
            sourceName='sentinel.source_ds',
            destDatacenter=mock.sentinel.dc_ref,
            sourceDatacenter=mock.sentinel.dc_ref, force=False,
            destName='sentinel.dest_ds')
def test_disk_delete(self):
    """disk_delete should call DeleteVirtualDisk_Task and wait for it."""
    with test.nested(
        mock.patch.object(self.session, '_wait_for_task'),
        mock.patch.object(self.session, '_call_method',
                          return_value=mock.sentinel.cm)
    ) as (_wait_for_task, _call_method):
        ds_util.disk_delete(self.session,
                            'fake-dc-ref', '[ds] tmp/disk.vmdk')
        _wait_for_task.assert_called_once_with(mock.sentinel.cm)
        _call_method.assert_called_once_with(
            mock.ANY, 'DeleteVirtualDisk_Task', 'VirtualDiskManager',
            datacenter='fake-dc-ref', name='[ds] tmp/disk.vmdk')
def test_mkdir(self):
    """mkdir should call MakeDirectory with createParentDirectories set."""
    def fake_call_method(module, method, *args, **kwargs):
        # Validate the vim call made by mkdir.
        self.assertEqual('MakeDirectory', method)
        name = kwargs.get('name')
        self.assertEqual('[ds] fake/path', name)
        datacenter = kwargs.get('datacenter')
        self.assertEqual('fake-dc-ref', datacenter)
        createParentDirectories = kwargs.get('createParentDirectories')
        self.assertTrue(createParentDirectories)

    with mock.patch.object(self.session, '_call_method',
                           fake_call_method):
        ds_path = ds_obj.DatastorePath('ds', 'fake/path')
        ds_util.mkdir(self.session, ds_path, 'fake-dc-ref')
def test_file_exists(self):
    """file_exists returns True when the search task reports the file."""
    def fake_call_method(module, method, *args, **kwargs):
        if method == 'SearchDatastore_Task':
            ds_browser = args[0]
            self.assertEqual('fake-browser', ds_browser)
            datastorePath = kwargs.get('datastorePath')
            self.assertEqual('[ds] fake/path', datastorePath)
            return 'fake_exists_task'

        # Should never get here
        self.fail()

    def fake_wait_for_task(task_ref):
        if task_ref == 'fake_exists_task':
            # Build a fake search result containing the requested file.
            result_file = fake.DataObject()
            result_file.path = 'fake-file'

            result = fake.DataObject()
            result.file = [result_file]
            result.path = '[ds] fake/path'

            task_info = fake.DataObject()
            task_info.result = result

            return task_info

        # Should never get here
        self.fail()

    with test.nested(
            mock.patch.object(self.session, '_call_method',
                              fake_call_method),
            mock.patch.object(self.session, '_wait_for_task',
                              fake_wait_for_task)):
        ds_path = ds_obj.DatastorePath('ds', 'fake/path')
        file_exists = ds_util.file_exists(self.session,
                'fake-browser', ds_path, 'fake-file')
        self.assertTrue(file_exists)
def test_file_exists_fails(self):
    """file_exists returns False when the search raises FileNotFound."""
    def fake_call_method(module, method, *args, **kwargs):
        if method == 'SearchDatastore_Task':
            return 'fake_exists_task'

        # Should never get here
        self.fail()

    def fake_wait_for_task(task_ref):
        if task_ref == 'fake_exists_task':
            # Simulate the datastore browser not finding the file.
            raise vexc.FileNotFoundException()

        # Should never get here
        self.fail()

    with test.nested(
            mock.patch.object(self.session, '_call_method',
                              fake_call_method),
            mock.patch.object(self.session, '_wait_for_task',
                              fake_wait_for_task)):
        ds_path = ds_obj.DatastorePath('ds', 'fake/path')
        file_exists = ds_util.file_exists(self.session,
                'fake-browser', ds_path, 'fake-file')
        self.assertFalse(file_exists)
def _mock_get_datastore_calls(self, *datastores):
    """Mock vim_util calls made by get_datastore.

    Returns a patcher for session._call_method that feeds the given
    retrieve-result pages back to get_datastore one at a time,
    emulating paged property collection.
    """
    # One-element list so the inner closure can rebind the iterator.
    datastores_i = [None]

    # For the moment, at least, this list of datastores is simply passed to
    # get_properties_for_a_collection_of_objects, which we mock below. We
    # don't need to over-complicate the fake function by worrying about its
    # contents.
    fake_ds_list = ['fake-ds']

    def fake_call_method(module, method, *args, **kwargs):
        # Mock the call which returns a list of datastores for the cluster
        if (module == ds_util.vutil and
                method == 'get_object_property' and
                args == ('fake-cluster', 'datastore')):
            fake_ds_mor = fake.DataObject()
            fake_ds_mor.ManagedObjectReference = fake_ds_list
            return fake_ds_mor

        # Return the datastore result sets we were passed in, in the order
        # given
        # NOTE(review): ds_util.vim_util here vs ds_util.vutil above --
        # presumably two distinct helper modules; confirm against ds_util.
        if (module == ds_util.vim_util and
                method == 'get_properties_for_a_collection_of_objects' and
                args[0] == 'Datastore' and
                args[1] == fake_ds_list):
            # Start a new iterator over given datastores
            datastores_i[0] = iter(datastores)
            return next(datastores_i[0])

        # Continue returning results from the current iterator.
        if (module == ds_util.vutil and
                method == 'continue_retrieval'):
            try:
                return next(datastores_i[0])
            except StopIteration:
                return None

        if (method == 'continue_retrieval' or
                method == 'cancel_retrieval'):
            return

        # Sentinel that get_datastore's use of vim has changed
        self.fail('Unexpected vim call in get_datastore: %s' % method)

    return mock.patch.object(self.session, '_call_method',
                             side_effect=fake_call_method)
def test_get_datastore(self):
    """With no regex, the first valid datastore is selected."""
    fake_objects = fake.FakeRetrieveResult()
    fake_objects.add_object(fake.Datastore())
    fake_objects.add_object(fake.Datastore("fake-ds-2", 2048, 1000,
                                           False, "normal"))
    fake_objects.add_object(fake.Datastore("fake-ds-3", 4096, 2000,
                                           True, "inMaintenance"))
    with self._mock_get_datastore_calls(fake_objects):
        result = ds_util.get_datastore(self.session, 'fake-cluster')
    # Defaults of fake.Datastore(): name 'fake-ds', 1 TiB capacity,
    # 500 GiB free.
    self.assertEqual("fake-ds", result.name)
    self.assertEqual(units.Ti, result.capacity)
    self.assertEqual(500 * units.Gi, result.freespace)
def test_get_datastore_with_regex(self):
    # Test with a regex that matches with a datastore
    datastore_valid_regex = re.compile(r"^openstack.*\d$")
    fake_objects = fake.FakeRetrieveResult()
    fake_objects.add_object(fake.Datastore("openstack-ds0"))
    fake_objects.add_object(fake.Datastore("fake-ds0"))
    fake_objects.add_object(fake.Datastore("fake-ds1"))
    with self._mock_get_datastore_calls(fake_objects):
        result = ds_util.get_datastore(self.session, 'fake-cluster',
                                       datastore_valid_regex)
    # Only 'openstack-ds0' matches the regex.
    self.assertEqual("openstack-ds0", result.name)
def test_get_datastore_with_token(self):
    """Paged retrieval: datastores on the second page are considered too."""
    regex = re.compile(r"^ds.*\d$")
    fake0 = fake.FakeRetrieveResult()
    fake0.add_object(fake.Datastore("ds0", 10 * units.Gi, 5 * units.Gi))
    fake0.add_object(fake.Datastore("foo", 10 * units.Gi, 9 * units.Gi))
    # A token on the first page signals more results are available.
    setattr(fake0, 'token', 'token-0')
    fake1 = fake.FakeRetrieveResult()
    fake1.add_object(fake.Datastore("ds2", 10 * units.Gi, 8 * units.Gi))
    fake1.add_object(fake.Datastore("ds3", 10 * units.Gi, 1 * units.Gi))
    with self._mock_get_datastore_calls(fake0, fake1):
        result = ds_util.get_datastore(self.session, 'fake-cluster', regex)
    self.assertEqual("ds2", result.name)
def test_get_datastore_with_list(self):
    # Test with a regex containing whitelist of datastores
    datastore_valid_regex = re.compile("(openstack-ds0|openstack-ds2)")
    fake_objects = fake.FakeRetrieveResult()
    fake_objects.add_object(fake.Datastore("openstack-ds0"))
    fake_objects.add_object(fake.Datastore("openstack-ds1"))
    fake_objects.add_object(fake.Datastore("openstack-ds2"))
    with self._mock_get_datastore_calls(fake_objects):
        result = ds_util.get_datastore(self.session, 'fake-cluster',
                                       datastore_valid_regex)
    # NOTE(review): only asserts the non-whitelisted name was not picked;
    # which of ds0/ds2 wins is intentionally left unspecified.
    self.assertNotEqual("openstack-ds1", result.name)
def test_get_datastore_with_regex_error(self):
    """No datastore matching the regex raises DatastoreNotFound."""
    # Test with a regex that has no match
    # Checks if code raises DatastoreNotFound with a specific message
    datastore_invalid_regex = re.compile("unknown-ds")
    exp_message = ("Datastore regex %s did not match any datastores"
                   % datastore_invalid_regex.pattern)
    fake_objects = fake.FakeRetrieveResult()
    fake_objects.add_object(fake.Datastore("fake-ds0"))
    fake_objects.add_object(fake.Datastore("fake-ds1"))
    # assertRaisesRegExp would have been a good choice instead of
    # try/catch block, but it's available only from Py 2.7.
    try:
        with self._mock_get_datastore_calls(fake_objects):
            ds_util.get_datastore(self.session, 'fake-cluster',
                                  datastore_invalid_regex)
    except exception.DatastoreNotFound as e:
        self.assertEqual(exp_message, e.args[0])
    else:
        self.fail("DatastoreNotFound Exception was not raised with "
                  "message: %s" % exp_message)
def test_get_datastore_without_datastore(self):
    """A session that returns no datastores raises DatastoreNotFound."""
    empty_session = fake.FakeObjectRetrievalSession(None)
    self.assertRaises(exception.DatastoreNotFound,
                      ds_util.get_datastore,
                      empty_session, cluster="fake-cluster")
def test_get_datastore_inaccessible_ds(self):
    """An inaccessible datastore must not be selected."""
    data_store = fake.Datastore()
    data_store.set("summary.accessible", False)

    fake_objects = fake.FakeRetrieveResult()
    fake_objects.add_object(data_store)

    with self._mock_get_datastore_calls(fake_objects):
        self.assertRaises(exception.DatastoreNotFound,
                          ds_util.get_datastore,
                          self.session, 'fake-cluster')
def test_get_datastore_ds_in_maintenance(self):
data_store = fake.Datastore()
data_store.set("summary.maintenanceMode", "inMaintenance")
fake_objects = fake.FakeRetrieveResult()
fake_objects.add_object(data_store)
with self._mock_get_datastore_calls(fake_objects):
self.assertRaises(exception.DatastoreNotFound,
ds_util.get_datastore,
self.session, 'fake-cluster')
def test_get_datastore_no_host_in_cluster(self):
def fake_call_method(module, method, *args, **kwargs):
return ''
with mock.patch.object(self.session, '_call_method',
fake_call_method):
self.assertRaises(exception.DatastoreNotFound,
ds_util.get_datastore,
self.session, 'fake-cluster')
def _test_is_datastore_valid(self, accessible=True,
maintenance_mode="normal",
type="VMFS",
datastore_regex=None,
ds_types=ds_util.ALL_SUPPORTED_DS_TYPES):
propdict = {}
propdict["summary.accessible"] = accessible
propdict["summary.maintenanceMode"] = maintenance_mode
propdict["summary.type"] = type
propdict["summary.name"] = "ds-1"
return ds_util._is_datastore_valid(propdict, datastore_regex, ds_types)
def test_is_datastore_valid(self):
for ds_type in ds_util.ALL_SUPPORTED_DS_TYPES:
self.assertTrue(self._test_is_datastore_valid(True,
"normal",
ds_type))
def test_is_datastore_valid_inaccessible_ds(self):
self.assertFalse(self._test_is_datastore_valid(False,
"normal",
"VMFS"))
def test_is_datastore_valid_ds_in_maintenance(self):
self.assertFalse(self._test_is_datastore_valid(True,
"inMaintenance",
"VMFS"))
def test_is_datastore_valid_ds_type_invalid(self):
self.assertFalse(self._test_is_datastore_valid(True,
"normal",
"vfat"))
def test_is_datastore_valid_not_matching_regex(self):
datastore_regex = re.compile("ds-2")
self.assertFalse(self._test_is_datastore_valid(True,
"normal",
"VMFS",
datastore_regex))
def test_is_datastore_valid_matching_regex(self):
datastore_regex = re.compile("ds-1")
self.assertTrue(self._test_is_datastore_valid(True,
"normal",
"VMFS",
datastore_regex))
def test_get_connected_hosts_none(self):
with mock.patch.object(self.session,
'_call_method') as _call_method:
hosts = ds_util.get_connected_hosts(self.session,
'fake_datastore')
self.assertEqual([], hosts)
_call_method.assert_called_once_with(
mock.ANY, 'get_object_property',
'fake_datastore', 'host')
def test_get_connected_hosts(self):
host = mock.Mock(spec=object)
host.value = 'fake-host'
host_mount = mock.Mock(spec=object)
host_mount.key = host
host_mounts = mock.Mock(spec=object)
host_mounts.DatastoreHostMount = [host_mount]
with mock.patch.object(self.session, '_call_method',
return_value=host_mounts) as _call_method:
hosts = ds_util.get_connected_hosts(self.session,
'fake_datastore')
self.assertEqual(['fake-host'], hosts)
_call_method.assert_called_once_with(
mock.ANY, 'get_object_property',
'fake_datastore', 'host')
| |
#!/usr/bin/env python3
# Copyright (c) 2017-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Class for bitcoind node under test"""
import contextlib
import decimal
import errno
from enum import Enum
import http.client
import json
import logging
import os
import re
import subprocess
import tempfile
import time
import urllib.parse
import collections
import shlex
import sys
from .authproxy import JSONRPCException
from .descriptors import descsum_create
from .messages import MY_SUBVERSION
from .util import (
MAX_NODES,
append_config,
delete_cookie_file,
get_auth_cookie,
get_rpc_proxy,
rpc_url,
wait_until_helper,
p2p_port,
EncodeDecimal,
)
# Seconds to wait for a bitcoind process to exit before giving up
# (used as the default timeout of TestNode.wait_until_stopped).
BITCOIND_PROC_WAIT_TIMEOUT = 60
# Raised (and caught by assert_start_raises_init_error) when the bitcoind
# process exits during initialization instead of serving RPC.
class FailedToStartError(Exception):
    """Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
    # How assert_start_raises_init_error compares expected_msg against the
    # node's stderr output.
    FULL_TEXT = 1       # exact string equality
    FULL_REGEX = 2      # re.fullmatch against stderr
    PARTIAL_REGEX = 3   # re.search (multiline) anywhere in stderr
class TestNode():
    """A class for representing a bitcoind node under test.
    This class contains:
    - state about the node (whether it's running, etc)
    - a Python subprocess.Popen object representing the running process
    - an RPC connection to the node
    - one or more P2P connections to the node
    To make things easier for the test writer, any unrecognised messages will
    be dispatched to the RPC connection."""
    def __init__(self, i, datadir, *, chain, rpchost, timewait, timeout_factor, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, descriptors=False, chain_in_args=True):
        """
        Kwargs:
            start_perf (bool): If True, begin profiling the node with `perf` as soon as
                the node starts.
        """
        self.index = i
        self.datadir = datadir
        # ELEMENTS: the config file is elements.conf, not bitcoin.conf
        self.bitcoinconf = os.path.join(self.datadir, "elements.conf")
        self.stdout_dir = os.path.join(self.datadir, "stdout")
        self.stderr_dir = os.path.join(self.datadir, "stderr")
        self.chain = chain
        self.rpchost = rpchost
        self.rpc_timeout = timewait
        self.binary = bitcoind
        self.coverage_dir = coverage_dir
        self.cwd = cwd
        self.descriptors = descriptors
        if extra_conf is not None:
            append_config(datadir, extra_conf)
        # Most callers will just need to add extra args to the standard list below.
        # For those callers that need more flexibility, they can just set the args property directly.
        # Note that common args are set in the config file (see initialize_datadir)
        self.extra_args = extra_args
        self.version = version
        # Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
        # This means that starting a bitcoind using the temp dir to debug a failed test won't
        # spam debug.log.
        self.args = [
            self.binary,
            "-datadir=" + self.datadir,
            "-logtimemicros",
            "-debug",
            "-debugexclude=libevent",
            "-debugexclude=leveldb",
            "-uacomment=testnode%d" % i,
        ]
        if chain_in_args:
            self.args += ["-chain=" + self.chain]
        if use_valgrind:
            # Wrap the bitcoind invocation in valgrind; a custom suppressions
            # file can be supplied via VALGRIND_SUPPRESSIONS_FILE.
            default_suppressions_file = os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                "..", "..", "..", "contrib", "valgrind.supp")
            suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE",
                                          default_suppressions_file)
            self.args = ["valgrind", "--suppressions={}".format(suppressions_file),
                         "--gen-suppressions=all", "--exit-on-first-error=yes",
                         "--error-exitcode=1", "--quiet"] + self.args
        if self.version_is_at_least(190000):
            self.args.append("-logthreadnames")
        self.cli = TestNodeCLI(bitcoin_cli, self.datadir, self.chain)
        self.use_cli = use_cli
        self.start_perf = start_perf
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.url = None
        self.log = logging.getLogger('TestFramework.node%d' % i)
        self.cleanup_on_exit = True # Whether to kill the node when this object goes away
        # Cache perf subprocesses here by their data output filename.
        self.perf_subprocesses = {}
        self.p2ps = []
        self.timeout_factor = timeout_factor
        # ELEMENTS:
        self.deterministic_priv_key = None
    def set_deterministic_priv_key(self, address, privkey):
        """ELEMENTS: override this node's deterministic key pair (see
        get_deterministic_priv_key)."""
        AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
        self.deterministic_priv_key = AddressKeyPair(address, privkey)
    AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
    # One (address, privkey) pair per possible node index; get_deterministic_priv_key
    # asserts len(PRIV_KEYS) == MAX_NODES.
    PRIV_KEYS = [
        # address , privkey
        AddressKeyPair('2doncj41FX6LahE2aspuLuQcnmgrLtfvvma', 'cQ1PxVTn5J3qCzUqXzpNeHiPVxZcwwaJkFmZcokescXwmHWAdBoe'),
        AddressKeyPair('2dv4oTKjmi3TF6dRq2TmasMdNtTeuqHNcPb', 'cRuGzyZjb5zQQg1TbAiGK1UJBuK1UQHaFf4DXBUcPZNZ3WomNoxW'),
        AddressKeyPair('2drNifUyWj5D8UPrAJtuQjUyPSUYpE4gb7E', 'cSYxv1JKDNrSKnm2fzNj1uFUkr7HqTQHvU8PCAZ1mWNwYB5LwZVu'),
        AddressKeyPair('2dwTDXu4QLFG61upW7wkRLbkUxDRYuRmnAd', 'cQLvSojQFLkikwuzhSkKPv9REWpCDNhvGiG5hjutYPQj4HT8GnJy'),
        AddressKeyPair('2doqDLuXpHx3x6Bd9bBAWxRzzKZCXTbSCWE', 'cNkbdkyQ9RX3Yrp4oFACs4p4iiyBxoC3pL7zioGKSzmvyLbiR4Rm'),
        AddressKeyPair('2dktmJxjtpKEftBKFaGBUCW7wsUEBGeuoSi', 'cSCCVU7iUXMNnqrHPeVxHEgG48TsyNwd3FAsS2hjTYKdTSNDXJXV'),
        AddressKeyPair('2dxjmBn21SQjhffXzzi1hz5nKA4quM7jtsT', 'cVpzWr59KE7DsWaSJkySSPSvkrr6huUsjYBF3wcwCMhW4cEPNmU1'),
        AddressKeyPair('2dpFhgNeWqm7LVbvZo29bvXPEzwWTVfzVSY', 'cRPF2Kfm21BWf3GPHMKGGMQN1a6sNJPGsyYSz8VWQhsoVUr42q4r'),
        AddressKeyPair('2df8FzAJJtPcHsvXYRh4BTmnhSUUVE5zNhE', 'cU9yBKyGyRNBpzSmVavoh9szgaFUjKbPG9P3CPtycXAxmdwnKxiL'),
        AddressKeyPair('2N7XEbmyLeviPkfiTMEcnEFrdcwfrW2nypj', 'cSpjeQMdDAD6Kr6g4T4HwFX1jMoiMKw5iBjHUSNgVTEdiZbrSa8o'),
        AddressKeyPair('2N5epYpqDw55w9HBpU97tNRUdwHXtFQ1kHs', 'cNEfBggwDGdf27hitveT75RicFtjeZX7pN4fsV3KjKEamScPzh63'),
        AddressKeyPair('2N1hYBmCYcYNjUwiSAxricmwEgqpxhXeBHo', 'cMrjzHT5XvEYob52zEHLgpC2jbSTiRYRAJgPvFDdeQTMy7AM5L3N'),
    ]
    def get_deterministic_priv_key(self):
        """Return a deterministic priv key in base58, that only depends on the node's index"""
        assert len(self.PRIV_KEYS) == MAX_NODES
        # ELEMENTS: this allows overriding the default for parent nodes in fedpeg test
        if self.deterministic_priv_key is not None:
            self.log.debug("Custom deterministic_priv_key: {}".format(self.deterministic_priv_key))
            return self.deterministic_priv_key
        return self.PRIV_KEYS[self.index]
    def _node_msg(self, msg: str) -> str:
        """Return a modified msg that identifies this node by its index as a debugging aid."""
        return "[node %d] %s" % (self.index, msg)
    def _raise_assertion_error(self, msg: str):
        """Raise an AssertionError with msg modified to identify this node."""
        raise AssertionError(self._node_msg(msg))
    def __del__(self):
        # Ensure that we don't leave any bitcoind processes lying around after
        # the test ends
        if self.process and self.cleanup_on_exit:
            # Should only happen on test failure
            # Avoid using logger, as that may have already been shutdown when
            # this destructor is called.
            print(self._node_msg("Cleaning up leftover process"))
            self.process.kill()
    def __getattr__(self, name):
        """Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
        if self.use_cli:
            return getattr(RPCOverloadWrapper(self.cli, True, self.descriptors), name)
        else:
            assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
            return getattr(RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name)
    def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
        """Start the node."""
        if extra_args is None:
            extra_args = self.extra_args
        # Add a new stdout and stderr file each time bitcoind is started
        if stderr is None:
            stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
        if stdout is None:
            stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
        self.stderr = stderr
        self.stdout = stdout
        if cwd is None:
            cwd = self.cwd
        # Delete any existing cookie file -- if such a file exists (eg due to
        # unclean shutdown), it will get overwritten anyway by bitcoind, and
        # potentially interfere with our attempt to authenticate
        delete_cookie_file(self.datadir, self.chain)
        # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
        subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
        self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
        self.running = True
        self.log.debug("bitcoind started, waiting for RPC to come up")
        if self.start_perf:
            self._start_perf()
    def wait_for_rpc_connection(self):
        """Sets up an RPC connection to the bitcoind process. Returns False if unable to connect."""
        # Poll at a rate of four times per second
        poll_per_s = 4
        for _ in range(poll_per_s * self.rpc_timeout):
            if self.process.poll() is not None:
                raise FailedToStartError(self._node_msg(
                    'bitcoind exited with status {} during initialization'.format(self.process.returncode)))
            try:
                rpc = get_rpc_proxy(
                    rpc_url(self.datadir, self.index, self.chain, self.rpchost),
                    self.index,
                    timeout=self.rpc_timeout // 2,  # Shorter timeout to allow for one retry in case of ETIMEDOUT
                    coveragedir=self.coverage_dir,
                )
                rpc.getblockcount()
                # If the call to getblockcount() succeeds then the RPC connection is up
                if self.version_is_at_least(190000):
                    # getmempoolinfo.loaded is available since commit
                    # bb8ae2c (version 0.19.0)
                    wait_until_helper(lambda: rpc.getmempoolinfo()['loaded'], timeout_factor=self.timeout_factor)
                    # Wait for the node to finish reindex, block import, and
                    # loading the mempool. Usually importing happens fast or
                    # even "immediate" when the node is started. However, there
                    # is no guarantee and sometimes ThreadImport might finish
                    # later. This is going to cause intermittent test failures,
                    # because generally the tests assume the node is fully
                    # ready after being started.
                    #
                    # For example, the node will reject block messages from p2p
                    # when it is still importing with the error "Unexpected
                    # block message received"
                    #
                    # The wait is done here to make tests as robust as possible
                    # and prevent racy tests and intermittent failures as much
                    # as possible. Some tests might not need this, but the
                    # overhead is trivial, and the added guarantees are worth
                    # the minimal performance cost.
                self.log.debug("RPC successfully started")
                if self.use_cli:
                    return
                self.rpc = rpc
                self.rpc_connected = True
                self.url = self.rpc.url
                return
            except JSONRPCException as e:  # Initialization phase
                # -28 RPC in warmup
                # -342 Service unavailable, RPC server started but is shutting down due to error
                if e.error['code'] != -28 and e.error['code'] != -342:
                    raise  # unknown JSON RPC exception
            except ConnectionResetError:
                # This might happen when the RPC server is in warmup, but shut down before the call to getblockcount
                # succeeds. Try again to properly raise the FailedToStartError
                pass
            except OSError as e:
                if e.errno == errno.ETIMEDOUT:
                    pass  # Treat identical to ConnectionResetError
                elif e.errno == errno.ECONNREFUSED:
                    pass  # Port not yet open?
                else:
                    raise  # unknown OS error
            except ValueError as e:  # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
                if "No RPC credentials" not in str(e):
                    raise
            time.sleep(1.0 / poll_per_s)
        self._raise_assertion_error("Unable to connect to bitcoind after {}s".format(self.rpc_timeout))
    def wait_for_cookie_credentials(self):
        """Ensures auth cookie credentials can be read, e.g. for testing CLI with -rpcwait before RPC connection is up."""
        self.log.debug("Waiting for cookie credentials")
        # Poll at a rate of four times per second.
        poll_per_s = 4
        for _ in range(poll_per_s * self.rpc_timeout):
            try:
                get_auth_cookie(self.datadir, self.chain)
                self.log.debug("Cookie credentials successfully retrieved")
                return
            except ValueError:  # cookie file not found and no rpcuser or rpcpassword; bitcoind is still starting
                pass            # so we continue polling until RPC credentials are retrieved
            time.sleep(1.0 / poll_per_s)
        self._raise_assertion_error("Unable to retrieve cookie credentials after {}s".format(self.rpc_timeout))
    def generate(self, nblocks, maxtries=1000000):
        """Mine nblocks to this node's deterministic address."""
        self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
        return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
    def get_wallet_rpc(self, wallet_name):
        """Return an RPC (or CLI) wrapper scoped to the named wallet."""
        if self.use_cli:
            return RPCOverloadWrapper(self.cli("-rpcwallet={}".format(wallet_name)), True, self.descriptors)
        else:
            assert self.rpc_connected and self.rpc, self._node_msg("RPC not connected")
            wallet_path = "wallet/{}".format(urllib.parse.quote(wallet_name))
            return RPCOverloadWrapper(self.rpc / wallet_path, descriptors=self.descriptors)
    def version_is_at_least(self, ver):
        """True when the node is tip-of-tree (version None) or its version >= ver."""
        return self.version is None or self.version >= ver
    def stop_node(self, expected_stderr='', wait=0):
        """Stop the node."""
        if not self.running:
            return
        self.log.debug("Stopping node")
        try:
            # Do not use wait argument when testing older nodes, e.g. in feature_backwards_compatibility.py
            if self.version_is_at_least(180000):
                self.stop(wait=wait)
            else:
                self.stop()
        except http.client.CannotSendRequest:
            self.log.exception("Unable to stop node.")
        # If there are any running perf processes, stop them.
        for profile_name in tuple(self.perf_subprocesses.keys()):
            self._stop_perf(profile_name)
        # Check that stderr is as expected
        self.stderr.seek(0)
        stderr = self.stderr.read().decode('utf-8').strip()
        if stderr != expected_stderr:
            raise AssertionError("Unexpected stderr {} != {}".format(stderr, expected_stderr))
        self.stdout.close()
        self.stderr.close()
        del self.p2ps[:]
    def is_node_stopped(self):
        """Checks whether the node has stopped.
        Returns True if the node has stopped. False otherwise.
        This method is responsible for freeing resources (self.process)."""
        if not self.running:
            return True
        return_code = self.process.poll()
        if return_code is None:
            return False
        # process has stopped. Assert that it didn't return an error code.
        assert return_code == 0, self._node_msg(
            "Node returned non-zero exit code (%d) when stopping" % return_code)
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.log.debug("Node stopped")
        return True
    def wait_until_stopped(self, timeout=BITCOIND_PROC_WAIT_TIMEOUT):
        """Poll until is_node_stopped() returns True (or raise on timeout)."""
        wait_until_helper(self.is_node_stopped, timeout=timeout, timeout_factor=self.timeout_factor)
    @contextlib.contextmanager
    def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
        """Context manager: assert that, while the body runs, every string in
        expected_msgs (and none in unexpected_msgs) appears in debug.log."""
        if unexpected_msgs is None:
            unexpected_msgs = []
        time_end = time.time() + timeout * self.timeout_factor
        debug_log = os.path.join(self.datadir, self.chain, 'debug.log')
        with open(debug_log, encoding='utf-8') as dl:
            dl.seek(0, 2)
            prev_size = dl.tell()
        yield
        # Only the portion of the log written after the body ran is scanned.
        while True:
            found = True
            with open(debug_log, encoding='utf-8') as dl:
                dl.seek(prev_size)
                log = dl.read()
            print_log = " - " + "\n - ".join(log.splitlines())
            for unexpected_msg in unexpected_msgs:
                if re.search(re.escape(unexpected_msg), log, flags=re.MULTILINE):
                    self._raise_assertion_error('Unexpected message "{}" partially matches log:\n\n{}\n\n'.format(unexpected_msg, print_log))
            for expected_msg in expected_msgs:
                if re.search(re.escape(expected_msg), log, flags=re.MULTILINE) is None:
                    found = False
            if found:
                return
            if time.time() >= time_end:
                break
            time.sleep(0.05)
        self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
    @contextlib.contextmanager
    def profile_with_perf(self, profile_name):
        """
        Context manager that allows easy profiling of node activity using `perf`.
        See `test/functional/README.md` for details on perf usage.
        Args:
            profile_name (str): This string will be appended to the
                profile data filename generated by perf.
        """
        subp = self._start_perf(profile_name)
        yield
        if subp:
            self._stop_perf(profile_name)
    def _start_perf(self, profile_name=None):
        """Start a perf process to profile this node.
        Returns the subprocess running perf."""
        subp = None
        def test_success(cmd):
            return subprocess.call(
                # shell=True required for pipe use below
                cmd, shell=True,
                stderr=subprocess.DEVNULL, stdout=subprocess.DEVNULL) == 0
        if not sys.platform.startswith('linux'):
            self.log.warning("Can't profile with perf; only available on Linux platforms")
            return None
        if not test_success('which perf'):
            self.log.warning("Can't profile with perf; must install perf-tools")
            return None
        if not test_success('readelf -S {} | grep .debug_str'.format(shlex.quote(self.binary))):
            self.log.warning(
                "perf output won't be very useful without debug symbols compiled into bitcoind")
        output_path = tempfile.NamedTemporaryFile(
            dir=self.datadir,
            prefix="{}.perf.data.".format(profile_name or 'test'),
            delete=False,
        ).name
        cmd = [
            'perf', 'record',
            '-g',                     # Record the callgraph.
            '--call-graph', 'dwarf',  # Compatibility for gcc's --fomit-frame-pointer.
            '-F', '101',              # Sampling frequency in Hz.
            '-p', str(self.process.pid),
            '-o', output_path,
        ]
        subp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        self.perf_subprocesses[profile_name] = subp
        return subp
    def _stop_perf(self, profile_name):
        """Stop (and pop) a perf subprocess."""
        subp = self.perf_subprocesses.pop(profile_name)
        output_path = subp.args[subp.args.index('-o') + 1]
        subp.terminate()
        subp.wait(timeout=10)
        stderr = subp.stderr.read().decode()
        if 'Consider tweaking /proc/sys/kernel/perf_event_paranoid' in stderr:
            self.log.warning(
                "perf couldn't collect data! Try "
                "'sudo sysctl -w kernel.perf_event_paranoid=-1'")
        else:
            report_cmd = "perf report -i {}".format(output_path)
            self.log.info("See perf output by running '{}'".format(report_cmd))
    def assert_start_raises_init_error(self, extra_args=None, expected_msg=None, match=ErrorMatch.FULL_TEXT, *args, **kwargs):
        """Attempt to start the node and expect it to raise an error.
        extra_args: extra arguments to pass through to bitcoind
        expected_msg: regex that stderr should match when bitcoind fails
        Will throw if bitcoind starts without an error.
        Will throw if an expected_msg is provided and it does not match bitcoind's stdout."""
        with tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False) as log_stderr, \
             tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
            try:
                self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
                self.wait_for_rpc_connection()
                self.stop_node()
                self.wait_until_stopped()
            except FailedToStartError as e:
                self.log.debug('bitcoind failed to start: %s', e)
                self.running = False
                self.process = None
                # Check stderr for expected message
                if expected_msg is not None:
                    log_stderr.seek(0)
                    stderr = log_stderr.read().decode('utf-8').strip()
                    if match == ErrorMatch.PARTIAL_REGEX:
                        if re.search(expected_msg, stderr, flags=re.MULTILINE) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not partially match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_REGEX:
                        if re.fullmatch(expected_msg, stderr) is None:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
                    elif match == ErrorMatch.FULL_TEXT:
                        if expected_msg != stderr:
                            self._raise_assertion_error(
                                'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
            else:
                if expected_msg is None:
                    assert_msg = "bitcoind should have exited with an error"
                else:
                    assert_msg = "bitcoind should have exited with expected error " + expected_msg
                self._raise_assertion_error(assert_msg)
    def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
        """Add a p2p connection to the node.
        This method adds the p2p connection to the self.p2ps list and also
        returns the connection to the caller."""
        if 'dstport' not in kwargs:
            kwargs['dstport'] = p2p_port(self.index)
        if 'dstaddr' not in kwargs:
            kwargs['dstaddr'] = '127.0.0.1'
        p2p_conn.peer_connect(**kwargs, net=self.chain, timeout_factor=self.timeout_factor)()
        self.p2ps.append(p2p_conn)
        p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False)
        if wait_for_verack:
            # Wait for the node to send us the version and verack
            p2p_conn.wait_for_verack()
            # At this point we have sent our version message and received the version and verack, however the full node
            # has not yet received the verack from us (in reply to their version). So, the connection is not yet fully
            # established (fSuccessfullyConnected).
            #
            # This shouldn't lead to any issues when sending messages, since the verack will be in-flight before the
            # message we send. However, it might lead to races where we are expecting to receive a message. E.g. a
            # transaction that will be added to the mempool as soon as we return here.
            #
            # So syncing here is redundant when we only want to send a message, but the cost is low (a few milliseconds)
            # in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely.
            p2p_conn.sync_with_ping()
        return p2p_conn
    def num_test_p2p_connections(self):
        """Return number of test framework p2p connections to the node."""
        return len([peer for peer in self.getpeerinfo() if peer['subver'] == MY_SUBVERSION])
    def disconnect_p2ps(self):
        """Close all p2p connections to the node."""
        for p in self.p2ps:
            p.peer_disconnect()
        del self.p2ps[:]
        wait_until_helper(lambda: self.num_test_p2p_connections() == 0, timeout_factor=self.timeout_factor)
class TestNodeCLIAttr:
    """Callable proxy for a single bitcoin-cli command.

    Instances are produced by TestNodeCLI.__getattr__; calling one forwards
    the stored command name plus any arguments to TestNodeCLI.send_cli."""
    def __init__(self, cli, command):
        self.cli = cli
        self.command = command
    def __call__(self, *args, **kwargs):
        """Execute the command immediately and return its result."""
        return self.cli.send_cli(self.command, *args, **kwargs)
    def get_request(self, *args, **kwargs):
        """Return a zero-argument thunk that runs the command when called
        (used by TestNodeCLI.batch)."""
        def _request():
            return self(*args, **kwargs)
        return _request
def arg_to_cli(arg):
    """Convert a Python value into the string form bitcoin-cli expects.

    bools must be checked before the generic str() fallback because
    str(True) is "True" while the CLI wants lowercase "true"/"false".
    None becomes JSON "null"; dicts and lists are JSON-encoded (Decimals
    handled by EncodeDecimal); everything else is stringified.
    """
    if isinstance(arg, bool):
        return str(arg).lower()
    elif arg is None:
        return 'null'
    elif isinstance(arg, (dict, list)):
        # idiomatic tuple-of-types check instead of two isinstance calls
        return json.dumps(arg, default=EncodeDecimal)
    else:
        return str(arg)
class TestNodeCLI():
    """Interface to bitcoin-cli for an individual node"""
    def __init__(self, binary, datadir, chain):
        # Extra bitcoin-cli options (e.g. -rpcwallet=...) set via __call__.
        self.options = []
        self.binary = binary
        self.datadir = datadir
        self.chain = chain
        # Text piped to bitcoin-cli's stdin (e.g. for -stdin usage).
        self.input = None
        self.log = logging.getLogger('TestFramework.bitcoincli')
    def __call__(self, *options, input=None):
        # TestNodeCLI is callable with bitcoin-cli command-line options
        # Returns a fresh instance so the base object's options stay untouched.
        cli = TestNodeCLI(self.binary, self.datadir, self.chain)
        cli.options = [str(o) for o in options]
        cli.input = input
        return cli
    def __getattr__(self, command):
        # Unknown attribute access becomes a CLI command proxy, mirroring
        # how TestNode dispatches unknown attributes to RPC.
        return TestNodeCLIAttr(self, command)
    def batch(self, requests):
        """Run a list of request thunks, collecting results/errors per item."""
        results = []
        for request in requests:
            try:
                results.append(dict(result=request()))
            except JSONRPCException as e:
                results.append(dict(error=e))
        return results
    def send_cli(self, command=None, *args, **kwargs):
        """Run bitcoin-cli command. Deserializes returned string as python object."""
        pos_args = [arg_to_cli(arg) for arg in args]
        named_args = [str(key) + "=" + arg_to_cli(value) for (key, value) in kwargs.items()]
        assert not (pos_args and named_args), "Cannot use positional arguments and named arguments in the same bitcoin-cli call"
        p_args = [self.binary, "-datadir=" + self.datadir, "-chain=" + self.chain] + self.options
        if named_args:
            # -named tells bitcoin-cli to interpret key=value arguments.
            p_args += ["-named"]
        if command is not None:
            p_args += [command]
        p_args += pos_args + named_args
        self.log.debug("Running bitcoin-cli {}".format(p_args[2:]))
        process = subprocess.Popen(p_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        cli_stdout, cli_stderr = process.communicate(input=self.input)
        returncode = process.poll()
        if returncode:
            # bitcoin-cli prints RPC errors as "error code: N\nerror message:\n...";
            # re-raise those as JSONRPCException so tests can match on code.
            match = re.match(r'error code: ([-0-9]+)\nerror message:\n(.*)', cli_stderr)
            if match:
                code, message = match.groups()
                raise JSONRPCException(dict(code=int(code), message=message))
            # Ignore cli_stdout, raise with cli_stderr
            raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
        try:
            return json.loads(cli_stdout, parse_float=decimal.Decimal)
        except (json.JSONDecodeError, decimal.InvalidOperation):
            # Non-JSON output (e.g. plain text help) is returned as a string.
            return cli_stdout.rstrip("\n")
class RPCOverloadWrapper():
    """Wrap an RPC (or CLI) connection, transparently rewriting legacy wallet
    calls (importprivkey, addmultisigaddress, ...) into their descriptor-wallet
    equivalents when the underlying wallet is a descriptor wallet.

    Unknown attributes are delegated to the wrapped connection via __getattr__.
    """
    def __init__(self, rpc, cli=False, descriptors=False):
        self.rpc = rpc
        self.is_cli = cli
        self.descriptors = descriptors
    def __getattr__(self, name):
        # Anything not overloaded below goes straight to the wrapped connection.
        return getattr(self.rpc, name)
    def _is_legacy_wallet(self):
        """True when the active wallet is not a descriptor wallet."""
        # getwalletinfo may lack the 'descriptors' key on older nodes; a
        # missing or falsy value both mean "legacy wallet".
        return not self.getwalletinfo().get('descriptors')
    def createwallet(self, wallet_name, disable_private_keys=None, blank=None, passphrase='', avoid_reuse=None, descriptors=None, load_on_startup=None):
        # Default the descriptors flag to the framework-wide setting.
        if descriptors is None:
            descriptors = self.descriptors
        return self.__getattr__('createwallet')(wallet_name, disable_private_keys, blank, passphrase, avoid_reuse, descriptors, load_on_startup)
    def importprivkey(self, privkey, label=None, rescan=None):
        if self._is_legacy_wallet():
            return self.__getattr__('importprivkey')(privkey, label, rescan)
        # Descriptor wallets import keys via importdescriptors with a combo()
        # descriptor covering all standard output types for the key.
        desc = descsum_create('combo(' + privkey + ')')
        req = [{
            'desc': desc,
            'timestamp': 0 if rescan else 'now',
            'label': label if label else ''
        }]
        import_res = self.importdescriptors(req)
        if not import_res[0]['success']:
            raise JSONRPCException(import_res[0]['error'])
    def addmultisigaddress(self, nrequired, keys, label=None, address_type=None):
        if self._is_legacy_wallet():
            return self.__getattr__('addmultisigaddress')(nrequired, keys, label, address_type)
        # Build the multisig descriptor first, then import it as watch-only.
        cms = self.createmultisig(nrequired, keys, address_type)
        req = [{
            'desc': cms['descriptor'],
            'timestamp': 0,
            'label': label if label else ''
        }]
        import_res = self.importdescriptors(req)
        if not import_res[0]['success']:
            raise JSONRPCException(import_res[0]['error'])
        return cms
    def importpubkey(self, pubkey, label=None, rescan=None):
        if self._is_legacy_wallet():
            return self.__getattr__('importpubkey')(pubkey, label, rescan)
        desc = descsum_create('combo(' + pubkey + ')')
        req = [{
            'desc': desc,
            'timestamp': 0 if rescan else 'now',
            'label': label if label else ''
        }]
        import_res = self.importdescriptors(req)
        if not import_res[0]['success']:
            raise JSONRPCException(import_res[0]['error'])
    def importaddress(self, address, label=None, rescan=None, p2sh=None):
        if self._is_legacy_wallet():
            return self.__getattr__('importaddress')(address, label, rescan, p2sh)
        # A hex string is treated as a raw output script, anything else as an
        # address. int() raising ValueError distinguishes the two (was a bare
        # except; narrowed since only the int() conversion can fail here).
        is_hex = False
        try:
            int(address, 16)
            is_hex = True
            desc = descsum_create('raw(' + address + ')')
        except ValueError:
            desc = descsum_create('addr(' + address + ')')
        reqs = [{
            'desc': desc,
            'timestamp': 0 if rescan else 'now',
            'label': label if label else ''
        }]
        if is_hex and p2sh:
            # Also import the P2SH-wrapped form of the raw script.
            reqs.append({
                'desc': descsum_create('p2sh(raw(' + address + '))'),
                'timestamp': 0 if rescan else 'now',
                'label': label if label else ''
            })
        import_res = self.importdescriptors(reqs)
        for res in import_res:
            if not res['success']:
                raise JSONRPCException(res['error'])
| |
import django
from django.conf import settings
from django.contrib.admin.templatetags.admin_static import static
from django.core.urlresolvers import reverse
from django.forms.widgets import Select, SelectMultiple
from django import forms
from django.utils.safestring import mark_safe
from django.utils.encoding import force_text
from django.utils.html import escape
import json
from smart_selects.utils import unicode_sorter, sort_results
# Django >= 1.7 exposes model loading through the app registry; fall back to
# the pre-1.7 loading module when that import is unavailable.
try:
    from django.apps import apps
    get_model = apps.get_model
except ImportError:
    from django.db.models.loading import get_model
# Use the jQuery bundled with Django's admin unless the project opts out via
# settings.USE_DJANGO_JQUERY.
if django.VERSION >= (1, 2, 0) and getattr(settings, 'USE_DJANGO_JQUERY', True):
    USE_DJANGO_JQUERY = True
else:
    USE_DJANGO_JQUERY = False
# CDN fallback used when the admin's jQuery is not; overridable via settings.
JQUERY_URL = getattr(settings, 'JQUERY_URL', 'https://ajax.googleapis.com/ajax/libs/jquery/2.2.0/jquery.min.js')
# Optional prefix prepended to the reversed chained-filter URLs.
URL_PREFIX = getattr(settings, "SMART_SELECTS_URL_PREFIX", "")
class ChainedSelect(Select):
    def __init__(self, to_app_name, to_model_name, chained_field, chained_model_field,
                 foreign_key_app_name, foreign_key_model_name, foreign_key_field_name,
                 show_all, auto_choose, manager=None, view_name=None, *args, **kwargs):
        """Store the chained-select configuration used later by render().

        to_app_name/to_model_name identify the model whose instances populate
        this select; chained_field is the form field this one follows and
        chained_model_field the model field filtered by its value. show_all,
        auto_choose, manager and view_name tune the chained_filter view URL
        built in render().
        """
        self.to_app_name = to_app_name
        self.to_model_name = to_model_name
        self.chained_field = chained_field
        self.chained_model_field = chained_model_field
        self.show_all = show_all
        self.auto_choose = auto_choose
        self.manager = manager
        self.view_name = view_name
        self.foreign_key_app_name = foreign_key_app_name
        self.foreign_key_model_name = foreign_key_model_name
        self.foreign_key_field_name = foreign_key_field_name
        # NOTE(review): super(Select, self) skips Select.__init__ and calls
        # Select's parent instead -- confirm this is intentional before
        # changing it to super(ChainedSelect, self).
        super(Select, self).__init__(*args, **kwargs)
@property
def media(self):
"""Media defined as a dynamic property instead of an inner class."""
vendor = '' if django.VERSION < (1, 9, 0) else 'vendor/jquery/'
extra = '' if settings.DEBUG else '.min'
js = [
'%sjquery%s.js' % (vendor, extra),
'jquery.init.js',
]
if USE_DJANGO_JQUERY:
js = [static('admin/js/%s' % url) for url in js]
elif JQUERY_URL:
js = [JQUERY_URL]
js = js + [static('smart-selects/admin/js/chainedfk.js')]
return forms.Media(js=js)
    def render(self, name, value, attrs=None, choices=()):
        """Render the select plus the inline JS that wires it to the chained
        field, fetching filtered options from the chained_filter view."""
        # Inside a formset the field name is prefixed (e.g. "form-0-field");
        # apply the same prefix to the chained field's id.
        if len(name.split('-')) > 1:  # formset
            chained_field = '-'.join(name.split('-')[:-1] + [self.chained_field])
        else:
            chained_field = self.chained_field
        if not self.view_name:
            if self.show_all:
                view_name = "chained_filter_all"
            else:
                view_name = "chained_filter"
        else:
            view_name = self.view_name
        kwargs = {
            'app': self.to_app_name,
            'model': self.to_model_name,
            'field': self.chained_model_field,
            'foreign_key_app_name': self.foreign_key_app_name,
            'foreign_key_model_name': self.foreign_key_model_name,
            'foreign_key_field_name': self.foreign_key_field_name,
            'value': '1'
        }
        if self.manager is not None:
            kwargs.update({'manager': self.manager})
        # Reverse with a dummy value ('1') then strip the last two path
        # segments; the JS appends the real value at request time.
        url = URL_PREFIX + ("/".join(reverse(view_name, kwargs=kwargs).split("/")[:-2]))
        if self.auto_choose:
            auto_choose = 'true'
        else:
            auto_choose = 'false'
        iterator = iter(self.choices)
        if hasattr(iterator, '__next__'):
            empty_label = iterator.__next__()[1]
        else:
            # Hacky way to getting the correct empty_label from the field instead of a hardcoded '--------'
            empty_label = iterator.next()[1]
        js = """
        <script type="text/javascript">
        (function($) {
            var chainfield = "#id_%(chainfield)s";
            var url = "%(url)s";
            var id = "#%(id)s";
            var value = %(value)s;
            var auto_choose = %(auto_choose)s;
            var empty_label = "%(empty_label)s";
            $(document).ready(function() {
                chainedfk.init(chainfield, url, id, value, empty_label, auto_choose);
            });
        })(jQuery || django.jQuery);
        </script>
        """
        js = js % {"chainfield": chained_field,
                   "url": url,
                   "id": attrs['id'],
                   'value': 'undefined' if value is None or value == '' else value,
                   'auto_choose': auto_choose,
                   'empty_label': escape(empty_label)}
        # Pre-populate the option list when the form already has a value so
        # the selection survives server-side re-render (e.g. validation error).
        final_choices = []
        if value:
            available_choices = self._get_available_choices(self.queryset, value)
            for choice in available_choices:
                final_choices.append((choice.pk, force_text(choice)))
        if len(final_choices) > 1:
            final_choices = [("", (empty_label))] + final_choices
        if self.show_all:
            # Show every choice, sorted, after the filtered ones.
            final_choices.append(("", (empty_label)))
            self.choices = list(self.choices)
            self.choices.sort(key=lambda x: unicode_sorter(x[1]))
            for ch in self.choices:
                if ch not in final_choices:
                    final_choices.append(ch)
        self.choices = ()
        final_attrs = self.build_attrs(attrs, name=name)
        if 'class' in final_attrs:
            final_attrs['class'] += ' chained'
        else:
            final_attrs['class'] = 'form-control chained'
        output = js
        output += super(ChainedSelect, self).render(name, value, final_attrs, choices=final_choices)
        return mark_safe(output)
def _get_available_choices(self, queryset, value):
"""
get possible choices for selection
"""
item = queryset.filter(pk=value).first()
if item:
try:
pk = getattr(item, self.chained_model_field + "_id")
filter = {self.chained_model_field: pk}
except AttributeError:
try: # maybe m2m?
pks = getattr(item, self.chained_model_field).all().values_list('pk', flat=True)
filter = {self.chained_model_field + "__in": pks}
except AttributeError:
try: # maybe a set?
pks = getattr(item, self.chained_model_field + "_set").all().values_list('pk', flat=True)
filter = {self.chained_model_field + "__in": pks}
except: # give up
filter = {}
filtered = list(get_model(self.to_app_name, self.to_model_name).objects.filter(**filter).distinct())
sort_results(filtered)
else:
# invalid value for queryset
filtered = []
return filtered
class ChainedSelectMultiple(SelectMultiple):
    """Multi-select widget whose choices depend on another form field.

    Choices are rendered empty; chainedm2m.js fetches the real options from
    the chained-filter view once the chain field has a value.
    """

    def __init__(self, to_app_name, to_model_name, chain_field, chained_model_field,
                 foreign_key_app_name, foreign_key_model_name, foreign_key_field_name,
                 auto_choose, manager=None, *args, **kwargs):
        self.to_app_name = to_app_name
        self.to_model_name = to_model_name
        self.chain_field = chain_field
        self.chained_model_field = chained_model_field
        self.auto_choose = auto_choose
        self.manager = manager
        self.foreign_key_app_name = foreign_key_app_name
        self.foreign_key_model_name = foreign_key_model_name
        self.foreign_key_field_name = foreign_key_field_name
        # BUG FIX: was ``super(SelectMultiple, self).__init__(...)``, which
        # starts MRO lookup *after* SelectMultiple and therefore skips any
        # initialization SelectMultiple itself defines.
        super(ChainedSelectMultiple, self).__init__(*args, **kwargs)

    @property
    def media(self):
        """Media defined as a dynamic property instead of an inner class."""
        vendor = '' if django.VERSION < (1, 9, 0) else 'vendor/jquery/'
        extra = '' if settings.DEBUG else '.min'
        js = [
            '%sjquery%s.js' % (vendor, extra),
            'jquery.init.js',
        ]
        if USE_DJANGO_JQUERY:
            js = [static('admin/js/%s' % url) for url in js]
        elif JQUERY_URL:
            js = [JQUERY_URL]
        js = js + [static('smart-selects/admin/js/chainedm2m.js')]
        return forms.Media(js=js)

    def render(self, name, value, attrs=None, choices=()):
        """Render the multi-select plus the inline JS that wires up chaining."""
        if len(name.split('-')) > 1:  # formset: carry the form prefix over
            chain_field = '-'.join(name.split('-')[:-1] + [self.chain_field])
        else:
            chain_field = self.chain_field
        view_name = 'chained_filter'
        kwargs = {
            'app': self.to_app_name,
            'model': self.to_model_name,
            'field': self.chained_model_field,
            'foreign_key_app_name': self.foreign_key_app_name,
            'foreign_key_model_name': self.foreign_key_model_name,
            'foreign_key_field_name': self.foreign_key_field_name,
            'value': '1'
        }
        if self.manager is not None:
            kwargs.update({'manager': self.manager})
        # Reverse with a dummy value and strip the last two path segments;
        # the JS appends the real value at request time.
        url = URL_PREFIX + ("/".join(reverse(view_name, kwargs=kwargs).split("/")[:-2]))
        if self.auto_choose:
            auto_choose = 'true'
        else:
            auto_choose = 'false'
        js = """
        <script type="text/javascript">
        (function($) {
            var chainfield = "#id_%(chainfield)s";
            var url = "%(url)s";
            var id = "#%(id)s";
            var value = %(value)s;
            var auto_choose = %(auto_choose)s;
            $(document).ready(function() {
                chainedm2m.init(chainfield, url, id, value, auto_choose);
            });
        })(jQuery || django.jQuery);
        </script>
        """
        js = js % {"chainfield": chain_field,
                   "url": url,
                   "id": attrs['id'],
                   'value': json.dumps(value),
                   'auto_choose': auto_choose}
        # since we cannot deduce the value of the chained_field
        # we just render empty choices here and let the js
        # fetch related choices later
        final_choices = []
        self.choices = ()  # need to set explicitly because the Select widget will use it in render
        final_attrs = self.build_attrs(attrs, name=name)
        if 'class' in final_attrs:
            final_attrs['class'] += ' chained'
        else:
            final_attrs['class'] = 'form-control chained'
        output = super(ChainedSelectMultiple, self).render(name, value, final_attrs, choices=final_choices)
        output += js
        return mark_safe(output)
| |
"""
Low-level serial communication for Trinamic TMCM-140-42-SE controller
(used internally for the Thorlabs MFC1)
"""
import serial, struct, time, collections
try:
# this is nicer because it provides deadlock debugging information
from acq4.util.Mutex import RecursiveMutex as RLock
except ImportError:
from threading import RLock
try:
from ..SerialDevice import SerialDevice, TimeoutError, DataError
except ValueError:
## relative imports not allowed when running from command prompt, so
## we adjust sys.path when running the script for testing
if __name__ == '__main__':
import sys, os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from SerialDevice import SerialDevice, TimeoutError, DataError
def threadsafe(method):
    """Decorator: serialize calls to *method* through ``self.lock``.

    The decorated object must expose a ``lock`` attribute usable as a
    context manager (an RLock).  ``functools.wraps`` is applied so the
    wrapper keeps the wrapped method's name and docstring, which the
    original version lost (it reported ``lockMutex`` in tracebacks).
    """
    import functools

    @functools.wraps(method)
    def lockMutex(self, *args, **kwds):
        with self.lock:
            return method(self, *args, **kwds)
    return lockMutex
# TMCL command numbers (see the Trinamic TMCL firmware manual).
# FIX: the original dict listed 'sco'/'gco'/'cco' twice with identical
# values; the duplicates are removed (runtime behavior unchanged, since a
# dict literal silently keeps only one entry per key).
COMMANDS = {
    # motion
    'rol': 2,
    'ror': 1,
    'mvp': 4,
    'mst': 3,
    'rfs': 13,
    # coordinate storage
    'sco': 30,
    'cco': 32,
    'gco': 31,
    # axis parameters
    'sap': 5,
    'gap': 6,
    'stap': 7,
    'rsap': 8,
    # global parameters
    'sgp': 9,
    'ggp': 10,
    'stgp': 11,
    'rsgp': 12,
    # digital / analog I/O
    'sio': 14,
    'gio': 15,
    # program flow and arithmetic
    'calc': 19,
    'comp': 20,
    'jc': 21,
    'ja': 22,
    'csub': 23,
    'rsub': 24,
    'wait': 27,
    'stop': 28,
    'calcx': 33,
    'aap': 34,
    'agp': 35,
    'aco': 39,
    'sac': 29,
    # application (TMCL program) control
    'stop_application': 128,
    'run_application': 129,
    'step_application': 130,
    'reset_application': 131,
    'start_download': 132,
    'stop_download': 133,
    'get_application_status': 135,
    'get_firmware_version': 136,
    'restore_factory_settings': 137,
}

PARAMETERS = {  # negative values indicate read-only parameters
    'target_position': 0,
    'actual_position': 1,
    'target_speed': 2,
    'actual_speed': 3,
    'maximum_speed': 4,
    'maximum_acceleration': 5,
    'maximum_current': 6,
    'standby_current': 7,
    'target_pos_reached': 8,
    'ref_switch_status': 9,
    'right_limit_switch_status': 10,
    'left_limit_switch_status': 11,
    'right_limit_switch_disable': 12,
    'left_limit_switch_disable': 13,
    'minimum_speed': -130,
    'acceleration': -135,
    'ramp_mode': 138,
    'microstep_resolution': 140,
    'soft_stop_flag': 149,
    'ramp_divisor': 153,
    'pulse_divisor': 154,
    'referencing_mode': 193,
    'referencing_search_speed': 194,
    'referencing_switch_speed': 195,
    'distance_end_switches': 196,
    'mixed_decay_threshold': 203,
    'freewheeling': 204,
    'stall_detection_threshold': 205,
    'actual_load_value': 206,
    'driver_error_flags': -208,
    'encoder_position': 209,
    'encoder_prescaler': 210,
    'fullstep_threshold': 211,
    'maximum_encoder_deviation': 212,
    'power_down_delay': 214,
    'absolute_encoder_value': -215,
}

# Bank-0 global parameters; 'gpX' names address bank-2 user variables
# directly (see TMCM140.get_global / set_global).
GLOBAL_PARAMETERS = {
    'eeprom_magic': 64,
    'baud_rate': 65,
    'serial_address': 66,
    'ascii_mode': 67,
    'eeprom_lock': 73,
    'auto_start_mode': 77,
    'tmcl_code_protection': 81,
    'coordinate_storage': 84,
    'tmcl_application_status': 128,
    'download_mode': 129,
    'tmcl_program_counter': 130,
    'tick_timer': 132,
    'random_number': -133,
}

# Operator codes for the 'calc'/'calcx' accumulator instructions.
OPERATORS = {
    'add': 0,
    'sub': 1,
    'mul': 2,
    'div': 3,
    'mod': 4,
    'and': 5,
    'or': 6,
    'xor': 7,
    'not': 8,
    'load': 9,
    'swap': 10,
}

# Condition codes for the 'jc' conditional-jump instruction.
CONDITIONS = {
    'ze': 0,
    'nz': 1,
    'eq': 2,
    'ne': 3,
    'gt': 4,
    'ge': 5,
    'lt': 6,
    'le': 7,
    'eto': 8,
    'eal': 9,
    'esd': 12,
}
# Error status codes in a reply packet (codes >= 100 indicate success and
# are never looked up here).
STATUS = {
    1: "Wrong checksum",
    2: "Invalid command",
    3: "Wrong type",
    4: "Invalid value",
    5: "Configuration EEPROM locked",
    6: "Command not available",
}


class TMCMError(Exception):
    """Raised when the controller reports an error status code.

    The numeric code is kept on ``self.status``; the human-readable text
    from STATUS becomes the exception message.
    """

    def __init__(self, status):
        self.status = status
        msg = STATUS[status]
        # BUG FIX: was ``Exception.__init__(msg)`` -- the *self* argument was
        # missing, so the message never reached the exception and str(err)
        # was empty.
        Exception.__init__(self, msg)
class TMCM140(SerialDevice):
    """Driver for the Trinamic TMCM-140-42-SE stepper controller.

    Implements the binary TMCL serial protocol: each request is a 9-byte
    packet (module address, command, type, motor, 4-byte value, checksum);
    each reply is read and checksum-verified before the next command may be
    sent (enforced via ``_waiting_for_reply``).
    """

    def __init__(self, port, baudrate=9600, module_addr=1):
        """
        port: serial COM port (eg. COM3 or /dev/ttyACM0)
        baudrate: 9600 by default
        module_addr: 1 by default
        """
        # NOTE(review): the threading.RLock fallback used when acq4 is not
        # installed does not accept a *debug* kwarg -- this line assumes the
        # acq4 RecursiveMutex is available; confirm the fallback path.
        self.lock = RLock(debug=True)
        self.port = port
        assert isinstance(module_addr, int)
        assert module_addr > 0
        self.module_addr = module_addr
        # ASCII-mode address character ('A' for address 1, 'B' for 2, ...).
        self.module_str = chr(module_addr+64)
        # Half-duplex guard: True after a command is sent, False once the
        # reply has been consumed.
        self._waiting_for_reply = False
        SerialDevice.__init__(self, port=self.port, baudrate=baudrate)

    @threadsafe
    def command(self, cmd, type, motor, value):
        """Send a command to the controller and return the reply.

        If an error is returned from the controller then raise an exception.
        """
        self._send_cmd(cmd, type, motor, value)
        return self._get_reply()

    def rotate(self, velocity):
        """Begin rotating motor.

        velocity: -2047 to +2047
                  negative values turn left; positive values turn right.
        """
        assert isinstance(velocity, int)
        assert -2047 <= velocity <= 2047
        if velocity < 0:
            direction = 'l'
            velocity = -velocity
        else:
            direction = 'r'
        # 'rol' (rotate left) or 'ror' (rotate right) with unsigned speed.
        self.command('ro'+direction, 0, 0, velocity)

    def stop(self):
        """Stop the motor.

        Note: does not stop currently running programs.
        """
        self.command('mst', 0, 0, 0)

    def move(self, pos, relative=False, velocity=None):
        """Rotate until reaching *pos*.

        pos: The target position
        relative: If True, then *pos* is interpreted as relative to the current
                  position
        velocity: Optionally set the target velocity before moving
                  (not implemented yet -- raises NotImplementedError)
        """
        assert isinstance(pos, int)
        assert -2**32 <= pos < 2**32
        if velocity is not None:
            assert isinstance(velocity, int)
            assert 0 <= velocity < 2048
            # Setting the speed inline is a planned feature.
            raise NotImplementedError()
        type = 1 if relative else 0
        self.command('mvp', type, 0, pos)

    def get_param(self, param):
        """Return the current value of axis parameter *param* (see PARAMETERS)."""
        # abs(): read-only parameters are stored with negative numbers.
        pnum = abs(PARAMETERS[param])
        return self.command('gap', pnum, 0, 0)[4]

    def __getitem__(self, param):
        # Dict-style access: dev['actual_position'].
        return self.get_param(param)

    def set_param(self, param, value, **kwds):
        """Set a parameter value.

        If value is 'accum' then the parameter is set from the accumulator
        register.
        """
        pnum = PARAMETERS[param]
        if pnum < 0:
            raise TypeError("Parameter %s is read-only." % param)
        # Guard rail: high current settings can overheat/damage the motor.
        if pnum in (PARAMETERS['maximum_current'], PARAMETERS['standby_current']) and value > 100:
            if kwds.get('force', False) is not True:
                raise Exception("Refusing to set current > 100 (this can damage the motor). "
                                "To override, use force=True.")
        if value == 'accum':
            self.command('aap', pnum, 0, 0)
        else:
            self.command('sap', pnum, 0, value)

    @threadsafe
    def set_params(self, **kwds):
        """Set multiple parameters.

        The driver is thread-locked until all parameters are set.
        """
        for param, value in kwds.items():
            self.set_param(param, value)

    def __setitem__(self, param, value):
        # Dict-style access: dev['maximum_speed'] = 500.
        return self.set_param(param, value)

    def get_global(self, param):
        """Return a global parameter or copy global to accumulator.

        Use param='gpX' to refer to general-purpose variables (bank 2).
        """
        if param.startswith('gp'):
            pnum = int(param[2:])
            bank = 2
        else:
            pnum = abs(GLOBAL_PARAMETERS[param])
            bank = 0
        return self.command('ggp', pnum, bank, 0)[4]

    def set_global(self, param, value):
        """Set a global parameter, or copy the accumulator into it ('accum')."""
        if param.startswith('gp'):
            pnum = int(param[2:])
            bank = 2
        else:
            pnum = GLOBAL_PARAMETERS[param]
            bank = 0
        if pnum < 0:
            raise TypeError("Parameter %s is read-only." % param)
        if value == 'accum':
            self.command('agp', pnum, bank, 0)
        else:
            self.command('sgp', pnum, bank, value)

    def stop_program(self):
        """Stop the currently running TMCL program.
        """
        self.command('stop_application', 0, 0, 0)

    def start_program(self, address=None):
        """Start running TMCL program code from the given address (in bytes?),
        or from the current address if None.
        """
        if address is None:
            self.command('run_application', 0, 0, 0)
        else:
            self.command('run_application', 1, 0, address)

    def start_download(self, address=0):
        """Begin loading TMCL commands into EEPROM.
        """
        self.command('start_download', 0, 0, address)

    def stop_download(self):
        """Finish loading TMCL commands into EEPROM.
        """
        self.command('stop_download', 0, 0, 0)

    def write_program(self, address=0):
        """Return a ProgramManager context for downloading a TMCL program."""
        return ProgramManager(self, address)

    def program_status(self):
        """Return current program status:
        0=stop, 1=run, 2=step, 3=reset
        """
        return self.command('get_application_status', 0, 0, 0)[4]

    def calc(self, op, value):
        """Apply accumulator operation *op* (see OPERATORS) with *value*."""
        opnum = OPERATORS[op]
        # 'swap' (10) is only valid for calcx, not calc.
        if opnum > 9:
            raise TypeError("Operator %s invalid for calc" % op)
        self.command('calc', opnum, 0, value)

    def calcx(self, op):
        """Apply accumulator<->X-register operation *op*."""
        opnum = OPERATORS[op]
        self.command('calcx', opnum, 0, 0)

    def comp(self, val):
        """Compare the accumulator against *val* (sets condition flags)."""
        self.command('comp', 0, 0, val)

    def jump(self, *args):
        """Program jump to *addr* (instruction index).

        Usage:
            jump(address)
            jump(cond, address)

        Where *cond* may be ze, nz, eq, ne, gt, ge, lt, le, eto, eal, or esd.
        """
        if len(args) == 1:
            assert isinstance(args[0], int)
            self.command('ja', 0, 0, args[0])
        else:
            cnum = CONDITIONS[args[0]]
            self.command('jc', cnum, 0, args[1])

    def _send_cmd(self, cmd, type, motor, value):
        """Send a command to the controller.
        """
        # Protocol is strictly request/reply; refuse to interleave.
        if self._waiting_for_reply:
            raise Exception("Cannot send command; previous reply has not been "
                            "received yet.")
        cmd_num = COMMANDS[cmd]
        assert isinstance(type, int)
        assert isinstance(motor, int)

        # Try packing the value first as unsigned, then signed. (the overlapping
        # integer ranges have identical bit representation, so there is no
        # ambiguity)
        try:
            cmd = struct.pack('>BBBBI', self.module_addr, cmd_num, type, motor, value)
        except struct.error:
            cmd = struct.pack('>BBBBi', self.module_addr, cmd_num, type, motor, value)

        # Trailing checksum byte = sum of all packet bytes mod 256.
        chksum = sum(bytearray(cmd)) % 256
        out = cmd + struct.pack('B', chksum)
        self.write(out)
        self._waiting_for_reply = True

    def _get_reply(self):
        """Read and parse a reply from the controller.

        Raise an exception if an error was reported.
        """
        if not self._waiting_for_reply:
            raise Exception("No reply expected.")
        try:
            # Replies are always exactly 9 bytes.
            d = self.read(9)
        finally:
            self._waiting_for_reply = False
        # Anything beyond 9 bytes means the stream is out of sync.
        d2 = self.readAll()
        if len(d2) > 0:
            raise Exception("Error: extra data while reading reply.")

        parts = struct.unpack('>BBBBiB', d)
        reply_addr, module_addr, status, cmd_num, value, chksum = parts

        if chksum != sum(bytearray(d[:-1])) % 256:
            raise Exception("Invalid checksum reading from controller.")

        # Status codes below 100 are errors (100/101 indicate success).
        if status < 100:
            raise TMCMError(status)

        return parts
class ProgramManager(object):
    """Context manager for downloading a TMCL program to the controller.

    Entering acquires the device lock and switches the controller into
    download mode at ``start``; leaving appends a final 'stop' instruction,
    ends the download and releases the lock.  Unknown attribute accesses are
    forwarded to the underlying device, with each forwarded access counted
    in ``count`` (one per downloaded instruction).
    """

    def __init__(self, mcm, start=0):
        self.mcm = mcm
        self.start = start
        self.count = 0

    def __enter__(self):
        device = self.mcm
        device.lock.acquire()
        device.start_download(self.start)
        return self

    def __exit__(self, *exc_info):
        device = self.mcm
        # Append one trailing 'stop' so the new program cannot run on into
        # instructions left over from a previous, longer download.
        device.command('stop', 0, 0, 0)
        device.stop_download()
        device.lock.release()

    def __getattr__(self, name):
        # Delegate anything not defined here to the device, counting the
        # access as one downloaded instruction.
        self.count = self.count + 1
        return getattr(self.mcm, name)
| |
# -*- coding: utf-8 -*-
"""
flask_security.forms
~~~~~~~~~~~~~~~~~~~~
Flask-Security forms module
:copyright: (c) 2012 by Matt Wright.
:copyright: (c) 2017 by CERN.
:license: MIT, see LICENSE for more details.
"""
import inspect
from flask import Markup, current_app, flash, request
from flask_login import current_user
from flask_wtf import FlaskForm as BaseForm
from speaklater import make_lazy_gettext
from wtforms import BooleanField, Field, HiddenField, PasswordField, \
StringField, SubmitField, ValidationError, validators
from .confirmable import requires_confirmation
from .utils import _, _datastore, config_value, get_message, hash_password, \
localize_callback, url_for_security, validate_redirect_url
lazy_gettext = make_lazy_gettext(lambda: localize_callback)
# Default (lazily translated) labels for the built-in form fields, keyed by
# the short names used in get_form_field_label().
_default_field_labels = {
    'email': _('Email Address'),
    'password': _('Password'),
    'remember_me': _('Remember Me'),
    'login': _('Login'),
    'register': _('Register'),
    'send_confirmation': _('Resend Confirmation Instructions'),
    'recover_password': _('Recover Password'),
    'reset_password': _('Reset Password'),
    'retype_password': _('Retype Password'),
    'new_password': _('New Password'),
    'change_password': _('Change Password'),
    'send_login_link': _('Send Login Link')
}
class ValidatorMixin(object):
    """Resolve ALL-CAPS message keys to configured message text on first use."""

    def __call__(self, form, field):
        # An upper-case message is treated as a Flask-Security message key and
        # replaced (once, in place) by its localized text.
        if self.message and self.message.isupper():
            self.message = get_message(self.message)[0]
        return super(ValidatorMixin, self).__call__(form, field)


class EqualTo(ValidatorMixin, validators.EqualTo):
    pass


class Required(ValidatorMixin, validators.DataRequired):
    pass


class Email(ValidatorMixin, validators.Email):
    pass


class Length(ValidatorMixin, validators.Length):
    pass


# Shared validator instances used by the form mixins below; the message
# strings are keys resolved lazily by ValidatorMixin.
email_required = Required(message='EMAIL_NOT_PROVIDED')
email_validator = Email(message='INVALID_EMAIL_ADDRESS')
password_required = Required(message='PASSWORD_NOT_PROVIDED')
password_length = Length(min=6, max=128, message='PASSWORD_INVALID_LENGTH')
def get_form_field_label(key):
    """Return the lazily-translated default label for *key* ('' if unknown)."""
    return lazy_gettext(_default_field_labels.get(key, ''))


def unique_user_email(form, field):
    """Field validator: fail when the email already belongs to a user."""
    if _datastore.get_user(field.data) is not None:
        msg = get_message('EMAIL_ALREADY_ASSOCIATED', email=field.data)[0]
        raise ValidationError(msg)


def valid_user_email(form, field):
    """Field validator: resolve the email to a user and store it on the form."""
    form.user = _datastore.get_user(field.data)
    if form.user is None:
        raise ValidationError(get_message('USER_DOES_NOT_EXIST')[0])
class Form(BaseForm):
    """Base form for all Flask-Security forms."""

    def __init__(self, *args, **kwargs):
        if current_app.testing:
            # Disable the CSRF token time limit while testing to avoid
            # spurious token-expired failures.
            self.TIME_LIMIT = None
        super(Form, self).__init__(*args, **kwargs)
class EmailFormMixin():
    # Plain email field: must be present and well-formed.
    email = StringField(
        get_form_field_label('email'),
        validators=[email_required, email_validator])


class UserEmailFormMixin():
    # Email field that must belong to an existing user; valid_user_email
    # stores the resolved user on the form as ``form.user``.
    user = None
    email = StringField(
        get_form_field_label('email'),
        validators=[email_required, email_validator, valid_user_email])


class UniqueEmailFormMixin():
    # Email field that must NOT belong to an existing user (registration).
    email = StringField(
        get_form_field_label('email'),
        validators=[email_required, email_validator, unique_user_email])


class PasswordFormMixin():
    # Password field, presence check only (login / change-password flows).
    password = PasswordField(
        get_form_field_label('password'), validators=[password_required])


class NewPasswordFormMixin():
    # Password field with length rules, for setting a new password.
    password = PasswordField(
        get_form_field_label('password'),
        validators=[password_required, password_length])


class PasswordConfirmFormMixin():
    # Retype-password field that must match ``password``.
    password_confirm = PasswordField(
        get_form_field_label('retype_password'),
        validators=[EqualTo('password', message='RETYPE_PASSWORD_MISMATCH'),
                    password_required])
class NextFormMixin():
    # Hidden post-login redirect target; cleared and rejected unless it
    # passes the open-redirect safety check.
    next = HiddenField()

    def validate_next(self, field):
        if field.data and not validate_redirect_url(field.data):
            field.data = ''
            flash(*get_message('INVALID_REDIRECT'))
            raise ValidationError(get_message('INVALID_REDIRECT')[0])


class RegisterFormMixin():
    # Submit button shared by the registration forms.
    submit = SubmitField(get_form_field_label('register'))
def to_dict(form):
    """Map the form's fields that exist on the user model to their data."""
    def _is_user_model_field(member):
        if not isinstance(member, Field):
            return False
        return hasattr(_datastore.user_model, member.name)

    members = inspect.getmembers(form, _is_user_model_field)
    return {attr_name: field.data for attr_name, field in members}
class SendConfirmationForm(Form, UserEmailFormMixin):
    """The default send confirmation form"""

    submit = SubmitField(get_form_field_label('send_confirmation'))

    def __init__(self, *args, **kwargs):
        super(SendConfirmationForm, self).__init__(*args, **kwargs)
        if request.method == 'GET':
            # Pre-fill the email field from the query string on plain GETs.
            self.email.data = request.args.get('email', None)

    def validate(self):
        """Fail when the account is already confirmed."""
        if not super(SendConfirmationForm, self).validate():
            return False
        if self.user.confirmed_at is not None:
            self.email.errors.append(get_message('ALREADY_CONFIRMED')[0])
            return False
        return True
class ForgotPasswordForm(Form, UserEmailFormMixin):
    """The default forgot password form"""

    submit = SubmitField(get_form_field_label('recover_password'))

    def validate(self):
        """Allow recovery only for users that do not still need confirmation."""
        if not super(ForgotPasswordForm, self).validate():
            return False
        if not requires_confirmation(self.user):
            return True
        self.email.errors.append(get_message('CONFIRMATION_REQUIRED')[0])
        return False
class PasswordlessLoginForm(Form, UserEmailFormMixin):
    """The passwordless login form"""

    submit = SubmitField(get_form_field_label('send_login_link'))

    # NOTE: a redundant __init__ that only delegated to super() was removed;
    # Python falls back to the inherited constructor automatically.

    def validate(self):
        """Reject disabled accounts; otherwise defer to the email mixin."""
        if not super(PasswordlessLoginForm, self).validate():
            return False
        if not self.user.is_active:
            self.email.errors.append(get_message('DISABLED_ACCOUNT')[0])
            return False
        return True
class LoginForm(Form, NextFormMixin):
    """The default login form"""

    email = StringField(get_form_field_label('email'),
                        validators=[Required(message='EMAIL_NOT_PROVIDED')])
    password = PasswordField(get_form_field_label('password'),
                             validators=[password_required])
    remember = BooleanField(get_form_field_label('remember_me'))
    submit = SubmitField(get_form_field_label('login'))

    def __init__(self, *args, **kwargs):
        super(LoginForm, self).__init__(*args, **kwargs)
        if not self.next.data:
            self.next.data = request.args.get('next', '')
        self.remember.default = config_value('DEFAULT_REMEMBER_ME')
        if current_app.extensions['security'].recoverable and \
                not self.password.description:
            # Attach a "forgot password" link below the password field.
            html = Markup(u'<a href="{url}">{message}</a>'.format(
                url=url_for_security("forgot_password"),
                message=get_message("FORGOT_PASSWORD")[0],
            ))
            self.password.description = html

    def validate(self):
        """Authenticate the user; every failure path appends a field error."""
        if not super(LoginForm, self).validate():
            return False
        self.user = _datastore.get_user(self.email.data)
        if self.user is None:
            self.email.errors.append(get_message('USER_DOES_NOT_EXIST')[0])
            # Reduce timing variation between existing and non-existing users
            # by hashing anyway (mitigates user-enumeration timing attacks).
            hash_password(self.password.data)
            return False
        if not self.user.password:
            self.password.errors.append(get_message('PASSWORD_NOT_SET')[0])
            # Reduce timing variation between existing and non-existing users
            hash_password(self.password.data)
            return False
        if not self.user.verify_and_update_password(self.password.data):
            self.password.errors.append(get_message('INVALID_PASSWORD')[0])
            return False
        if requires_confirmation(self.user):
            self.email.errors.append(get_message('CONFIRMATION_REQUIRED')[0])
            return False
        if not self.user.is_active:
            self.email.errors.append(get_message('DISABLED_ACCOUNT')[0])
            return False
        return True
class ConfirmRegisterForm(Form, RegisterFormMixin,
                          UniqueEmailFormMixin, NewPasswordFormMixin):
    # Registration form without password confirmation (confirmable flow).
    pass


class RegisterForm(ConfirmRegisterForm, PasswordConfirmFormMixin,
                   NextFormMixin):
    # Full registration form: adds password confirmation and a redirect target.

    def __init__(self, *args, **kwargs):
        super(RegisterForm, self).__init__(*args, **kwargs)
        if not self.next.data:
            self.next.data = request.args.get('next', '')


class ResetPasswordForm(Form, NewPasswordFormMixin, PasswordConfirmFormMixin):
    """The default reset password form"""

    submit = SubmitField(get_form_field_label('reset_password'))
class ChangePasswordForm(Form, PasswordFormMixin):
    """The default change password form"""

    new_password = PasswordField(
        get_form_field_label('new_password'),
        validators=[password_required, password_length])

    new_password_confirm = PasswordField(
        get_form_field_label('retype_password'),
        validators=[EqualTo('new_password',
                            message='RETYPE_PASSWORD_MISMATCH'),
                    password_required])

    submit = SubmitField(get_form_field_label('change_password'))

    def validate(self):
        """Require the current password and reject reusing it unchanged."""
        if not super(ChangePasswordForm, self).validate():
            return False
        if not current_user.verify_and_update_password(self.password.data):
            self.password.errors.append(get_message('INVALID_PASSWORD')[0])
            return False
        if self.password.data == self.new_password.data:
            self.password.errors.append(get_message('PASSWORD_IS_THE_SAME')[0])
            return False
        return True
| |
# -*- coding: utf-8 -*-
from urllib.parse import quote_plus
import pytest
from plexapi.exceptions import BadRequest
from . import conftest as utils
from . import test_media, test_mixins
# --- Artist tests (fixtures come from conftest; 'artist' is "Broke For Free") ---

def test_audio_Artist_attr(artist):
    """Smoke-check every public attribute exposed by an Artist object."""
    artist.reload()
    assert utils.is_datetime(artist.addedAt)
    assert artist.albumSort == -1
    if artist.art:
        assert utils.is_art(artist.art)
    if artist.countries:
        assert "United States of America" in [i.tag for i in artist.countries]
    # assert "Electronic" in [i.tag for i in artist.genres]
    assert utils.is_string(artist.guid, gte=5)
    assert artist.index == 1
    assert utils.is_metadata(artist._initpath)
    assert utils.is_metadata(artist.key)
    assert utils.is_int(artist.librarySectionID)
    assert artist.listType == "audio"
    assert utils.is_datetime(artist.lastRatedAt)
    assert utils.is_datetime(artist.lastViewedAt)
    assert len(artist.locations) == 1
    assert len(artist.locations[0]) >= 10
    assert artist.ratingKey >= 1
    assert artist._server._baseurl == utils.SERVER_BASEURL
    assert isinstance(artist.similar, list)
    if artist.summary:
        assert "Alias" in artist.summary
    if artist.thumb:
        assert utils.is_thumb(artist.thumb)
    assert artist.title == "Broke For Free"
    assert artist.titleSort == "Broke For Free"
    assert artist.type == "artist"
    assert utils.is_datetime(artist.updatedAt)
    assert utils.is_int(artist.viewCount, gte=0)


def test_audio_Artist_get(artist):
    # Artist.get() is an alias for locating a track by album/title.
    track = artist.get(album="Layers", title="As Colourful as Ever")
    assert track.title == "As Colourful as Ever"


def test_audio_Artist_history(artist):
    history = artist.history()
    assert isinstance(history, list)


def test_audio_Artist_track(artist):
    """Track lookup by title, by album+index, and the no-args error case."""
    track = artist.track("As Colourful as Ever")
    assert track.title == "As Colourful as Ever"
    track = artist.track(album="Layers", track=1)
    assert track.parentTitle == "Layers"
    assert track.index == 1
    with pytest.raises(BadRequest):
        artist.track()


def test_audio_Artist_tracks(artist):
    tracks = artist.tracks()
    assert len(tracks) == 1


def test_audio_Artist_album(artist):
    album = artist.album("Layers")
    assert album.title == "Layers"


def test_audio_Artist_albums(artist):
    albums = artist.albums()
    assert len(albums) == 1 and albums[0].title == "Layers"


def test_audio_Artist_hubs(artist):
    hubs = artist.hubs()
    assert isinstance(hubs, list)


def test_audio_Artist_mixins_edit_advanced_settings(artist):
    test_mixins.edit_advanced_settings(artist)


@pytest.mark.xfail(reason="Changing images fails randomly")
def test_audio_Artist_mixins_images(artist):
    test_mixins.lock_art(artist)
    test_mixins.lock_poster(artist)
    test_mixins.edit_art(artist)
    test_mixins.edit_poster(artist)
    test_mixins.attr_artUrl(artist)
    test_mixins.attr_posterUrl(artist)


def test_audio_Artist_mixins_rating(artist):
    test_mixins.edit_rating(artist)


def test_audio_Artist_mixins_tags(artist):
    # Exercise each tag-editing mixin supported by Artist objects.
    test_mixins.edit_collection(artist)
    test_mixins.edit_country(artist)
    test_mixins.edit_genre(artist)
    test_mixins.edit_mood(artist)
    test_mixins.edit_similar_artist(artist)
    test_mixins.edit_style(artist)


def test_audio_Artist_media_tags(artist):
    artist.reload()
    test_media.tag_collection(artist)
    test_media.tag_country(artist)
    test_media.tag_genre(artist)
    test_media.tag_mood(artist)
    test_media.tag_similar(artist)
    test_media.tag_style(artist)


def test_audio_Artist_PlexWebURL(plex, artist):
    url = artist.getWebURL()
    assert url.startswith('https://app.plex.tv/desktop')
    assert plex.machineIdentifier in url
    assert 'details' in url
    assert quote_plus(artist.key) in url
# --- Album tests ('album' fixture is "Layers" by Broke For Free) ---

def test_audio_Album_attrs(album):
    """Smoke-check every public attribute exposed by an Album object."""
    assert utils.is_datetime(album.addedAt)
    if album.art:
        assert utils.is_art(album.art)
    assert isinstance(album.formats, list)
    assert isinstance(album.genres, list)
    assert album.index == 1
    assert utils.is_metadata(album._initpath)
    assert utils.is_metadata(album.key)
    assert utils.is_datetime(album.lastRatedAt)
    assert utils.is_datetime(album.lastViewedAt)
    assert utils.is_int(album.librarySectionID)
    assert album.listType == "audio"
    assert utils.is_datetime(album.originallyAvailableAt)
    assert utils.is_metadata(album.parentKey)
    assert utils.is_int(album.parentRatingKey)
    if album.parentThumb:
        assert utils.is_thumb(album.parentThumb)
    assert album.parentTitle == "Broke For Free"
    assert album.ratingKey >= 1
    assert album._server._baseurl == utils.SERVER_BASEURL
    assert album.studio == "[no label]"
    assert isinstance(album.subformats, list)
    assert album.summary == ""
    if album.thumb:
        assert utils.is_thumb(album.thumb)
    assert album.title == "Layers"
    assert album.titleSort == "Layers"
    assert album.type == "album"
    assert utils.is_datetime(album.updatedAt)
    assert utils.is_int(album.viewCount, gte=0)
    assert album.year in (2012,)


def test_audio_Album_history(album):
    history = album.history()
    assert isinstance(history, list)


def test_audio_Track_history(track):
    history = track.history()
    assert isinstance(history, list)


def test_audio_Album_tracks(album):
    tracks = album.tracks()
    assert len(tracks) == 1


def test_audio_Album_track(album, track=None):
    # this is not reloaded. its not that much info missing.
    track = track or album.track("As Colourful As Ever")
    track2 = album.track(track=1)
    assert track == track2
    with pytest.raises(BadRequest):
        album.track()


def test_audio_Album_get(album):
    # alias for album.track()
    track = album.get("As Colourful As Ever")
    test_audio_Album_track(album, track=track)
def test_audio_Album_artist(album):
    """Album.artist() must resolve back to the owning artist."""
    artist = album.artist()
    # BUG FIX: the comparison result was previously discarded (no assert),
    # so this test could never fail.
    assert artist.title == "Broke For Free"
@pytest.mark.xfail(reason="Changing images fails randomly")
def test_audio_Album_mixins_images(album):
    test_mixins.lock_art(album)
    test_mixins.lock_poster(album)
    test_mixins.edit_art(album)
    test_mixins.edit_poster(album)
    test_mixins.attr_artUrl(album)
    test_mixins.attr_posterUrl(album)


def test_audio_Album_mixins_rating(album):
    test_mixins.edit_rating(album)


def test_audio_Album_mixins_tags(album):
    # Exercise each tag-editing mixin supported by Album objects.
    test_mixins.edit_collection(album)
    test_mixins.edit_genre(album)
    test_mixins.edit_label(album)
    test_mixins.edit_mood(album)
    test_mixins.edit_style(album)


def test_audio_Album_media_tags(album):
    album.reload()
    test_media.tag_collection(album)
    test_media.tag_genre(album)
    test_media.tag_label(album)
    test_media.tag_mood(album)
    test_media.tag_style(album)


def test_audio_Album_PlexWebURL(plex, album):
    url = album.getWebURL()
    assert url.startswith('https://app.plex.tv/desktop')
    assert plex.machineIdentifier in url
    assert 'details' in url
    assert quote_plus(album.key) in url
def test_audio_Track_attrs(album):
    """Smoke-check Track attributes plus its Media -> MediaPart -> Stream tree."""
    track = album.get("As Colourful As Ever").reload()
    assert utils.is_datetime(track.addedAt)
    if track.art:
        assert utils.is_art(track.art)
    assert track.chapterSource is None
    assert utils.is_int(track.duration)
    if track.grandparentArt:
        assert utils.is_art(track.grandparentArt)
    assert utils.is_metadata(track.grandparentKey)
    assert utils.is_int(track.grandparentRatingKey)
    if track.grandparentThumb:
        assert utils.is_thumb(track.grandparentThumb)
    assert track.grandparentTitle == "Broke For Free"
    assert track.guid.startswith("mbid://") or track.guid.startswith("plex://track/")
    assert track.hasSonicAnalysis is False
    assert track.index == 1
    assert track.trackNumber == track.index
    assert utils.is_metadata(track._initpath)
    assert utils.is_metadata(track.key)
    assert utils.is_datetime(track.lastRatedAt)
    assert utils.is_datetime(track.lastViewedAt)
    assert utils.is_int(track.librarySectionID)
    assert track.listType == "audio"
    assert len(track.locations) == 1
    assert len(track.locations[0]) >= 10
    # Assign 0 track.media
    media = track.media[0]
    assert track.moods == []
    assert track.originalTitle in (None, "Broke For Free")
    assert int(track.parentIndex) == 1
    assert utils.is_metadata(track.parentKey)
    assert utils.is_int(track.parentRatingKey)
    if track.parentThumb:
        assert utils.is_thumb(track.parentThumb)
    assert track.parentTitle == "Layers"
    assert track.playlistItemID is None
    assert track.primaryExtraKey is None
    # assert utils.is_int(track.ratingCount)
    assert utils.is_int(track.ratingKey)
    assert track._server._baseurl == utils.SERVER_BASEURL
    assert track.sessionKey is None
    assert track.summary == ""
    if track.thumb:
        assert utils.is_thumb(track.thumb)
    assert track.title == "As Colourful as Ever"
    assert track.titleSort == "As Colourful as Ever"
    assert not track.transcodeSessions
    assert track.type == "track"
    assert utils.is_datetime(track.updatedAt)
    assert utils.is_int(track.viewCount, gte=0)
    assert track.viewOffset == 0
    assert track.viewedAt is None
    assert track.year is None
    assert track.url(None) is None
    # Media-level attributes of the single MP3 file.
    assert media.aspectRatio is None
    assert media.audioChannels == 2
    assert media.audioCodec == "mp3"
    assert media.bitrate == 128
    assert media.container == "mp3"
    assert utils.is_int(media.duration)
    assert media.height is None
    assert utils.is_int(media.id, gte=1)
    assert utils.is_metadata(media._initpath)
    assert media.optimizedForStreaming is None
    # Assign 0 media.parts
    part = media.parts[0]
    assert media._server._baseurl == utils.SERVER_BASEURL
    assert media.videoCodec is None
    assert media.videoFrameRate is None
    assert media.videoResolution is None
    assert media.width is None
    # MediaPart-level attributes.
    assert part.container == "mp3"
    assert utils.is_int(part.duration)
    assert part.file.endswith(".mp3")
    assert utils.is_int(part.id)
    assert utils.is_metadata(part._initpath)
    assert utils.is_part(part.key)
    # assert part.media == <Media:Holy.Moment>
    assert part._server._baseurl == utils.SERVER_BASEURL
    assert part.size == 3761053
    # Assign 0 part.streams
    stream = part.streams[0]
    # AudioStream-level attributes (streamType/type 2 == audio).
    assert stream.audioChannelLayout == "stereo"
    assert stream.bitDepth is None
    assert stream.bitrate == 128
    assert stream.bitrateMode is None
    assert stream.channels == 2
    assert stream.codec == "mp3"
    assert stream.duration is None
    assert utils.is_int(stream.id)
    assert stream.index == 0
    assert utils.is_metadata(stream._initpath)
    assert stream.language is None
    assert stream.languageCode is None
    # assert stream.part == <MediaPart:22>
    assert stream.samplingRate == 48000
    assert stream.selected is True
    assert stream._server._baseurl == utils.SERVER_BASEURL
    assert stream.streamType == 2
    assert stream.title is None
    assert stream.type == 2
    # Loudness-analysis fields are unset for this fixture file.
    assert stream.albumGain is None
    assert stream.albumPeak is None
    assert stream.albumRange is None
    assert stream.endRamp is None
    assert stream.gain is None
    assert stream.loudness is None
    assert stream.lra is None
    assert stream.peak is None
    assert stream.startRamp is None
def test_audio_Track_album(album):
    """A track fetched from an album must link back to that same album."""
    first_track = album.tracks()[0]
    assert first_track.album() == album
def test_audio_Track_artist(album, artist):
    """A track fetched from an album must resolve to the album's artist."""
    first_track = album.tracks()[0]
    assert first_track.artist() == artist
def test_audio_Track_mixins_images(track):
    # Delegate to the shared mixin checks for the art and poster URL attributes.
    test_mixins.attr_artUrl(track)
    test_mixins.attr_posterUrl(track)
def test_audio_Track_mixins_rating(track):
    # Delegate to the shared mixin check for user-rating edits on a track.
    test_mixins.edit_rating(track)
def test_audio_Track_mixins_tags(track):
    # Delegate to the shared mixin checks for collection and mood tag edits.
    test_mixins.edit_collection(track)
    test_mixins.edit_mood(track)
def test_audio_Track_media_tags(track):
    # Reload first so the tag attributes are populated before the checks run.
    track.reload()
    test_media.tag_collection(track)
    test_media.tag_mood(track)
def test_audio_Track_PlexWebURL(plex, track):
    """The track's web URL points at the app.plex.tv details page."""
    url = track.getWebURL()
    assert url.startswith('https://app.plex.tv/desktop')
    expected_fragments = (
        plex.machineIdentifier,
        'details',
        quote_plus(track.parentKey),
    )
    for fragment in expected_fragments:
        assert fragment in url
def test_audio_Audio_section(artist, album, track):
    """Artist, album and track all resolve to one and the same library section."""
    sections = [artist.section(), album.section(), track.section()]
    assert all(sections)
    # All three must share a single section key.
    assert len({section.key for section in sections}) == 1
def test_audio_Artist_download(monkeydownload, tmpdir, artist):
    """Downloading an artist yields one file per track, flat or in subfolders."""
    expected = len(artist.tracks())
    assert len(artist.download(savepath=str(tmpdir))) == expected
    assert len(artist.download(savepath=str(tmpdir), subfolders=True)) == expected
def test_audio_Album_download(monkeydownload, tmpdir, album):
    """Downloading an album yields exactly one file per track."""
    expected = len(album.tracks())
    assert len(album.download(savepath=str(tmpdir))) == expected
def test_audio_Track_download(monkeydownload, tmpdir, track):
    """Downloading a single track yields exactly one file path."""
    assert len(track.download(savepath=str(tmpdir))) == 1
| |
from bs4 import BeautifulSoup
from django.conf import settings
from django.contrib.gis import admin
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.forms.formsets import formset_factory
from django.http import HttpResponseRedirect
from django.utils.translation import ugettext as _
from geoforms.forms import CheckboxElementForm
from geoforms.forms import CheckboxElementFormSet
from geoforms.forms import DrawbuttonForm
from geoforms.forms import NumberElementForm
from geoforms.forms import ParagraphForm
from geoforms.forms import RadioElementForm
from geoforms.forms import RadioElementFormSet
from geoforms.forms import TextareaForm
from geoforms.forms import TextElementForm
from geoforms.forms import QuestionForm
from geoforms.forms import RangeElementForm
from geoforms.forms import SelectElementForm
from geoforms.forms import SelectElementFormSet
from geoforms.models import SelectElementModel
from geoforms.models import CheckboxElementModel
from geoforms.models import DrawbuttonElementModel
from geoforms.models import GeoformElement
from geoforms.models import FormElement
from geoforms.models import ParagraphElementModel
from geoforms.models import Questionnaire
from geoforms.models import QuestionnaireForm
from geoforms.models import NumberElementModel
from geoforms.models import RadioElementModel
from geoforms.models import TextElementModel
from geoforms.models import TextareaModel
from geoforms.models import RangeElementModel
from geoforms.models import PopupModel
from geoforms.models import PageModel
from geoforms.models import GeoJSONPopupModel
from geoforms.models import Lottery
from modeltranslation.admin import TranslationAdmin
from modeltranslation.admin import TranslationTabularInline
# Lotteries are translatable but need no customized admin class.
admin.site.register(Lottery, TranslationAdmin)
class GeoformElementAdmin(TranslationAdmin, admin.ModelAdmin):
    """Base admin for geoform elements, searchable over every translated html field."""
    list_display = ('name',
                    'element_type',
                    'id',
                    'html')
    ordering = ['name']
    def __init__(self, *args, **kwargs):
        super(GeoformElementAdmin, self).__init__(*args, **kwargs)
        # Search on the element type plus the html variant of each
        # configured language (html_en, html_fi, ...).
        self.search_fields = ['element_type'] + [
            'html_%s' % code for code, _label in settings.LANGUAGES
        ]
class FormElementAdmin(admin.ModelAdmin):
    # Through-model admin: order rows by their parent form, then position.
    ordering = ['geoform', 'order']
class ElementInline(TranslationTabularInline):
    # Inline editor for a geoform's elements; no extra blank rows.
    model = FormElement
    extra = 0
class GeoformAdmin(TranslationAdmin, admin.ModelAdmin):
    # Base admin for geoforms; their elements are edited inline.
    list_display = ('name', 'id')
    inlines = [
        ElementInline
    ]
class PageAdmin(GeoformAdmin):
    """
    Page admin

    Restricts the changelist to geoforms stored with page_type 'form'.
    """
    def queryset(self, request):
        # NOTE(review): pre-1.6 Django hook; newer Django renamed it
        # get_queryset -- confirm the project's Django version.
        return self.model.objects.filter(page_type = 'form')
admin.site.register(PageModel, PageAdmin)
class PopupAdmin(GeoformAdmin):
    """
    Popup admin

    Restricts the changelist to geoforms stored with page_type 'popup'.
    """
    def queryset(self, request):
        return self.model.objects.filter(page_type = 'popup')
admin.site.register(PopupModel, PopupAdmin)
class GeoJSONPopupAdmin(GeoformAdmin):
    """
    GeoJSONPopup admin

    Restricts the changelist to geoforms stored with page_type 'gpop'.
    """
    def queryset(self, request):
        return self.model.objects.filter(page_type = 'gpop')
admin.site.register(GeoJSONPopupModel, GeoJSONPopupAdmin)
class QuestionnaireFormAdmin(admin.ModelAdmin):
    # Through-model admin: order rows by questionnaire, then position.
    ordering = ['questionnaire', 'order']
class GeoformInline(TranslationTabularInline):
    # Inline editor for the forms attached to a questionnaire.
    model = QuestionnaireForm
    extra = 0
class QuestionnaireAdmin(admin.OSMGeoAdmin, TranslationAdmin):
    """Map-enabled admin for questionnaires; attached forms are edited inline."""
    list_display = ('name',)
    ordering = ['name']
    inlines = [
        GeoformInline
    ]
    # Initial map position and zoom. Each getattr falls back to a literal
    # default when ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS is absent.
    # NOTE(review): if the settings dict exists but lacks one of these keys,
    # the ['...'] lookup raises KeyError -- confirm it always defines all three.
    default_lon = getattr(settings,
                          'ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS',
                          {'default_lon': 0})['default_lon']
    default_lat = getattr(settings,
                          'ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS',
                          {'default_lat': 0})['default_lat']
    default_zoom = getattr(settings,
                           'ORGANIZATION_ADMIN_DEFAULT_MAP_SETTINGS',
                           {'default_zoom': 4})['default_zoom']
    fieldsets = (
        (None, {
            'fields': ('name', 'description', ('start_date', 'end_date'), 'area',)
        }),
        (_('Advanced options'), {
            'classes': ('collapse',),
            'fields': ('show_area', 'scale_visible_area',)
        }),
    )
    # Extra assets for the OSM map widget used on the change form.
    openlayers_url = '%s%s' % (getattr(settings, 'STATIC_URL', '/'), 'js/libs/OpenLayers.js')
    extra_js = (reverse_lazy('osmextra'),)
    def change_view(self, request, object_id, form_url='', extra_context=None):
        # Expose the questionnaire's slug to the change-form template.
        extra_context = extra_context or {}
        extra_context['slug'] = Questionnaire.on_site.get(pk = object_id).slug
        return super(QuestionnaireAdmin, self).change_view(request, object_id,
            form_url, extra_context=extra_context)
    class Media:
        css = {
            "all": ("css/questionnaire_admin.css",)
        }
admin.site.register(GeoformElement, GeoformElementAdmin)
admin.site.register(Questionnaire, QuestionnaireAdmin)
class TextElementAdmin(GeoformElementAdmin):
    """
    This is the admin for text inputs
    """
    form = TextElementForm
    def queryset(self, request):
        # Only elements stored with element_type 'text'.
        return self.model.objects.filter(element_type = 'text')
admin.site.register(TextElementModel, TextElementAdmin)
class TextareaAdmin(GeoformElementAdmin):
    """
    This is the admin for adding textareas
    """
    form = TextareaForm
    def queryset(self, request):
        # Only elements stored with element_type 'textarea'.
        return self.model.objects.filter(element_type = 'textarea')
admin.site.register(TextareaModel, TextareaAdmin)
class NumberElementAdmin(GeoformElementAdmin):
    """Admin for numeric inputs; min/max/step live under 'Advanced options'."""
    form = NumberElementForm
    fieldsets = (
        (None, {
            'fields': ('question',)
        }),
        (_('Advanced options'), {
            'classes': ('collapse',),
            'fields': ('min_value',
                       'max_value',
                       'step')
        }),
    )
    def queryset(self, request):
        # Only elements stored with element_type 'number'.
        return self.model.objects.filter(element_type = 'number')
admin.site.register(NumberElementModel, NumberElementAdmin)
class RangeElementAdmin(GeoformElementAdmin):
    """Admin for slider (range) inputs with labeled endpoints."""
    form = RangeElementForm
    fieldsets = (
        (None, {
            'fields': ('question',
                       'min_label',
                       'max_label',)
        }),
        (_('Advanced options'), {
            'classes': ('collapse',),
            'fields': ('min_value',
                       'max_value',
                       'step',
                       'initial_value',)
        }),
    )
    def queryset(self, request):
        # Only elements stored with element_type 'range'.
        return self.model.objects.filter(element_type = 'range')
admin.site.register(RangeElementModel, RangeElementAdmin)
class ParagraphElementAdmin(GeoformElementAdmin):
    """Admin for static paragraph (free text) elements."""
    form = ParagraphForm
    def queryset(self, request):
        # Only elements stored with element_type 'paragraph'.
        return self.model.objects.filter(element_type = 'paragraph')
admin.site.register(ParagraphElementModel, ParagraphElementAdmin)
class DrawbuttonElementAdmin(GeoformElementAdmin):
    """Admin for map draw-button elements."""
    form = DrawbuttonForm
    def queryset(self, request):
        # Only elements stored with element_type 'drawbutton'.
        return self.model.objects.filter(element_type = 'drawbutton')
admin.site.register(DrawbuttonElementModel, DrawbuttonElementAdmin)
class CheckboxElementAdmin(GeoformElementAdmin):
    """Admin for checkbox-group elements.

    Replaces the default add/change views with a QuestionForm plus a formset
    of CheckboxElementForms so one question and its labels can be edited for
    every configured language at once. On POST the formset regenerates the
    stored html; on GET for change_view the stored html is parsed back into
    initial formset data.
    """
    form = CheckboxElementForm
    add_form_template = 'admin/geoforms/geoformelement/create_element.html'
    change_form_template = add_form_template
    def queryset(self, request):
        # Only elements stored with element_type 'checkbox'.
        return self.model.objects.filter(element_type = 'checkbox')
    def add_view(self, request, form_url='', extra_context=None):
        if request.method == 'POST':
            ces = formset_factory(CheckboxElementForm,
                                  formset=CheckboxElementFormSet)
            cs = ces(request.POST)
            cs.save()
            return HttpResponseRedirect(reverse('admin:geoforms_checkboxelementmodel_changelist'))
        else:
            return super(CheckboxElementAdmin, self).add_view(request,
                form_url = '',
                extra_context = {
                    'current_app': self.admin_site.name,
                    'form': QuestionForm(),
                    'formset': formset_factory(CheckboxElementForm)})
    def change_view(self, request, object_id, form_url='', extra_context=None):
        if request.method == 'POST':
            ces = formset_factory(CheckboxElementForm,
                                  formset=CheckboxElementFormSet)
            cs = ces(request.POST)
            cs.save()
            return HttpResponseRedirect(reverse('admin:geoforms_checkboxelementmodel_changelist'))
        else:
            # Rebuild initial formset data by parsing the stored html for
            # each language; missing translations fall back to the first
            # configured language.
            initial_data = []
            question_data = {'question': []}
            checkboxelement = CheckboxElementModel.objects.get(id = object_id)
            for i, lang in enumerate(settings.LANGUAGES):
                html = getattr(checkboxelement,'html_%s' % lang[0])
                # Fixed: identity test against None (PEP 8) instead of '== None'.
                if html is None:
                    html = getattr(checkboxelement,'html_%s' % settings.LANGUAGES[0][0])
                soup = BeautifulSoup(html)
                question_data['question'].append(soup.p.text.strip())
                if soup.find(attrs={'data-random': 'true'}):
                    question_data['randomize'] = True
                labels = soup.find_all('label')
                for j, label in enumerate(labels):
                    if i == 0:
                        initial_data.append({u'label': [label.text.strip()]})
                    else:
                        initial_data[j]['label'].append(label.text.strip())
            return super(CheckboxElementAdmin, self).change_view(request,
                object_id,
                form_url = '',
                extra_context = {
                    'current_app': self.admin_site.name,
                    'form': QuestionForm(initial = question_data),
                    'formset': formset_factory(CheckboxElementForm,
                                               extra = 0)(initial = initial_data)})
admin.site.register(CheckboxElementModel, CheckboxElementAdmin)
class RadioElementAdmin(GeoformElementAdmin):
    """Admin for radio-button elements.

    Mirrors CheckboxElementAdmin: POST saves via a RadioElementFormSet, GET
    on change_view parses the stored html back into initial formset data.
    """
    form = RadioElementForm
    add_form_template = 'admin/geoforms/geoformelement/create_element.html'
    change_form_template = add_form_template
    def queryset(self, request):
        # Only elements stored with element_type 'radio'.
        return self.model.objects.filter(element_type = 'radio')
    def add_view(self, request, form_url='', extra_context=None):
        if request.method == 'POST':
            res = formset_factory(RadioElementForm,
                                  formset=RadioElementFormSet)
            rs = res(request.POST)
            rs.save()
            return HttpResponseRedirect(reverse('admin:geoforms_radioelementmodel_changelist'))
        else:
            return super(RadioElementAdmin, self).add_view(request,
                form_url = '',
                extra_context = {
                    'current_app': self.admin_site.name,
                    'form': QuestionForm(),
                    'formset': formset_factory(RadioElementForm)})
    def change_view(self, request, object_id, form_url='', extra_context=None):
        if request.method == 'POST':
            res = formset_factory(RadioElementForm,
                                  formset=RadioElementFormSet)
            rs = res(request.POST)
            rs.save()
            return HttpResponseRedirect(reverse('admin:geoforms_radioelementmodel_changelist'))
        else:
            # Rebuild initial formset data from the stored html per language;
            # missing translations fall back to the first configured language.
            initial_data = []
            question_data = {'question': []}
            radioelement = RadioElementModel.objects.get(id = object_id)
            for i, lang in enumerate(settings.LANGUAGES):
                html = getattr(radioelement,'html_%s' % lang[0])
                # NOTE(review): '== None' should be 'is None' (PEP 8 E711).
                if html == None:
                    html = getattr(radioelement,'html_%s' % settings.LANGUAGES[0][0])
                soup = BeautifulSoup(html)
                # NOTE(review): unlike CheckboxElementAdmin this does not
                # .strip() the question text -- confirm whether intentional.
                question_data['question'].append(soup.p.text)
                if soup.find(attrs={'data-random': 'true'}):
                    question_data['randomize'] = True
                labels = soup.find_all('label')
                for j, label in enumerate(labels):
                    if i == 0:
                        initial_data.append({u'label': [label.text.strip()]})
                    else:
                        initial_data[j]['label'].append(label.text.strip())
            return super(RadioElementAdmin, self).change_view(request,
                object_id,
                form_url = '',
                extra_context = {
                    'current_app': self.admin_site.name,
                    'form': QuestionForm(initial = question_data),
                    'formset': formset_factory(RadioElementForm,
                                               extra = 0)(initial = initial_data)})
admin.site.register(RadioElementModel, RadioElementAdmin)
class SelectElementAdmin(GeoformElementAdmin):
    """Admin for select (dropdown) elements.

    Mirrors CheckboxElementAdmin/RadioElementAdmin, but parses <option>
    elements instead of <label> elements when rebuilding initial data.
    """
    form = SelectElementForm
    add_form_template = 'admin/geoforms/geoformelement/create_element.html'
    change_form_template = add_form_template
    def queryset(self, request):
        # Only elements stored with element_type 'select'.
        return self.model.objects.filter(element_type = 'select')
    def add_view(self, request, form_url='', extra_context=None):
        if request.method == 'POST':
            res = formset_factory(SelectElementForm,
                                  formset=SelectElementFormSet)
            rs = res(request.POST)
            rs.save()
            return HttpResponseRedirect(reverse('admin:geoforms_selectelementmodel_changelist'))
        else:
            return super(SelectElementAdmin, self).add_view(request,
                form_url = '',
                extra_context = {
                    'current_app': self.admin_site.name,
                    'form': QuestionForm(),
                    'formset': formset_factory(SelectElementForm)})
    def change_view(self, request, object_id, form_url='', extra_context=None):
        if request.method == 'POST':
            res = formset_factory(SelectElementForm,
                                  formset=SelectElementFormSet)
            rs = res(request.POST)
            rs.save()
            return HttpResponseRedirect(reverse('admin:geoforms_selectelementmodel_changelist'))
        else:
            # Rebuild initial formset data from the stored html per language.
            initial_data = []
            question_data = {'question': []}
            selectelement = SelectElementModel.objects.get(id = object_id)
            for i, lang in enumerate(settings.LANGUAGES):
                html = getattr(selectelement,'html_%s' % lang[0])
                # NOTE(review): '== None' should be 'is None' (PEP 8 E711).
                if html == None:
                    html = getattr(selectelement,'html_%s' % settings.LANGUAGES[0][0])
                soup = BeautifulSoup(html)
                question_data['question'].append(soup.p.contents[0])
                if soup.find(attrs={'data-random': 'true'}):
                    question_data['randomize'] = True
                options = soup.find_all('option')
                for j, option in enumerate(options):
                    # Don't add empty values
                    if option.text == '':
                        continue
                    if i == 0:
                        initial_data.append({u'label': [option.text.strip()]})
                    else:
                        # j-1 assumes exactly one empty placeholder option
                        # precedes the real options -- TODO confirm against
                        # the html SelectElementFormSet generates.
                        initial_data[j-1]['label'].append(option.text.strip())
            return super(SelectElementAdmin, self).change_view(request,
                object_id,
                form_url = '',
                extra_context = {
                    'current_app': self.admin_site.name,
                    'form': QuestionForm(initial = question_data),
                    'formset': formset_factory(SelectElementForm,
                                               extra = 0)(initial = initial_data)})
admin.site.register(SelectElementModel, SelectElementAdmin)
| |
# Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Public API for BGPSpeaker.
This API can be used by various services like RPC, CLI, IoC, etc.
"""
import inspect
import logging
import traceback
from ryu.services.protocols.bgp.base import add_bgp_error_metadata
from ryu.services.protocols.bgp.base import API_ERROR_CODE
from ryu.services.protocols.bgp.base import BGPSException
from ryu.services.protocols.bgp.core_manager import CORE_MANAGER
from ryu.services.protocols.bgp.rtconf.base import get_validator
from ryu.services.protocols.bgp.rtconf.base import MissingRequiredConf
from ryu.services.protocols.bgp.rtconf.base import RuntimeConfigError
# Module-level logger for the public API layer.
LOG = logging.getLogger('bgpspeaker.api.base')
# Various constants used in API calls
ROUTE_DISTINGUISHER = 'route_dist'
PREFIX = 'prefix'
NEXT_HOP = 'next_hop'
VPN_LABEL = 'label'
API_SYM = 'name'
ORIGIN_RD = 'origin_rd'
ROUTE_FAMILY = 'route_family'
# API call registry: maps a symbol name to the registered callable.
_CALL_REGISTRY = {}
@add_bgp_error_metadata(code=API_ERROR_CODE,
                        sub_code=1,
                        def_desc='Unknown API error.')
class ApiException(BGPSException):
    """Base exception for errors raised from the BGPS public API layer."""
    pass
@add_bgp_error_metadata(code=API_ERROR_CODE,
                        sub_code=2,
                        def_desc='API symbol or method is not known.')
class MethodNotFound(ApiException):
    """Raised when no API function is registered under the requested symbol."""
    pass
@add_bgp_error_metadata(code=API_ERROR_CODE,
                        sub_code=3,
                        def_desc='Error related to BGPS core not starting.')
class CoreNotStarted(ApiException):
    """Raised when a non-core API is invoked before the core manager starts."""
    pass
def register(**kwargs):
    """Decorator for registering API function.

    Does not do any check or validation. The registry key is the value
    passed under API_SYM ('name') or, failing that, the function's own name.
    """
    def decorator(func):
        key = kwargs.get(API_SYM, func.__name__)
        _CALL_REGISTRY[key] = func
        return func
    return decorator
def register_method(name):
    """Decorator tagging a method as a BGPS public API entry named *name*.

    The tag is read later by register_class, which exposes tagged methods.
    """
    def decorator(func):
        func.__api_method_name__ = name
        return func
    return decorator
def register_class(cls):
    """Decorator for the registering class whose instance methods provide BGPS
    public API.

    Wraps cls.__init__ so that, after construction, every method tagged by
    register_method() is registered in the call registry bound to the new
    instance.
    """
    old_init = cls.__init__
    def new_init(self, *args, **kwargs):
        # Run the original constructor first.
        old_init(self, *args, **kwargs)
        # Collect the methods register_method() tagged with
        # __api_method_name__.
        api_registered_methods = \
            [(m_name, m) for m_name, m in
             inspect.getmembers(cls, predicate=inspect.ismethod)
             if hasattr(m, '__api_method_name__')]
        for _, method in api_registered_methods:
            api_name = getattr(method, '__api_method_name__')
            # Factory function so each wrapper captures its own `method`
            # (avoids the late-binding-closure pitfall in the loop).
            def create_wrapper(method):
                def api_method_wrapper(*args, **kwargs):
                    return method(self, *args, **kwargs)
                return api_method_wrapper
            register(name=api_name)(create_wrapper(method))
    cls.__init__ = new_init
    return cls
class RegisterWithArgChecks(object):
    """Decorator for registering API functions.

    Does some argument checking and validation of required arguments.
    The registered wrapper accepts keyword arguments only.
    """
    def __init__(self, name, req_args=None, opt_args=None):
        # name: registry symbol; req_args/opt_args: allowed keyword names.
        self._name = name
        if not req_args:
            req_args = []
        self._req_args = req_args
        if not opt_args:
            opt_args = []
        self._opt_args = opt_args
        self._all_args = (set(self._req_args) | set(self._opt_args))
    def __call__(self, func):
        """Wraps given function and registers it as API.

        Returns original function.
        """
        def wrapped_fun(**kwargs):
            """Wraps a function to do validation before calling actual func.

            Wraps a function to take key-value args. only. Checks if:
            1) all required argument of wrapped function are provided
            2) no extra/un-known arguments are passed
            3) checks if validator for required arguments is available
            4) validates required arguments
            Raises exception if no validator can be found for required args.
            """
            # Check if we are missing arguments.
            if not kwargs and len(self._req_args) > 0:
                raise MissingRequiredConf(desc='Missing all required '
                                          'attributes.')
            # Check if we have unknown arguments.
            given_args = set(kwargs.keys())
            unknown_attrs = given_args - set(self._all_args)
            if unknown_attrs:
                raise RuntimeConfigError(desc=('Unknown attributes %r' %
                                               unknown_attrs))
            # Check if required arguments are missing
            missing_req_args = set(self._req_args) - given_args
            if missing_req_args:
                conf_name = ', '.join(missing_req_args)
                raise MissingRequiredConf(conf_name=conf_name)
            #
            # Prepare to call wrapped function.
            #
            # Collect required arguments in the order asked and validate it.
            req_values = []
            for req_arg in self._req_args:
                req_value = kwargs.get(req_arg)
                # Validate required value.
                validator = get_validator(req_arg)
                if not validator:
                    raise ValueError('No validator registered for function %s'
                                     ' and arg. %s' % (func, req_arg))
                validator(req_value)
                req_values.append(req_value)
            # Collect optional arguments.
            opt_items = {}
            for opt_arg, opt_value in kwargs.items():
                if opt_arg in self._opt_args:
                    opt_items[opt_arg] = opt_value
            # Call actual function: required args positionally (in declared
            # order), optional args as keywords.
            return func(*req_values, **opt_items)
        # Register wrapped function
        _CALL_REGISTRY[self._name] = wrapped_fun
        return func
def is_call_registered(call_name):
    """Return True when an API function is registered under *call_name*."""
    return call_name in _CALL_REGISTRY
def get_call(call_name):
    """Return the API function registered under *call_name*, or None."""
    return _CALL_REGISTRY.get(call_name)
def call(symbol, **kwargs):
    """Calls/executes BGPS public API identified by given symbol and passes
    given kwargs as param.

    Raises MethodNotFound for unknown symbols and CoreNotStarted when a
    non-'core' API is invoked before the core manager is active.
    """
    LOG.info("API method %s called with args: %s", symbol, str(kwargs))
    # TODO(PH, JK) improve the way api function modules are loaded
    # Importing the sibling 'all' module populates _CALL_REGISTRY as a side
    # effect of its registration decorators.
    import all # noqa
    if not is_call_registered(symbol):
        message = 'Did not find any method registered by symbol %s' % symbol
        raise MethodNotFound(message)
    if not symbol.startswith('core') and not CORE_MANAGER.started:
        raise CoreNotStarted(desc='CoreManager is not active.')
    call = get_call(symbol)
    try:
        return call(**kwargs)
    except BGPSException as r:
        # Known BGPS errors are logged and re-raised unchanged.
        LOG.error(traceback.format_exc())
        raise r
    except Exception as e:
        # Anything unexpected is wrapped in a generic ApiException.
        LOG.error(traceback.format_exc())
        raise ApiException(desc=str(e))
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_featurevectormatrix
----------------------------------
Tests for `featurevectormatrix` module.
"""
import unittest
from featurevectormatrix import FeatureVectorMatrix
# Shared fixtures used across the test cases below:
# v3/v4 are list rows; c3/c4 are matching column-name lists; d3/d4 are the
# same rows keyed by column name; d2_4 uses a disjoint set of columns.
v3 = [1, 2, 3]
v4 = [1, 2, 3, 4]
c3 = ['a', 'b', 'c']
c4 = ['a', 'b', 'c', 'd']
d3 = {
    'a': 1,
    'b': 2,
    'c': 3,
}
d4 = {
    'a': 1,
    'b': 2,
    'c': 3,
    'd': 4,
}
d2_4 = {
    'e': 1,
    'f': 2,
    'g': 3,
    'h': 4,
}
class TestFeatureVectorMatrix(unittest.TestCase):
    """Tests for FeatureVectorMatrix: list/dict rows, keyed rows, transpose."""
    def test_exceptions(self):
        """Error cases for unkeyed rows: no columns, short lists, no keys."""
        fvm = FeatureVectorMatrix()
        caught = False
        try:
            fvm.add_row(v3)
        except IndexError:
            caught = True
        self.assertTrue(caught, "You can't add a list to a fvm with no columns defined")
        fvm.set_column_names(c3)
        self.assertEquals(3, fvm.column_count())
        fvm.add_row(v3)
        self.assertEquals(1, fvm.row_count())
        caught = False
        try:
            print fvm.keys()
        except NotImplementedError:
            caught = True
        self.assertTrue(caught, "You can't get keys without all rows keyed")
        caught = False
        try:
            fvm.transpose()
        except NotImplementedError:
            caught = True
        self.assertTrue(caught, "You can't rotate a fvm without all rows keyed")
        caught = False
        try:
            fvm.add_row(v4)
        except IndexError:
            caught = True
        self.assertTrue(caught, "You can't add a list to a fvm with more columns than the given list")
        fvm.add_row(d4)
        self.assertEquals(len(fvm.column_names()), 4)
        # test that the already added row gets the extra 0 appended
        self.assertEquals(fvm.get_row_list(0), [1, 2, 3,0])
    def test_fill_small_list(self):
        """A short list row is padded with the default value (0)."""
        fvm = FeatureVectorMatrix()
        fvm.set_column_names(c4)
        fvm.add_row(v3)
        self.assertEquals(fvm.get_row_list(0), [1, 2, 3, 0])
    def test_fill_small_dict(self):
        """Dict rows grow the column set; earlier rows read back padded."""
        fvm = FeatureVectorMatrix()
        fvm.add_row(d3)
        self.assertEquals(fvm.get_row_dict(0), d3)
        fvm.add_row(d4)
        self.assertEquals(fvm.get_row_dict(1), d4)
        self.assertEquals(fvm.get_row_list(0), [ d3[c] if c in d3 else fvm._default_value for c in fvm.column_names() ])
    def test2(self):
        """Adding a dict with disjoint keys extends the columns to 7."""
        fvm = FeatureVectorMatrix()
        fvm.set_column_names(c3)
        self.assertEquals(c3, fvm.column_names())
        fvm.add_row(d3)
        self.assertEquals(3, fvm.column_count())
        self.assertEquals(fvm.get_row_list(0), v3)
        fvm.add_row(d2_4)
        self.assertEquals(7, fvm.column_count())
        self.assertEquals(fvm.get_row_list(0), v3 + [0,0,0,0])
        self.assertEquals(fvm.get_row_dict(0), d3)
    def test_exceptions_key(self):
        """Same error cases as test_exceptions, but with keyed rows;
        re-adding under an existing key replaces instead of appending."""
        fvm = FeatureVectorMatrix()
        caught = False
        try:
            fvm.add_row(v3, 'a')
        except IndexError:
            caught = True
        self.assertTrue(caught, "You can't add a list to a fvm with no columns defined")
        fvm.set_column_names(c3)
        self.assertEquals(3, fvm.column_count())
        fvm.add_row(v3, 'a')
        self.assertEquals(1, fvm.row_count())
        fvm.add_row(v3, 'a')
        self.assertEquals(1, fvm.row_count())
        caught = False
        try:
            fvm.add_row(v4, 'b')
        except IndexError:
            caught = True
        self.assertTrue(caught, "You can't add a list to a fvm with more columns than the given list")
        fvm.add_row(d4, 'c')
        self.assertEquals(len(fvm.column_names()), 4)
        self.assertEquals(2, fvm.row_count())
        # test that the already added row gets the extra 0 appended
        self.assertEquals(fvm.get_row_list(0), [1, 2, 3,0])
    def test_fill_small_list_key(self):
        """Keyed short list row is padded with the default value."""
        fvm = FeatureVectorMatrix()
        fvm.set_column_names(c4)
        fvm.add_row(v3, 'a')
        self.assertEquals(fvm.get_row_list(0), [1, 2, 3, 0])
    def test_fill_small_dict_key(self):
        """Keyed dict rows behave like test_fill_small_dict."""
        fvm = FeatureVectorMatrix()
        fvm.add_row(d3, 'a')
        self.assertEquals(fvm.get_row_dict(0), d3)
        fvm.add_row(d4, 'b')
        self.assertEquals(fvm.get_row_dict(1), d4)
        self.assertEquals(fvm.get_row_list(0), [ d3[c] if c in d3 else fvm._default_value for c in fvm.column_names() ])
    def test2_key(self):
        """Keyed rows support lookup by key or index, and key iteration."""
        fvm = FeatureVectorMatrix()
        fvm.set_column_names(c3)
        self.assertEquals(c3, fvm.column_names())
        fvm.add_row(d3, 'a')
        self.assertEquals(3, fvm.column_count())
        self.assertEquals(fvm.get_row_list('a'), v3)
        fvm.add_row(d2_4, 'b')
        self.assertEquals(7, fvm.column_count())
        self.assertEquals(fvm.get_row_list('a'), v3 + [0,0,0,0])
        self.assertEquals(fvm['a'], v3 + [0,0,0,0])
        self.assertEquals(fvm[0], v3 + [0,0,0,0])
        self.assertEquals(fvm.get_row_dict('a'), d3)
        self.assertEquals([ k for k in fvm ], ['a', 'b'])
    def test_getitem(self):
        """With default_to_hashed_rows, indexing returns a dict row."""
        fvm = FeatureVectorMatrix(default_to_hashed_rows=True)
        fvm.set_column_names(c3)
        self.assertEquals(c3, fvm.column_names())
        fvm.add_row(v3, 'first')
        self.assertEquals(fvm[0], d3)
        tfvm = fvm.transpose()
        self.assertEquals(tfvm[0], {'first': 1})
    def test_transpose_default_value(self):
        """A custom default_value fills gaps and survives transpose."""
        fvm = FeatureVectorMatrix(default_value=-1)
        fvm.set_column_names(c3)
        self.assertEquals(c3, fvm.column_names())
        fvm.add_row(v3, 'first')
        fvm.add_row(d4, 'second')
        self.assertEquals(fvm[0], [1, 2, 3, -1])
        tfvm = fvm.transpose()
        for key in tfvm:
            self.assertIsNotNone(tfvm[key])
        self.assertEquals(tfvm.get_row_dict(3), {'second': 4})
    def test_matrix(self):
        """Transpose swaps rows/columns and a double transpose round-trips."""
        fvm = FeatureVectorMatrix()
        fvm.set_column_names(c3)
        fvm.add_row(d3, 'one')
        fvm.add_row(d4, 'two')
        fvm.add_row(d4, 'three')
        self.assertEquals(4, fvm.column_count())
        self.assertEquals(3, fvm.row_count())
        v3_old = fvm.get_row_list(0)
        self.assertEquals(fvm.get_row_dict('two'), d4)
        rfvm = fvm.transpose()
        self.assertEquals(3, rfvm.column_count())
        self.assertEquals(4, rfvm.row_count())
        self.assertEquals(rfvm.get_row_dict(2), {'three': 3, 'two': 3, 'one': 3})
        self.assertEquals(rfvm.get_row_dict(3), {'two': 4, 'three': 4})
        self.assertEquals(rfvm.column_names(), ['one', 'two', 'three'])
        self.assertEquals(rfvm.get_row_list(2), [3, 3, 3])
        self.assertEquals(rfvm.get_row_list(3), [0, 4, 4])
        fvm2 = rfvm.transpose()
        self.assertEquals(4, fvm2.column_count())
        self.assertEquals(3, fvm2.row_count())
        self.assertEquals(fvm2.get_row_dict(2), d4)
        self.assertEquals(fvm2.get_row_dict(0), d3)
        self.assertEquals(fvm2.get_row_list(0), v3_old)
        self.assertEquals(fvm2.get_row_dict('two'), d4)
    def test_len(self):
        """len() reports the row count; keys() lists row keys."""
        fvm = FeatureVectorMatrix()
        fvm.add_row(d3, 'one')
        self.assertEquals(1, len(fvm))
        self.assertEquals(fvm.keys(), ['one'])
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| |
# This file is part of beets.
# Copyright 2015, Fabrice Laporte, Yevgeny Bezman, and Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import subprocess
import os
import collections
import itertools
import sys
import warnings
import re
from beets import logging
from beets import ui
from beets.plugins import BeetsPlugin
from beets.util import syspath, command_output, displayable_path
from beets import config
# Utilities.
class ReplayGainError(Exception):
    """Raised when a local (to a track or an album) error occurs in one
    of the backends.
    """
class FatalReplayGainError(Exception):
    """Raised when a fatal error occurs in one of the backends.

    Unlike ReplayGainError, this is not local to a single track or album.
    """
def call(args):
    """Execute the command and return its output or raise a
    ReplayGainError on failure.
    """
    try:
        return command_output(args)
    except UnicodeEncodeError:
        # Due to a bug in Python 2's subprocess on Windows, Unicode
        # filenames can fail to encode on that platform. See:
        # http://code.google.com/p/beets/issues/detail?id=499
        raise ReplayGainError("argument encoding failed")
    except subprocess.CalledProcessError as exc:
        raise ReplayGainError(
            "{0} exited with status {1}".format(args[0], exc.returncode)
        )
# Backend base and plumbing classes.
# Result records: a per-track (gain, peak) pair, and an album-level result
# bundling the album gain with the list of per-track gains.
Gain = collections.namedtuple("Gain", "gain peak")
AlbumGain = collections.namedtuple("AlbumGain", "album_gain track_gains")
class Backend(object):
    """An abstract class representing engine for calculating RG values.
    """
    def __init__(self, config, log):
        """Initialize the backend with the configuration view for the
        plugin.
        """
        self._log = log
    def compute_track_gain(self, items):
        # Subclasses must return per-track Gain results.
        raise NotImplementedError()
    def compute_album_gain(self, album):
        # TODO: implement album gain in terms of track gain of the
        # individual tracks which can be used for any backend.
        raise NotImplementedError()
# bsg1770gain backend
class Bs1770gainBackend(Backend):
"""bs1770gain is a loudness scanner compliant with ITU-R BS.1770 and
its flavors EBU R128, ATSC A/85 and Replaygain 2.0.
"""
def __init__(self, config, log):
super(Bs1770gainBackend, self).__init__(config, log)
config.add({
'chunk_at': 5000,
'method': 'replaygain',
})
self.chunk_at = config['chunk_at'].as_number()
self.method = b'--' + bytes(config['method'].get(unicode))
cmd = b'bs1770gain'
try:
call([cmd, self.method])
self.command = cmd
except OSError:
raise FatalReplayGainError(
'Is bs1770gain installed? Is your method in config correct?'
)
if not self.command:
raise FatalReplayGainError(
'no replaygain command found: install bs1770gain'
)
def compute_track_gain(self, items):
"""Computes the track gain of the given tracks, returns a list
of TrackGain objects.
"""
output = self.compute_gain(items, False)
return output
def compute_album_gain(self, album):
"""Computes the album gain of the given album, returns an
AlbumGain object.
"""
# TODO: What should be done when not all tracks in the album are
# supported?
supported_items = album.items()
output = self.compute_gain(supported_items, True)
if not output:
raise ReplayGainError('no output from bs1770gain')
return AlbumGain(output[-1], output[:-1])
def isplitter(self, items, chunk_at):
"""Break an iterable into chunks of at most size `chunk_at`,
generating lists for each chunk.
"""
iterable = iter(items)
while True:
result = []
for i in range(chunk_at):
try:
a = next(iterable)
except StopIteration:
break
else:
result.append(a)
if result:
yield result
else:
break
def compute_gain(self, items, is_album):
"""Computes the track or album gain of a list of items, returns
a list of TrackGain objects.
When computing album gain, the last TrackGain object returned is
the album gain
"""
if len(items) == 0:
return []
albumgaintot = 0.0
albumpeaktot = 0.0
returnchunks = []
# In the case of very large sets of music, we break the tracks
# into smaller chunks and process them one at a time. This
# avoids running out of memory.
if len(items) > self.chunk_at:
i = 0
for chunk in self.isplitter(items, self.chunk_at):
i += 1
returnchunk = self.compute_chunk_gain(chunk, is_album)
albumgaintot += returnchunk[-1].gain
albumpeaktot += returnchunk[-1].peak
returnchunks = returnchunks + returnchunk[0:-1]
returnchunks.append(Gain(albumgaintot / i, albumpeaktot / i))
return returnchunks
else:
return self.compute_chunk_gain(items, is_album)
def compute_chunk_gain(self, items, is_album):
"""Compute ReplayGain values and return a list of results
dictionaries as given by `parse_tool_output`.
"""
# Construct shell command.
cmd = [self.command]
cmd = cmd + [self.method]
cmd = cmd + [b'-it']
# Workaround for Windows: the underlying tool fails on paths
# with the \\?\ prefix, so we don't use it here. This
# prevents the backend from working with long paths.
args = cmd + [syspath(i.path, prefix=False) for i in items]
# Invoke the command.
self._log.debug("executing {0}", " ".join(map(displayable_path, args)))
output = call(args)
self._log.debug(u'analysis finished: {0}', output)
results = self.parse_tool_output(output,
len(items) + is_album)
self._log.debug(u'{0} items, {1} results', len(items), len(results))
return results
def parse_tool_output(self, text, num_lines):
    """Parse bs1770gain's raw byte output and return a list of `Gain`
    tuples, one per analyzed file, taking at most `num_lines`
    matched sections.
    """
    out = []
    # Decode leniently: filenames in the output may not be valid UTF-8.
    data = text.decode('utf8', errors='ignore')
    # Match one "  [i/n] ..." per-track section or the "[ALBUM]"
    # summary section, up to the start of the next section or the
    # trailing "done." marker. (Python 2 `ur''` raw-unicode literal.)
    regex = re.compile(
        ur'(\s{2,2}\[\d+\/\d+\].*?|\[ALBUM\].*?)'
        '(?=\s{2,2}\[\d+\/\d+\]|\s{2,2}\[ALBUM\]'
        ':|done\.\s)', re.DOTALL | re.UNICODE)
    results = re.findall(regex, data)
    for parts in results[0:num_lines]:
        # NOTE(review): under Python 2, b'\n' is a plain str so this
        # splits the decoded text; under Python 3 it would raise.
        part = parts.split(b'\n')
        # NOTE(review): str.split always returns at least one element,
        # so this branch looks unreachable — confirm the intended check.
        if len(part) == 0:
            self._log.debug('bad tool output: {0!r}', text)
            raise ReplayGainError('bs1770gain failed')
        try:
            # part[0]: file name; part[1]: "... <gain> LU" after a '/';
            # part[2]: peak value after a '/'.
            song = {
                'file': part[0],
                'gain': float((part[1].split('/'))[1].split('LU')[0]),
                'peak': float(part[2].split('/')[1]),
            }
        except IndexError:
            # Malformed section (e.g. an unreadable file): skip it.
            self._log.info('bs1770gain reports (faulty file?): {}', parts)
            continue
        out.append(Gain(song['gain'], song['peak']))
    return out
# mpgain/aacgain CLI tool backend.
class CommandBackend(Backend):
    """ReplayGain backend that shells out to the mp3gain/aacgain
    command-line tools.
    """

    def __init__(self, config, log):
        """Locate the gain tool (explicit `command` setting, or
        mp3gain/aacgain found on $PATH) and read the `noclip` and
        `targetlevel` options.

        Raises FatalReplayGainError if no usable tool is found.
        """
        super(CommandBackend, self).__init__(config, log)
        config.add({
            'command': u"",
            'noclip': True,
        })

        self.command = config["command"].get(unicode)

        if self.command:
            # Explicit executable path.
            if not os.path.isfile(self.command):
                raise FatalReplayGainError(
                    'replaygain command does not exist: {0}'.format(
                        self.command
                    )
                )
        else:
            # Check whether the program is in $PATH.
            for cmd in (b'mp3gain', b'aacgain'):
                try:
                    call([cmd, b'-v'])
                    self.command = cmd
                except OSError:
                    pass
        if not self.command:
            raise FatalReplayGainError(
                'no replaygain command found: install mp3gain or aacgain'
            )

        self.noclip = config['noclip'].get(bool)
        target_level = config['targetlevel'].as_number()
        # mp3gain's reference level is 89 dB; store only the delta.
        self.gain_offset = int(target_level - 89)

    def compute_track_gain(self, items):
        """Computes the track gain of the given tracks, returns a list
        of TrackGain objects.
        """
        # NOTE(review): Python 2 `filter` returns a list here; the
        # module relies on that (len() is taken on it elsewhere).
        supported_items = filter(self.format_supported, items)
        output = self.compute_gain(supported_items, False)
        return output

    def compute_album_gain(self, album):
        """Computes the album gain of the given album, returns an
        AlbumGain object.
        """
        # TODO: What should be done when not all tracks in the album are
        # supported?
        supported_items = filter(self.format_supported, album.items())
        if len(supported_items) != len(album.items()):
            # Bail out rather than compute a misleading album gain
            # from only a subset of the tracks.
            self._log.debug(u'tracks are of unsupported format')
            return AlbumGain(None, [])

        output = self.compute_gain(supported_items, True)
        # The tool emits the album summary last.
        return AlbumGain(output[-1], output[:-1])

    def format_supported(self, item):
        """Checks whether the given item is supported by the selected tool.
        """
        if 'mp3gain' in self.command and item.format != 'MP3':
            return False
        elif 'aacgain' in self.command and item.format not in ('MP3', 'AAC'):
            return False
        return True

    def compute_gain(self, items, is_album):
        """Computes the track or album gain of a list of items, returns
        a list of TrackGain objects.

        When computing album gain, the last TrackGain object returned is
        the album gain.
        """
        if len(items) == 0:
            return []

        # Construct shell command. The "-o" option makes the output
        # easily parseable (tab-delimited). "-s s" forces gain
        # recalculation even if tags are already present and disables
        # tag-writing; this turns the mp3gain/aacgain tool into a gain
        # calculator rather than a tag manipulator because we take care
        # of changing tags ourselves.
        cmd = [self.command, b'-o', b'-s', b's']
        if self.noclip:
            # Adjust to avoid clipping.
            cmd = cmd + [b'-k']
        else:
            # Disable clipping warning.
            cmd = cmd + [b'-c']
        # NOTE(review): bytes(int) stringifies the number only under
        # Python 2 (bytes is str there); this module is Python 2 code.
        cmd = cmd + [b'-d', bytes(self.gain_offset)]
        cmd = cmd + [syspath(i.path) for i in items]

        self._log.debug(u'analyzing {0} files', len(items))
        self._log.debug(u"executing {0}", " ".join(map(displayable_path, cmd)))
        output = call(cmd)
        self._log.debug(u'analysis finished')
        return self.parse_tool_output(output,
                                      len(items) + (1 if is_album else 0))

    def parse_tool_output(self, text, num_lines):
        """Given the tab-delimited output from an invocation of mp3gain
        or aacgain, parse the text and return a list of dictionaries
        containing information about each analyzed file.
        """
        out = []
        # Skip the header row, then read one line per expected result.
        for line in text.split(b'\n')[1:num_lines + 1]:
            parts = line.split(b'\t')
            if len(parts) != 6 or parts[0] == b'File':
                self._log.debug(u'bad tool output: {0}', text)
                raise ReplayGainError('mp3gain failed')
            d = {
                'file': parts[0],
                'mp3gain': int(parts[1]),
                'gain': float(parts[2]),
                # The tool reports peak as a 16-bit sample magnitude.
                'peak': float(parts[3]) / (1 << 15),
                'maxgain': int(parts[4]),
                'mingain': int(parts[5]),
            }
            out.append(Gain(d['gain'], d['peak']))
        return out
# GStreamer-based backend.
class GStreamerBackend(Backend):
    """ReplayGain backend that analyzes audio with GStreamer's
    ``rganalysis`` element.
    """

    def __init__(self, config, log):
        """Build the GStreamer pipeline and connect bus signal handlers.
        Raises FatalReplayGainError (via `_import_gst`) if GStreamer is
        unavailable.
        """
        super(GStreamerBackend, self).__init__(config, log)
        self._import_gst()

        # Initialized a GStreamer pipeline of the form filesrc ->
        # decodebin -> audioconvert -> audioresample -> rganalysis ->
        # fakesink. The connection between decodebin and audioconvert is
        # handled dynamically after decodebin figures out the type of
        # the input file.
        self._src = self.Gst.ElementFactory.make("filesrc", "src")
        self._decbin = self.Gst.ElementFactory.make("decodebin", "decbin")
        self._conv = self.Gst.ElementFactory.make("audioconvert", "conv")
        self._res = self.Gst.ElementFactory.make("audioresample", "res")
        self._rg = self.Gst.ElementFactory.make("rganalysis", "rg")

        # We check which files need gain ourselves, so all files given
        # to rganalysis should have their gain computed, even if it
        # already exists.
        self._rg.set_property("forced", True)
        self._rg.set_property("reference-level",
                              config["targetlevel"].as_number())
        self._sink = self.Gst.ElementFactory.make("fakesink", "sink")

        self._pipe = self.Gst.Pipeline()
        self._pipe.add(self._src)
        self._pipe.add(self._decbin)
        self._pipe.add(self._conv)
        self._pipe.add(self._res)
        self._pipe.add(self._rg)
        self._pipe.add(self._sink)

        self._src.link(self._decbin)
        self._conv.link(self._res)
        self._res.link(self._rg)
        self._rg.link(self._sink)

        self._bus = self._pipe.get_bus()
        self._bus.add_signal_watch()
        self._bus.connect("message::eos", self._on_eos)
        self._bus.connect("message::error", self._on_error)
        self._bus.connect("message::tag", self._on_tag)

        # Needed for handling the dynamic connection between decodebin
        # and audioconvert
        self._decbin.connect("pad-added", self._on_pad_added)
        self._decbin.connect("pad-removed", self._on_pad_removed)

        self._main_loop = self.GLib.MainLoop()

        # Queue of files still to be analyzed by `compute`.
        self._files = []

    def _import_gst(self):
        """Import the necessary GObject-related modules and assign `Gst`
        and `GObject` fields on this object.

        :raises FatalReplayGainError: if python-gi or GStreamer 1.0 is
            not available.
        """
        try:
            import gi
        except ImportError:
            raise FatalReplayGainError(
                "Failed to load GStreamer: python-gi not found"
            )

        try:
            gi.require_version('Gst', '1.0')
        except ValueError as e:
            raise FatalReplayGainError(
                "Failed to load GStreamer 1.0: {0}".format(e)
            )

        from gi.repository import GObject, Gst, GLib
        # Calling GObject.threads_init() is not needed for
        # PyGObject 3.10.2+
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            GObject.threads_init()
        Gst.init([sys.argv[0]])

        self.GObject = GObject
        self.GLib = GLib
        self.Gst = Gst

    def compute(self, files, album):
        """Run the pipeline over `files`, collecting the resulting RG
        tags into `self._file_tags`. When `album` is true, rganalysis
        is told the track count so it emits album tags as well.
        """
        self._error = None
        self._files = list(files)

        if len(self._files) == 0:
            return

        self._file_tags = collections.defaultdict(dict)

        if album:
            self._rg.set_property("num-tracks", len(self._files))

        if self._set_first_file():
            self._main_loop.run()
            if self._error is not None:
                raise self._error

    def compute_track_gain(self, items):
        """Compute per-track gain for `items`; return a list of `Gain`
        objects in item order.
        """
        self.compute(items, False)
        if len(self._file_tags) != len(items):
            raise ReplayGainError("Some tracks did not receive tags")

        ret = []
        for item in items:
            ret.append(Gain(self._file_tags[item]["TRACK_GAIN"],
                            self._file_tags[item]["TRACK_PEAK"]))

        return ret

    def compute_album_gain(self, album):
        """Compute per-track and album gain for `album`; return an
        `AlbumGain`.
        """
        items = list(album.items())
        self.compute(items, True)
        if len(self._file_tags) != len(items):
            raise ReplayGainError("Some items in album did not receive tags")

        ret = []
        for item in items:
            ret.append(Gain(self._file_tags[item]["TRACK_GAIN"],
                            self._file_tags[item]["TRACK_PEAK"]))

        # The album tags are attached to the last file analyzed.
        last_tags = self._file_tags[items[-1]]
        return AlbumGain(Gain(last_tags["ALBUM_GAIN"],
                              last_tags["ALBUM_PEAK"]), ret)

    def close(self):
        # Stop watching the bus so the backend can be dropped cleanly.
        self._bus.remove_signal_watch()

    def _on_eos(self, bus, message):
        # A file finished playing in all elements of the pipeline. The
        # RG tags have already been propagated. If we don't have a next
        # file, we stop processing.
        if not self._set_next_file():
            self._pipe.set_state(self.Gst.State.NULL)
            self._main_loop.quit()

    def _on_error(self, bus, message):
        # Record the error for `compute` to re-raise, then stop.
        self._pipe.set_state(self.Gst.State.NULL)
        self._main_loop.quit()
        err, debug = message.parse_error()
        f = self._src.get_property("location")
        # A GStreamer error, either an unsupported format or a bug.
        self._error = \
            ReplayGainError(u"Error {0} - {1} on file {2}".format(err,
                                                                  debug,
                                                                  f))

    def _on_tag(self, bus, message):
        # Collect RG-related tags for the file currently being analyzed.
        tags = message.parse_tag()

        def handle_tag(taglist, tag, userdata):
            # The rganalysis element provides both the existing tags for
            # files and the newly computed tags. In order to ensure we
            # store the computed tags, we overwrite the RG values when
            # they are received a second time.
            if tag == self.Gst.TAG_TRACK_GAIN:
                self._file_tags[self._file]["TRACK_GAIN"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_TRACK_PEAK:
                self._file_tags[self._file]["TRACK_PEAK"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_ALBUM_GAIN:
                self._file_tags[self._file]["ALBUM_GAIN"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_ALBUM_PEAK:
                self._file_tags[self._file]["ALBUM_PEAK"] = \
                    taglist.get_double(tag)[1]
            elif tag == self.Gst.TAG_REFERENCE_LEVEL:
                self._file_tags[self._file]["REFERENCE_LEVEL"] = \
                    taglist.get_double(tag)[1]

        tags.foreach(handle_tag, None)

    def _set_first_file(self):
        """Start the pipeline on the first queued file. Returns False
        when there is nothing to analyze.
        """
        if len(self._files) == 0:
            return False

        self._file = self._files.pop(0)
        self._pipe.set_state(self.Gst.State.NULL)
        self._src.set_property("location", syspath(self._file.path))
        self._pipe.set_state(self.Gst.State.PLAYING)
        return True

    def _set_file(self):
        """Initialize the filesrc element with the next file to be analyzed.
        """
        # No more files, we're done
        if len(self._files) == 0:
            return False

        self._file = self._files.pop(0)

        # Disconnect the decodebin element from the pipeline, set its
        # state to READY to clear it.
        self._decbin.unlink(self._conv)
        self._decbin.set_state(self.Gst.State.READY)

        # Set a new file on the filesrc element, can only be done in the
        # READY state
        self._src.set_state(self.Gst.State.READY)
        self._src.set_property("location", syspath(self._file.path))

        # Ensure the filesrc element received the paused state of the
        # pipeline in a blocking manner
        self._src.sync_state_with_parent()
        self._src.get_state(self.Gst.CLOCK_TIME_NONE)

        # Ensure the decodebin element receives the paused state of the
        # pipeline in a blocking manner
        self._decbin.sync_state_with_parent()
        self._decbin.get_state(self.Gst.CLOCK_TIME_NONE)

        return True

    def _set_next_file(self):
        """Set the next file to be analyzed while keeping the pipeline
        in the PAUSED state so that the rganalysis element can correctly
        handle album gain.
        """
        # A blocking pause
        self._pipe.set_state(self.Gst.State.PAUSED)
        self._pipe.get_state(self.Gst.CLOCK_TIME_NONE)

        # Try setting the next file
        ret = self._set_file()
        if ret:
            # Seek to the beginning in order to clear the EOS state of the
            # various elements of the pipeline
            self._pipe.seek_simple(self.Gst.Format.TIME,
                                   self.Gst.SeekFlags.FLUSH,
                                   0)
            self._pipe.set_state(self.Gst.State.PLAYING)

        return ret

    def _on_pad_added(self, decbin, pad):
        # Link decodebin's dynamically created source pad to the
        # converter once the input type is known.
        sink_pad = self._conv.get_compatible_pad(pad, None)
        assert(sink_pad is not None)
        pad.link(sink_pad)

    def _on_pad_removed(self, decbin, pad):
        # Called when the decodebin element is disconnected from the
        # rest of the pipeline while switching input files
        peer = pad.get_peer()
        assert(peer is None)
class AudioToolsBackend(Backend):
    """ReplayGain backend that uses `Python Audio Tools
    <http://audiotools.sourceforge.net/>`_ and its capabilities to read more
    file formats and compute ReplayGain values using its replaygain module.
    """

    def __init__(self, config, log):
        super(AudioToolsBackend, self).__init__(config, log)
        self._import_audiotools()

    def _import_audiotools(self):
        """Check whether it's possible to import the necessary modules.
        There is no check on the file formats at runtime.

        :raises :exc:`FatalReplayGainError`: if the modules cannot be imported
        """
        try:
            import audiotools
            import audiotools.replaygain
        except ImportError:
            raise FatalReplayGainError(
                "Failed to load audiotools: audiotools not found"
            )
        self._mod_audiotools = audiotools
        self._mod_replaygain = audiotools.replaygain

    def open_audio_file(self, item):
        """Open the file to read the PCM stream from the using
        ``item.path``.

        :return: the audiofile instance
        :rtype: :class:`audiotools.AudioFile`
        :raises :exc:`ReplayGainError`: if the file is not found or the
            file format is not supported
        """
        try:
            audiofile = self._mod_audiotools.open(item.path)
        except IOError:
            raise ReplayGainError(
                "File {} was not found".format(item.path)
            )
        except self._mod_audiotools.UnsupportedFile:
            raise ReplayGainError(
                "Unsupported file type {}".format(item.format)
            )

        return audiofile

    def init_replaygain(self, audiofile, item):
        """Return an initialized :class:`audiotools.replaygain.ReplayGain`
        instance, which requires the sample rate of the song(s) on which
        the ReplayGain values will be computed. The item is passed in case
        the sample rate is invalid to log the stored item sample rate.

        :return: initialized replaygain object
        :rtype: :class:`audiotools.replaygain.ReplayGain`
        :raises: :exc:`ReplayGainError` if the sample rate is invalid
        """
        try:
            rg = self._mod_replaygain.ReplayGain(audiofile.sample_rate())
        except ValueError:
            raise ReplayGainError(
                "Unsupported sample rate {}".format(item.samplerate)
            )
        # BUG FIX: a stray bare `return` used to precede this statement,
        # making the function always return None so callers crashed on
        # `rg.title_gain(...)`.
        return rg

    def compute_track_gain(self, items):
        """Compute ReplayGain values for the requested items.

        :return list: list of :class:`Gain` objects
        """
        return [self._compute_track_gain(item) for item in items]

    def _compute_track_gain(self, item):
        """Compute ReplayGain value for the requested item.

        :rtype: :class:`Gain`
        """
        audiofile = self.open_audio_file(item)
        rg = self.init_replaygain(audiofile, item)

        # Each call to title_gain on a replaygain object returns peak and
        # gain of the track.
        # Note that the method needs an audiotools.PCMReader instance that can
        # be obtained from an audiofile instance.
        rg_track_gain, rg_track_peak = rg.title_gain(audiofile.to_pcm())

        self._log.debug(u'ReplayGain for track {0} - {1}: {2:.2f}, {3:.2f}',
                        item.artist, item.title, rg_track_gain, rg_track_peak)
        return Gain(gain=rg_track_gain, peak=rg_track_peak)

    def compute_album_gain(self, album):
        """Compute ReplayGain values for the requested album and its items.

        :rtype: :class:`AlbumGain`
        """
        self._log.debug(u'Analysing album {0}', album)

        # The first item is taken and opened to get the sample rate to
        # initialize the replaygain object. The object is used for all the
        # tracks in the album to get the album values.
        item = list(album.items())[0]
        audiofile = self.open_audio_file(item)
        rg = self.init_replaygain(audiofile, item)

        track_gains = []
        for item in album.items():
            audiofile = self.open_audio_file(item)
            rg_track_gain, rg_track_peak = rg.title_gain(audiofile.to_pcm())
            track_gains.append(
                Gain(gain=rg_track_gain, peak=rg_track_peak)
            )
            self._log.debug(u'ReplayGain for track {0}: {1:.2f}, {2:.2f}',
                            item, rg_track_gain, rg_track_peak)

        # After getting the values for all tracks, it's possible to get the
        # album values.
        rg_album_gain, rg_album_peak = rg.album_gain()
        self._log.debug(u'ReplayGain for album {0}: {1:.2f}, {2:.2f}',
                        album, rg_album_gain, rg_album_peak)

        return AlbumGain(
            Gain(gain=rg_album_gain, peak=rg_album_peak),
            track_gains=track_gains
        )
# Main plugin logic.
class ReplayGainPlugin(BeetsPlugin):
    """Provides ReplayGain analysis.
    """

    # Map of config names to backend classes.
    backends = {
        "command": CommandBackend,
        "gstreamer": GStreamerBackend,
        "audiotools": AudioToolsBackend,
        "bs1770gain": Bs1770gainBackend
    }

    def __init__(self):
        """Read configuration, validate the selected backend name, and
        instantiate the backend. Raises ui.UserError on an unknown
        backend or a backend initialization failure.
        """
        super(ReplayGainPlugin, self).__init__()

        # default backend is 'command' for backward-compatibility.
        self.config.add({
            'overwrite': False,
            'auto': True,
            'backend': u'command',
            'targetlevel': 89,
        })

        self.overwrite = self.config['overwrite'].get(bool)
        backend_name = self.config['backend'].get(unicode)
        if backend_name not in self.backends:
            raise ui.UserError(
                u"Selected ReplayGain backend {0} is not supported. "
                u"Please select one of: {1}".format(
                    backend_name,
                    u', '.join(self.backends.keys())
                )
            )

        # On-import analysis.
        if self.config['auto']:
            self.import_stages = [self.imported]

        try:
            self.backend_instance = self.backends[backend_name](
                self.config, self._log
            )
        except (ReplayGainError, FatalReplayGainError) as e:
            raise ui.UserError(
                'replaygain initialization failed: {0}'.format(e)
            )

    def track_requires_gain(self, item):
        """Return True when the item's track gain should be computed.

        NOTE(review): a stored gain/peak of exactly 0 is falsy and thus
        treated as missing here — confirm that is intended.
        """
        return self.overwrite or \
            (not item.rg_track_gain or not item.rg_track_peak)

    def album_requires_gain(self, album):
        """Return True when the album's gain should be computed."""
        # Skip calculating gain only when *all* files don't need
        # recalculation. This way, if any file among an album's tracks
        # needs recalculation, we still get an accurate album gain
        # value.
        return self.overwrite or \
            any([not item.rg_album_gain or not item.rg_album_peak
                 for item in album.items()])

    def store_track_gain(self, item, track_gain):
        """Persist a computed track gain/peak on the item."""
        item.rg_track_gain = track_gain.gain
        item.rg_track_peak = track_gain.peak
        item.store()

        self._log.debug(u'applied track gain {0}, peak {1}',
                        item.rg_track_gain, item.rg_track_peak)

    def store_album_gain(self, album, album_gain):
        """Persist a computed album gain/peak on the album."""
        album.rg_album_gain = album_gain.gain
        album.rg_album_peak = album_gain.peak
        album.store()

        self._log.debug(u'applied album gain {0}, peak {1}',
                        album.rg_album_gain, album.rg_album_peak)

    def handle_album(self, album, write):
        """Compute album and track replay gain store it in all of the
        album's items.

        If ``write`` is truthy then ``item.write()`` is called for each
        item. If replay gain information is already present in all
        items, nothing is done.
        """
        if not self.album_requires_gain(album):
            self._log.info(u'Skipping album {0}', album)
            return

        self._log.info(u'analyzing {0}', album)

        try:
            album_gain = self.backend_instance.compute_album_gain(album)
            if len(album_gain.track_gains) != len(album.items()):
                raise ReplayGainError(
                    u"ReplayGain backend failed "
                    u"for some tracks in album {0}".format(album)
                )

            self.store_album_gain(album, album_gain.album_gain)
            # Python 2 izip: pair items with their computed gains lazily.
            for item, track_gain in itertools.izip(album.items(),
                                                   album_gain.track_gains):
                self.store_track_gain(item, track_gain)
                if write:
                    item.try_write()
        except ReplayGainError as e:
            # Per-album failures are logged but do not abort the run.
            self._log.info(u"ReplayGain error: {0}", e)
        except FatalReplayGainError as e:
            raise ui.UserError(
                u"Fatal replay gain error: {0}".format(e)
            )

    def handle_track(self, item, write):
        """Compute track replay gain and store it in the item.

        If ``write`` is truthy then ``item.write()`` is called to write
        the data to disk. If replay gain information is already present
        in the item, nothing is done.
        """
        if not self.track_requires_gain(item):
            self._log.info(u'Skipping track {0}', item)
            return

        self._log.info(u'analyzing {0}', item)

        try:
            track_gains = self.backend_instance.compute_track_gain([item])
            if len(track_gains) != 1:
                raise ReplayGainError(
                    u"ReplayGain backend failed for track {0}".format(item)
                )

            self.store_track_gain(item, track_gains[0])
            if write:
                item.try_write()
        except ReplayGainError as e:
            # Per-track failures are logged but do not abort the run.
            self._log.info(u"ReplayGain error: {0}", e)
        except FatalReplayGainError as e:
            raise ui.UserError(
                u"Fatal replay gain error: {0}".format(e)
            )

    def imported(self, session, task):
        """Add replay gain info to items or albums of ``task``.
        """
        if task.is_album:
            self.handle_album(task.album, False)
        else:
            self.handle_track(task.item, False)

    def commands(self):
        """Return the "replaygain" ui subcommand.
        """
        def func(lib, opts, args):
            # Show progress at INFO level during explicit invocation.
            self._log.setLevel(logging.INFO)

            write = config['import']['write'].get(bool)

            if opts.album:
                for album in lib.albums(ui.decargs(args)):
                    self.handle_album(album, write)
            else:
                for item in lib.items(ui.decargs(args)):
                    self.handle_track(item, write)

        cmd = ui.Subcommand('replaygain', help='analyze for ReplayGain')
        cmd.parser.add_album_option()
        cmd.func = func
        return [cmd]
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
""" Tests for Multiple IP Ranges feature
"""
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.cloudstackException import cloudstackAPIException
from marvin.integration.lib.utils import *
from marvin.integration.lib.base import *
from marvin.integration.lib.common import *
from netaddr import *
from nose.plugins.attrib import attr
class Services:
    """Configuration data for the multiple guest IP range tests."""

    def __init__(self):
        account = {
            "email": "test@test.com",
            "firstname": "Test",
            "lastname": "User",
            "username": "test",
            # Random characters are appended for unique
            # username
            "password": "password",
        }
        service_offering = {
            "name": "Tiny Instance",
            "displaytext": "Tiny Instance",
            "cpunumber": 1,
            "cpuspeed": 200,    # in MHz
            "memory": 256,      # In MBs
        }
        disk_offering = {
            "displaytext": "Small Disk",
            "name": "Small Disk",
            "disksize": 1,
        }
        templates = {
            "displaytext": 'Template',
            "name": 'Template',
            "ostype": "CentOS 5.3 (64-bit)",
            "templatefilter": 'self',
        }
        # Address fields are filled in by each test case before use.
        vlan_ip_range = {
            "startip": "",
            "endip": "",
            "netmask": "",
            "gateway": "",
            "forvirtualnetwork": "false",
            "vlan": "untagged",
        }
        self.services = {
            "account": account,
            "service_offering": service_offering,
            "disk_offering": disk_offering,
            "templates": templates,
            "vlan_ip_range": vlan_ip_range,
            "ostype": "CentOS 5.3 (64-bit)",
            "sleep": 60,
            "timeout": 10,
        }
class TestMultipleIpRanges(cloudstackTestCase):
"""Test Multiple IP Ranges for guest network
"""
@classmethod
def setUpClass(cls):
    """One-time setup: fetch zone/domain/pod metadata, create the
    test account plus disk/service offerings, and register the
    account for class-level cleanup.
    """
    cls.api_client = super(TestMultipleIpRanges, cls).getClsTestClient().getApiClient()
    cls.services = Services().services
    # Get Zone, Domain and templates
    cls.domain = get_domain(cls.api_client, cls.services)
    cls.zone = get_zone(cls.api_client, cls.services)
    cls.pod = get_pod(cls.api_client, cls.zone.id, cls.services)
    cls.services['mode'] = cls.zone.networktype
    cls.services["domainid"] = cls.domain.id
    cls.services["zoneid"] = cls.zone.id
    cls.account = Account.create(
        cls.api_client,
        cls.services["account"],
        domainid=cls.domain.id
    )
    # Record the generated account name back into the test data.
    cls.services["account"] = cls.account.name
    cls.disk_offering = DiskOffering.create(
        cls.api_client,
        cls.services["disk_offering"]
    )
    cls.service_offering = ServiceOffering.create(
        cls.api_client,
        cls.services["service_offering"]
    )
    cls.template = get_template(
        cls.api_client,
        cls.zone.id,
        cls.services["ostype"]
    )
    cls.services["templates"]["ostypeid"] = cls.template.ostypeid
    cls.services["diskoffering"] = cls.disk_offering.id
    # Only the account is cleaned up here; offerings persist.
    cls._cleanup = [
        cls.account,
    ]
    return
@classmethod
def tearDownClass(cls):
    """Remove every resource registered during setUpClass."""
    try:
        cleanup_resources(cls.api_client, cls._cleanup)
    except Exception as exc:
        raise Exception("Warning: Exception during cleanup : %s" % exc)
def setUp(self):
    """Per-test setup: obtain fresh API/DB clients and reset the
    per-test cleanup list.
    """
    self.apiclient = self.testClient.getApiClient()
    self.dbclient = self.testClient.getDbConnection()
    self.cleanup = []
def tearDown(self):
    """Tear down the resources registered by the test that just ran."""
    try:
        cleanup_resources(self.apiclient, self.cleanup)
    except Exception as exc:
        raise Exception("Warning: Exception during cleanup : %s" % exc)
def increment_cidr(self):
    """Read the first configured public IP range, remember its
    addresses on self, and return an IPNetwork for the next
    (incremented) CIDR of the same size.
    """
    ip_range = PublicIpRange.list(self.apiclient)[0]
    self.startIp = ip_range.startip
    self.endIp = ip_range.endip
    self.gateway = ip_range.gateway
    self.netmask = ip_range.netmask
    # Derive the CIDR from the start address and netmask, then step
    # forward to the next network.
    network = IPNetwork(self.startIp + "/" + self.netmask)
    network += 1
    return IPNetwork(network)
def verify_vlan_range(self, vlan, services):
    """Assert that the listVlanIpRanges response `vlan` matches the
    values configured in `services`.
    """
    self.assertEqual(
        isinstance(vlan, list),
        True,
        "Check list response returned a valid list"
    )
    self.assertNotEqual(
        len(vlan),
        0,
        "check list vlan response"
    )
    # Compare each address field of the first range entry against the
    # configured value; same checks and order as spelled-out asserts.
    checks = (
        ("startip",
         "Start IP in vlan ip range is not matched with the configured start ip"),
        ("endip",
         "End IP in vlan ip range is not matched with the configured end ip"),
        ("gateway",
         "gateway in vlan ip range is not matched with the configured gateway"),
        ("netmask",
         "netmask in vlan ip range is not matched with the configured netmask"),
    )
    for field, message in checks:
        self.assertEqual(
            str(getattr(vlan[0], field)),
            str(services[field]),
            message
        )
@attr(tags=["sg"])
def test_01_add_ip_same_cidr(self):
    """Test add guest ip range in the existing cidr
    """
    # Derive a fresh, unused CIDR from the current setup.
    next_cidr = self.increment_cidr()
    base_ip = IPAddress(next_cidr.network)

    # First range, then a second non-overlapping range in the same CIDR.
    test_gateway = base_ip + 1
    test_startIp = base_ip + 3
    test_endIp = base_ip + 10
    test_startIp2 = base_ip + 11
    test_endIp2 = base_ip + 15

    # Populating services with new IP range
    self.services["vlan_ip_range"]["startip"] = test_startIp
    self.services["vlan_ip_range"]["endip"] = test_endIp
    self.services["vlan_ip_range"]["gateway"] = test_gateway
    self.services["vlan_ip_range"]["netmask"] = self.netmask
    self.services["vlan_ip_range"]["zoneid"] = self.zone.id
    self.services["vlan_ip_range"]["podid"] = self.pod.id

    # create new vlan ip range
    self.debug("Creating new ip range with new cidr in the same vlan")
    new_vlan = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
    self.debug("Created new vlan range with startip:%s and endip:%s" % (test_startIp, test_endIp))
    self.cleanup.append(new_vlan)
    new_vlan_res = new_vlan.list(self.apiclient, id=new_vlan.vlan.id)
    # Compare list output with configured values
    self.verify_vlan_range(new_vlan_res, self.services["vlan_ip_range"])

    # Add few more ips in the same CIDR
    self.services["vlan_ip_range"]["startip"] = test_startIp2
    self.services["vlan_ip_range"]["endip"] = test_endIp2
    self.debug("Creating new ip range in the existing CIDR")
    new_vlan2 = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
    self.debug("Created new vlan range with startip:%s and endip:%s" % (test_startIp2, test_endIp2))
    self.cleanup.append(new_vlan2)
    # list new vlan ip range
    new_vlan2_res = new_vlan2.list(self.apiclient, id=new_vlan2.vlan.id)
    # Compare list output with configured values
    self.verify_vlan_range(new_vlan2_res, self.services["vlan_ip_range"])
@attr(tags=["sg"])
def test_02_add_ip_diff_cidr(self):
    """Test add ip range in a new cidr

    Steps:
    1.Get public vlan range (guest cidr) from the setup
    2.Add IP range to a new cidr
    """
    # Derive a fresh, unused CIDR from the current setup.
    next_cidr = self.increment_cidr()
    base_ip = IPAddress(next_cidr.network)

    # Add IP range(5 IPs) in the new CIDR
    test_gateway = base_ip + 1
    test_startIp = base_ip + 3
    test_endIp = base_ip + 10

    # Populating services with new IP range
    self.services["vlan_ip_range"]["startip"] = test_startIp
    self.services["vlan_ip_range"]["endip"] = test_endIp
    self.services["vlan_ip_range"]["gateway"] = test_gateway
    self.services["vlan_ip_range"]["netmask"] = self.netmask
    self.services["vlan_ip_range"]["zoneid"] = self.zone.id
    self.services["vlan_ip_range"]["podid"] = self.pod.id

    # create new vlan ip range
    self.debug("Adding new ip range in different CIDR in same vlan")
    new_vlan = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
    self.debug("Created new vlan range with startip:%s and endip:%s" % (test_startIp, test_endIp))
    self.cleanup.append(new_vlan)
    new_vlan_res = new_vlan.list(self.apiclient, id=new_vlan.vlan.id)
    # Compare list output with configured values
    self.verify_vlan_range(new_vlan_res, self.services["vlan_ip_range"])
@attr(tags=["sg"])
def test_03_del_ip_range(self):
    """Test delete ip range

    Steps:
    1.Add ip range in same/new cidr
    2.delete the ip range added at step1
    3.Verify the ip range deletion using list APIs
    """
    # Derive a fresh, unused CIDR from the current setup.
    next_cidr = self.increment_cidr()
    base_ip = IPAddress(next_cidr.network)

    # Add IP range(5 IPs) in the new CIDR
    test_gateway = base_ip + 1
    test_startIp = base_ip + 3
    test_endIp = base_ip + 10

    # Populating services with new IP range
    self.services["vlan_ip_range"]["startip"] = test_startIp
    self.services["vlan_ip_range"]["endip"] = test_endIp
    self.services["vlan_ip_range"]["gateway"] = test_gateway
    self.services["vlan_ip_range"]["netmask"] = self.netmask
    self.services["vlan_ip_range"]["zoneid"] = self.zone.id
    self.services["vlan_ip_range"]["podid"] = self.pod.id

    # create new vlan ip range (not added to cleanup: deleted below)
    self.debug("Creating new ip range in the new cidr")
    new_vlan = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
    self.debug("Created new vlan range with startip:%s and endip:%s" % (test_startIp, test_endIp))
    new_vlan_res = new_vlan.list(self.apiclient, id=new_vlan.vlan.id)
    # Compare list output with configured values
    self.verify_vlan_range(new_vlan_res, self.services["vlan_ip_range"])

    # Delete the above IP range
    self.debug("Deleting new ip range added in new cidr")
    new_vlan.delete(self.apiclient)

    # Listing the deleted range by id must raise; otherwise fail.
    try:
        new_vlan.list(self.apiclient, id=new_vlan.vlan.id)
    except cloudstackAPIException as cs:
        self.debug(cs.errorMsg)
        # NOTE(review): `> 0` misses a match at index 0 — confirm.
        self.assertTrue(cs.errorMsg.find("entity does not exist") > 0, msg="Failed to delete IP range")
@attr(tags=["sg"])
def test_04_add_noncontiguous_ip_range(self):
    """Test adding non-contiguous ip range in existing cidr

    1.Add ip range in new cidr
    2.Add non-contiguous ip range in cidr added at step1
    3.Verify the ip range using list APIs
    """
    # call increment_cidr function to get exiting cidr from the setup and increment it
    ip2 = self.increment_cidr()
    test_nw = ip2.network
    ip = IPAddress(test_nw)
    # Add IP range in the new CIDR
    test_gateway = ip.__add__(1)
    test_startIp = ip.__add__(50)
    test_endIp = ip.__add__(60)
    # Populating services with new IP range
    self.services["vlan_ip_range"]["startip"] = test_startIp
    self.services["vlan_ip_range"]["endip"] = test_endIp
    self.services["vlan_ip_range"]["gateway"] = test_gateway
    self.services["vlan_ip_range"]["netmask"] = self.netmask
    self.services["vlan_ip_range"]["zoneid"] = self.zone.id
    self.services["vlan_ip_range"]["podid"] = self.pod.id
    # create new vlan ip range
    new_vlan = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
    self.debug("Created new vlan range with startip:%s and endip:%s" % (test_startIp, test_endIp))
    self.cleanup.append(new_vlan)
    new_vlan_res = new_vlan.list(self.apiclient, id=new_vlan.vlan.id)
    # Compare list output with configured values
    self.verify_vlan_range(new_vlan_res, self.services["vlan_ip_range"])
    # Add non-contiguous ip range in existing cidr
    test_startIp2 = ip.__add__(10)
    test_endIp2 = ip.__add__(20)
    # Populating services with new IP range
    self.services["vlan_ip_range"]["startip"] = test_startIp2
    self.services["vlan_ip_range"]["endip"] = test_endIp2
    # create new vlan ip range
    self.debug("Adding non contiguous ip range")
    new_vlan = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
    # BUG FIX: this message previously logged the first range's
    # addresses (test_startIp/test_endIp) instead of the second's.
    self.debug("Created new vlan range with startip:%s and endip:%s" % (test_startIp2, test_endIp2))
    self.cleanup.append(new_vlan)
    new_vlan_res = new_vlan.list(self.apiclient, id=new_vlan.vlan.id)
    # Compare list output with configured values
    self.verify_vlan_range(new_vlan_res, self.services["vlan_ip_range"])
    return
@attr(tags=["sg"])
def test_05_add_overlapped_ip_range(self):
    """Test adding overlapped ip range in existing cidr

    1.Add ip range in new cidr e.g:10.147.40.10-10.147.40.100
    2.Add ip range overlapped with the ip range in step1 e.g.10.147.40.90-150
    """
    # call increment_cidr function to get exiting cidr from the setup and increment it
    ip2 = self.increment_cidr()
    test_nw = ip2.network
    ip = IPAddress(test_nw)
    # Add IP range in the new CIDR
    test_gateway = ip.__add__(1)
    test_startIp = ip.__add__(10)
    test_endIp = ip.__add__(30)
    test_startIp2 = ip.__add__(20)
    test_endIp2 = ip.__add__(40)
    # Populating services with new IP range
    self.services["vlan_ip_range"]["startip"] = test_startIp
    self.services["vlan_ip_range"]["endip"] = test_endIp
    self.services["vlan_ip_range"]["gateway"] = test_gateway
    self.services["vlan_ip_range"]["netmask"] = self.netmask
    self.services["vlan_ip_range"]["zoneid"] = self.zone.id
    self.services["vlan_ip_range"]["podid"] = self.pod.id
    # create new vlan ip range
    # BUG FIX: this message previously used `.format()` on a %-style
    # string with no {} fields, so the literal "%s" was logged.
    self.debug("Creating new ip range with startip:%s and endip: %s" % (test_startIp, test_endIp))
    new_vlan = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
    self.debug("Created new vlan range with startip:%s and endip:%s" % (test_startIp, test_endIp))
    self.cleanup.append(new_vlan)
    new_vlan_res = new_vlan.list(self.apiclient, id=new_vlan.vlan.id)
    # Compare list output with configured values
    self.verify_vlan_range(new_vlan_res, self.services["vlan_ip_range"])
    # Add overlapped ip range
    # Populating services with new IP range
    self.services["vlan_ip_range"]["startip"] = test_startIp2
    self.services["vlan_ip_range"]["endip"] = test_endIp2
    # Try to create ip range overlapped with exiting ip range
    self.debug("Adding overlapped ip range")
    try:
        new_vlan2 = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
    except cloudstackAPIException as cs:
        self.debug(cs.errorMsg)
        # NOTE(review): `> 0` misses a match at index 0 — confirm.
        self.assertTrue(cs.errorMsg.find("already has IPs that overlap with the new range") > 0, msg="Fail:CS allowed adding overlapped ip ranges in guest cidr")
        return
    # Test will reach here if there is a bug in overlap ip range checking
    self.cleanup.append(new_vlan2)
    self.fail("CS should not accept overlapped ip ranges in guest traffic, but it allowed")
@attr(tags=["sg"])
def test_06_add_ip_range_overlapped_with_two_ranges(self):
"""Test adding overlapped ip range with two existing cidr
1.Add ip range in new cidr e.g:10.147.40.2-10.147.40.10
2.Add another ip range in the same cidr e.g:10.147.40.20-10.147.40.30
2.Add ip range overlapped with both the ip ranges e.g.10.147.40.10-20
"""
#call increment_cidr function to get exiting cidr from the setup and increment it
ip2 = self.increment_cidr()
test_nw = ip2.network
ip = IPAddress(test_nw)
#Add IP range in the new CIDR
test_gateway = ip.__add__(1)
test_startIp = ip.__add__(2)
test_endIp = ip.__add__(5)
test_startIp2 = ip.__add__(7)
test_endIp2 = ip.__add__(10)
test_startIp3 = ip.__add__(5)
test_endIp3 = ip.__add__(7)
#Populating services with new IP range
self.services["vlan_ip_range"]["startip"] = test_startIp
self.services["vlan_ip_range"]["endip"] = test_endIp
self.services["vlan_ip_range"]["gateway"] = test_gateway
self.services["vlan_ip_range"]["netmask"] = self.netmask
self.services["vlan_ip_range"]["zoneid"] = self.zone.id
self.services["vlan_ip_range"]["podid"] = self.pod.id
#create new vlan ip range
new_vlan = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
self.debug("Created new vlan range with startip:%s and endip:%s" %(test_startIp,test_endIp))
self.cleanup.append(new_vlan)
new_vlan_res = new_vlan.list(self.apiclient,id=new_vlan.vlan.id)
#Compare list output with configured values
self.verify_vlan_range(new_vlan_res,self.services["vlan_ip_range"])
#Add 2nd IP range in the same CIDR
self.services["vlan_ip_range"]["startip"] = test_startIp2
self.services["vlan_ip_range"]["endip"] = test_endIp2
new_vlan2 = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
self.debug("Created new vlan range with startip:%s and endip:%s" %(test_startIp2,test_endIp2))
self.cleanup.append(new_vlan2)
#Populating services with new IP range
self.services["vlan_ip_range"]["startip"] = test_startIp3
self.services["vlan_ip_range"]["endip"] = test_endIp3
#Try to create ip range overlapped with exiting ip range
self.debug("Adding ip range overlapped with two cidrs")
try:
new_vlan3 = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
except cloudstackAPIException as cs:
self.debug(cs.errorMsg)
self.assertTrue(cs.errorMsg.find("already has IPs that overlap with the new range")>0, msg="Fail:CS allowed adding overlapped ip ranges in guest cidr")
return
#Test will reach here if there is a bug in overlap ip range checking
self.cleanup.append(new_vlan3)
self.fail("CS should not accept overlapped ip ranges in guest traffic, but it allowed")
return
@attr(tags=["sg"])
def test_07_add_iprange_superset(self):
"""Test adding ip range superset to existing CIDR
1.Add IP range in new CIDR
2.Try to add ip range superset to CIDR added in step1
"""
#call increment_cidr function to get exiting cidr from the setup and increment it
ip2 = self.increment_cidr()
test_nw = ip2.network
ip = IPAddress(test_nw)
#Add IP range in the new CIDR
test_gateway = ip.__add__(1)
test_startIp = ip.__add__(2)
test_endIp = ip.__add__(10)
test_startIp2 = ip.__add__(20)
test_endIp2 = ip.__add__(30)
#Populating services with new IP range
self.services["vlan_ip_range"]["startip"] = test_startIp
self.services["vlan_ip_range"]["endip"] = test_endIp
self.services["vlan_ip_range"]["gateway"] = test_gateway
self.services["vlan_ip_range"]["netmask"] = self.netmask
self.services["vlan_ip_range"]["zoneid"] = self.zone.id
self.services["vlan_ip_range"]["podid"] = self.pod.id
#create new vlan ip range
new_vlan = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
self.debug("Created new vlan range with startip:%s and endip:%s" %(test_startIp,test_endIp))
self.cleanup.append(new_vlan)
new_vlan_res = new_vlan.list(self.apiclient,id=new_vlan.vlan.id)
#Compare list output with configured values
self.verify_vlan_range(new_vlan_res,self.services["vlan_ip_range"])
#Add ip range superset to the existing CIDR
#Following code finds the netmask superset to existing CIDR
cidr = ip2.cidr
mask_len = 2**(32-cidr.prefixlen)
netmask = IPAddress(self.netmask)
superset = netmask.__isub__(mask_len)
#Add this superset netmask to services
self.services["vlan_ip_range"]["netmask"] = superset
self.services["vlan_ip_range"]["startip"] = test_startIp2
self.services["vlan_ip_range"]["endip"] = test_endIp2
self.debug("Adding IP range super set to existing CIDR")
try:
new_vlan2 = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
except cloudstackAPIException as cs:
self.debug(cs.errorMsg)
self.assertTrue(cs.errorMsg.find("superset")>0, msg="Fail: CS allowed adding ip range superset to existing CIDR")
return
#Test will reach here if there is a bug in allowing superset ip range
self.cleanup.append(new_vlan2)
self.fail("CS should not allow adding ip range superset to existing CIDR")
return
@attr(tags=["sg"])
def test_08_add_iprange_subset(self):
"""Test adding ip range subset to existing CIDR
1.Add IP range in new CIDR
2.Try to add ip range subset to CIDR added in step1
"""
#call increment_cidr function to get exiting cidr from the setup and increment it
ip2 = self.increment_cidr()
test_nw = ip2.network
ip = IPAddress(test_nw)
#Add IP range in the new CIDR
test_gateway = ip.__add__(1)
test_startIp = ip.__add__(2)
test_endIp = ip.__add__(10)
test_startIp2 = ip.__add__(20)
test_endIp2 = ip.__add__(30)
#Populating services with new IP range
self.services["vlan_ip_range"]["startip"] = test_startIp
self.services["vlan_ip_range"]["endip"] = test_endIp
self.services["vlan_ip_range"]["gateway"] = test_gateway
self.services["vlan_ip_range"]["netmask"] = self.netmask
self.services["vlan_ip_range"]["zoneid"] = self.zone.id
self.services["vlan_ip_range"]["podid"] = self.pod.id
#create new vlan ip range
new_vlan = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
self.debug("Created new vlan range with startip:%s and endip:%s" %(test_startIp,test_endIp))
self.cleanup.append(new_vlan)
new_vlan_res = new_vlan.list(self.apiclient,id=new_vlan.vlan.id)
#Compare list output with configured values
self.verify_vlan_range(new_vlan_res,self.services["vlan_ip_range"])
#Add ip range superset to the existing CIDR
#Following code finds the netmask superset to existing CIDR
cidr = ip2.cidr
mask_len = 2**(32-(cidr.prefixlen+1))
netmask = IPAddress(self.netmask)
subset = netmask.__iadd__(mask_len)
#Add this superset netmask to services
self.services["vlan_ip_range"]["netmask"] = subset
self.services["vlan_ip_range"]["startip"] = test_startIp2
self.services["vlan_ip_range"]["endip"] = test_endIp2
self.debug("Adding ip range subset to existing cidr")
try:
new_vlan2 = PublicIpRange.create(self.apiclient, self.services["vlan_ip_range"])
except cloudstackAPIException as cs:
self.debug(cs.errorMsg)
self.assertTrue(cs.errorMsg.find("subset")>0, msg="Fail: CS allowed adding ip range subset to existing CIDR")
return
#Test will reach here if there is a bug in allowing superset ip range
self.cleanup.append(new_vlan2)
self.fail("CS should not allow adding ip range subset to existing CIDR")
return
| |
# -*- coding: utf-8 -*-
"""
Created on Mon Feb 3 20:14:11 2014
@author: james
"""
def propertyGenerator(name, variables, module, mem_ptr,
                      getter=None, setter=None, indent=1,
                      getter_success=None, setter_success=None,
                      getter_errors=None, setter_errors=None,
                      exception="ValueError",
                      unknown_error_msg="Unknown error"):
    """Generate cython source for one property wrapping C getter/setter calls.

    :param name: property name emitted after the ``property`` keyword.
    :param variables: sequence of ``(var_name, c_type)`` tuples for the C call.
    :param module: cython module prefix for the C functions and constants.
    :param mem_ptr: expression passed as the solver memory pointer argument.
    :param getter: C getter function name, or None to emit NotImplementedError.
    :param setter: C setter function name, or None to emit NotImplementedError.
    :param indent: base indentation level (4 spaces per level).
    :param getter_success: constant compared against the getter return code.
    :param setter_success: constant compared against the setter return code.
    :param getter_errors: mapping of error constant -> exception message.
    :param setter_errors: mapping of error constant -> exception message.
    :param exception: exception class name raised on a known error code.
    :param unknown_error_msg: message used for unrecognized return codes.
    :returns: the generated property definition as a single string.

    Example output::

        property maxSetupCalls:
            def __get__(self, ):
                raise NotImplementedError()
            def __set__(self, int msbset):
                ret = kinsol.KINSetMaxSetupCalls(self._kn, msbset)
                if ret == kinsol.KIN_SUCCESS:
                    return
                if ret == kinsol.KIN_MEM_NULL:
                    raise ValueError('Setup first must be called before SetMaxSetupCalls')
                if ret == kinsol.KIN_ILL_INPUT:
                    raise ValueError('Illegal value')
                raise ValueError('Unknown error ({}))'.format(ret))
    """
    # BUGFIX: replace mutable default arguments ({}) with None sentinels so a
    # dict is never shared between calls.
    getter_errors = {} if getter_errors is None else getter_errors
    setter_errors = {} if setter_errors is None else setter_errors
    fmt = lambda text, level: " " * level * 4 + "{}\n".format(text)
    make_args = lambda pattern, names, sep: sep.join([pattern.format(v) for v in names])
    vnames = [vn for vn, vt in variables]
    s = fmt("property {}:".format(name), indent)
    s += fmt("def __get__(self, ):", indent + 1)
    if getter is None:
        s += fmt("raise NotImplementedError()", indent + 2)
    else:
        # Declare one C-typed local per output variable.
        for vname, vtype in variables:
            s += fmt("cdef {} {}".format(vtype, vname), indent + 2)
        args = make_args("&{}", vnames, ", ")
        s += fmt("ret = {}.{}({},{})".format(module, getter, mem_ptr, args), indent + 2)
        s += fmt("if ret == {}.{}:".format(module, getter_success), indent + 2)
        args = make_args("{}", vnames, ", ")
        s += fmt("return {}".format(args), indent + 3)
        # One branch per known error code.
        # BUGFIX: .items() instead of py2-only .iteritems() (works on 2 and 3).
        for error, msg in getter_errors.items():
            s += fmt("if ret == {}.{}:".format(module, error), indent + 2)
            s += fmt('raise {}("{}")'.format(exception, msg), indent + 3)
        # Final catch-all error.
        s += fmt('raise {}("{}")'.format(exception, unknown_error_msg), indent + 2)
    s += fmt("", indent + 1)
    s += fmt("def __set__(self, value):", indent + 1)
    if setter is None:
        s += fmt("raise NotImplementedError()", indent + 2)
    else:
        for i, (vname, vtype) in enumerate(variables):
            if len(variables) > 1:
                # Multiple variables arrive packed in a tuple; unpack by index.
                s += fmt("cdef {} {} = value[{}]".format(vtype, vname, i), indent + 2)
            else:
                s += fmt("cdef {} {} = value".format(vtype, vname), indent + 2)
        args = make_args("{}", vnames, ", ")
        s += fmt("ret = {}.{}({},{})".format(module, setter, mem_ptr, args), indent + 2)
        s += fmt("if ret == {}.{}:".format(module, setter_success), indent + 2)
        s += fmt("return", indent + 3)
        # One branch per known error code.
        for error, msg in setter_errors.items():
            s += fmt("if ret == {}.{}:".format(module, error), indent + 2)
            s += fmt('raise {}("{}")'.format(exception, msg), indent + 3)
        # Final catch-all error.
        s += fmt('raise {}("{}")'.format(exception, unknown_error_msg), indent + 2)
    return s
def createKinsolProperties(filename):
    """Generate the cython property boilerplate for the KINSOL wrapper.

    Writes a ``BaseKinsol`` cdef-class skeleton followed by one setter-only
    property per entry in ``properties`` to *filename* (overwriting it).

    :param filename: path of the file to write.
    """
    module = 'kinsol'
    mem_ptr = 'self._kn'
    setter_success = 'KIN_SUCCESS'
    setter_errors = {
        'KIN_MEM_NULL': "KINSOL memory pointer is NULL",
        'KIN_ILL_INPUT': "Illegal value"}
    # (property name, [(variable, c type), ...], C setter function)
    # NOTE(review): 'noInitSeutp' looks like a typo for 'noInitSetup', but it
    # is part of the generated public API -- confirm with callers before renaming.
    properties = (
        ('printLevel', [('printfl', 'int'), ], 'KINSetPrintLevel'),
        ('numMaxIters', [('mxiter', 'long int'), ], 'KINSetNumMaxIters'),
        ('noInitSeutp', [('noInitSeutp', 'sun.booleantype'), ], 'KINSetNoInitSetup'),
        ('noResMon', [('noNNIResMon', 'sun.booleantype'), ], 'KINSetNoResMon'),
        ('maxSetupCalls', [('msbset', 'long int'), ], 'KINSetMaxSetupCalls'),
        ('maxSubSetupCalls', [('msbsetsub', 'long int'), ], 'KINSetMaxSubSetupCalls'),
        ('etaForm', [('etachoice', 'int'), ], 'KINSetEtaForm'),
        ('etaConstValue', [('eta', 'sun.realtype'), ], 'KINSetEtaConstValue'),
        ('etaParams', [('egamma', 'sun.realtype'), ('ealpha', 'sun.realtype')], 'KINSetEtaParams'),
        ('resMonParams', [('omegamin', 'sun.realtype'), ('omegamax', 'sun.realtype')], 'KINSetResMonParams'),
        ('resMonConstValue', [('omegaconst', 'sun.realtype'), ], 'KINSetResMonConstValue'),
        ('noMinEps', [('noMinEps', 'sun.booleantype'), ], 'KINSetNoMinEps'),
        ('maxNewtonStep', [('mxnewtstep', 'sun.realtype'), ], 'KINSetMaxNewtonStep'),
        ('maxBetaFails', [('mxbcf', 'long int'), ], 'KINSetMaxBetaFails'),
        ('funcNormTol', [('fnormtol', 'sun.realtype'), ], 'KINSetFuncNormTol'),
        ('scaledStepTol', [('scsteptol', 'sun.realtype'), ], 'KINSetScaledStepTol'),
    )
    # Class skeleton emitted before the generated properties.
    s = """
cdef class BaseKinsol:
    def __cinit__(self, *args, **kwds):
        self._kn = kinsol.KINCreate()
        if not self._kn:
            raise MemoryError

        ret = kinsol.KINSetUserData(self._kn, <void *>self)
        if ret != 0:
            raise KinsolError()
"""
    s += "\n"
    for name, variables, func in properties:
        s += propertyGenerator(name, variables, module, mem_ptr, setter=func,
                               setter_success=setter_success, setter_errors=setter_errors)
        s += "\n"
    # BUGFIX/idiom: context manager guarantees the handle is closed even when
    # write() raises; text mode because the payload is a str.
    with open(filename, 'w') as fh:
        fh.write(s)
def createCvodeProperties(filename):
    """Generate the cython property boilerplate for the CVODE wrapper.

    Writes a ``BaseCvode`` cdef-class skeleton followed by getter-only
    statistics properties and setter-only configuration properties to
    *filename* (overwriting it).

    :param filename: path of the file to write.
    """
    module = 'cvode'
    mem_ptr = 'self._cv'
    getter_success = 'CV_SUCCESS'
    getter_errors = {
        'CV_MEM_NULL': 'CVODE memory pointer is NULL',
        'CV_NO_SLDET': 'Stability limit was not turned on',
    }
    # (property name, [(variable, c type), ...], C getter function)
    # NOTE(review): 'lastStep' reuses the variable name 'hinused' from
    # 'actualInitStep'; the generated code works either way, but confirm
    # whether 'hlast' was intended.
    get_properties = (
        ('workSpace', [('lenrw', 'long int'), ('leniw', 'long int')], 'CVodeGetWorkSpace'),
        ('numSteps', [('nsteps', 'long int'), ], 'CVodeGetNumSteps'),
        ('numRhsEvals', [('nfevals', 'long int'), ], 'CVodeGetNumRhsEvals'),
        ('numLinSolvSetups', [('nlinsetups', 'long int'), ], 'CVodeGetNumLinSolvSetups'),
        ('numErrTestFails', [('netfails', 'long int'), ], 'CVodeGetNumErrTestFails'),
        ('lastOrder', [('qlast', 'int'), ], 'CVodeGetLastOrder'),
        ('currentOrder', [('qcur', 'int'), ], 'CVodeGetCurrentOrder'),
        ('numStabLimOrderReds', [('nslred', 'long int'), ], 'CVodeGetNumStabLimOrderReds'),
        ('actualInitStep', [('hinused', 'sun.realtype'), ], 'CVodeGetActualInitStep'),
        ('lastStep', [('hinused', 'sun.realtype'), ], 'CVodeGetLastStep'),
        ('currentStep', [('hcur', 'sun.realtype'), ], 'CVodeGetCurrentStep'),
        ('currentTime', [('tcur', 'sun.realtype'), ], 'CVodeGetCurrentTime'),
        ('tolScaleFactor', [('tolsfac', 'sun.realtype'), ], 'CVodeGetTolScaleFactor'),
        ('numGEvals', [('ngevals', 'long int'), ], 'CVodeGetNumGEvals'),
        ('numNonlinSolvIters', [('nniters', 'long int'), ], 'CVodeGetNumNonlinSolvIters'),
        ('numNonlinSolvConvFails', [('nncfails', 'long int'), ], 'CVodeGetNumNonlinSolvConvFails'),
        ('dlsWorkSpace', [('lenrwLS', 'long int'), ('leniwLS', 'long int'), ], 'CVDlsGetWorkSpace'),
        ('dlsNumJacEvals', [('njevals', 'long int'), ], 'CVDlsGetNumJacEvals'),
        ('dlsNumRhsEvals', [('nfevalsLS', 'long int'), ], 'CVDlsGetNumRhsEvals'),
        ('dlsLastFlag', [('flag', 'long int'), ], 'CVDlsGetLastFlag'),
        ('spilsWorkSpace', [('lenrwLS', 'long int'), ('leniwLS', 'long int')], 'CVSpilsGetWorkSpace'),
        ('spilsNumPrecEvals', [('npevals', 'long int'), ], 'CVSpilsGetNumPrecEvals'),
        ('spilsNumPrecSolves', [('npsolves', 'long int'), ], 'CVSpilsGetNumPrecSolves'),
        ('spilsNumLinIters', [('nliters', 'long int'), ], 'CVSpilsGetNumLinIters'),
        ('spilsNumConvFails', [('nlcfails', 'long int'), ], 'CVSpilsGetNumConvFails'),
        ('spilsNumJtimesEvals', [('njvevals', 'long int'), ], 'CVSpilsGetNumJtimesEvals'),
        ('spilsNumRhsEvals', [('nfevalsLS', 'long int'), ], 'CVSpilsGetNumRhsEvals')
    )
    setter_success = 'CV_SUCCESS'
    setter_errors = {
        'CV_MEM_NULL': "CVODE memory pointer is NULL",
        'CV_ILL_INPUT': "Illegal value"}
    # (property name, [(variable, c type), ...], C setter function)
    set_properties = (
        ('maxNumSteps', [('mxsteps', 'long int'), ], 'CVodeSetMaxNumSteps'),
        ('maxOrd', [('maxord', 'int'), ], 'CVodeSetMaxOrd'),
        ('maxHnilWarns', [('mxhnil', 'int'), ], 'CVodeSetMaxHnilWarns'),
        ('stabLimDet', [('stldet', 'sun.booleantype'), ], 'CVodeSetStabLimDet'),
        ('initStep', [('hin', 'sun.realtype'), ], 'CVodeSetInitStep'),
        ('minStep', [('hmin', 'sun.realtype'), ], 'CVodeSetMinStep'),
        ('maxStep', [('hmax', 'sun.realtype'), ], 'CVodeSetMaxStep'),
        ('stopTime', [('tstop', 'sun.realtype'), ], 'CVodeSetStopTime'),
        ('maxErrTestFails', [('maxnef', 'int'), ], 'CVodeSetMaxErrTestFails'),
        ('maxNonlinIters', [('maxcor', 'int'), ], 'CVodeSetMaxNonlinIters'),
        ('maxConvFails', [('maxncf', 'int'), ], 'CVodeSetMaxConvFails'),
        ('nonlinConvCoef', [('nlscoef', 'sun.realtype'), ], 'CVodeSetNonlinConvCoef'),
        ('iterType', [('iter', 'int'), ], 'CVodeSetIterType'),
    )
    # Class skeleton emitted before the generated properties.
    s = """
cdef class BaseCvode:
    def __cinit__(self, *args, **kwds):
        multistep = kwds.pop('multistep', 'bdf')
        iteration = kwds.pop('iteration', 'functional')

        if multistep == 'bdf':
            self._ms = cvode.CV_BDF
        elif multistep == 'adams':
            self._ms = cvode.CV_ADAMS
        else:
            raise ValueError

        if iteration == 'functional':
            self._it = cvode.CV_FUNCTIONAL
        elif iteration == 'newton':
            self._it = cvode.CV_NEWTON
        else:
            raise ValueError

        self._cv = cvode.CVodeCreate(self._ms, self._it)
        if not self._cv:
            raise MemoryError

        ret = cvode.CVodeSetUserData(self._cv, <void *>self)
"""
    s += "\n"
    for name, variables, func in get_properties:
        s += propertyGenerator(name, variables, module, mem_ptr, getter=func,
                               getter_success=getter_success, getter_errors=getter_errors)
        s += "\n"
    for name, variables, func in set_properties:
        s += propertyGenerator(name, variables, module, mem_ptr, setter=func,
                               setter_success=setter_success, setter_errors=setter_errors)
        s += "\n"
    # BUGFIX/idiom: context manager guarantees the handle is closed even when
    # write() raises; text mode because the payload is a str.
    with open(filename, 'w') as fh:
        fh.write(s)
if __name__ == '__main__':
    # Smoke-test: generate a single setter-only property and show the result.
    s = propertyGenerator('maxSetupCalls', [('msbset', 'int'), ], 'kinsol', 'self._kn',
                          setter='KINSetMaxSetupCalls', setter_success='KIN_SUCCESS',
                          setter_errors={'KIN_MEM_NULL': "KINSOL memory pointer is NULL",
                                         'KIN_ILL_INPUT': "Illegal value"},)
    # BUGFIX: the generated snippet was computed and then silently discarded;
    # print it so running the module actually shows the output.
    print(s)
| |
# Copyright (C) 2017-2018 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
import mock
import os.path
import shutil
import struct
import tempfile
from cuckoo.common.abstracts import Signature
from cuckoo.common.objects import Dictionary
from cuckoo.common.scripting import Scripting
from cuckoo.core.database import Database
from cuckoo.core.extract import ExtractManager
from cuckoo.core.plugins import RunSignatures, RunProcessing
from cuckoo.core.startup import init_yara, init_modules
from cuckoo.main import cuckoo_create
from cuckoo.misc import cwd, set_cwd, mkdir
def test_signature_version():
    """check_signature_version() compares the framework version against a
    signature's minimum/maximum bounds and rejects signatures defining run()."""
    runner = RunSignatures

    class sig_normal(object):
        name = "sig_normal"
        minimum = "2.0.0"
        maximum = None

    # No maximum: accepted by any framework version >= minimum.
    runner.version = "2.0.0"
    assert runner.check_signature_version(sig_normal)
    runner.version = "2.2.0"
    assert runner.check_signature_version(sig_normal)

    class sig_run(object):
        name = "sig_run"
        minimum = "2.0.0"
        maximum = None

        def run(self):
            pass

    # A signature exposing a run() method is refused.
    assert not runner.check_signature_version(sig_run)

    class sig_outdated(object):
        name = "sig_outdated"
        minimum = "2.0.3"
        maximum = None

    # Framework older than the signature's minimum.
    runner.version = "2.0.0"
    assert not runner.check_signature_version(sig_outdated)

    class sig_obsolete(object):
        name = "sig_obsolete"
        minimum = "2.0.0"
        maximum = "2.0.9"

    # Framework newer than the signature's maximum.
    runner.version = "2.1.0"
    assert not runner.check_signature_version(sig_obsolete)
def test_should_load_signature():
    """should_load_signature() filters out disabled, unnamed, and
    self-disabling signatures."""
    runner = RunSignatures
    runner.version = "2.0.0"

    class disabled_sig(object):
        enabled = False

    # Disabled signatures are never loaded.
    assert not runner.should_load_signature(disabled_sig)

    class unnamed_sig(object):
        enabled = True
        name = None

    # Signatures without a name are skipped.
    assert not runner.should_load_signature(unnamed_sig)

    class self_disabling_sig(object):
        enabled = True
        name = "enable_false"
        minimum = "2.0.0"
        maximum = None

        def enable(self):
            return False

    # enable() returning False vetoes loading.
    assert not runner.should_load_signature(self_disabling_sig())

    class self_enabling_sig(object):
        enabled = True
        name = "enable_true"
        minimum = "2.0.0"
        maximum = None
        platform = None

        def enable(self):
            return True

    # Fully valid signature with enable() returning True is loaded.
    assert runner.should_load_signature(self_enabling_sig())
def test_should_enable_signature_empty_platform():
    """With no platform in the report, platform-agnostic and windows
    signatures are enabled while unknown platforms are not."""
    runner = RunSignatures({})

    class no_platform_sig(object):
        platform = None

    assert runner.should_enable_signature(no_platform_sig())

    class unknown_platform_sig(object):
        platform = "nope"

    assert not runner.should_enable_signature(unknown_platform_sig())

    class windows_sig(object):
        platform = "windows"

    assert runner.should_enable_signature(windows_sig())
def test_should_enable_signature_linux_platform():
    """With a linux report, only platform-agnostic signatures are enabled;
    windows and unknown platforms stay disabled."""
    runner = RunSignatures({
        "info": {
            "platform": "linux",
        },
    })

    class no_platform_sig(object):
        platform = None

    assert runner.should_enable_signature(no_platform_sig())

    class unknown_platform_sig(object):
        platform = "nope"

    assert not runner.should_enable_signature(unknown_platform_sig())

    class windows_sig(object):
        platform = "windows"

    assert not runner.should_enable_signature(windows_sig())
def test_should_enable_signature_windows_platform():
    """With a windows report, platform-agnostic and windows signatures are
    enabled while unknown platforms are not."""
    runner = RunSignatures({
        "info": {
            "platform": "windows",
        },
    })

    class no_platform_sig(object):
        platform = None

    assert runner.should_enable_signature(no_platform_sig())

    class unknown_platform_sig(object):
        platform = "nope"

    assert not runner.should_enable_signature(unknown_platform_sig())

    class windows_sig(object):
        platform = "windows"

    assert runner.should_enable_signature(windows_sig())
def test_signature_order():
    """Signatures are instantiated in ascending 'order' attribute order."""
    # Shared base providing the minimal attributes RunSignatures expects.
    class sig(object):
        enabled = True
        minimum = "2.0.0"
        maximum = None
        platform = "windows"
        marks = []

        def __init__(self, caller):
            pass

    # Three signatures registered out of order on purpose.
    class sig1(sig):
        name = "sig1"
        order = 3

    class sig2(sig):
        name = "sig2"
        order = 1

    class sig3(sig):
        name = "sig3"
        order = 2

    # Register the signatures through the patched plugin registry.
    with mock.patch("cuckoo.core.plugins.cuckoo") as p:
        p.signatures = sig1, sig2, sig3
        RunSignatures.init_once()
    rs = RunSignatures({})
    # Expected ordering by 'order': sig2 (1), sig3 (2), sig1 (3).
    assert isinstance(rs.signatures[0], sig2)
    assert isinstance(rs.signatures[1], sig3)
    assert isinstance(rs.signatures[2], sig1)
def test_call_signature():
    """call_signature() invokes the handler until the signature matches, then
    stops delivering events to it.

    BUGFIX: this test was declared with ``class`` instead of ``def``, so its
    body executed at import time and pytest never collected it as a test
    function (pytest only collects ``Test``-prefixed classes).
    """
    class sig(object):
        enabled = True
        name = "sig"
        minimum = "2.0.0"
        maximum = None
        platform = "windows"
        matched = False
        order = 1

        def __init__(self, caller):
            pass

        def on_signature(self, sig):
            pass

    # Register the signature through the patched plugin registry.
    with mock.patch("cuckoo.core.plugins.cuckoo") as p:
        p.signatures = sig,
        RunSignatures.init_once()
    rs = RunSignatures({})
    s1 = rs.signatures[0]

    # Not a match: handler called, matched stays False.
    f = mock.MagicMock(return_value=False)
    s1.matched = False
    rs.call_signature(s1, f, 1, 2, a=3, b=4)
    assert s1.matched is False
    f.assert_called_once_with(1, 2, a=3, b=4)

    # It is a match: handler called, matched flips to True.
    f = mock.MagicMock(return_value=True)
    rs.call_signature(s1, f, "foo", "bar")
    assert s1.matched is True
    f.assert_called_once_with("foo", "bar")

    # Now it is a match, no longer call the handler.
    f = mock.MagicMock()
    rs.call_signature(s1, f, "foo", "bar")
    f.assert_not_called()
def test_check_suricata():
    """check_suricata_alerts() matches Suricata alert signatures by regex."""
    class caller(object):
        # Minimal results payload with one Suricata alert.
        results = {
            "suricata": {
                "alerts": [{
                    "signature": "SID_TEST",
                }],
            },
        }

    signature = Signature(caller)
    # The pattern matches the "SID_TEST" alert signature.
    assert signature.check_suricata_alerts(".*TEST.*")
@mock.patch("cuckoo.core.plugins.log")
def test_signature_severity(p):
    """A matched signature is logged with its severity in the 'extra' dict."""
    # Minimal stand-in for an already-matched signature.
    class sig(object):
        name = "foobar"
        matched = True
        severity = 42
        marks = []

        def init(self):
            pass

        def on_complete(self):
            pass

        def results(self):
            # Expose the class attributes as the signature's result dict.
            return self.__class__.__dict__

    rs = RunSignatures({})
    rs.signatures = sig(),
    rs.run()
    # The second debug() call carries the structured match information.
    assert p.debug.call_count == 2
    assert p.debug.call_args_list[1][1]["extra"] == {
        "action": "signature.match", "status": "success",
        "signature": "foobar", "severity": 42,
    }
def test_mark_config():
    """mark_config() entries land in results['metadata']['cfgextr'], with the
    scalar 'cnc' field promoted to a list and identical entries from multiple
    signatures collapsed into one."""
    class sig(Signature):
        name = "foobar"

        def on_complete(self):
            self.mark_config({
                "family": "foobar",
                "cnc": "thisiscnc.com",
                "url": [
                    "url1", "url2",
                ],
            })
            return True

    rs = RunSignatures({
        "metadata": {},
    })
    # Two identical signatures: the merged config must appear only once.
    rs.signatures = sig(rs), sig(rs)
    rs.run()
    assert rs.results["metadata"] == {
        "cfgextr": [{
            "family": "foobar",
            "cnc": [
                "thisiscnc.com",
            ],
            "url": [
                "url1", "url2",
            ],
        }],
    }
def test_on_yara():
    """End-to-end: yara matches on the sample binary, dropped files, and
    process memory dumps are reported and forwarded to on_yara()."""
    set_cwd(os.path.realpath(tempfile.mkdtemp()))
    cuckoo_create()
    init_modules()
    # Enable the vmdetect rules for memory scans as well.
    shutil.copy(
        cwd("yara", "binaries", "vmdetect.yar"),
        cwd("yara", "memory", "vmdetect.yar")
    )
    init_yara()
    mkdir(cwd(analysis=1))
    # Sample binary whose bytes are matched by the 'virtualpc' rule.
    open(cwd("binary", analysis=1), "wb").write("\x0f\x3f\x07\x0b")
    mkdir(cwd("files", analysis=1))
    # Dropped file matched by the 'vmware'/'vmware1' rules.
    open(cwd("files", "1.txt", analysis=1), "wb").write("\x56\x4d\x58\x68")
    mkdir(cwd("memory", analysis=1))
    # Minimal process memory dump: 24-byte region header (Q + 4*I) followed by
    # a payload matched by the 'vmcheckdll' rule at offset 24.
    open(cwd("memory", "1-0.dmp", analysis=1), "wb").write(
        struct.pack("QIIII", 0x400000, 0x1000, 0, 0, 0) + "\x45\xc7\x00\x01"
    )
    Database().connect()
    ExtractManager._instances = {}
    results = RunProcessing(task=Dictionary({
        "id": 1,
        "category": "file",
        "target": __file__,
    })).run()
    # Sample binary match.
    assert results["target"]["file"]["yara"][0]["offsets"] == {
        "virtualpc": [(0, 0)],
    }
    # The dump header above is decoded into a single memory region.
    assert results["procmemory"][0]["regions"] == [{
        "addr": "0x00400000",
        "end": "0x00401000",
        "offset": 24,
        "protect": None,
        "size": 4096,
        "state": 0,
        "type": 0,
    }]
    assert results["procmemory"][0]["yara"][0]["offsets"] == {
        "vmcheckdll": [(24, 0)],
    }
    assert results["dropped"][0]["yara"][0]["offsets"] == {
        "vmware": [(0, 0)],
        "vmware1": [(0, 0)],
    }

    class sig1(object):
        name = "sig1"

        # 'matched' is pinned to False so every match keeps being delivered to
        # on_yara() instead of short-circuiting after the first one.
        @property
        def matched(self):
            return False

        @matched.setter
        def matched(self, value):
            pass

        def init(self):
            pass

        def on_signature(self, sig):
            pass

        def on_complete(self):
            pass

        def on_extract(self, match):
            pass

        on_yara = mock.MagicMock()

    rs = RunSignatures(results)
    rs.signatures = sig1(),
    rs.run()
    # One on_yara() call per matching target: sample, dropped file, procmem.
    assert sig1.on_yara.call_count == 3
    sig1.on_yara.assert_any_call(
        "sample", cwd("binary", analysis=1), mock.ANY
    )
    sig1.on_yara.assert_any_call(
        "dropped", cwd("files", "1.txt", analysis=1), mock.ANY
    )
    sig1.on_yara.assert_any_call(
        "procmem", cwd("memory", "1-0.dmp", analysis=1), mock.ANY
    )
    # The third positional argument is the match object itself.
    ym = sig1.on_yara.call_args_list[0][0][2]
    assert ym.offsets == {
        "virtualpc": [(0, 0)],
    }
    assert ym.string("virtualpc", 0) == "\x0f\x3f\x07\x0b"
def test_on_extract():
    """Extracted scripts are reported under results['extracted'] and
    forwarded to the signatures' on_extract() callback."""
    set_cwd(tempfile.mkdtemp())
    cuckoo_create()
    init_modules()
    Database().connect()
    mkdir(cwd(analysis=2))
    # Push one extracted cmd.exe invocation for task 2.
    cmd = Scripting().parse_command("cmd.exe /c ping 1.2.3.4")
    ex = ExtractManager.for_task(2)
    ex.push_script({
        "pid": 1,
        "first_seen": 2,
    }, cmd)
    results = RunProcessing(task=Dictionary({
        "id": 2,
        "category": "file",
        "target": __file__,
    })).run()
    assert results["extracted"] == [{
        "category": "script",
        "pid": 1,
        "first_seen": 2,
        "program": "cmd",
        "raw": cwd("extracted", "0.bat", analysis=2),
        "yara": [],
        "info": {},
    }]

    class sig1(object):
        name = "sig1"

        # 'matched' is pinned to False so callbacks keep being invoked.
        @property
        def matched(self):
            return False

        @matched.setter
        def matched(self, value):
            pass

        def init(self):
            pass

        def on_signature(self):
            pass

        def on_complete(self):
            pass

        def on_yara(self):
            pass

        on_extract = mock.MagicMock()

    rs = RunSignatures(results)
    rs.signatures = sig1(),
    rs.run()
    # Exactly one extracted item -> exactly one on_extract() call.
    sig1.on_extract.assert_called_once()
    em = sig1.on_extract.call_args_list[0][0][0]
    assert em.category == "script"
class TestSignatureMethods(object):
    """Unit tests for helper methods on the Signature base class."""

    def report(self, obj):
        """Wrap *obj* as the results of a fake caller and return a Signature."""
        class caller(object):
            results = obj
        return Signature(caller())

    def test_check_command_line(self):
        r = self.report({
            "behavior": {
                "summary": {
                    "command_line": [
                        "foo", "bar", "foobar",
                    ],
                },
            },
        })
        # BUGFIX: these were bare `==` comparisons whose results were
        # discarded, so the test could never fail; assert the expected
        # matches instead.
        assert r.check_command_line("foo") == "foo"
        assert r.check_command_line("ar$", regex=True) == "bar"
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the compute RPC API.
"""
from oslo.config import cfg
from nova import exception
from nova.objects import base as objects_base
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova import rpcclient
# Option for locating compute services over RPC.
rpcapi_opts = [
    cfg.StrOpt('compute_topic',
               default='compute',
               help='the topic compute nodes listen on'),
]

CONF = cfg.CONF
CONF.register_opts(rpcapi_opts)

# Optional version cap for outgoing compute RPC messages, registered under
# the [upgrade_levels] group (used during rolling upgrades).
rpcapi_cap_opt = cfg.StrOpt('compute',
        help='Set a version cap for messages sent to compute services')
CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels')
def _compute_host(host, instance):
'''Get the destination host for a message.
:param host: explicit host to send the message to.
:param instance: If an explicit host was not specified, use
instance['host']
:returns: A host
'''
if host:
return host
if not instance:
raise exception.NovaException(_('No compute host specified'))
if not instance['host']:
raise exception.NovaException(_('Unable to find host for '
'Instance %s') % instance['uuid'])
return instance['host']
class ComputeAPI(rpcclient.RpcProxy):
    '''Client side of the compute rpc API.
    API version history:
        1.0 - Initial version.
        1.1 - Adds get_host_uptime()
        1.2 - Adds check_can_live_migrate_[destination|source]
        1.3 - Adds change_instance_metadata()
        1.4 - Remove instance_uuid, add instance argument to reboot_instance()
        1.5 - Remove instance_uuid, add instance argument to pause_instance(),
              unpause_instance()
        1.6 - Remove instance_uuid, add instance argument to suspend_instance()
        1.7 - Remove instance_uuid, add instance argument to
              get_console_output()
        1.8 - Remove instance_uuid, add instance argument to
              add_fixed_ip_to_instance()
        1.9 - Remove instance_uuid, add instance argument to attach_volume()
        1.10 - Remove instance_id, add instance argument to
               check_can_live_migrate_destination()
        1.11 - Remove instance_id, add instance argument to
               check_can_live_migrate_source()
        1.12 - Remove instance_uuid, add instance argument to confirm_resize()
        1.13 - Remove instance_uuid, add instance argument to detach_volume()
        1.14 - Remove instance_uuid, add instance argument to finish_resize()
        1.15 - Remove instance_uuid, add instance argument to
               finish_revert_resize()
        1.16 - Remove instance_uuid, add instance argument to get_diagnostics()
        1.17 - Remove instance_uuid, add instance argument to get_vnc_console()
        1.18 - Remove instance_uuid, add instance argument to inject_file()
        1.19 - Remove instance_uuid, add instance argument to
               inject_network_info()
        1.20 - Remove instance_id, add instance argument to
               post_live_migration_at_destination()
        1.21 - Remove instance_uuid, add instance argument to
               power_off_instance() and stop_instance()
        1.22 - Remove instance_uuid, add instance argument to
               power_on_instance() and start_instance()
        1.23 - Remove instance_id, add instance argument to
               pre_live_migration()
        1.24 - Remove instance_uuid, add instance argument to
               rebuild_instance()
        1.25 - Remove instance_uuid, add instance argument to
               remove_fixed_ip_from_instance()
        1.26 - Remove instance_id, add instance argument to
               remove_volume_connection()
        1.27 - Remove instance_uuid, add instance argument to
               rescue_instance()
        1.28 - Remove instance_uuid, add instance argument to reset_network()
        1.29 - Remove instance_uuid, add instance argument to resize_instance()
        1.30 - Remove instance_uuid, add instance argument to resume_instance()
        1.31 - Remove instance_uuid, add instance argument to revert_resize()
        1.32 - Remove instance_id, add instance argument to
               rollback_live_migration_at_destination()
        1.33 - Remove instance_uuid, add instance argument to
               set_admin_password()
        1.34 - Remove instance_uuid, add instance argument to
               snapshot_instance()
        1.35 - Remove instance_uuid, add instance argument to
               unrescue_instance()
        1.36 - Remove instance_uuid, add instance argument to
               change_instance_metadata()
        1.37 - Remove instance_uuid, add instance argument to
               terminate_instance()
        1.38 - Changes to prep_resize():
            - remove instance_uuid, add instance
            - remove instance_type_id, add instance_type
            - remove topic, it was unused
        1.39 - Remove instance_uuid, add instance argument to run_instance()
        1.40 - Remove instance_id, add instance argument to live_migration()
        1.41 - Adds refresh_instance_security_rules()
        1.42 - Add reservations arg to prep_resize(), resize_instance(),
               finish_resize(), confirm_resize(), revert_resize() and
               finish_revert_resize()
        1.43 - Add migrate_data to live_migration()
        1.44 - Adds reserve_block_device_name()
        2.0 - Remove 1.x backwards compat
        2.1 - Adds orig_sys_metadata to rebuild_instance()
        2.2 - Adds slave_info parameter to add_aggregate_host() and
              remove_aggregate_host()
        2.3 - Adds volume_id to reserve_block_device_name()
        2.4 - Add bdms to terminate_instance
        2.5 - Add block device and network info to reboot_instance
        2.6 - Remove migration_id, add migration to resize_instance
        2.7 - Remove migration_id, add migration to confirm_resize
        2.8 - Remove migration_id, add migration to finish_resize
        2.9 - Add publish_service_capabilities()
        2.10 - Adds filter_properties and request_spec to prep_resize()
        2.11 - Adds soft_delete_instance() and restore_instance()
        2.12 - Remove migration_id, add migration to revert_resize
        2.13 - Remove migration_id, add migration to finish_revert_resize
        2.14 - Remove aggregate_id, add aggregate to add_aggregate_host
        2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host
        2.16 - Add instance_type to resize_instance
        2.17 - Add get_backdoor_port()
        2.18 - Add bdms to rebuild_instance
        2.19 - Add node to run_instance
        2.20 - Add node to prep_resize
        2.21 - Add migrate_data dict param to pre_live_migration()
        2.22 - Add recreate, on_shared_storage and host arguments to
               rebuild_instance()
        2.23 - Remove network_info from reboot_instance
        2.24 - Added get_spice_console method
        2.25 - Add attach_interface() and detach_interface()
        2.26 - Add validate_console_port to ensure the service connects to
               vnc on the correct port
        2.27 - Adds 'reservations' to terminate_instance() and
               soft_delete_instance()
        ... Grizzly supports message version 2.27. So, any changes to existing
            methods in 2.x after that point should be done such that they can
            handle the version_cap being set to 2.27.
        2.28 - Adds check_instance_shared_storage()
        2.29 - Made start_instance() and stop_instance() take new-world
               instance objects
        2.30 - Adds live_snapshot_instance()
        2.31 - Adds shelve_instance(), shelve_offload_instance, and
               unshelve_instance()
        2.32 - Make reboot_instance take a new world instance object
        2.33 - Made suspend_instance() and resume_instance() take new-world
               instance objects
        2.34 - Added swap_volume()
        2.35 - Made terminate_instance() and soft_delete_instance() take
               new-world instance objects
        2.36 - Made pause_instance() and unpause_instance() take new-world
               instance objects
        2.37 - Added the legacy_bdm_in_spec parameter to run_instance
        2.38 - Made check_can_live_migrate_[destination|source] take
               new-world instance objects
        2.39 - Made revert_resize() and confirm_resize() take new-world
               instance objects
        2.40 - Made reset_network() take new-world instance object
        2.41 - Make inject_network_info take new-world instance object
        2.42 - Splits snapshot_instance() into snapshot_instance() and
               backup_instance() and makes them take new-world instance
               objects.
        2.43 - Made prep_resize() take new-world instance object
        2.44 - Add volume_snapshot_create(), volume_snapshot_delete()
        2.45 - Made resize_instance() take new-world objects
        2.46 - Made finish_resize() take new-world objects
        2.47 - Made finish_revert_resize() take new-world objects
        ... Havana supports message version 2.47. So, any changes to existing
            methods in 2.x after that point should be done such that they can
            handle the version_cap being set to 2.47.
        2.48 - Make add_aggregate_host() and remove_aggregate_host() take
               new-world objects
        ... - Remove live_snapshot() that was never actually used
        3.0 - Remove 2.x compatibility
        3.1 - Update get_spice_console() to take an instance object
        3.2 - Update get_vnc_console() to take an instance object
        3.3 - Update validate_console_port() to take an instance object
        3.4 - Update rebuild_instance() to take an instance object
    '''
    #
    # NOTE(russellb): This is the default minimum version that the server
    # (manager) side must implement unless otherwise specified using a version
    # argument to self.call()/cast()/etc. here. It should be left as X.0 where
    # X is the current major API version (1.0, 2.0, ...). For more information
    # about rpc API versioning, see the docs in
    # openstack/common/rpc/dispatcher.py.
    #
    BASE_RPC_API_VERSION = '3.0'
    # Maps release names accepted in [upgrade_levels]/compute to the maximum
    # message version supported by that release.
    VERSION_ALIASES = {
        'grizzly': '2.27',
        'havana': '2.47',
    }
    def __init__(self):
        # The cap may be given as a release alias ('havana') or as an
        # explicit version string; unknown keys fall through unchanged.
        version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.compute,
                                               CONF.upgrade_levels.compute)
        super(ComputeAPI, self).__init__(
                topic=CONF.compute_topic,
                default_version=self.BASE_RPC_API_VERSION,
                serializer=objects_base.NovaObjectSerializer(),
                version_cap=version_cap)
        self.client = self.get_client()
    def _get_compat_version(self, current, havana_compat):
        """Return `current` if it can be sent under the version cap,
        otherwise the Havana-compatible fallback version.
        """
        if not self.can_send_version(current):
            return havana_compat
        return current
    def add_aggregate_host(self, ctxt, aggregate, host_param, host,
                           slave_info=None):
        '''Add aggregate host.
        :param ctxt: request context
        :param aggregate: the aggregate object to add the host to
        :param host_param: This value is placed in the message to be the 'host'
                           parameter for the remote method.
        :param host: This is the host to send the message to.
        '''
        if self.can_send_version('3.0'):
            version = '3.0'
        elif self.can_send_version('2.48'):
            version = '2.48'
        else:
            # NOTE(russellb) Havana compat
            version = '2.14'
            aggregate = jsonutils.to_primitive(aggregate)
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'add_aggregate_host',
                   aggregate=aggregate, host=host_param,
                   slave_info=slave_info)
    def add_fixed_ip_to_instance(self, ctxt, instance, network_id):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'add_fixed_ip_to_instance',
                   instance=instance_p, network_id=network_id)
    def attach_interface(self, ctxt, instance, network_id, port_id,
                         requested_ip):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.25')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        return cctxt.call(ctxt, 'attach_interface',
                          instance=instance_p, network_id=network_id,
                          port_id=port_id, requested_ip=requested_ip)
    def attach_volume(self, ctxt, instance, volume_id, mountpoint):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'attach_volume',
                   instance=instance_p, volume_id=volume_id,
                   mountpoint=mountpoint)
    def change_instance_metadata(self, ctxt, instance, diff):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'change_instance_metadata',
                   instance=instance_p, diff=diff)
    def check_can_live_migrate_destination(self, ctxt, instance, destination,
                                           block_migration, disk_over_commit):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.38')
        cctxt = self.client.prepare(server=destination, version=version)
        return cctxt.call(ctxt, 'check_can_live_migrate_destination',
                          instance=instance,
                          block_migration=block_migration,
                          disk_over_commit=disk_over_commit)
    def check_can_live_migrate_source(self, ctxt, instance, dest_check_data):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.38')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        return cctxt.call(ctxt, 'check_can_live_migrate_source',
                          instance=instance,
                          dest_check_data=dest_check_data)
    def check_instance_shared_storage(self, ctxt, instance, data):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.28')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        return cctxt.call(ctxt, 'check_instance_shared_storage',
                          instance=instance_p,
                          data=data)
    def confirm_resize(self, ctxt, instance, migration, host,
            reservations=None, cast=True):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.39')
        cctxt = self.client.prepare(server=_compute_host(host, instance),
                version=version)
        # When cast=False the caller blocks until the resize confirmation
        # completes and receives its return value.
        rpc_method = cctxt.cast if cast else cctxt.call
        return rpc_method(ctxt, 'confirm_resize',
                          instance=instance, migration=migration,
                          reservations=reservations)
    def detach_interface(self, ctxt, instance, port_id):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.25')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'detach_interface',
                   instance=instance_p, port_id=port_id)
    def detach_volume(self, ctxt, instance, volume_id):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'detach_volume',
                   instance=instance_p, volume_id=volume_id)
    def finish_resize(self, ctxt, instance, migration, image, disk_info,
            host, reservations=None):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.46')
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'finish_resize',
                   instance=instance, migration=migration,
                   image=image, disk_info=disk_info, reservations=reservations)
    def finish_revert_resize(self, ctxt, instance, migration, host,
                             reservations=None):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.47')
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'finish_revert_resize',
                   instance=instance, migration=migration,
                   reservations=reservations)
    def get_console_output(self, ctxt, instance, tail_length):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        return cctxt.call(ctxt, 'get_console_output',
                          instance=instance_p, tail_length=tail_length)
    def get_console_pool_info(self, ctxt, console_type, host):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        cctxt = self.client.prepare(server=host, version=version)
        return cctxt.call(ctxt, 'get_console_pool_info',
                          console_type=console_type)
    def get_console_topic(self, ctxt, host):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        cctxt = self.client.prepare(server=host, version=version)
        return cctxt.call(ctxt, 'get_console_topic')
    def get_diagnostics(self, ctxt, instance):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        return cctxt.call(ctxt, 'get_diagnostics',
                          instance=instance_p)
    def get_vnc_console(self, ctxt, instance, console_type):
        # 3.2+ accepts an instance object; older versions need a primitive.
        if self.can_send_version('3.2'):
            version = '3.2'
        else:
            # NOTE(russellb) Havana compat
            version = self._get_compat_version('3.0', '2.0')
            instance = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        return cctxt.call(ctxt, 'get_vnc_console',
                          instance=instance, console_type=console_type)
    def get_spice_console(self, ctxt, instance, console_type):
        # 3.1+ accepts an instance object; older versions need a primitive.
        if self.can_send_version('3.1'):
            version = '3.1'
        else:
            # NOTE(russellb) Havana compat
            version = self._get_compat_version('3.0', '2.24')
            instance = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        return cctxt.call(ctxt, 'get_spice_console',
                          instance=instance, console_type=console_type)
    def validate_console_port(self, ctxt, instance, port, console_type):
        # 3.3+ accepts an instance object; older versions need a primitive.
        if self.can_send_version('3.3'):
            version = '3.3'
        else:
            # NOTE(russellb) Havana compat
            version = self._get_compat_version('3.0', '2.26')
            instance = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        return cctxt.call(ctxt, 'validate_console_port',
                          instance=instance, port=port,
                          console_type=console_type)
    def host_maintenance_mode(self, ctxt, host_param, mode, host):
        '''Set host maintenance mode
        :param ctxt: request context
        :param host_param: This value is placed in the message to be the 'host'
                           parameter for the remote method.
        :param mode:
        :param host: This is the host to send the message to.
        '''
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        cctxt = self.client.prepare(server=host, version=version)
        return cctxt.call(ctxt, 'host_maintenance_mode',
                          host=host_param, mode=mode)
    def host_power_action(self, ctxt, action, host):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        cctxt = self.client.prepare(server=host, version=version)
        return cctxt.call(ctxt, 'host_power_action', action=action)
    def inject_file(self, ctxt, instance, path, file_contents):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'inject_file',
                   instance=instance_p, path=path,
                   file_contents=file_contents)
    def inject_network_info(self, ctxt, instance):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.41')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'inject_network_info', instance=instance)
    def live_migration(self, ctxt, instance, dest, block_migration, host,
                       migrate_data=None):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'live_migration', instance=instance_p,
                   dest=dest, block_migration=block_migration,
                   migrate_data=migrate_data)
    def pause_instance(self, ctxt, instance):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.36')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'pause_instance', instance=instance)
    def post_live_migration_at_destination(self, ctxt, instance,
            block_migration, host):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=host, version=version)
        return cctxt.call(ctxt,
                          'post_live_migration_at_destination',
                          instance=instance_p, block_migration=block_migration)
    def pre_live_migration(self, ctxt, instance, block_migration, disk,
            host, migrate_data=None):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.21')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=host, version=version)
        return cctxt.call(ctxt, 'pre_live_migration',
                          instance=instance_p,
                          block_migration=block_migration,
                          disk=disk, migrate_data=migrate_data)
    def prep_resize(self, ctxt, image, instance, instance_type, host,
                    reservations=None, request_spec=None,
                    filter_properties=None, node=None):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.43')
        instance_type_p = jsonutils.to_primitive(instance_type)
        image_p = jsonutils.to_primitive(image)
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'prep_resize',
                   instance=instance,
                   instance_type=instance_type_p,
                   image=image_p, reservations=reservations,
                   request_spec=request_spec,
                   filter_properties=filter_properties,
                   node=node)
    def reboot_instance(self, ctxt, instance, block_device_info,
                        reboot_type):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.32')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'reboot_instance',
                   instance=instance,
                   block_device_info=block_device_info,
                   reboot_type=reboot_type)
    def rebuild_instance(self, ctxt, instance, new_pass, injected_files,
            image_ref, orig_image_ref, orig_sys_metadata, bdms,
            recreate=False, on_shared_storage=False, host=None,
            kwargs=None):
        # NOTE(danms): kwargs is only here for cells compatibility, don't
        # actually send it to compute
        # 3.4+ accepts an instance object; older versions need a primitive.
        if self.can_send_version('3.4'):
            version = '3.4'
        else:
            # NOTE(russellb) Havana compat
            version = self._get_compat_version('3.0', '2.22')
            instance = jsonutils.to_primitive(instance)
        bdms_p = jsonutils.to_primitive(bdms)
        cctxt = self.client.prepare(server=_compute_host(host, instance),
                version=version)
        cctxt.cast(ctxt, 'rebuild_instance',
                   instance=instance, new_pass=new_pass,
                   injected_files=injected_files, image_ref=image_ref,
                   orig_image_ref=orig_image_ref,
                   orig_sys_metadata=orig_sys_metadata, bdms=bdms_p,
                   recreate=recreate, on_shared_storage=on_shared_storage)
    def refresh_provider_fw_rules(self, ctxt, host):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'refresh_provider_fw_rules')
    def remove_aggregate_host(self, ctxt, aggregate, host_param, host,
                              slave_info=None):
        '''Remove aggregate host.
        :param ctxt: request context
        :param aggregate: the aggregate object to remove the host from
        :param host_param: This value is placed in the message to be the 'host'
                           parameter for the remote method.
        :param host: This is the host to send the message to.
        '''
        if self.can_send_version('3.0'):
            version = '3.0'
        elif self.can_send_version('2.48'):
            version = '2.48'
        else:
            # NOTE(russellb) Havana compat
            version = '2.15'
            aggregate = jsonutils.to_primitive(aggregate)
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'remove_aggregate_host',
                   aggregate=aggregate, host=host_param,
                   slave_info=slave_info)
    def remove_fixed_ip_from_instance(self, ctxt, instance, address):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'remove_fixed_ip_from_instance',
                   instance=instance_p, address=address)
    def remove_volume_connection(self, ctxt, instance, volume_id, host):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=host, version=version)
        return cctxt.call(ctxt, 'remove_volume_connection',
                          instance=instance_p, volume_id=volume_id)
    def rescue_instance(self, ctxt, instance, rescue_password):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'rescue_instance',
                   instance=instance_p,
                   rescue_password=rescue_password)
    def reset_network(self, ctxt, instance):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.40')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'reset_network', instance=instance)
    def resize_instance(self, ctxt, instance, migration, image, instance_type,
                        reservations=None):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.45')
        instance_type_p = jsonutils.to_primitive(instance_type)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'resize_instance',
                   instance=instance, migration=migration,
                   image=image, reservations=reservations,
                   instance_type=instance_type_p)
    def resume_instance(self, ctxt, instance):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.33')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'resume_instance', instance=instance)
    def revert_resize(self, ctxt, instance, migration, host,
                      reservations=None):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.39')
        cctxt = self.client.prepare(server=_compute_host(host, instance),
                version=version)
        cctxt.cast(ctxt, 'revert_resize',
                   instance=instance, migration=migration,
                   reservations=reservations)
    def rollback_live_migration_at_destination(self, ctxt, instance, host):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'rollback_live_migration_at_destination',
                   instance=instance_p)
    def run_instance(self, ctxt, instance, host, request_spec,
                     filter_properties, requested_networks,
                     injected_files, admin_password,
                     is_first_time, node=None, legacy_bdm_in_spec=True):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.37')
        instance_p = jsonutils.to_primitive(instance)
        msg_kwargs = {'instance': instance_p, 'request_spec': request_spec,
                      'filter_properties': filter_properties,
                      'requested_networks': requested_networks,
                      'injected_files': injected_files,
                      'admin_password': admin_password,
                      'is_first_time': is_first_time, 'node': node,
                      'legacy_bdm_in_spec': legacy_bdm_in_spec}
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'run_instance', **msg_kwargs)
    def set_admin_password(self, ctxt, instance, new_pass):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        return cctxt.call(ctxt, 'set_admin_password',
                          instance=instance_p, new_pass=new_pass)
    def set_host_enabled(self, ctxt, enabled, host):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        cctxt = self.client.prepare(server=host, version=version)
        return cctxt.call(ctxt, 'set_host_enabled', enabled=enabled)
    def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.34')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'swap_volume',
                   instance=instance, old_volume_id=old_volume_id,
                   new_volume_id=new_volume_id)
    def get_host_uptime(self, ctxt, host):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        cctxt = self.client.prepare(server=host, version=version)
        return cctxt.call(ctxt, 'get_host_uptime')
    def reserve_block_device_name(self, ctxt, instance, device, volume_id):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.3')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        return cctxt.call(ctxt, 'reserve_block_device_name',
                          instance=instance_p, device=device,
                          volume_id=volume_id)
    def backup_instance(self, ctxt, instance, image_id, backup_type,
                        rotation):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.42')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'backup_instance',
                   instance=instance,
                   image_id=image_id,
                   backup_type=backup_type,
                   rotation=rotation)
    def snapshot_instance(self, ctxt, instance, image_id):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.42')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'snapshot_instance',
                   instance=instance,
                   image_id=image_id)
    def start_instance(self, ctxt, instance):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.29')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'start_instance', instance=instance)
    def stop_instance(self, ctxt, instance, do_cast=True):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.29')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        # When do_cast=False the caller blocks until the instance is stopped.
        rpc_method = cctxt.cast if do_cast else cctxt.call
        return rpc_method(ctxt, 'stop_instance', instance=instance)
    def suspend_instance(self, ctxt, instance):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.33')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'suspend_instance', instance=instance)
    def terminate_instance(self, ctxt, instance, bdms, reservations=None):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.35')
        bdms_p = jsonutils.to_primitive(bdms)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'terminate_instance',
                   instance=instance, bdms=bdms_p,
                   reservations=reservations)
    def unpause_instance(self, ctxt, instance):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.36')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'unpause_instance', instance=instance)
    def unrescue_instance(self, ctxt, instance):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'unrescue_instance', instance=instance_p)
    def soft_delete_instance(self, ctxt, instance, reservations=None):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.35')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'soft_delete_instance',
                   instance=instance, reservations=reservations)
    def restore_instance(self, ctxt, instance):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'restore_instance', instance=instance_p)
    def shelve_instance(self, ctxt, instance, image_id=None):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.31')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'shelve_instance',
                   instance=instance, image_id=image_id)
    def shelve_offload_instance(self, ctxt, instance):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.31')
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'shelve_offload_instance', instance=instance)
    def unshelve_instance(self, ctxt, instance, host, image=None):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.31')
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'unshelve_instance',
                   instance=instance, image=image)
    def volume_snapshot_create(self, ctxt, instance, volume_id,
                               create_info):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.44')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'volume_snapshot_create', instance=instance_p,
                   volume_id=volume_id, create_info=create_info)
    def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id,
                               delete_info):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.44')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'volume_snapshot_delete', instance=instance_p,
                   volume_id=volume_id, snapshot_id=snapshot_id,
                   delete_info=delete_info)
class SecurityGroupAPI(rpcclient.RpcProxy):
    '''Client side of the security group rpc API.
    API version history:
        1.0 - Initial version.
        1.41 - Adds refresh_instance_security_rules()
        2.0 - Remove 1.x backwards compat
        3.0 - Identical to 2.x, but has to be bumped at the same time as the
              compute API since it's all together on the server side.
    '''
    #
    # NOTE(russellb): This is the default minimum version that the server
    # (manager) side must implement unless otherwise specified using a version
    # argument to self.call()/cast()/etc. here. It should be left as X.0 where
    # X is the current major API version (1.0, 2.0, ...). For more information
    # about rpc API versioning, see the docs in
    # openstack/common/rpc/dispatcher.py.
    #
    BASE_RPC_API_VERSION = '3.0'
    def __init__(self):
        # Shares ComputeAPI's version aliases because both APIs are served
        # by the same compute manager and are versioned in lockstep.
        version_cap = ComputeAPI.VERSION_ALIASES.get(
                CONF.upgrade_levels.compute, CONF.upgrade_levels.compute)
        super(SecurityGroupAPI, self).__init__(
                topic=CONF.compute_topic,
                default_version=self.BASE_RPC_API_VERSION,
                version_cap=version_cap)
        self.client = self.get_client()
    def _get_compat_version(self, current, havana_compat):
        """Return `current` if it can be sent under the version cap,
        otherwise the Havana-compatible fallback version.
        """
        if not self.can_send_version(current):
            return havana_compat
        return current
    def refresh_security_group_rules(self, ctxt, security_group_id, host):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'refresh_security_group_rules',
                   security_group_id=security_group_id)
    def refresh_security_group_members(self, ctxt, security_group_id,
            host):
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        cctxt = self.client.prepare(server=host, version=version)
        cctxt.cast(ctxt, 'refresh_security_group_members',
                   security_group_id=security_group_id)
    def refresh_instance_security_rules(self, ctxt, host, instance):
        # NOTE(review): the `host` argument is ignored; the message is
        # routed to instance['host'] via _compute_host(None, instance).
        # Confirm with callers whether this is intentional.
        # NOTE(russellb) Havana compat
        version = self._get_compat_version('3.0', '2.0')
        instance_p = jsonutils.to_primitive(instance)
        cctxt = self.client.prepare(server=_compute_host(None, instance),
                version=version)
        cctxt.cast(ctxt, 'refresh_instance_security_rules',
                   instance=instance_p)
| |
# Generated from /cci_src/imap2017/src/imap-search-parser/python-emitter/imap2017search.g4 by ANTLR 4.6
from antlr4 import *
if __name__ is not None and "." in __name__:
from .imap2017searchParser import imap2017searchParser
else:
from imap2017searchParser import imap2017searchParser
# This class defines a complete listener for a parse tree produced by imap2017searchParser.
class imap2017searchListener(ParseTreeListener):
    """No-op ParseTreeListener covering every rule of imap2017searchParser.

    ANTLR-generated boilerplate: subclass and override only the enter/exit
    hooks you need; each default implementation intentionally does nothing.
    """

    def enterProg(self, ctx:imap2017searchParser.ProgContext): pass
    def exitProg(self, ctx:imap2017searchParser.ProgContext): pass
    def enterAstring(self, ctx:imap2017searchParser.AstringContext): pass
    def exitAstring(self, ctx:imap2017searchParser.AstringContext): pass
    def enterAstring_char(self, ctx:imap2017searchParser.Astring_charContext): pass
    def exitAstring_char(self, ctx:imap2017searchParser.Astring_charContext): pass
    def enterAtom(self, ctx:imap2017searchParser.AtomContext): pass
    def exitAtom(self, ctx:imap2017searchParser.AtomContext): pass
    def enterAtom_char(self, ctx:imap2017searchParser.Atom_charContext): pass
    def exitAtom_char(self, ctx:imap2017searchParser.Atom_charContext): pass
    def enterAtom_specials(self, ctx:imap2017searchParser.Atom_specialsContext): pass
    def exitAtom_specials(self, ctx:imap2017searchParser.Atom_specialsContext): pass
    def enterChar8(self, ctx:imap2017searchParser.Char8Context): pass
    def exitChar8(self, ctx:imap2017searchParser.Char8Context): pass
    def enterDate(self, ctx:imap2017searchParser.DateContext): pass
    def exitDate(self, ctx:imap2017searchParser.DateContext): pass
    def enterDate_day(self, ctx:imap2017searchParser.Date_dayContext): pass
    def exitDate_day(self, ctx:imap2017searchParser.Date_dayContext): pass
    def enterDate_day_fixed(self, ctx:imap2017searchParser.Date_day_fixedContext): pass
    def exitDate_day_fixed(self, ctx:imap2017searchParser.Date_day_fixedContext): pass
    def enterDate_month(self, ctx:imap2017searchParser.Date_monthContext): pass
    def exitDate_month(self, ctx:imap2017searchParser.Date_monthContext): pass
    def enterDate_text(self, ctx:imap2017searchParser.Date_textContext): pass
    def exitDate_text(self, ctx:imap2017searchParser.Date_textContext): pass
    def enterDate_year(self, ctx:imap2017searchParser.Date_yearContext): pass
    def exitDate_year(self, ctx:imap2017searchParser.Date_yearContext): pass
    def enterDate_time(self, ctx:imap2017searchParser.Date_timeContext): pass
    def exitDate_time(self, ctx:imap2017searchParser.Date_timeContext): pass
    def enterDigit_nz(self, ctx:imap2017searchParser.Digit_nzContext): pass
    def exitDigit_nz(self, ctx:imap2017searchParser.Digit_nzContext): pass
    def enterHeader_fld_name(self, ctx:imap2017searchParser.Header_fld_nameContext): pass
    def exitHeader_fld_name(self, ctx:imap2017searchParser.Header_fld_nameContext): pass
    def enterHeader_list(self, ctx:imap2017searchParser.Header_listContext): pass
    def exitHeader_list(self, ctx:imap2017searchParser.Header_listContext): pass
    def enterFlag(self, ctx:imap2017searchParser.FlagContext): pass
    def exitFlag(self, ctx:imap2017searchParser.FlagContext): pass
    def enterFlag_extension(self, ctx:imap2017searchParser.Flag_extensionContext): pass
    def exitFlag_extension(self, ctx:imap2017searchParser.Flag_extensionContext): pass
    def enterFlag_fetch(self, ctx:imap2017searchParser.Flag_fetchContext): pass
    def exitFlag_fetch(self, ctx:imap2017searchParser.Flag_fetchContext): pass
    def enterFlag_keyword(self, ctx:imap2017searchParser.Flag_keywordContext): pass
    def exitFlag_keyword(self, ctx:imap2017searchParser.Flag_keywordContext): pass
    def enterFlag_list(self, ctx:imap2017searchParser.Flag_listContext): pass
    def exitFlag_list(self, ctx:imap2017searchParser.Flag_listContext): pass
    def enterFlag_perm(self, ctx:imap2017searchParser.Flag_permContext): pass
    def exitFlag_perm(self, ctx:imap2017searchParser.Flag_permContext): pass
    def enterLiteral(self, ctx:imap2017searchParser.LiteralContext): pass
    def exitLiteral(self, ctx:imap2017searchParser.LiteralContext): pass
    def enterList_wildcards(self, ctx:imap2017searchParser.List_wildcardsContext): pass
    def exitList_wildcards(self, ctx:imap2017searchParser.List_wildcardsContext): pass
    def enterNumber(self, ctx:imap2017searchParser.NumberContext): pass
    def exitNumber(self, ctx:imap2017searchParser.NumberContext): pass
    def enterNz_number(self, ctx:imap2017searchParser.Nz_numberContext): pass
    def exitNz_number(self, ctx:imap2017searchParser.Nz_numberContext): pass
    def enterQuoted(self, ctx:imap2017searchParser.QuotedContext): pass
    def exitQuoted(self, ctx:imap2017searchParser.QuotedContext): pass
    def enterQuoted_char(self, ctx:imap2017searchParser.Quoted_charContext): pass
    def exitQuoted_char(self, ctx:imap2017searchParser.Quoted_charContext): pass
    def enterQuoted_specials(self, ctx:imap2017searchParser.Quoted_specialsContext): pass
    def exitQuoted_specials(self, ctx:imap2017searchParser.Quoted_specialsContext): pass
    def enterResp_specials(self, ctx:imap2017searchParser.Resp_specialsContext): pass
    def exitResp_specials(self, ctx:imap2017searchParser.Resp_specialsContext): pass
    def enterSearch(self, ctx:imap2017searchParser.SearchContext): pass
    def exitSearch(self, ctx:imap2017searchParser.SearchContext): pass
    def enterS_all(self, ctx:imap2017searchParser.S_allContext): pass
    def exitS_all(self, ctx:imap2017searchParser.S_allContext): pass
    def enterS_answered(self, ctx:imap2017searchParser.S_answeredContext): pass
    def exitS_answered(self, ctx:imap2017searchParser.S_answeredContext): pass
    def enterS_bcc(self, ctx:imap2017searchParser.S_bccContext): pass
    def exitS_bcc(self, ctx:imap2017searchParser.S_bccContext): pass
    def enterS_before(self, ctx:imap2017searchParser.S_beforeContext): pass
    def exitS_before(self, ctx:imap2017searchParser.S_beforeContext): pass
    def enterS_body(self, ctx:imap2017searchParser.S_bodyContext): pass
    def exitS_body(self, ctx:imap2017searchParser.S_bodyContext): pass
    def enterS_cc(self, ctx:imap2017searchParser.S_ccContext): pass
    def exitS_cc(self, ctx:imap2017searchParser.S_ccContext): pass
    def enterS_deleted(self, ctx:imap2017searchParser.S_deletedContext): pass
    def exitS_deleted(self, ctx:imap2017searchParser.S_deletedContext): pass
    def enterS_flagged(self, ctx:imap2017searchParser.S_flaggedContext): pass
    def exitS_flagged(self, ctx:imap2017searchParser.S_flaggedContext): pass
    def enterS_from(self, ctx:imap2017searchParser.S_fromContext): pass
    def exitS_from(self, ctx:imap2017searchParser.S_fromContext): pass
    def enterS_keyword(self, ctx:imap2017searchParser.S_keywordContext): pass
    def exitS_keyword(self, ctx:imap2017searchParser.S_keywordContext): pass
    def enterS_new(self, ctx:imap2017searchParser.S_newContext): pass
    def exitS_new(self, ctx:imap2017searchParser.S_newContext): pass
    def enterS_old(self, ctx:imap2017searchParser.S_oldContext): pass
    def exitS_old(self, ctx:imap2017searchParser.S_oldContext): pass
    def enterS_on(self, ctx:imap2017searchParser.S_onContext): pass
    def exitS_on(self, ctx:imap2017searchParser.S_onContext): pass
    def enterS_recent(self, ctx:imap2017searchParser.S_recentContext): pass
    def exitS_recent(self, ctx:imap2017searchParser.S_recentContext): pass
    def enterS_seen(self, ctx:imap2017searchParser.S_seenContext): pass
    def exitS_seen(self, ctx:imap2017searchParser.S_seenContext): pass
    def enterS_since(self, ctx:imap2017searchParser.S_sinceContext): pass
    def exitS_since(self, ctx:imap2017searchParser.S_sinceContext): pass
    def enterS_subject(self, ctx:imap2017searchParser.S_subjectContext): pass
    def exitS_subject(self, ctx:imap2017searchParser.S_subjectContext): pass
    def enterS_text(self, ctx:imap2017searchParser.S_textContext): pass
    def exitS_text(self, ctx:imap2017searchParser.S_textContext): pass
    def enterS_to(self, ctx:imap2017searchParser.S_toContext): pass
    def exitS_to(self, ctx:imap2017searchParser.S_toContext): pass
    def enterS_unaswered(self, ctx:imap2017searchParser.S_unasweredContext): pass
    def exitS_unaswered(self, ctx:imap2017searchParser.S_unasweredContext): pass
    def enterS_undeleted(self, ctx:imap2017searchParser.S_undeletedContext): pass
    def exitS_undeleted(self, ctx:imap2017searchParser.S_undeletedContext): pass
    def enterS_unflagged(self, ctx:imap2017searchParser.S_unflaggedContext): pass
    def exitS_unflagged(self, ctx:imap2017searchParser.S_unflaggedContext): pass
    def enterS_unkeyword(self, ctx:imap2017searchParser.S_unkeywordContext): pass
    def exitS_unkeyword(self, ctx:imap2017searchParser.S_unkeywordContext): pass
    def enterS_unseen(self, ctx:imap2017searchParser.S_unseenContext): pass
    def exitS_unseen(self, ctx:imap2017searchParser.S_unseenContext): pass
    def enterS_draft(self, ctx:imap2017searchParser.S_draftContext): pass
    def exitS_draft(self, ctx:imap2017searchParser.S_draftContext): pass
    def enterS_header(self, ctx:imap2017searchParser.S_headerContext): pass
    def exitS_header(self, ctx:imap2017searchParser.S_headerContext): pass
    def enterS_larger(self, ctx:imap2017searchParser.S_largerContext): pass
    def exitS_larger(self, ctx:imap2017searchParser.S_largerContext): pass
    def enterS_not(self, ctx:imap2017searchParser.S_notContext): pass
    def exitS_not(self, ctx:imap2017searchParser.S_notContext): pass
    def enterS_or(self, ctx:imap2017searchParser.S_orContext): pass
    def exitS_or(self, ctx:imap2017searchParser.S_orContext): pass
    def enterS_semtbefore(self, ctx:imap2017searchParser.S_semtbeforeContext): pass
    def exitS_semtbefore(self, ctx:imap2017searchParser.S_semtbeforeContext): pass
    def enterS_senton(self, ctx:imap2017searchParser.S_sentonContext): pass
    def exitS_senton(self, ctx:imap2017searchParser.S_sentonContext): pass
    def enterS_sentsince(self, ctx:imap2017searchParser.S_sentsinceContext): pass
    def exitS_sentsince(self, ctx:imap2017searchParser.S_sentsinceContext): pass
    def enterS_smaller(self, ctx:imap2017searchParser.S_smallerContext): pass
    def exitS_smaller(self, ctx:imap2017searchParser.S_smallerContext): pass
    def enterS_uid(self, ctx:imap2017searchParser.S_uidContext): pass
    def exitS_uid(self, ctx:imap2017searchParser.S_uidContext): pass
    def enterS_undraft(self, ctx:imap2017searchParser.S_undraftContext): pass
    def exitS_undraft(self, ctx:imap2017searchParser.S_undraftContext): pass
    def enterS_sequence_set(self, ctx:imap2017searchParser.S_sequence_setContext): pass
    def exitS_sequence_set(self, ctx:imap2017searchParser.S_sequence_setContext): pass
    def enterS_nested_search(self, ctx:imap2017searchParser.S_nested_searchContext): pass
    def exitS_nested_search(self, ctx:imap2017searchParser.S_nested_searchContext): pass
    def enterSection(self, ctx:imap2017searchParser.SectionContext): pass
    def exitSection(self, ctx:imap2017searchParser.SectionContext): pass
    def enterSection_msgtext(self, ctx:imap2017searchParser.Section_msgtextContext): pass
    def exitSection_msgtext(self, ctx:imap2017searchParser.Section_msgtextContext): pass
    def enterSection_part(self, ctx:imap2017searchParser.Section_partContext): pass
    def exitSection_part(self, ctx:imap2017searchParser.Section_partContext): pass
    def enterSection_spec(self, ctx:imap2017searchParser.Section_specContext): pass
    def exitSection_spec(self, ctx:imap2017searchParser.Section_specContext): pass
    def enterSection_text(self, ctx:imap2017searchParser.Section_textContext): pass
    def exitSection_text(self, ctx:imap2017searchParser.Section_textContext): pass
    def enterSeq_number(self, ctx:imap2017searchParser.Seq_numberContext): pass
    def exitSeq_number(self, ctx:imap2017searchParser.Seq_numberContext): pass
    def enterSeq_range(self, ctx:imap2017searchParser.Seq_rangeContext): pass
    def exitSeq_range(self, ctx:imap2017searchParser.Seq_rangeContext): pass
    def enterSequence_set(self, ctx:imap2017searchParser.Sequence_setContext): pass
    def exitSequence_set(self, ctx:imap2017searchParser.Sequence_setContext): pass
    def enterString_1(self, ctx:imap2017searchParser.String_1Context): pass
    def exitString_1(self, ctx:imap2017searchParser.String_1Context): pass
    def enterTime(self, ctx:imap2017searchParser.TimeContext): pass
    def exitTime(self, ctx:imap2017searchParser.TimeContext): pass
    def enterZone(self, ctx:imap2017searchParser.ZoneContext): pass
    def exitZone(self, ctx:imap2017searchParser.ZoneContext): pass
| |
# EFILTER Forensic Query Language
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
EFILTER DottySQL syntax output.
"""
__author__ = "Adam Sindelar <adamsh@google.com>"
import six
from efilter import dispatch
from efilter import ast
from efilter import syntax
from efilter import query as q
from efilter.parsers.dottysql import grammar
# Reverse mapping: AST node class -> DottySQL builtin function name.
BUILTINS = dict((v, k) for k, v in six.iteritems(grammar.BUILTINS))
def __expression_precedence(expr):
    """Return (precedence, associativity) for the operator behind *expr*.

    Returns (None, None) when *expr* is not a known operator node.
    """
    op = grammar.OPERATORS.by_handler.get(type(expr))
    if op is None:
        return None, None
    return op.precedence, op.assoc
@dispatch.multimethod
def asdottysql(expr):
    """Produces equivalent DottySQL output to the AST.

    This class follows the visitor pattern. See documentation on VisitorEngine.
    """
    # Base multimethod: concrete AST types register implementations below.
    _ = expr
    raise NotImplementedError()
@asdottysql.implementation(for_type=q.Query)
def asdottysql(query):
    # A Query wraps a root AST node; format the root.
    return asdottysql(query.root)
@asdottysql.implementation(for_types=(ast.Within, ast.Cast, ast.Reducer))
def asdottysql_builtin(expr):
    """Format builtin-style expressions as 'name(arg, ...)'."""
    name = BUILTINS.get(type(expr))
    if name is None:
        return "<Subexpression cannot be formatted as DottySQL.>"
    args = ", ".join(asdottysql(child) for child in expr.children)
    return "%s(%s)" % (name, args)
@asdottysql.implementation(for_type=ast.Map)
def asdottysql_map(expr):
    """Format Map as dotted access when both sides allow it, else map()."""
    dotted_types = (ast.Map, ast.Var)
    left = asdottysql(expr.lhs)
    right = asdottysql(expr.rhs)
    if isinstance(expr.lhs, dotted_types) and isinstance(expr.rhs,
                                                         dotted_types):
        return "%s.%s" % (left, right)
    return "map(%s, %s)" % (left, right)
@asdottysql.implementation(for_type=ast.Let)
def asdottysql_let(expr):
    """Format Let as 'let(name = value, ...) body'.

    Requires a literal Bind on the lhs with literal binding names.
    """
    if not isinstance(expr.lhs, ast.Bind):
        return "<Non-literal let cannot be formatted as DottySQL>"
    bindings = []
    for binding in expr.lhs.children:
        if not isinstance(binding.lhs, ast.Literal):
            return "<Non-literal binding names cannot be formatted as DottySQL>"
        bindings.append("%s = %s" % (binding.lhs.value,
                                     asdottysql(binding.rhs)))
    return "let(%s) %s" % (", ".join(bindings), asdottysql(expr.rhs))
@asdottysql.implementation(for_types=(ast.NumericExpression, ast.Relation,
                                      ast.LogicalOperation))
def asdottysql_operator(expr):
    """Format variadic infix operators, parenthesizing looser children."""
    op = grammar.OPERATORS.by_handler[type(expr)]
    rendered = []
    for child in expr.children:
        child_prec, _ = __expression_precedence(child)
        text = asdottysql(child)
        # A child that binds less tightly must be wrapped to reparse right.
        if child_prec is not None and child_prec < op.precedence:
            text = "(%s)" % text
        rendered.append(text)
    return (" %s " % op.name).join(rendered)
def _format_binary(lhs, rhs, operator, lspace=" ", rspace=" "):
    """Format 'lhs <op> rhs', parenthesizing by precedence/associativity.

    The operand on the associative side gets its effective precedence
    bumped by one so that, e.g., left-associative chains re-parse the
    same way they were built.
    """
    def _render(side, bump_assoc):
        text = asdottysql(side)
        prec, assoc = __expression_precedence(side)
        if prec is None:
            return text
        if assoc == bump_assoc:
            prec += 1
        if prec < operator.precedence:
            text = "(%s)" % text
        return text

    left = _render(lhs, "left")
    right = _render(rhs, "right")
    return "".join((left, lspace, operator.name, rspace, right))
@asdottysql.implementation(for_type=ast.Complement)
def asdottysql(expr):
    """Format logical negation, preferring infix forms.

    'not (x == y)' renders as 'x != y' and 'not (x in y)' as
    'x not in y'; anything else falls back to prefix 'not'.
    """
    if (isinstance(expr.value, ast.Equivalence)
            and len(expr.value.children) == 2):
        return _format_binary(expr.value.children[0],
                              expr.value.children[1],
                              grammar.OPERATORS.by_name["!="])

    if isinstance(expr.value, ast.Membership):
        return _format_binary(expr.value.children[0],
                              expr.value.children[1],
                              grammar.OPERATORS.by_name["not in"])

    child_precedence, assoc = __expression_precedence(expr.value)
    # BUG FIX: the original tested truthiness ('and child_precedence'),
    # which skipped the associativity bump for a precedence of 0; compare
    # against None instead, consistent with _format_binary.
    if assoc == "left" and child_precedence is not None:
        child_precedence += 1
    if (child_precedence is not None
            and child_precedence < __expression_precedence(expr)[0]):
        return "not (%s)" % asdottysql(expr.value)

    return "not %s" % asdottysql(expr.value)
@asdottysql.implementation(for_type=ast.Bind)
def asdottysql(expr):
    """Format Bind as 'bind(child, ...)'."""
    rendered = [asdottysql(child) for child in expr.children]
    return "bind(%s)" % ", ".join(rendered)
@asdottysql.implementation(for_type=ast.Pair)
def asdottysql(expr):
    """Format a key/value pair as 'key: value' (no space before ':')."""
    colon = grammar.OPERATORS.by_name[":"]
    return _format_binary(expr.lhs, expr.rhs, colon, lspace="")
@asdottysql.implementation(for_types=(ast.IsInstance, ast.RegexFilter,
                                      ast.Membership))
def asdottysql(expr):
    """Format binary infix operators via their grammar entry."""
    op = grammar.OPERATORS.by_handler[type(expr)]
    return _format_binary(expr.lhs, expr.rhs, op)
@asdottysql.implementation(for_type=ast.Apply)
def asdottysql(expr):
    """Format a function application as 'func(arg, ...)'.

    The first child is the callable; the rest are its arguments.
    """
    children = iter(expr.children)
    func_text = asdottysql(next(children))
    args = ", ".join(asdottysql(arg) for arg in children)
    return "%s(%s)" % (func_text, args)
@asdottysql.implementation(for_type=ast.Select)
def asdottysql(expr):
    """Format subscripting as 'source[key, ...]'."""
    children = iter(expr.children)
    source = asdottysql(next(children))
    # Sources that would not bind tightly enough on reparse get wrapped.
    tight_types = (ast.ValueExpression, ast.Repeat, ast.Tuple,
                   ast.Map, ast.Select, ast.Apply, ast.Bind)
    if not isinstance(expr.lhs, tight_types):
        source = "(%s)" % source
    keys = ", ".join(asdottysql(child) for child in children)
    return "%s[%s]" % (source, keys)
@asdottysql.implementation(for_type=ast.Resolve)
def asdottysql(expr):
    """Format member resolution; the rhs must be a literal member name."""
    if not isinstance(expr.rhs, ast.Literal):
        return "<expression cannot be formatted as DottySQL>"
    op = grammar.OPERATORS.by_handler[ast.Resolve]
    return _format_binary(expr.lhs, ast.Var(expr.rhs.value), op,
                          lspace="", rspace="")
@asdottysql.implementation(for_type=ast.Repeat)
def asdottysql(expr):
    """Format a repeated value as a parenthesized list."""
    parts = [asdottysql(child) for child in expr.children]
    return "(%s)" % ", ".join(parts)
@asdottysql.implementation(for_type=ast.Tuple)
def asdottysql(expr):
    """Format a tuple as a bracketed list."""
    parts = [asdottysql(child) for child in expr.children]
    return "[%s]" % ", ".join(parts)
@asdottysql.implementation(for_type=ast.IfElse)
def asdottysql(expr):
    """Format as 'if c then v [else if ...] [else default]'."""
    rendered = []
    for condition, value in expr.conditions():
        rendered.append("if %s then %s" % (asdottysql(condition),
                                           asdottysql(value)))
    chain = " else ".join(rendered)
    default = expr.default()
    # The else clause is omitted when absent or a literal None.
    if not default or default == ast.Literal(None):
        return chain
    return "%s else %s" % (chain, asdottysql(default))
@asdottysql.implementation(for_type=ast.Literal)
def asdottysql(expr):
    # Python repr() of the value doubles as DottySQL literal syntax here.
    return repr(expr.value)
@asdottysql.implementation(for_type=ast.Var)
def asdottysql(expr):
    # Variables format as their bare name.
    return expr.value
# Expose this module as the "dottysql" output formatter.
syntax.Syntax.register_formatter(shorthand="dottysql", formatter=asdottysql)
| |
@package('graph')
class Base:
    # Common visual attributes shared by nodes, edges and (sub)graphs.
    # NOTE(review): this module appears to target a Python-to-JS compiler
    # (RapydScript): it relies on global items()/values() helpers and on
    # item assignment (self[k] = v), which a plain CPython class with
    # __slots__ would not support -- confirm the toolchain.
    __slots__ = (
        'style',
        'color',
        'fillcolor',
        'label',
        'fontcolor',
    )

    def __init__(self):
        pass

    def update_properties(self, data):
        # Copy every key/value pair from *data* onto this object.
        for k, v in items(data):
            self[k] = v
@package('graph')
class Node(Base):
    # A graph vertex, with layout data parsed from graphviz-style output.
    __slots__ = (
        'name',
        'shape',
        'pos',
        'label_pos',
        'x',
        'y',
        'width',
        'height',
    )

    def __init__(self, name):
        super().__init__()
        self.name = name

    def update_properties(self, val):
        super().update_properties(val)
        # Derive numeric coordinates whenever raw "x,y" strings came in.
        if val.pos:
            self.parse_pos()
        if val.lp:
            self.parse_lp()

    def parse_pos(self):
        # pos is a graphviz "x,y" coordinate string.
        x, y = self.pos.split(',')
        self.x = float(x)
        self.y = float(y)

    def parse_lp(self):
        # lp is the label position. NOTE(review): 'lp' is not listed in
        # __slots__, so this relies on the RapydScript-style runtime
        # ignoring slots -- confirm.
        x, y = self.lp.split(',')
        self.label_pos = Point(float(x), float(y))
class Point:
    """A simple 2-D point holding x/y coordinates."""
    __slots__ = ['x', 'y']

    def __init__(self, x, y):
        self.x, self.y = x, y
@package('graph')
class Edge(Base):
    # A connection between two Node objects with a parsed spline path.
    __slots__ = (
        'pos',  # actually path
        'lp',  # label position
        'startnode',
        'endnode',
        'path',
        'startpoint',
        'endpoint',
        'arrowhead',
        'dir',
    )

    def __init__(self, snode, enode):
        # NOTE(review): Base.__init__ is a no-op, so not calling super()
        # here changes nothing today.
        self.startnode = snode
        self.endnode = enode

    def update_properties(self, val):
        super().update_properties(val)
        if val.pos:
            self.parse_pos()

    def parse_pos(self):
        # Graphviz edge pos: optional "e,x,y"/"s,x,y" arrow endpoints
        # followed by spline control points "x,y x,y ...".
        # NOTE(review): charAt()/push()/values() are JS/RapydScript
        # idioms, not CPython string/list methods -- confirm toolchain.
        arr = self.pos.split(' ')
        self.path = []
        self.startpoint = None
        self.endpoint = None
        for item in values(arr):
            if item.charAt(0) == 'e' or item.charAt(0) == 's':
                _c, x, y = item.split(',')
                pt = Point(float(x), float(y))
                if _c == 'e':
                    self.endpoint = pt
                else:
                    self.startpoint = pt
                continue
            x, y = item.split(',')
            self.path.push(Point(float(x), float(y)))
@package('graph')
class Subgraph(Base):
    # A nested graph: delegates node/edge creation to its parent while
    # keeping its own record of members and defaults.
    __slots__ = (
        'name',  # BUG FIX: was missing even though __init__ assigns it
        'bb',  # bounding box
        'parent',
        'directed',
        'node_defaults',
        'edge_defaults',
        'nodes',
        'edges',
        'subgraphs',
    )

    def __init__(self, name, parent):
        super().__init__()
        self.name = name
        self.parent = parent
        self.node_defaults = {}
        self.edge_defaults = {}
        self.nodes = {}
        self.edges = []
        self.subgraphs = []
        # Inherit directedness and defaults from the enclosing graph.
        self.directed = self.parent.directed
        self.update_node_defaults(self.parent.node_defaults)
        self.update_edge_defaults(self.parent.edge_defaults)

    def update_node_defaults(self, params):
        # BUG FIX: previously wrote to self.node_params, an attribute that
        # is never defined anywhere; __init__ and the parallel
        # Graph.update_node_defaults both use node_defaults.
        for k, v in items(params):
            self.node_defaults[k] = v

    def update_edge_defaults(self, params):
        # BUG FIX: likewise edge_params -> edge_defaults.
        for k, v in items(params):
            self.edge_defaults[k] = v

    def add_subgraph(self, name):
        gr = Subgraph(name, self)
        self.subgraphs.push(gr)
        return gr

    def add_edge(self, start, end, properties):
        # Creation happens at the top-level graph; record membership here.
        edge = self.parent.add_edge(start, end, properties)
        self.edges.push(edge)
        return edge

    def add_node(self, name, properties):
        node = self.parent.add_node(name, properties)
        self.nodes[name] = node
        return node
@package('graph')
class AnonSubgraph(Subgraph):
    # An unnamed subgraph: identical to Subgraph with name=None.
    def __init__(self, parent):
        super().__init__(None, parent)
@package('graph')
class Graph(Base):
    """Top-level (undirected) graph: owns the node map, edge list, nested
    subgraphs, and the default attribute sets applied to new nodes/edges."""

    __slots__ = (
        'name',
        'bb',  # bounding box
        'rankdir',
        'size',
        'scalex',
        'scaley',
        'directed',
        'node_defaults',
        'edge_defaults',
        'nodes',      # name -> Node
        'edges',
        'subgraphs',
    )

    def __init__(self, name):
        super().__init__()
        self.name = name
        self.node_defaults = {
            'style': '',
            'color': 0x000000,
            'shape': 'ellipse',
        }
        self.edge_defaults = {
            'arrowhead': 'normal',
            'dir': 'forward',
        }
        self.nodes = {}
        self.edges = []
        self.subgraphs = []
        self.directed = False

    def update_properties(self, val):
        super().update_properties(val)
        # Once both a bounding box and a target size are known, derive the
        # scale factors.  NOTE(review): r/w divides split() *strings* --
        # numeric only under RapydScript/JS coercion, would TypeError in
        # CPython.  Also note precedence: the condition parses as
        # (val.bb and self.size) or (val.size and self.bb) -- confirm intent.
        if val.bb and self.size or val.size and self.bb:
            l, t, r, b = self.bb.split(',')
            w, h = self.size.split(',')
            self.scalex = r / w
            self.scaley = b / h

    def update_node_defaults(self, params):
        for k, v in items(params):
            self.node_defaults[k] = v

    def update_edge_defaults(self, params):
        for k, v in items(params):
            self.edge_defaults[k] = v

    def add_subgraph(self, name):
        gr = Subgraph(name, self)
        self.subgraphs.push(gr)
        return gr

    def add_anonsub(self):
        gr = AnonSubgraph(self)
        self.subgraphs.push(gr)
        return gr

    def add_edge(self, start, end, properties):
        # Create endpoint nodes on demand.  NOTE(review): missing keys
        # return undefined under RapydScript/JS; plain CPython would raise
        # KeyError on self.nodes[start].
        snode = self.nodes[start]
        if not snode:
            snode = Node(start)
            self.nodes[start] = snode
        enode = self.nodes[end]
        if not enode:
            enode = Node(end)
            self.nodes[end] = enode
        edge = Edge(snode, enode)
        # Defaults first, then explicit properties override them.
        edge.update_properties(self.edge_defaults)
        edge.update_properties(properties)
        self.edges.push(edge)
        return edge

    def add_node(self, name, properties):
        node = self.nodes[name]
        if not node:
            node = Node(name)
            self.nodes[name] = node
        node.update_properties(self.node_defaults)
        node.update_properties(properties)
        return node
@package('graph')
class Digraph(Graph):
    # Directed variant of Graph; only flips the 'directed' flag.
    __slots__ = []

    def __init__(self, name):
        super().__init__(name)
        self.directed = True
| |
"""
Form classes
"""
from __future__ import unicode_literals
import copy
from collections import OrderedDict
from django.core.exceptions import NON_FIELD_ERRORS, ValidationError
# BoundField is imported for backwards compatibility in Django 1.9
from django.forms.boundfield import BoundField # NOQA
from django.forms.fields import Field, FileField
# pretty_name is imported for backwards compatibility in Django 1.9
from django.forms.utils import ErrorDict, ErrorList, pretty_name # NOQA
from django.forms.widgets import Media, MediaDefiningClass
from django.utils import six
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.html import conditional_escape, html_safe
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
__all__ = ('BaseForm', 'Form')
class DeclarativeFieldsMetaclass(MediaDefiningClass):
    """
    Metaclass that collects Fields declared on the base classes.
    """
    def __new__(mcs, name, bases, attrs):
        # Pull the Field instances out of the class body: they become
        # entries in ``declared_fields`` instead of plain attributes.
        fields = [(key, attrs.pop(key))
                  for key, value in list(attrs.items())
                  if isinstance(value, Field)]
        fields.sort(key=lambda item: item[1].creation_counter)
        attrs['declared_fields'] = OrderedDict(fields)

        new_class = super(DeclarativeFieldsMetaclass, mcs).__new__(mcs, name, bases, attrs)

        # Walk the MRO from most generic to most specific so subclass
        # fields override same-named base-class fields, and an attribute
        # explicitly set to None removes (shadows) an inherited field.
        collected = OrderedDict()
        for klass in reversed(new_class.__mro__):
            if hasattr(klass, 'declared_fields'):
                collected.update(klass.declared_fields)
            for attr, value in klass.__dict__.items():
                if value is None and attr in collected:
                    collected.pop(attr)

        new_class.base_fields = collected
        new_class.declared_fields = collected
        return new_class
@html_safe
@python_2_unicode_compatible
class BaseForm(object):
    # This is the main implementation of all the Form logic. Note that this
    # class is different than Form. See the comments by the Form class for more
    # information. Any improvements to the form API should be made to *this*
    # class, not to the Form class.
    field_order = None
    prefix = None

    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, label_suffix=None,
                 empty_permitted=False, field_order=None):
        # A form is "bound" as soon as any data or files are supplied.
        self.is_bound = data is not None or files is not None
        self.data = data or {}
        self.files = files or {}
        self.auto_id = auto_id
        if prefix is not None:
            self.prefix = prefix
        self.initial = initial or {}
        self.error_class = error_class
        # Translators: This is the default suffix added to form field labels
        self.label_suffix = label_suffix if label_suffix is not None else _(':')
        self.empty_permitted = empty_permitted
        self._errors = None  # Stores the errors after clean() has been called.

        # The base_fields class attribute is the *class-wide* definition of
        # fields. Because a particular *instance* of the class might want to
        # alter self.fields, we create self.fields here by copying base_fields.
        # Instances should always modify self.fields; they should not modify
        # self.base_fields.
        self.fields = copy.deepcopy(self.base_fields)
        self._bound_fields_cache = {}
        self.order_fields(self.field_order if field_order is None else field_order)

    def order_fields(self, field_order):
        """
        Rearranges the fields according to field_order.

        field_order is a list of field names specifying the order. Fields not
        included in the list are appended in the default order for backward
        compatibility with subclasses not overriding field_order. If field_order
        is None, all fields are kept in the order defined in the class.
        Unknown fields in field_order are ignored to allow disabling fields in
        form subclasses without redefining ordering.
        """
        if field_order is None:
            return
        fields = OrderedDict()
        for key in field_order:
            try:
                fields[key] = self.fields.pop(key)
            except KeyError:  # ignore unknown fields
                pass
        fields.update(self.fields)  # add remaining fields in original order
        self.fields = fields

    def __str__(self):
        return self.as_table()

    def __repr__(self):
        # Validity is unknown until full_clean() has populated self._errors.
        if self._errors is None:
            is_valid = "Unknown"
        else:
            is_valid = self.is_bound and not bool(self._errors)
        return '<%(cls)s bound=%(bound)s, valid=%(valid)s, fields=(%(fields)s)>' % {
            'cls': self.__class__.__name__,
            'bound': self.is_bound,
            'valid': is_valid,
            'fields': ';'.join(self.fields),
        }

    def __iter__(self):
        # Iterating a form yields its BoundFields, in field order.
        for name in self.fields:
            yield self[name]

    def __getitem__(self, name):
        "Returns a BoundField with the given name."
        try:
            field = self.fields[name]
        except KeyError:
            raise KeyError(
                "Key %r not found in '%s'" % (name, self.__class__.__name__))
        if name not in self._bound_fields_cache:
            self._bound_fields_cache[name] = field.get_bound_field(self, name)
        return self._bound_fields_cache[name]

    @property
    def errors(self):
        "Returns an ErrorDict for the data provided for the form"
        # Lazily triggers validation on first access.
        if self._errors is None:
            self.full_clean()
        return self._errors

    def is_valid(self):
        """
        Returns True if the form has no errors. Otherwise, False. If errors are
        being ignored, returns False.
        """
        return self.is_bound and not self.errors

    def add_prefix(self, field_name):
        """
        Returns the field name with a prefix appended, if this Form has a
        prefix set.

        Subclasses may wish to override.
        """
        return '%s-%s' % (self.prefix, field_name) if self.prefix else field_name

    def add_initial_prefix(self, field_name):
        """
        Add a 'initial' prefix for checking dynamic initial values
        """
        return 'initial-%s' % self.add_prefix(field_name)

    def _html_output(self, normal_row, error_row, row_ender, help_text_html, errors_on_separate_row):
        "Helper function for outputting HTML. Used by as_table(), as_ul(), as_p()."
        top_errors = self.non_field_errors()  # Errors that should be displayed above all fields.
        output, hidden_fields = [], []

        for name, field in self.fields.items():
            html_class_attr = ''
            bf = self[name]
            # Escape and cache in local variable.
            bf_errors = self.error_class([conditional_escape(error) for error in bf.errors])
            if bf.is_hidden:
                # Hidden-field errors are surfaced with the top errors since
                # the field itself renders nothing visible.
                if bf_errors:
                    top_errors.extend(
                        [_('(Hidden field %(name)s) %(error)s') % {'name': name, 'error': force_text(e)}
                         for e in bf_errors])
                hidden_fields.append(six.text_type(bf))
            else:
                # Create a 'class="..."' attribute if the row should have any
                # CSS classes applied.
                css_classes = bf.css_classes()
                if css_classes:
                    html_class_attr = ' class="%s"' % css_classes

                if errors_on_separate_row and bf_errors:
                    output.append(error_row % force_text(bf_errors))

                if bf.label:
                    label = conditional_escape(force_text(bf.label))
                    label = bf.label_tag(label) or ''
                else:
                    label = ''

                if field.help_text:
                    help_text = help_text_html % force_text(field.help_text)
                else:
                    help_text = ''

                output.append(normal_row % {
                    'errors': force_text(bf_errors),
                    'label': force_text(label),
                    'field': six.text_type(bf),
                    'help_text': help_text,
                    'html_class_attr': html_class_attr,
                    'css_classes': css_classes,
                    'field_name': bf.html_name,
                })

        if top_errors:
            output.insert(0, error_row % force_text(top_errors))

        if hidden_fields:  # Insert any hidden fields in the last row.
            str_hidden = ''.join(hidden_fields)
            if output:
                last_row = output[-1]
                # Chop off the trailing row_ender (e.g. '</td></tr>') and
                # insert the hidden fields.
                if not last_row.endswith(row_ender):
                    # This can happen in the as_p() case (and possibly others
                    # that users write): if there are only top errors, we may
                    # not be able to conscript the last row for our purposes,
                    # so insert a new, empty row.
                    last_row = (normal_row % {
                        'errors': '',
                        'label': '',
                        'field': '',
                        'help_text': '',
                        'html_class_attr': html_class_attr,
                        'css_classes': '',
                        'field_name': '',
                    })
                    output.append(last_row)
                output[-1] = last_row[:-len(row_ender)] + str_hidden + row_ender
            else:
                # If there aren't any rows in the output, just append the
                # hidden fields.
                output.append(str_hidden)
        return mark_safe('\n'.join(output))

    def as_table(self):
        "Returns this form rendered as HTML <tr>s -- excluding the <table></table>."
        return self._html_output(
            normal_row='<tr%(html_class_attr)s><th>%(label)s</th><td>%(errors)s%(field)s%(help_text)s</td></tr>',
            error_row='<tr><td colspan="2">%s</td></tr>',
            row_ender='</td></tr>',
            help_text_html='<br /><span class="helptext">%s</span>',
            errors_on_separate_row=False)

    def as_ul(self):
        "Returns this form rendered as HTML <li>s -- excluding the <ul></ul>."
        return self._html_output(
            normal_row='<li%(html_class_attr)s>%(errors)s%(label)s %(field)s%(help_text)s</li>',
            error_row='<li>%s</li>',
            row_ender='</li>',
            help_text_html=' <span class="helptext">%s</span>',
            errors_on_separate_row=False)

    def as_p(self):
        "Returns this form rendered as HTML <p>s."
        return self._html_output(
            normal_row='<p%(html_class_attr)s>%(label)s %(field)s%(help_text)s</p>',
            error_row='%s',
            row_ender='</p>',
            help_text_html=' <span class="helptext">%s</span>',
            errors_on_separate_row=True)

    def non_field_errors(self):
        """
        Returns an ErrorList of errors that aren't associated with a particular
        field -- i.e., from Form.clean(). Returns an empty ErrorList if there
        are none.
        """
        return self.errors.get(NON_FIELD_ERRORS, self.error_class(error_class='nonfield'))

    def add_error(self, field, error):
        """
        Update the content of `self._errors`.

        The `field` argument is the name of the field to which the errors
        should be added. If its value is None the errors will be treated as
        NON_FIELD_ERRORS.

        The `error` argument can be a single error, a list of errors, or a
        dictionary that maps field names to lists of errors. What we define as
        an "error" can be either a simple string or an instance of
        ValidationError with its message attribute set and what we define as
        list or dictionary can be an actual `list` or `dict` or an instance
        of ValidationError with its `error_list` or `error_dict` attribute set.

        If `error` is a dictionary, the `field` argument *must* be None and
        errors will be added to the fields that correspond to the keys of the
        dictionary.
        """
        if not isinstance(error, ValidationError):
            # Normalize to ValidationError and let its constructor
            # do the hard work of making sense of the input.
            error = ValidationError(error)

        if hasattr(error, 'error_dict'):
            if field is not None:
                raise TypeError(
                    "The argument `field` must be `None` when the `error` "
                    "argument contains errors for multiple fields."
                )
            else:
                error = error.error_dict
        else:
            error = {field or NON_FIELD_ERRORS: error.error_list}

        for field, error_list in error.items():
            if field not in self.errors:
                if field != NON_FIELD_ERRORS and field not in self.fields:
                    raise ValueError(
                        "'%s' has no field named '%s'." % (self.__class__.__name__, field))
                if field == NON_FIELD_ERRORS:
                    self._errors[field] = self.error_class(error_class='nonfield')
                else:
                    self._errors[field] = self.error_class()
            self._errors[field].extend(error_list)
            # A field with errors can no longer claim cleaned data.
            if field in self.cleaned_data:
                del self.cleaned_data[field]

    def has_error(self, field, code=None):
        # With no code, this is simple membership; with a code, inspect the
        # underlying ValidationError instances.
        if code is None:
            return field in self.errors
        if field in self.errors:
            for error in self.errors.as_data()[field]:
                if error.code == code:
                    return True
        return False

    def full_clean(self):
        """
        Cleans all of self.data and populates self._errors and
        self.cleaned_data.
        """
        self._errors = ErrorDict()
        if not self.is_bound:  # Stop further processing.
            return
        self.cleaned_data = {}
        # If the form is permitted to be empty, and none of the form data has
        # changed from the initial data, short circuit any validation.
        if self.empty_permitted and not self.has_changed():
            return

        self._clean_fields()
        self._clean_form()
        self._post_clean()

    def _clean_fields(self):
        for name, field in self.fields.items():
            if field.disabled:
                # Initial values are supposed to be clean
                self.cleaned_data[name] = self.initial.get(name, field.initial)
                continue
            # value_from_datadict() gets the data from the data dictionaries.
            # Each widget type knows how to retrieve its own data, because some
            # widgets split data over several HTML fields.
            value = field.widget.value_from_datadict(self.data, self.files, self.add_prefix(name))
            try:
                if isinstance(field, FileField):
                    initial = self.initial.get(name, field.initial)
                    value = field.clean(value, initial)
                else:
                    value = field.clean(value)
                self.cleaned_data[name] = value
                # Per-field clean_<name>() hooks run after the field's own
                # clean() and may replace the cleaned value.
                if hasattr(self, 'clean_%s' % name):
                    value = getattr(self, 'clean_%s' % name)()
                    self.cleaned_data[name] = value
            except ValidationError as e:
                self.add_error(name, e)

    def _clean_form(self):
        try:
            cleaned_data = self.clean()
        except ValidationError as e:
            self.add_error(None, e)
        else:
            if cleaned_data is not None:
                self.cleaned_data = cleaned_data

    def _post_clean(self):
        """
        An internal hook for performing additional cleaning after form cleaning
        is complete. Used for model validation in model forms.
        """
        pass

    def clean(self):
        """
        Hook for doing any extra form-wide cleaning after Field.clean() has been
        called on every field. Any ValidationError raised by this method will
        not be associated with a particular field; it will have a special-case
        association with the field named '__all__'.
        """
        return self.cleaned_data

    def has_changed(self):
        """
        Returns True if data differs from initial.
        """
        return bool(self.changed_data)

    @cached_property
    def changed_data(self):
        data = []
        for name, field in self.fields.items():
            prefixed_name = self.add_prefix(name)
            data_value = field.widget.value_from_datadict(self.data, self.files, prefixed_name)
            if not field.show_hidden_initial:
                initial_value = self.initial.get(name, field.initial)
                if callable(initial_value):
                    initial_value = initial_value()
            else:
                # The field's initial value was rendered in a hidden widget;
                # read it back from the submitted data.
                initial_prefixed_name = self.add_initial_prefix(name)
                hidden_widget = field.hidden_widget()
                try:
                    initial_value = field.to_python(hidden_widget.value_from_datadict(
                        self.data, self.files, initial_prefixed_name))
                except ValidationError:
                    # Always assume data has changed if validation fails.
                    data.append(name)
                    continue
            if field.has_changed(initial_value, data_value):
                data.append(name)
        return data

    @property
    def media(self):
        """
        Provide a description of all media required to render the widgets on this form
        """
        media = Media()
        for field in self.fields.values():
            media = media + field.widget.media
        return media

    def is_multipart(self):
        """
        Returns True if the form needs to be multipart-encoded, i.e. it has
        FileInput. Otherwise, False.
        """
        for field in self.fields.values():
            if field.widget.needs_multipart_form:
                return True
        return False

    def hidden_fields(self):
        """
        Returns a list of all the BoundField objects that are hidden fields.
        Useful for manual form layout in templates.
        """
        return [field for field in self if field.is_hidden]

    def visible_fields(self):
        """
        Returns a list of BoundField objects that aren't hidden fields.
        The opposite of the hidden_fields() method.
        """
        return [field for field in self if not field.is_hidden]
class Form(six.with_metaclass(DeclarativeFieldsMetaclass, BaseForm)):
    """A collection of Fields, plus their associated data.

    Fields are declared as class attributes; DeclarativeFieldsMetaclass
    collects them into ``base_fields``.
    """
    # This is a separate class from BaseForm in order to abstract the way
    # self.fields is specified. This class (Form) is the one that does the
    # fancy metaclass stuff purely for the semantic sugar -- it allows one
    # to define a form using declarative syntax.
    # BaseForm itself has no way of designating self.fields.
| |
import copy
import re
from collections import OrderedDict, defaultdict
from conans.errors import ConanException
from conans.util.log import logger
def unquote(text):
    """Strip surrounding whitespace and one level of matching single or
    double quotes from *text*; mismatched quotes are left untouched."""
    stripped = text.strip()
    is_quoted = (len(stripped) > 1 and
                 stripped[0] == stripped[-1] and
                 stripped[0] in "'\"")
    return stripped[1:-1] if is_quoted else stripped
class EnvValues(object):
    """ Object to represent the introduced env values entered by the user
    with the -e or profiles etc.
    self._data is a dictionary with: {package: {var: value}}
    "package" can be None if the var is global.
    "value" can be a list or a string. If it's a list the variable is appendable like PATH or PYTHONPATH
    """

    def __init__(self):
        self._data = defaultdict(dict)

    def copy(self):
        # Deep copy so nested per-package dicts (and list values) are
        # independent of the original.
        ret = EnvValues()
        ret._data = copy.deepcopy(self._data)
        return ret

    @staticmethod
    def load_value(the_value):
        # "[a,b,c]" is parsed as an appendable list; anything else is kept
        # verbatim as a scalar string.
        if the_value.startswith("[") and the_value.endswith("]"):
            return [val.strip() for val in the_value[1:-1].split(",") if val]
        else:
            return the_value

    @staticmethod
    def loads(text):
        """Parse lines of the form ``[pkg:]NAME=value`` into an EnvValues."""
        ret = EnvValues()
        if not text:
            return ret
        for env_def in text.splitlines():
            try:
                if env_def:
                    if "=" not in env_def:
                        raise ConanException("Invalid env line '%s'" % env_def)
                    tmp = env_def.split("=", 1)
                    name = tmp[0]
                    value = unquote(tmp[1])
                    package = None
                    if ":" in name:
                        tmp = name.split(":", 1)
                        package = tmp[0].strip()
                        name = tmp[1].strip()
                    else:
                        name = name.strip()
                    # Lists values=> MYVAR=[1,2,three]
                    value = EnvValues.load_value(value)
                    ret.add(name, value, package)
            except ConanException:
                raise
            except Exception as exc:
                raise ConanException("Error parsing the env values: %s" % str(exc))
        return ret

    def dumps(self):
        # NOTE: append_vars reads the loop variable ``package`` from the
        # enclosing scope at call time; it must only be called inside the
        # loops below.
        def append_vars(pairs, result):
            for name, value in sorted(pairs.items()):
                if isinstance(value, list):
                    value = "[%s]" % ",".join(value)
                if package:
                    result.append("%s:%s=%s" % (package, name, value.replace("\\", "/")))
                else:
                    result.append("%s=%s" % (name, value.replace("\\", "/")))

        result = []
        # First the global vars
        for package, pairs in self._sorted_data:
            if package is None:
                append_vars(pairs, result)
        # Then the package scoped ones
        for package, pairs in self._sorted_data:
            if package is not None:
                append_vars(pairs, result)
        return "\n".join(result)

    @property
    def data(self):
        return self._data

    @property
    def _sorted_data(self):
        # Python 3 can't compare None with strings, so if None we order just with the var name
        return [(key, self._data[key]) for key in sorted(self._data, key=lambda x: x if x else "a")]

    def add(self, name, value, package=None):
        # New data, not previous value
        if name not in self._data[package]:
            if isinstance(value, list):
                self._data[package][name] = value
            else:
                # Normalize Windows path separators on scalar values.
                self._data[package][name] = value.replace("\\", "/")
        # There is data already
        else:
            # Only append at the end if we had a list; an existing scalar
            # value is deliberately kept as-is (first value wins).
            if isinstance(self._data[package][name], list):
                if isinstance(value, list):
                    self._data[package][name].extend(value)
                else:
                    self._data[package][name].append(value)

    def remove(self, name, package=None):
        del self._data[package][name]

    def update_replace(self, key, value):
        """ method useful for command "conan profile update"
        to execute real update instead of soft update
        """
        if ":" in key:
            package_name, key = key.split(":", 1)
        else:
            package_name, key = None, key
        self._data[package_name][key] = value

    def normalize_paths(self):
        # Rewrite backslashes to forward slashes on all scalar values.
        for package_name, env_vars in self._data.items():
            for name, value in env_vars.items():
                if not isinstance(value, list):
                    self._data[package_name][name] = value.replace("\\", "/")

    def update(self, env_obj):
        """accepts other EnvValues object or DepsEnvInfo
        it prioritize the values that are already at self._data
        """
        if env_obj:
            if isinstance(env_obj, EnvValues):
                for package_name, env_vars in env_obj.data.items():
                    for name, value in env_vars.items():
                        if isinstance(value, list):
                            value = copy.copy(value)  # Aware of copying by reference the list
                        self.add(name, value, package_name)
            # DepsEnvInfo. the OLD values are always kept, never overwrite,
            elif isinstance(env_obj, DepsEnvInfo):
                for (name, value) in env_obj.vars.items():
                    self.add(name, value)
            else:
                raise ConanException("unknown env type: %s" % env_obj)

    def env_dicts(self, package_name):
        """Returns two dicts of env variables that applies to package 'name',
        the first for simple values A=1, and the second for multiple A=1;2;3"""
        ret = {}
        ret_multi = {}
        # First process the global variables
        global_pairs = self._data.get(None)
        own_pairs = self._data.get(package_name)
        if global_pairs:
            for name, value in global_pairs.items():
                if isinstance(value, list):
                    ret_multi[name] = value
                else:
                    ret[name] = value
        # Then the package scoped vars, that will override the globals
        if own_pairs:
            for name, value in own_pairs.items():
                if isinstance(value, list):
                    ret_multi[name] = value
                    if name in ret:  # Already exists a global variable, remove it
                        del ret[name]
                else:
                    ret[name] = value
                    if name in ret_multi:  # Already exists a list global variable, remove it
                        del ret_multi[name]
        # FIXME: This dict is only used doing a ret.update(ret_multi). Unnecessary?
        return ret, ret_multi

    def __repr__(self):
        return str(dict(self._data))
class EnvInfo(object):
    """Attribute-style container for environment variables.

    env = EnvInfo()
    env.hola = True            # plain assignment stores a scalar
    env.Cosa.append("OTRO")    # unknown names auto-create an appendable list
    env.Cosa.append("MAS")
    """
    def __init__(self):
        self._values_ = {}

    @staticmethod
    def _adjust_casing(name):
        """We don't want to mix "path" with "PATH", actually we don`t want to mix anything
        with different casing. Furthermore in Windows all is uppercase, but managing all in
        upper case will be breaking."""
        return name.upper() if name.lower() == "path" else name

    def __getattr__(self, name):
        # Names wrapped in underscores are internal machinery; delegate.
        if name.startswith("_") and name.endswith("_"):
            return super(EnvInfo, self).__getattr__(name)
        key = self._adjust_casing(name)
        if not self._values_.get(key):
            # Missing (or falsy) entries become fresh appendable lists.
            self._values_[key] = []
        return self._values_[key]

    def __setattr__(self, name, value):
        if name.startswith("_") and name.endswith("_"):
            return super(EnvInfo, self).__setattr__(name, value)
        self._values_[self._adjust_casing(name)] = value

    @property
    def vars(self):
        return self._values_
class DepsEnvInfo(EnvInfo):
    """ All the env info for a conanfile dependencies
    """
    def __init__(self):
        super(DepsEnvInfo, self).__init__()
        # Preserves insertion order of dependency packages.
        self._dependencies_ = OrderedDict()

    @property
    def dependencies(self):
        return self._dependencies_.items()

    @property
    def deps(self):
        return self._dependencies_.keys()

    def __getitem__(self, item):
        return self._dependencies_[item]

    def update(self, dep_env_info, pkg_name):
        """Record *dep_env_info* under *pkg_name* and merge its vars into
        our aggregated vars, keeping values that were set first."""
        self._dependencies_[pkg_name] = dep_env_info

        def merge_lists(seq1, seq2):
            # seq1 entries not already in seq2, then seq2 (seq2 order wins).
            return [s for s in seq1 if s not in seq2] + seq2

        # For vars: if a value is already set, keep it; lists get merged.
        for varname, value in dep_env_info.vars.items():
            if varname not in self.vars:
                self.vars[varname] = value
            elif isinstance(self.vars[varname], list):
                if isinstance(value, list):
                    self.vars[varname] = merge_lists(self.vars[varname], value)
                else:
                    self.vars[varname] = merge_lists(self.vars[varname], [value])
            else:
                # Existing scalar: the incoming value is dropped.
                logger.warn("DISCARDED variable %s=%s from %s" % (varname, value, pkg_name))

    def update_deps_env_info(self, dep_env_info):
        assert isinstance(dep_env_info, DepsEnvInfo)
        for pkg_name, env_info in dep_env_info.dependencies:
            self.update(env_info, pkg_name)

    @staticmethod
    def loads(text):
        """Inverse of dumps(): parse "[ENV_pkg]" sections of NAME=value lines."""
        ret = DepsEnvInfo()
        lib_name = None
        env_info = None
        for line in text.splitlines():
            if not lib_name and not line.startswith("[ENV_"):
                raise ConanException("Error, invalid file format reading env info variables")
            elif line.startswith("[ENV_"):
                if env_info:
                    ret.update(env_info, lib_name)
                lib_name = line[5:-1]
                env_info = EnvInfo()
            else:
                var_name, value = line.split("=", 1)
                if value[0] == "[" and value[-1] == "]":
                    # Take all the items between quotes
                    values = re.findall('"([^"]*)"', value[1:-1])
                    for val in values:
                        getattr(env_info, var_name).append(val)
                else:
                    # NOTE(review): the original comment said "peel quotes",
                    # but no quote stripping happens here -- confirm intent.
                    setattr(env_info, var_name, value)
        if env_info:
            ret.update(env_info, lib_name)
        return ret

    def dumps(self):
        sections = []
        for name, env_info in self._dependencies_.items():
            sections.append("[ENV_%s]" % name)
            for var, values in sorted(env_info.vars.items()):
                tmp = "%s=" % var
                if isinstance(values, list):
                    # Lists are serialized as ["a","b",...].
                    tmp += "[%s]" % ",".join(['"%s"' % val for val in values])
                else:
                    tmp += '%s' % values
                sections.append(tmp)
        return "\n".join(sections)
| |
#!/usr/bin/env python3
#
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Lint format strings: This program checks that the number of arguments passed
# to a variadic format string function matches the number of format specifiers
# in the format string.
import argparse
import re
import sys
# (filename, function call) pairs the linter should skip -- presumably
# matched against detected calls to suppress known-good variadic wrappers;
# the consuming code is outside this chunk, so verify against main().
FALSE_POSITIVES = [
    ("src/dbwrapper.cpp", "vsnprintf(p, limit - p, format, backup_ap)"),
    ("src/index/base.cpp", "FatalError(const char* fmt, const Args&... args)"),
    ("src/netbase.cpp", "LogConnectFailure(bool manual_connection, const char* fmt, const Args&... args)"),
    ("src/util/system.cpp", "strprintf(_(COPYRIGHT_HOLDERS), _(COPYRIGHT_HOLDERS_SUBSTITUTION))"),
    ("src/util/system.cpp", "strprintf(COPYRIGHT_HOLDERS, COPYRIGHT_HOLDERS_SUBSTITUTION)"),
    ("src/wallet/wallet.h", "WalletLogPrintf(std::string fmt, Params... parameters)"),
    ("src/wallet/wallet.h", "LogPrintf((\"%s \" + fmt).c_str(), GetDisplayName(), parameters...)"),
    ("src/logging.h", "LogPrintf(const char* fmt, const Args&... args)"),
]
def parse_function_calls(function_name, source_code):
    """Return an array with all calls to function function_name in string source_code.
    Preprocessor directives and C++ style comments ("//") in source_code are removed.

    >>> len(parse_function_calls("foo", "foo();bar();foo();bar();"))
    2
    >>> parse_function_calls("foo", "foo(1);bar(1);foo(2);bar(2);")[0].startswith("foo(1);")
    True
    >>> parse_function_calls("foo", "foo(1);bar(1);foo(2);bar(2);")[1].startswith("foo(2);")
    True
    >>> len(parse_function_calls("foo", "foo();bar();// foo();bar();"))
    1
    >>> len(parse_function_calls("foo", "#define FOO foo();"))
    0
    """
    assert type(function_name) is str and type(source_code) is str and function_name
    # Drop preprocessor lines and strip '//' comments before scanning.
    cleaned = (re.sub("// .*", " ", raw).strip()
               for raw in source_code.split("\n")
               if not raw.strip().startswith("#"))
    haystack = " " + " ".join(cleaned)
    # Lookahead keeps overlapping matches; the leading character class
    # rejects calls that are part of a longer identifier.
    call_pattern = r"[^a-zA-Z_](?=({}\(.*).*)".format(function_name)
    return re.findall(call_pattern, haystack)
def normalize(s):
    """Return a normalized version of string s with newlines, tabs and C style comments ("/* ... */")
    replaced with spaces. Multiple spaces are replaced with a single space.

    >>> normalize(" /* nothing */ foo\tfoo /* bar */ foo ")
    'foo foo foo'
    """
    assert type(s) is str
    s = s.replace("\n", " ")
    s = s.replace("\t", " ")
    # FIX: use raw strings for regex patterns. '\*' in a plain string is an
    # invalid escape sequence (DeprecationWarning, and a SyntaxWarning/
    # SyntaxError in newer CPython releases).
    s = re.sub(r"/\*.*?\*/", " ", s)
    s = re.sub(r" {2,}", " ", s)
    return s.strip()
# Two-character escape sequences mapped to unambiguous placeholder tokens,
# applied/reversed by escape() and unescape().
ESCAPE_MAP = {
    r"\n": "[escaped-newline]",
    r"\t": "[escaped-tab]",
    r'\"': "[escaped-quote]",
}
def escape(s):
    """Return the escaped version of string s with "\\\"", "\\n" and "\\t" escaped as
    "[escaped-backslash]", "[escaped-newline]" and "[escaped-tab]".

    >>> unescape(escape("foo")) == "foo"
    True
    >>> escape(r'foo \\t foo \\n foo \\\\ foo \\ foo \\"bar\\"')
    'foo [escaped-tab] foo [escaped-newline] foo \\\\\\\\ foo \\\\ foo [escaped-quote]bar[escaped-quote]'
    """
    assert type(s) is str
    # Substitute every known escape sequence with its placeholder token.
    result = s
    for raw_value, escaped_value in ESCAPE_MAP.items():
        result = result.replace(raw_value, escaped_value)
    return result
def unescape(s):
    """Return the unescaped version of escaped string s.
    Reverses the replacements made in function escape(s).

    >>> unescape(escape("bar"))
    'bar'
    >>> unescape("foo [escaped-tab] foo [escaped-newline] foo \\\\\\\\ foo \\\\ foo [escaped-quote]bar[escaped-quote]")
    'foo \\\\t foo \\\\n foo \\\\\\\\ foo \\\\ foo \\\\"bar\\\\"'
    """
    assert type(s) is str
    # Replace each placeholder token back with its raw escape sequence.
    result = s
    for raw_value, escaped_value in ESCAPE_MAP.items():
        result = result.replace(escaped_value, raw_value)
    return result
def parse_function_call_and_arguments(function_name, function_call):
    """Split string function_call into an array of strings consisting of:
    * the string function_call followed by "("
    * the function call argument #1
    * ...
    * the function call argument #n
    * a trailing ");"

    The strings returned are in escaped form. See escape(...).

    >>> parse_function_call_and_arguments("foo", 'foo("%s", "foo");')
    ['foo(', '"%s",', ' "foo"', ')']
    >>> parse_function_call_and_arguments("foo", 'foo("%s", "foo");')
    ['foo(', '"%s",', ' "foo"', ')']
    >>> parse_function_call_and_arguments("foo", 'foo("%s %s", "foo", "bar");')
    ['foo(', '"%s %s",', ' "foo",', ' "bar"', ')']
    >>> parse_function_call_and_arguments("fooprintf", 'fooprintf("%050d", i);')
    ['fooprintf(', '"%050d",', ' i', ')']
    >>> parse_function_call_and_arguments("foo", 'foo(bar(foobar(barfoo("foo"))), foobar); barfoo')
    ['foo(', 'bar(foobar(barfoo("foo"))),', ' foobar', ')']
    >>> parse_function_call_and_arguments("foo", "foo()")
    ['foo(', '', ')']
    >>> parse_function_call_and_arguments("foo", "foo(123)")
    ['foo(', '123', ')']
    >>> parse_function_call_and_arguments("foo", 'foo("foo")')
    ['foo(', '"foo"', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t>().to_bytes(buf), err);')
    ['strprintf(', '"%s (%d)",', ' std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t>().to_bytes(buf),', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo<wchar_t>().to_bytes(buf), err);')
    ['strprintf(', '"%s (%d)",', ' foo<wchar_t>().to_bytes(buf),', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo().to_bytes(buf), err);')
    ['strprintf(', '"%s (%d)",', ' foo().to_bytes(buf),', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo << 1, err);')
    ['strprintf(', '"%s (%d)",', ' foo << 1,', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo<bar>() >> 1, err);')
    ['strprintf(', '"%s (%d)",', ' foo<bar>() >> 1,', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo < 1 ? bar : foobar, err);')
    ['strprintf(', '"%s (%d)",', ' foo < 1 ? bar : foobar,', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo < 1, err);')
    ['strprintf(', '"%s (%d)",', ' foo < 1,', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo > 1 ? bar : foobar, err);')
    ['strprintf(', '"%s (%d)",', ' foo > 1 ? bar : foobar,', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo > 1, err);')
    ['strprintf(', '"%s (%d)",', ' foo > 1,', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo <= 1, err);')
    ['strprintf(', '"%s (%d)",', ' foo <= 1,', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo <= bar<1, 2>(1, 2), err);')
    ['strprintf(', '"%s (%d)",', ' foo <= bar<1, 2>(1, 2),', ' err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo>foo<1,2>(1,2)?bar:foobar,err)');
    ['strprintf(', '"%s (%d)",', ' foo>foo<1,2>(1,2)?bar:foobar,', 'err', ')']
    >>> parse_function_call_and_arguments("strprintf", 'strprintf("%s (%d)", foo>foo<1,2>(1,2),err)');
    ['strprintf(', '"%s (%d)",', ' foo>foo<1,2>(1,2),', 'err', ')']
    """
    assert(type(function_name) is str and type(function_call) is str and function_name)
    remaining = normalize(escape(function_call))
    expected_function_call = "{}(".format(function_name)
    assert(remaining.startswith(expected_function_call))
    parts = [expected_function_call]
    remaining = remaining[len(expected_function_call):]
    # Character-by-character state machine: track string literals, nesting
    # depth of parentheses, and (heuristically) template angle brackets so
    # that commas inside them do not split arguments.
    open_parentheses = 1
    open_template_arguments = 0
    in_string = False
    parts.append("")
    for i, char in enumerate(remaining):
        # Accumulate the character onto the current (last) part.
        parts.append(parts.pop() + char)
        if char == "\"":
            in_string = not in_string
            continue
        if in_string:
            continue
        if char == "(":
            open_parentheses += 1
            continue
        if char == ")":
            open_parentheses -= 1
        # Inside a nested call: nothing below applies.
        if open_parentheses > 1:
            continue
        if open_parentheses == 0:
            # Top-level close: strip the ')' just appended, emit it as its
            # own trailing part, and stop.
            parts.append(parts.pop()[:-1])
            parts.append(char)
            break
        prev_char = remaining[i - 1] if i - 1 >= 0 else None
        next_char = remaining[i + 1] if i + 1 <= len(remaining) - 1 else None
        # Heuristic: '<'/'>' count as template delimiters only when not
        # spaced/doubled/part of <=, >=, <<, >> (comparison/shift operators).
        if char == "<" and next_char not in [" ", "<", "="] and prev_char not in [" ", "<"]:
            open_template_arguments += 1
            continue
        if char == ">" and next_char not in [" ", ">", "="] and prev_char not in [" ", ">"] and open_template_arguments > 0:
            open_template_arguments -= 1
        if open_template_arguments > 0:
            continue
        if char == ",":
            # Top-level comma: start a new argument part.
            parts.append("")
    return parts
def parse_string_content(argument):
    """Return the text within quotes in string argument.

    >>> parse_string_content('1 "foo %d bar" 2')
    'foo %d bar'
    >>> parse_string_content('1 foobar 2')
    ''
    >>> parse_string_content('1 "bar" 2')
    'bar'
    >>> parse_string_content('1 "foo" 2 "bar" 3')
    'foobar'
    >>> parse_string_content('1 "foo" 2 " " "bar" 3')
    'foo bar'
    >>> parse_string_content('""')
    ''
    >>> parse_string_content('')
    ''
    >>> parse_string_content('1 2 3')
    ''
    """
    assert type(argument) is str
    # Collect characters seen while inside double quotes; adjacent quoted
    # segments are concatenated.
    collected = []
    inside_quotes = False
    for ch in normalize(escape(argument)):
        if ch == "\"":
            inside_quotes = not inside_quotes
        elif inside_quotes:
            collected.append(ch)
    return "".join(collected)
def count_format_specifiers(format_string):
    """Return the number of format specifiers in string format_string.

    >>> count_format_specifiers("foo bar foo")
    0
    >>> count_format_specifiers("foo %d bar foo")
    1
    >>> count_format_specifiers("foo %d bar %i foo")
    2
    >>> count_format_specifiers("foo %d bar %i foo %% foo")
    2
    >>> count_format_specifiers("foo %d bar %i foo %% foo %d foo")
    3
    >>> count_format_specifiers("foo %d bar %i foo %% foo %*d foo")
    4
    """
    assert type(format_string) is str
    # Neutralize escaped percent signs first; the replacement 'X' is also a
    # conversion character, so it terminates any specifier state.
    neutralized = format_string.replace('%%', 'X')
    count = 0
    inside_specifier = False
    for ch in neutralized:
        if ch == "%":
            inside_specifier = True
            count += 1
        elif ch in "aAcdeEfFgGinopsuxX":
            inside_specifier = False
        elif ch == "*" and inside_specifier:
            # A '*' width/precision consumes one extra argument.
            count += 1
    return count
def main():
    """Check each input file: every call to the given variadic format-string
    function must pass exactly as many arguments as the format string has
    specifiers. Exits non-zero if any mismatch (or unparseable call) is found.
    """
    parser = argparse.ArgumentParser(description="This program checks that the number of arguments passed "
                                                 "to a variadic format string function matches the number of format "
                                                 "specifiers in the format string.")
    parser.add_argument("--skip-arguments", type=int, help="number of arguments before the format string "
                                                           "argument (e.g. 1 in the case of fprintf)", default=0)
    parser.add_argument("function_name", help="function name (e.g. fprintf)", default=None)
    parser.add_argument("file", type=argparse.FileType("r", encoding="utf-8"), nargs="*", help="C++ source code file (e.g. foo.cpp)")
    args = parser.parse_args()

    exit_code = 0
    for source_file in args.file:
        contents = source_file.read()
        for call_str in parse_function_calls(args.function_name, contents):
            parts = parse_function_call_and_arguments(args.function_name, call_str)
            # Truncate for readable diagnostics and FALSE_POSITIVES matching.
            relevant_call = unescape("".join(parts))[:512]
            if (source_file.name, relevant_call) in FALSE_POSITIVES:
                continue
            # parts = [prefix, format-string, args..., ")"], so fewer than
            # 3 + skip_arguments elements means the call did not parse.
            if len(parts) < 3 + args.skip_arguments:
                exit_code = 1
                print("{}: Could not parse function call string \"{}(...)\": {}".format(source_file.name, args.function_name, relevant_call))
                continue
            argument_count = len(parts) - 3 - args.skip_arguments
            format_str = parse_string_content(parts[1 + args.skip_arguments])
            format_specifier_count = count_format_specifiers(format_str)
            if format_specifier_count != argument_count:
                exit_code = 1
                print("{}: Expected {} argument(s) after format string but found {} argument(s): {}".format(source_file.name, format_specifier_count, argument_count, relevant_call))
    sys.exit(exit_code)
# Script entry point.
if __name__ == "__main__":
    main()
| |
from twisted.trial import unittest
from game.direction import FORWARD, BACKWARD, LEFT, RIGHT
from game.test.util import PlayerCreationMixin
from game.vector import Vector
# Expedient hack until I switch to decimals
_epsilon = 0.0001
class DirectionObserver(object):
    """
    Recording implementation of the direction-observer interface, used to
    verify that direction observation works.

    @ivar changes: C{list} of three-tuples of player object, position, and
        direction -- one element appended per C{directionChanged} call.
    """
    def __init__(self):
        self.changes = []

    def directionChanged(self, player):
        """
        Record a direction change event for the given player.

        @param player: The player which changed direction.
        """
        snapshot = (player, player.getPosition(), player.direction)
        self.changes.append(snapshot)
class PlayerTests(unittest.TestCase, PlayerCreationMixin):
    """
    There should be an object which has a position and can be moved in
    eight directions.

    Note: assertions consistently use C{assertEqual}; the deprecated
    C{assertEquals} alias has been replaced.
    """
    def test_setPosition(self):
        """
        Players have a position which can be set with C{setPosition}.
        """
        player = self.makePlayer(Vector(1, 0, 2))
        player.setPosition(Vector(-2, 0, 1))
        self.assertEqual(player.getPosition(), Vector(-2, 0, 1))

    def test_setPositionAfterSomeMotion(self):
        """
        Players should be placed at the correct position if C{setPosition} is
        called after they have been moving around a little bit.
        """
        player = self.makePlayer(Vector(1, 0, 2))
        player.setDirection(FORWARD)
        self.advanceTime(1)
        player.setPosition(Vector(-2, 0, 1))
        self.assertEqual(player.getPosition(), Vector(-2, 0, 1))

    def test_getPosition(self):
        """
        Players have a C{getPosition} method the initial return value of which
        is based on initializer parameters.
        """
        player = self.makePlayer(Vector(0, 0, 0))
        v = player.getPosition()
        self.assertEqual(v, Vector(0, 0, 0))

    def test_setDirection(self):
        """
        L{Player.setDirection} should accept a vector which sets the direction
        of the player's movement.
        """
        player = self.makePlayer(Vector(3, 0, 2))
        player.setDirection(FORWARD + LEFT)
        self.assertEqual(player.direction, FORWARD + LEFT)
        player.setDirection(BACKWARD)
        self.assertEqual(player.direction, BACKWARD)

    def test_getPositionWithoutMovementAfterTimePasses(self):
        """
        Directionless L{Player}s should remain stationary.
        """
        position = Vector(2, 5, 3)
        player = self.makePlayer(position)
        self.advanceTime(10)
        self.assertEqual(player.getPosition(), position)

    def test_getPositionWithMovementAfterTimePasses(self):
        """
        Directed L{Player}s should change position.
        """
        v = Vector(3, 0, -2)
        player = self.makePlayer(v)
        player.setDirection(LEFT)
        self.advanceTime(1)
        self.assertEqual(player.getPosition(), Vector(v.x - 1, v.y, v.z))

    def test_greaterSpeedResultsInGreaterDisplacement(self):
        """
        A L{Player} which is moving more quickly should travel further.
        """
        v = Vector(2, 3, 0)
        speed = 5
        player = self.makePlayer(v, speed=speed)
        player.setDirection(RIGHT)
        self.advanceTime(1)
        p = player.getPosition()
        self.assertTrue(abs(p.x - v.x - speed) < _epsilon)
        self.assertTrue(abs(p.y - v.y) < _epsilon)
        self.assertTrue(abs(p.z - v.z) < _epsilon)

    def test_getPositionWithMovementAfterTimePassesTwice(self):
        """
        Twice-directed players should have an accurate position after each
        change in direction after some time passes.
        """
        v = Vector(3, 0, -2)
        player = self.makePlayer(v)
        player.setDirection(RIGHT)
        self.advanceTime(1)
        self.assertEqual(player.getPosition(), Vector(v.x + 1, v.y, v.z))

        player.setDirection(FORWARD)
        self.advanceTime(1)
        self.assertEqual(player.getPosition(), Vector(v.x + 1, v.y, v.z - 1))

    def test_getPositionFloats(self):
        """
        L{Player.getPosition} will returns C{float} values if the player's
        coordinates don't fall exactly onto integer values.
        """
        player = self.makePlayer(Vector(0, 0, 0))
        player.setDirection(FORWARD)
        self.advanceTime(0.5)
        self.assertEqual(player.getPosition(), Vector(0, 0, -0.5))

    def test_stop(self):
        """
        Setting the player's direction to C{None} makes the player cease moving.
        """
        x, y = 49, 27
        player = self.makePlayer(Vector(x, 0, y))
        player.setDirection(None)
        self.advanceTime(1)
        self.assertEqual(player.getPosition(), Vector(x, 0, y))
        player.setDirection(RIGHT)
        self.advanceTime(1)
        self.assertEqual(player.getPosition(), Vector(x + 1, 0, y))
        player.setDirection(None)
        self.advanceTime(1)
        self.assertEqual(player.getPosition(), Vector(x + 1, 0, y))

    def test_observeDirection(self):
        """
        Setting the player's direction should notify any observers registered
        with that player of the new direction.
        """
        position = Vector(6, 3, 2)
        player = self.makePlayer(position)
        observer = DirectionObserver()
        player.addObserver(observer)
        player.setDirection(FORWARD)
        self.assertEqual(observer.changes, [(player, position, FORWARD)])

    def test_getPositionInsideObserver(self):
        """
        L{Player.getPosition} should return an accurate value when called
        within an observer's C{directionChanged} callback.
        """
        position = Vector(1, 0, 1)
        player = self.makePlayer(position)
        player.setDirection(RIGHT)
        self.advanceTime(1)
        observer = DirectionObserver()
        player.addObserver(observer)
        player.setDirection(None)
        [(p, v, d)] = observer.changes
        self.assertIdentical(p, player)
        self.assertIsInstance(v, Vector)
        self.assertIdentical(d, None)
        # XXX Switch to decimal (seriously)
        self.assertTrue(abs(v.x - 2) < _epsilon)
        self.assertTrue(abs(v.y - 0) < _epsilon)
        self.assertTrue(abs(v.z - 1) < _epsilon)

    def test_turn(self):
        """
        L{Player.turn} rotates the player's perspective along the horizontal
        (that is, about the Y axis) and vertical (that is, about the X axis)
        planes.
        """
        player = self.makePlayer(Vector(0, 0, 0))
        player.turn(0, 1)
        self.assertEqual(player.orientation, Vector(0, 1, 0))
        player.turn(0, 2)
        self.assertEqual(player.orientation, Vector(0, 3, 0))
        player.turn(0, -4)
        self.assertEqual(player.orientation, Vector(0, -1, 0))
        player.turn(1, 0)
        self.assertEqual(player.orientation, Vector(1, -1, 0))
        player.turn(-2, 0)
        self.assertEqual(player.orientation, Vector(-1, -1, 0))
        player.turn(4, 0)
        self.assertEqual(player.orientation, Vector(3, -1, 0))

    def test_forwardMotionFollowsOrientation(self):
        """
        Motion in the forward direction translates the player's position in the
        direction they are facing.
        """
        player = self.makePlayer(Vector(0, 0, 0))
        player.turn(0, 90)
        player.setDirection(FORWARD)
        self.advanceTime(1)
        p = player.getPosition()
        self.assertTrue(abs(p.x - 1) < _epsilon)
        self.assertTrue(abs(p.y) < _epsilon)
        self.assertTrue(abs(p.z) < _epsilon)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.utils.decorators import apply_defaults
from airflow.contrib.kubernetes import kube_client, pod_generator, pod_launcher
from airflow.contrib.kubernetes.pod import Resources
from airflow.utils.state import State
from airflow.contrib.kubernetes.volume_mount import VolumeMount # noqa
from airflow.contrib.kubernetes.volume import Volume # noqa
from airflow.contrib.kubernetes.secret import Secret # noqa
# NOTE(review): these three names sit at module level but mirror the usual
# BaseOperator *class* attributes, and KubernetesPodOperator below defines
# its own template_fields -- they look orphaned / misplaced; confirm whether
# anything actually reads them before relying on (or removing) them.
template_fields = ('templates_dict',)
template_ext = tuple()
ui_color = '#ffefeb'
class KubernetesPodOperator(BaseOperator):
    """
    Execute a task in a Kubernetes Pod

    :param image: Docker image you wish to launch. Defaults to dockerhub.io,
        but fully qualified URLS will point to custom repositories
    :type image: str
    :param namespace: the namespace to run within kubernetes
    :type namespace: str
    :param cmds: entrypoint of the container. (templated)
        The docker images's entrypoint is used if this is not provide.
    :type cmds: list[str]
    :param arguments: arguments of the entrypoint. (templated)
        The docker image's CMD is used if this is not provided.
    :type arguments: list[str]
    :param image_pull_policy: Specify a policy to cache or always pull an image
    :type image_pull_policy: str
    :param image_pull_secrets: Any image pull secrets to be given to the pod.
        If more than one secret is required, provide a
        comma separated list: secret_a,secret_b
    :type image_pull_secrets: str
    :param volume_mounts: volumeMounts for launched pod
    :type volume_mounts: list[airflow.contrib.kubernetes.volume_mount.VolumeMount]
    :param volumes: volumes for launched pod. Includes ConfigMaps and PersistentVolumes
    :type volumes: list[airflow.contrib.kubernetes.volume.Volume]
    :param labels: labels to apply to the Pod
    :type labels: dict
    :param startup_timeout_seconds: timeout in seconds to startup the pod
    :type startup_timeout_seconds: int
    :param name: name of the task you want to run,
        will be used to generate a pod id
    :type name: str
    :param env_vars: Environment variables initialized in the container. (templated)
    :type env_vars: dict
    :param secrets: Kubernetes secrets to inject in the container,
        They can be exposed as environment vars or files in a volume.
    :type secrets: list[airflow.contrib.kubernetes.secret.Secret]
    :param in_cluster: run kubernetes client with in_cluster configuration
    :type in_cluster: bool
    :param cluster_context: context that points to kubernetes cluster.
        Ignored when in_cluster is True. If None, current-context is used.
    :type cluster_context: str
    :param get_logs: get the stdout of the container as logs of the tasks
    :type get_logs: bool
    :param affinity: A dict containing a group of affinity scheduling rules
    :type affinity: dict
    :param node_selectors: A dict containing a group of scheduling rules
    :type node_selectors: dict
    :param config_file: The path to the Kubernetes config file
    :type config_file: str
    :param do_xcom_push: If True, the content of the file
        /airflow/xcom/return.json in the container will also be pushed to an
        XCom when the container completes.
    :type do_xcom_push: bool
    :param hostnetwork: If True enable host networking on the pod
    :type hostnetwork: bool
    :param tolerations: A list of kubernetes tolerations
    :type tolerations: list tolerations
    """
    template_fields = ('cmds', 'arguments', 'env_vars', 'config_file')

    def execute(self, context):
        """
        Build, launch and monitor the pod described by this operator.

        :param context: Airflow task context (required by the BaseOperator
            contract; not read directly here).
        :return: the result produced by ``PodLauncher.run_pod`` (the xcom
            payload when ``do_xcom_push`` is enabled).
        :raises AirflowException: if client/pod setup fails or the pod does
            not finish in the SUCCESS state.
        """
        try:
            client = kube_client.get_kube_client(in_cluster=self.in_cluster,
                                                 cluster_context=self.cluster_context,
                                                 config_file=self.config_file)
            gen = pod_generator.PodGenerator()

            for mount in self.volume_mounts:
                gen.add_mount(mount)
            for volume in self.volumes:
                gen.add_volume(volume)

            pod = gen.make_pod(
                namespace=self.namespace,
                image=self.image,
                pod_id=self.name,
                cmds=self.cmds,
                arguments=self.arguments,
                labels=self.labels,
            )

            # Attributes not covered by make_pod() are set directly on the
            # generated pod object.
            pod.service_account_name = self.service_account_name
            pod.secrets = self.secrets
            pod.envs = self.env_vars
            pod.image_pull_policy = self.image_pull_policy
            pod.image_pull_secrets = self.image_pull_secrets
            pod.annotations = self.annotations
            pod.resources = self.resources
            pod.affinity = self.affinity
            pod.node_selectors = self.node_selectors
            pod.hostnetwork = self.hostnetwork
            pod.tolerations = self.tolerations

            launcher = pod_launcher.PodLauncher(kube_client=client,
                                                extract_xcom=self.do_xcom_push)
            try:
                (final_state, result) = launcher.run_pod(
                    pod,
                    startup_timeout=self.startup_timeout_seconds,
                    get_logs=self.get_logs)
            finally:
                # Clean the pod up even when run_pod raised, if requested.
                if self.is_delete_operator_pod:
                    launcher.delete_pod(pod)

            if final_state != State.SUCCESS:
                raise AirflowException(
                    'Pod returned a failure: {state}'.format(state=final_state)
                )
            return result
        except AirflowException as ex:
            # Re-wrap so callers always see a pod-launch failure message.
            raise AirflowException('Pod Launching failed: {error}'.format(error=ex))

    @apply_defaults
    def __init__(self,
                 namespace,
                 image,
                 name,
                 cmds=None,
                 arguments=None,
                 volume_mounts=None,
                 volumes=None,
                 env_vars=None,
                 secrets=None,
                 in_cluster=False,
                 cluster_context=None,
                 labels=None,
                 startup_timeout_seconds=120,
                 get_logs=True,
                 image_pull_policy='IfNotPresent',
                 annotations=None,
                 resources=None,
                 affinity=None,
                 config_file=None,
                 do_xcom_push=False,
                 node_selectors=None,
                 image_pull_secrets=None,
                 service_account_name="default",
                 is_delete_operator_pod=False,
                 hostnetwork=False,
                 tolerations=None,
                 *args,
                 **kwargs):
        """See the class docstring for parameter documentation."""
        super(KubernetesPodOperator, self).__init__(*args, **kwargs)
        self.image = image
        self.namespace = namespace
        self.cmds = cmds or []
        self.arguments = arguments or []
        self.labels = labels or {}
        self.startup_timeout_seconds = startup_timeout_seconds
        self.name = name
        self.env_vars = env_vars or {}
        self.volume_mounts = volume_mounts or []
        self.volumes = volumes or []
        self.secrets = secrets or []
        self.in_cluster = in_cluster
        self.cluster_context = cluster_context
        self.get_logs = get_logs
        self.image_pull_policy = image_pull_policy
        self.node_selectors = node_selectors or {}
        self.annotations = annotations or {}
        self.affinity = affinity or {}
        self.do_xcom_push = do_xcom_push
        # Reject the removed 'xcom_push' kwarg by name to steer callers to
        # 'do_xcom_push'.
        if kwargs.get('xcom_push') is not None:
            raise AirflowException("'xcom_push' was deprecated, use 'do_xcom_push' instead")
        self.resources = resources or Resources()
        self.config_file = config_file
        self.image_pull_secrets = image_pull_secrets
        self.service_account_name = service_account_name
        self.is_delete_operator_pod = is_delete_operator_pod
        self.hostnetwork = hostnetwork
        self.tolerations = tolerations or []
| |
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
"""
Service monitor to instantiate/scale/monitor services like firewall, LB, ...
"""
import sys
reload(sys)
sys.setdefaultencoding('UTF8')
import gevent
from gevent import monkey
monkey.patch_all(thread=not 'unittest' in sys.modules)
from cfgm_common.zkclient import ZookeeperClient
import requests
import ConfigParser
import cgitb
import cStringIO
import argparse
import socket
import os
import logging
import logging.handlers
from cfgm_common.imid import *
from cfgm_common import importutils
from cfgm_common import svc_info
from cfgm_common.vnc_kombu import VncKombuClient
from config_db import *
from cfgm_common.dependency_tracker import DependencyTracker
from pysandesh.sandesh_base import Sandesh, SandeshSystem
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from pysandesh.gen_py.process_info.ttypes import ConnectionStatus
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames
from vnc_api.vnc_api import *
import discoveryclient.client as client
from agent_manager import AgentManager
from db import ServiceMonitorDB
from logger import ServiceMonitorLogger
from loadbalancer_agent import LoadbalancerAgent
from snat_agent import SNATAgent
# zookeeper client connection -- module-global handle; presumably assigned
# during process startup outside this chunk (TODO confirm where it is set)
_zookeeper_client = None
class SvcMonitor(object):
"""
data + methods used/referred to by ssrc and arc greenlets
"""
_REACTION_MAP = {
"service_appliance_set": {
'self': [],
'service_appliance': []
},
"service_appliance": {
'self': ['service_appliance_set'],
'service_appliance_set': []
},
"loadbalancer_pool": {
'self': [],
'virtual_ip': [],
'loadbalancer_member': [],
'loadbalancer_healthmonitor': [],
},
"loadbalancer_member": {
'self': ['loadbalancer_pool'],
'loadbalancer_pool': []
},
"virtual_ip": {
'self': ['loadbalancer_pool'],
'loadbalancer_pool': []
},
"loadbalancer_healthmonitor": {
'self': ['loadbalancer_pool'],
'loadbalancer_pool': []
},
"service_instance": {
'self': ['virtual_machine'],
'virtual_machine': []
},
"instance_ip": {
'self': [],
},
"floating_ip": {
'self': [],
},
"service_template": {
'self': [],
},
"physical_router": {
'self': [],
},
"physical_interface": {
'self': [],
},
"logical_interface": {
'self': [],
},
"logical_router": {
'self': [],
},
"virtual_network": {
'self': [],
},
"virtual_machine": {
'self': ['virtual_machine_interface'],
'service_instance': [],
'virtual_machine_interface': [],
},
"virtual_machine_interface": {
'self': ['interface_route_table', 'virtual_machine'],
'interface_route_table': [],
'virtual_machine': [],
},
"interface_route_table": {
'self': [],
'virtual_machine_interface': [],
},
"project": {
'self': [],
},
"logical_router": {
'self': [],
},
}
    def __init__(self, args=None):
        """
        Wire up the service monitor's core plumbing: discovery client,
        logger, catch-all error log file, and the rabbit/cassandra
        connections (via _connect_rabbit).

        :param args: parsed options namespace; must carry discovery, rabbit
            and trace-file settings.
        """
        self._args = args

        # initialize discovery client (only when a discovery server is
        # configured)
        self._disc = None
        if self._args.disc_server_ip and self._args.disc_server_port:
            self._disc = client.DiscoveryClient(self._args.disc_server_ip,
                                                self._args.disc_server_port,
                                                ModuleNames[Module.SVC_MONITOR])

        # initialize logger
        self.logger = ServiceMonitorLogger(self._disc, args)

        # rotating log file for catchall errors
        self._err_file = self._args.trace_file
        self._svc_err_logger = logging.getLogger('SvcErrLogger')
        self._svc_err_logger.setLevel(logging.ERROR)
        try:
            # the open() probe verifies the trace file is writable before
            # attaching the rotating handler
            with open(self._err_file, 'a'):
                handler = logging.handlers.RotatingFileHandler(
                    self._err_file, maxBytes=64*1024, backupCount=2)
                self._svc_err_logger.addHandler(handler)
        except IOError:
            self.logger.log_warning("Failed to open trace file %s" %
                                    self._err_file)

        # Connect to Rabbit and Initialize cassandra connection
        self._connect_rabbit()
    def _connect_rabbit(self):
        """
        Subscribe to config-change notifications over rabbit and initialize
        the cassandra-backed object cache (DBBaseSM).
        """
        rabbit_server = self._args.rabbit_server
        rabbit_port = self._args.rabbit_port
        rabbit_user = self._args.rabbit_user
        rabbit_password = self._args.rabbit_password
        rabbit_vhost = self._args.rabbit_vhost
        rabbit_ha_mode = self._args.rabbit_ha_mode

        # Notifications that arrive before the initial resync completes are
        # held back on this event (see _vnc_subscribe_callback).
        self._db_resync_done = gevent.event.Event()

        q_name = 'svc_mon.%s' % (socket.gethostname())
        self._vnc_kombu = VncKombuClient(rabbit_server, rabbit_port,
                                         rabbit_user, rabbit_password,
                                         rabbit_vhost, rabbit_ha_mode,
                                         q_name, self._vnc_subscribe_callback,
                                         self.logger.log)

        self._cassandra = ServiceMonitorDB(self._args, self.logger)
        DBBaseSM.init(self, self.logger, self._cassandra)
    # end _connect_rabbit
    def _vnc_subscribe_callback(self, oper_info):
        """
        Rabbit notification entry point: blocks until the initial DB resync
        has completed, then dispatches to _vnc_subscribe_actions.

        :param oper_info: notification dict carrying 'oper', 'type', 'uuid'.
        """
        self._db_resync_done.wait()
        try:
            self._vnc_subscribe_actions(oper_info)
        except Exception:
            # catch-all: a bad notification must never kill the consumer
            cgitb_error_log(self)
def _vnc_subscribe_actions(self, oper_info):
try:
msg = "Notification Message: %s" % (pformat(oper_info))
self.logger.log_debug(msg)
obj_type = oper_info['type'].replace('-', '_')
obj_class = DBBaseSM.get_obj_type_map().get(obj_type)
if obj_class is None:
return
if oper_info['oper'] == 'CREATE' or oper_info['oper'] == 'UPDATE':
dependency_tracker = DependencyTracker(
DBBaseSM.get_obj_type_map(), self._REACTION_MAP)
obj_id = oper_info['uuid']
obj = obj_class.get(obj_id)
if obj is not None:
dependency_tracker.evaluate(obj_type, obj)
else:
obj = obj_class.locate(obj_id)
obj.update()
dependency_tracker.evaluate(obj_type, obj)
elif oper_info['oper'] == 'DELETE':
obj_id = oper_info['uuid']
obj = obj_class.get(obj_id)
if obj is None:
return
dependency_tracker = DependencyTracker(
DBBaseSM.get_obj_type_map(), self._REACTION_MAP)
dependency_tracker.evaluate(obj_type, obj)
obj_class.delete(obj_id)
else:
# unknown operation
self.logger.log_error('Unknown operation %s' %
oper_info['oper'])
return
if obj is None:
self.logger.log_error('Error while accessing %s uuid %s' % (
obj_type, obj_id))
return
except Exception:
cgitb_error_log(self)
for sas_id in dependency_tracker.resources.get('service_appliance_set', []):
sas_obj = ServiceApplianceSetSM.get(sas_id)
if sas_obj is not None:
sas_obj.add()
for lb_pool_id in dependency_tracker.resources.get('loadbalancer_pool', []):
lb_pool = LoadbalancerPoolSM.get(lb_pool_id)
if lb_pool is not None:
lb_pool.add()
for si_id in dependency_tracker.resources.get('service_instance', []):
si = ServiceInstanceSM.get(si_id)
if si:
si.state = 'launch'
self._create_service_instance(si)
else:
self.logger.log_info("Deleting SI %s" % si_id)
for vm_id in dependency_tracker.resources.get(
'virtual_machine', []):
vm = VirtualMachineSM.get(vm_id)
self._delete_service_instance(vm)
self.logger.log_info("SI %s deletion succeed" % si_id)
for vn_id in dependency_tracker.resources.get('virtual_network', []):
vn = VirtualNetworkSM.get(vn_id)
if vn:
for si_id in ServiceInstanceSM:
si = ServiceInstanceSM.get(si_id)
intf_list = []
if si.params:
intf_list = si.params.get('interface_list', [])
for intf in intf_list:
if (':').join(vn.fq_name) in intf.values():
self._create_service_instance(si)
for vmi_id in dependency_tracker.resources.get('virtual_machine_interface', []):
vmi = VirtualMachineInterfaceSM.get(vmi_id)
if vmi:
for vm_id in dependency_tracker.resources.get(
'virtual_machine', []):
vm = VirtualMachineSM.get(vm_id)
if vm:
self.check_link_si_to_vm(vm, vmi)
else:
for irt_id in dependency_tracker.resources.get(
'interface_route_table', []):
self._delete_interface_route_table(irt_id)
for fip_id in dependency_tracker.resources.get('floating_ip', []):
fip = FloatingIpSM.get(fip_id)
if fip:
for vmi_id in fip.virtual_machine_interfaces:
vmi = VirtualMachineInterfaceSM.get(vmi_id)
if vmi and vmi.virtual_ip:
self.netns_manager.add_fip_to_vip_vmi(vmi, fip)
for lr_id in dependency_tracker.resources.get('logical_router', []):
lr = LogicalRouterSM.get(lr_id)
if lr:
self.snat_agent.update_snat_instance(lr)
    def post_init(self, vnc_lib, args=None):
        """
        Second-phase initialization, run once the API server connection is
        available: build the instance managers and agents, resync the cache
        from cassandra, create the default service templates, run the
        upgrade pass and (re)launch known services.

        :param vnc_lib: connected VncApi client.
        :param args: parsed options namespace (self._args is used throughout;
            this parameter mirrors the caller's signature).
        """
        # api server
        self._vnc_lib = vnc_lib

        self._nova_client = importutils.import_object(
            'svc_monitor.nova_client.ServiceMonitorNovaClient',
            self._args, self.logger)

        # agent manager
        self._agent_manager = AgentManager()

        # load vrouter scheduler
        self.vrouter_scheduler = importutils.import_object(
            self._args.si_netns_scheduler_driver,
            self._vnc_lib, self._nova_client,
            self._args)

        # load virtual machine instance manager
        self.vm_manager = importutils.import_object(
            'svc_monitor.virtual_machine_manager.VirtualMachineManager',
            self._vnc_lib, self._cassandra, self.logger,
            self.vrouter_scheduler, self._nova_client, self._agent_manager,
            self._args)

        # load network namespace instance manager
        self.netns_manager = importutils.import_object(
            'svc_monitor.instance_manager.NetworkNamespaceManager',
            self._vnc_lib, self._cassandra, self.logger,
            self.vrouter_scheduler, self._nova_client, self._agent_manager,
            self._args)

        # load a vrouter instance manager
        self.vrouter_manager = importutils.import_object(
            'svc_monitor.vrouter_instance_manager.VRouterInstanceManager',
            self._vnc_lib, self._cassandra, self.logger,
            self.vrouter_scheduler, self._nova_client,
            self._agent_manager, self._args)

        # load a loadbalancer agent
        self.loadbalancer_agent = LoadbalancerAgent(self, self._vnc_lib,
                                                    self._cassandra, self._args)
        self._agent_manager.register_agent(self.loadbalancer_agent)

        # load a snat agent
        self.snat_agent = SNATAgent(self, self._vnc_lib,
                                    self._cassandra, self._args)
        self._agent_manager.register_agent(self.snat_agent)

        # Read the cassandra and populate the entry in ServiceMonitor DB
        self.sync_sm()

        # create default analyzer template
        self._create_default_template('analyzer-template', 'analyzer',
                                      flavor='m1.medium',
                                      image_name='analyzer')
        # create default NAT template
        self._create_default_template('nat-template', 'firewall',
                                      svc_mode='in-network-nat',
                                      image_name='analyzer',
                                      flavor='m1.medium')
        # create default netns SNAT template
        self._create_default_template('netns-snat-template', 'source-nat',
                                      svc_mode='in-network-nat',
                                      hypervisor_type='network-namespace',
                                      scaling=True)
        # create default loadbalancer template
        self._create_default_template('haproxy-loadbalancer-template', 'loadbalancer',
                                      svc_mode='in-network-nat',
                                      hypervisor_type='network-namespace',
                                      scaling=True)
        self._create_default_template('docker-template', 'firewall',
                                      svc_mode='transparent',
                                      image_name="ubuntu",
                                      hypervisor_type='vrouter-instance',
                                      vrouter_instance_type='docker',
                                      instance_data={
                                          "command": "/bin/bash"
                                      })

        # upgrade handling
        self.upgrade()

        # check services
        self.launch_services()

        # release notifications held back in _vnc_subscribe_callback
        self._db_resync_done.set()
def upgrade(self):
for si in ServiceInstanceSM.values():
st = ServiceTemplateSM.get(si.service_template)
if not st:
continue
for vm_id in si.virtual_machines:
vm = VirtualMachineSM.get(vm_id)
if vm.virtualization_type:
continue
nova_vm = self._nova_client.oper('servers', 'get',
si.proj_name, id=vm_id)
if not nova_vm:
continue
if not nova_vm.name.split('__')[-1].isdigit():
continue
si_obj = ServiceInstance()
si_obj.name = si.name
si_obj.fq_name = si.fq_name
index = int(nova_vm.name.split('__')[-1]) - 1
instance_name = self.vm_manager._get_instance_name(
si_obj, index)
vm_obj = VirtualMachine()
vm_obj.uuid = vm_id
vm_obj.fq_name = [vm_id]
vm_obj.set_display_name(instance_name + '__' +
st.virtualization_type)
try:
self._vnc_lib.virtual_machine_update(vm_obj)
except Exception:
pass
    def launch_services(self):
        """(Re)create every known service instance at startup."""
        for si in ServiceInstanceSM.values():
            self._create_service_instance(si)
def sync_sm(self):
vn_set = set()
vmi_set = set()
iip_set = set()
for obj in LoadbalancerPoolSM.list_obj():
try:
lb_pool = LoadbalancerPoolSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for lb pool %s" %
(obj['uuid']))
continue
if lb_pool.virtual_machine_interface:
vmi_set.add(lb_pool.virtual_machine_interface)
for obj in LoadbalancerMemberSM.list_obj():
try:
lb_pool_member = LoadbalancerMemberSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for lb member %s" %
(obj['uuid']))
continue
for obj in VirtualIpSM.list_obj():
try:
virtual_ip = VirtualIpSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for lb vip %s" %
(obj['uuid']))
continue
if virtual_ip.virtual_machine_interface:
vmi_set.add(virtual_ip.virtual_machine_interface)
for obj in HealthMonitorSM.list_obj():
try:
lb_hm = HealthMonitorSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for lb healthmonitor %s" %
(obj['uuid']))
continue
for obj in ServiceInstanceSM.list_obj():
try:
si = ServiceInstanceSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for service instance %s" %
(obj['uuid']))
continue
for obj in ServiceTemplateSM.list_obj():
try:
st = ServiceTemplateSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for service template %s" %
(obj['uuid']))
continue
for obj in VirtualNetworkSM.list_obj():
try:
vn = VirtualNetworkSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for virtual network %s" %
(obj['uuid']))
continue
vmi_set |= vn.virtual_machine_interfaces
for obj in PhysicalInterfaceSM.list_obj():
try:
ifd = PhysicalInterfaceSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for physical interface %s" %
(obj['uuid']))
continue
for obj in LogicalInterfaceSM.list_obj():
try:
ifl = LogicalInterfaceSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for logical interface %s" %
(obj['uuid']))
continue
if ifl.virtual_machine_interface:
vmi_set.add(ifl.virtual_machine_interface)
for obj in PhysicalRouterSM.list_obj():
try:
pr = PhysicalRouterSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for physical router %s" %
(obj['uuid']))
continue
for obj in VirtualRouterSM.list_obj():
try:
vr = VirtualRouterSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for virtual router %s" %
(obj['uuid']))
continue
for obj in VirtualMachineInterfaceSM.list_obj():
try:
vmi = VirtualMachineInterfaceSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for virtual machine interface %s" %
(obj['uuid']))
continue
if vmi.instance_ip:
iip_set.add(vmi.instance_ip)
for obj in InterfaceRouteTableSM.list_obj():
try:
irt = InterfaceRouteTableSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for interface route table %s" %
(obj['uuid']))
continue
for obj in ProjectSM.list_obj():
try:
prj = ProjectSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for project %s" %
(obj['uuid']))
continue
for obj in ServiceApplianceSetSM.list_obj():
try:
sas = ServiceApplianceSetSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for service appliance set %s" %
(obj['uuid']))
continue
for obj in ServiceApplianceSM.list_obj():
try:
sa = ServiceApplianceSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for service appliance %s" %
(obj['uuid']))
continue
for obj in DomainSM.list_obj():
try:
DomainSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for domain %s" %
(obj['uuid']))
continue
for obj in InstanceIpSM.list_obj():
try:
InstanceIpSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for instance ip %s" %
(obj['uuid']))
continue
for obj in FloatingIpSM.list_obj():
try:
FloatingIpSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for floating ip %s" %
(obj['uuid']))
continue
for obj in SecurityGroupSM.list_obj():
try:
SecurityGroupSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for security group %s" %
(obj['uuid']))
continue
for obj in VirtualMachineSM.list_obj():
try:
vm = VirtualMachineSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for virtual machine %s" %
(obj['uuid']))
continue
if vm.service_instance:
continue
for vmi_id in vm.virtual_machine_interfaces:
vmi = VirtualMachineInterfaceSM.get(vmi_id)
if not vmi:
continue
self.check_link_si_to_vm(vm, vmi)
for obj in LogicalRouterSM.list_obj():
try:
LogicalRouterSM.locate(obj['uuid'], obj)
except NoIdError:
self.logger.log_error("db entry missing for logical router %s" %
(obj['uuid']))
continue
# Load the loadbalancer driver
self.loadbalancer_agent.load_drivers()
for lb_pool in LoadbalancerPoolSM.values():
lb_pool.add()
# Audit the lb pools
self.loadbalancer_agent.audit_lb_pools()
# Audit the SNAT instances
self.snat_agent.audit_snat_instances()
# end sync_sm
# create service template
def _create_default_template(self, st_name, svc_type, svc_mode=None,
                             hypervisor_type='virtual-machine',
                             image_name=None, flavor=None, scaling=False,
                             vrouter_instance_type=None,
                             instance_data=None):
    """Create a service template under default-domain if absent.

    No-op (with a log line) when a template with the same fq_name
    already exists; logs an error and returns when default-domain
    cannot be found in the local cache.
    """
    domain_name = 'default-domain'
    domain_fq_name = [domain_name]
    st_fq_name = [domain_name, st_name]
    self.logger.log_info("Creating %s %s hypervisor %s" %
                         (domain_name, st_name, hypervisor_type))

    # Build a vnc_api Domain object from the cached DomainSM entry.
    domain_obj = None
    for domain in DomainSM.values():
        if domain.fq_name == domain_fq_name:
            domain_obj = Domain()
            domain_obj.uuid = domain.uuid
            domain_obj.fq_name = domain_fq_name
            break
    if not domain_obj:
        self.logger.log_error("%s domain not found" % (domain_name))
        return

    for st in ServiceTemplateSM.values():
        if st.fq_name == st_fq_name:
            self.logger.log_info("%s exists uuid %s" %
                                 (st.name, str(st.uuid)))
            return

    svc_properties = ServiceTemplateType()
    svc_properties.set_service_type(svc_type)
    svc_properties.set_service_mode(svc_mode)
    svc_properties.set_service_virtualization_type(hypervisor_type)
    svc_properties.set_image_name(image_name)
    svc_properties.set_flavor(flavor)
    svc_properties.set_ordered_interfaces(True)
    svc_properties.set_service_scaling(scaling)

    # set interface list: [service_interface_type, shared_ip]
    if svc_type == 'analyzer':
        if_list = [['left', False]]
    elif hypervisor_type == 'network-namespace':
        if_list = [['right', True], ['left', True]]
    else:
        if_list = [
            ['management', False], ['left', False], ['right', False]]

    for itf in if_list:
        if_type = ServiceTemplateInterfaceType(shared_ip=itf[1])
        if_type.set_service_interface_type(itf[0])
        svc_properties.add_interface_type(if_type)

    if vrouter_instance_type is not None:
        svc_properties.set_vrouter_instance_type(vrouter_instance_type)

    if instance_data is not None:
        svc_properties.set_instance_data(
            json.dumps(instance_data, separators=(',', ':')))

    # BUG FIX: the parent must be the vnc_api Domain object built above
    # (domain_obj), not the DomainSM cache entry left over in the loop
    # variable `domain`.
    st_obj = ServiceTemplate(name=st_name, domain_obj=domain_obj)
    st_obj.set_service_template_properties(svc_properties)
    try:
        st_uuid = self._vnc_lib.service_template_create(st_obj)
    except Exception as e:
        self.logger.log_error("%s create failed with error %s" %
                              (st_name, str(e)))
        return

    # Create the service template in local db
    ServiceTemplateSM.locate(st_uuid)
    self.logger.log_info("%s created with uuid %s" %
                         (st_name, str(st_uuid)))
#_create_default_analyzer_template
def check_link_si_to_vm(self, vm, vmi):
    """Link an orphaned VM back to its service instance, deduced from
    the interface name ("<domain>__<project>__<si>__<index+1>__...")."""
    if vm.service_instance:
        return
    if not vmi.if_type:
        return

    name_parts = vmi.name.split('__')
    si_fq_name = name_parts[0:3]
    instance_index = int(name_parts[3]) - 1

    for si in ServiceInstanceSM.values():
        if si.fq_name != si_fq_name:
            continue
        st = ServiceTemplateSM.get(si.service_template)
        self.vm_manager.link_si_to_vm(si, st, instance_index, vm.uuid)
        return
def _create_service_instance(self, si):
    """Dispatch SI creation to the manager matching the template's
    virtualization type; errors are logged via cgitb, never raised."""
    if si.state == 'active':
        return
    st = ServiceTemplateSM.get(si.service_template)
    if not st:
        self.logger.log_error("template not found for %s" %
                              ((':').join(si.fq_name)))
        return

    self.logger.log_info("Creating SI %s (%s)" %
                         ((':').join(si.fq_name), st.virtualization_type))
    manager_attr = {
        'virtual-machine': 'vm_manager',
        'network-namespace': 'netns_manager',
        'vrouter-instance': 'vrouter_manager',
    }.get(st.virtualization_type)
    try:
        if manager_attr is None:
            self.logger.log_error("Unknown virt type: %s" %
                                  st.virtualization_type)
        else:
            getattr(self, manager_attr).create_service(st, si)
    except Exception:
        cgitb_error_log(self)

    # NOTE: launch bookkeeping and the success log run even when
    # create_service raised (exception swallowed above) -- this mirrors
    # the original control flow.
    si.launch_count += 1
    self.logger.log_info("SI %s creation success" % (':').join(si.fq_name))
def _delete_service_instance(self, vm):
    """Tear down the service VM and emit a DELETE UVE for its SI.

    Always returns True; manager failures are logged, not raised.
    """
    self.logger.log_info("Deleting VM %s %s" %
                         ((':').join(vm.proj_fq_name), vm.uuid))
    try:
        virt_type = vm.virtualization_type
        if virt_type == svc_info.get_vm_instance_type():
            self.vm_manager.delete_service(vm)
        elif virt_type == svc_info.get_netns_instance_type():
            self.netns_manager.delete_service(vm)
        elif virt_type == 'vrouter-instance':
            self.vrouter_manager.delete_service(vm)
    except Exception:
        cgitb_error_log(self)

    # generate UVE: the SI fq_name is the VM display name minus its two
    # trailing "__"-separated components.
    si_fq_str = ':'.join(vm.display_name.split('__')[:-2])
    self.logger.uve_svc_instance(si_fq_str, status='DELETE',
                                 vms=[{'uuid': vm.uuid}])
    return True
def _relaunch_service_instance(self, si):
    """Flag *si* as relaunching, then drive the creation path again."""
    si.state = 'relaunch'
    self._create_service_instance(si)
def _check_service_running(self, si):
    """Return the manager's health verdict for *si* (truthy == running).

    Robustness fixes vs. the original: a missing template or an unknown
    virtualization type now yields False instead of raising
    (AttributeError on ``st.virtualization_type`` / UnboundLocalError
    on ``status``), so the caller's relaunch logic keeps working.
    """
    st = ServiceTemplateSM.get(si.service_template)
    if not st:
        # Template vanished from the cache -- cannot be running.
        return False
    if st.virtualization_type == 'virtual-machine':
        status = self.vm_manager.check_service(si)
    elif st.virtualization_type == 'network-namespace':
        status = self.netns_manager.check_service(si)
    elif st.virtualization_type == 'vrouter-instance':
        status = self.vrouter_manager.check_service(si)
    else:
        # Unknown type: previously this fell through and raised
        # UnboundLocalError; treat as not running instead.
        status = False
    return status
def _delete_interface_route_table(self, irt_uuid):
    """Remove the interface route table from the API server and the
    local cache; absent or still-referenced objects are ignored."""
    try:
        self._vnc_lib.interface_route_table_delete(id=irt_uuid)
        InterfaceRouteTableSM.delete(irt_uuid)
    except (NoIdError, RefsExistError):
        # Already gone, or something still points at it -- best effort.
        pass
def _delete_shared_vn(self, vn_uuid):
    """Delete a shared service VN, tolerating absent/in-use networks."""
    try:
        self.logger.log_info("Deleting vn %s" % (vn_uuid))
        self._vnc_lib.virtual_network_delete(id=vn_uuid)
        VirtualNetworkSM.delete(vn_uuid)
    except (NoIdError, RefsExistError):
        # Missing or still referenced: nothing more to do.
        pass
@staticmethod
def reset():
    """Clear every cached DB object type registered with DBBaseSM."""
    for obj_cls in DBBaseSM.get_obj_type_map().values():
        obj_cls.reset()
def timer_callback(monitor):
    """Periodic health pass: delete service VMs whose SI is gone,
    relaunch unhealthy or under-provisioned SIs, and reap shared
    service VNs in projects with no service instances left."""
    # delete vms without si
    orphan_vms = [vm for vm in VirtualMachineSM.values()
                  if not ServiceInstanceSM.get(vm.service_instance)
                  and vm.virtualization_type]
    for vm in orphan_vms:
        monitor._delete_service_instance(vm)

    # check status of service (snapshot ids: relaunch may mutate dict)
    for si_id in list(ServiceInstanceSM._dict.keys()):
        si = ServiceInstanceSM.get(si_id)
        if not si or not si.launch_count:
            continue
        if not monitor._check_service_running(si):
            monitor._relaunch_service_instance(si)
        if si.max_instances != len(si.virtual_machines):
            monitor._relaunch_service_instance(si)

    # check vns to be deleted
    for project in ProjectSM.values():
        if project.service_instances:
            continue
        for vn_id in list(project.virtual_networks):
            vn = VirtualNetworkSM.get(vn_id)
            if not vn or vn.virtual_machine_interfaces:
                continue
            if (vn.name in svc_info.get_shared_vn_list()
                    or vn.name.startswith(svc_info.get_snat_left_vn_prefix())):
                monitor._delete_shared_vn(vn.uuid)
def launch_timer(monitor):
    """Run timer_callback forever at the VM health-check interval.

    Callback failures are captured via cgitb and logged; the loop
    itself never dies.
    """
    while True:
        gevent.sleep(svc_info.get_vm_health_interval())
        try:
            timer_callback(monitor)
        except Exception:
            cgitb_error_log(monitor)
def cgitb_error_log(monitor):
    """Format the in-flight exception with cgitb (text mode) and send it
    to the monitor's logger at SYS_ERR level."""
    buf = cStringIO.StringIO()
    cgitb.Hook(file=buf, format="text").handle(sys.exc_info())
    monitor.logger.log(buf.getvalue(), level=SandeshLevel.SYS_ERR)
def parse_args(args_str):
    '''
    Eg. python svc_monitor.py --ifmap_server_ip 192.168.1.17
                         --ifmap_server_port 8443
                         --ifmap_username test
                         --ifmap_password test
                         --rabbit_server localhost
                         --rabbit_port 5672
                         --rabbit_user guest
                         --rabbit_password guest
                         --cassandra_server_list 10.1.2.3:9160
                         --api_server_ip 10.1.2.3
                         --api_server_port 8082
                         --zk_server_ip 10.1.2.3
                         --zk_server_port 2181
                         --collectors 127.0.0.1:8086
                         --disc_server_ip 127.0.0.1
                         --disc_server_port 5998
                         --http_server_port 8090
                         --log_local
                         --log_level SYS_DEBUG
                         --log_category test
                         --log_file <stdout>
                         --trace_file /var/log/contrail/svc-monitor.err
                         --use_syslog
                         --syslog_facility LOG_USER
                         --cluster_id <testbed-name>
                         [--region_name <name>]
                         [--reset_config]

    Layering (lowest to highest precedence): hard-coded defaults below,
    then values read from any -c/--conf_file ini sections, then
    command-line flags.
    '''
    # Source any specified config/ini file
    # Turn off help, so we show all options in response to -h
    conf_parser = argparse.ArgumentParser(add_help=False)

    conf_parser.add_argument("-c", "--conf_file", action='append',
                             help="Specify config file", metavar="FILE")
    args, remaining_argv = conf_parser.parse_known_args(args_str.split())

    # Hard-coded fallbacks for the [DEFAULTS] ini section.
    defaults = {
        'rabbit_server': 'localhost',
        'rabbit_port': '5672',
        'rabbit_user': 'guest',
        'rabbit_password': 'guest',
        'rabbit_vhost': None,
        'rabbit_ha_mode': False,
        'ifmap_server_ip': '127.0.0.1',
        'ifmap_server_port': '8443',
        'ifmap_username': 'test2',
        'ifmap_password': 'test2',
        'cassandra_server_list': '127.0.0.1:9160',
        'api_server_ip': '127.0.0.1',
        'api_server_port': '8082',
        'zk_server_ip': '127.0.0.1',
        'zk_server_port': '2181',
        'collectors': None,
        'disc_server_ip': None,
        'disc_server_port': None,
        'http_server_port': '8088',
        'log_local': False,
        'log_level': SandeshLevel.SYS_DEBUG,
        'log_category': '',
        'log_file': Sandesh._DEFAULT_LOG_FILE,
        'trace_file': '/var/log/contrail/svc-monitor.err',
        'use_syslog': False,
        'syslog_facility': Sandesh._DEFAULT_SYSLOG_FACILITY,
        'region_name': None,
        'cluster_id': '',
        'logging_conf': '',
        'logger_class': None,
        'sandesh_send_rate_limit': SandeshSystem.get_sandesh_send_rate_limit(),
    }
    # Fallbacks for the [SECURITY] ini section (ifmap certs).
    secopts = {
        'use_certs': False,
        'keyfile': '',
        'certfile': '',
        'ca_certs': '',
        'ifmap_certauth_port': "8444",
    }
    # Fallbacks for the [KEYSTONE] ini section.
    ksopts = {
        'auth_host': '127.0.0.1',
        'auth_protocol': 'http',
        'auth_port': '5000',
        'auth_version': 'v2.0',
        'auth_insecure': True,
        'admin_user': 'user1',
        'admin_password': 'password1',
        'admin_tenant_name': 'default-domain'
    }
    # Fallbacks for the [SCHEDULER] ini section.
    schedops = {
        'si_netns_scheduler_driver': \
            'svc_monitor.scheduler.vrouter_scheduler.RandomScheduler',
        'analytics_server_ip': '127.0.0.1',
        'analytics_server_port': '8081',
        'availability_zone': None,
        'netns_availability_zone': None,
    }
    # Fallbacks for the [CASSANDRA] ini section.
    cassandraopts = {
        'cassandra_user': None,
        'cassandra_password': None,
    }

    config = ConfigParser.SafeConfigParser()
    if args.conf_file:
        config.read(args.conf_file)
        defaults.update(dict(config.items("DEFAULTS")))
        # Security options only apply when certs are explicitly enabled.
        if ('SECURITY' in config.sections() and
                'use_certs' in config.options('SECURITY')):
            if config.getboolean('SECURITY', 'use_certs'):
                secopts.update(dict(config.items("SECURITY")))
        if 'KEYSTONE' in config.sections():
            ksopts.update(dict(config.items("KEYSTONE")))
        if 'SCHEDULER' in config.sections():
            schedops.update(dict(config.items("SCHEDULER")))
        if 'CASSANDRA' in config.sections():
            cassandraopts.update(dict(config.items('CASSANDRA')))

    # Override with CLI options
    # Don't suppress add_help here so it will handle -h
    parser = argparse.ArgumentParser(
        # Inherit options from config_parser
        parents=[conf_parser],
        # script description with -h/--help
        description=__doc__,
        # Don't mess with format of description
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    defaults.update(secopts)
    defaults.update(ksopts)
    defaults.update(schedops)
    defaults.update(cassandraopts)
    parser.set_defaults(**defaults)
    parser.add_argument(
        "--ifmap_server_ip", help="IP address of ifmap server")
    parser.add_argument("--ifmap_server_port", help="Port of ifmap server")
    parser.add_argument("--ifmap_username",
                        help="Username known to ifmap server")
    parser.add_argument("--ifmap_password",
                        help="Password known to ifmap server")
    parser.add_argument(
        "--cassandra_server_list",
        help="List of cassandra servers in IP Address:Port format",
        nargs='+')
    parser.add_argument(
        "--reset_config", action="store_true",
        help="Warning! Destroy previous configuration and start clean")
    parser.add_argument("--api_server_ip",
                        help="IP address of API server")
    parser.add_argument("--api_server_port",
                        help="Port of API server")
    parser.add_argument("--collectors",
                        help="List of VNC collectors in ip:port format",
                        nargs="+")
    parser.add_argument("--disc_server_ip",
                        help="IP address of the discovery server")
    parser.add_argument("--disc_server_port",
                        help="Port of the discovery server")
    parser.add_argument("--http_server_port",
                        help="Port of local HTTP server")
    parser.add_argument(
        "--log_local", action="store_true",
        help="Enable local logging of sandesh messages")
    parser.add_argument(
        "--log_level",
        help="Severity level for local logging of sandesh messages")
    parser.add_argument(
        "--log_category",
        help="Category filter for local logging of sandesh messages")
    parser.add_argument("--log_file",
                        help="Filename for the logs to be written to")
    parser.add_argument("--trace_file", help="Filename for the error "
                        "backtraces to be written to")
    parser.add_argument("--use_syslog", action="store_true",
                        help="Use syslog for logging")
    parser.add_argument("--syslog_facility",
                        help="Syslog facility to receive log lines")
    parser.add_argument("--admin_user",
                        help="Name of keystone admin user")
    parser.add_argument("--admin_password",
                        help="Password of keystone admin user")
    parser.add_argument("--admin_tenant_name",
                        help="Tenant name for keystone admin user")
    parser.add_argument("--region_name",
                        help="Region name for openstack API")
    parser.add_argument("--cluster_id",
                        help="Used for database keyspace separation")
    parser.add_argument(
        "--logging_conf",
        help=("Optional logging configuration file, default: None"))
    parser.add_argument(
        "--logger_class",
        help=("Optional external logger class, default: None"))
    parser.add_argument("--cassandra_user",
                        help="Cassandra user name")
    parser.add_argument("--cassandra_password",
                        help="Cassandra password")
    parser.add_argument("--sandesh_send_rate_limit", type=int,
                        help="Sandesh send rate limit in messages/sec.")
    args = parser.parse_args(remaining_argv)
    args.config_sections = config
    # Ini-file values arrive as whitespace-separated strings; normalize
    # them to lists like the CLI nargs='+' options produce.
    if type(args.cassandra_server_list) is str:
        args.cassandra_server_list = args.cassandra_server_list.split()
    if type(args.collectors) is str:
        args.collectors = args.collectors.split()
    # Literal "none" (any case) from ini files means "unset".
    if args.region_name and args.region_name.lower() == 'none':
        args.region_name = None
    if args.availability_zone and args.availability_zone.lower() == 'none':
        args.availability_zone = None
    if args.netns_availability_zone and \
            args.netns_availability_zone.lower() == 'none':
        args.netns_availability_zone = None
    return args
def run_svc_monitor(args=None):
    """Instantiate the SvcMonitor, block until the API server answers,
    then run the health-check timer loop forever."""
    monitor = SvcMonitor(args)
    monitor._zookeeper_client = _zookeeper_client

    # Retry till API server is up
    monitor.logger.api_conn_status_update(ConnectionStatus.INIT)
    vnc_api = None
    while vnc_api is None:
        try:
            vnc_api = VncApi(
                args.admin_user, args.admin_password, args.admin_tenant_name,
                args.api_server_ip, args.api_server_port)
            monitor.logger.api_conn_status_update(ConnectionStatus.UP)
        except requests.exceptions.ConnectionError as e:
            monitor.logger.api_conn_status_update(ConnectionStatus.DOWN, str(e))
            time.sleep(3)
        except ResourceExhaustionError:  # haproxy throws 503
            time.sleep(3)

    monitor.post_init(vnc_api, args)
    timer_task = gevent.spawn(launch_timer, monitor)
    gevent.joinall([timer_task])
def main(args_str=None):
    """Parse arguments, set up zookeeper, and enter master election
    (run_svc_monitor runs only on the elected master)."""
    global _zookeeper_client

    if not args_str:
        args_str = ' '.join(sys.argv[1:])
    args = parse_args(args_str)

    # Prefix the zookeeper client id and paths with the cluster id, if any.
    client_pfx = zk_path_pfx = ''
    if args.cluster_id:
        client_pfx = args.cluster_id + '-'
        zk_path_pfx = args.cluster_id + '/'

    _zookeeper_client = ZookeeperClient(client_pfx + "svc-monitor",
                                        args.zk_server_ip)
    _zookeeper_client.master_election(zk_path_pfx + "/svc-monitor",
                                      os.getpid(), run_svc_monitor, args)
# end main
def server_main():
    """Console entry point: enable plain-text cgitb tracebacks, then
    hand off to main()."""
    cgitb.enable(format='text')
    main()
# end server_main
# Allow running this module directly as a script.
if __name__ == '__main__':
    server_main()
| |
__author__ = 'ptoth'
def get_config():
    """Return the parameter dictionary describing the test network.

    The dict has two top-level sections:

    * ``'global'``  -- training epochs.
    * ``'network'`` -- a two-layer topology where every layer carries a
      feedforward, a recurrent and a feedback ``SRAutoEncoderNode``.

    Layer 2 uses the same node settings as layer 1 but with all learning
    rates divided by 5 and a repeat factor of 5.  Only the recurrent
    nodes have ``is_transpose_reconstruction`` forced to False.
    """
    params = {}

    params['global'] = {
        'epochs': 70
    }

    # Shared node hyper-parameters.
    update_epochs = 1
    verbose = 1
    activation_function = "Sigmoid"
    loss_function = "MSE"
    activation_threshold = 0.5
    min_w = -1.0
    max_w = 1.0
    lifetime_sparsity = 0.014
    duty_cycle_decay = 0.006
    w_lr = 0.05
    inh_lr = 0.05
    b_lr = 0.05
    r_b_lr = 0.05
    learning_rate_increase = 0.01
    learning_rate_decrease = 0.99
    dropout_ratio = None
    momentum = 0.9
    zoom = 0.4
    make_sparse = False
    target_sparsity = 0.1
    layer_repeat_factor = 5
    local_activation_radius = None
    is_transpose_reconstruction = True
    regularization = 0.0
    curriculum_rate = None
    node_type = "SRAutoEncoderNode"

    def _node(name, inputs_size, transpose, lr_div=1):
        # One SRAutoEncoderNode config. Only the name, input width,
        # reconstruction mode and learning-rate divisor vary per node;
        # everything else comes from the shared settings above.
        # (lr_div=1 leaves the rates untouched: x/1 is exact for floats.)
        return {
            'name': name,
            'node_type': node_type,
            'inputs_size': inputs_size,
            'output_size': 16,
            'activation_function': activation_function,
            'activation_threshold': activation_threshold,
            'lifetime_sparsity': lifetime_sparsity,
            'min_weight': min_w,
            'max_weight': max_w,
            'dropout_ratio': dropout_ratio,
            'momentum': momentum,
            'local_activation_radius': local_activation_radius,
            'zoom': zoom,
            'make_sparse': make_sparse,
            'target_sparsity': target_sparsity,
            'duty_cycle_decay': duty_cycle_decay,
            'learning_rate_increase': learning_rate_increase/lr_div,
            'learning_rate_decrease': learning_rate_decrease,
            'is_transpose_reconstruction': transpose,
            'regularization': regularization,
            'weights_lr': w_lr/lr_div,
            'inhibition_lr': inh_lr/lr_div,
            'bias_lr': b_lr/lr_div,
            'recon_bias_lr': r_b_lr/lr_div
        }

    layer1 = {
        'name': "layer1",
        'verbose': verbose,
        'repeat_factor': 1,
        'feedforward': _node("layer1-feedforward", 32,
                             is_transpose_reconstruction),
        'recurrent': _node("layer1-recurrent", 16, False),
        'feedback': _node("layer1-feedback", 16,
                          is_transpose_reconstruction)
    }

    layer2 = {
        'name': "layer2",
        'verbose': verbose,
        'repeat_factor': layer_repeat_factor,
        'feedforward': _node("layer2-feedforward", 16,
                             is_transpose_reconstruction, lr_div=5),
        'recurrent': _node("layer2-recurrent", 16, False, lr_div=5),
        'feedback': _node("layer2-feedback", 16,
                          is_transpose_reconstruction, lr_div=5)
    }

    params['network'] = {
        'name': "test_network",
        'inputs_size': 16,
        'curriculum_rate': curriculum_rate,
        'serialize': False,
        'serialize_path': '../serialized_models',
        'verbose': verbose,
        'activation_function': activation_function,
        'loss_function': loss_function,
        'visualize_states': False,
        'update_epochs': update_epochs,
        'input': {
        },
        'layers': [
            layer1
            , layer2
        ],
        'output': {
        }
    }

    return params
| |
from __future__ import with_statement
import inspect
import sys
import textwrap
from fabric import state
from fabric.utils import abort, warn, error, py33
from fabric.network import to_dict, normalize_to_string, disconnect_all
from fabric.context_managers import settings
from fabric.job_queue import JobQueue
from fabric.task_utils import crawl, merge, parse_kwargs
from fabric.exceptions import NetworkError
import collections
if py33:
from contextlib import ExitStack
if sys.version_info[:2] == (2, 5):
    # Python 2.5 inspect.getargspec returns a tuple
    # instead of ArgSpec namedtuple.
    class ArgSpec(object):
        # Minimal stand-in for the (args, varargs, keywords, defaults)
        # namedtuple later Pythons return: supports both attribute access
        # and tuple-style indexing.
        def __init__(self, args, varargs, keywords, defaults):
            self.args = args
            self.varargs = varargs
            self.keywords = keywords
            self.defaults = defaults
            self._tuple = (args, varargs, keywords, defaults)

        def __getitem__(self, idx):
            return self._tuple[idx]

    def patched_get_argspec(func):
        # Wrap the original tuple-returning implementation (stashed on
        # inspect._getargspec below).
        return ArgSpec(*inspect._getargspec(func))

    # Keep a handle on the original, then monkeypatch so the rest of this
    # module sees the namedtuple-like interface.  Order matters: stash
    # first, replace second.
    inspect._getargspec = inspect.getargspec
    inspect.getargspec = patched_get_argspec
def get_task_details(task):
    """Return a help string for ``task``: its (dedented) docstring -- or a
    placeholder when absent -- plus a rendered argument list showing
    default values.
    """
    details = [
        textwrap.dedent(task.__doc__)
        if task.__doc__
        else 'No docstring provided']
    # inspect.getargspec() was removed in Python 3.11; prefer
    # getfullargspec() (whose .args/.defaults fields are compatible with
    # the code below) and fall back to getargspec on old interpreters,
    # where the 2.5 shim above may have patched it.
    getargspec = getattr(inspect, 'getfullargspec', inspect.getargspec)
    argspec = getargspec(task)
    default_args = [] if not argspec.defaults else argspec.defaults
    num_default_args = len(default_args)
    # Positional args without defaults come first; the trailing
    # num_default_args args pair up with the defaults tuple (zip keeps
    # the pairing correct even when there are no defaults).
    args_without_defaults = argspec.args[:len(argspec.args) - num_default_args]
    args_with_defaults = argspec.args[-1 * num_default_args:]
    details.append('Arguments: %s' % (
        ', '.join(
            args_without_defaults + [
                '%s=%r' % (arg, default)
                for arg, default in zip(args_with_defaults, default_args)
            ])
    ))
    return '\n'.join(details)
def _get_list(env):
def inner(key):
return env.get(key, [])
return inner
class Task(object):
    """
    Abstract base class for objects wishing to be picked up as Fabric tasks.

    Instances of subclasses will be treated as valid tasks when present in
    fabfiles loaded by the :doc:`fab </usage/fab>` tool.

    For details on how to implement and use `~fabric.tasks.Task` subclasses,
    please see the usage documentation on :ref:`new-style tasks
    <new-style-tasks>`.

    .. versionadded:: 1.1
    """
    # Sentinel default: subclasses (e.g. WrappedCallableTask) only adopt a
    # callable's __name__ while name is still 'undefined'.
    name = 'undefined'
    use_task_objects = True
    aliases = None
    is_default = False

    # TODO: make it so that this wraps other decorators as expected
    def __init__(self, alias=None, aliases=None, default=False, name=None,
        *args, **kwargs):
        """Store alias/name/default metadata.  ``aliases`` (plural), when
        given, overrides ``alias`` (singular)."""
        if alias is not None:
            self.aliases = [alias, ]
        if aliases is not None:
            self.aliases = aliases
        if name is not None:
            self.name = name
        self.is_default = default

    def __details__(self):
        # Help text is derived from run()'s signature and docstring.
        return get_task_details(self.run)

    def run(self):
        # Subclasses must implement the actual task body.
        raise NotImplementedError

    def get_hosts_and_effective_roles(self, arg_hosts, arg_roles, arg_exclude_hosts, env=None):
        """
        Return a tuple containing the host list the given task should be using
        and the roles being used.

        Precedence (highest first): per-task CLI arguments, decorator-set
        ``self.hosts``/``self.roles``, then globally-set env lists.

        See :ref:`host-lists` for detailed documentation on how host lists are
        set.

        .. versionchanged:: 1.9
        """
        env = env or {'hosts': [], 'roles': [], 'exclude_hosts': []}
        roledefs = env.get('roledefs', {})
        # Command line per-task takes precedence over anything else.
        if arg_hosts or arg_roles:
            return merge(arg_hosts, arg_roles, arg_exclude_hosts, roledefs), arg_roles
        # Decorator-specific hosts/roles go next
        func_hosts = getattr(self, 'hosts', [])
        func_roles = getattr(self, 'roles', [])
        if func_hosts or func_roles:
            return merge(func_hosts, func_roles, arg_exclude_hosts, roledefs), func_roles
        # Finally, the env is checked (which might contain globally set lists
        # from the CLI or from module-level code). This will be the empty list
        # if these have not been set -- which is fine, this method should
        # return an empty list if no hosts have been set anywhere.
        env_vars = list(map(_get_list(env), "hosts roles exclude_hosts".split()))
        env_vars.append(roledefs)
        return merge(*env_vars), env.get('roles', [])

    def get_pool_size(self, hosts, default):
        """Return the parallel pool size for this task: the per-task
        ``self.pool_size`` override or ``default``/len(hosts), never
        larger than the number of hosts."""
        # Default parallel pool size (calculate per-task in case variables
        # change)
        default_pool_size = default or len(hosts)
        # Allow per-task override
        # Also cast to int in case somebody gave a string
        from_task = getattr(self, 'pool_size', None)
        pool_size = int(from_task or default_pool_size)
        # But ensure it's never larger than the number of hosts
        pool_size = min((pool_size, len(hosts)))
        # Inform user of final pool size for this task
        if state.output.debug:
            print(("Parallel tasks now using pool size of %d" % pool_size))
        return pool_size
class WrappedCallableTask(Task):
    """
    Wraps a given callable transparently, while marking it as a valid Task.

    Generally used via `~fabric.decorators.task` and not directly.

    .. versionadded:: 1.1

    .. seealso:: `~fabric.docs.unwrap_tasks`, `~fabric.decorators.task`
    """
    def __init__(self, callable, *args, **kwargs):
        super(WrappedCallableTask, self).__init__(*args, **kwargs)
        self.wrapped = callable
        # Don't use getattr() here -- we want to avoid touching self.name
        # entirely so the superclass' value remains default.
        if hasattr(callable, '__name__'):
            if self.name == 'undefined':
                # Adopt the callable's own name.
                self.__name__ = self.name = callable.__name__
            else:
                # An explicit name was given; mirror it onto __name__.
                self.__name__ = self.name
        # Copy docstring/module from the wrapped callable when present.
        for attr in ('__doc__', '__module__'):
            if hasattr(callable, attr):
                setattr(self, attr, getattr(callable, attr))

    def __call__(self, *args, **kwargs):
        return self.run(*args, **kwargs)

    def run(self, *args, **kwargs):
        return self.wrapped(*args, **kwargs)

    def __getattr__(self, k):
        # Delegate unknown attribute access to the wrapped callable.
        return getattr(self.wrapped, k)

    def __details__(self):
        # Unwrap nested wrappers so the innermost callable's signature
        # is the one documented.
        innermost = self
        while 'wrapped' in innermost.__dict__:
            innermost = innermost.__dict__['wrapped']
        return get_task_details(innermost)
def requires_parallel(task):
    """
    Returns True if given ``task`` should be run in parallel mode.

    Specifically:

    * It's been explicitly marked with ``@parallel``, or:
    * It's *not* been explicitly marked with ``@serial`` *and* the global
      parallel option (``env.parallel``) is set to ``True``.
    """
    marked_serial = getattr(task, 'serial', False)
    if state.env.parallel and not marked_serial:
        return True
    return getattr(task, 'parallel', False)
def _parallel_tasks(commands_to_run):
    """True when at least one of the named commands wants parallel mode."""
    return any(
        requires_parallel(crawl(spec[0], state.commands))
        for spec in commands_to_run
    )
def _is_network_error_ignored():
    """Network errors are ignorable only when exceptions are disabled for
    'network' AND bad hosts are being skipped."""
    if state.env.use_exceptions_for['network']:
        return False
    return state.env.skip_bad_hosts
def _execute(task, host, my_env, args, kwargs, jobs, queue, multiprocessing):
    """
    Primary single-host work body of execute()

    In parallel mode (``queue`` is not None) a ``multiprocessing.Process``
    running the task is appended to ``jobs``; results or exceptions travel
    back to the parent over ``queue``.  In serial mode the task runs inline
    under ``settings(**local_env)`` and its return value is returned.
    """
    # Log to stdout
    if state.output.running and not hasattr(task, 'return_value'):
        print(("[%s] Executing task '%s'" % (host, my_env['command'])))
    # Create per-run env with connection settings
    local_env = to_dict(host)
    local_env.update(my_env)
    # Set a few more env flags for parallelism
    if queue is not None:
        local_env.update({'parallel': True, 'linewise': True})
    # Handle parallel execution
    if queue is not None:  # Since queue is only set for parallel
        name = local_env['host_string']

        # Wrap in another callable that:
        # * expands the env it's given to ensure parallel, linewise, etc are
        #   all set correctly and explicitly. Such changes are naturally
        #   insulated from the parent process.
        # * nukes the connection cache to prevent shared-access problems
        # * knows how to send the tasks' return value back over a Queue
        # * captures exceptions raised by the task
        def inner(args, kwargs, queue, name, env):
            state.env.update(env)

            def submit(result):
                queue.put({'name': name, 'result': result})
            try:
                state.connections.clear()
                submit(task.run(*args, **kwargs))
            except BaseException as e:  # We really do want to capture everything
                # SystemExit implies use of abort(), which prints its own
                # traceback, host info etc -- so we don't want to double up
                # on that. For everything else, though, we need to make
                # clear what host encountered the exception that will
                # print.
                if e.__class__ is not SystemExit:
                    if not (isinstance(e, NetworkError) and
                            _is_network_error_ignored()):
                        sys.stderr.write("!!! Parallel execution exception under host %r:\n" % name)
                    submit(e)
                # Here, anything -- unexpected exceptions, or abort()
                # driven SystemExits -- will bubble up and terminate the
                # child process.
                if not (isinstance(e, NetworkError) and
                        _is_network_error_ignored()):
                    raise

        # Stuff into Process wrapper
        kwarg_dict = {
            'args': args,
            'kwargs': kwargs,
            'queue': queue,
            'name': name,
            'env': local_env,
        }
        p = multiprocessing.Process(target=inner, kwargs=kwarg_dict)
        # Name/id is host string
        p.name = name
        # Add to queue
        jobs.append(p)
    # Handle serial execution
    else:
        if py33:
            # Python 3.3+: settings() yields multiple context managers;
            # enter them all via ExitStack.
            with ExitStack() as stack:
                for s in settings(**local_env):
                    stack.enter_context(s)
                return task.run(*args, **kwargs)
        else:
            with settings(**local_env):
                return task.run(*args, **kwargs)
def _is_task(task):
    """True when ``task`` is a new-style Task instance."""
    return isinstance(task, Task)
def execute(task, *args, **kwargs):
"""
Execute ``task`` (callable or name), honoring host/role decorators, etc.
``task`` may be an actual callable object, or it may be a registered task
name, which is used to look up a callable just as if the name had been
given on the command line (including :ref:`namespaced tasks <namespaces>`,
e.g. ``"deploy.migrate"``.
The task will then be executed once per host in its host list, which is
(again) assembled in the same manner as CLI-specified tasks: drawing from
:option:`-H`, :ref:`env.hosts <hosts>`, the `~fabric.decorators.hosts` or
`~fabric.decorators.roles` decorators, and so forth.
``host``, ``hosts``, ``role``, ``roles`` and ``exclude_hosts`` kwargs will
be stripped out of the final call, and used to set the task's host list, as
if they had been specified on the command line like e.g. ``fab
taskname:host=hostname``.
Any other arguments or keyword arguments will be passed verbatim into
``task`` (the function itself -- not the ``@task`` decorator wrapping your
function!) when it is called, so ``execute(mytask, 'arg1',
kwarg1='value')`` will (once per host) invoke ``mytask('arg1',
kwarg1='value')``.
:returns:
a dictionary mapping host strings to the given task's return value for
that host's execution run. For example, ``execute(foo, hosts=['a',
'b'])`` might return ``{'a': None, 'b': 'bar'}`` if ``foo`` returned
nothing on host `a` but returned ``'bar'`` on host `b`.
In situations where a task execution fails for a given host but overall
progress does not abort (such as when :ref:`env.skip_bad_hosts
<skip-bad-hosts>` is True) the return value for that host will be the
error object or message.
.. seealso::
:ref:`The execute usage docs <execute>`, for an expanded explanation
and some examples.
.. versionadded:: 1.3
.. versionchanged:: 1.4
Added the return value mapping; previously this function had no defined
return value.
"""
my_env = {'clean_revert': True}
results = {}
# Obtain task
is_callable = isinstance(task, collections.Callable)
if not (is_callable or _is_task(task)):
# Assume string, set env.command to it
my_env['command'] = task
task = crawl(task, state.commands)
if task is None:
msg = "%r is not callable or a valid task name" % (my_env['command'],)
if state.env.get('skip_unknown_tasks', False):
warn(msg)
return
else:
abort(msg)
# Set env.command if we were given a real function or callable task obj
else:
dunder_name = getattr(task, '__name__', None)
my_env['command'] = getattr(task, 'name', dunder_name)
# Normalize to Task instance if we ended up with a regular callable
if not _is_task(task):
task = WrappedCallableTask(task)
# Filter out hosts/roles kwargs
new_kwargs, hosts, roles, exclude_hosts = parse_kwargs(kwargs)
# Set up host list
my_env['all_hosts'], my_env['effective_roles'] = task.get_hosts_and_effective_roles(hosts, roles,
exclude_hosts, state.env)
parallel = requires_parallel(task)
if parallel:
# Import multiprocessing if needed, erroring out usefully
# if it can't.
try:
import multiprocessing
except ImportError:
import traceback
tb = traceback.format_exc()
abort(tb + """
At least one task needs to be run in parallel, but the
multiprocessing module cannot be imported (see above
traceback.) Please make sure the module is installed
or that the above ImportError is fixed.""")
else:
multiprocessing = None
# Get pool size for this task
pool_size = task.get_pool_size(my_env['all_hosts'], state.env.pool_size)
# Set up job queue in case parallel is needed
queue = multiprocessing.Queue() if parallel else None
jobs = JobQueue(pool_size, queue)
if state.output.debug:
jobs._debug = True
# Call on host list
if my_env['all_hosts']:
# Attempt to cycle on hosts, skipping if needed
for host in my_env['all_hosts']:
try:
results[host] = _execute(
task, host, my_env, args, new_kwargs, jobs, queue,
multiprocessing
)
except NetworkError as e:
results[host] = e
# Backwards compat test re: whether to use an exception or
# abort
if not state.env.use_exceptions_for['network']:
func = warn if state.env.skip_bad_hosts else abort
error(e.message, func=func, exception=e.wrapped)
else:
raise
# If requested, clear out connections here and not just at the end.
if state.env.eagerly_disconnect:
disconnect_all()
# If running in parallel, block until job queue is emptied
if jobs:
err = "One or more hosts failed while executing task '%s'" % (
my_env['command']
)
jobs.close()
# Abort if any children did not exit cleanly (fail-fast).
# This prevents Fabric from continuing on to any other tasks.
# Otherwise, pull in results from the child run.
ran_jobs = jobs.run()
for name, d in list(ran_jobs.items()):
if d['exit_code'] != 0:
if isinstance(d['results'], NetworkError) and \
_is_network_error_ignored():
error(d['results'].message, func=warn, exception=d['results'].wrapped)
elif isinstance(d['results'], BaseException):
error(err, exception=d['results'])
else:
error(err)
results[name] = d['results']
# Or just run once for local-only
else:
if py33:
with ExitStack() as stack:
for s in settings(**my_env):
stack.enter_context(s)
results['<local-only>'] = task.run(*args, **new_kwargs)
else:
with settings(**my_env):
results['<local-only>'] = task.run(*args, **new_kwargs)
# Return what we can from the inner task executions
return results
| |
#!/usr/bin/env python2
# transpiled with BefunCompile v1.3.0 (c) 2017
import sys
import zlib, base64
_g = ("AR+LCAAAAAAABADtnEtP40gQgP9KTyCXWFm6ut1+CUX7Q0bMrLQr72UlXzYnNL9969W2EwMDwwpMESvEJnDI56qud/t4dXXl8PBvcLirdz6c/Hwi2E8kWeb9LLCfS7Kf"
+ "B1aPC+wF9gJ7gb3AXmDfGPZNjgvsGx4H17QF+MH5QG+Rr+Jw+iUPwfdxB77vN36zT6nYBT/99fhhYJ/3LY+bPzdfi2aLp673sdgjdN97KGAX+9sPA1u3LrUOAsrT4XVM"
+ "zjcu4MvTJz66qiHR3gzR91Cg4AOduxaBr47frlHa8HEki2i+JaISMT1T4681feJL52vnk3P3+1B82UPvw5f95t+NcyjUwYdbP6Bw3ceRbIguIBG4ACTZJrm6yRfgYuPA"
+ "/xzio8AiEcKOauwr1l5kjCRivAl48chxhAHC7RHfbkIPYQCgMxS7AeIudP1S5O8NS0QNKS20rLRAAk2VA5Y1vtOaPfgEgzvgreBTgr6CHr88wHB3AOgD9MUXXMJ0uQ30"
+ "VoRthLvVwVZkhQJLFkpattGziFGHgQQdRsne791u16DtGiB1KLUD/sfgJ6ID3oTNP5sCb4S4roXlem9YFCiuSmSsIzGWpeozsEoHFjp6nuLH3xvVyvtHl+Se7kVwA1Rd"
+ "AT1Uq1NjqAgK3yFNrge1tw6kzyhrMlDF/BuTAKFYqOhzjveG9SzQ0UDNBRoDSRxfbveQTj7/GCX83rCivU3FjqZVAxVKcq+BFy9Z4wPEAX/6DvZ4hZbopbjHdcCiNSYi"
+ "VmPUWIol+AIlTvYKWI2PxeaPzRYj4rbp+gjPjSIOerpzaR2wQkTKzAJFOYoPOlHjA3rRpg274qbj0xbZi5A/o19iGJZ6fj2eVgKralyfqDEu4dEZodBRC4cy/iySWsp7"
+ "gq3WAUtEHDngRUgaNyKpKDYZapKsh3JwKe2qXbNzhy5uoQjl0HWYBW2b4iuebtjFhrIPfe+6jdt8/X51963b/Nh8R1/VrATWn6zZMJrlSAaqSpL1nBx3e3hMut+OV3sE"
+ "pVM7lJgBdn1JS3y3DtgSONmZydE/FkG94DhcX8/vzlpg0RyFeWyc6GLueclAPd/H3mbY+YfrgS1pzZLTKSn9qTinow8TwT6oxk+KVE4nrAgL64BtJCRmQJ+tsQSQuGZJ"
+ "qynr+alJWtqkk2MlrsfDeZQonpcueBXjHSCThDane4Zk2UL9tTl3Q8M6YGuO/3GpIpT4IFm8Yo1rznZFFd2TsOiIMdMp48PRlV8H7In2coECavZBHFNR0KjW+HAdUWNj"
+ "//3lofGdC+uApYCwphWq9RmJoHC1SpjR0uvsODD1gOFkH6ge07syDikO1Xm5eYJdy5ptODYGTd4bKcvMXc+LrPEjx0rWLKlxQ2jiXlF7p+gijJWK18LergSWzVHgimLg"
+ "epSULOZJ3y/CDgnT3xOD9d6wkrdS5B9Un9EIo0BPyjIHSP3Cw+JncVsWse+7kPoipOHsv/j3+5XBnifvC/zXHIdrvBO//b4OWJGjNDtEjjUoo/qg8CrYk+O9YcXRhEaD"
+ "KMl6cPFKUCElCzOwwPF/XZJAm/akGkOet3mtGq8KFsMnSXbmXbxza2wFNtZsjnzOBaI63CZOMZUZWCHy3MOS1vN5McoQrG+zx2GjRIl8k7P43NczAzumNicZQaMZgbRu"
+ "zcDSOq0f1t5R6GZgybHmkFipk1ZjJMYAQ7Dz6QLyO4nXbJPTec59zMAS0awIA5VmduRnuTFtzBrjCtUI6sHowpBkRXwYS8iIlw+L/qyh0SC/8KpLZ2QGtg2PjhloecqQ"
+ "NdZBoHTajJ6VLEz5WW5dfRI1lgpqG88rqDYjqIrFl/tZIU4hsVIbkixFwtXD2ittIFh0BD4uLCRds56DZO16SDsPNGy2BDv5Wf8pkvd6EThRNNXkaRkzsGP8zwPk5H1Y"
+ "uFML3hDsg342lDpc7dlemYHVTmXiIbeWNwXkaFmTekOuR6wx1OpxHhSxGdhRV88NlHRDXt3YWhXsGC8FWIh4nJYxA7toPZNl9uphdfeHFVjSYaajIkzIdWPJ8ngziKUI"
+ "StZm9DnsD1pnUxHb6gj4s7HMOm/aGudnkh1Y2gqB7jXPemnnvcoX9RPb0z4erJYQgZUWNNmRXoCMW3hDkh1HFrUpwAUK3Z2Xfnm4eqWw0uOAVkviukfYc3saNCEyBTsL"
+ "Due18bGkbAaWbFFg3kCbAiArszRE2pI2c5mBRaIysSluTxPbNA0emIGdVxep2ga59JSHvkzls9mZQm4NzO2zaLgZ2LOlOk92NEcwZI01SuR0QIqKZ0VyS7OLZ8PVk4HK"
+ "gweWKhUSD4+P4NBRKNBNed5YPpufQDLNZ44luJL3NxmC1c2Vrc6Nh9lSJTWOptQ4cEmtaqeWJSHnrVvGmtG+1S2lIllEo2GSmnf0yDyYJT9b5WFUSejCtOc9gG7dMgML"
+ "jZb95zv9ZThKmj6W1JhalpWWxMXRBNn9EVWNLRkoLbjl50Fpzi7bQJK1MYOx9jKVixeL1wysVsLbabrAM7XWZPjROnZgG61LNJVrec2mcN6xNQO7fK6XDlfPquWWYCn+"
+ "r/NDDPy031DrxobWLHmZlD1s7oOEdpoktzRTIXvxqIKac3aRrJTK5SZYgg3iep54hpsVWC/jfGKTW50roIuahWur10MaG/MjJccpvvw0A7ONLTHC+UlflLlLjmDIQEng"
+ "NK7ZWM8eRVKZmzdOLNCstLINZOpKW2xG+6hLFeP/6PWRvybDRd/oSx6uMu5KM7jLcrYjgJ4Qy7CV10/kT3Zg4XzoK0Z9AuH4CDszsPX/aH9WD/uGqBfYC+wF9gJ7gb3A"
+ "XmAvsEZg/wPTb2bdgHYAAA==")
# Decode the embedded playfield: the first byte of the base64-decoded blob is
# the number of gzip layers the payload is wrapped in; unwrap them all.
# NOTE(review): Python 2 semantics — b64decode returns str, so ord()/map(ord)
# operate on 1-char strings; under Python 3 this would need adjustment.
g = base64.b64decode(_g)[1:]
for i in range(ord(base64.b64decode(_g)[0])):
    g = zlib.decompress(g, 16+zlib.MAX_WBITS)  # 16+MAX_WBITS = expect gzip framing
g=list(map(ord, g))  # playfield as a flat list of integer cell values
def gr(x,y):
    """Read playfield cell (x, y); reads outside the 237x128 grid yield 0."""
    if 0 <= x < 237 and 0 <= y < 128:
        return g[y * 237 + x]
    return 0
def gw(x,y,v):
    """Write v into playfield cell (x, y); writes outside the grid are ignored."""
    if 0 <= x < 237 and 0 <= y < 128:
        g[y * 237 + x] = v
def td(a,b):
    """Befunge-style integer division: floor division, but b == 0 yields 0."""
    if b == 0:
        return 0
    return a // b
def tm(a,b):
    """Befunge-style modulo: Python modulo, but b == 0 yields 0."""
    if b == 0:
        return 0
    return a % b
# Befunge data stack; popping or peeking an empty stack yields 0.
s = []
def sp():
    """Pop the top of the stack; returns 0 when the stack is empty."""
    if s:
        return s.pop()
    return 0
def sa(v):
    """Push v onto the stack."""
    s.append(v)
def sr():
    """Peek at the top of the stack; returns 0 when the stack is empty."""
    if s:
        return s[-1]
    return 0
# Compiled Befunge basic blocks. Each handler mutates the playfield (gr/gw),
# the stack (sp/sa/sr) and the scratch registers t0/t1, and returns the index
# of the next handler for the dispatch loop at the bottom of the file.
# NOTE(review): semantics below are opaque machine-generated arithmetic;
# per-block comments are intentionally limited to what the calls show.
def _0():
    # Initialize loop counters / accumulators in the playfield.
    gw(1,0,17)
    gw(2,0,0)
    gw(3,0,0)
    gw(2,3,0)
    return 1
def _1():
    global t0
    # Parse a two-ASCII-digit number from row gr(1,0) and store it.
    gw((gr(3,0)%100)+8,gr(3,0)/100,((gr(gr(2,0)*3,gr(1,0))-48)*10)+(gr((gr(2,0)*3)+1,gr(1,0))-48))
    gw(3,0,gr(3,0)+1)
    t0=gr(2,0)+1
    return (34)if(gr(2,0)!=11)else(2)
def _2():
    gw(1,0,gr(1,0)+1)
    gw(2,0,0)
    return 3
def _3():
    # Loop until both counters reach their end values (117 and 1).
    return (1)if((((0)if(gr(1,0)-117!=0)else(1))+((0)if(gr(2,0)-1!=0)else(1)))!=2)else(4)
def _4():
    gw(5,1,0)
    return 5
def _5():
    gw(6,1,0)
    return 6
def _6():
    global t0
    # Set up operands for the multiply-accumulate loop in _7/_33.
    t0=gr(5,1)
    gw(1,1,gr(6,1))
    gw(2,1,t0)
    gw(3,1,1)
    t0=0
    return 7
def _7():
    return (33)if((gr(1,1)+gr(2,1))!=0)else(8)
def _8():
    global t0
    # Store the computed value into the 127x127 table at column offset 109.
    gw(gr(5,1)+109,gr(6,1),t0)
    t0=gr(6,1)-127
    gw(6,1,gr(6,1)+1)
    return (6)if((t0)!=0)else(9)
def _9():
    global t0
    t0=gr(5,1)-127
    gw(5,1,gr(5,1)+1)
    return (5)if((t0)!=0)else(10)
def _10():
    # 17576 == 26**3: iterate over all three-letter keys.
    gw(1,3,17576)
    return 11
def _11():
    global t0
    t0=gr(1,3)
    gw(1,3,gr(1,3)-1)
    return (16)if((t0)!=0)else(12)
def _12():
    gw(1,5,1200)
    gw(2,5,0)
    return 13
def _13():
    global t0
    # Accumulate the decoded cells into gr(2,5) (the final answer).
    t0=gr(gr((gr(1,5)%100)+8,gr(1,5)/100)+109,gr((gr(1,5)%3)+4,3))
    gw(2,5,gr(gr((gr(1,5)%100)+8,gr(1,5)/100)+109,gr((gr(1,5)%3)+4,3))+gr(2,5))
    gw((gr(1,5)%100)+8,gr(1,5)/100,t0)
    t0=gr(1,5)
    return (15)if((gr(1,5))!=0)else(14)
def _14():
    # Emit the result and jump to the terminating state (35).
    sys.stdout.write(str(gr(2,5))+" ")
    sys.stdout.flush()
    return 35
def _15():
    global t0
    t0=t0-1
    gw(1,5,t0)
    return 13
def _16():
    global t0
    global t1
    # Decompose the key index into three letters (base 26, offset 'a'==97).
    t0=gr(1,3)/26
    t1=((gr(1,3)/26)%26)+97
    gw(1,2,(gr(1,3)%26)+97)
    gw(2,2,t1)
    t0=t0/26
    t0=t0+97
    gw(3,2,t0)
    gw(3,4,0)
    gw(1,4,0)
    gw(2,4,1)
    sa(1200)
    sa(gr(gr(8,12)+109,gr(gr(2,4),2)))
    sa((1)if(gr(gr(8,12)+109,gr(gr(2,4),2))<32)else(0))
    return 17
def _17():
    return (18)if(sp()!=0)else(19)
def _18():
    # Discard the two scratch values and resume the key loop.
    sp();
    sp();
    return 11
# Continuation of the compiled basic blocks (see header above _0): values
# outside the printable ASCII range 32..126 abort the current candidate.
def _19():
    return (18)if(sr()>126)else(20)
def _20():
    global t0
    t0=gr(1,4)-9
    gw(1,4,gr(1,4)+1)
    return (21)if((t0)!=0)else(18)
def _21():
    sa(sp()-32)
    return (32)if(sp()!=0)else(22)
def _22():
    global t0
    # Walk the decoded text: compute the next cell coordinate from the
    # stack top and fetch its value into t0.
    gw(2,4,(sr()%3)+1)
    sa(sr());
    sa((sr()%100)+8)
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(sp()/100);
    v0=sp()
    sa(gr(sp(),v0))
    sa(sp()+109)
    sa(gr(gr(2,4),2))
    v0=sp()
    t0=gr(sp(),v0)
    return (11)if(t0<32)else(23)
def _23():
    global t0
    return (31)if(t0>126)else(24)
def _24():
    global t0
    # 101 == ord('e'): count occurrences of 'e' in _30.
    t0=t0-101
    return (25)if((t0)!=0)else(30)
def _25():
    sa(sr());
    return 26
def _26():
    return (29)if(sp()!=0)else(27)
def _27():
    sp();
    # Keep the best-scoring key seen so far (score in gr(2,3)).
    return (28)if(gr(3,4)>gr(2,3))else(11)
def _28():
    global t0
    global t1
    # Record new best score and the winning three-letter key.
    gw(2,3,gr(3,4))
    t0=gr(3,2)
    t1=gr(2,2)
    gw(4,3,gr(1,2))
    gw(5,3,t1)
    gw(6,3,t0)
    return 11
def _29():
    sa(sp()-1)
    return 22
def _30():
    # Increment the per-key score counter.
    gw(3,4,gr(3,4)+1)
    sa(sr());
    return 26
def _31():
    sp();
    return 11
def _32():
    sa(sp()-1)
    gw(2,4,(sr()%3)+1)
    sa(sr());
    sa((sr()%100)+8)
    v0=sp()
    v1=sp()
    sa(v0)
    sa(v1)
    sa(sp()/100);
    v0=sp()
    sa(gr(sp(),v0))
    sa(sp()+109)
    sa(gr(gr(2,4),2))
    v0=sp()
    sa(gr(sp(),v0))
    sa((1)if(sr()<32)else(0))
    return 17
def _33():
    global t0
    # Shift-and-add multiply step (binary long multiplication).
    t0=t0+((((gr(1,1)%2)+(gr(2,1)%2))%2)*gr(3,1))
    gw(3,1,gr(3,1)*2)
    gw(1,1,gr(1,1)/2)
    gw(2,1,gr(2,1)/2)
    return 7
def _34():
    global t0
    gw(2,0,t0)
    return 3
# Dispatch loop: each handler returns the index of the next handler;
# index 35 (out of range) terminates the program.
m=[_0,_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,_21,_22,_23,_24,_25,_26,_27,_28,_29,_30,_31,_32,_33,_34]
c=0
while c<35:
    c=m[c]()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Celery command"""
from multiprocessing import Process
from typing import Optional
import daemon
import psutil
import sqlalchemy.exc
from celery import maybe_patch_concurrency
from daemon.pidfile import TimeoutPIDLockFile
from lockfile.pidlockfile import read_pid_from_pidfile, remove_existing_pidfile
from airflow import settings
from airflow.configuration import conf
from airflow.executors.celery_executor import app as celery_app
from airflow.utils import cli as cli_utils
from airflow.utils.cli import setup_locations, setup_logging
from airflow.utils.serve_logs import serve_logs
WORKER_PROCESS_NAME = "worker"
@cli_utils.action_logging
def flower(args):
    """Starts Flower, Celery monitoring tool"""
    # Mandatory arguments first, then any optional flags the user supplied.
    options = [
        "flower",
        conf.get('celery', 'BROKER_URL'),
        f"--address={args.hostname}",
        f"--port={args.port}",
    ]
    for value, flag in (
        (args.broker_api, "--broker-api"),
        (args.url_prefix, "--url-prefix"),
        (args.basic_auth, "--basic-auth"),
        (args.flower_conf, "--conf"),
    ):
        if value:
            options.append(f"{flag}={value}")
    if not args.daemon:
        # Foreground mode: run Flower directly in this process.
        celery_app.start(options)
        return
    # Daemon mode: detach, writing stdout/stderr to the configured files and
    # guarding against double-starts with a PID lock file.
    pid_file, stdout_path, stderr_path, _ = setup_locations(
        process="flower",
        pid=args.pid,
        stdout=args.stdout,
        stderr=args.stderr,
        log=args.log_file,
    )
    with open(stdout_path, "w+") as stdout_handle, open(stderr_path, "w+") as stderr_handle:
        ctx = daemon.DaemonContext(
            pidfile=TimeoutPIDLockFile(pid_file, -1),
            stdout=stdout_handle,
            stderr=stderr_handle,
        )
        with ctx:
            celery_app.start(options)
def _serve_logs(skip_serve_logs: bool = False) -> Optional[Process]:
"""Starts serve_logs sub-process"""
if skip_serve_logs is False:
sub_proc = Process(target=serve_logs)
sub_proc.start()
return sub_proc
return None
def _run_worker(options, skip_serve_logs):
    """Run the Celery worker, always tearing down the log server afterwards."""
    log_server = _serve_logs(skip_serve_logs)
    try:
        celery_app.worker_main(options)
    finally:
        # The sub-process (if any) must not outlive the worker.
        if log_server:
            log_server.terminate()
@cli_utils.action_logging
def worker(args):
    """Starts Airflow Celery worker"""
    # Fail fast if the metadata database is unreachable.
    if not settings.validate_session():
        raise SystemExit("Worker exiting, database connection precheck failed.")
    autoscale = args.autoscale
    skip_serve_logs = args.skip_serve_logs
    if autoscale is None and conf.has_option("celery", "worker_autoscale"):
        autoscale = conf.get("celery", "worker_autoscale")
    # Setup locations
    pid_file_path, stdout, stderr, log_file = setup_locations(
        process=WORKER_PROCESS_NAME,
        pid=args.pid,
        stdout=args.stdout,
        stderr=args.stderr,
        log=args.log_file,
    )
    if hasattr(celery_app.backend, 'ResultSession'):
        # Pre-create the database tables now, otherwise SQLA via Celery has a
        # race condition where one of the subprocesses can die with "Table
        # already exists" error, because SQLA checks for which tables exist,
        # then issues a CREATE TABLE, rather than doing CREATE TABLE IF NOT
        # EXISTS
        try:
            session = celery_app.backend.ResultSession()
            session.close()
        except sqlalchemy.exc.IntegrityError:
            # At least on postgres, trying to create a table that already exist
            # gives a unique constraint violation or the
            # "pg_type_typname_nsp_index" table. If this happens we can ignore
            # it, we raced to create the tables and lost.
            pass
    # Setup Celery worker
    options = [
        'worker',
        '-O',
        'fair',
        '--queues',
        args.queues,
        '--concurrency',
        args.concurrency,
        '--hostname',
        args.celery_hostname,
        '--loglevel',
        conf.get('logging', 'LOGGING_LEVEL'),
        '--pidfile',
        pid_file_path,
    ]
    if autoscale:
        options.extend(['--autoscale', autoscale])
    if args.without_mingle:
        options.append('--without-mingle')
    if args.without_gossip:
        options.append('--without-gossip')
    if conf.has_option("celery", "pool"):
        pool = conf.get("celery", "pool")
        options.extend(["--pool", pool])
        # Celery pools of type eventlet and gevent use greenlets, which
        # requires monkey patching the app:
        # https://eventlet.net/doc/patching.html#monkey-patch
        # Otherwise task instances hang on the workers and are never
        # executed.
        maybe_patch_concurrency(['-P', pool])
    if args.daemon:
        # Run Celery worker as daemon
        handle = setup_logging(log_file)
        with open(stdout, 'w+') as stdout_handle, open(stderr, 'w+') as stderr_handle:
            # BUG FIX: the old code did ``if args.umask: umask = args.umask``
            # and then referenced ``umask`` unconditionally, raising NameError
            # whenever no umask was supplied. Only pass ``umask`` to the
            # daemon context when one was actually given.
            daemon_kwargs = {
                'files_preserve': [handle],
                'stdout': stdout_handle,
                'stderr': stderr_handle,
            }
            if args.umask:
                # umask is an octal string such as "0o077"
                daemon_kwargs['umask'] = int(args.umask, 8)
            ctx = daemon.DaemonContext(**daemon_kwargs)
            with ctx:
                _run_worker(options=options, skip_serve_logs=skip_serve_logs)
    else:
        # Run Celery worker in the same process
        _run_worker(options=options, skip_serve_logs=skip_serve_logs)
@cli_utils.action_logging
def stop_worker(args):
    """Sends SIGTERM to Celery worker"""
    # An explicit --pid wins; otherwise use the default worker PID file path.
    if args.pid:
        pid_file_path = args.pid
    else:
        pid_file_path, _, _, _ = setup_locations(process=WORKER_PROCESS_NAME)
    pid = read_pid_from_pidfile(pid_file_path)
    # Terminate the worker process if one is recorded, then clean up the file.
    if pid:
        psutil.Process(pid).terminate()
    remove_existing_pidfile(pid_file_path)
| |
import httplib
import urllib
from Tkinter import *
import tkFileDialog
module = euzernaem = parsewerd = destination = ''
def getContents():
    """Log in to the library portal with the stored credentials, open a
    search session for the chosen module and hand the result page to
    downloadall()."""
    global module, euzernaem, parsewerd
    # Fetch the login page once just to obtain a session cookie.
    connection = httplib.HTTPSConnection('libbrs.nus.edu.sg')
    page = '/infogate/loginAction.do?execution=login'
    connection.request('GET', page)
    response = connection.getresponse()
    connection.close()
    raw_cookie = response.getheader('Set-Cookie')
    sessionid = raw_cookie[:raw_cookie.find(';')]
    cookie = sessionid
    print(cookie)
    headers = {
        "Content-Type" : "application/x-www-form-urlencoded",
        "Cookie" : cookie
    }
    headersGet = {
        "Cookie" : cookie
    }
    # Authenticate against the same login action.
    params = 'userid='+euzernaem+'&password='+parsewerd+'&domain=NUSSTU&key=blankid%2BRESULT%2BEXAM%2B'+module
    connection = httplib.HTTPSConnection('libbrs.nus.edu.sg')
    connection.request("POST", page, params, headers)
    response = connection.getresponse()
    response.read()  # drain the body; content itself is unused
    connection.close()
    # Touch the success page to activate the session on the :8080 host.
    connection = httplib.HTTPConnection('libbrs.nus.edu.sg:8080')
    page = '/infogate/jsp/login/success.jsp;jsessionid='+sessionid+'?exe=ResultList'
    connection.request("GET", page, params, headersGet)
    connection.close()
    # Run the actual search for the module's exam papers.
    connection = httplib.HTTPConnection('libbrs.nus.edu.sg:8080')
    page = '/infogate/searchAction.do?execution=ResultList'
    params = 'database=EXAM&searchstring='+module+'&d='
    connection.request("POST", page, params, headers)
    response = connection.getresponse()
    data = str(response.read())
    connection.close()
    downloadall(data, headers, headersGet)
def printNice(params):
    """Print a parameter dict, one 'key = value' pair per line."""
    for key, value in params.items():
        print(str(key) + ' = ' + str(value))
def downloadall(data, headers, headersGet):
    """Walk every search hit in the result-list page `data`, follow each hit's
    attached-PDF link and save the PDF into the chosen destination folder.

    headers    -- POST headers carrying the session cookie
    headersGet -- GET headers carrying the session cookie
    """
    params = getParams(data)
    # maxNo (scraped or defaulted) is the number of documents in the result set.
    maxDocIndex = int(params['maxNo'])
    params['maxDocIndex'] = params['maxNo']
    # Document indices on the portal are 1-based.
    for i in range(1,maxDocIndex+1):
        conn = httplib.HTTPConnection('libbrs.nus.edu.sg:8080')
        page = '/infogate/searchAction.do?execution=ViewSelectedResultListLong'
        params['preSelectedId'] = i
        params['exportids'] = i
        conn.request("POST", page, urllib.urlencode(params), headers)
        resp = conn.getresponse()
        data = resp.read()
        conn.close()
        data = str(data)
        # Detail pages without an attached PDF are skipped.
        pdfIndex = data.find('View attached PDF file')
        if pdfIndex == -1:
            continue
        # The PDF URL is the href immediately preceding the link text;
        # it is relative, so splice it onto the action's directory.
        pdfIndex = data.rfind('href=', 0, pdfIndex)
        openquotes = data.find('"', pdfIndex)
        closequotes = data.find('"', openquotes+1)
        page = page[:page.rfind('/')+1] + data[openquotes+1:closequotes]
        # The link's title attribute carries the document file name.
        titleIndex = data.find('title=', pdfIndex)
        if titleIndex == -1:
            continue
        openquotes = data.find('"', titleIndex)
        closequotes = data.find('"', openquotes+1)
        title = data[openquotes+1: closequotes]
        conn = httplib.HTTPConnection('libbrs.nus.edu.sg:8080')
        conn.request("GET", page, None, headersGet)
        resp = conn.getresponse()
        data = resp.read()
        conn.close()
        # Strip everything up to and including the 'file ' prefix in the title.
        title = title[title.find('file')+5:]
        print('Writing ' + title)
        f = open(destination + '/' + title, 'wb+')
        f.write(data)
        f.close()
    updateStatus("Done!", 'success')
def getParams(data):
    """Scrape the hidden form fields from a result-list page.

    Starts from a baseline of known-good defaults and overwrites them with
    every name="..."/value="..." attribute pair found between the tag that
    carries 'databasenamesasstring' and the first '<select' that follows it.
    """
    cursor = data.rfind('<', 0, data.find('databasenamesasstring'))
    end = data.find('<select', cursor)
    # Defaults observed from a live session; scraped values override these.
    params = {
        'databasenamesasstring' : 'Examination Papers Database',
        'searchid':'-6901505210342489183',
        'f':'list',
        'b':'1',
        'p':'1',
        'd':'EXAM',
        'u':'dummy',
        'r':'',
        'l':'20',
        'n':'',
        'nn':'',
        'historyid':'1',
        'maxDocIndex':'11',
        'preSelectedId':'1,', #id
        'maxNo':'11',
        'sPage1':'1',
        'pageNo1':'1',
        'exportids':'1', #id
        'sPage2':'1',
        'pageNo2':'1',
        'paraid[0]':'PGH2',
        'parashortname[0]':'FACU',
        'parashortname[1]':'SUBJ',
        'paravalue[0]':'',
        'paraid[1]':'PGH3',
        'paravalue[1]':'',
        'paraid[2]':'PGH5',
        'parashortname[2]':'CNAM',
        'paravalue[2]':''
    }
    def _next_quoted(from_pos):
        # Text between the next pair of double quotes after from_pos.
        first = data.find('"', from_pos, end)
        second = data.find('"', first + 1, end)
        return data[first + 1:second]
    cursor = data.find('name=', cursor, end)
    while cursor != -1:
        name = _next_quoted(cursor)
        cursor = data.find('value=', cursor, end)
        params[name] = _next_quoted(cursor)
        cursor = data.find('name=', cursor, end)
    return params
def askForDestination():
    """Ask for a download directory and mirror the choice into the entry field."""
    global destination
    destination = tkFileDialog.askdirectory(mustexist=False, parent=top, title="Choose a destination")
    destField.delete(0)
    destField.insert(0, destination)
def startDownload():
    """Copy the form fields into the module-level state and start downloading."""
    global module, euzernaem, parsewerd
    module = moduleField.get()
    euzernaem = usernameField.get()
    parsewerd = passwordField.get()
    getContents()
def updateStatus(msg, type='normal'):
    """Show msg in the status label; colour it green for 'success', red for
    'error', and leave the colour untouched otherwise."""
    statusLabel['text'] = msg
    colours = {'success': 'green', 'error': 'red'}
    if type in colours:
        statusLabel['fg'] = colours[type]
# Build the main window: a 3-column form (module code, credentials,
# destination), a status line and a start button.
# NOTE(review): textvariable= is passed plain strings here, not StringVar
# instances, so Tk ignores it; values are read back via .get() instead.
root = Tk()
root.title("NUS Past Year Exam Paper Downloader")
top = Frame(root)
top.grid(row=0, column=0, padx=20, pady=20)
top.columnconfigure(0, weight=1)
top.rowconfigure(0, weight=1)
moduleLabel = Label(top, text="Module Code:")
moduleLabel.grid(row=1, column=0)
moduleField = Entry(top, bd=2, textvariable=module)
moduleField.grid(row=1, column=1, columnspan=2)
usernameLabel = Label(top, text="NUSNET ID:")
usernameLabel.grid(row=2, column=0)
usernameField = Entry(top, bd=2, textvariable=euzernaem)
usernameField.grid(row=2, column=1, columnspan=2)
passwordLabel = Label(top, text="Password:")
passwordLabel.grid(row=3, column=0)
# show='*' masks the typed password.
passwordField = Entry(top, bd=2, show='*', textvariable=parsewerd)
passwordField.grid(row=3, column=1, columnspan=2)
destLabel = Label(top, text="Destination:")
destLabel.grid(row=4, column=0)
destField = Entry(top, bd=2, textvariable=destination)
destField.grid(row=4, column=1)
destButton = Button(top, text="...", command=askForDestination)
destButton.grid(row=4, column=2)
statusLabel = Label(top, text="^____^", justify=CENTER)
statusLabel.grid(row=5, columnspan=3)
startButton = Button(top, text="Start!", command=startDownload)
startButton.grid(row=6, columnspan=3)
root.mainloop()
| |
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY2/PY3 compatibility shim: the generated code below references
# ``__builtin__`` and ``long``; alias them on Python 3. (needs improved)
if six.PY3:
    import builtins as __builtin__
    long = int
elif six.PY2:
    import __builtin__
class state(PybindBase):
    """
  This class was auto-generated by the PythonClass plugin for PYANG
  from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/traffic-engineering/tlvs/tlv/link/sub-tlvs/sub-tlv/administrative-groups/admin-group/state. Each member element of
  the container is represented as a class variable - with a specific
  YANG type.

  YANG Description: State parameters relating to the administrative
groups being described for the link
  """
    # Slots restrict instances to exactly the generated leaves plus the
    # pyangbind bookkeeping attributes.
    __slots__ = ("_path_helper", "_extmethods", "__bit_index", "__set_")

    _yang_name = "state"
    _pybind_generated_by = "container"

    def __init__(self, *args, **kwargs):
        self._path_helper = False
        self._extmethods = False
        # bit-index leaf: uint8 further restricted to 0..31 (a bit position
        # within the 32-bit administrative-group field).
        self.__bit_index = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["0..31"]},
            ),
            is_leaf=True,
            yang_name="bit-index",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=False,
        )
        # set leaf: boolean defaulting to false.
        self.__set_ = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="set",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=False,
        )

        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-construct: the single positional argument must expose all
            # of this container's elements; only changed values are copied.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Path of this container within the data tree; falls back to the
        # absolute schema path when the node has no parent.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [
                "network-instances",
                "network-instance",
                "protocols",
                "protocol",
                "ospfv2",
                "areas",
                "area",
                "lsdb",
                "lsa-types",
                "lsa-type",
                "lsas",
                "lsa",
                "opaque-lsa",
                "traffic-engineering",
                "tlvs",
                "tlv",
                "link",
                "sub-tlvs",
                "sub-tlv",
                "administrative-groups",
                "admin-group",
                "state",
            ]

    def _get_bit_index(self):
        """
    Getter method for bit_index, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/administrative_groups/admin_group/state/bit_index (uint8)

    YANG Description: The index of the bit within the 32-bit administrative group field
of the Administrative Group sub-TLV of the Traffic Engineering LSA
    """
        return self.__bit_index

    def _set_bit_index(self, v, load=False):
        """
    Setter method for bit_index, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/administrative_groups/admin_group/state/bit_index (uint8)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_bit_index is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_bit_index() directly.

    YANG Description: The index of the bit within the 32-bit administrative group field
of the Administrative Group sub-TLV of the Traffic Engineering LSA
    """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            # Re-wrap the supplied value; raises if it violates the type.
            t = YANGDynClass(
                v,
                base=RestrictedClassType(
                    base_type=RestrictedClassType(
                        base_type=int,
                        restriction_dict={"range": ["0..255"]},
                        int_size=8,
                    ),
                    restriction_dict={"range": ["0..31"]},
                ),
                is_leaf=True,
                yang_name="bit-index",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="uint8",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """bit_index must be of a type compatible with uint8""",
                    "defined-type": "uint8",
                    "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..31']}), is_leaf=True, yang_name="bit-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
                }
            )

        self.__bit_index = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_bit_index(self):
        # Restore the leaf to an unchanged default instance.
        self.__bit_index = YANGDynClass(
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
                ),
                restriction_dict={"range": ["0..31"]},
            ),
            is_leaf=True,
            yang_name="bit-index",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=False,
        )

    def _get_set_(self):
        """
    Getter method for set_, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/administrative_groups/admin_group/state/set (boolean)

    YANG Description: Whether the bit is set within the administrative group field
    """
        return self.__set_

    def _set_set_(self, v, load=False):
        """
    Setter method for set_, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/administrative_groups/admin_group/state/set (boolean)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_set_ is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_set_() directly.

    YANG Description: Whether the bit is set within the administrative group field
    """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(
                v,
                base=YANGBool,
                default=YANGBool("false"),
                is_leaf=True,
                yang_name="set",
                parent=self,
                path_helper=self._path_helper,
                extmethods=self._extmethods,
                register_paths=True,
                namespace="http://openconfig.net/yang/network-instance",
                defining_module="openconfig-network-instance",
                yang_type="boolean",
                is_config=False,
            )
        except (TypeError, ValueError):
            raise ValueError(
                {
                    "error-string": """set_ must be of a type compatible with boolean""",
                    "defined-type": "boolean",
                    "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="set", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
                }
            )

        self.__set_ = t
        if hasattr(self, "_set"):
            self._set()

    def _unset_set_(self):
        self.__set_ = YANGDynClass(
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="set",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=False,
        )

    # Public read-only views of the leaves (state container: config false).
    bit_index = __builtin__.property(_get_bit_index)
    set_ = __builtin__.property(_get_set_)

    _pyangbind_elements = OrderedDict([("bit_index", bit_index), ("set_", set_)])
class state(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/traffic-engineering/tlvs/tlv/link/sub-tlvs/sub-tlv/administrative-groups/admin-group/state. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: State parameters relating to the administrative
groups being described for the link
"""
__slots__ = ("_path_helper", "_extmethods", "__bit_index", "__set_")
_yang_name = "state"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__bit_index = YANGDynClass(
base=RestrictedClassType(
base_type=RestrictedClassType(
base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
),
restriction_dict={"range": ["0..31"]},
),
is_leaf=True,
yang_name="bit-index",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="uint8",
is_config=False,
)
self.__set_ = YANGDynClass(
base=YANGBool,
default=YANGBool("false"),
is_leaf=True,
yang_name="set",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="boolean",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"ospfv2",
"areas",
"area",
"lsdb",
"lsa-types",
"lsa-type",
"lsas",
"lsa",
"opaque-lsa",
"traffic-engineering",
"tlvs",
"tlv",
"link",
"sub-tlvs",
"sub-tlv",
"administrative-groups",
"admin-group",
"state",
]
def _get_bit_index(self):
"""
Getter method for bit_index, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/administrative_groups/admin_group/state/bit_index (uint8)
YANG Description: The index of the bit within the 32-bit administrative group field
of the Administrative Group sub-TLV of the Traffic Engineering LSA
"""
return self.__bit_index
def _set_bit_index(self, v, load=False):
    """
    Setter method for bit_index, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/administrative_groups/admin_group/state/bit_index (uint8)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_bit_index is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_bit_index() directly.

    YANG Description: The index of the bit within the 32-bit administrative group field
    of the Administrative Group sub-TLV of the Traffic Engineering LSA
    """
    # Values produced by union types carry a _utype coercion helper; unwrap
    # to the concrete member type before re-validating below.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the supplied value so range restrictions are enforced:
        # uint8 storage (0..255) further restricted to bit positions 0..31.
        t = YANGDynClass(
            v,
            base=RestrictedClassType(
                base_type=RestrictedClassType(
                    base_type=int,
                    restriction_dict={"range": ["0..255"]},
                    int_size=8,
                ),
                restriction_dict={"range": ["0..31"]},
            ),
            is_leaf=True,
            yang_name="bit-index",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="uint8",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError(
            {
                "error-string": """bit_index must be of a type compatible with uint8""",
                "defined-type": "uint8",
                "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=int, restriction_dict={'range': ['0..255']}, int_size=8), restriction_dict={'range': ['0..31']}), is_leaf=True, yang_name="bit-index", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint8', is_config=False)""",
            }
        )

    self.__bit_index = t
    # Notify any registered change hook after the assignment.
    if hasattr(self, "_set"):
        self._set()
def _unset_bit_index(self):
    # Reset bit_index to a fresh, unchanged wrapped value (no default for
    # this leaf); mirrors the construction performed in __init__.
    self.__bit_index = YANGDynClass(
        base=RestrictedClassType(
            base_type=RestrictedClassType(
                base_type=int, restriction_dict={"range": ["0..255"]}, int_size=8
            ),
            restriction_dict={"range": ["0..31"]},
        ),
        is_leaf=True,
        yang_name="bit-index",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="uint8",
        is_config=False,
    )
def _get_set_(self):
    """
    Getter method for set_, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/administrative_groups/admin_group/state/set (boolean)

    YANG Description: Whether the bit is set within the administrative group field
    """
    # Trailing underscore avoids clashing with the _set() change hook.
    return self.__set_
def _set_set_(self, v, load=False):
    """
    Setter method for set_, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/traffic_engineering/tlvs/tlv/link/sub_tlvs/sub_tlv/administrative_groups/admin_group/state/set (boolean)

    If this variable is read-only (config: false) in the
    source YANG file, then _set_set_ is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_set_() directly.

    YANG Description: Whether the bit is set within the administrative group field
    """
    # Unwrap union-typed values to their concrete member type first.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap as a YANG boolean with default false.
        t = YANGDynClass(
            v,
            base=YANGBool,
            default=YANGBool("false"),
            is_leaf=True,
            yang_name="set",
            parent=self,
            path_helper=self._path_helper,
            extmethods=self._extmethods,
            register_paths=True,
            namespace="http://openconfig.net/yang/network-instance",
            defining_module="openconfig-network-instance",
            yang_type="boolean",
            is_config=False,
        )
    except (TypeError, ValueError):
        # Surface a structured error describing the expected YANG type.
        raise ValueError(
            {
                "error-string": """set_ must be of a type compatible with boolean""",
                "defined-type": "boolean",
                "generated-type": """YANGDynClass(base=YANGBool, default=YANGBool("false"), is_leaf=True, yang_name="set", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='boolean', is_config=False)""",
            }
        )

    self.__set_ = t
    # Notify any registered change hook after the assignment.
    if hasattr(self, "_set"):
        self._set()
def _unset_set_(self):
    # Reset the leaf to its YANG default (false); mirrors __init__.
    self.__set_ = YANGDynClass(
        base=YANGBool,
        default=YANGBool("false"),
        is_leaf=True,
        yang_name="set",
        parent=self,
        path_helper=self._path_helper,
        extmethods=self._extmethods,
        register_paths=True,
        namespace="http://openconfig.net/yang/network-instance",
        defining_module="openconfig-network-instance",
        yang_type="boolean",
        is_config=False,
    )
# Read-only properties: this is a state (config false) container, so only
# getters are exposed.  __builtin__.property is used explicitly by the
# pyangbind generator rather than the bare builtin name.
bit_index = __builtin__.property(_get_bit_index)
set_ = __builtin__.property(_get_set_)

# Ordered registry of the leaves in this container, used by _path/serialization.
_pyangbind_elements = OrderedDict([("bit_index", bit_index), ("set_", set_)])
| |
from __future__ import print_function
import numpy as np
import pandas as pd
import regreg.api as rr
from ...tests.flags import SET_SEED, SMALL_SAMPLES
from ...tests.instance import logistic_instance
from ...tests.decorators import (wait_for_return_value,
set_seed_iftrue,
set_sampling_params_iftrue,
register_report)
import selection.tests.reports as reports
from ...api import (randomization,
glm_group_lasso,
pairs_bootstrap_glm,
multiple_queries,
discrete_family,
projected_langevin,
glm_group_lasso_parametric,
glm_target)
from ..glm import (glm_parametric_covariance,
glm_nonparametric_bootstrap,
restricted_Mest,
set_alpha_matrix)
@register_report(['truth', 'active'])
@set_sampling_params_iftrue(SMALL_SAMPLES, ndraw=10, burnin=10)
@set_seed_iftrue(SET_SEED)
@wait_for_return_value()
def test_multiple_queries(s=3,
                          n=200,
                          p=20,
                          signal=7,
                          rho=0.1,
                          lam_frac=0.7,
                          nview=4,
                          ndraw=100, burnin=0,
                          bootstrap=True,
                          test='global'):
    """Pivot for randomized group-lasso selection over several queries.

    Fits `nview` independently randomized logistic group-lasso problems on a
    single logistic-regression instance, forms the union of their active
    sets and -- when that union strictly contains the true support -- samples
    a two-sided pivot for either the selected-zero coordinates
    (test='selected zeros') or the full selected vector (any other value).

    Returns [pivot], [False] on success, or None when screening fails (the
    wait_for_return_value decorator retries until a value is produced).

    NOTE(review): a second function with this exact name is defined later in
    this module and shadows this one at import time -- confirm which variant
    report() is meant to exercise.
    """
    #randomizer = randomization.laplace((p,), scale=1)
    randomizer = randomization.logistic((p,), scale=1)
    X, y, beta, _ = logistic_instance(n=n, p=p, s=s, rho=rho, signal=signal)

    nonzero = np.where(beta)[0]

    loss = rr.glm.logistic(X, y)
    epsilon = 1.

    # Theoretical lambda scaled by lam_frac: mean of the max absolute
    # inner products of X with symmetric binary noise.
    lam = lam_frac * np.mean(np.fabs(np.dot(X.T, np.random.binomial(1, 1. / 2, (n, 10000)))).max(0))
    W = np.ones(p)*lam
    #W[0] = 0 # use at least some unpenalized
    penalty = rr.group_lasso(np.arange(p),
                             weights=dict(zip(np.arange(p), W)), lagrange=1.)

    view = []
    for i in range(nview):
        view.append(glm_group_lasso(loss, epsilon, penalty, randomizer))

    mv = multiple_queries(view)
    mv.solve()

    # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin bool is the equivalent dtype on every NumPy version.
    active_union = np.zeros(p, bool)
    for i in range(nview):
        active_union += view[i].selection_variable['variables']

    nactive = np.sum(active_union)
    #print("nactive", nactive)

    if set(nonzero).issubset(np.nonzero(active_union)[0]):
        # Degenerate screening: nothing beyond the truth was selected.
        if nactive==s:
            return None

        active_set = np.nonzero(active_union)[0]

        if test == 'selected zeros':
            # Target only the falsely selected (truly zero) coordinates.
            inactive_selected = np.array([active_union[i] and i not in nonzero for i in range(p)])
            true_active = (beta != 0)
            reference = np.zeros(inactive_selected.sum())
            target_sampler, target_observed = glm_target(loss,
                                                         #true_active,
                                                         active_union,
                                                         mv,
                                                         subset=inactive_selected,
                                                         bootstrap=bootstrap,
                                                         reference=reference)
            test_stat = lambda x: np.linalg.norm(x-reference)
        else:
            # Global test centred at the true coefficients of the union.
            reference = beta[active_union]
            target_sampler, target_observed = glm_target(loss,
                                                         active_union,
                                                         mv,
                                                         bootstrap=bootstrap,
                                                         reference=reference)
            test_stat = lambda x: np.linalg.norm(x-beta[active_union])

        observed_test_value = test_stat(target_observed)
        pivot = target_sampler.hypothesis_test(test_stat,
                                               observed_test_value,
                                               alternative='twosided',
                                               ndraw=ndraw,
                                               burnin=burnin,
                                               parameter=reference)
        full_sample = target_sampler.sample(ndraw=ndraw,
                                            burnin=burnin,
                                            keep_opt=True)

        return [pivot], [False]
@register_report(['pvalue', 'active'])
@set_sampling_params_iftrue(SMALL_SAMPLES, ndraw=100, burnin=100)
@set_seed_iftrue(SET_SEED)
@wait_for_return_value(max_tries=200)
def test_parametric_covariance(ndraw=10000, burnin=2000):
    """P-value for two known-null coefficients with parametric covariance.

    Runs two independently randomized parametric group-lasso queries on one
    logistic instance, screens so the last two coefficients are unselected
    (hence truly null), forces them into the target, and tests them with a
    one-sided ('greater') hypothesis test.

    Returns [pval], [False], or None when screening fails (the decorator
    retries up to max_tries times).
    """
    s, n, p = 3, 120, 10

    randomizer = randomization.laplace((p,), scale=1)
    X, y, beta, _ = logistic_instance(n=n, p=p, s=s, rho=0, signal=12)

    nonzero = np.where(beta)[0]
    lam_frac = 1.

    loss = rr.glm.logistic(X, y)
    epsilon = 1.

    # Theoretical lambda: mean max absolute inner product with binary noise.
    lam = lam_frac * np.mean(np.fabs(np.dot(X.T, np.random.binomial(1, 1. / 2, (n, 10000)))).max(0))
    W = np.ones(p)*lam
    W[0] = 0 # use at least some unpenalized
    penalty = rr.group_lasso(np.arange(p),
                             weights=dict(zip(np.arange(p), W)), lagrange=1.)

    # first randomization
    M_est1 = glm_group_lasso_parametric(loss, epsilon, penalty, randomizer)
    # second randomization
    M_est2 = glm_group_lasso_parametric(loss, epsilon, penalty, randomizer)

    mv = multiple_queries([M_est1, M_est2])
    mv.solve()

    # Screening: both queries must leave the last two coordinates
    # unselected so they can serve as known nulls below.
    target = M_est1.selection_variable['variables'].copy()
    if target[-1] or M_est2.selection_variable['variables'][-1]:
        return None
    if target[-2] or M_est2.selection_variable['variables'][-2]:
        return None
    # we should check they are different sizes
    target[-2:] = 1

    if set(nonzero).issubset(np.nonzero(target)[0]):

        form_covariances = glm_parametric_covariance(loss)
        mv.setup_sampler(form_covariances)

        target_observed = restricted_Mest(loss, target)
        # Linear functional picking out the two forced-in null coordinates.
        linear_func = np.zeros((2,target_observed.shape[0]))
        linear_func[0,-1] = 1. # we know this one is null
        linear_func[1,-2] = 1. # also null

        target_observed = linear_func.dot(target_observed)
        target_sampler = mv.setup_target((target, linear_func), target_observed,
                                         parametric=True)

        test_stat = lambda x: np.linalg.norm(x)
        pval = target_sampler.hypothesis_test(test_stat,
                                              test_stat(target_observed),
                                              alternative='greater',
                                              ndraw=ndraw,
                                              burnin=burnin)

        return [pval], [False]
@register_report(['pvalue', 'active'])
@set_sampling_params_iftrue(SMALL_SAMPLES, ndraw=10, burnin=10)
@set_seed_iftrue(SET_SEED)
@wait_for_return_value()
def test_multiple_queries(s=3, n=200, p=20,
                          signal=7,
                          rho=0.1,
                          lam_frac=0.7,
                          nview=4,
                          ndraw=10000, burnin=2000,
                          bootstrap=True):
    """Pivot for the falsely selected coordinates after multi-query selection.

    Fits `nview` independently randomized logistic group-lasso problems,
    forms the union of their active sets and, when the union strictly
    contains the true support, tests the selected-but-truly-zero
    coordinates with a two-sided pivot.

    Returns [pivot], [False], or None when screening fails (decorator retries).

    NOTE(review): this definition shadows the earlier function of the same
    name in this module, so reports.reports['test_multiple_queries'] refers
    to this variant -- rename one of the two if both are wanted.
    """
    randomizer = randomization.laplace((p,), scale=1)
    X, y, beta, _ = logistic_instance(n=n, p=p, s=s, rho=rho, signal=signal)

    nonzero = np.where(beta)[0]
    lam_frac = 1.

    loss = rr.glm.logistic(X, y)
    epsilon = 1.

    # Theoretical lambda: mean max absolute inner product with binary noise.
    lam = lam_frac * np.mean(np.fabs(np.dot(X.T, np.random.binomial(1, 1. / 2, (n, 10000)))).max(0))
    W = np.ones(p)*lam
    W[0] = 0 # use at least some unpenalized
    penalty = rr.group_lasso(np.arange(p),
                             weights=dict(zip(np.arange(p), W)), lagrange=1.)

    view = []
    for i in range(nview):
        view.append(glm_group_lasso(loss, epsilon, penalty, randomizer))

    mv = multiple_queries(view)
    mv.solve()

    # FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin bool is the equivalent dtype on every NumPy version.
    active_union = np.zeros(p, bool)
    for i in range(nview):
        active_union += view[i].selection_variable['variables']

    nactive = np.sum(active_union)
    print("nactive", nactive)

    if set(nonzero).issubset(np.nonzero(active_union)[0]):
        # Degenerate screening: nothing beyond the truth was selected.
        if nactive==s:
            return None

        active_set = np.nonzero(active_union)[0]
        # Coordinates that were selected but are truly zero.
        inactive_selected = np.array([active_union[i] and i not in nonzero for i in range(p)])
        true_active = (beta != 0)
        reference = np.zeros(inactive_selected.sum())

        target_sampler, target_observed = glm_target(loss,
                                                     active_union,
                                                     mv,
                                                     subset=inactive_selected,
                                                     bootstrap=bootstrap,
                                                     reference=reference)

        test_stat = lambda x: np.linalg.norm(x)
        observed_test_value = test_stat(target_observed)

        full_sample = target_sampler.sample(ndraw=ndraw,
                                            burnin=burnin,
                                            keep_opt=True)
        pivot = target_sampler.hypothesis_test(test_stat,
                                               observed_test_value,
                                               alternative='twosided',
                                               ndraw=ndraw,
                                               burnin=burnin,
                                               parameter=reference)

        return [pivot], [False]
def report(niter=1, **kwargs):
    """Collect CLT and bootstrap runs of test_multiple_queries, plot pivots.

    Saves a two-in-one pivot plot to multiple_queries.pdf.

    NOTE(review): the incoming **kwargs are discarded -- they are
    immediately overwritten by the hard-coded dictionary below; confirm
    this override is intentional.
    """
    #kwargs = {'s':3, 'n':300, 'p':20, 'signal':7, 'nview':4, 'test': 'global'}
    kwargs = {'s': 3, 'n': 300, 'p': 20, 'signal': 7, 'nview': 1}
    # First pass: plug-in CLT covariance (bootstrap disabled).
    kwargs['bootstrap'] = False
    intervals_report = reports.reports['test_multiple_queries']
    CLT_runs = reports.collect_multiple_runs(intervals_report['test'],
                                             intervals_report['columns'],
                                             niter,
                                             reports.summarize_all,
                                             **kwargs)

    #fig = reports.pivot_plot(CLT_runs, color='b', label='CLT')
    fig = reports.pivot_plot_2in1(CLT_runs, color='b', label='CLT')

    # Second pass: bootstrap covariance, overlaid on the same figure.
    kwargs['bootstrap'] = True
    bootstrap_runs = reports.collect_multiple_runs(intervals_report['test'],
                                                   intervals_report['columns'],
                                                   niter,
                                                   reports.summarize_all,
                                                   **kwargs)

    #fig = reports.pivot_plot(bootstrap_runs, color='g', label='Bootstrap', fig=fig)
    fig = reports.pivot_plot_2in1(bootstrap_runs, color='g', label='Bootstrap', fig=fig)
    fig.savefig('multiple_queries.pdf') # will have both bootstrap and CLT on plot


if __name__ == "__main__":
    report()
| |
"""
Provides serialization for API responses.
See `DRF serializer documentation <http://www.django-rest-framework.org/api-guide/serializers/>`_
Used by the View classes api/views.py to serialize API responses as JSON or HTML.
See DEFAULT_RENDERER_CLASSES setting in core.settings.contrib for the enabled renderers.
"""
# -*- coding: utf-8 -*-
import logging
import django.core.exceptions
from django.contrib.auth.models import User
from django.db import transaction
from jobs.models import HDXExportRegion, Job, SavedFeatureSelection, validate_aoi, validate_mbtiles, PartnerExportRegion
from rest_framework import serializers
from rest_framework_gis import serializers as geo_serializers
from tasks.models import ExportRun, ExportTask
# Get an instance of a logger
LOG = logging.getLogger(__name__)
class UserSerializer(serializers.ModelSerializer):
    """Minimal read model for a Django auth User: username only."""
    class Meta:
        model = User
        fields = ('username', )
class ExportTaskSerializer(serializers.ModelSerializer):
    """Serializes a single ExportTask, including timing and output links."""
    class Meta:
        model = ExportTask
        fields = ('uid', 'name', 'status', 'started_at', 'finished_at',
                  'duration', 'filesize_bytes', 'download_urls')
class ExportRunSerializer(serializers.ModelSerializer):
    """Serializes an ExportRun with its nested tasks and owning user."""
    tasks = ExportTaskSerializer(many=True, read_only=True)
    # User is never writable through the API; it defaults to the requester.
    user = UserSerializer(
        read_only=True, default=serializers.CurrentUserDefault())

    class Meta:
        model = ExportRun
        lookup_field = 'uid'
        fields = ('uid', 'started_at', 'finished_at', 'duration',
                  'elapsed_time', 'user', 'size', 'status', 'tasks')
class ConfigurationSerializer(serializers.ModelSerializer):
    """Serializes a SavedFeatureSelection (stored YAML configuration)."""
    # User is never writable through the API; it defaults to the requester.
    user = UserSerializer(
        read_only=True, default=serializers.CurrentUserDefault())

    class Meta:
        model = SavedFeatureSelection
        fields = ('uid', 'name', 'description', 'yaml', 'public', 'user','pinned')
class JobGeomSerializer(serializers.ModelSerializer):
    """Since Job Geoms can be large, these are serialized separately,
    instead of nested within Jobs."""
    class Meta:
        model = Job
        fields = ('the_geom', )
class JobSerializer(serializers.ModelSerializer):
    """Serializes a Job.

    The full geometry (`the_geom`) is write-only; clients read back the
    smaller `simplified_geom` instead (see extra_kwargs below).
    """
    # User is never writable through the API; it defaults to the requester.
    user = UserSerializer(
        read_only=True, default=serializers.CurrentUserDefault())

    class Meta:
        model = Job
        fields = ('id', 'uid', 'user', 'name', 'description', 'event',
                  'export_formats', 'published', 'feature_selection',
                  'buffer_aoi', 'osma_link', 'created_at', 'area', 'the_geom',
                  'simplified_geom', 'mbtiles_source', 'mbtiles_minzoom', 'mbtiles_maxzoom','pinned','unfiltered')
        extra_kwargs = {
            'the_geom': {
                'write_only': True
            },
            'simplified_geom': {
                'read_only': True
            }
        }

    def validate(self,data):
        """Cross-field validation: AOI geometry and MBTiles settings.

        Re-raises Django validation errors as DRF ValidationErrors keyed
        by the offending field.

        NOTE(review): data['the_geom'] raises KeyError when the field is
        absent (e.g. a partial update) -- confirm this serializer is never
        used with partial=True.
        """
        try:
            validate_aoi(data['the_geom'])
        except django.core.exceptions.ValidationError as e:
            raise serializers.ValidationError({'the_geom':e.messages[0]})
        try:
            validate_mbtiles(data)
        except django.core.exceptions.ValidationError as e:
            raise serializers.ValidationError({'mbtiles_source': e.messages[0]})
        return data
def validate_model(model):
    """Run Django's full_clean() on *model*, translating any failure into
    a DRF ValidationError keyed by field name."""
    try:
        model.full_clean()
    except django.core.exceptions.ValidationError as err:
        raise serializers.ValidationError(err.message_dict)
class PartnerExportRegionListSerializer(serializers.ModelSerializer):
    """List view of partner export regions; omits the full geometry,
    exposing only the simplified geometry."""
    export_formats = serializers.ListField()
    feature_selection = serializers.CharField()
    simplified_geom = geo_serializers.GeometryField(required=False)
    name = serializers.CharField()

    class Meta: # noqa
        model = PartnerExportRegion
        fields = ('id', 'feature_selection',
                  'schedule_period', 'schedule_hour', 'export_formats',
                  'name', 'last_run', 'next_run',
                  'simplified_geom', 'job_uid', 'last_size','group_name')
class PartnerExportRegionSerializer(serializers.ModelSerializer): # noqa
    """Detail serializer for a partner export region.

    Internally a region is a hidden Job plus a PartnerExportRegion row, but
    the API presents them as one entity: create()/update() split the
    validated data across both models and save them in one transaction.
    """
    export_formats = serializers.ListField()
    feature_selection = serializers.CharField()
    simplified_geom = geo_serializers.GeometryField(required=False)
    the_geom = geo_serializers.GeometryField()
    name = serializers.CharField()
    event = serializers.CharField()
    description = serializers.CharField()

    class Meta: # noqa
        model = PartnerExportRegion
        fields = ('id', 'feature_selection',
                  'schedule_period', 'schedule_hour', 'export_formats',
                  'name', 'event', 'description', 'last_run', 'next_run',
                  'simplified_geom', 'job_uid',
                  'the_geom','group','planet_file')
        extra_kwargs = {
            'simplified_geom': {
                'read_only': True
            },
            'the_geom': {
                'write_only': True
            }
        }

    def create(self, validated_data): # noqa
        # Copy only the wanted keys that are actually present.
        def slice_dict(in_dict, wanted_keys):
            return dict((k, in_dict[k]) for k in wanted_keys if k in in_dict)

        # Job-side fields.
        job_dict = slice_dict(validated_data, [
            'the_geom', 'export_formats', 'feature_selection',
        ])
        job_dict['user'] = self.context['request'].user
        job_dict['name'] = validated_data.get('name')
        job_dict['event'] = validated_data.get('event') or ""
        job_dict['description'] = validated_data.get('description') or ""

        # Region-side fields.
        region_dict = slice_dict(validated_data, [
            'schedule_period', 'schedule_hour','group','planet_file'
        ])

        # The backing Job is hidden from normal job listings and exempt
        # from the usual extent limits.
        job = Job(**job_dict)
        job.hidden = True
        job.unlimited_extent = True
        validate_model(job)

        # check on creation that i'm a member of the group
        if not self.context['request'].user.groups.filter(name=region_dict['group'].name).exists():
            raise serializers.ValidationError({'group':'You are not a member of this group.'})

        # Save job + region atomically so we never get an orphaned job.
        with transaction.atomic():
            job.save()

            region_dict['job'] = job

            region = PartnerExportRegion(**region_dict)
            validate_model(region)
            region.save()

        return region

    def update(self, instance, validated_data): # noqa
        # Assign only the keys present in the validated data.
        def update_attrs(model, v_data, keys):
            for key in keys:
                if key in v_data:
                    setattr(model, key, v_data[key])

        # if re-assigning, check group membership
        # NOTE(review): validated_data['group'] raises KeyError when group
        # is omitted (partial update) -- confirm group is always supplied.
        if not self.context['request'].user.groups.filter(name= validated_data['group'].name).exists():
            raise serializers.ValidationError({'group':'You are not a member of this group.'})

        job = instance.job
        update_attrs(job, validated_data, [
            'the_geom', 'export_formats', 'feature_selection'
        ])
        job.name = validated_data.get('name')
        job.event = validated_data.get('event') or ""
        job.description = validated_data.get('description') or ""
        validate_model(job)

        update_attrs(instance, validated_data, [
            'schedule_period', 'schedule_hour', 'group','planet_file'
        ])
        validate_model(instance)

        # Persist both models atomically.
        with transaction.atomic():
            instance.save()
            job.save()

        return instance
class HDXExportRegionListSerializer(serializers.ModelSerializer): # noqa
    """The list serializer does not expose the Geom, as it can be large;
    only the simplified geometry is returned.

    FIX: 'dataset_prefix' was listed twice in Meta.fields; the duplicate
    entry has been removed (harmless to DRF, but misleading).
    """
    export_formats = serializers.ListField()
    dataset_prefix = serializers.CharField()
    feature_selection = serializers.CharField()
    simplified_geom = geo_serializers.GeometryField(required=False)
    name = serializers.CharField()
    buffer_aoi = serializers.BooleanField()

    class Meta: # noqa
        model = HDXExportRegion
        fields = ('id', 'dataset_prefix', 'datasets', 'feature_selection',
                  'schedule_period', 'schedule_hour', 'export_formats',
                  'locations', 'name', 'last_run', 'next_run',
                  'simplified_geom', 'job_uid', 'license',
                  'subnational', 'extra_notes', 'is_private', 'buffer_aoi', 'last_size')
class HDXExportRegionSerializer(serializers.ModelSerializer): # noqa
    """Internally, an export region is a job model + an export region model
    but to API users, it appears as a single entity.

    create()/update() split the validated data across the hidden Job and
    the HDXExportRegion and persist both inside one transaction.

    FIX: 'dataset_prefix' was listed twice in Meta.fields; the duplicate
    entry has been removed (harmless to DRF, but misleading).
    """
    export_formats = serializers.ListField()
    dataset_prefix = serializers.CharField()
    feature_selection = serializers.CharField()
    simplified_geom = geo_serializers.GeometryField(required=False)
    the_geom = geo_serializers.GeometryField()
    name = serializers.CharField()
    buffer_aoi = serializers.BooleanField()

    class Meta: # noqa
        model = HDXExportRegion
        fields = ('id', 'dataset_prefix', 'datasets', 'feature_selection',
                  'schedule_period', 'schedule_hour', 'export_formats',
                  'locations', 'name', 'last_run', 'next_run',
                  'simplified_geom', 'job_uid', 'license',
                  'subnational', 'extra_notes', 'is_private', 'buffer_aoi',
                  'the_geom','planet_file')
        extra_kwargs = {
            'simplified_geom': {
                'read_only': True
            },
            'the_geom': {
                'write_only': True
            }
        }

    def create(self, validated_data): # noqa
        # Copy only the wanted keys that are actually present.
        def slice_dict(in_dict, wanted_keys):
            return dict((k, in_dict[k]) for k in wanted_keys if k in in_dict)

        # Job-side fields; the HDX dataset prefix doubles as the job name.
        job_dict = slice_dict(validated_data, [
            'the_geom', 'export_formats', 'feature_selection', 'buffer_aoi'
        ])
        job_dict['user'] = self.context['request'].user
        job_dict['name'] = validated_data.get('dataset_prefix')
        job_dict['description'] = validated_data.get('name')

        # Region-side fields.
        region_dict = slice_dict(validated_data, [
            'extra_notes', 'is_private', 'locations', 'license',
            'schedule_period', 'schedule_hour', 'subnational','planet_file'
        ])

        # The backing Job is hidden from normal job listings and exempt
        # from the usual extent limits.
        job = Job(**job_dict)
        job.hidden = True
        job.unlimited_extent = True
        validate_model(job)

        # Save job + region atomically so we never get an orphaned job.
        with transaction.atomic():
            job.save()

            region_dict['job'] = job

            region = HDXExportRegion(**region_dict)
            validate_model(region)
            region.save()

        return region

    def update(self, instance, validated_data): # noqa
        # Assign only the keys present in the validated data.
        def update_attrs(model, v_data, keys):
            for key in keys:
                if key in v_data:
                    setattr(model, key, v_data[key])

        job = instance.job
        update_attrs(job, validated_data, [
            'the_geom', 'export_formats', 'feature_selection', 'buffer_aoi'
        ])
        job.name = validated_data.get('dataset_prefix')
        job.description = validated_data.get('name')
        validate_model(job)

        update_attrs(instance, validated_data, [
            'extra_notes', 'is_private', 'locations', 'license',
            'schedule_period', 'schedule_hour', 'subnational', 'planet_file'
        ])
        validate_model(instance)

        # Persist both models atomically.
        with transaction.atomic():
            instance.save()
            job.save()

        return instance
| |
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various function for graph rerouting."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.graph_editor import subgraph
from tensorflow.contrib.graph_editor import util
from tensorflow.python.framework import ops as tf_ops
# Names re-exported as the public API of this rerouting module.
__all__ = [
    "swap_ts",
    "reroute_a2b_ts",
    "reroute_b2a_ts",
    "swap_inputs",
    "reroute_a2b_inputs",
    "reroute_b2a_inputs",
    "swap_outputs",
    "reroute_a2b_outputs",
    "reroute_b2a_outputs",
    "swap",
    "reroute_a2b",
    "reroute_b2a",
    "remove_control_inputs",
    "add_control_inputs",
]
def _check_ts_compatibility(ts0, ts1):
  """Make sure the shape and dtype of the two tensor's lists are compatible.

  Args:
    ts0: an object convertible to a list of `tf.Tensor`.
    ts1: an object convertible to a list of `tf.Tensor`.
  Raises:
    ValueError: if any pair of tensors (same index in ts0 and ts1) have
      a dtype or a shape which is not compatible.
  """
  list0 = util.make_list_of_t(ts0)
  list1 = util.make_list_of_t(ts1)
  if len(list0) != len(list1):
    raise ValueError("ts0 and ts1 have different sizes: {} != {}".format(
        len(list0), len(list1)))
  for tensor0, tensor1 in zip(list0, list1):
    # check dtype
    if not tensor0.dtype.is_compatible_with(tensor1.dtype):
      raise ValueError("Dtypes {} and {} are not compatible.".format(
          tensor0.dtype, tensor1.dtype))
    # check shape
    if not tensor0.get_shape().is_compatible_with(tensor1.get_shape()):
      raise ValueError("Shapes {} and {} are not compatible.".format(
          tensor0.get_shape(), tensor1.get_shape()))
class _RerouteMode(object):
  """Enums for reroute's mode.

  swap: the end of tensors a and b are swapped.
  a2b: the end of the tensor a are also rerouted to the end of the tensor b
    (the end of b is left dangling).
  b2a: the end of the tensor b are also rerouted to the end of the tensor a
    (the end of a is left dangling).
  """
  swap, a2b, b2a = range(3)

  @classmethod
  def check(cls, mode):
    """Check swap mode.

    Args:
      mode: an integer representing one of the modes.
    Returns:
      A tuple `(a2b, b2a)` boolean indicating what rerouting needs doing.
    Raises:
      ValueError: if mode is outside the enum range.
    """
    # Equality comparisons in the same order as the original if/elif chain.
    for candidate, directions in ((cls.swap, (True, True)),
                                  (cls.b2a, (False, True)),
                                  (cls.a2b, (True, False))):
      if mode == candidate:
        return directions
    raise ValueError("Unknown _RerouteMode: {}".format(mode))
def _reroute_t(t0, t1, consumers1, can_modify=None, cannot_modify=None):
  """Reroute the end of the tensors (t0,t1).

  Warning: this function is directly manipulating the internals of the
  `tf.Graph`.

  Args:
    t0: a tf.Tensor.
    t1: a tf.Tensor.
    consumers1: The consumers of t1 which needs to be rerouted.
    can_modify: iterable of operations which can be modified. Any operation
      outside within_ops will be left untouched by this function.
    cannot_modify: iterable of operations which cannot be modified.
      Any operation within cannot_modify will be left untouched by this
      function.
  Returns:
    The number of individual modifications made by the function.
  """
  nb_update_inputs = 0
  # Restrict the consumer set in place, exactly as before.
  if can_modify is not None:
    consumers1 &= can_modify
  if cannot_modify is not None:
    consumers1 -= cannot_modify
  # For every consumer, record which input slots currently hold t1 *before*
  # any rewiring happens, so that _update_input cannot invalidate the scan.
  slots_of_t1 = {
      op: [index for index, tensor in enumerate(op.inputs) if tensor is t1]
      for op in consumers1
  }
  for op in consumers1:
    for slot in slots_of_t1[op]:
      op._update_input(slot, t0)  # pylint: disable=protected-access
      nb_update_inputs += 1
  return nb_update_inputs
def _reroute_ts(ts0, ts1, mode, can_modify=None, cannot_modify=None):
  """Reroute the end of the tensors in each pair (t0,t1) in ts0 x ts1.

  This function is the back-bone of the Graph-Editor. It is essentially a thin
  wrapper on top of the tf.Operation._update_input.

  Given a pair of tensor t0, t1 in ts0 x ts1, this function re-route the end
  of t0 and t1 in three possible ways:
  1) The reroute mode is "a<->b" or "b<->a": the tensors' end are swapped. After
  this operation, the previous consumers of t0 are now consumers of t1 and
  vice-versa.
  2) The reroute mode is "a->b": the tensors' end of t0 are re-routed to the
  tensors's end of t1 (which are left dangling). After this operation, the
  previous consumers of t0 are still consuming t0 but the previous consumers of
  t1 are not also consuming t0. The tensor t1 has no consumer.
  3) The reroute mode is "b->a": this mode is the symmetric of the "a->b" mode.

  Note that this function is re-routing the end of two tensors, not the start.
  Re-routing the start of two tensors is not supported by this library. The
  reason for that is the following: TensorFlow, by design, creates a strong bond
  between an op and its output tensor. This Graph editor follows this design and
  treats an operation A and its generating tensors {t_i} as an entity which
  cannot be broken. In other words, an op cannot be detached from any of its
  output tensors, ever. But it is possible to detach an op from its input
  tensors, which is what this function concerns itself with.

  Warning: this function is directly manipulating the internals of the tf.Graph.

  Args:
    ts0: an object convertible to a list of `tf.Tensor`.
    ts1: an object convertible to a list of `tf.Tensor`.
    mode: what to do with those tensors: "a<->b" or "b<->a" for swapping and
      "a->b" or "b->a" for one direction re-routing.
    can_modify: iterable of operations which can be modified. Any operation
      outside within_ops will be left untouched by this function.
    cannot_modify: iterable of operations which cannot be modified.
      Any operation within cannot_modify will be left untouched by this
      function.
  Returns:
    The number of individual modifications made by the function.
  Raises:
    TypeError: if `ts0` or `ts1` cannot be converted to a list of `tf.Tensor`.
    TypeError: if `can_modify` or `cannot_modify` is not `None` and cannot be
      converted to a list of `tf.Operation`.
  """
  a2b, b2a = _RerouteMode.check(mode)
  ts0 = util.make_list_of_t(ts0)
  ts1 = util.make_list_of_t(ts1)
  _check_ts_compatibility(ts0, ts1)
  if cannot_modify is not None:
    cannot_modify = frozenset(util.make_list_of_op(cannot_modify))
  if can_modify is not None:
    can_modify = frozenset(util.make_list_of_op(can_modify))
  nb_update_inputs = 0
  precomputed_consumers = []
  # precompute consumers to avoid issue with repeated tensors:
  for t0, t1 in zip(ts0, ts1):
    consumers0 = set(t0.consumers())
    consumers1 = set(t1.consumers())
    precomputed_consumers.append((consumers0, consumers1))
  for t0, t1, consumers in zip(ts0, ts1, precomputed_consumers):
    if t0 is t1:
      continue  # Silently ignore identical tensors.
    consumers0, consumers1 = consumers
    if a2b:
      nb_update_inputs += _reroute_t(t0, t1, consumers1, can_modify,
                                     cannot_modify)
    if b2a:
      nb_update_inputs += _reroute_t(t1, t0, consumers0, can_modify,
                                     cannot_modify)
  return nb_update_inputs
def swap_ts(ts0, ts1, can_modify=None, cannot_modify=None):
  """For each tensor's pair, swap the end of (t0,t1).

      B0 B1     B0 B1
      |  |  =>   X
      A0 A1     A0 A1

  Args:
    ts0: an object convertible to a list of `tf.Tensor`.
    ts1: an object convertible to a list of `tf.Tensor`.
    can_modify: iterable of operations which can be modified. Any operation
      outside within_ops will be left untouched by this function.
    cannot_modify: iterable of operations which cannot be modified.
      Any operation within cannot_modify will be left untouched by this
      function.
  Returns:
    The number of individual modifications made by the function.
  Raises:
    TypeError: if ts0 or ts1 cannot be converted to a list of tf.Tensor.
    TypeError: if can_modify or cannot_modify is not None and cannot be
      converted to a list of tf.Operation.
  """
  # Bidirectional reroute: both a2b and b2a rewirings are applied.
  return _reroute_ts(ts0, ts1, _RerouteMode.swap, can_modify, cannot_modify)
def reroute_a2b_ts(ts0, ts1, can_modify=None, cannot_modify=None):
  """For each tensor's pair, replace the end of t1 by the end of t0.

      B0 B1     B0 B1
      |  |  =>  |/
      A0 A1     A0 A1

  The end of the tensors in ts1 are left dangling.

  Args:
    ts0: an object convertible to a list of `tf.Tensor`.
    ts1: an object convertible to a list of `tf.Tensor`.
    can_modify: iterable of operations which can be modified. Any operation
      outside within_ops will be left untouched by this function.
    cannot_modify: iterable of operations which cannot be modified. Any
      operation within cannot_modify will be left untouched by this function.
  Returns:
    The number of individual modifications made by the function.
  Raises:
    TypeError: if ts0 or ts1 cannot be converted to a list of tf.Tensor.
    TypeError: if can_modify or cannot_modify is not None and cannot be
      converted to a list of tf.Operation.
  """
  # One-directional reroute: only a2b rewiring is applied.
  return _reroute_ts(ts0, ts1, _RerouteMode.a2b, can_modify, cannot_modify)
def reroute_b2a_ts(ts0, ts1, can_modify=None, cannot_modify=None):
  r"""For each tensor's pair, replace the end of t0 by the end of t1.

      B0 B1     B0 B1
      |  |  =>   \|
      A0 A1     A0 A1

  The end of the tensors in ts0 are left dangling.

  Args:
    ts0: an object convertible to a list of `tf.Tensor`.
    ts1: an object convertible to a list of `tf.Tensor`.
    can_modify: iterable of operations which can be modified. Any operation
      outside within_ops will be left untouched by this function.
    cannot_modify: iterable of operations which cannot be modified.
      Any operation within cannot_modify will be left untouched by this
      function.
  Returns:
    The number of individual modifications made by the function.
  Raises:
    TypeError: if ts0 or ts1 cannot be converted to a list of tf.Tensor.
    TypeError: if can_modify or cannot_modify is not None and cannot be
      converted to a list of tf.Operation.
  """
  # One-directional reroute: only b2a rewiring is applied.
  return _reroute_ts(ts0, ts1, _RerouteMode.b2a, can_modify, cannot_modify)
def _reroute_sgv_remap(sgv0, sgv1, mode):
    """Remap in place the inputs of two subgraph views to mimic the reroute.

    This function is meant to be used by reroute_inputs only.

    Args:
        sgv0: the first subgraph to have its inputs remapped.
        sgv1: the second subgraph to have its inputs remapped.
        mode: reroute mode, see _reroute_ts(...).
    Raises:
        TypeError: if svg0 or svg1 are not SubGraphView.
        ValueError: if sgv0 and sgv1 do not belong to the same graph.
    """
    a2b, b2a = _RerouteMode.check(mode)
    if not isinstance(sgv0, subgraph.SubGraphView):
        raise TypeError("Expected a SubGraphView, got {}".format(type(sgv0)))
    if not isinstance(sgv1, subgraph.SubGraphView):
        raise TypeError("Expected a SubGraphView, got {}".format(type(sgv1)))
    util.check_graphs(sgv0, sgv1)
    # Work on copies; the results are committed back onto the caller's views
    # at the very end via _assign_from.
    sgv0_ = sgv0.copy()
    sgv1_ = sgv1.copy()
    # pylint: disable=protected-access
    if a2b and b2a:
        # Swap mode: exchange the input and passthrough tensor lists.
        (sgv0_._input_ts, sgv1_._input_ts) = (sgv1_._input_ts, sgv0_._input_ts)
        (sgv0_._passthrough_ts, sgv1_._passthrough_ts) = (sgv1_._passthrough_ts,
                                                          sgv0_._passthrough_ts)
    elif a2b:
        # a2b: sgv1 takes over (copies of) sgv0's inputs and passthroughs.
        sgv1_._input_ts = sgv0_._input_ts[:]
        sgv1_._passthrough_ts = sgv0_._passthrough_ts[:]
    elif b2a:
        # b2a: sgv0 takes over (copies of) sgv1's inputs and passthroughs.
        sgv0_._input_ts = sgv1_._input_ts[:]
        sgv0_._passthrough_ts = sgv1_._passthrough_ts[:]
    # pylint: enable=protected-access

    # Update the passthrough outputs as well.
    def update_passthrough_outputs(a, b):
        # Any output of b that was a passthrough of a must now point to b's
        # input at the matching position.
        # pylint: disable=protected-access
        for i, t in enumerate(b._output_ts):
            if t in a._passthrough_ts:
                ii = a._input_ts.index(t)
                b._output_ts[i] = b._input_ts[ii]
        # pylint: enable=protected-access

    if a2b:
        update_passthrough_outputs(sgv0_, sgv1_)
    if b2a:
        update_passthrough_outputs(sgv1_, sgv0_)
    # in-place
    # pylint: disable=protected-access
    sgv0._assign_from(sgv0_)
    sgv1._assign_from(sgv1_)
    # pylint: enable=protected-access
def _reroute_sgv_inputs(sgv0, sgv1, mode):
    """Re-route all the inputs of two subgraphs.

    Args:
        sgv0: the first subgraph to have its inputs swapped. This argument is
            converted to a subgraph using the same rules than the function
            subgraph.make_view.
        sgv1: the second subgraph to have its inputs swapped. This argument is
            converted to a subgraph using the same rules than the function
            subgraph.make_view.
        mode: reroute mode, see _reroute_ts(...).
    Returns:
        A tuple `(sgv0, sgv1)` of subgraph views with their inputs swapped.
        Note that the function argument sgv0 and sgv1 are also modified in place.
    Raises:
        StandardError: if sgv0 or sgv1 cannot be converted to a SubGraphView
            using the same rules than the function subgraph.make_view.
    """
    sgv0 = subgraph.make_view(sgv0)
    sgv1 = subgraph.make_view(sgv1)
    util.check_graphs(sgv0, sgv1)
    # Only the ops belonging to either view may be rewired.
    can_modify = sgv0.ops + sgv1.ops
    # also allow consumers of passthrough to be modified:
    can_modify += util.get_consuming_ops(sgv0.passthroughs)
    can_modify += util.get_consuming_ops(sgv1.passthroughs)
    _reroute_ts(sgv0.inputs, sgv1.inputs, mode, can_modify=can_modify)
    # Keep the views' own input/passthrough/output bookkeeping consistent with
    # the graph mutation performed above.
    _reroute_sgv_remap(sgv0, sgv1, mode)
    return sgv0, sgv1
def _reroute_sgv_outputs(sgv0, sgv1, mode):
    """Re-route all the outputs of two operations.

    Args:
        sgv0: the first subgraph to have its outputs swapped. This argument is
            converted to a subgraph using the same rules than the function
            subgraph.make_view.
        sgv1: the second subgraph to have its outputs swapped. This argument is
            converted to a subgraph using the same rules than the function
            subgraph.make_view.
        mode: reroute mode, see _reroute_ts(...).
    Returns:
        A tuple `(sgv0, sgv1)` of subgraph views with their outputs swapped.
        Note that the function argument sgv0 and sgv1 are also modified in place.
    Raises:
        StandardError: if sgv0 or sgv1 cannot be converted to a SubGraphView
            using the same rules than the function subgraph.make_view.
    """
    sgv0 = subgraph.make_view(sgv0)
    sgv1 = subgraph.make_view(sgv1)
    util.check_graphs(sgv0, sgv1)
    # Unlike the inputs case, consumers of the views' outputs live OUTSIDE the
    # views, so instead of whitelisting we blacklist the views' own ops.
    cannot_modify = sgv0.ops + sgv1.ops
    _reroute_ts(sgv0.outputs, sgv1.outputs, mode, cannot_modify=cannot_modify)
    return sgv0, sgv1
def _reroute_sgv(sgv0, sgv1, mode):
    """Re-route both the inputs and the outputs of the two subgraph views.

    This involves swapping all the inputs/outputs of the two subgraph views.

    Args:
        sgv0: the first subgraph to be swapped. This argument is converted to a
            subgraph using the same rules than the function subgraph.make_view.
        sgv1: the second subgraph to be swapped. This argument is converted to a
            subgraph using the same rules than the function subgraph.make_view.
        mode: reroute mode, see _reroute_ts(...).
    Returns:
        A tuple `(sgv0, sgv1)` of subgraph views with their outputs and inputs
        swapped.
        Note that the function argument sgv0 and sgv1 are also modified in place.
    Raises:
        StandardError: if sgv0 or sgv1 cannot be converted to a SubGraphView
            using the same rules than the function subgraph.make_view.
    """
    # NOTE(review): outputs are rerouted before inputs; preserve this order
    # unless the interaction between the two steps has been verified.
    _reroute_sgv_outputs(sgv0, sgv1, mode)
    _reroute_sgv_inputs(sgv0, sgv1, mode)
    return sgv0, sgv1
def swap_inputs(sgv0, sgv1):
    """Swap all the inputs of sgv0 and sgv1 (see _reroute_sgv_inputs)."""
    return _reroute_sgv_inputs(sgv0, sgv1, _RerouteMode.swap)


def reroute_a2b_inputs(sgv0, sgv1):
    """Re-route all the inputs of sgv0 to sgv1 (see _reroute_sgv_inputs)."""
    return _reroute_sgv_inputs(sgv0, sgv1, _RerouteMode.a2b)


def reroute_b2a_inputs(sgv0, sgv1):
    """Re-route all the inputs of sgv1 to sgv0 (see _reroute_sgv_inputs)."""
    return _reroute_sgv_inputs(sgv0, sgv1, _RerouteMode.b2a)


def swap_outputs(sgv0, sgv1):
    """Swap all the outputs of sgv0 and sgv1 (see _reroute_sgv_outputs)."""
    return _reroute_sgv_outputs(sgv0, sgv1, _RerouteMode.swap)


def reroute_a2b_outputs(sgv0, sgv1):
    """Re-route all the outputs of sgv0 to sgv1 (see _reroute_sgv_outputs)."""
    return _reroute_sgv_outputs(sgv0, sgv1, _RerouteMode.a2b)


def reroute_b2a_outputs(sgv0, sgv1):
    """Re-route all the outputs of sgv1 to sgv0 (see _reroute_sgv_outputs)."""
    return _reroute_sgv_outputs(sgv0, sgv1, _RerouteMode.b2a)


def swap(sgv0, sgv1):
    """Swap the inputs and outputs of sgv0 and sgv1 (see _reroute_sgv)."""
    return _reroute_sgv(sgv0, sgv1, _RerouteMode.swap)


def reroute_a2b(sgv0, sgv1):
    """Re-route the inputs and outputs of sgv0 to sgv1 (see _reroute_sgv)."""
    return _reroute_sgv(sgv0, sgv1, _RerouteMode.a2b)


def reroute_b2a(sgv0, sgv1):
    """Re-route the inputs and outputs of sgv1 to sgv0 (see _reroute_sgv)."""
    return _reroute_sgv(sgv0, sgv1, _RerouteMode.b2a)
def remove_control_inputs(op, cops):
    """Remove the control inputs cops from op.

    Warning: this function is directly manipulating the internals of the
    `tf.Graph`.

    Args:
        op: a `tf.Operation` from which to remove the control inputs.
        cops: an object convertible to a list of `tf.Operation`.
    Raises:
        TypeError: if op is not a `tf.Operation`.
        ValueError: if any cop in cops is not a control input of op.
    """
    if not isinstance(op, tf_ops.Operation):
        # Bug fix: the message was previously passed as a second positional
        # argument to TypeError instead of being formatted into the string.
        raise TypeError("Expected a tf.Operation, got: {}".format(type(op)))
    cops = util.make_list_of_op(cops, allow_graph=False)
    for cop in cops:
        if cop not in op.control_inputs:
            # Bug fix: op.name and cop.name were swapped in the message; it is
            # cop that is expected to be a control input of op.
            raise ValueError("{} is not a control_input of {}".format(cop.name,
                                                                      op.name))
    # pylint: disable=protected-access
    op._control_inputs = [ci for ci in op._control_inputs if ci not in cops]
    op._recompute_node_def()
    # pylint: enable=protected-access
def add_control_inputs(op, cops):
    """Add the control inputs cops to op.

    Warning: this function is directly manipulating the internals of the
    tf.Graph.

    Args:
        op: a tf.Operation to which the control inputs are added.
        cops: an object convertible to a list of `tf.Operation`.
    Raises:
        TypeError: if op is not a tf.Operation
        ValueError: if any cop in cops is already a control input of op.
    """
    if not isinstance(op, tf_ops.Operation):
        # Bug fix: the message was previously passed as a second positional
        # argument to TypeError instead of being formatted into the string.
        raise TypeError("Expected a tf.Operation, got: {}".format(type(op)))
    cops = util.make_list_of_op(cops, allow_graph=False)
    for cop in cops:
        if cop in op.control_inputs:
            # Bug fix: op.name and cop.name were swapped in the message; it is
            # cop that is already a control input of op.
            raise ValueError("{} is already a control_input of {}".format(
                cop.name, op.name))
    # pylint: disable=protected-access
    op._control_inputs += cops
    op._recompute_node_def()
    # pylint: enable=protected-access
| |
# -*- coding: utf-8 -*-
from __future__ import with_statement
import inspect
from contextlib import contextmanager
from cms import constants
from cms.models.pluginmodel import CMSPlugin
from cms.plugin_pool import plugin_pool
from cms.utils import get_cms_setting
from cms.management.commands.subcommands.list import plugin_report
from django.conf import settings
from django.utils.decorators import method_decorator
from django.utils.termcolors import colorize
from django.utils.translation.trans_real import accept_language_re
from sekizai.helpers import validate_template
SUCCESS = 1
WARNING = 2
ERROR = 3
SKIPPED = 4
CHECKERS = []
class FileOutputWrapper(object):
    """
    Wraps two file-like objects (that support at the very least the 'write'
    method) into an API to be used by the check function further down in
    this module.

    The following properties are public (and required) by alternative
    implementations:

        errors: integer count of errors encountered
        successes: integer count of successes encountered
        warnings: integer count of warnings encountered
        skips: integer count of skips encountered
        successful: Whether the checks were successful (no errors)

    They must also provide these methods:

        write_line(message=''): writes a message to stdout
        write_stderr_line(message=''): writes a message to stderr
        success(message): reports and registers a successful check
        error(message): reports and registers an error
        warn(message): reports and registers a warning
        skip(message): reports and registers a skipped check
        section(title): A context manager that starts a new section. For the
            Section API see FileSectionWrapper
    """
    def __init__(self, stdout, stderr):
        self.stdout = stdout
        self.stderr = stderr
        # Class used to build per-section reporters; see section() below.
        self.section_wrapper = FileSectionWrapper
        self.errors = 0
        self.successes = 0
        self.warnings = 0
        self.skips = 0

    def colorize(self, msg, opts=(), **kwargs):
        """Apply terminal colors/styles to msg (django.utils.termcolors)."""
        return colorize(msg, opts=opts, **kwargs)

    def write_line(self, message=''):
        """Write message plus a newline to stdout."""
        self.write(u'%s\n' % message)

    def write(self, message):
        self.stdout.write(message)

    def write_stderr_line(self, message=''):
        """Write message plus a newline to stderr."""
        self.write_stderr(u'%s\n' % message)

    def write_stderr(self, message):
        self.stderr.write(message)

    def success(self, message):
        """Count and report a successful check on stdout."""
        self.successes += 1
        self.write_line(u'%s %s' % (message, self.colorize('[OK]', fg='green', opts=['bold'])))

    def error(self, message):
        """Count and report a failed check on stderr."""
        self.errors += 1
        self.write_stderr_line(u'%s %s' % (message, self.colorize('[ERROR]', fg='red', opts=['bold'])))

    def warn(self, message):
        """Count and report a warning on stderr."""
        self.warnings += 1
        self.write_stderr_line(u'%s %s' % (message, self.colorize('[WARNING]', fg='yellow', opts=['bold'])))

    def skip(self, message):
        """Count and report a skipped check on stdout."""
        self.skips += 1
        self.write_line(u'%s %s' % (message, self.colorize('[SKIP]', fg='blue', opts=['bold'])))

    @method_decorator(contextmanager)
    def section(self, title):
        """Context manager yielding a section reporter; prints a title header,
        then folds the section's counters into this wrapper's totals on
        successful exit."""
        self.write_line(self.colorize(title, opts=['bold']))
        self.write_line(self.colorize('=' * len(title), opts=['bold']))
        self.write_line()
        wrapper = self.section_wrapper(self)
        try:
            yield wrapper
        # NOTE(review): bare except also catches SystemExit/KeyboardInterrupt;
        # it re-raises, so only the extra error line is emitted for those.
        except:
            self.error('Checker failed, see traceback')
            raise
        self.errors += wrapper.errors
        self.successes += wrapper.successes
        self.warnings += wrapper.warnings
        self.skips += wrapper.skips
        self.write_line('')

    @property
    def successful(self):
        """True when no errors have been registered."""
        return not self.errors
class FileSectionWrapper(FileOutputWrapper):
    """
    Used from FileOutputWrapper to report checks in a section.

    If you want to provide your own output class, you may want to subclass
    this class for the section reporting too. If you want to use your own,
    you must defined at least the same API as FileOutputWrapper, as well
    as these four additional methods:

        finish_success(message): End the section (successfully)
        finish_error(message): End the section with errors
        finish_warning(message): End this section with a warning
        finish_skip(message): End this (skipped) section
    """
    def __init__(self, wrapper):
        # Share the parent's streams; counters stay local to this section and
        # are folded into the parent by FileOutputWrapper.section().
        super(FileSectionWrapper, self).__init__(wrapper.stdout, wrapper.stderr)
        self.wrapper = wrapper

    def write_line(self, message=''):
        """Write an indented, bulleted line to stdout."""
        self.write(u' - %s\n' % message)

    def write_stderr_line(self, message=''):
        """Write an indented, bulleted line to stderr."""
        self.write_stderr(u' - %s\n' % message)

    def finish_success(self, message):
        """End the section successfully."""
        self.wrapper.write_line()
        self.wrapper.success(message)

    def finish_error(self, message):
        """End the section with errors."""
        self.wrapper.write_line()
        self.wrapper.error(message)

    def finish_warning(self, message):
        """End the section with a warning."""
        self.wrapper.write_line()
        # Bug fix: FileOutputWrapper defines warn(), not warning(); the old
        # call raised AttributeError whenever a section finished with a
        # warning.
        self.wrapper.warn(message)

    def finish_skip(self, message):
        """End this (skipped) section."""
        # Bug fix: write_lin() was a typo for write_line() and raised
        # AttributeError whenever a section was skipped.
        self.wrapper.write_line()
        self.wrapper.skip(message)
def define_check(func):
    """
    Helper decorator to register a check function.
    """
    # Registration order determines the order the checks run in check().
    CHECKERS.append(func)
    return func
@define_check
def check_sekizai(output):
    """Verify that django-sekizai is installed and wired into the templates."""
    with output.section("Sekizai") as section:
        app_installed = 'sekizai' in settings.INSTALLED_APPS
        if app_installed:
            section.success("Sekizai is installed")
        else:
            section.error("Sekizai is not installed, could not find 'sekizai' in INSTALLED_APPS")

        processor_installed = ('sekizai.context_processors.sekizai'
                               in settings.TEMPLATE_CONTEXT_PROCESSORS)
        if processor_installed:
            section.success("Sekizai template context processor is installed")
        else:
            section.error("Sekizai template context processor is not installed, could not find 'sekizai.context_processors.sekizai' in TEMPLATE_CONTEXT_PROCESSORS")

        # Every real CMS template must declare the 'js' and 'css' namespaces.
        for template_name, _label in get_cms_setting('TEMPLATES'):
            if template_name == constants.TEMPLATE_INHERITANCE_MAGIC:
                # The inheritance placeholder is not an actual template file.
                continue
            if validate_template(template_name, ['js', 'css']):
                section.success("Sekizai namespaces 'js' and 'css' found in %r" % template_name)
            else:
                section.error("Sekizai namespaces 'js' and 'css' not found in %r" % template_name)

        if section.successful:
            section.finish_success("Sekizai configuration okay")
        else:
            section.finish_error("Sekizai configuration has errors")
@define_check
def check_i18n(output):
    """Check internationalization settings for common misconfigurations."""
    with output.section("Internationalization") as section:
        if isinstance(getattr(settings, 'CMS_LANGUAGES', {}), dict):
            section.success("New style CMS_LANGUAGES")
        else:
            section.warn("Old style (tuple based) CMS_LANGUAGES, please switch to the new (dictionary based) style")
        # An underscore signals a locale name ('en_US') where a language code
        # ('en-us') is expected.
        if getattr(settings, 'LANGUAGE_CODE', '').find('_') > -1:
            section.warn("LANGUAGE_CODE must contain a valid language code, not a locale (e.g.: 'en-us' instead of 'en_US'): '%s' provided" % getattr(settings, 'LANGUAGE_CODE', ''))
        for lang in getattr(settings, 'LANGUAGES', ()):
            if lang[0].find('_') > -1:
                section.warn("LANGUAGES must contain valid language codes, not locales (e.g.: 'en-us' instead of 'en_US'): '%s' provided" % lang[0])
        for site, items in get_cms_setting('LANGUAGES').items():
            # Only integer keys hold per-site language lists; other keys
            # presumably carry configuration defaults — TODO confirm.
            if type(site) == int:
                for lang in items:
                    if lang['code'].find('_') > -1:
                        section.warn("CMS_LANGUAGES entries must contain valid language codes, not locales (e.g.: 'en-us' instead of 'en_US'): '%s' provided" % lang['code'])
        for deprecated in ['CMS_HIDE_UNTRANSLATED', 'CMS_LANGUAGE_FALLBACK', 'CMS_LANGUAGE_CONF', 'CMS_SITE_LANGUAGES', 'CMS_FRONTEND_LANGUAGES']:
            if hasattr(settings, deprecated):
                section.warn("Deprecated setting %s found. This setting is now handled in the new style CMS_LANGUAGES and can be removed" % deprecated)
@define_check
def check_deprecated_settings(output):
    """Warn about settings that are no longer used by the CMS."""
    with output.section("Deprecated settings") as section:
        # Collect whichever removed settings are still defined, then report.
        present = [name for name in ('CMS_FLAT_URLS', 'CMS_MODERATOR')
                   if hasattr(settings, name)]
        for name in present:
            section.warn("Deprecated setting %s found. This setting is no longer in use and can be removed" % name)
        if not present:
            section.skip("No deprecated settings found")
@define_check
def check_plugin_instances(output):
    """Check database plugin instances for missing or unsaved plugin types."""
    with output.section("Plugin instances") as section:
        # get the report
        report = plugin_report()
        section.success("Plugin instances of %s types found in the database" % len(report))
        # loop over plugin types in the report
        for plugin_type in report:
            # warn about those that are not installed
            if not plugin_type["model"]:
                section.error("%s has instances but is no longer installed" % plugin_type["type"])
            # warn about those that have unsaved instances
            if plugin_type["unsaved_instances"]:
                section.error("%s has %s unsaved instances" % (plugin_type["type"], len(plugin_type["unsaved_instances"])))
        if section.successful:
            section.finish_success("The plugins in your database are in good order")
        else:
            section.finish_error("There are potentially serious problems with the plugins in your database. \nEven if your site works, you should run the 'manage.py cms list plugins' \ncommand and then the 'manage.py cms delete_orphaned_plugins' command. \nThis will alter your database; read the documentation before using it.")
@define_check
def check_copy_relations(output):
    """Warn about plugins with relations but no copy_relations method."""
    # Render a class as 'module.ClassName' for messages.
    c_to_s = lambda klass: '%s.%s' % (klass.__module__, klass.__name__)

    def get_class(method_name, model):
        # Return the class in model's MRO whose own __dict__ defines
        # method_name, or None if no class defines it.
        for cls in inspect.getmro(model):
            if method_name in cls.__dict__:
                return cls
        return None

    with output.section('Presence of "copy_relations"') as section:
        plugin_pool.discover_plugins()
        for plugin in plugin_pool.plugins.values():
            plugin_class = plugin.model
            # Skip plugins that override copy_relations themselves (the
            # defining class is not CMSPlugin) and the base CMSPlugin model.
            if get_class('copy_relations', plugin_class) is not CMSPlugin or plugin_class is CMSPlugin:
                # this class defines a ``copy_relations`` method, nothing more
                # to do
                continue
            for rel in plugin_class._meta.many_to_many:
                section.warn('%s has a many-to-many relation to %s,\n    but no "copy_relations" method defined.' % (
                    c_to_s(plugin_class),
                    c_to_s(rel.model),
                ))
            # NOTE(review): _meta.get_all_related_objects is a legacy model
            # API — confirm it still exists on the Django version in use.
            for rel in plugin_class._meta.get_all_related_objects():
                if rel.model != CMSPlugin:
                    section.warn('%s has a foreign key from %s,\n    but no "copy_relations" method defined.' % (
                        c_to_s(plugin_class),
                        c_to_s(rel.model),
                    ))
        if not section.warnings:
            section.finish_success('All plugins have "copy_relations" method if needed.')
        else:
            # NOTE(review): this branch reports a problem yet still ends the
            # section via finish_success; finish_warning may be intended —
            # confirm before changing.
            section.finish_success('Some plugins do not define a "copy_relations" method.\nThis might lead to data loss when publishing or copying plugins.\nSee https://django-cms.readthedocs.org/en/latest/extending_cms/custom_plugins.html#handling-relations')
def check(output):
    """
    Checks the configuration/environment of this django CMS installation.

    'output' should be an object that provides the same API as
    FileOutputWrapper.

    Returns whether the configuration/environment are okay (has no errors)
    """
    title = "Checking django CMS installation"
    border = '*' * len(title)
    output.write_line(output.colorize(border, opts=['bold']))
    output.write_line(output.colorize(title, opts=['bold']))
    output.write_line(output.colorize(border, opts=['bold']))
    output.write_line()
    # Run every registered checker; each opens its own section on output.
    for checker in CHECKERS:
        checker(output)
        output.write_line()
    with output.section("OVERALL RESULTS") as section:
        # NOTE(review): the summary lines below are written to `output`, not
        # to `section`, so they are not indented as section entries; `section`
        # only provides the heading.
        if output.errors:
            output.write_stderr_line(output.colorize("%s errors!" % output.errors, opts=['bold'], fg='red'))
        if output.warnings:
            output.write_stderr_line(output.colorize("%s warnings!" % output.warnings, opts=['bold'], fg='yellow'))
        if output.skips:
            output.write_line(output.colorize("%s checks skipped!" % output.skips, opts=['bold'], fg='blue'))
        output.write_line(output.colorize("%s checks successful!" % output.successes, opts=['bold'], fg='green'))
        output.write_line()
        if output.errors:
            output.write_stderr_line(output.colorize('Please check the errors above', opts=['bold'], fg='red'))
        elif output.warnings:
            output.write_stderr_line(output.colorize('Installation okay, but please check warnings above', opts=['bold'], fg='yellow'))
        else:
            output.write_line(output.colorize('Installation okay', opts=['bold'], fg='green'))
    return output.successful
| |
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) Spyder Project Contributors
#
# Licensed under the terms of the MIT License
# (see LICENSE.txt for details)
# -----------------------------------------------------------------------------
"""Terminal Widget."""
# Standard library imports
import json
import os
import sys
# Third-party imports
import qstylizer
from qtpy.QtCore import (Qt, QUrl, Slot, QEvent, QTimer, Signal,
QObject)
from qtpy.QtGui import QKeySequence
from qtpy.QtWebChannel import QWebChannel
from qtpy.QtWebEngineWidgets import (QWebEnginePage, QWebEngineSettings,
QWebEngineView)
from qtpy.QtWidgets import QFrame, QVBoxLayout, QApplication
from spyder.api.widgets.mixins import SpyderWidgetMixin
from spyder.api.config.decorators import on_conf_change
from spyder.config.base import get_translation
from spyder.config.gui import is_dark_interface
from spyder.utils.palette import QStylePalette
# Local imports
from spyder_terminal.api import TerminalMainWidgetActions, TermViewMenus
from spyder_terminal.widgets.style.themes import ANSI_COLORS
PREFIX = 'spyder_terminal.default.'
# For translations
_ = get_translation('spyder_terminal')
class TermViewSections:
    """Section identifiers for the terminal view's context menu."""
    CommonActions = 'common_actions'
    ZoomActions = 'zoom_actions'
class ChannelHandler(QObject):
    """QWebChannel handler for JS calls."""

    # Emitted when the terminal prompt becomes ready (triggered from JS).
    sig_ready = Signal()
    # Emitted when the terminal process is closed externally (triggered
    # from JS).
    sig_closed = Signal()

    def __init__(self, parent):
        """Handler main constructor."""
        QObject.__init__(self, parent)

    @Slot()
    def ready(self):
        """Invoke signal when terminal prompt is ready."""
        self.sig_ready.emit()

    @Slot()
    def close(self):
        """Invoke signal when terminal process was closed externally."""
        self.sig_closed.emit()
class TerminalWidget(QFrame, SpyderWidgetMixin):
    """Terminal widget."""

    # Directory zsh reads its dotfiles from, falling back to HOME when
    # ZDOTDIR is unset.
    zdotdir = os.environ.get('ZDOTDIR') or os.environ.get('HOME')  # what about windows?
    # Startup files per shell, in the order each shell reads them; sourced by
    # setup_term on POSIX systems so the embedded terminal sees the user's
    # environment.
    ENV_ROUTES = {
        "bash": ["/etc/profile", "~/.bash_profile"],
        "zsh": ["/etc/zshenv", f"{zdotdir}/.zshenv", "/etc/zprofile",
                f"{zdotdir}/.zprofile", "/etc/zshrc", f"{zdotdir}/.zshrc",
                "/etc/zlogin", f"{zdotdir}/.zlogin"],
        "fish": ["~/.config/fish/config.fish"],
        "sh": ["~/.profile", "~/.shrc", "~/.shinit"],
        "ksh": ["~/.profile", "~/.kshrc"],
        "csh": ["~/.cshrc", "~/.login"],
        "pwsh": [],
        "rbash": ["~/.bashrc", "~/.bash_profile"],
        "dash": ["~/.profile"],
        "screen": [],
        "tmux": [],
        "tcsh": ["~/.tcshrc"],
        "xonsh": ["~/.xonshrc"]
    }

    # Re-emitted from the ChannelHandler signals of the same meaning.
    terminal_closed = Signal()
    terminal_ready = Signal()

    def __init__(self, parent, port, path='~', font=None, theme=None,
                 color_scheme=None):
        """Frame main constructor."""
        super().__init__(parent, class_parent=parent)
        # The terminal content is served by a local HTTP server on `port`.
        url = 'http://127.0.0.1:{0}?path={1}'.format(port, path)
        self.handler = ChannelHandler(self)
        self.handler.sig_ready.connect(lambda: self.terminal_ready.emit())
        self.handler.sig_closed.connect(lambda: self.terminal_closed.emit())
        self.view = TermView(self, term_url=url, handler=self.handler)
        self.font = font
        self.initial_path = path
        self.theme = theme
        self.color_scheme = color_scheme
        self.parent = parent
        layout = QVBoxLayout()
        layout.addWidget(self.view)
        layout.setContentsMargins(0, 0, 0, 0)
        self.setFrameStyle(QFrame.NoFrame | QFrame.Plain)
        self.setLayout(layout)
        self.body = self.view.document
        # Full terminal setup is deferred until the JS side reports ready.
        self.handler.sig_ready.connect(self.setup_term)
        # Highlight the frame border while the terminal has focus.
        self.view.sig_focus_in_event.connect(
            lambda: self._apply_stylesheet(focus=True))
        self.view.sig_focus_out_event.connect(
            lambda: self._apply_stylesheet(focus=False))
        self._apply_stylesheet()

    def setup_term(self):
        """Setup other terminal options after page has loaded."""
        # This forces to display the black background
        print("\0", end='')
        self.set_font(self.font)
        self.set_dir(self.initial_path)
        self.current_theme = self.set_theme({})
        self.set_scrollbar_style()
        # Push every configured option of this widget down to xterm.js.
        options = self.get_conf_options()
        dict_options = {}
        for option in options:
            dict_options[option] = self.get_conf(option)
        self.apply_settings(dict_options)
        self.apply_zoom()
        shell_name = self.get_conf('shell')
        if os.name != 'nt':
            # Find environment variables in the home directory given the
            # actual shell
            env_route = self.ENV_ROUTES[shell_name]
            for act_file in env_route:
                if os.path.exists(os.path.expanduser(act_file)):
                    self.exec_cmd(f"source {act_file}")
            self.exec_cmd("clear")

    def get_shortcut_data(self):
        """
        Return shortcut data, a list of tuples (shortcut, text, default).

        shortcut (QShortcut or QAction instance)
        text (string): action/shortcut description
        default (string): default key sequence
        """
        return self.view.get_shortcut_data()

    def eval_javascript(self, script):
        """Evaluate Javascript instructions inside view."""
        return self.view.eval_javascript(script)

    def set_scrollbar_style(self):
        """Set terminal scrollbar style."""
        if is_dark_interface():
            self.eval_javascript('addClassStyleToContainer("dark-scroll")')

    def set_dir(self, path):
        """Set terminal initial current working directory."""
        self.eval_javascript('setcwd("{0}")'.format(path))

    def set_font(self, font):
        """Set terminal font via CSS."""
        self.font = font
        self.eval_javascript('fitFont("{0}")'.format(self.font))

    @on_conf_change(section='appearance')
    def set_theme(self, _values):
        """Set theme for the terminal.

        Re-invoked automatically whenever the 'appearance' config section
        changes; returns the theme dict pushed to xterm.js.
        """
        supported_themes = ANSI_COLORS
        new_theme = {}
        theme = self.get_conf('selected', section='appearance')
        color_scheme = self.get_conf('ui_theme', section='appearance')
        # Fall back to a bundled theme when the selected editor theme has no
        # matching ANSI palette.
        if theme not in supported_themes:
            theme = 'spyder' if color_scheme == 'light' else 'spyder/dark'
        new_theme['background'] = self.get_conf(
            '{}/background'.format(theme), section='appearance')
        new_theme['foreground'] = self.get_conf(
            '{}/normal'.format(theme), section='appearance')[0]
        new_theme['cursor'] = self.get_conf(
            '{}/normal'.format(theme), section='appearance')[0]
        new_theme['cursorAccent'] = self.get_conf(
            '{}/ctrlclick'.format(theme), section='appearance')
        new_theme['selection'] = self.get_conf(
            '{}/occurrence'.format(theme), section='appearance')
        theme_colors = ANSI_COLORS[theme]
        for color in theme_colors:
            new_theme[color] = theme_colors[color]
        self.eval_javascript('setOption("{}", {})'.format('theme', new_theme))
        # Keep the terminal font in sync with the editor font.
        self.set_conf(
            'fontFamily', self.get_conf('font/family', section='appearance'))
        return new_theme

    def get_fonts(self):
        """List terminal CSS fonts."""
        return self.eval_javascript('getFonts()')

    def search_next(self, text, case=False, regex=False, word=False):
        """Search in the terminal for the given regex."""
        search_options = {'regex': regex,
                          'wholeWord': word,
                          'caseSensitive': case}
        return self.eval_javascript(
            'searchNext("{}",{})'.format(text, json.dumps(search_options)))

    def search_previous(self, text, case=False, regex=False, word=False):
        """Search in the terminal for the given regex."""
        search_options = {'regex': regex,
                          'wholeWord': word,
                          'caseSensitive': case}
        return self.eval_javascript(
            'searchPrevious("{}", {})'.format(text,
                                              json.dumps(search_options)))

    def exec_cmd(self, cmd):
        """Execute a command inside the terminal."""
        self.eval_javascript('exec("{0}")'.format(cmd))

    def __alive_loopback(self):
        # Poll the JS side every 250 ms and emit terminal_closed once the
        # underlying process dies.
        alive = self.is_alive()
        if not alive:
            self.terminal_closed.emit()
        else:
            QTimer.singleShot(250, self.__alive_loopback)

    def is_alive(self):
        """Check if terminal process is alive."""
        alive = self.eval_javascript('isAlive()')
        return alive

    def apply_zoom(self):
        """Replay the persisted zoom level on a freshly created terminal."""
        zoom = self.get_conf('zoom')
        if zoom > 0:
            for __ in range(0, zoom):
                self.view.increase_font(new_term=True)
        if zoom < 0:
            for __ in range(0, -zoom):
                self.view.decrease_font(new_term=True)

    def set_option(self, option_name, option):
        """Set a configuration option in the terminal."""
        # Integers are passed unquoted to JS; everything else as a string.
        if type(option) == int:
            self.eval_javascript('setOption("{}", {})'.format(option_name,
                                                              option))
        else:
            self.eval_javascript('setOption("{}", "{}")'.format(option_name,
                                                                option))

    def apply_settings(self, options):
        """Apply custom settings given an option dictionary."""
        # Bell style option
        if 'sound' in options:
            bell_style = 'sound' if options['sound'] else 'none'
            self.set_option('bellStyle', bell_style)
        # Cursor option
        if 'cursor_type' in options:
            cursor_id = options['cursor_type']
            cursor_choices = {0: "block", 1: "underline", 2: "bar"}
            self.set_option('cursorStyle', cursor_choices[cursor_id])
        if 'buffer_limit' in options:
            new_lim = options['buffer_limit']
            self.set_option('scrollback', new_lim)
        if 'cursor_blink' in options:
            self.set_option('cursorBlink', int(options['cursor_blink']))

    def _apply_stylesheet(self, focus=False):
        """Apply stylesheet according to the current focus."""
        if focus:
            border_color = QStylePalette.COLOR_ACCENT_3
        else:
            border_color = QStylePalette.COLOR_BACKGROUND_4
        css = qstylizer.style.StyleSheet()
        css.QFrame.setValues(
            border=f'1px solid {border_color}',
            margin='0px 1px 0px 1px',
            padding='0px 0px 1px 0px',
            borderRadius='3px'
        )
        self.setStyleSheet(css.toString())
class TermView(QWebEngineView, SpyderWidgetMixin):
    """XTerm Wrapper."""

    sig_focus_in_event = Signal()
    """
    This signal is emitted when the widget receives focus.
    """

    sig_focus_out_event = Signal()
    """
    This signal is emitted when the widget loses focus.
    """

    def __init__(self, parent, term_url='http://127.0.0.1:8070', handler=None):
        """Webview main constructor."""
        super().__init__(parent, class_parent=parent)
        web_page = QWebEnginePage(self)
        self.setPage(web_page)
        self.source_text = ''
        self.parent = parent
        # Bridge exposing `handler` to the JS side as the 'handler' object.
        self.channel = QWebChannel(self.page())
        self.page().setWebChannel(self.channel)
        self.channel.registerObject('handler', handler)
        self.term_url = QUrl(term_url)
        self.load(self.term_url)
        # Focus events arrive at the focus proxy, not this widget; filter
        # them there (see eventFilter).
        self.focusProxy().installEventFilter(self)
        self.document = self.page()
        try:
            self.document.profile().clearHttpCache()
        except AttributeError:
            # Older Qt versions do not expose clearHttpCache.
            pass
        self.initial_y_pos = 0
        self.setFocusPolicy(Qt.ClickFocus)
        self.setup()

    def setup(self):
        """Create the terminal context menu."""
        # Create context menu
        self.context_menu = self.get_menu(TermViewMenus.Context)
        for item in [self.get_action(TerminalMainWidgetActions.Copy),
                     self.get_action(TerminalMainWidgetActions.Paste),
                     self.get_action(TerminalMainWidgetActions.Clear)]:
            self.add_item_to_menu(
                item,
                menu=self.context_menu,
                section=TermViewSections.CommonActions,
            )
        for item in [self.get_action(TerminalMainWidgetActions.ZoomIn),
                     self.get_action(TerminalMainWidgetActions.ZoomOut)]:
            self.add_item_to_menu(
                item,
                menu=self.context_menu,
                section=TermViewSections.ZoomActions,
            )

    def copy(self):
        """Copy unicode text from terminal."""
        self.triggerPageAction(QWebEnginePage.Copy)

    def paste(self):
        """Paste unicode text into terminal."""
        clipboard = QApplication.clipboard()
        text = str(clipboard.text())
        if len(text.splitlines()) > 1:
            # Normalize line endings of multiline pastes to the platform EOL.
            eol_chars = os.linesep
            text = eol_chars.join((text + eol_chars).splitlines())
        self.eval_javascript('pasteText({})'.format(repr(text)))

    def clear(self):
        """Clear the terminal."""
        self.eval_javascript('clearTerm()')

    def increase_font(self, new_term=False):
        """Increase terminal font.

        When new_term is True the persisted zoom level is not touched (used
        to replay the stored zoom on a fresh terminal).
        """
        if not new_term:
            zoom = self.get_conf('zoom')
            self.set_conf('zoom', zoom + 1)
        return self.eval_javascript('increaseFontSize()')

    def decrease_font(self, new_term=False):
        """Decrease terminal font.

        When new_term is True the persisted zoom level is not touched.
        """
        if not new_term:
            zoom = self.get_conf('zoom')
            self.set_conf('zoom', zoom - 1)
        return self.eval_javascript('decreaseFontSize()')

    def contextMenuEvent(self, event):
        """Override Qt method."""
        self.context_menu.popup(event.globalPos())
        event.accept()

    def eval_javascript(self, script):
        """
        Evaluate Javascript instructions inside DOM with the expected prefix.
        """
        script = PREFIX + script
        self.document.runJavaScript("{}".format(script), self.return_js_value)

    def return_js_value(self, value):
        """Return the value of the function evaluated in Javascript."""
        return value

    def wheelEvent(self, event):
        """Catch and process wheel scrolling events via Javascript."""
        delta = event.angleDelta().y()
        self.eval_javascript('scrollTerm({0})'.format(delta))

    def event(self, event):
        """Grab all keyboard input."""
        if event.type() == QEvent.ShortcutOverride:
            self.keyPressEvent(event)
            return True
        # Bug fix: this used to `return True` for EVERY event, claiming them
        # all as handled and preventing the default QWebEngineView processing
        # (paint, mouse, focus, ...). Only ShortcutOverride is intercepted;
        # everything else is delegated to the base class.
        return super().event(event)

    def keyPressEvent(self, event):
        """Qt override method."""
        key = event.key()
        modifiers = event.modifiers()
        # Fold the modifier flags into the key code so the whole combination
        # can be rendered as a single portable key sequence.
        if modifiers & Qt.ShiftModifier:
            key += Qt.SHIFT
        if modifiers & Qt.ControlModifier:
            key += Qt.CTRL
        if modifiers & Qt.AltModifier:
            key += Qt.ALT
        if modifiers & Qt.MetaModifier:
            key += Qt.META
        sequence = QKeySequence(key).toString(QKeySequence.PortableText)
        if event == QKeySequence.Paste:
            self.paste()
        elif sequence == self.get_shortcut('copy'):
            self.copy()
        elif sequence == self.get_shortcut('paste'):
            self.paste()
        elif sequence == self.get_shortcut('clear'):
            self.clear()
        elif sequence == self.get_shortcut('zoom_in'):
            self.increase_font()
        elif sequence == self.get_shortcut('zoom_out'):
            self.decrease_font()
        else:
            super().keyPressEvent(event)

    def eventFilter(self, widget, event):
        """
        Handle events that affect the view.

        All events (e.g. focus in/out) reach the focus proxy, not this
        widget itself. That's why this event filter is necessary.
        """
        if self.focusProxy() is widget:
            if event.type() == QEvent.FocusIn:
                self.sig_focus_in_event.emit()
            elif event.type() == QEvent.FocusOut:
                self.sig_focus_out_event.emit()
        return super().eventFilter(widget, event)
def test():
    """Plugin visual test."""
    from spyder.utils.qthelpers import qapplication
    app = qapplication(test_time=8)
    # Bug fix: TerminalWidget requires a `port` argument; calling it with only
    # the parent raised TypeError. 8070 matches TermView's default URL port.
    term = TerminalWidget(None, 8070)
    # term.resize(900, 700)
    term.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    test()
| |
from django.test import TestCase
from wagtail.wagtailcore.models import Page, PageViewRestriction
from wagtail.wagtailcore.signals import page_unpublished
from wagtail.tests.testapp.models import EventPage, SingleEventPage
class TestPageQuerySet(TestCase):
    """Exercise the custom filter methods on Wagtail's PageQuerySet.

    Each test typically checks (a) an invariant over every page returned
    and (b) the presence/absence of a known page from the test fixture.
    """
    fixtures = ['test.json']

    def test_live(self):
        pages = Page.objects.live()

        # All pages must be live
        for page in pages:
            self.assertTrue(page.live)

        # Check that the homepage is in the results
        homepage = Page.objects.get(url_path='/home/')
        self.assertTrue(pages.filter(id=homepage.id).exists())

    def test_not_live(self):
        pages = Page.objects.not_live()

        # All pages must not be live
        for page in pages:
            self.assertFalse(page.live)

        # Check that "someone elses event" is in the results
        event = Page.objects.get(url_path='/home/events/someone-elses-event/')
        self.assertTrue(pages.filter(id=event.id).exists())

    def test_in_menu(self):
        pages = Page.objects.in_menu()

        # All pages must be be in the menus
        for page in pages:
            self.assertTrue(page.show_in_menus)

        # Check that the events index is in the results
        events_index = Page.objects.get(url_path='/home/events/')
        self.assertTrue(pages.filter(id=events_index.id).exists())

    def test_not_in_menu(self):
        pages = Page.objects.not_in_menu()

        # All pages must not be in menus
        for page in pages:
            self.assertFalse(page.show_in_menus)

        # Check that the root page is in the results
        self.assertTrue(pages.filter(id=1).exists())

    def test_page(self):
        homepage = Page.objects.get(url_path='/home/')
        pages = Page.objects.page(homepage)

        # Should only select the homepage
        self.assertEqual(pages.count(), 1)
        self.assertEqual(pages.first(), homepage)

    def test_not_page(self):
        homepage = Page.objects.get(url_path='/home/')
        pages = Page.objects.not_page(homepage)

        # Should select everything except for the homepage
        self.assertEqual(pages.count(), Page.objects.all().count() - 1)
        for page in pages:
            self.assertNotEqual(page, homepage)

    def test_descendant_of(self):
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.descendant_of(events_index)

        # Check that all pages descend from events index
        for page in pages:
            self.assertTrue(page.get_ancestors().filter(id=events_index.id).exists())

    def test_descendant_of_inclusive(self):
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.descendant_of(events_index, inclusive=True)

        # Check that all pages descend from events index, includes event index
        for page in pages:
            self.assertTrue(page == events_index or page.get_ancestors().filter(id=events_index.id).exists())

        # Check that event index was included
        self.assertTrue(pages.filter(id=events_index.id).exists())

    def test_not_descendant_of(self):
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.not_descendant_of(events_index)

        # Check that no pages descend from events_index
        for page in pages:
            self.assertFalse(page.get_ancestors().filter(id=events_index.id).exists())

        # As this is not inclusive, events index should be in the results
        self.assertTrue(pages.filter(id=events_index.id).exists())

    def test_not_descendant_of_inclusive(self):
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.not_descendant_of(events_index, inclusive=True)

        # Check that all pages descend from homepage but not events index
        for page in pages:
            self.assertFalse(page.get_ancestors().filter(id=events_index.id).exists())

        # As this is inclusive, events index should not be in the results
        self.assertFalse(pages.filter(id=events_index.id).exists())

    def test_child_of(self):
        homepage = Page.objects.get(url_path='/home/')
        pages = Page.objects.child_of(homepage)

        # Check that all pages are children of homepage
        for page in pages:
            self.assertEqual(page.get_parent(), homepage)

    def test_not_child_of(self):
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.not_child_of(events_index)

        # Check that all pages are not children of events_index
        for page in pages:
            self.assertNotEqual(page.get_parent(), events_index)

    def test_ancestor_of(self):
        root_page = Page.objects.get(id=1)
        homepage = Page.objects.get(url_path='/home/')
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.ancestor_of(events_index)

        self.assertEqual(pages.count(), 2)
        self.assertEqual(pages[0], root_page)
        self.assertEqual(pages[1], homepage)

    def test_ancestor_of_inclusive(self):
        root_page = Page.objects.get(id=1)
        homepage = Page.objects.get(url_path='/home/')
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.ancestor_of(events_index, inclusive=True)

        self.assertEqual(pages.count(), 3)
        self.assertEqual(pages[0], root_page)
        self.assertEqual(pages[1], homepage)
        self.assertEqual(pages[2], events_index)

    def test_not_ancestor_of(self):
        root_page = Page.objects.get(id=1)
        homepage = Page.objects.get(url_path='/home/')
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.not_ancestor_of(events_index)

        # Test that none of the ancestors are in pages
        for page in pages:
            self.assertNotEqual(page, root_page)
            self.assertNotEqual(page, homepage)

        # Test that events index is in pages
        self.assertTrue(pages.filter(id=events_index.id).exists())

    def test_not_ancestor_of_inclusive(self):
        root_page = Page.objects.get(id=1)
        homepage = Page.objects.get(url_path='/home/')
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.not_ancestor_of(events_index, inclusive=True)

        # Test that none of the ancestors or the events_index are in pages
        for page in pages:
            self.assertNotEqual(page, root_page)
            self.assertNotEqual(page, homepage)
            self.assertNotEqual(page, events_index)

    def test_parent_of(self):
        homepage = Page.objects.get(url_path='/home/')
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.parent_of(events_index)

        # Pages must only contain homepage
        self.assertEqual(pages.count(), 1)
        self.assertEqual(pages[0], homepage)

    def test_not_parent_of(self):
        homepage = Page.objects.get(url_path='/home/')
        events_index = Page.objects.get(url_path='/home/events/')
        pages = Page.objects.not_parent_of(events_index)

        # Pages must not contain homepage
        for page in pages:
            self.assertNotEqual(page, homepage)

        # Test that events index is in pages
        self.assertTrue(pages.filter(id=events_index.id).exists())

    def test_sibling_of_default(self):
        """
        sibling_of should default to an inclusive definition of sibling
        if 'inclusive' flag not passed
        """
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        pages = Page.objects.sibling_of(event)

        # Check that all pages are children of events_index
        for page in pages:
            self.assertEqual(page.get_parent(), events_index)

        # Check that the event is included
        self.assertTrue(pages.filter(id=event.id).exists())

    def test_sibling_of_exclusive(self):
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        pages = Page.objects.sibling_of(event, inclusive=False)

        # Check that all pages are children of events_index
        for page in pages:
            self.assertEqual(page.get_parent(), events_index)

        # Check that the event is not included
        self.assertFalse(pages.filter(id=event.id).exists())

    def test_sibling_of_inclusive(self):
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        pages = Page.objects.sibling_of(event, inclusive=True)

        # Check that all pages are children of events_index
        for page in pages:
            self.assertEqual(page.get_parent(), events_index)

        # Check that the event is included
        self.assertTrue(pages.filter(id=event.id).exists())

    def test_not_sibling_of_default(self):
        """
        not_sibling_of should default to an inclusive definition of sibling -
        i.e. eliminate self from the results as well -
        if 'inclusive' flag not passed
        """
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        pages = Page.objects.not_sibling_of(event)

        # Check that all pages are not children of events_index
        for page in pages:
            self.assertNotEqual(page.get_parent(), events_index)

        # Check that the event is not included
        self.assertFalse(pages.filter(id=event.id).exists())

        # Test that events index is in pages
        self.assertTrue(pages.filter(id=events_index.id).exists())

    def test_not_sibling_of_exclusive(self):
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        pages = Page.objects.not_sibling_of(event, inclusive=False)

        # Check that all pages are not children of events_index
        # (the event itself is not excluded in the exclusive variant)
        for page in pages:
            if page != event:
                self.assertNotEqual(page.get_parent(), events_index)

        # Check that the event is included
        self.assertTrue(pages.filter(id=event.id).exists())

        # Test that events index is in pages
        self.assertTrue(pages.filter(id=events_index.id).exists())

    def test_not_sibling_of_inclusive(self):
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        pages = Page.objects.not_sibling_of(event, inclusive=True)

        # Check that all pages are not children of events_index
        for page in pages:
            self.assertNotEqual(page.get_parent(), events_index)

        # Check that the event is not included
        self.assertFalse(pages.filter(id=event.id).exists())

        # Test that events index is in pages
        self.assertTrue(pages.filter(id=events_index.id).exists())

    def test_type(self):
        pages = Page.objects.type(EventPage)

        # Check that all objects are EventPages
        for page in pages:
            self.assertIsInstance(page.specific, EventPage)

        # Check that "someone elses event" is in the results
        event = Page.objects.get(url_path='/home/events/someone-elses-event/')
        self.assertTrue(pages.filter(id=event.id).exists())

        # Check that "Saint Patrick" (an instance of SingleEventPage, a subclass of EventPage)
        # is in the results
        event = Page.objects.get(url_path='/home/events/saint-patrick/')
        self.assertTrue(pages.filter(id=event.id).exists())

    def test_type_includes_subclasses(self):
        from wagtail.wagtailforms.models import AbstractEmailForm
        pages = Page.objects.type(AbstractEmailForm)

        # Check that all objects are instances of AbstractEmailForm
        for page in pages:
            self.assertIsInstance(page.specific, AbstractEmailForm)

        # Check that the contact form page is in the results
        contact_us = Page.objects.get(url_path='/home/contact-us/')
        self.assertTrue(pages.filter(id=contact_us.id).exists())

    def test_not_type(self):
        pages = Page.objects.not_type(EventPage)

        # Check that no objects are EventPages
        for page in pages:
            self.assertNotIsInstance(page.specific, EventPage)

        # Check that the homepage is in the results
        homepage = Page.objects.get(url_path='/home/')
        self.assertTrue(pages.filter(id=homepage.id).exists())

    def test_exact_type(self):
        pages = Page.objects.exact_type(EventPage)

        # Check that all objects are EventPages (and not a subclass)
        for page in pages:
            self.assertEqual(type(page.specific), EventPage)

        # Check that "someone elses event" is in the results
        event = Page.objects.get(url_path='/home/events/someone-elses-event/')
        self.assertTrue(pages.filter(id=event.id).exists())

        # Check that "Saint Patrick" (an instance of SingleEventPage, a subclass of EventPage)
        # is NOT in the results
        event = Page.objects.get(url_path='/home/events/saint-patrick/')
        self.assertFalse(pages.filter(id=event.id).exists())

    def test_not_exact_type(self):
        pages = Page.objects.not_exact_type(EventPage)

        # Check that no objects are EventPages
        for page in pages:
            self.assertNotEqual(type(page.specific), EventPage)

        # Check that the homepage is in the results
        homepage = Page.objects.get(url_path='/home/')
        self.assertTrue(pages.filter(id=homepage.id).exists())

        # Check that "Saint Patrick" (an instance of SingleEventPage, a subclass of EventPage)
        # is in the results
        event = Page.objects.get(url_path='/home/events/saint-patrick/')
        self.assertTrue(pages.filter(id=event.id).exists())

    def test_public(self):
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        homepage = Page.objects.get(url_path='/home/')

        # Add PageViewRestriction to events_index
        PageViewRestriction.objects.create(page=events_index, password='hello')

        # Get public pages
        pages = Page.objects.public()

        # Check that the homepage is in the results
        self.assertTrue(pages.filter(id=homepage.id).exists())

        # Check that the events index is not in the results
        self.assertFalse(pages.filter(id=events_index.id).exists())

        # Check that the event is not in the results
        self.assertFalse(pages.filter(id=event.id).exists())

    def test_not_public(self):
        events_index = Page.objects.get(url_path='/home/events/')
        event = Page.objects.get(url_path='/home/events/christmas/')
        homepage = Page.objects.get(url_path='/home/')

        # Add PageViewRestriction to events_index
        PageViewRestriction.objects.create(page=events_index, password='hello')

        # Get public pages
        pages = Page.objects.not_public()

        # Check that the homepage is not in the results
        self.assertFalse(pages.filter(id=homepage.id).exists())

        # Check that the events index is in the results
        self.assertTrue(pages.filter(id=events_index.id).exists())

        # Check that the event is in the results
        self.assertTrue(pages.filter(id=event.id).exists())
class TestPageQuerySetSearch(TestCase):
    """Tests for PageQuerySet search integration and bulk unpublish.

    Fix: ``test_unpublish`` previously connected a handler to the
    ``page_unpublished`` signal and never disconnected it, leaking the
    handler (and its closure) into every subsequent test in the process.
    The connect/disconnect pair is now balanced with try/finally.
    """
    fixtures = ['test.json']

    def test_search(self):
        pages = EventPage.objects.search('moon', fields=['location'])

        self.assertEqual(pages.count(), 2)
        self.assertIn(Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific, pages)
        self.assertIn(Page.objects.get(url_path='/home/events/someone-elses-event/').specific, pages)

    def test_operators(self):
        results = EventPage.objects.search("moon ponies", operator='and')

        self.assertEqual(list(results), [
            Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific
        ])

        results = EventPage.objects.search("moon ponies", operator='or')
        sorted_results = sorted(results, key=lambda page: page.url_path)
        self.assertEqual(sorted_results, [
            Page.objects.get(url_path='/home/events/someone-elses-event/').specific,
            Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific,
        ])

    def test_custom_order(self):
        pages = EventPage.objects.order_by('url_path').search('moon', fields=['location'], order_by_relevance=False)

        self.assertEqual(list(pages), [
            Page.objects.get(url_path='/home/events/someone-elses-event/').specific,
            Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific,
        ])

        pages = EventPage.objects.order_by('-url_path').search('moon', fields=['location'], order_by_relevance=False)

        self.assertEqual(list(pages), [
            Page.objects.get(url_path='/home/events/tentative-unpublished-event/').specific,
            Page.objects.get(url_path='/home/events/someone-elses-event/').specific,
        ])

    def test_unpublish(self):
        # set up a listener for the unpublish signal
        unpublish_signals_fired = []

        def page_unpublished_handler(sender, instance, **kwargs):
            unpublish_signals_fired.append((sender, instance))

        page_unpublished.connect(page_unpublished_handler)
        try:
            events_index = Page.objects.get(url_path='/home/events/')
            events_index.get_children().unpublish()

            # Previously-live children of event index should now be non-live
            christmas = EventPage.objects.get(url_path='/home/events/christmas/')
            saint_patrick = SingleEventPage.objects.get(url_path='/home/events/saint-patrick/')
            unpublished_event = EventPage.objects.get(url_path='/home/events/tentative-unpublished-event/')

            self.assertFalse(christmas.live)
            self.assertFalse(saint_patrick.live)

            # Check that a signal was fired for each unpublished page
            self.assertIn((EventPage, christmas), unpublish_signals_fired)
            self.assertIn((SingleEventPage, saint_patrick), unpublish_signals_fired)

            # a signal should not be fired for pages that were in the queryset
            # but already unpublished
            self.assertNotIn((EventPage, unpublished_event), unpublish_signals_fired)
        finally:
            # Don't leak the handler into other tests.
            page_unpublished.disconnect(page_unpublished_handler)
class TestSpecificQuery(TestCase):
    """
    Test the .specific() queryset method. This is isolated in its own test case
    because it is sensitive to database changes that might happen for other
    tests.

    The fixture sets up a page structure like:

    =========== =========================================
    Type        Path
    =========== =========================================
    Page        /
    Page        /home/
    SimplePage  /home/about-us/
    EventIndex  /home/events/
    EventPage   /home/events/christmas/
    EventPage   /home/events/someone-elses-event/
    EventPage   /home/events/tentative-unpublished-event/
    SimplePage  /home/other/
    EventPage   /home/other/special-event/
    =========== =========================================
    """
    fixtures = ['test_specific.json']

    def test_specific(self):
        root = Page.objects.get(url_path='/home/')

        with self.assertNumQueries(0):
            # The query should be lazy.
            qs = root.get_descendants().specific()

        with self.assertNumQueries(4):
            # One query to get page type and ID, one query per page type:
            # EventIndex, EventPage, SimplePage
            pages = list(qs)

        self.assertIsInstance(pages, list)
        self.assertEqual(len(pages), 7)

        for page in pages:
            # An instance of the specific page type should be returned,
            # not wagtailcore.Page.
            content_type = page.content_type
            model = content_type.model_class()
            self.assertIsInstance(page, model)

            # The page should already be the specific type, so this should not
            # need another database query.
            with self.assertNumQueries(0):
                self.assertIs(page, page.specific)

    def test_filtering_before_specific(self):
        # This will get the other events, and then christmas
        # 'someone-elses-event' and the tentative event are unpublished.
        with self.assertNumQueries(0):
            qs = Page.objects.live().order_by('-url_path')[:3].specific()

        with self.assertNumQueries(3):
            # Metadata, EventIndex and EventPage
            pages = list(qs)

        self.assertEqual(len(pages), 3)
        self.assertEqual(pages, [
            Page.objects.get(url_path='/home/other/special-event/').specific,
            Page.objects.get(url_path='/home/other/').specific,
            Page.objects.get(url_path='/home/events/christmas/').specific])

    def test_filtering_after_specific(self):
        # This will get the other events, and then christmas
        # 'someone-elses-event' and the tentative event are unpublished.
        with self.assertNumQueries(0):
            qs = Page.objects.specific().live().in_menu().order_by('-url_path')[:4]

        with self.assertNumQueries(4):
            # Metadata, EventIndex, EventPage, SimplePage.
            pages = list(qs)

        self.assertEqual(len(pages), 4)
        self.assertEqual(pages, [
            Page.objects.get(url_path='/home/other/').specific,
            Page.objects.get(url_path='/home/events/christmas/').specific,
            Page.objects.get(url_path='/home/events/').specific,
            Page.objects.get(url_path='/home/about-us/').specific])

    def test_specific_query_with_search(self):
        # 1276 - The database search backend didn't return results with the
        # specific type when searching a specific queryset.
        pages = list(Page.objects.specific().live().in_menu().search(None, backend='wagtail.wagtailsearch.backends.db'))

        # Check that each page is in the queryset with the correct type.
        # We don't care about order here
        self.assertEqual(len(pages), 4)
        self.assertIn(Page.objects.get(url_path='/home/other/').specific, pages)
        self.assertIn(Page.objects.get(url_path='/home/events/christmas/').specific, pages)
        self.assertIn(Page.objects.get(url_path='/home/events/').specific, pages)
        self.assertIn(Page.objects.get(url_path='/home/about-us/').specific, pages)
| |
# -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
felix.profilerules
~~~~~~~~~~~~
ProfileRules actor, handles local profile chains.
"""
import logging
from calico.felix.actor import actor_message
from calico.felix.futils import FailedSystemCall
from calico.felix.refcount import ReferenceManager, RefCountedActor, RefHelper
from calico.felix.selectors import SelectorExpression
_log = logging.getLogger(__name__)
class RulesManager(ReferenceManager):
"""
Actor that manages the life cycle of ProfileRules objects.
Users must ensure that they correctly pair calls to
get_and_incref() and decref().
This class ensures that rules chains are properly quiesced
before their Actors are deleted.
"""
def __init__(self, config, ip_version, iptables_updater, ipset_manager):
super(RulesManager, self).__init__(qualifier="v%d" % ip_version)
self.iptables_generator = config.plugins["iptables_generator"]
self.ip_version = ip_version
self.iptables_updater = iptables_updater
self.ipset_manager = ipset_manager
self.rules_by_profile_id = {}
self._datamodel_in_sync = False
def _create(self, profile_id):
return ProfileRules(self.iptables_generator,
profile_id,
self.ip_version,
self.iptables_updater,
self.ipset_manager)
def _on_object_started(self, profile_id, active_profile):
profile_or_none = self.rules_by_profile_id.get(profile_id)
_log.debug("Applying initial update to rules %s: %s", profile_id,
profile_or_none)
active_profile.on_profile_update(profile_or_none, async=True)
def _maybe_start(self, obj_id, in_sync=False):
"""
Override: gates starting the ProfileRules on being in sync.
:param obj_id: The ID of the object (profile) that we'd like to start.
:param in_sync: True if we know that this profile is in-sync even if
we might not have received the global in-sync message.
"""
in_sync |= self._datamodel_in_sync
if in_sync or obj_id in self.rules_by_profile_id:
# Either we're globally in-sync or we've explicitly heard about
# this profile so we know it is in sync. Defer to the superclass.
_log.debug("Profile %s is in-sync, deferring to superclass.",
obj_id)
return super(RulesManager, self)._maybe_start(obj_id)
else:
_log.info("Delaying startup of profile %s because datamodel is"
"not in sync.", obj_id)
@actor_message()
def on_datamodel_in_sync(self):
if not self._datamodel_in_sync:
_log.info("%s: datamodel now in sync, unblocking profile startup",
self)
self._datamodel_in_sync = True
self._maybe_start_all()
@actor_message()
def on_rules_update(self, profile_id, profile, force_reprogram=False):
if profile is not None:
_log.info("Rules for profile %s updated.", profile_id)
self.rules_by_profile_id[profile_id] = profile
else:
_log.debug("Rules for profile %s deleted.", profile_id)
self.rules_by_profile_id.pop(profile_id, None)
if self._is_starting_or_live(profile_id):
_log.info("Profile %s is active, kicking the ProfileRules.",
profile_id)
ap = self.objects_by_id[profile_id]
ap.on_profile_update(profile, force_reprogram=force_reprogram,
async=True)
elif profile_id in self.objects_by_id:
_log.debug("Checking if the update allows us to start profile %s",
profile_id)
# Pass in_sync=True because we now explicitly know this profile is
# in sync, even if this is a deletion.
self._maybe_start(profile_id, in_sync=True)
class ProfileRules(RefCountedActor):
    """
    Actor that owns the per-profile rules chains.
    """
    def __init__(self, iptables_generator, profile_id, ip_version,
                 iptables_updater, ipset_mgr):
        super(ProfileRules, self).__init__(qualifier=profile_id)
        assert profile_id is not None
        self.iptables_generator = iptables_generator
        self.id = profile_id
        self.ip_version = ip_version
        self._ipset_mgr = ipset_mgr
        self._iptables_updater = iptables_updater
        # Helper that tracks the ipset refs we hold and calls us back once
        # all requested ipsets have been acquired.
        self._ipset_refs = RefHelper(self, ipset_mgr, self._on_ipsets_acquired)
        # Latest profile update - a profile dictionary.
        self._pending_profile = None
        # Currently-programmed profile dictionary.
        self._profile = None
        # The IDs of the tags and selector ipsets it requires.
        self._required_ipsets = set()
        # State flags.
        self._notified_ready = False
        self._cleaned_up = False
        self._dead = False
        self._dirty = True

    @actor_message()
    def on_profile_update(self, profile, force_reprogram=False):
        """
        Update the programmed iptables configuration with the new
        profile.

        :param dict[str]|NoneType profile: Dictionary of all profile data or
            None if profile is to be deleted.
        """
        _log.debug("%s: Profile update: %s", self, profile)
        assert not self._dead, "Shouldn't receive updates after we're dead."
        self._pending_profile = profile
        self._dirty |= force_reprogram

    @actor_message()
    def on_unreferenced(self):
        """
        Called to tell us that this profile is no longer needed.
        """
        # Flag that we're dead and then let finish_msg_batch() do the cleanup.
        self._dead = True

    def _on_ipsets_acquired(self):
        """
        Callback from the RefHelper once it's acquired all the ipsets we
        need.

        This is called from an actor_message on our greenlet.
        """
        # Nothing to do here, if this is being called then we're already in
        # a message batch so _finish_msg_batch() will get called next.
        _log.info("All required ipsets acquired.")

    def _finish_msg_batch(self, batch, results):
        """
        Runs at the end of each message batch: notifies readiness once,
        then either cleans up (if we've been unreferenced) or reconciles
        ipset references and programs/deletes our chains as needed.
        """
        # Due to dependency management in IptablesUpdater, we don't need to
        # worry about programming the dataplane before notifying so do it on
        # this common code path.
        if not self._notified_ready:
            self._notify_ready()
            self._notified_ready = True

        if self._dead:
            # Only want to clean up once. Note: we can get here a second time
            # if we had a pending ipset incref in-flight when we were asked
            # to clean up.
            if not self._cleaned_up:
                try:
                    _log.info("%s unreferenced, removing our chains", self)
                    self._delete_chains()
                    self._ipset_refs.discard_all()
                    self._ipset_refs = None  # Break ref cycle.
                    self._profile = None
                    self._pending_profile = None
                finally:
                    self._cleaned_up = True
                    self._notify_cleanup_complete()
        else:
            if self._pending_profile != self._profile:
                _log.debug("Profile data changed, updating ipset references.")
                # Make sure that all the new tags and selectors are active.
                # We can't discard unneeded ones until we've updated iptables.
                new_tags_and_sels = extract_tags_and_selectors_from_profile(
                    self._pending_profile
                )
                for tag_or_sel in new_tags_and_sels:
                    _log.debug("Requesting ipset for tag %s", tag_or_sel)
                    # Note: acquire_ref() is a no-op if already acquired.
                    self._ipset_refs.acquire_ref(tag_or_sel)

                self._dirty = True
                self._profile = self._pending_profile
                self._required_ipsets = new_tags_and_sels

            if (self._dirty and
                    self._ipset_refs.ready and
                    self._pending_profile is not None):
                _log.info("Ready to program rules for %s", self.id)
                try:
                    self._update_chains()
                except FailedSystemCall as e:
                    _log.error("Failed to program profile chain %s; error: %r",
                               self, e)
                else:
                    # Now we've updated iptables, we can tell the RefHelper
                    # to discard the tags we no longer need.
                    self._ipset_refs.replace_all(self._required_ipsets)
                    self._dirty = False
            elif not self._dirty:
                _log.debug("No changes to program.")
            elif self._pending_profile is None:
                _log.info("Profile is None, removing our chains")
                try:
                    self._delete_chains()
                except FailedSystemCall:
                    _log.exception("Failed to delete chains for profile %s",
                                   self.id)
                else:
                    self._ipset_refs.discard_all()
                    self._dirty = False
            else:
                assert not self._ipset_refs.ready
                _log.info("Can't program rules %s yet, waiting on ipsets",
                          self.id)

    def _delete_chains(self):
        """
        Removes our chains from the dataplane, blocks until complete.
        """
        # Need to block here: have to wait for chains to be deleted
        # before we can decref our ipsets.
        self._iptables_updater.delete_chains(
            self.iptables_generator.profile_chain_names(self.id),
            async=False)

    def _update_chains(self):
        """
        Updates the chains in the dataplane.

        Blocks until the update is complete.

        On entry, self._pending_profile must not be None.

        :raises FailedSystemCall: if the update fails.
        """
        _log.info("%s Programming iptables with our chains.", self)
        assert self._pending_profile is not None, \
            "_update_chains called with no _pending_profile"
        # Split the acquired ipsets into tag-keyed and selector-keyed maps
        # as expected by the iptables generator.
        tag_to_ip_set_name = {}
        sel_to_ip_set_name = {}
        for tag_or_sel, ipset in self._ipset_refs.iteritems():
            if isinstance(tag_or_sel, SelectorExpression):
                sel_to_ip_set_name[tag_or_sel] = ipset.ipset_name
            else:
                tag_to_ip_set_name[tag_or_sel] = ipset.ipset_name

        _log.info("Updating chains for profile %s", self.id)
        _log.debug("Profile %s: %s", self.id, self._profile)

        updates, deps = self.iptables_generator.profile_updates(
            self.id,
            self._pending_profile,
            self.ip_version,
            tag_to_ipset=tag_to_ip_set_name,
            selector_to_ipset=sel_to_ip_set_name,
            on_allow="RETURN",
            comment_tag=self.id)

        _log.debug("Queueing programming for rules %s: %s", self.id,
                   updates)
        self._iptables_updater.rewrite_chains(updates, deps, async=False)
def extract_tags_and_selectors_from_profile(profile):
    """Return the set of tag/selector values referenced by a profile dict.

    Scans both rule directions for the four endpoint-match keys.

    :param profile: profile dict or None (None yields an empty set).
    """
    if profile is None:
        return set()
    match_keys = ("src_tag", "dst_tag", "src_selector", "dst_selector")
    return set(
        rule[key]
        for direction in ("inbound_rules", "outbound_rules")
        for rule in profile.get(direction, [])
        for key in match_keys
        if key in rule
    )
class UnsupportedICMPType(Exception):
    """Raised when an ICMP type is not supported."""
    pass
| |
# Copyright 2020 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from . import name_style
from .blink_v8_bridge import blink_class_name
from .code_node import EmptyNode
from .code_node import ListNode
from .code_node import TextNode
from .code_node_cxx import CxxClassDefNode
from .code_node_cxx import CxxFuncDeclNode
from .code_node_cxx import CxxFuncDefNode
from .code_node_cxx import CxxNamespaceNode
from .codegen_accumulator import CodeGenAccumulator
from .codegen_context import CodeGenContext
from .codegen_format import format_template as _format
from .codegen_utils import component_export
from .codegen_utils import component_export_header
from .codegen_utils import enclose_with_header_guard
from .codegen_utils import make_copyright_header
from .codegen_utils import make_forward_declarations
from .codegen_utils import make_header_include_directives
from .codegen_utils import write_code_node_to_file
from .mako_renderer import MakoRenderer
from .path_manager import PathManager
def make_factory_methods(cg_context):
    """Build the two static Create() factory methods for an IDL enum class.

    Returns (decls, defs); defs is None because both factories are defined
    inline in the class body.
    """
    assert isinstance(cg_context, CodeGenContext)

    T = TextNode

    decls = ListNode()

    # Overload taking a v8::Value: looks the string up in the enum's string
    # table, reporting failure through |exception_state|.
    func_def = CxxFuncDefNode(
        name="Create",
        arg_decls=[
            "v8::Isolate* isolate",
            "v8::Local<v8::Value> value",
            "ExceptionState& exception_state",
        ],
        return_type="${class_name}",
        static=True)
    func_def.set_base_template_vars(cg_context.template_bindings())
    decls.append(func_def)
    func_def.body.extend([
        T("const auto& result = bindings::FindIndexInEnumStringTable("
          "isolate, value, string_table_, \"${enumeration.identifier}\", "
          "exception_state);"),
        T("return result.has_value() ? "
          "${class_name}(static_cast<Enum>(result.value())) : "
          "${class_name}();"),
    ])

    # Overload taking a String: returns base::nullopt when the string is not
    # in the table.
    func_def = CxxFuncDefNode(
        name="Create",
        arg_decls=["const String& value"],
        return_type="base::Optional<${class_name}>",
        static=True)
    func_def.set_base_template_vars(cg_context.template_bindings())
    decls.append(func_def)
    func_def.body.extend([
        T("const auto& result = bindings::FindIndexInEnumStringTable"
          "(value, string_table_);"),
        T("if (!result)\n"
          "  return base::nullopt;"),
        T("return ${class_name}(static_cast<Enum>(result.value()));"),
    ])

    return decls, None
def make_constructors(cg_context):
    """Build the constructors/destructor declarations for an IDL enum class.

    Returns (decls, defs): declarations for the default, Enum-value, copy and
    move constructors and the destructor, plus a static_assert definition
    checking trivial copyability.
    """
    assert isinstance(cg_context, CodeGenContext)

    T = TextNode

    class_name = cg_context.class_name

    decls = ListNode([
        CxxFuncDeclNode(
            name=class_name,
            arg_decls=[],
            return_type="",
            constexpr=True,
            default=True),
        # Converting constructor from the nested Enum; forwards the integer
        # value and its table string to the base class.
        CxxFuncDefNode(
            name=class_name,
            arg_decls=["Enum value"],
            return_type="",
            explicit=True,
            constexpr=True,
            member_initializer_list=[
                "${base_class_name}("
                "static_cast<enum_int_t>(value), "
                "string_table_[static_cast<enum_int_t>(value)])"
            ]),
        CxxFuncDeclNode(
            name=class_name,
            arg_decls=["const ${class_name}&"],
            return_type="",
            constexpr=True,
            default=True),
        CxxFuncDeclNode(
            name=class_name,
            arg_decls=["${class_name}&&"],
            return_type="",
            constexpr=True,
            default=True),
        CxxFuncDeclNode(
            name="~${class_name}", arg_decls=[], return_type="", default=True),
    ])

    defs = ListNode([
        T("static_assert("
          "std::is_trivially_copyable<${class_name}>::value, \"\");"),
    ])
    defs.set_base_template_vars(cg_context.template_bindings())

    return decls, defs
def make_assignment_operators(cg_context):
  """
  Builds the assignment operators for the enumeration class.

  Returns a (decls, defs) pair: defaulted copy/move assignment declarations
  plus a String-taking assignment adapter; defs holds the out-of-class
  definition of the adapter.
  """
  assert isinstance(cg_context, CodeGenContext)
  # Copy and move assignment are defaulted; no out-of-class definitions.
  decls = ListNode([
      CxxFuncDeclNode(
          name="operator=",
          arg_decls=["const ${class_name}&"],
          return_type="${class_name}&",
          default=True),
      CxxFuncDeclNode(
          name="operator=",
          arg_decls=["${class_name}&&"],
          return_type="${class_name}&",
          default=True),
  ])
  defs = ListNode()
  # Migration adapter
  # Allows assignment directly from a WTF::String; CHECKs that the string
  # is a valid enumeration value before delegating to Enum assignment.
  func_decl = CxxFuncDeclNode(
      name="operator=",
      arg_decls=["const String&"],
      return_type="${class_name}&")
  func_def = CxxFuncDefNode(
      name="operator=",
      arg_decls=["const String& str_value"],
      return_type="${class_name}&",
      class_name=cg_context.class_name)
  decls.append(func_decl)
  defs.append(func_def)
  func_def.set_base_template_vars(cg_context.template_bindings())
  func_def.body.append(
      TextNode("""\
const auto& index =
    bindings::FindIndexInEnumStringTable(str_value, string_table_);
CHECK(index.has_value());
return operator=(${class_name}(static_cast<Enum>(index.value())));
"""))
  return decls, defs
def make_equality_operators(cg_context):
  """
  Builds the two symmetric operator== overloads that compare the wrapper
  class against its nested Enum value.  Both are inline header functions,
  so the second element of the returned pair is None.
  """
  assert isinstance(cg_context, CodeGenContext)

  class_vs_enum = CxxFuncDefNode(
      name="operator==",
      arg_decls=["const ${class_name}& lhs", "${class_name}::Enum rhs"],
      return_type="bool",
      inline=True)
  class_vs_enum.set_base_template_vars(cg_context.template_bindings())
  class_vs_enum.body.append(TextNode("return lhs.AsEnum() == rhs;"))

  enum_vs_class = CxxFuncDefNode(
      name="operator==",
      arg_decls=["${class_name}::Enum lhs", "const ${class_name}& rhs"],
      return_type="bool",
      inline=True)
  enum_vs_class.set_base_template_vars(cg_context.template_bindings())
  enum_vs_class.body.append(TextNode("return lhs == rhs.AsEnum();"))

  return ListNode([class_vs_enum, EmptyNode(), enum_vs_class]), None
def make_as_enum_function(cg_context):
  """
  Builds `Enum AsEnum() const`, which converts the stored integer value
  back to the nested Enum type.  Header-only, so the defs element is None.
  """
  assert isinstance(cg_context, CodeGenContext)

  as_enum = CxxFuncDefNode(
      name="AsEnum", arg_decls=[], return_type="Enum", const=True)
  as_enum.body.append(
      TextNode("return static_cast<Enum>(GetEnumValue());"))
  return as_enum, None
def make_nested_enum_class_def(cg_context):
  """
  Builds `enum class Enum : enum_int_t { ... };` with one constant per IDL
  enumeration value (value names converted to constant naming style).
  """
  assert isinstance(cg_context, CodeGenContext)

  constants = ListNode(
      [
          TextNode(name_style.constant(enum_value))
          for enum_value in cg_context.enumeration.values
      ],
      separator=", ")
  return ListNode([
      TextNode("enum class Enum : enum_int_t {"),
      constants,
      TextNode("};"),
  ])
def make_enum_string_table(cg_context):
  """
  Builds the static table of IDL string literals backing the enumeration.

  Returns a (decls, defs) pair: the in-class `string_table_` declaration,
  and the out-of-class definition required for the static array.
  """
  assert isinstance(cg_context, CodeGenContext)

  literals = ListNode(
      [
          TextNode("\"{}\"".format(enum_value))
          for enum_value in cg_context.enumeration.values
      ],
      separator=", ")
  decls = ListNode([
      TextNode("static constexpr const char* const string_table_[] = {"),
      literals,
      TextNode("};"),
  ])

  defs = TextNode("const char* const ${class_name}::string_table_[];")
  defs.set_base_template_vars(cg_context.template_bindings())

  return decls, defs
def generate_enumeration(enumeration):
  """
  Generates the .h/.cc pair implementing the Blink class for one IDL
  enumeration and writes both files to the generated-output directory.

  The header declares the class (nested Enum, factory methods, special
  members, assignment and equality operators); the source file holds the
  out-of-class definitions.
  """
  path_manager = PathManager(enumeration)
  # Enumerations are generated into a single component; API and impl
  # components must agree.
  assert path_manager.api_component == path_manager.impl_component
  api_component = path_manager.api_component

  # Class names
  class_name = blink_class_name(enumeration)

  cg_context = CodeGenContext(
      enumeration=enumeration,
      class_name=class_name,
      base_class_name="bindings::EnumerationBase")

  # Filepaths
  header_path = path_manager.api_path(ext="h")
  source_path = path_manager.api_path(ext="cc")

  # Root nodes
  header_node = ListNode(tail="\n")
  header_node.set_accumulator(CodeGenAccumulator())
  header_node.set_renderer(MakoRenderer())
  source_node = ListNode(tail="\n")
  source_node.set_accumulator(CodeGenAccumulator())
  source_node.set_renderer(MakoRenderer())

  # Namespaces
  header_blink_ns = CxxNamespaceNode(name_style.namespace("blink"))
  source_blink_ns = CxxNamespaceNode(name_style.namespace("blink"))

  # Class definition
  class_def = CxxClassDefNode(
      cg_context.class_name,
      base_class_names=["bindings::EnumerationBase"],
      final=True,
      export=component_export(api_component))
  class_def.set_base_template_vars(cg_context.template_bindings())

  # Implementation parts
  factory_decls, factory_defs = make_factory_methods(cg_context)
  ctor_decls, ctor_defs = make_constructors(cg_context)
  assign_decls, assign_defs = make_assignment_operators(cg_context)
  equal_decls, equal_defs = make_equality_operators(cg_context)
  nested_enum_class_def = make_nested_enum_class_def(cg_context)
  table_decls, table_defs = make_enum_string_table(cg_context)
  as_enum_decl, as_enum_def = make_as_enum_function(cg_context)

  # Header part (copyright, include directives, and forward declarations)
  header_node.extend([
      make_copyright_header(),
      EmptyNode(),
      enclose_with_header_guard(
          ListNode([
              make_header_include_directives(header_node.accumulator),
              EmptyNode(),
              header_blink_ns,
          ]), name_style.header_guard(header_path)),
  ])
  header_blink_ns.body.extend([
      make_forward_declarations(header_node.accumulator),
      EmptyNode(),
  ])
  source_node.extend([
      make_copyright_header(),
      EmptyNode(),
      TextNode("#include \"{}\"".format(header_path)),
      EmptyNode(),
      make_header_include_directives(source_node.accumulator),
      EmptyNode(),
      source_blink_ns,
  ])
  source_blink_ns.body.extend([
      make_forward_declarations(source_node.accumulator),
      EmptyNode(),
  ])
  header_node.accumulator.add_include_headers([
      component_export_header(api_component),
      "third_party/blink/renderer/bindings/core/v8/generated_code_helper.h",
      "third_party/blink/renderer/platform/bindings/enumeration_base.h",
  ])

  # Assemble the parts.
  # NOTE: appends are order-dependent -- they fix the order of sections in
  # the emitted header and source files.
  header_blink_ns.body.append(class_def)
  header_blink_ns.body.append(EmptyNode())

  class_def.public_section.append(nested_enum_class_def)
  class_def.public_section.append(EmptyNode())

  class_def.public_section.append(factory_decls)
  class_def.public_section.append(EmptyNode())
  source_blink_ns.body.append(factory_defs)
  source_blink_ns.body.append(EmptyNode())

  class_def.private_section.append(table_decls)
  class_def.private_section.append(EmptyNode())
  source_blink_ns.body.append(table_defs)
  source_blink_ns.body.append(EmptyNode())

  class_def.public_section.append(ctor_decls)
  class_def.public_section.append(EmptyNode())
  source_blink_ns.body.append(ctor_defs)
  source_blink_ns.body.append(EmptyNode())

  class_def.public_section.append(assign_decls)
  class_def.public_section.append(EmptyNode())
  source_blink_ns.body.append(assign_defs)
  source_blink_ns.body.append(EmptyNode())

  class_def.public_section.append(as_enum_decl)
  class_def.public_section.append(EmptyNode())
  source_blink_ns.body.append(as_enum_def)
  source_blink_ns.body.append(EmptyNode())

  header_blink_ns.body.append(equal_decls)
  header_blink_ns.body.append(EmptyNode())
  source_blink_ns.body.append(equal_defs)
  source_blink_ns.body.append(EmptyNode())

  # Write down to the files.
  write_code_node_to_file(header_node, path_manager.gen_path_to(header_path))
  write_code_node_to_file(source_node, path_manager.gen_path_to(source_path))
def generate_enumerations(web_idl_database):
  """Generates the C++ class files for every IDL enumeration in the
  given web_idl_database."""
  for enum_definition in web_idl_database.enumerations:
    generate_enumeration(enum_definition)
| |
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) The name of the author may not be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""
A simplistic etcd orm.
"""
import json
from etcdobj.fields import Field
__version__ = '0.0.0'
class _Server(object):
    """
    Parent class for all Server implementations.
    """

    def __init__(self, client, *args, **kwargs):
        """
        Creates a new instance of a Server implementation.

        :param client: The etcd client to use.
        :type client: object
        :param args: All other non-keyword arguments.
        :type args: list
        :param kwargs: All other keyword arguments.
        :type kwargs: dict
        :raises: ValueError
        """
        # _verify_client assigns self.client once the client passes checks.
        self.client = None
        self._verify_client(client)

    def _verify_client(self, client):
        """
        Does basic validation that the client can be used.

        :param client: The client to check.
        :type client: object
        :raises: ValueError
        """
        # The client must expose callable write/read/delete methods.
        missing = [method for method in ('write', 'read', 'delete')
                   if not callable(getattr(client, method, None))]
        if missing:
            raise ValueError('The following methods are missing from the '
                             'client: {0}'.format(', '.join(missing)))
        self.client = client

    def save(self, obj):
        """
        Save an object.

        :param obj: An instance that subclasses EtcdObj
        :type obj: EtcdObj
        :returns: The same instance
        :rtype: EtcdObj
        """
        for entry in obj.render():
            self.client.write(entry['key'], entry['value'], quorum=True)
        return obj

    def read(self, obj):
        """
        Retrieve an object.

        :param obj: An instance that subclasses EtcdObj
        :type obj: EtcdObj
        :returns: A filled out instance
        :rtype: EtcdObj
        """
        for entry in obj.render():
            stored = self.client.read(entry['key'], quorum=True).value
            if entry['dir']:
                # Directory entries store the value under the last path
                # segment inside the dict-like field on the object.
                leaf = entry['key'].split('/')[-1]
                getattr(obj, entry['name'])[leaf] = stored
            else:
                setattr(obj, entry['name'], stored)
        return obj
class Server(_Server):
    """
    Server implementation which creates an etcd.Client instance
    as its client.
    """

    def __init__(self, etcd_kwargs=None, *args, **kwargs):
        """
        Creates a new instance of Server.

        :param etcd_kwargs: The keyword arguments used to create an etcd.Client
        :type etcd_kwargs: dict or None
        :param args: All other non-keyword arguments.
        :type args: list
        :param kwargs: All other keyword arguments.
        :type kwargs: dict
        :raises: ValueError
        """
        # Deferred import keeps the package importable without etcd installed.
        import etcd
        # None sentinel instead of a mutable {} default: a shared default
        # dict would be the same object across every call.
        if etcd_kwargs is None:
            etcd_kwargs = {}
        super(Server, self).__init__(
            etcd.Client(**etcd_kwargs), *args, **kwargs)
class EtcdObj(object):
    """
    Class all objects which want to persist to etcd must subclass.

    Subclasses declare ``Field`` instances as class attributes. On
    instantiation the field names are collected into the instance's
    ``_fields`` list, and matching keyword arguments seed the field values.
    Subclasses are also expected to define ``__name__`` -- render() uses it
    as the etcd key prefix (see note there).
    """

    # Class-level fallback only; __new__ gives each instance its own list.
    _fields = []

    def __new__(cls, **kwargs):
        """
        Creates a new instance.

        :param kwargs: All keyword arguments.
        :type kwargs: dict
        :returns: The new instance
        :rtype: EtcdObj
        """
        instance = super(EtcdObj, cls).__new__(cls)
        fields = []
        for key in dir(cls):
            if not key.startswith('_'):
                attr = getattr(cls, key)
                if issubclass(attr.__class__, Field):
                    fields.append(key)
                    if key in kwargs.keys():
                        attr.value = kwargs[key]
        # Store a per-instance list, bypassing the overridden __setattr__.
        # The previous code appended to the shared class-level list on
        # every instantiation, which duplicated entries each time an
        # object was created and leaked field names across subclasses.
        object.__setattr__(instance, '_fields', fields)
        return instance

    def __init__(self, **kwargs):  # pragma: no cover
        """
        Initializes a new instance. Required for __new__.

        :param kwargs: All keyword arguments.
        :type kwargs: dict
        """
        pass

    def __setattr__(self, name, value):
        """
        Overridden setattr to catch fields or pass along if not a field.

        :param name: The name of the field.
        :type name: str
        :param value: The value to set on name.
        :type value: any
        """
        # NOTE(review): this lookup raises AttributeError for names that do
        # not already exist on the instance or class, so brand-new
        # attributes cannot be added after construction -- presumably by
        # design; confirm before relying on it.
        attr = object.__getattribute__(self, name)
        if name in self._fields:
            # Field attributes keep the Field object; only its value moves.
            attr.value = value
        else:
            object.__setattr__(self, name, value)

    def __getattribute__(self, name):
        """
        Overridden getattribute to catch fields or pass along if not a field.

        :param name: The name of the field.
        :type name: str
        :returns: The value of the field or attribute
        :rtype: any
        :raises: AttributeError
        """
        if name in object.__getattribute__(self, '_fields'):
            # Unwrap Field attributes to their stored value.
            return object.__getattribute__(self, name).value
        else:
            return object.__getattribute__(self, name)

    def render(self):
        """
        Renders the instance into a structure for settings in etcd.

        :returns: The structure to use for setting.
        :rtype: list(dict{key=str,value=any})
        """
        rendered = []
        for x in self._fields:
            items = object.__getattribute__(self, x).render()
            if type(items) != list:
                items = [items]
            for i in items:
                # NOTE(review): relies on the subclass defining __name__
                # explicitly; plain instances have no __name__ attribute.
                i['key'] = '/{0}/{1}'.format(self.__name__, i['key'])
                rendered.append(i)
        return rendered

    @property
    def json(self):
        """
        Dumps the entire object as a json structure.
        """
        data = {}
        for field in self._fields:
            # FIXME: This is dumb :-)
            attribute = object.__getattribute__(self, field)
            data[attribute.name] = json.loads(attribute.json)
            # Flatten if needed
            if attribute.name in data[attribute.name].keys():
                data[attribute.name] = data[attribute.name][attribute.name]
        return json.dumps(data)
| |
# timeless routines
# PACKAGE Imports
import sys, os
import time, datetime, holidays
import urllib
import requests
import sqlite3
import numpy as np
import pandas as pd
import pandas.io.sql as pdsql
from bs4 import BeautifulSoup as bsoup
# CUSTOM Modules
from globalvars import accessStrings
from dbmgt import stockDB
# notes #
# remember to close DB Connection properly b/f exiting to avoid random unwanted behavior
class coreStocks(stockDB, accessStrings):
"""
See readme file for more details on this class.
Per stockDB's __init__ method,
you must pass in a db connection when instantiating this method.
    Don't forget to establish a DB connection after you instantiate this class.
"""
# HELPER Functions
# alignReportColumns
# ****************** #
@staticmethod
def alignReportColumns(sheet):
"""
Helper function that works with format10KQSheet and createStockFinancialsReports
to align column names to allow for setting the index of the sheet to the symbol of
the stock.
Takes a single dataframe argument, which is some finanical report whose columns
need to be realigned with the Symbol column at the head.
Returns the dataframe passed in with columns realigned. Otherwise False, error tuple
"""
try:
if isinstance(sheet, pd.DataFrame):
# create new index by moving the Symbol col from the end to the beginning
cols = [i for i in sheet.columns.insert(0, sheet.columns[-1])]
# remove the extra entry for Symbol column
cols.pop(-1)
# reindex
sheet = sheet.reindex(columns=cols)
else:
raise ValueError( 'Variable with incorrect data type passed. A dataframe is required but a {kind} was passed'.format(kind=type(sheet)) )
except Exception as e:
return False, e
return sheet
# cleanNullColumns
# **************** #
@staticmethod
def cleanNullColumns(sheet):
"""
Helper function to discard columns in sheets where each value in column is null.
Accepts a DataFrame as the sheet argument.
Returns the cleaned dataframe or an error Tuple of (False, error)
"""
try:# check for and remove columns with all NaNs
for column in sheet.columns:
if pd.isnull(sheet[column]).all():
sheet.drop(column, axis=1, inplace=True)
return sheet
except Exception as e:
return False, e
# makeStockListURL
# **************** #
def makeStockListURL(self, exchange='NASDAQ'):
"""
Creates a URL for PANDAS to download a CSV list of all current stocks from NASDAQ.com
Argument exchange is a string of either 'NYSE' or 'NASDAQ' for the exchange, which it uses
to combine the URL path to the csv file. Defaults to NASDAQ if no exchange specified.
Returns the complete URL as a string.
Example Usage: makeStockListURL('NYSE')
"""
try:
the_exchange = self.all_cur_stocks_csv_exchange[0]
if exchange.lower() == 'nyse':
the_exchange = self.all_cur_stocks_csv_exchange[1]
return ''.join([self.all_cur_stocks_csv_base_url, the_exchange, self.all_cur_stocks_csv_tail])
except Exception as e:
return False, e
# REPORTS
# getAllCurStocks
# *************** #
def getAllCurStocks(self, exchanges):
"""
Convenience function for donwloading and cleaning the csv file of all stocks from NASDAQ and NYSE.
The function takes either a list or tuple of len = 2, consisting of the strings 'NYSE' and 'NASDAQ'.
It calls the function that makes the URL for downloading the data, and then retreives the data.
It also performs several cleanup functinos on the data, converting numerical strings to floats, and
putting data in a more manageable format.
Returns a single dataframe of all stocks traded on the exchanges requested.
Example Usage:
current_stocks = getAllCurStocks(['NASDAQ', 'NYSE'])
createSymbolsKeyTable(current_stocks[['Symbol', 'Market']]) # uses only the Symbol and Market
field names in the returned dataframe to create the table
"""
try:
#download all the stocks from NASDAQ and NYSE
stock_lists_to_download = [self.makeStockListURL(exchanges[0]), self.makeStockListURL(exchanges[1])]
exchange_data = [pd.read_csv(i, index_col = 0, encoding='utf-8') for i in stock_lists_to_download]
#make column in each frame for Exchange and assign the market that the stock belongs to
for idx, i in enumerate(exchange_data):
i.loc[:,'Market'] = 'NASDAQ' if idx == 0 else 'NYSE'
#merge data into single frame
all_exchanges = pd.concat([exchange_data[0], exchange_data[1]])
# drop the Unnamed and Summary Quote columns
all_exchanges.drop(['Unnamed: 8', 'Summary Quote'], axis=1, inplace=True)
#drop all n/a(s) in the LastSale column b/c I don't care about stock that's not selling.
all_exchanges = all_exchanges[ (all_exchanges.loc[:, 'LastSale'] != None) ] # (all_exchanges.loc[:,'LastSale'] != 'n/a') &
# cast all numeric values in LastSale as float instead of string
all_exchanges.loc[:, 'LastSale'] = all_exchanges.loc[:,'LastSale'].astype(float)
#add column for marketcap symbol and remove all symbols and numbers from marketcap that to get the multiplier
all_exchanges['MarketCapSym'] = all_exchanges['MarketCap'].replace('[$0-9.]', '', regex=True)
#remove $ and letters from MarketCap fields
all_exchanges['MarketCap'] = all_exchanges['MarketCap'].replace('[$MB]', '', regex=True)
all_exchanges.reset_index(inplace=True)
#remove any unwanted whitespace from symbol or name
all_exchanges['Symbol'] = all_exchanges['Symbol'].replace('\s+', '', regex=True)
#replace all n/a values in MarketCap with np.NAN
all_exchanges[all_exchanges['MarketCap'] == 'n/a'] = np.NAN
#convert MarketCap to a float.
all_exchanges['MarketCap'] = all_exchanges['MarketCap'].astype(float)
#round the LastSale column
all_exchanges['LastSale'] = all_exchanges['LastSale'].round(2)
#rename industry column
all_exchanges.rename(columns={'industry':'Industry'}, inplace=True)
all_exchanges = all_exchanges[all_exchanges['Symbol'].notnull()]
# remove any duplicate stock symbols using pandas unique() method
all_exchanges.drop_duplicates(subset='Symbol', keep='first', inplace=True)
return all_exchanges
except Exception as e:
return (False, e)
# createPriceHistoryReport
# ************************ #
def createPriceHistoryReport(self, stock):
"""
Calls get10YrPriceHistory() to package a price history report into a PANDAS dataframe, then cleans and returns the data.
This function will acquire a price history for the provided symbol, which must be a string and a valid stock symbol
along with the symbol's exchange, e.g., ('MMM', 'NYSE'). The get10YrPriceHistory() function requires the exchange.
After the data is loaded, the function adds a Symbol field to the price history for tracking in the database, reindexes
and renames some fields, properly formats the dates into datetime fields, and converts prices from strings to floats.
Returns the report as a PANDAS dataframe if successful, otherwise a tuple (False, error message).
Example Usage: createPriceHistoryReport(('MMM', 'NYSE'))
"""
try:
# get the raw data from morningstar
price_history = self.get10YrPriceHistory(stock)
if isinstance(price_history, pd.DataFrame): # the price_history has to exist, or else return the err msg of the function called
price_history['Symbol'] = stock[0]
# reorganize header order
price_history = price_history.reindex(columns=['Symbol','Date','Open','High','Low','Close','Volume'])
# rename the Date column for easier processing through SQLite's Date functionality
price_history.rename(columns={'Date':'Reference'}, inplace=True)
# convert all dates to ISO formatted yyyy-mm-dd strings
price_history['Reference'] = price_history['Reference'].apply(lambda x: time.strftime("%Y-%m-%d", time.strptime(x, "%m/%d/%Y")))
# convert volumes to integers # unicode err on ??? value for some volumes goes to NaN
price_history['Volume'] = pd.to_numeric(price_history['Volume'].str.replace(',',''), errors='coerce')
# set index b/f db commit so no duplicate numeric index columns
price_history.set_index(['Symbol'], inplace=True)
return price_history
except Exception as e:
return (False, e)
# get10YrPriceHistory
# ******************* #
def get10YrPriceHistory(self, symbol):
"""
Get 10Y price history, one symbol at a time.
Function takes two arguments.
symbol argument is a single stock symbol and it's exchange in the form of an iterable with two strings.
That symbol is used to build a URL path to collect the 10Y price history as a CSV. The data is loaded
into a PANDAS dataframe.
daily argument is a flag for triggering a simple report over YTD time period instead of for a 10y period.
Returns a pandas dataframe if successful. Otherwise, returns a tuble of (False, error message).
Example usage: get10YrPriceHistory(('ULTI', 'NASDAQ'))
"""
try:
exchange = self.stock_price_mngstar_csv_exchange[0] if symbol[1] == 'NASDAQ' else self.stock_price_mngstar_csv_exchange[1]
price_history_path = (self.stock_price_mngstar_csv_base_url +
exchange + symbol[0] +
self.stock_price_mngstar_csv_period[0] +
self.stock_price_mngstar_csv_freq_str +
self.stock_price_mngstar_csv_freq_period[0] +
self.stock_price_mngstar_csv_tail)
# throws EmptyDataError('No columns to parse from file') if nothing returned
price_history = pd.read_csv(price_history_path, header=1, encoding = 'utf8') # header is on second row
if not isinstance(price_history, pd.DataFrame):
raise ValueError('Price history report failed. No dataframe returned. Got %r.' % price_history )
return price_history
except Exception as e:
return False, e, 'There is no price history for {stock}. The stock may no longer be traded, or it is so new that there is no price report available for 10yr period.'.format(stock=symbol[0])
# getDividendHistory
# ****************** #
    def getDividendHistory(self, symbol, period):
        """
        Downloads and formats an HTML dividend table packaged as a PANDAS dataframe.

        Unlike most report gathering functions, this one does not have a "createXXReport() method.
        Instead, the getDividendHistory() method accomplishes all of this in one pass.
        The reason is that we are using BeautifulSoup instead of PANDAS to gather the data.

        Not fully finished...This function uses BeautifulSoup to gather the symbol's dividend history,
        if any. The history is for cash dividends only. Upcoming dividends are included. The argument
        symbol is a tuple ('SYMBOL', 'EXCHANGE') with any valid ticker and either NASDAQ or NYSE as the
        exchange. The period argument is the integer number of years for which dividend history is desired.
        High numbers that surpass the available data (10Y) will default to supply all available. The returned
        data will be formatted. Field names are shortened and any string numbers are converted to np.float64.

        The return value for this function will be 'No dividend history for stock.' if there is no
        history. Otherwise, return values will be either the pandas dataframe, or an error message
        of type tuple with the format (False, error message).

        Note that there have been observed bugs, e.g., returning "ImportError('html5lib not found')" when 'SLB'
        is entered as ticker, as well as some Unicode errors.

        Example Usage: getDividendHistory(('DUK','NYSE'), 10)
        """
        try:
            # set flag to track stock's upcoming dividend status
            has_upcoming = False
            # specify the exchange
            exchange = self.stock_div_table_mngstar_exchange[0] if symbol[1] == 'NASDAQ' else self.stock_div_table_mngstar_exchange[1]
            # cast years as str just in case an int was passed
            years = str(period)
            # create the path to get the data
            # two URLs: one for upcoming dividends, one for the historical table
            upcoming_div_history_path = ''.join([self.stock_div_table_mngstar_head, self.stock_div_table_mngstar_type[0], self.stock_div_table_mngstar_action, exchange, symbol[0], self.stock_div_table_mngstar_region, self.stock_div_table_mngstar_tail, years])
            div_history_path = ''.join([self.stock_div_table_mngstar_head, self.stock_div_table_mngstar_type[1], self.stock_div_table_mngstar_action, exchange, symbol[0], self.stock_div_table_mngstar_region, self.stock_div_table_mngstar_tail, years])
            # get the data
            upcoming_raw_html = requests.get(upcoming_div_history_path).text
            past_raw_html = requests.get(div_history_path).text
            # process the upcomming dividend table if there's any
            upcoming_soup = bsoup(upcoming_raw_html, 'lxml').find('table')
            upcoming_formatted_table = self.formatRawDivTable(upcoming_soup, 'upcoming')
            # get the past div table
            past_soup = bsoup(past_raw_html, 'lxml').find('table')
            past_formatted_table = self.formatRawDivTable(past_soup, 'past')
            # process the historical dividend table if there's any
            # NOTE(review): formatRawDivTable is defined elsewhere; presumably
            # it returns the sentinel strings compared against here -- confirm.
            if past_formatted_table == 'No Dividend': # check if empty
                return 'No dividend history for stock.'
            # if there's no data, flag it
            if upcoming_formatted_table != 'No Upcoming':
                has_upcoming = True
                upcoming_div_table = pd.read_html(str(upcoming_formatted_table), header=0, parse_dates=True, encoding='utf-8')
                upcoming_div_table = upcoming_div_table[0]
            # pass the soup objects to pandas, using str as a backup measure to make sure to convert data to parsable format
            past_div_table = pd.read_html(str(past_formatted_table), header=0, parse_dates=True, encoding='utf-8')[0] # since read_html returns a list, get the first element
            # merge the tables
            if has_upcoming == True:
                div_table = past_div_table.append(upcoming_div_table, ignore_index = True)
            else:
                div_table = past_div_table.copy()
            # set a symbol column
            div_table['Symbol'] = symbol[0]
            # reindex the columns, putting symbol at the front
            div_table = div_table.reindex(columns=['Symbol','Ex-Dividend Date','Declaration Date','Record Date','Payable Date','Dividend Type','Amount'])
            # set index to Symbol column for easy DB insertion
            div_table.set_index('Symbol', inplace=True)
            # check for stock splits or any numbers that don't fit the float format
            # account for payment in different currency adding a currrency column
            div_table['Currency'] = div_table['Amount'].str.extract('([A-Z]*)', expand=False)
            # remove any remaining whitespace
            div_table['Amount'] = div_table['Amount'].replace('(/\s/g)?([A-Z]?)','',regex=True)
            # clean up Amount column by removing $ sign and converting number to float
            div_table['Amount'] = div_table['Amount'].replace('\$', '', regex=True)
            # replace spaces with underscores for sqlite3 compatability
            div_table = self.removeColumnSpaces(div_table)
            return div_table
        except Exception as e:
            return False, e
# getStockFinancials
# ****************** #
def getStockFinancials(self, symbol):
"""
Retrieve a given stock's key financial ratios and package them in a PANDAS dataframe.
This function builds a URL and fetches an individual stock's key performance ratios,
which tend to form a rather large table. The required symbol argument is of type tuple
('SYMBOL', 'EXCHANGE'). Most of these ratios can be calculated from the basic data
collected from 10K/Q and Price History reports. This function saves time and processing
power and is useful for tracking of more exotic ratios that might not be as important
to calculate on the fly in your algorithms.
Returns a PANDAS dataframe if successful. Otherwise, returns a tuple (False, error message).
Example Usage: getStockFinancials(('GPRO','NASDAQ'))
"""
try:
exchange = self.stock_financials_mngstar_exchange[0] if symbol[1] == 'NASDAQ' else self.stock_financials_mngstar_exchange[1]
stock_financials_path = self.stock_financials_mngstar_head + exchange + symbol[0] + self.stock_financials_mngstar_tail
raw_financials = pd.read_csv(stock_financials_path, header=2, encoding='utf-8')
return raw_financials
except Exception as e:
empty_msg = 'No available financial information for {equity}.'.format(equity=symbol[0])
if isinstance(e, pd.io.common.CParserError):
return empty_msg
elif isinstance(e, pd.io.common.EmptyDataError):
return empty_msg
else:
return False, e
# get10KQReports
# ************** #
def get10KQReport(self, symbol, report_type, freq):
"""
Get 10k/10q reports (Income, Balance, Cashflow) from Morningstar, one symbol at a time.
Function requires a tuple consisting of a stock symbol and the exchange ('SYMBOL', 'EXCHANGE'),
a report category of type string, and the time frequency of the report data as an integer.
Available options for report_type are 'is','bs','cf'. Frequency can be either 3 or 12.
If no report_type is specified, function falls back to a cashflow sheet. The default frequency is
12 month, which works both for 5yr 10K reports and for TTM 10Q reports.
Returns the requested report packaged in a PANDAS dataframe.
Example Usage: get10KQReport(('ANDA', 'NASDAQ'), 'bs', 12)
"""
try:
exchange = self.mngstar_fin_csv_exchange[0] if symbol[1] == 'NASDAQ' else self.mngstar_fin_csv_exchange[1]
frequency = self.mngstar_fin_csv_report_freq_str[0] if freq == 3 else self.mngstar_fin_csv_report_freq_str[1]
report = None
if report_type == 'is':
report = self.mngstar_fin_csv_report_type[0]
elif report_type == 'bs':
report = self.mngstar_fin_csv_report_type[1]
else:
report = self.mngstar_fin_csv_report_type[2]
report_path = (self.mngstar_fin_csv_base_url + exchange +
symbol[0] + self.mngstar_fin_csv_report_region +
report + self.mngstar_fin_csv_report_period +
frequency + self.mngstar_fin_csv_tail)
data = pd.read_csv(report_path, header=1, encoding='utf-8') # header is on second row. remove first.
return data
except Exception as e:
if isinstance(e, pd.io.common.EmptyDataError):
return 'No 10KQ {report} report available for {stock}.'.format(report=report_type.upper(), stock=symbol[0])
else:
return False, e
# format10KQSheet
# *************** #
def format10KQSheet(self, sheet, symbol, report):
"""
Helper function that works with get10KQReport to format financial data.
Accepts three arguments. sheet is the return value of get10KQReport.
symbol is the tuple ('SYMBOL', 'MARKET') passed with get10KQReport.
sheet_type is the report being generated, which will allow for properly
labeled column names.
Returns the formatted report in a dataframe to the calling function, or a tuple False, error.
"""
try:
assert report in ['is','bs','cf'], "Unknown report formatting requested. Expected is, bs, or cf but got %r" % report
# check for and remove columns with all NaNs
sheet = self.cleanNullColumns(sheet)
# add symbol column
sheet['Symbol'] = symbol[0]
# replace 1st column containing "Fiscal year ends".
col = sheet.columns[0]
assert 'Fiscal' in col, "Warning: The first column to be formatted in this sheet did not contain a reference to the fiscal year. Got %r instead." % col
if report is 'is':
sheet.rename(columns={col:'Income item'}, inplace=True)
elif report is 'bs':
sheet.rename(columns={col:'Balance item'}, inplace=True)
else: #report is 'cf'
sheet.rename(columns={col:'Cashflow item'}, inplace=True)
# remove spaces in all columns so sqlite3 commit doesn't issue warning.
sheet = self.alignReportColumns(self.removeColumnSpaces(sheet))
# set symbol as index for storage
sheet.set_index(['Symbol'], inplace=True)
return sheet
except Exception as e:
return False, e
# create10KCashflowReport
# *********************** #
def create10KCashflowReport(self, symbol):
"""
Create a 10K (annual) Cashflow report.
Function uses get10KQReport() to generate a cashflow report for the given stock. The downloaded data
is cleaned and field names are shortened for readability. This function takes a symbol argument that
is a tuple of form ('SYMBOL','EXCHANGE'). The symbol is any valid stock symbol. The exchange must be
either 'NYSE' or 'NASDAQ'.
Return value if successful is the cashflow report pacakged in a PANDAS dataframe. Otherwise will
return a tuple (False, error message)
Example Usage: create10KCashflowReport(('DDD','NASDAQ'))
"""
try:
ten_k_cashflow = self.get10KQReport(symbol, 'cf', 12) # note: a slow connection prevents download
if isinstance(ten_k_cashflow, pd.DataFrame): # no error downloading info for a new stock or simply initializing the db
ten_k_cashflow = self.format10KQSheet(ten_k_cashflow, symbol, 'cf')
return ten_k_cashflow
except Exception as e:
return False, e
# create10QCashflowReport
# *********************** #
def create10QCashflowReport(self, symbol):
"""
Create a 10Q (quarterly) cashflow report for a given ticker.
This function uses get10KQReport() to generate a TTM quarterly cashflow sheet for the given symbol.
The field names in this report are shortened for readiblity. This function takes a symbol argument
that is a tuple of form ('SYMBOL','EXCHANGE'). The symbol is any valid stock symbol. The exchange
must be either 'NYSE' or 'NASDAQ'.
Return value if successful is the TTM quarterly cashflow sheet packaged in a PANDAS dataframe.
Otherwise will return a tuple (False, error message).
Example Usage: crate10QCashflowReport(('GPRO','NASDAQ'))
"""
try:
ten_q_cashflow = self.get10KQReport(symbol, 'cf', 3)
if isinstance(ten_q_cashflow, pd.DataFrame): # if error downloading info for a new stock or simply initializing the db
ten_q_cashflow = self.format10KQSheet(ten_q_cashflow, symbol, 'cf')
return ten_q_cashflow
except Exception as e:
return False, e
# createSymbolsKeyTable
# ********************* #
def createSymbolsKeyTable(self, symbols):
"""
Initializes the xchanges DB by creating all needed tables for all stocks listed in NASDAQ and NYSE.
This function receives a PANDAS dataframe with fields stock symbol and exchange.
If the table is added to the DB correctly, returns True. If table already exists,
a ValueError is returned with False in a tuple.
Example Usage: createSymbolsKeyTable( getCurrentStocks() )
"""
try: # create the key symbols table
assert isinstance(symbols, pd.DataFrame), "Requires a Dataframe as argument. Got %r instead." % type(symbols)
if self.symbolTableExists() == True:
raise ValueError("The Symbols key table already exists. Move along.")
symbols.to_sql('AllStocksKey', con=self.dbcnx[0], if_exists='replace', index=False)
return True
except Exception as e:
return (False, e)
# DB LOGIC & MGT
# commitPriceHistory
# ****************** #
def commitPriceHistory(self, data, daily=False):
"""
Commits a stock's price history dataframe to the database.
This function receives a dataframe and will check to see if a 10Y price history for the stock it references.
Note that the history checking routine only looks to see that the referenced stock already exists in the
price history table. If so, it will report a ValueError.
If you want to do daily updates of stock prices, use True for the daily argument.
Returns a tuple (True, 'Success Message') if successful. Otherwise, returns a tuple (False, error message)
Example Usage: commitPriceHistory(data)
"""
try:
# return a 'no value' msg, not raise value error.
if isinstance(data, str) and 'No' in data:
return False, data
# pass on get[Recent]MngStarPrice error messages and failures to get price histories
if isinstance(data, tuple) and data[0] is False: # the only condition that can occure from getMngStarPrice...
return data
# catch the case where daily update returns no new information
if daily is True:
if isinstance(data, tuple) and 'You already have the latest' in data[1]:
return data
# catch if there is no known error but a dataframe didn't get passed
if not isinstance(data, pd.DataFrame):
return 'Requires a pandas dataframe. Got a {instance}.'.format(instance=type(data))
# if this is a completely new entry, make sure it's new
if daily is False:
# check if the stock symbol is already present
if self.priceHistoryExists(data.index[0]) == True:
raise ValueError("The symbol is already present. Try using updateStockPrice() instead, or delete the existing record.")
# otherwise, add new columns if needed to DB
self.checkAndAddDBColumns(data.columns,'TenYrPrices')
# then post all new records to the table
data.to_sql('TenYrPrices', con=self.dbcnx[0], if_exists='append')
return (True, 'Successfully commited {stock} price history to the DB.'.format(stock=data.index[0]))
except Exception as e:
return False, e
# commitFinancialsData
# ******************** #
def commitFinancialsData(self, report, report_type, report_period):
"""
Handles commitment of 10K/Q reports to the database in their respective tables.
This function will commit a generated financial report to DB and create the appropriate table if it doesn't exist.
The required report argument is the dataframe created by get10KQReport(). The stock symbol included is used to
check whether the financial history for this stock is present in this report_type's table. Report_type
consists of a string with options of 'is', 'bs', and 'cf' for income, balance, and cashflow sheets. Report_period
is an integer of either 3 or 12 for 3-month and 12-month.
Returns True if the commit was successful, otherwise it will return a tuple (False, ValuerError or other exception).
Note: PANDAS will implicitly set up tables. No need to write separate funcs to set up those tables or specify col names.
Example Usage: commitFinancialsData(report_df, 'bs', 12)
"""
try:
# catch if there's a string that says "no history", etc. must come first to avoid indexing error
if isinstance(report, str): # financial_reports is a string if this condition is true
if 'No' in report:
return False, report
if not isinstance(report, pd.DataFrame): # no errors retreiving data
# pass an error back to the calling function
raise ValueError("Got wrong data type to commit to DB. Report was a %r" % type(report))
# see if the stock symbol exists and raise an error if so.
if self.financialHistoryExists(report.index[0], report_type, report_period) is True: # must specify if true
raise ValueError('Error: There\'s already a record matching this one. Try using commitIndividualFinancials() method to update the financial info instead.')
# sort by report type
if report_type == 'is':
if report_period == 3:
# add columns if needed
# known issues in this code...must have consistent naming of columns
self.checkAndAddDBColumns(report.columns,'TenQIncome')
report.to_sql('TenQIncome', con=self.dbcnx[0], if_exists='append')
return True, 'Successfully commited TenQIncome report to the DB.'
# clean_df_db_dups()
elif report_period == 12: # report goes into annuals
self.checkAndAddDBColumns(report.columns,'TenKIncome')
report.to_sql('TenKIncome', con=self.dbcnx[0], if_exists='append')
return True, 'Successfully commited TenKIncome report to the DB.'
else: # catch formatting error
raise ValueError('Wrong report period of {pd} offered. Try again.'.format(pd=report_period))
elif report_type == 'bs':
if report_period == 3:
self.checkAndAddDBColumns(report.columns,'TenQBalance')
report.to_sql('TenQBalance', con=self.dbcnx[0], if_exists='append')
return True, 'Successfully commited TenQBalance report to the DB.'
elif report_period ==12:
self.checkAndAddDBColumns(report.columns,'TenKBalance')
report.to_sql('TenKBalance', con=self.dbcnx[0], if_exists='append')
return True, 'Successfully commited TenKBalance report to the DB.'
else:
raise ValueError('Wrong report period of {pd} offered. Try again.'.format(pd=report_period))
elif report_type == 'cf':
if report_period == 3:
self.checkAndAddDBColumns(report.columns,'TenQCashflow')
report.to_sql('TenQCashflow', con=self.dbcnx[0], if_exists='append')
return True, 'Successfully commited TenQCashflow report to the DB.'
elif report_period ==12:
self.checkAndAddDBColumns(report.columns,'TenKCashflow')
report.to_sql('TenKCashflow', con=self.dbcnx[0], if_exists='append')
return True, 'Successfully commited TenKCashflow report to the DB.'
else:
raise ValueError('Wrong report period of {pd} offered. Try again.'.format(pd=report_period))
else: # there was a formatting error in function call
raise ValueError("Formatting error in function call. Check your variables {rep_type} and {rep_period}".format(rep_type=report_type, rep_period=report_period))
except Exception as e:
return False, e
# financialHistoryExists
# ********************** #
def financialHistoryExists(self, symbol, report_type, report_period):
"""
Tells whether the DB has a financial report for a given stock symbol.
Takes a symbol string ('MMM'), report type string ('is', 'bs', or 'cf'), and report period integer
(3 or 12) to check the database for the symbols report.
Returns True if the stock and its table is already present. Otherwise, it will
return either False if no table exists. The final return option is a tuple
(False, Error). If you modifiy the functions that use this routine, make sure that
your error checking knows how to distinguish between a single False return and the
tuple that's returned if there's an error.
Example Usage:
"""
try:
if report_type == 'is': # set the table to search
if report_period == 3:
table = 'TenQIncome'
elif report_period == 12:
table = 'TenKIncome'
else:
raise ValueError('Incorrect period of {rpt_pd} requested. Try again.'.format(rpt_pd = report_period)) # wrong period specified
elif report_type == 'bs':
if report_period == 3:
table = 'TenQBalance'
elif report_period == 12:
table = 'TenKBalance'
else:
raise ValueError('Incorrect period of {rpt_pd} requested. Try again.'.format(rpt_pd = report_period))
elif report_type == 'cf':
if report_period == 3:
table = 'TenQCashflow'
elif report_period == 12:
table = 'TenKCashflow'
else:
raise ValueError('Incorrect period {rpt_pd} requested. Try again.'.format(rpt_pd = report_period))
else:
raise ValueError('A report type {rpt} was requested that does not match any you offer. Try again.'.format(rpt = report_type)) # unknown report type
# search the DB for the data
query = 'SELECT * FROM {tbl} WHERE Symbol = ?'.format(tbl = table)
if self.dbcnx[1].execute(query, (symbol,)).fetchone() is not None:
return True
else:
return (False, 'No records found.')
except Exception as e:
return (False, e)
# priceHistoryExists
# ****************** #
def priceHistoryExists(self, symbol):
"""
Searches the DB's Price History table for the selected symbol and returns True if found.
This function receives a string 'SYM' for the desired stock lookup. It searches the
database's Pricehistory table to find an instance of this symbol. If it does, the function
returns True. Otherwise, it will return a tuple (False, 'No records msg').
If the function encounters an error , it will also return a tuple (False, error message).
Note that any subsequent error checking built into functions that utilize this one will need
to distinguish between a not-found False and an error False.
Example Usage: priceHistoryExists('GOOG')
"""
try:
# double check to make sure this symbol is in symbol list
if self.dbcnx[1].execute('SELECT * FROM TenYrPrices WHERE Symbol = ?', (symbol,)).fetchone() is not None:
return True
#otherwise return false
return (False, 'No records found for {stock}.'.format(stock=symbol))
except Exception as e:
return (False, e)
# symbolTableExists
# ***************** #
def symbolTableExists(self):
"""
A helper function to determine whether or not the Symbol table exists in the DB.
If not, throw an error before any functions can try to add data to the database.
Returns True if a table exists, otherwise False. If error, returns tuple
(False, error message)
Example Usage: symbolTableExists()
"""
try:
if self.dbcnx[1].execute('SELECT 1 FROM sqlite_master WHERE type="table" AND name="AllStocksKey";').fetchone() is not None:
return True
return False
except Exception as e:
return False, e
# symbolExists
# ************ #
def symbolExists(self, symbol):
"""
Gatekeeper. Makes sure a symbol exists before making network calls.
Returns True if no errors. Otherwise, returns False with an error message.
"""
try:
# check if the symbol exists in the db
db_symbol = self.dbcnx[1].execute('SELECT * FROM AllStocksKey WHERE Symbol = ? LIMIT 1', (symbol,)).fetchone()
if db_symbol[0] != symbol: # issue a warning
raise ValueError('The stock symbol provided, {sym}, was not found in the database. Try again.'.format(sym=symbol[0] ))
return True
except Exception as e:
return False, e
# NOTE(review): removed trailing non-Python residue (a web-page "Subsets and
# Splits" footer left over from scraping) that broke parsing of this module.