gt stringclasses 1 value | context stringlengths 2.49k 119k |
|---|---|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
# Seconds to wait before retrying after an RPC failure.
ERR_SLEEP = 15
# Default upper bound on nonces scanned per getwork cycle (Python 2 long).
MAX_NONCE = 1000000L

# Global configuration parsed from the CONFIG-FILE given on the command line.
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
    """Minimal JSON-RPC 1.1 client for talking to a bitcoind node over HTTP."""
    # JSON-RPC request id; incremented once per call.
    OBJID = 1

    def __init__(self, host, port, username, password):
        # Precompute the HTTP Basic auth header once.
        authpair = "%s:%s" % (username, password)
        self.authhdr = "Basic %s" % (base64.b64encode(authpair))
        # Keep-alive connection; strict=False, 30 second timeout.
        self.conn = httplib.HTTPConnection(host, port, False, 30)

    def rpc(self, method, params=None):
        """Issue one JSON-RPC call; return its 'result', the server's
        'error' object, or None on transport/decode failure."""
        self.OBJID += 1
        obj = { 'version' : '1.1',
                'method' : method,
                'id' : self.OBJID }
        if params is None:
            obj['params'] = []
        else:
            obj['params'] = params
        self.conn.request('POST', '/', json.dumps(obj),
                          { 'Authorization' : self.authhdr,
                            'Content-type' : 'application/json' })
        resp = self.conn.getresponse()
        if resp is None:
            print "JSON-RPC: no response"
            return None
        body = resp.read()
        resp_obj = json.loads(body)
        if resp_obj is None:
            print "JSON-RPC: cannot JSON-decode body"
            return None
        # NOTE(review): a non-null 'error' object is *returned*, not raised;
        # callers must distinguish it from a genuine result.
        if 'error' in resp_obj and resp_obj['error'] != None:
            return resp_obj['error']
        if 'result' not in resp_obj:
            print "JSON-RPC: no result in object"
            return None
        return resp_obj['result']

    def getblockcount(self):
        """Return the node's current block count."""
        return self.rpc('getblockcount')

    def getwork(self, data=None):
        """Fetch new work (data is None) or submit a solved block header."""
        return self.rpc('getwork', data)
def uint32(x):
    """Truncate x to an unsigned 32-bit value (Python 2 long)."""
    return x & 0xffffffffL
def bytereverse(x):
    """Return 32-bit x with its byte order reversed
    (e.g. 0x12345678 -> 0x78563412)."""
    b0 = (x << 24)
    b1 = (x << 8) & 0x00ff0000
    b2 = (x >> 8) & 0x0000ff00
    b3 = (x >> 24)
    return uint32(b0 | b1 | b2 | b3)
def bufreverse(in_buf):
    """Byte-swap each aligned 32-bit word of in_buf, keeping word order."""
    swapped = []
    for off in range(0, len(in_buf), 4):
        (word,) = struct.unpack('@I', in_buf[off:off+4])
        swapped.append(struct.pack('@I', bytereverse(word)))
    return ''.join(swapped)
def wordreverse(in_buf):
    """Reverse the order of the 32-bit words in in_buf; bytes within each
    word are untouched."""
    words = [in_buf[off:off+4] for off in range(0, len(in_buf), 4)]
    return ''.join(reversed(words))
class Miner:
    """One mining worker: fetches getwork jobs, scans nonces, and submits
    any proof-of-work solutions found."""

    def __init__(self, id):
        self.id = id
        # Upper bound on nonces scanned per job; retuned in iterate() so a
        # scan lasts roughly settings['scantime'] seconds.
        self.max_nonce = MAX_NONCE

    def work(self, datastr, targetstr):
        """Scan nonces against one getwork job.

        datastr -- hex-encoded block data from getwork
        targetstr -- hex-encoded share target
        Returns (hashes_done, nonce_bin): nonce_bin is the packed winning
        nonce, or None if the scan range was exhausted.
        """
        # decode work data hex string to binary
        static_data = datastr.decode('hex')
        static_data = bufreverse(static_data)

        # the first 76b of 80b do not change
        blk_hdr = static_data[:76]

        # decode 256-bit target value
        targetbin = targetstr.decode('hex')
        targetbin = targetbin[::-1]  # byte-swap and dword-swap
        targetbin_str = targetbin.encode('hex')
        target = long(targetbin_str, 16)

        # pre-hash first 76b of block header
        static_hash = hashlib.sha256()
        static_hash.update(blk_hdr)

        for nonce in xrange(self.max_nonce):
            # encode 32-bit nonce value
            nonce_bin = struct.pack("<I", nonce)

            # hash final 4b, the nonce value
            hash1_o = static_hash.copy()
            hash1_o.update(nonce_bin)
            hash1 = hash1_o.digest()

            # sha256 hash of sha256 hash
            hash_o = hashlib.sha256()
            hash_o.update(hash1)
            hash = hash_o.digest()

            # quick test for winning solution: high 32 bits zero?
            if hash[-4:] != '\0\0\0\0':
                continue

            # convert binary hash to 256-bit Python long
            hash = bufreverse(hash)
            hash = wordreverse(hash)

            hash_str = hash.encode('hex')
            l = long(hash_str, 16)

            # proof-of-work test:  hash < target
            if l < target:
                print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
                return (nonce + 1, nonce_bin)
            else:
                print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
#               return (nonce + 1, nonce_bin)

        return (nonce + 1, None)

    def submit_work(self, rpc, original_data, nonce_bin):
        """Splice the solved nonce back into the work data and submit it
        upstream via getwork."""
        nonce_bin = bufreverse(nonce_bin)
        nonce = nonce_bin.encode('hex')
        # The nonce occupies hex chars [152:160) of the 256-char data string.
        solution = original_data[:152] + nonce + original_data[160:256]
        param_arr = [ solution ]
        result = rpc.getwork(param_arr)
        print time.asctime(), "--> Upstream RPC result:", result

    def iterate(self, rpc):
        """One round-trip: fetch work, scan it, retune max_nonce toward the
        configured scantime, and submit any solution."""
        work = rpc.getwork()
        if work is None:
            time.sleep(ERR_SLEEP)
            return
        if 'data' not in work or 'target' not in work:
            time.sleep(ERR_SLEEP)
            return

        time_start = time.time()

        (hashes_done, nonce_bin) = self.work(work['data'],
                                             work['target'])

        time_end = time.time()
        time_diff = time_end - time_start

        # Scale the next scan so it lasts about settings['scantime'] seconds,
        # capped just below 2^32.
        self.max_nonce = long(
            (hashes_done * settings['scantime']) / time_diff)
        if self.max_nonce > 0xfffffffaL:
            self.max_nonce = 0xfffffffaL

        if settings['hashmeter']:
            print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
                  self.id, hashes_done,
                  (hashes_done / 1000.0) / time_diff)

        if nonce_bin is not None:
            self.submit_work(rpc, work['data'], nonce_bin)

    def loop(self):
        """Connect to the RPC server and iterate forever."""
        rpc = BitcoinRPC(settings['host'], settings['port'],
                         settings['rpcuser'], settings['rpcpass'])
        if rpc is None:
            return

        while True:
            self.iterate(rpc)
def miner_thread(id):
    """Process entry point: run one Miner worker until the process dies."""
    Miner(id).loop()
if __name__ == '__main__':
    if len(sys.argv) != 2:
        print "Usage: pyminer.py CONFIG-FILE"
        sys.exit(1)

    # Read key=value settings from the config file, skipping comments.
    f = open(sys.argv[1])
    for line in f:
        # skip comment lines
        m = re.search('^\s*#', line)
        if m:
            continue

        # parse key=value lines
        m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
        if m is None:
            continue
        settings[m.group(1)] = m.group(2)
    f.close()

    # Fill in defaults for any optional settings left unset.
    if 'host' not in settings:
        settings['host'] = '127.0.0.1'
    if 'port' not in settings:
        settings['port'] = 42000
    if 'threads' not in settings:
        settings['threads'] = 1
    if 'hashmeter' not in settings:
        settings['hashmeter'] = 0
    if 'scantime' not in settings:
        settings['scantime'] = 30L
    if 'rpcuser' not in settings or 'rpcpass' not in settings:
        print "Missing username and/or password in cfg file"
        sys.exit(1)

    # Normalize string values read from the file to numeric types.
    settings['port'] = int(settings['port'])
    settings['threads'] = int(settings['threads'])
    settings['hashmeter'] = int(settings['hashmeter'])
    settings['scantime'] = long(settings['scantime'])

    # Spawn one worker process per configured thread.
    thr_list = []
    for thr_id in range(settings['threads']):
        p = Process(target=miner_thread, args=(thr_id,))
        p.start()
        thr_list.append(p)
        time.sleep(1)  # stagger threads

    print settings['threads'], "mining threads started"

    print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
    try:
        for thr_proc in thr_list:
            thr_proc.join()
    except KeyboardInterrupt:
        pass
    print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
| |
"""
Elastic basis pursuit
"""
import numpy as np
import numpy.linalg as nla
import leastsqbound as lsq
import sklearn.linear_model as lm
import scipy.optimize as opt
def err_func(params, x, y, func):
    """
    Residuals of a model fit.

    Parameters
    ----------
    params : tuple
        Parameters of `func`, in the order `func` expects them.
    x : float array
        The independent variable.
    y : float array
        The dependent variable.
    func : function
        A function with signature `func(x, params)`.

    Returns
    -------
    The raveled difference between the data and the model prediction.
    """
    # Raveling both sides lets multi-dimensional input pass through without
    # any special handling.
    prediction = func(x, params)
    return np.ravel(y) - np.ravel(prediction)
def gaussian_kernel(x, params):
    """
    A multi-dimensional Gaussian kernel function

    Useful for creating and testing EBP with simple Gaussian Mixture Models

    Parameters
    ----------
    x : ndarray
        The independent variable over which the Gaussian is calculated.
        The first axis indexes the n dimensions.

    params : ndarray or sequence
        If this is a 1D array, it could have one of few things:

        [mu_1, mu_2, ... mu_n, sigma_1, sigma_2, ... sigma_n]

        Or:

        [mu_1, mu_2, ... mu_n, var_covar_matrix]

        where: var_covar_matrix needs to be reshaped into n-by-n

    Returns
    -------
    ndarray of shape x.shape[1:] with the normalized Gaussian density.

    Raises
    ------
    ValueError
        If len(params) matches neither 2*n nor n + n**2.
    """
    # BUG FIX: convert once up front. Previously the full-covariance branch
    # re-bound `mu` to a plain list slice, so `mu.shape` below raised
    # AttributeError whenever `params` was passed as a list.
    params = np.asarray(params)
    n_dims = x.shape[0]
    mu = params[:n_dims]
    if len(params) == n_dims * 2:
        # Diagonal covariance: trailing entries are per-axis variances.
        sigma = np.diag(params[n_dims:])
    elif len(params) == n_dims + n_dims ** 2:
        # Full covariance matrix, flattened row-major.
        sigma = np.reshape(params[n_dims:], (n_dims, n_dims))
    else:
        e_s = "Inputs to gaussian_kernel don't have the right dimensions"
        raise ValueError(e_s)

    dims = mu.shape[0]
    # Add trailing singleton axes so mu broadcasts against x:
    while len(mu.shape) < len(x.shape):
        mu = mu[..., None]

    shape_tuple = x.shape[1:]
    diff = (x - mu).reshape(x.shape[0], -1)
    sigma_inv = nla.inv(sigma)
    mult1 = np.dot(diff.T, sigma_inv)
    # Mahalanobis distance of each sample = diagonal of the quadratic form:
    mult2 = (np.diag(np.dot(mult1, diff))).reshape(shape_tuple)
    norm_factor = 1 / (np.sqrt((2 * np.pi) ** dims * nla.det(sigma)))
    gauss = norm_factor * np.exp(-0.5 * mult2)
    return gauss
def leastsq_oracle(x, y, kernel, initial=None, bounds=None):
    """
    Generic oracle based on bounded least squares.

    Finds the kernel parameters in each iteration of EBP, starting from an
    initial guess.

    Parameters
    ----------
    x : ndarray
        Input to the kernel function.
    y : ndarray
        Data to fit to.
    kernel : callable
        The kernel function to be specified by this oracle.
    initial : list/array
        initial setting for the parameters of the function. This has to be
        something that kernel knows what to do with.
    bounds : sequence of (min, max) pairs, optional
        Box constraints on each parameter.
    """
    fit = lsq.leastsqbound(err_func, initial, args=(x, y, kernel),
                           bounds=bounds)
    # leastsqbound returns (params, info); only the parameters are needed.
    return fit[0]
def mixture_of_kernels(x, betas, params, kernel):
    """
    Generate the signal from a mixture of kernels

    Parameters
    ----------
    x : ndarray
        Input to each kernel evaluation.
    betas : 1D array
        Coefficients for the linear summation of the kernels
    params : list
        A set of parameters for each one of the kernels
    kernel : callable
        Takes (x, params_i) and returns an array of shape x.shape[1:].

    Returns
    -------
    ndarray of shape x.shape[1:]: the beta-weighted sum of the kernels.
    """
    betas = np.asarray(betas)
    out = np.zeros(x.shape[1:])
    # FIX: `range` instead of the Python-2-only `xrange`, which raises
    # NameError on Python 3; behavior on Python 2 is unchanged.
    for i in range(betas.shape[0]):
        out += np.dot(betas[i], kernel(x, params[i]))

    return out
def kernel_err(y, x, betas, params, kernel):
    """
    Error of a mixture-of-kernels model against data.

    Each kernel is parameterized by its own set of params and weighted by
    the corresponding beta.

    Note
    ----
    For a given set of betas, params, this can be used as a within set error
    function, or to estimate the cross-validation error against another set
    of y, x values, sub-sampled from the whole original set, or from a
    left-out portion.
    """
    predicted = mixture_of_kernels(x, betas, params, kernel)
    return y - predicted
def parameters_to_regressors(x, kernel, params):
    """
    Map parameter sets to a design matrix through the kernel function.

    Parameters
    ----------
    x : ndarray
        Input (trailing dimensions are raveled before kernel evaluation).
    kernel : callable
        The kernel function.
    params : list
        The parameters for each one of the kernel functions.

    Returns
    -------
    2D design matrix with one column per parameter set.
    """
    # Collapse the secondary dimensions so each kernel evaluation is a row:
    flat_x = x.reshape(x.shape[0], -1)
    design = np.zeros((len(params), flat_x.shape[-1]))
    for row, p in enumerate(params):
        design[row] = kernel(flat_x, p)
    return design.T
def solve_nnls(x, y, kernel=None, params=None, design=None):
    """
    Solve the mixture problem using non-negative least squares.

    Parameters
    ----------
    x : ndarray
    y : ndarray
    kernel : callable, optional
    params : list, optional
    design : 2D array, optional
        Precomputed design matrix; when provided, kernel/params are unused.

    Returns
    -------
    (beta_hat, rnorm): non-negative weights and the solver's residual norm.

    Raises
    ------
    ValueError
        If neither a design matrix nor both kernel and params are given.
    """
    if design is None:
        if kernel is None or params is None:
            e_s = "Need to provide either design matrix, or kernel and list of"
            e_s += "params for generating the design matrix"
            raise ValueError(e_s)
        A = parameters_to_regressors(x, kernel, params)
    else:
        A = design
    return opt.nnls(A, y.ravel())
def elastic_basis_pursuit(x, y, oracle, kernel, initial_theta=None, bounds=None,
                          max_iter=1000, beta_tol=10e-6):
    r"""
    Elastic basis pursuit

    Fit a mixture model::

    ..math::

       y = \sum{w_i f_{\theta_i} (x_i)}

    with y data, f a kernel function parameterized by $\theta_i$ and $w_i$ a
    non-negative weight, and x inputs to the kernel function

    Parameters
    ----------
    x : 1D/2D array
        The independent variable that produces the data
    y : 1D/2D darray
        The data to be fit.
    oracle : callable
        This is a function that takes data (`x`/`y`) and a kernel function
        (`kernel`) and returns the params theta for the kernel given x and
        y. The oracle can use any optimization routine, and any cost function
    kernel : callable
        A skeleton for the oracle function to optimize. Must take something
        of the dimensions of x (together with params, and with args) and
        return something of the dimensions of y.
    initial_theta : list/array
        The initial parameter guess
    bounds : sequence
        Bounds on the parameters, passed through to the oracle.
    max_iter : int
        Hard cap on the number of pursuit iterations.
    beta_tol : float
        Weights at or below this value are considered insignificant and
        their kernels are pruned from the basis set.

    Returns
    -------
    theta : list of kernel parameter sets retained in the final basis
    err : list of cross-validation errors (seeded with the variance of the
        fit data)
    r : list of fit-set residual vectors, one per iteration
    """
    # Divide this up into a fit set and a validation set. We'll stop fitting
    # when error on the validation set starts climbing:
    fit_x = x[:, ::2]
    validate_x = x[:, 1::2]
    fit_y = y[::2]
    validate_y = y[1::2]

    # Initialize a bunch of empty lists to hold the state:
    theta = []
    est = []
    design_list = []
    r = []
    err = [np.var(fit_y)]  # Start with the variance of the fit data as the
                           # baseline error
    err_norm = []

    # Initialize the residuals with the fit_data:
    r.append(fit_y)

    # Limit this by number of iterations
    for i in range(max_iter):
        # Ask the oracle for one new kernel fit against the current residual:
        theta.append(oracle(fit_x, r[-1], kernel, initial_theta,
                            bounds=bounds))
        design = parameters_to_regressors(fit_x, kernel, theta)
        beta_hat, rnorm = solve_nnls(fit_x, fit_y, design=design)
        # Here comes the "elastic" bit. We exclude kernels with insignificant
        # contributions:
        keep_idx = np.where(beta_hat > beta_tol)
        # We want this to still be a list (so we can 'append'):
        theta = list(np.array(theta)[keep_idx])
        beta_hat = beta_hat[keep_idx]
        design = design[:, keep_idx[0]]
        # Move on with the shrunken basis set:
        est.append(np.dot(design, beta_hat))
        r.append(fit_y - est[-1])
        # Cross-validation:
        xval_design = parameters_to_regressors(validate_x, kernel, theta)
        xval_est = np.dot(xval_design, beta_hat)
        xval_r = validate_y - xval_est
        err.append(np.dot(xval_r, xval_r))
        # If error just grew, we bail:
        if err[i+1] > err[i]:
            break

    return theta, err, r
| |
#!/usr/bin/env python
"""Assorted utilities shared between parts of apitools."""
from __future__ import print_function
import collections
import contextlib
import json
import keyword
import logging
import os
import re
import urllib2
import urlparse
import six
class Error(Exception):
    """Base error for apitools generation."""
class CommunicationError(Error):
    """Error in network communication (e.g. fetching a discovery doc)."""
def _SortLengthFirst(a, b):
    # Python 2 cmp-style comparator: longer strings first, ties broken
    # alphabetically. Used with sorted(cmp=...) in Names.__init__ so the
    # most specific (longest) prefix is tried first when stripping.
    return -cmp(len(a), len(b)) or cmp(a, b)
class Names(object):
    """Utility class for cleaning and normalizing names in a fixed style."""
    DEFAULT_NAME_CONVENTION = 'LOWER_CAMEL'
    NAME_CONVENTIONS = ['LOWER_CAMEL', 'LOWER_WITH_UNDER', 'NONE']

    def __init__(self, strip_prefixes,
                 name_convention=None,
                 capitalize_enums=False):
        # Longest prefixes first so the most specific prefix wins in
        # __StripName (Python 2 cmp-based sort).
        self.__strip_prefixes = sorted(strip_prefixes, cmp=_SortLengthFirst)
        self.__name_convention = name_convention or self.DEFAULT_NAME_CONVENTION
        self.__capitalize_enums = capitalize_enums

    @staticmethod
    def __FromCamel(name, separator='_'):
        """Convert camelCase name to lower_with_under."""
        name = re.sub(r'([a-z0-9])([A-Z])', r'\1%s\2' % separator, name)
        return name.lower()

    @staticmethod
    def __ToCamel(name, separator='_'):
        """Convert name split on separator to UpperCamelCase."""
        # TODO(craigcitro): Consider what to do about leading or trailing
        # underscores (such as `_refValue` in discovery).
        return ''.join(s[0:1].upper() + s[1:] for s in name.split(separator))

    @staticmethod
    def __ToLowerCamel(name, separator='_'):
        """Convert name split on separator to lowerCamelCase."""
        name = Names.__ToCamel(name, separator=separator)
        return name[0].lower() + name[1:]

    def __StripName(self, name):
        """Strip strip_prefix entries from name."""
        if not name:
            return name
        for prefix in self.__strip_prefixes:
            if name.startswith(prefix):
                return name[len(prefix):]
        return name

    @staticmethod
    def CleanName(name):
        """Perform generic name cleaning."""
        # Replace every character that is not legal in an identifier.
        name = re.sub('[^_A-Za-z0-9]', '_', name)
        if name[0].isdigit():
            name = '_%s' % name
        # Append underscores until the name is no longer a Python keyword.
        while keyword.iskeyword(name):
            name = '%s_' % name
        # If we end up with __ as a prefix, we'll run afoul of python
        # field renaming, so we manually correct for it.
        if name.startswith('__'):
            name = 'f%s' % name
        return name

    @staticmethod
    def NormalizeRelativePath(path):
        """Normalize camelCase entries in path."""
        path_components = path.split('/')
        normalized_components = []
        for component in path_components:
            # Only clean templated path parameters like {fooBar}.
            if re.match(r'{[A-Za-z0-9_]+}$', component):
                normalized_components.append(
                    '{%s}' % Names.CleanName(component[1:-1]))
            else:
                normalized_components.append(component)
        return '/'.join(normalized_components)

    def NormalizeEnumName(self, enum_name):
        """Return a cleaned (and possibly upper-cased) enum value name."""
        if self.__capitalize_enums:
            enum_name = enum_name.upper()
        return self.CleanName(enum_name)

    def ClassName(self, name, separator='_'):
        """Generate a valid class name from name."""
        # TODO(craigcitro): Get rid of this case here and in MethodName.
        if name is None:
            return name
        # TODO(craigcitro): This is a hack to handle the case of specific
        # protorpc class names; clean this up.
        if name.startswith('protorpc.') or name.startswith('message_types.'):
            return name
        name = self.__StripName(name)
        name = self.__ToCamel(name, separator=separator)
        return self.CleanName(name)

    def MethodName(self, name, separator='_'):
        """Generate a valid method name from name."""
        if name is None:
            return None
        name = Names.__ToCamel(name, separator=separator)
        return Names.CleanName(name)

    def FieldName(self, name):
        """Generate a valid field name from name."""
        # TODO(craigcitro): We shouldn't need to strip this name, but some
        # of the service names here are excessive. Fix the API and then
        # remove this.
        name = self.__StripName(name)
        if self.__name_convention == 'LOWER_CAMEL':
            name = Names.__ToLowerCamel(name)
        elif self.__name_convention == 'LOWER_WITH_UNDER':
            name = Names.__FromCamel(name)
        return Names.CleanName(name)
@contextlib.contextmanager
def Chdir(dirname, create=True):
    """Context manager: run the body with dirname as the working directory.

    Args:
      dirname: Directory to change into; created if missing when `create`.
      create: Whether to create a missing dirname.

    Raises:
      OSError: If dirname does not exist and create is False.
    """
    if not os.path.exists(dirname):
        if not create:
            raise OSError('Cannot find directory %s' % dirname)
        else:
            os.mkdir(dirname)
    previous_directory = os.getcwd()
    os.chdir(dirname)
    try:
        yield
    finally:
        # BUG FIX: restore the original working directory even when the body
        # raises; previously an exception left the process inside dirname.
        os.chdir(previous_directory)
def NormalizeVersion(version):
    """Return version with identifier-unsafe characters rewritten."""
    # '.' is currently the only character that causes trouble downstream.
    return '_'.join(version.split('.'))
class ClientInfo(collections.namedtuple('ClientInfo', (
        'package', 'scopes', 'version', 'client_id', 'client_secret',
        'user_agent', 'client_class_name', 'url_version', 'api_key'))):
    """Container for client-related info and names."""

    @classmethod
    def Create(cls, discovery_doc,
               scope_ls, client_id, client_secret, user_agent, names, api_key):
        """Create a new ClientInfo object from a discovery document."""
        # Union of the scopes declared in the doc and those requested.
        oauth2 = discovery_doc.get('auth', {}).get('oauth2', {})
        scopes = set(oauth2.get('scopes', {}))
        scopes.update(scope_ls)
        package = discovery_doc['name']
        url_version = discovery_doc['version']
        version = NormalizeVersion(url_version)
        class_name = ''.join(
            [names.ClassName(package), names.ClassName(version)])
        return cls(
            package=package,
            scopes=sorted(list(scopes)),
            version=version,
            client_id=client_id,
            client_secret=client_secret,
            user_agent=user_agent,
            client_class_name=class_name,
            url_version=url_version,
            api_key=api_key)

    @property
    def default_directory(self):
        """Default output directory: the package name."""
        return self.package

    @property
    def cli_rule_name(self):
        return '{0}_{1}'.format(self.package, self.version)

    @property
    def cli_file_name(self):
        return '{0}.py'.format(self.cli_rule_name)

    @property
    def client_rule_name(self):
        return '{0}_{1}_client'.format(self.package, self.version)

    @property
    def client_file_name(self):
        return '{0}.py'.format(self.client_rule_name)

    @property
    def messages_rule_name(self):
        return '{0}_{1}_messages'.format(self.package, self.version)

    @property
    def services_rule_name(self):
        return '{0}_{1}_services'.format(self.package, self.version)

    @property
    def messages_file_name(self):
        return '{0}.py'.format(self.messages_rule_name)

    @property
    def messages_proto_file_name(self):
        return '{0}.proto'.format(self.messages_rule_name)

    @property
    def services_proto_file_name(self):
        return '{0}.proto'.format(self.services_rule_name)
def GetPackage(path):
    """Convert a filesystem path into a dotted package name."""
    return '.'.join(path.split(os.path.sep))
def CleanDescription(description):
    """Return a version of description safe for printing in a docstring."""
    if isinstance(description, six.string_types):
        # A triple-quote inside the text would terminate the generated
        # docstring early; break it up.
        return description.replace('"""', '" " "')
    # Non-string values pass through untouched.
    return description
class SimplePrettyPrinter(object):
    """Simple pretty-printer that supports an indent contextmanager."""

    def __init__(self, out):
        # Stream that all output is printed to.
        self.__out = out
        # Current indentation prefix, prepended to every printed line.
        self.__indent = ''
        self.__skip = False
        # When True, lines are printed verbatim (no %-interpolation).
        self.__comment_context = False

    @property
    def indent(self):
        return self.__indent

    def CalculateWidth(self, max_width=78):
        """Columns available on a line after the current indent."""
        return max_width - len(self.indent)

    @contextlib.contextmanager
    def Indent(self, indent='  '):
        """Context manager: deepen the indent by `indent` for the body."""
        previous_indent = self.__indent
        self.__indent = '%s%s' % (previous_indent, indent)
        yield
        self.__indent = previous_indent

    @contextlib.contextmanager
    def CommentContext(self):
        """Print without any argument formatting."""
        old_context = self.__comment_context
        self.__comment_context = True
        yield
        self.__comment_context = old_context

    def __call__(self, *args):
        if self.__comment_context and args[1:]:
            raise Error('Cannot do string interpolation in comment context')
        if args and args[0]:
            if not self.__comment_context:
                # First argument is a %-format string for the rest.
                line = (args[0] % args[1:]).rstrip()
            else:
                line = args[0].rstrip()
            # NOTE(review): on Python 3 encode() returns bytes, which would
            # print as b'...'; this module targets Python 2 (urllib2 import).
            line = line.encode('ascii', 'backslashreplace')
            print('%s%s' % (self.__indent, line), file=self.__out)
        else:
            print('', file=self.__out)
def NormalizeDiscoveryUrl(discovery_url):
    """Expands a few abbreviations into full discovery urls.

    Accepts either a full http(s) URL (returned unchanged) or a shorthand
    of the form "api.version" (e.g. "compute.v1").

    Raises:
      ValueError: If the shorthand contains no '.'.
    """
    if discovery_url.startswith('http'):
        return discovery_url
    elif '.' not in discovery_url:
        # BUG FIX: the original message contained a %s placeholder but never
        # interpolated the offending value into it.
        raise ValueError(
            'Unrecognized value "%s" for discovery url' % discovery_url)
    api_name, _, api_version = discovery_url.partition('.')
    return 'https://www.googleapis.com/discovery/v1/apis/%s/%s/rest' % (
        api_name, api_version)
def FetchDiscoveryDoc(discovery_url, retries=5):
    """Fetch the discovery document at the given url.

    Args:
      discovery_url: Full URL or "api.version" shorthand.
      retries: Number of fetch attempts before giving up.

    Raises:
      CommunicationError: If no attempt succeeds.
    """
    discovery_url = NormalizeDiscoveryUrl(discovery_url)
    discovery_doc = None
    last_exception = None
    for _ in range(retries):
        try:
            discovery_doc = json.loads(urllib2.urlopen(discovery_url).read())
            break
        # NOTE(review): binding the exception directly to `last_exception`
        # relies on Python 2 scoping; on Python 3 the name is deleted when
        # the except block exits, so the raise below would NameError.
        except (urllib2.HTTPError, urllib2.URLError) as last_exception:
            logging.warning('Attempting to fetch discovery doc again after "%s"',
                            last_exception)
    if discovery_doc is None:
        raise CommunicationError('Could not find discovery doc at url "%s": %s' % (
            discovery_url, last_exception))
    return discovery_doc
| |
import numpy as np
import h5py
from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.layer_utils import *
class PretrainedCNN(object):
    """CNN with a hardcoded architecture whose weights can be loaded from
    an HDF5 file: nine [conv - spatial batchnorm - relu] blocks, one
    [affine - batchnorm - relu] block, and a final [affine] scoring layer.
    """

    def __init__(self, dtype=np.float32, num_classes=100, input_size=64, h5_file=None):
        self.dtype = dtype
        self.conv_params = []
        self.input_size = input_size
        self.num_classes = num_classes

        # TODO: In the future it would be nice if the architecture could be loaded from
        # the HDF5 file rather than being hardcoded. For now this will have to do.
        self.conv_params.append({'stride': 2, 'pad': 2})
        self.conv_params.append({'stride': 1, 'pad': 1})
        self.conv_params.append({'stride': 2, 'pad': 1})
        self.conv_params.append({'stride': 1, 'pad': 1})
        self.conv_params.append({'stride': 2, 'pad': 1})
        self.conv_params.append({'stride': 1, 'pad': 1})
        self.conv_params.append({'stride': 2, 'pad': 1})
        self.conv_params.append({'stride': 1, 'pad': 1})
        self.conv_params.append({'stride': 2, 'pad': 1})

        self.filter_sizes = [5, 3, 3, 3, 3, 3, 3, 3, 3]
        self.num_filters = [64, 64, 128, 128, 256, 256, 512, 512, 1024]
        hidden_dim = 512

        self.bn_params = []

        cur_size = input_size
        prev_dim = 3
        self.params = {}
        # He-style initialization: weights scaled by sqrt(2 / fan_in).
        for i, (f, next_dim) in enumerate(zip(self.filter_sizes, self.num_filters)):
            fan_in = f * f * prev_dim
            self.params['W%d' % (i + 1)] = np.sqrt(2.0 / fan_in) * np.random.randn(next_dim, prev_dim, f, f)
            self.params['b%d' % (i + 1)] = np.zeros(next_dim)
            self.params['gamma%d' % (i + 1)] = np.ones(next_dim)
            self.params['beta%d' % (i + 1)] = np.zeros(next_dim)
            self.bn_params.append({'mode': 'train'})
            prev_dim = next_dim
            # Stride-2 convolutions halve the spatial size.
            if self.conv_params[i]['stride'] == 2: cur_size /= 2

        # Add a fully-connected layers
        fan_in = cur_size * cur_size * self.num_filters[-1]
        self.params['W%d' % (i + 2)] = np.sqrt(2.0 / fan_in) * np.random.randn(fan_in, hidden_dim)
        self.params['b%d' % (i + 2)] = np.zeros(hidden_dim)
        self.params['gamma%d' % (i + 2)] = np.ones(hidden_dim)
        self.params['beta%d' % (i + 2)] = np.zeros(hidden_dim)
        self.bn_params.append({'mode': 'train'})
        self.params['W%d' % (i + 3)] = np.sqrt(2.0 / hidden_dim) * np.random.randn(hidden_dim, num_classes)
        self.params['b%d' % (i + 3)] = np.zeros(num_classes)

        # Cast all parameters to the requested dtype.
        for k, v in self.params.iteritems():
            self.params[k] = v.astype(dtype)

        if h5_file is not None:
            self.load_weights(h5_file)

    def load_weights(self, h5_file, verbose=False):
        """
        Load pretrained weights from an HDF5 file.

        Inputs:
        - h5_file: Path to the HDF5 file where pretrained weights are stored.
        - verbose: Whether to print debugging info
        """

        # Before loading weights we need to make a dummy forward pass to initialize
        # the running averages in the bn_pararams
        x = np.random.randn(1, 3, self.input_size, self.input_size)
        y = np.random.randint(self.num_classes, size=1)
        loss, grads = self.loss(x, y)

        with h5py.File(h5_file, 'r') as f:
            for k, v in f.iteritems():
                v = np.asarray(v)
                if k in self.params:
                    if verbose: print k, v.shape, self.params[k].shape
                    if v.shape == self.params[k].shape:
                        self.params[k] = v.copy()
                    elif v.T.shape == self.params[k].shape:
                        # Some weights are stored transposed; fix them up.
                        self.params[k] = v.T.copy()
                    else:
                        raise ValueError('shapes for %s do not match' % k)
                # Keys like 'running_mean7' feed the batchnorm running state.
                if k.startswith('running_mean'):
                    i = int(k[12:]) - 1
                    assert self.bn_params[i]['running_mean'].shape == v.shape
                    self.bn_params[i]['running_mean'] = v.copy()
                    if verbose: print k, v.shape
                if k.startswith('running_var'):
                    i = int(k[11:]) - 1
                    assert v.shape == self.bn_params[i]['running_var'].shape
                    self.bn_params[i]['running_var'] = v.copy()
                    if verbose: print k, v.shape

        # Re-cast everything after loading.
        for k, v in self.params.iteritems():
            self.params[k] = v.astype(self.dtype)

    def forward(self, X, start=None, end=None, mode='test'):
        """
        Run part of the model forward, starting and ending at an arbitrary layer,
        in either training mode or testing mode.

        You can pass arbitrary input to the starting layer, and you will receive
        output from the ending layer and a cache object that can be used to run
        the model backward over the same set of layers.

        For the purposes of this function, a "layer" is one of the following blocks:

        [conv - spatial batchnorm - relu] (There are 9 of these)
        [affine - batchnorm - relu] (There is one of these)
        [affine] (There is one of these)

        Inputs:
        - X: The input to the starting layer. If start=0, then this should be an
          array of shape (N, C, 64, 64).
        - start: The index of the layer to start from. start=0 starts from the first
          convolutional layer. Default is 0.
        - end: The index of the layer to end at. end=10 ends at the last
          fully-connected layer, returning class scores. Default is 10.
        - mode: The mode to use, either 'test' or 'train'. We need this because
          batch normalization behaves differently at training time and test time.

        Returns:
        - out: Output from the end layer.
        - cache: A cache object that can be passed to the backward method to run the
          network backward over the same range of layers.
        """
        X = X.astype(self.dtype)
        if start is None: start = 0
        if end is None: end = len(self.conv_params) + 1
        layer_caches = []

        prev_a = X
        for i in xrange(start, end + 1):
            i1 = i + 1
            if 0 <= i < len(self.conv_params):
                # This is a conv layer
                w, b = self.params['W%d' % i1], self.params['b%d' % i1]
                gamma, beta = self.params['gamma%d' % i1], self.params['beta%d' % i1]
                conv_param = self.conv_params[i]
                bn_param = self.bn_params[i]
                bn_param['mode'] = mode

                next_a, cache = conv_bn_relu_forward(prev_a, w, b, gamma, beta, conv_param, bn_param)
            elif i == len(self.conv_params):
                # This is the fully-connected hidden layer
                w, b = self.params['W%d' % i1], self.params['b%d' % i1]
                gamma, beta = self.params['gamma%d' % i1], self.params['beta%d' % i1]
                bn_param = self.bn_params[i]
                bn_param['mode'] = mode
                next_a, cache = affine_bn_relu_forward(prev_a, w, b, gamma, beta, bn_param)
            elif i == len(self.conv_params) + 1:
                # This is the last fully-connected layer that produces scores
                w, b = self.params['W%d' % i1], self.params['b%d' % i1]
                next_a, cache = affine_forward(prev_a, w, b)
            else:
                raise ValueError('Invalid layer index %d' % i)

            layer_caches.append(cache)
            prev_a = next_a

        out = prev_a
        cache = (start, end, layer_caches)

        return out, cache

    def backward(self, dout, cache):
        """
        Run the model backward over a sequence of layers that were previously run
        forward using the self.forward method.

        Inputs:
        - dout: Gradient with respect to the ending layer; this should have the same
          shape as the out variable returned from the corresponding call to forward.
        - cache: A cache object returned from self.forward.

        Returns:
        - dX: Gradient with respect to the start layer. This will have the same
          shape as the input X passed to self.forward.
        - grads: Gradient of all parameters in the layers. For example if you run
          forward through two convolutional layers, then on the corresponding call
          to backward grads will contain the gradients with respect to the weights,
          biases, and spatial batchnorm parameters of those two convolutional
          layers. The grads dictionary will therefore contain a subset of the keys
          of self.params, and grads[k] and self.params[k] will have the same shape.
        """
        start, end, layer_caches = cache
        dnext_a = dout
        grads = {}
        # Walk the layers in reverse, popping caches in LIFO order.
        for i in reversed(range(start, end + 1)):
            i1 = i + 1
            if i == len(self.conv_params) + 1:
                # This is the last fully-connected layer
                dprev_a, dw, db = affine_backward(dnext_a, layer_caches.pop())
                grads['W%d' % i1] = dw
                grads['b%d' % i1] = db
            elif i == len(self.conv_params):
                # This is the fully-connected hidden layer
                temp = affine_bn_relu_backward(dnext_a, layer_caches.pop())
                dprev_a, dw, db, dgamma, dbeta = temp
                grads['W%d' % i1] = dw
                grads['b%d' % i1] = db
                grads['gamma%d' % i1] = dgamma
                grads['beta%d' % i1] = dbeta
            elif 0 <= i < len(self.conv_params):
                # This is a conv layer
                temp = conv_bn_relu_backward(dnext_a, layer_caches.pop())
                dprev_a, dw, db, dgamma, dbeta = temp
                grads['W%d' % i1] = dw
                grads['b%d' % i1] = db
                grads['gamma%d' % i1] = dgamma
                grads['beta%d' % i1] = dbeta
            else:
                raise ValueError('Invalid layer index %d' % i)
            dnext_a = dprev_a

        dX = dnext_a
        return dX, grads

    def loss(self, X, y=None):
        """
        Classification loss used to train the network.

        Inputs:
        - X: Array of data, of shape (N, 3, 64, 64)
        - y: Array of labels, of shape (N,)

        If y is None, then run a test-time forward pass and return:
        - scores: Array of shape (N, 100) giving class scores.

        If y is not None, then run a training-time forward and backward pass and
        return a tuple of:
        - loss: Scalar giving loss
        - grads: Dictionary of gradients, with the same keys as self.params.
        """
        # Note that we implement this by just caling self.forward and self.backward
        mode = 'test' if y is None else 'train'
        scores, cache = self.forward(X, mode=mode)
        if mode == 'test':
            return scores
        loss, dscores = softmax_loss(scores, y)
        dX, grads = self.backward(dscores, cache)
        return loss, grads
| |
# -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Pedro Silva.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""List duplicate tracks or albums.
"""
from __future__ import division, absolute_import, print_function
import shlex
from beets.plugins import BeetsPlugin
from beets.ui import decargs, print_, Subcommand, UserError
from beets.util import command_output, displayable_path, subprocess, \
bytestring_path
from beets.library import Item, Album
import six
# Plugin name, used as a prefix in error messages and flexattr keys.
PLUGIN = 'duplicates'
class DuplicatesPlugin(BeetsPlugin):
"""List duplicate tracks or albums
"""
    def __init__(self):
        """Register config defaults and build the 'duplicates' subcommand;
        each config key below mirrors one of the command-line options."""
        super(DuplicatesPlugin, self).__init__()
        self.config.add({
            'album': False,
            'checksum': '',
            'copy': '',
            'count': False,
            'delete': False,
            'format': '',
            'full': False,
            'keys': [],
            'merge': False,
            'move': '',
            'path': False,
            'tiebreak': {},
            'strict': False,
            'tag': '',
        })

        self._command = Subcommand('duplicates',
                                   help=__doc__,
                                   aliases=['dup'])
        self._command.parser.add_option(
            u'-c', u'--count', dest='count',
            action='store_true',
            help=u'show duplicate counts',
        )
        self._command.parser.add_option(
            u'-C', u'--checksum', dest='checksum',
            action='store', metavar='PROG',
            help=u'report duplicates based on arbitrary command',
        )
        self._command.parser.add_option(
            u'-d', u'--delete', dest='delete',
            action='store_true',
            help=u'delete items from library and disk',
        )
        self._command.parser.add_option(
            u'-F', u'--full', dest='full',
            action='store_true',
            help=u'show all versions of duplicate tracks or albums',
        )
        self._command.parser.add_option(
            u'-s', u'--strict', dest='strict',
            action='store_true',
            help=u'report duplicates only if all attributes are set',
        )
        self._command.parser.add_option(
            u'-k', u'--key',
            action='append', metavar='KEY',
            help=u'report duplicates based on keys (use multiple times)',
        )
        self._command.parser.add_option(
            u'-M', u'--merge', dest='merge',
            action='store_true',
            help=u'merge duplicate items',
        )
        self._command.parser.add_option(
            u'-m', u'--move', dest='move',
            action='store', metavar='DEST',
            help=u'move items to dest',
        )
        self._command.parser.add_option(
            u'-o', u'--copy', dest='copy',
            action='store', metavar='DEST',
            help=u'copy items to dest',
        )
        self._command.parser.add_option(
            u'-t', u'--tag', dest='tag',
            action='store',
            help=u'tag matched items with \'k=v\' attribute',
        )
        self._command.parser.add_all_common_options()
def commands(self):
def _dup(lib, opts, args):
self.config.set_args(opts)
album = self.config['album'].get(bool)
checksum = self.config['checksum'].get(str)
copy = bytestring_path(self.config['copy'].as_str())
count = self.config['count'].get(bool)
delete = self.config['delete'].get(bool)
fmt = self.config['format'].get(str)
full = self.config['full'].get(bool)
keys = self.config['keys'].as_str_seq()
merge = self.config['merge'].get(bool)
move = bytestring_path(self.config['move'].as_str())
path = self.config['path'].get(bool)
tiebreak = self.config['tiebreak'].get(dict)
strict = self.config['strict'].get(bool)
tag = self.config['tag'].get(str)
if album:
if not keys:
keys = ['mb_albumid']
items = lib.albums(decargs(args))
else:
if not keys:
keys = ['mb_trackid', 'mb_albumid']
items = lib.items(decargs(args))
if path:
fmt = u'$path'
# Default format string for count mode.
if count and not fmt:
if album:
fmt = u'$albumartist - $album'
else:
fmt = u'$albumartist - $album - $title'
fmt += u': {0}'
if checksum:
for i in items:
k, _ = self._checksum(i, checksum)
keys = [k]
for obj_id, obj_count, objs in self._duplicates(items,
keys=keys,
full=full,
strict=strict,
tiebreak=tiebreak,
merge=merge):
if obj_id: # Skip empty IDs.
for o in objs:
self._process_item(o,
copy=copy,
move=move,
delete=delete,
tag=tag,
fmt=fmt.format(obj_count))
self._command.func = _dup
return [self._command]
def _process_item(self, item, copy=False, move=False, delete=False,
tag=False, fmt=u''):
"""Process Item `item`.
"""
print_(format(item, fmt))
if copy:
item.move(basedir=copy, copy=True)
item.store()
if move:
item.move(basedir=move, copy=False)
item.store()
if delete:
item.remove(delete=True)
if tag:
try:
k, v = tag.split('=')
except Exception:
raise UserError(
u"{}: can't parse k=v tag: {}".format(PLUGIN, tag)
)
setattr(item, k, v)
item.store()
def _checksum(self, item, prog):
"""Run external `prog` on file path associated with `item`, cache
output as flexattr on a key that is the name of the program, and
return the key, checksum tuple.
"""
args = [p.format(file=item.path) for p in shlex.split(prog)]
key = args[0]
checksum = getattr(item, key, False)
if not checksum:
self._log.debug(u'key {0} on item {1} not cached:'
u'computing checksum',
key, displayable_path(item.path))
try:
checksum = command_output(args)
setattr(item, key, checksum)
item.store()
self._log.debug(u'computed checksum for {0} using {1}',
item.title, key)
except subprocess.CalledProcessError as e:
self._log.debug(u'failed to checksum {0}: {1}',
displayable_path(item.path), e)
else:
self._log.debug(u'key {0} on item {1} cached:'
u'not computing checksum',
key, displayable_path(item.path))
return key, checksum
def _group_by(self, objs, keys, strict):
"""Return a dictionary with keys arbitrary concatenations of attributes
and values lists of objects (Albums or Items) with those keys.
If strict, all attributes must be defined for a duplicate match.
"""
import collections
counts = collections.defaultdict(list)
for obj in objs:
values = [getattr(obj, k, None) for k in keys]
values = [v for v in values if v not in (None, '')]
if strict and len(values) < len(keys):
self._log.debug(u'some keys {0} on item {1} are null or empty:'
u' skipping',
keys, displayable_path(obj.path))
elif (not strict and not len(values)):
self._log.debug(u'all keys {0} on item {1} are null or empty:'
u' skipping',
keys, displayable_path(obj.path))
else:
key = tuple(values)
counts[key].append(obj)
return counts
def _order(self, objs, tiebreak=None):
"""Return the objects (Items or Albums) sorted by descending
order of priority.
If provided, the `tiebreak` dict indicates the field to use to
prioritize the objects. Otherwise, Items are placed in order of
"completeness" (objects with more non-null fields come first)
and Albums are ordered by their track count.
"""
if tiebreak:
kind = 'items' if all(isinstance(o, Item)
for o in objs) else 'albums'
key = lambda x: tuple(getattr(x, k) for k in tiebreak[kind])
else:
kind = Item if all(isinstance(o, Item) for o in objs) else Album
if kind is Item:
def truthy(v):
# Avoid a Unicode warning by avoiding comparison
# between a bytes object and the empty Unicode
# string ''.
return v is not None and \
(v != '' if isinstance(v, six.text_type) else True)
fields = kind.all_keys()
key = lambda x: sum(1 for f in fields if truthy(getattr(x, f)))
else:
key = lambda x: len(x.items())
return sorted(objs, key=key, reverse=True)
def _merge_items(self, objs):
"""Merge Item objs by copying missing fields from items in the tail to
the head item.
Return same number of items, with the head item modified.
"""
fields = Item.all_keys()
for f in fields:
for o in objs[1:]:
if getattr(objs[0], f, None) in (None, ''):
value = getattr(o, f, None)
if value:
self._log.debug(u'key {0} on item {1} is null '
u'or empty: setting from item {2}',
f, displayable_path(objs[0].path),
displayable_path(o.path))
setattr(objs[0], f, value)
objs[0].store()
break
return objs
def _merge_albums(self, objs):
"""Merge Album objs by copying missing items from albums in the tail
to the head album.
Return same number of albums, with the head album modified."""
ids = [i.mb_trackid for i in objs[0].items()]
for o in objs[1:]:
for i in o.items():
if i.mb_trackid not in ids:
missing = Item.from_path(i.path)
missing.album_id = objs[0].id
missing.add(i._db)
self._log.debug(u'item {0} missing from album {1}:'
u' merging from {2} into {3}',
missing,
objs[0],
displayable_path(o.path),
displayable_path(missing.destination()))
missing.move(copy=True)
return objs
def _merge(self, objs):
"""Merge duplicate items. See ``_merge_items`` and ``_merge_albums``
for the relevant strategies.
"""
kind = Item if all(isinstance(o, Item) for o in objs) else Album
if kind is Item:
objs = self._merge_items(objs)
else:
objs = self._merge_albums(objs)
return objs
def _duplicates(self, objs, keys, full, strict, tiebreak, merge):
"""Generate triples of keys, duplicate counts, and constituent objects.
"""
offset = 0 if full else 1
for k, objs in self._group_by(objs, keys, strict).items():
if len(objs) > 1:
objs = self._order(objs, tiebreak)
if merge:
objs = self._merge(objs)
yield (k, len(objs) - offset, objs[offset:])
| |
"""
Source: http://www.djangosnippets.org/snippets/1350/
A smarter {% if %} tag for django templates.
While retaining current Django functionality, it also handles equality,
greater than and less than operators. Some common case examples::
{% if articles|length >= 5 %}...{% endif %}
{% if "ifnotequal tag" != "beautiful" %}...{% endif %}
"""
import unittest
from django import template
register = template.Library()
#==============================================================================
# Calculation objects
#==============================================================================
class BaseCalc(object):
    """
    Base class for template-tag calculations.

    Subclasses implement `calculate(var1, var2)`. `resolve` resolves both
    wrapped template variables against the context, runs the calculation,
    and maps any failure to False — mirroring how Django's stock `{% if %}`
    treats unresolvable variables. Setting `negate` inverts the outcome.
    """
    def __init__(self, var1, var2=None, negate=False):
        self.var1 = var1
        self.var2 = var2
        self.negate = negate

    def resolve(self, context):
        try:
            var1, var2 = self.resolve_vars(context)
            outcome = self.calculate(var1, var2)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed; any ordinary
            # resolution/comparison error still yields False.
            outcome = False
        if self.negate:
            return not outcome
        return outcome

    def resolve_vars(self, context):
        # `var2` may be None for unary calculations (e.g. a plain negation).
        var2 = self.var2 and self.var2.resolve(context)
        return self.var1.resolve(context), var2

    def calculate(self, var1, var2):
        raise NotImplementedError()
class Or(BaseCalc):
    def calculate(self, var1, var2):
        """Boolean "or": return ``var1`` when it is truthy, else ``var2``."""
        if var1:
            return var1
        return var2
class And(BaseCalc):
    def calculate(self, var1, var2):
        """Boolean "and": return ``var1`` when it is falsy, else ``var2``."""
        if var1:
            return var2
        return var1
class Equals(BaseCalc):
    def calculate(self, var1, var2):
        """Equality comparison (``=`` / ``==`` in the template tag)."""
        outcome = var1 == var2
        return outcome
class Greater(BaseCalc):
    def calculate(self, var1, var2):
        """Strict "greater than" comparison (``>``)."""
        outcome = var1 > var2
        return outcome
class GreaterOrEqual(BaseCalc):
    def calculate(self, var1, var2):
        """"Greater than or equal" comparison (``>=``)."""
        outcome = var1 >= var2
        return outcome
class In(BaseCalc):
    def calculate(self, var1, var2):
        """Membership test (``in``); failures (e.g. var2 is None) are mapped
        to False by ``BaseCalc.resolve``."""
        contained = var1 in var2
        return contained
#==============================================================================
# Tests
#==============================================================================
class TestVar(object):
    """
    A basic self-resolvable object similar to a Django template variable.
    Used to assist with tests.
    """

    def __init__(self, value):
        self.value = value

    def resolve(self, context):
        # The context is irrelevant: always hand back the stored value.
        return self.value
class SmartIfTests(unittest.TestCase):
    """Unit tests for the calculation objects and the ``IfParser``."""

    def setUp(self):
        # Reusable self-resolving template-variable stand-ins.
        self.true = TestVar(True)
        self.false = TestVar(False)
        self.high = TestVar(9000)
        self.low = TestVar(1)

    def assertCalc(self, calc, context=None):
        """
        Test a calculation is True, also checking the inverse "negate" case.
        """
        context = context or {}
        # `assert_` is a deprecated alias of `assertTrue` (removed in
        # Python 3.12); use the canonical name throughout.
        self.assertTrue(calc.resolve(context))
        calc.negate = not calc.negate
        self.assertFalse(calc.resolve(context))

    def assertCalcFalse(self, calc, context=None):
        """
        Test a calculation is False, also checking the inverse "negate" case.
        """
        context = context or {}
        self.assertFalse(calc.resolve(context))
        calc.negate = not calc.negate
        self.assertTrue(calc.resolve(context))

    def test_or(self):
        self.assertCalc(Or(self.true))
        self.assertCalcFalse(Or(self.false))
        self.assertCalc(Or(self.true, self.true))
        self.assertCalc(Or(self.true, self.false))
        self.assertCalc(Or(self.false, self.true))
        self.assertCalcFalse(Or(self.false, self.false))

    def test_and(self):
        self.assertCalc(And(self.true, self.true))
        self.assertCalcFalse(And(self.true, self.false))
        self.assertCalcFalse(And(self.false, self.true))
        self.assertCalcFalse(And(self.false, self.false))

    def test_equals(self):
        self.assertCalc(Equals(self.low, self.low))
        self.assertCalcFalse(Equals(self.low, self.high))

    def test_greater(self):
        self.assertCalc(Greater(self.high, self.low))
        self.assertCalcFalse(Greater(self.low, self.low))
        self.assertCalcFalse(Greater(self.low, self.high))

    def test_greater_or_equal(self):
        self.assertCalc(GreaterOrEqual(self.high, self.low))
        self.assertCalc(GreaterOrEqual(self.low, self.low))
        self.assertCalcFalse(GreaterOrEqual(self.low, self.high))

    def test_in(self):
        list_ = TestVar([1, 2, 3])
        invalid_list = TestVar(None)
        self.assertCalc(In(self.low, list_))
        self.assertCalcFalse(In(self.low, invalid_list))

    def test_parse_bits(self):
        var = IfParser([True]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([False]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([False, 'or', True]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([False, 'and', True]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser(['not', False, 'and', 'not', False]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser(['not', 'not', True]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([1, '=', 1]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([1, 'not', '=', 1]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([1, 'not', 'not', '=', 1]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([1, '!=', 1]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([3, '>', 2]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([1, '<', 2]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([2, 'not', 'in', [2, 3]]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([1, 'or', 1, '=', 2]).parse()
        self.assertTrue(var.resolve({}))

    def test_boolean(self):
        var = IfParser([True, 'and', True, 'and', True]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([False, 'or', False, 'or', True]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([True, 'and', False, 'or', True]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([False, 'or', True, 'and', True]).parse()
        self.assertTrue(var.resolve({}))
        var = IfParser([True, 'and', True, 'and', False]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([False, 'or', False, 'or', False]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([False, 'or', True, 'and', False]).parse()
        self.assertFalse(var.resolve({}))
        var = IfParser([False, 'and', True, 'or', False]).parse()
        self.assertFalse(var.resolve({}))

    def test_invalid(self):
        self.assertRaises(ValueError, IfParser(['not']).parse)
        self.assertRaises(ValueError, IfParser(['==']).parse)
        self.assertRaises(ValueError, IfParser([1, 'in']).parse)
        self.assertRaises(ValueError, IfParser([1, '>', 'in']).parse)
        self.assertRaises(ValueError, IfParser([1, '==', 'not', 'not']).parse)
        self.assertRaises(ValueError, IfParser([1, 2]).parse)
# Maps an operator token to a (calculation class, truth) pair. When `truth`
# is False the calculation's result is inverted by the parser, e.g. `<=` is
# implemented as "not Greater" and `<` as "not GreaterOrEqual".
OPERATORS = {
    '=': (Equals, True),
    '==': (Equals, True),
    '!=': (Equals, False),
    '>': (Greater, True),
    '>=': (GreaterOrEqual, True),
    '<=': (Greater, False),
    '<': (GreaterOrEqual, False),
    'or': (Or, True),
    'and': (And, True),
    'in': (In, True),
}
# Operators with lower precedence than the comparisons above; see
# IfParser.get_bool_var for how precedence is enforced.
BOOL_OPERATORS = ('or', 'and')
class IfParser(object):
    """
    Parse a flat list of tokens (variables and operator strings) into a
    tree of calculation objects, giving comparison operators higher
    precedence than the boolean ``and``/``or`` operators.
    """
    # Exception type raised on parse errors; TemplateIfParser overrides it
    # with TemplateSyntaxError.
    error_class = ValueError
    def __init__(self, tokens):
        self.tokens = tokens
    def _get_tokens(self):
        return self._tokens
    def _set_tokens(self, tokens):
        # (Re)assigning the token list caches its length and resets the
        # parse cursor.
        self._tokens = tokens
        self.len = len(tokens)
        self.pos = 0
    tokens = property(_get_tokens, _set_tokens)
    def parse(self):
        """Parse the whole token stream and return the root calculation."""
        if self.at_end():
            raise self.error_class('No variables provided.')
        var1 = self.get_bool_var()
        while not self.at_end():
            op, negate = self.get_operator()
            var2 = self.get_bool_var()
            # Left-fold: the previous result becomes the left operand.
            var1 = op(var1, var2, negate=negate)
        return var1
    def get_token(self, eof_message=None, lookahead=False):
        """
        Consume (or peek at, with ``lookahead=True``) the next non-'not'
        token. Returns ``(token, negate)``; ``negate`` starts at True and is
        flipped once per token read — including the final real token — so an
        even number of leading 'not's yields False, an odd number True.
        """
        negate = True
        token = None
        pos = self.pos
        while token is None or token == 'not':
            if pos >= self.len:
                if eof_message is None:
                    raise self.error_class()
                raise self.error_class(eof_message)
            token = self.tokens[pos]
            negate = not negate
            pos += 1
        if not lookahead:
            self.pos = pos
        return token, negate
    def at_end(self):
        return self.pos >= self.len
    def create_var(self, value):
        # Overridden by TemplateIfParser to compile real template variables.
        return TestVar(value)
    def get_bool_var(self):
        """
        Returns either a variable by itself or a non-boolean operation (such as
        ``x == 0`` or ``x < 0``).
        This is needed to keep correct precedence for boolean operations (i.e.
        ``x or x == 0`` should be ``x or (x == 0)``, not ``(x or x) == 0``).
        """
        var = self.get_var()
        if not self.at_end():
            op_token = self.get_token(lookahead=True)[0]
            # `basestring`: this module is Python 2 code; tokens may also be
            # non-string test values, hence the isinstance check.
            if isinstance(op_token, basestring) and (op_token not in
                    BOOL_OPERATORS):
                op, negate = self.get_operator()
                return op(var, self.get_var(), negate=negate)
        return var
    def get_var(self):
        token, negate = self.get_token('Reached end of statement, still '
                                       'expecting a variable.')
        if isinstance(token, basestring) and token in OPERATORS:
            raise self.error_class('Expected variable, got operator (%s).' %
                                   token)
        var = self.create_var(token)
        if negate:
            # Wrap in a negated unary Or so "not x" resolves to `not x`.
            return Or(var, negate=True)
        return var
    def get_operator(self):
        token, negate = self.get_token('Reached end of statement, still '
                                       'expecting an operator.')
        if not isinstance(token, basestring) or token not in OPERATORS:
            raise self.error_class('%s is not a valid operator.' % token)
        if self.at_end():
            raise self.error_class('No variable provided after "%s".' % token)
        op, true = OPERATORS[token]
        if not true:
            # Inverted operators, e.g. '<=' is registered as (Greater, False).
            negate = not negate
        return op, negate
#==============================================================================
# Actual templatetag code.
#==============================================================================
class TemplateIfParser(IfParser):
    """``IfParser`` subclass that resolves tokens via Django's template
    parser, so variables and filters work inside the smart ``{% if %}`` tag.
    """
    error_class = template.TemplateSyntaxError

    def __init__(self, parser, *args, **kwargs):
        self.template_parser = parser
        # `__init__` must not return a value; the original `return` of the
        # super call was misleading (it is always None).
        super(TemplateIfParser, self).__init__(*args, **kwargs)

    def create_var(self, value):
        # Compile the raw token into a FilterExpression.
        return self.template_parser.compile_filter(value)
class SmartIfNode(template.Node):
    """Template node rendering one of two nodelists based on a condition."""

    def __init__(self, var, nodelist_true, nodelist_false=None):
        self.var = var
        self.nodelist_true = nodelist_true
        self.nodelist_false = nodelist_false

    def render(self, context):
        # Render the "true" branch when the condition holds, the "false"
        # branch when one exists, and nothing otherwise.
        if self.var.resolve(context):
            return self.nodelist_true.render(context)
        elif self.nodelist_false:
            return self.nodelist_false.render(context)
        return ''

    def __repr__(self):
        return "<Smart If node>"

    def __iter__(self):
        # Iterate the "true" nodes first, then any "false" nodes.
        for node in self.nodelist_true:
            yield node
        if self.nodelist_false:
            for node in self.nodelist_false:
                yield node

    def get_nodes_by_type(self, nodetype):
        nodes = []
        if isinstance(self, nodetype):
            nodes.append(self)
        nodes.extend(self.nodelist_true.get_nodes_by_type(nodetype))
        if self.nodelist_false:
            nodes.extend(self.nodelist_false.get_nodes_by_type(nodetype))
        return nodes
@register.tag('if')
def smart_if(parser, token):
    """
    A smarter {% if %} tag for django templates.
    While retaining current Django functionality, it also handles equality,
    greater than and less than operators. Some common case examples::
        {% if articles|length >= 5 %}...{% endif %}
        {% if "ifnotequal tag" != "beautiful" %}...{% endif %}
    Arguments and operators _must_ have a space between them, so
    ``{% if 1>2 %}`` is not a valid smart if tag.
    All supported operators are: ``or``, ``and``, ``in``, ``=`` (or ``==``),
    ``!=``, ``>``, ``>=``, ``<`` and ``<=``.
    """
    # Everything after the tag name itself forms the condition expression.
    bits = token.split_contents()[1:]
    condition = TemplateIfParser(parser, bits).parse()
    # Collect the body up to the matching {% else %} or {% endif %}.
    nodelist_true = parser.parse(('else', 'endif'))
    closing = parser.next_token()
    nodelist_false = None
    if closing.contents == 'else':
        nodelist_false = parser.parse(('endif',))
        parser.delete_first_token()
    return SmartIfNode(condition, nodelist_true, nodelist_false)
if __name__ == '__main__':
    # Running this module directly executes the SmartIfTests suite.
    unittest.main()
| |
import ctypes
from sys import stdout, platform as _platform
from datetime import datetime, timedelta
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot.tree_config_builder import ConfigException
class UpdateLiveStats(BaseTask):
    """
    Periodically displays stats about the bot in the terminal and/or in its title.
    Fetching some stats requires making API calls. If you're concerned about the amount of calls
    your bot is making, don't enable this worker.
    Example config :
    {
        "type": "UpdateLiveStats",
        "config": {
            "min_interval": 10,
            "stats": ["login", "uptime", "km_walked", "level_stats", "xp_earned", "xp_per_hour"],
            "terminal_log": true,
            "terminal_title": false
        }
    }
    min_interval : The minimum interval at which the stats are displayed,
                   in seconds (defaults to 120 seconds).
                   The update interval cannot be accurate as workers run synchronously.
    stats : An array of stats to display and their display order (implicitly),
            see available stats below (defaults to []).
    terminal_log : Logs the stats into the terminal (defaults to false).
    terminal_title : Displays the stats into the terminal title (defaults to true).
    Available stats :
    - login : The account login (from the credentials).
    - username : The trainer name (asked at first in-game connection).
    - uptime : The bot uptime.
    - km_walked : The kilometers walked since the bot started.
    - level : The current character's level.
    - level_completion : The current level experience, the next level experience and the completion
                         percentage.
    - level_stats : Puts together the current character's level and its completion.
    - xp_per_hour : The estimated gain of experience per hour.
    - xp_earned : The experience earned since the bot started.
    - stops_visited : The number of visited stops.
    - pokemon_encountered : The number of encountered pokemon.
    - pokemon_caught : The number of caught pokemon.
    - captures_per_hour : The estimated number of pokemon captured per hour.
    - pokemon_released : The number of released pokemon.
    - pokemon_evolved : The number of evolved pokemon.
    - pokemon_unseen : The number of pokemon never seen before.
    - pokemon_stats : Puts together the pokemon encountered, caught, released, evolved and unseen.
    - pokeballs_thrown : The number of thrown pokeballs.
    - stardust_earned : The number of earned stardust since the bot started.
    - highest_cp_pokemon : The caught pokemon with the highest CP since the bot started.
    - most_perfect_pokemon : The most perfect caught pokemon since the bot started.
    - location : The location where the player is located.
    - next_egg_hatching : The remaining distance to the next egg hatching (km).
    - hatched_eggs : The number of hatched eggs since the bot started.
    """
    SUPPORTED_TASK_API_VERSION = 1

    def __init__(self, bot, config):
        """
        Initializes the worker.
        :param bot: The bot instance.
        :type bot: PokemonGoBot
        :param config: The task configuration.
        :type config: dict
        """
        super(UpdateLiveStats, self).__init__(bot, config)
        self.next_update = None
        self.min_interval = int(self.config.get('min_interval', 120))
        self.displayed_stats = self.config.get('stats', [])
        self.terminal_log = bool(self.config.get('terminal_log', False))
        self.terminal_title = bool(self.config.get('terminal_title', True))
        self.bot.event_manager.register_event('log_stats', parameters=('stats', 'stats_raw'))

    def initialize(self):
        pass

    def work(self):
        """
        Displays the stats if necessary.
        :return: Always returns WorkerResult.SUCCESS.
        :rtype: WorkerResult
        """
        if not self._should_display():
            return WorkerResult.SUCCESS
        line = self._get_stats_line(self._get_player_stats())
        # If line is empty, it couldn't be generated.
        if not line:
            return WorkerResult.SUCCESS
        if self.terminal_title:
            self._update_title(line, _platform)
        if self.terminal_log:
            self._log_on_terminal(line)
        return WorkerResult.SUCCESS

    def _should_display(self):
        """
        Returns a value indicating whether the stats should be displayed.
        :return: True if the stats should be displayed; otherwise, False.
        :rtype: bool
        """
        if not self.terminal_title and not self.terminal_log:
            return False
        return self.next_update is None or datetime.now() >= self.next_update

    def _compute_next_update(self):
        """
        Computes the next update datetime based on the minimum update interval.
        :return: Nothing.
        :rtype: None
        """
        self.next_update = datetime.now() + timedelta(seconds=self.min_interval)

    def _log_on_terminal(self, stats):
        """
        Logs the stats into the terminal using an event.
        :param stats: The stats to display.
        :type stats: string
        :return: Nothing.
        :rtype: None
        """
        self.emit_event(
            'log_stats',
            formatted="{stats}",
            data={
                'stats': stats,
                'stats_raw': self._get_stats(self._get_player_stats())
            }
        )
        self._compute_next_update()

    def _update_title(self, title, platform):
        """
        Updates the window title using different methods, according to the given platform.
        :param title: The new window title.
        :type title: string
        :param platform: The platform string.
        :type platform: string
        :return: Nothing.
        :rtype: None
        :raise: RuntimeError: When the given platform isn't supported.
        """
        try:
            if platform == "linux" or platform == "linux2" or platform == "cygwin":
                stdout.write("\x1b]2;{}\x07".format(title))
                stdout.flush()
            elif platform == "darwin":
                stdout.write("\033]0;{}\007".format(title))
                stdout.flush()
            elif platform == "win32":
                ctypes.windll.kernel32.SetConsoleTitleA(title.encode())
            else:
                raise RuntimeError("unsupported platform '{}'".format(platform))
        except AttributeError:
            # stdout may not support write/flush (e.g. when redirected);
            # disable title updates instead of crashing the worker.
            self.emit_event(
                'log_stats',
                level='error',
                formatted="Unable to write window title"
            )
            self.terminal_title = False
        self._compute_next_update()

    def _get_stats(self, player_stats):
        """
        Gathers the raw (unformatted) stat values as a dictionary.
        :param player_stats: The player stats object (see _get_player_stats).
        :type player_stats: dict
        :return: A dict mapping stat names to raw values.
        :rtype: dict
        """
        metrics = self.bot.metrics
        metrics.capture_stats()
        runtime = metrics.runtime()
        login = self.bot.config.username
        player_data = self.bot.player_data
        username = player_data.get('username', '?')
        distance_travelled = metrics.distance_travelled()
        current_level = int(player_stats.get('level', 0))
        prev_level_xp = int(player_stats.get('prev_level_xp', 0))
        next_level_xp = int(player_stats.get('next_level_xp', 0))
        experience = player_stats.get('experience', 0)
        current_level_xp = experience - prev_level_xp
        whole_level_xp = next_level_xp - prev_level_xp
        # Guard against ZeroDivisionError: at the level cap (or with missing
        # stats) next_level_xp equals prev_level_xp.
        if whole_level_xp > 0:
            level_completion_percentage = (current_level_xp * 100) / whole_level_xp
        else:
            level_completion_percentage = 0
        experience_per_hour = metrics.xp_per_hour()
        xp_earned = metrics.xp_earned()
        stops_visited = metrics.visits['latest'] - metrics.visits['start']
        pokemon_encountered = metrics.num_encounters()
        pokemon_caught = metrics.num_captures()
        captures_per_hour = metrics.captures_per_hour()
        pokemon_released = metrics.releases
        pokemon_evolved = metrics.num_evolutions()
        pokemon_unseen = metrics.num_new_mons()
        pokeballs_thrown = metrics.num_throws()
        stardust_earned = metrics.earned_dust()
        highest_cp_pokemon = metrics.highest_cp['desc']
        if not highest_cp_pokemon:
            highest_cp_pokemon = "None"
        most_perfect_pokemon = metrics.most_perfect['desc']
        if not most_perfect_pokemon:
            most_perfect_pokemon = "None"
        next_egg_hatching = metrics.next_hatching_km(0)
        hatched_eggs = metrics.hatched_eggs(0)
        # Create stats strings.
        available_stats = {
            'login': login,
            'username': username,
            'uptime': '{}'.format(runtime),
            'km_walked': distance_travelled,
            'level': current_level,
            'experience': experience,
            # Bug fix: this key previously duplicated whole_level_xp.
            'current_level_xp': current_level_xp,
            'whole_level_xp': whole_level_xp,
            'level_completion_percentage': level_completion_percentage,
            'xp_per_hour': experience_per_hour,
            'xp_earned': xp_earned,
            'stops_visited': stops_visited,
            'pokemon_encountered': pokemon_encountered,
            'pokemon_caught': pokemon_caught,
            'captures_per_hour': captures_per_hour,
            'pokemon_released': pokemon_released,
            'pokemon_evolved': pokemon_evolved,
            'pokemon_unseen': pokemon_unseen,
            'pokeballs_thrown': pokeballs_thrown,
            'stardust_earned': stardust_earned,
            'highest_cp_pokemon': highest_cp_pokemon,
            'most_perfect_pokemon': most_perfect_pokemon,
            'location': [self.bot.position[0], self.bot.position[1]],
            'next_egg_hatching': float(next_egg_hatching),
            'hatched_eggs': hatched_eggs
        }
        return available_stats

    def _get_stats_line(self, player_stats):
        """
        Generates a stats string with the given player stats according to the configuration.
        :return: A string containing human-readable stats, ready to be displayed.
        :rtype: string
        """
        # No player stats available, won't be able to gather all informations.
        if player_stats is None:
            return ''
        # No stats to display, avoid any useless overhead.
        if not self.displayed_stats:
            return ''
        # Gather stats values.
        metrics = self.bot.metrics
        metrics.capture_stats()
        runtime = metrics.runtime()
        login = self.bot.config.username
        player_data = self.bot.player_data
        username = player_data.get('username', '?')
        distance_travelled = metrics.distance_travelled()
        current_level = int(player_stats.get('level', 0))
        prev_level_xp = int(player_stats.get('prev_level_xp', 0))
        next_level_xp = int(player_stats.get('next_level_xp', 0))
        experience = int(player_stats.get('experience', 0))
        current_level_xp = experience - prev_level_xp
        whole_level_xp = next_level_xp - prev_level_xp
        # Guard against ZeroDivisionError (level cap / missing stats), as in
        # _get_stats above.
        if whole_level_xp > 0:
            level_completion_percentage = int((current_level_xp * 100) / whole_level_xp)
        else:
            level_completion_percentage = 0
        experience_per_hour = int(metrics.xp_per_hour())
        xp_earned = metrics.xp_earned()
        stops_visited = metrics.visits['latest'] - metrics.visits['start']
        pokemon_encountered = metrics.num_encounters()
        pokemon_caught = metrics.num_captures()
        captures_per_hour = int(metrics.captures_per_hour())
        pokemon_released = metrics.releases
        pokemon_evolved = metrics.num_evolutions()
        pokemon_unseen = metrics.num_new_mons()
        pokeballs_thrown = metrics.num_throws()
        stardust_earned = metrics.earned_dust()
        highest_cp_pokemon = metrics.highest_cp['desc']
        if not highest_cp_pokemon:
            highest_cp_pokemon = "None"
        most_perfect_pokemon = metrics.most_perfect['desc']
        if not most_perfect_pokemon:
            most_perfect_pokemon = "None"
        next_egg_hatching = metrics.next_hatching_km(0)
        hatched_eggs = metrics.hatched_eggs(0)
        # Create stats strings.
        available_stats = {
            'login': login,
            'username': username,
            'uptime': 'Uptime : {}'.format(runtime),
            'km_walked': '{:,.2f}km walked'.format(distance_travelled),
            'level': 'Level {}'.format(current_level),
            'level_completion': '{:,} / {:,} XP ({}%)'.format(current_level_xp, whole_level_xp,
                                                              level_completion_percentage),
            'level_stats': 'Level {} ({:,} / {:,}, {}%)'.format(current_level, current_level_xp,
                                                                whole_level_xp,
                                                                level_completion_percentage),
            'xp_per_hour': '{:,} XP/h'.format(experience_per_hour),
            'xp_earned': '+{:,} XP'.format(xp_earned),
            'stops_visited': 'Visited {:,} stops'.format(stops_visited),
            'pokemon_encountered': 'Encountered {:,} pokemon'.format(pokemon_encountered),
            'pokemon_caught': 'Caught {:,} pokemon'.format(pokemon_caught),
            'captures_per_hour': '{:,} pokemon/h'.format(captures_per_hour),
            'pokemon_released': 'Released {:,} pokemon'.format(pokemon_released),
            'pokemon_evolved': 'Evolved {:,} pokemon'.format(pokemon_evolved),
            'pokemon_unseen': 'Encountered {} new pokemon'.format(pokemon_unseen),
            'pokemon_stats': 'Encountered {:,} pokemon, {:,} caught, {:,} released, {:,} evolved, '
                             '{} never seen before'.format(pokemon_encountered, pokemon_caught,
                                                           pokemon_released, pokemon_evolved,
                                                           pokemon_unseen),
            'pokeballs_thrown': 'Threw {:,} pokeballs'.format(pokeballs_thrown),
            'stardust_earned': 'Earned {:,} Stardust'.format(stardust_earned),
            'highest_cp_pokemon': 'Highest CP pokemon : {}'.format(highest_cp_pokemon),
            'most_perfect_pokemon': 'Most perfect pokemon : {}'.format(most_perfect_pokemon),
            'location': 'Location : ({}, {})'.format(self.bot.position[0], self.bot.position[1]),
            'next_egg_hatching': 'Next egg hatches in : {:.2f} km'.format(float(next_egg_hatching)),
            'hatched_eggs': 'Hatched {} eggs.'.format(hatched_eggs)
        }

        def get_stat(stat):
            """
            Fetches a stat string from the available stats dictionary.
            :param stat: The stat name.
            :type stat: string
            :return: The generated stat string.
            :rtype: string
            :raise: ConfigException: When the provided stat string isn't in the available stats
            dictionary.
            """
            if stat not in available_stats:
                raise ConfigException("stat '{}' isn't available for displaying".format(stat))
            return available_stats[stat]
        # Map stats the user wants to see to available stats and join them with pipes.
        line = ' | '.join(map(get_stat, self.displayed_stats))
        return line

    def _get_player_stats(self):
        """
        Helper method parsing the bot inventory object and returning the player stats object.
        :return: The player stats object.
        :rtype: dict
        """
        # TODO : find a better solution than calling the api
        inventory_items = self.bot.api.get_inventory() \
            .get('responses', {}) \
            .get('GET_INVENTORY', {}) \
            .get('inventory_delta', {}) \
            .get('inventory_items', {})
        return next((x["inventory_item_data"]["player_stats"]
                     for x in inventory_items
                     if x.get("inventory_item_data", {}).get("player_stats", {})),
                    None)
| |
# -*- coding: utf8 -*-
from __future__ import unicode_literals
import time
import logging
logger = logging.getLogger(__name__)
from parametizable import Parametizable
from consts import ParamsTypes, Spaces, Hooks
class AgentException(Exception):
    """Error raised by the Agent (e.g. incompatible problem/algorithm spaces)."""
    pass
class Agent(Parametizable):
"""Run and execute a given algorithm in the given environment."""
PARAMS = {
'nEpisodes': ParamsTypes.Number,
'renderFreq': ParamsTypes.Number,
'stepDelay': ParamsTypes.Number,
'episodeDelay': ParamsTypes.Number,
'renderStepDelay': ParamsTypes.Number
}
PARAMS_DOMAIN = {
'nEpisodes': {
'range': (0, float('inf')),
'values': [10, 100, 1000, 10000, 100000]
},
'renderFreq': {
'range': (-1, float('inf')),
'values': ['testOnly', -1, 10, 100, 1000, 2000, 10000]
},
'stepDelay': {
'range': (0, 10000),
'values': [0, 1, 100]
},
'renderStepDelay': {
'range': (0, 10000),
'values': [0, 1, 100, 1000]
},
'episodeDelay': {
'range': (0, 10000),
'values': [0, 1, 100]
}
}
PARAMS_DEFAULT = {
'nEpisodes': 2500,
'renderFreq': 500,
'stepDelay': 0,
'episodeDelay': 1,
'renderStepDelay': 0
}
PARAMS_DESCRIPTION = {
'nEpisodes': "\
Number of episodes one run of the training will last unless manually \
interrupted.",
'renderFreq': "\
If the environment has rendering capabilities, this is the frequency with which\
a rendered episode should happen. Rendering is done server-side. \
Set to -1 to disable.",
'stepDelay': "\
Delay in ms between steps. Set to 0 will disable delaying.",
'renderStepDelay': "Delay in ms between steps while rendering.",
'episodeDelay': "\
Delay in ms between episodes. Set to 0 will disable delaying. Note that server \
will only reply to requests during delays."
}
    def __init__(self, inspectorsFactory=None, **kwargs):
        """
        Create an agent; `setup()` must be called before it can run.
        :param inspectorsFactory: object used to report training progress.
            NOTE(review): when omitted this falls back to a plain list, but
            `test()` later calls `.dispatch(...)` on it — confirm a real
            factory is always injected before running.
        """
        super(Agent, self).__init__(**kwargs)
        self._problem = None
        self._algo = None
        self._inspectorsFactory = inspectorsFactory or []
        self.isSetup = False
        # Shortest observed episode duration; used by `test()` when reporting
        # the duration of a rendered (and therefore slowed-down) episode.
        self._minDuration = float('inf')
        self._iEpisode = 0
        self._isTesting = False
def _checkCompatibility(self, problem, algo):
"""
Make sure the algo can solve the given problem.
This simply validate domains compatibility. Continuous domain
algorithms can solve discrete problems, but discrete domain
algorithms cannot solve continuout problems.
"""
for space in ('state', 'action'):
if problem.DOMAIN[space] == Spaces.Continuous:
if algo.DOMAIN[space] == Spaces.Discrete:
# incompatibility
raise AgentException(
"Incompatible %s space: %s algorithms cannot solve %s "
"problem." % (space, algo.DOMAIN[space],
problem.DOMAIN[space]))
def setup(self, problem, algo):
logger.info(
"Agent setup: Algorithm=%s, Problem=%s",
algo.__class__.__name__,
problem.__class__.__name__)
self._checkCompatibility(problem, algo)
self._problem = problem
self._algo = algo
self._problem.setup()
self._algo.setup(self._problem)
self.isSetup = True
  def test(self):
    """
    Returns an iterator that will run a single episode of the environment.
    It will yield the accumulated reward, the step number and a boolean
    indicating whether the episode is terminated.
    If rendering wasn't specifically disabled, the episode will be rendered.
    """
    self._isTesting = True
    state = self._problem.reset()
    # NOTE(review): self.nEpisodes is passed where pickAction expects an
    # episode index -- presumably to force the final (greedy) exploration
    # schedule during testing; confirm against the algo implementations.
    action = self._algo.pickAction(
      state, self.nEpisodes, optimize=True)
    episodeReturn = 0
    startT = time.time()
    iStep = 0
    didRender = False
    self._algo.startEpisode(state)
    for iStep in xrange(self._problem.maxSteps):
      newState, reward, _, info = self._problem.step(action)
      episodeReturn += reward
      # no training this time
      action = self._algo.pickAction(
        newState, self.nEpisodes, optimize=True)
      state = newState
      # Render every step unless rendering is globally disabled (-1).
      if self.renderFreq != -1:
        didRender = True
        self._problem.render()
      done = self._problem.episodeDone(stepI=iStep)
      yield episodeReturn, self.nEpisodes, iStep, done or (
        iStep == self._problem.maxSteps - 1)
      if done:
        break
    if didRender:
      self._problem.render(close=True)
    duration = time.time() - startT
    self._minDuration = min(self._minDuration, duration)
    yield (episodeReturn, self.nEpisodes, self._problem.maxSteps, True)
    # Report progress; when the episode was rendered its wall time is not
    # representative, so fall back to the best duration seen so far.
    self._inspectorsFactory.dispatch(
      hook=Hooks.trainingProgress,
      iEpisode=self.nEpisodes,
      nEpisodes=self.nEpisodes,
      episodeReturn=episodeReturn,
      episodeSteps=iStep,
      episodeDuration=duration if not didRender else self._minDuration)
    self._isTesting = False
def shouldRender(self):
# render if we are in testing and the renderFreq isn't -1
shouldRender = self._isTesting and self.renderFreq != -1
# or, alternatively, render if renderFreq is neither -1 nor 'testOnly'
# AND it is either 0 or the module of renderFreq is 0
shouldRender |= (
self.renderFreq != -1 and self.renderFreq != 'testOnly' and
(self.renderFreq == 0 or (
self.renderFreq > 0 and
(self._iEpisode - 1) % self.renderFreq == 0)))
return shouldRender
  def train(self):
    """
    Returns an iterator that will execute one step of the environment
    each time its next() function is called.
    After each step it yields the return for the episode (so far),
    the episode number, the step number and a boolean indicating whether
    the episode is terminated.
    Use inspectors and associated hook functions to gather more
    information about the execution of the environment.
    """
    for self._iEpisode in xrange(0, self.nEpisodes):
      startT = time.time()
      # NOTE(review): timeSpentRendering stays 0 and didRender stays False
      # for the whole loop, so `duration` includes rendering time and the
      # didRender branch in the dispatch below is dead -- confirm whether
      # rendering time was meant to be tracked here as in test().
      timeSpentRendering = 0
      state = self._problem.reset()
      action = self._algo.pickAction(state, self._iEpisode)
      episodeReturn = 0
      didRender = False
      self._algo.startEpisode(state)
      for iStep in xrange(self._problem.maxSteps):
        shouldRender = self.shouldRender()
        newState, reward, _, info = self._problem.step(action)
        episodeReturn += reward
        # Train on the observed transition; the algo returns the next action.
        action = self._algo.train(
          oldState=state,
          newState=newState,
          action=action,
          reward=reward,
          episodeI=self._iEpisode,
          stepI=iStep)
        state = newState
        if shouldRender:
          self._problem.render()
        done = self._problem.episodeDone(stepI=iStep)
        yield episodeReturn, self._iEpisode, iStep, False
        if done:
          if shouldRender:
            self._problem.render(close=True)
          break
      duration = time.time() - startT - timeSpentRendering
      self._minDuration = min(self._minDuration, duration)
      yield (episodeReturn, self._iEpisode, iStep, True)
      self._algo.endEpisode(totalReturn=episodeReturn)
      # Report per-episode progress to the inspectors.
      self._inspectorsFactory.dispatch(
        hook=Hooks.trainingProgress,
        iEpisode=self._iEpisode,
        nEpisodes=self.nEpisodes,
        episodeReturn=episodeReturn,
        episodeSteps=iStep,
        episodeDuration=(
          duration if not didRender else self._minDuration))
def release(self):
"""
Release handles and memory before deletion.
Used notably to close opened windows server-side.
"""
if self._problem is not None:
self._problem.terminate()
self._problem.release()
if self.renderFreq != -1:
self._problem.render(close=True)
| |
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Examine success/fail history for Chrome/ium OS builds.
Used to check in a LKGM version for Chrome OS for other consumers.
"""
from __future__ import print_function
import distutils.version
import os
from chromite.cbuildbot import archive_lib
from chromite.cbuildbot import cbuildbot_config
from chromite.cbuildbot import constants
from chromite.cbuildbot import manifest_version
from chromite.cbuildbot import tree_status
from chromite.lib import commandline
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import gclient
from chromite.lib import gs
from chromite.lib import osutils
from chromite.lib import parallel
class LKGMNotFound(Exception):
  """Raised when no acceptable (newer, valid) LKGM version can be located."""
class LKGMNotCommitted(Exception):
  """Raised when creating or landing the new LKGM commit fails."""
class ChromeCommitter(object):
  """Committer object responsible for obtaining a new LKGM and committing it."""

  # Commit-message template; %(version)s is filled with the new LKGM.
  _COMMIT_MSG = ('Automated Commit: Committing new LKGM version %(version)s '
                 'for chromeos.')
  # How many of the newest canary versions to score as LKGM candidates.
  _CANDIDATES_TO_CONSIDER = 10

  # Polling period / overall timeout (seconds) while waiting for the tree.
  _SLEEP_TIMEOUT = 30
  _TREE_TIMEOUT = 7200

  def __init__(self, checkout_dir, dryrun):
    """Args:
      checkout_dir: path where chromium/src is (or will be) checked out.
      dryrun: if True, prepare the commit but never actually land it.
    """
    self._checkout_dir = checkout_dir
    self._dryrun = dryrun
    self._lkgm = None
    self._old_lkgm = None

  def CheckoutChromeLKGM(self):
    """Checkout chromeos LKGM file for chrome into tmp checkout dir."""
    if not os.path.exists(self._checkout_dir):
      cros_build_lib.RunCommand(
          ['git', 'clone', constants.CHROMIUM_GOB_URL,
           self._checkout_dir])
    else:
      cros_build_lib.RunCommand(
          ['git', 'fetch', 'origin'], cwd=self._checkout_dir)
      cros_build_lib.RunCommand(
          ['git', 'checkout', '-f', 'origin/master'], cwd=self._checkout_dir)

    # Recreate the working branch from scratch; deleting it may fail when it
    # does not exist yet, which is fine (error_code_ok=True).
    cros_build_lib.RunCommand(
        ['git', 'branch', '-D', 'lkgm-roll'], cwd=self._checkout_dir,
        error_code_ok=True)
    cros_build_lib.RunCommand(
        ['git', 'checkout', '-b', 'lkgm-roll', 'origin/master'],
        cwd=self._checkout_dir)

    self._old_lkgm = osutils.ReadFile(
        os.path.join(self._checkout_dir, constants.PATH_TO_CHROME_LKGM))

  @cros_build_lib.MemoizedSingleCall
  def _GetLatestCanaryVersions(self):
    """Returns the latest CANDIDATES_TO_CONSIDER canary versions."""
    gs_handle = gs.GSContext()
    version_paths = gs_handle.LS(manifest_version.BUILD_STATUS_URL)

    # Strip gs://<path> prefix and trailing /'s.
    versions = [os.path.basename(v.rstrip('/')) for v in version_paths]

    lv = distutils.version.LooseVersion
    # We only care about canary versions which always end in 0.0.
    canary_versions = [v for v in versions if v.endswith('.0.0')]
    # Only versions strictly newer than the currently committed LKGM count.
    new_canary_versions = [v for v in canary_versions
                           if lv(v) > lv(self._old_lkgm)]
    return sorted(new_canary_versions, key=lv,
                  reverse=True)[0:self._CANDIDATES_TO_CONSIDER]

  def FindNewLKGM(self):
    """Finds a new LKGM for chrome from previous chromeos releases."""
    versions = self._GetLatestCanaryVersions()

    if not versions:
      raise LKGMNotFound('No valid LKGM found newer than the old LKGM.')

    canaries = cbuildbot_config.GetCanariesForChromeLKGM()
    logging.info('Considering the following versions: %s', ' '.join(versions))
    logging.info('Using scores from the following canaries: %s',
                 ' '.join(canaries))

    # Scores are based on passing builders.
    version_scores = {}
    for version in versions:
      for builder in canaries:
        status = manifest_version.BuildSpecsManager.GetBuildStatus(
            builder, version, retries=0)
        if status:
          if status.Passed():
            version_scores[version] = version_scores.get(version, 0) + 1
          elif status.Failed():
            # We don't consider builds with any reporting failures.
            version_scores[version] = 0
            break

      logging.info('Version %s had score %d', version,
                   version_scores.get(version, 0))

    # We want to get the version with the highest score. In case of a tie, we
    # want to choose the highest version.
    lkgm = max((v, k) for k, v in version_scores.iteritems())[1]
    if not version_scores[lkgm] > 0:
      raise LKGMNotFound('No valid LKGM found. Scores are too low.')

    self._lkgm = lkgm

  def CommitNewLKGM(self):
    """Commits the new LKGM file using our template commit message.

    Raises:
      LKGMNotFound: if no valid new LKGM has been determined.
      LKGMNotCommitted: if the commit could not be created or landed.
    """
    lv = distutils.version.LooseVersion
    # Guard: a valid new LKGM must exist AND be strictly newer than the old
    # one. BUG FIX: the previous `and` made the version comparison dead code
    # (it only ran when self._lkgm was falsy, where lv(None) is meaningless),
    # and the `<` direction was inverted. `or` expresses the intended
    # "missing or not newer" check and short-circuits before comparing None.
    if not self._lkgm or not lv(self._lkgm) > lv(self._old_lkgm):
      raise LKGMNotFound('No valid LKGM found. Did you run FindNewLKGM?')

    commit_msg = self._COMMIT_MSG % dict(version=self._lkgm)

    try:
      # Add the new versioned file.
      osutils.WriteFile(
          os.path.join(self._checkout_dir, constants.PATH_TO_CHROME_LKGM),
          self._lkgm)
      cros_build_lib.RunCommand(
          ['git', 'add', constants.PATH_TO_CHROME_LKGM], cwd=self._checkout_dir)

      # Commit it!
      cros_build_lib.RunCommand(
          ['git', 'commit', '-m', commit_msg],
          cwd=self._checkout_dir)
    except cros_build_lib.RunCommandError as e:
      raise LKGMNotCommitted(
          'Could not create git commit with new LKGM: %r' % e)

    # Never land while the Chromium tree is closed.
    if not tree_status.IsTreeOpen(status_url=gclient.STATUS_URL,
                                  period=self._SLEEP_TIMEOUT,
                                  timeout=self._TREE_TIMEOUT):
      raise LKGMNotCommitted('Chromium Tree is closed')

    if not self._dryrun:
      try:
        cros_build_lib.RunCommand(
            ['git', 'cl', 'land', '-f', '--bypass-hooks', '-m', commit_msg],
            cwd=self._checkout_dir)
      except cros_build_lib.RunCommandError as e:
        raise LKGMNotCommitted('Could not submit LKGM: %r' % e)

  def UpdateLatestFilesForBot(self, config, versions):
    """Update the LATEST files, for a given bot, in Google Storage.

    Args:
      config: The builder config to update.
      versions: Versions of ChromeOS to look at, sorted in descending order.
    """
    base_url = archive_lib.GetBaseUploadURI(config)
    acl = archive_lib.GetUploadACL(config)
    latest_url = None
    # gs.GSContext skips over all commands (including read-only checks)
    # when dry_run is True, so we have to create two context objects.
    # TODO(davidjames): Fix this.
    gs_ctx = gs.GSContext()
    copy_ctx = gs.GSContext(dry_run=self._dryrun)
    # Walk oldest-first so latest_url always points at the newest LATEST
    # file seen so far, and fill in gaps by copying it forward.
    for version in reversed(versions):
      url = os.path.join(base_url, 'LATEST-%s' % version)
      found = gs_ctx.Exists(url, print_cmd=False)
      if not found and latest_url:
        try:
          copy_ctx.Copy(latest_url, url, version=0, acl=acl)
          logging.info('Copied %s -> %s', latest_url, url)
        except gs.GSContextPreconditionFailed:
          # A racing writer created the file first; treat it as found.
          found = True

      if found:
        logging.info('Found %s', url)
        latest_url = url

  def UpdateLatestFiles(self):
    """Update the LATEST files since LKGM, in Google Storage."""
    ext_cfgs, int_cfgs = cbuildbot_config.FindFullConfigsForBoard(board=None)
    versions = self._GetLatestCanaryVersions() + [self._old_lkgm]
    tasks = [[cfg, versions] for cfg in ext_cfgs + int_cfgs]
    parallel.RunTasksInProcessPool(self.UpdateLatestFilesForBot, tasks,
                                   processes=100)
def _GetParser():
  """Build and return the command-line parser for this module."""
  arg_parser = commandline.ArgumentParser(usage=__doc__, caching=True)
  arg_parser.add_argument('--dryrun', action='store_true', default=False,
                          help="Find the next LKGM but don't commit it.")
  arg_parser.add_argument('--workdir', default=os.path.join(os.getcwd(), 'src'),
                          help=('Path to a checkout of chromium/src. '
                                'Defaults to PWD/src'))
  return arg_parser
def main(argv):
  """Entry point: check out the LKGM, refresh LATEST files, then find and
  commit a new LKGM. Returns 0 on success."""
  opts = _GetParser().parse_args(argv)
  committer = ChromeCommitter(opts.workdir, dryrun=opts.dryrun)
  # The steps must run in this order: checkout establishes _old_lkgm,
  # FindNewLKGM establishes _lkgm, CommitNewLKGM consumes both.
  for step in (committer.CheckoutChromeLKGM, committer.UpdateLatestFiles,
               committer.FindNewLKGM, committer.CommitNewLKGM):
    step()
  return 0
| |
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Obtaining the trajectory of the SO(3) N=1 vacuum on SL(2)x7.
Producing all the artefacts:
python3 -i -m dim4.papers.bfis2021.so3n1_on_sl2x7 all
"""
import os
# For interactive debugging only.
import pdb # pylint:disable=unused-import
import pprint
from dim4.papers.bfis2021 import analyze_sl2x7
from dim4.papers.bfis2021 import u4xr12_boundary_equilibrium
from dim4.so8.src import dyonic
from dim4.theta.src import gaugings
from m_theory_lib import algebra
from m_theory_lib import m_util as mu
from matplotlib import pyplot
import numpy
def get_theta_u4xr12(c=1.0):
  """Returns the Dyonic-U(4)|xR12 Theta-tensor.

  Args:
    c: relative coupling applied to the rows built from the projector onto
      the last two of the eight directions (the magnetic half, rows 28:).
  """
  spin8 = algebra.g.spin8
  su8 = algebra.g.su8
  # Embedding tensor: 56 rows (e7 fundamental), 133 columns (e7 adjoint).
  theta = numpy.zeros([56, 133])
  # d6 projects onto the last 2 of 8 directions; cd6 onto the first 6.
  d6 = numpy.diag([0.0] * 6 + [1.0] * 2)
  cd6 = numpy.eye(8) - d6
  # Columns 105: couple to the su(8) part; first/last 28 rows differ only
  # in the projector used (cd6 vs c * d6).
  theta[:28, 105:] += (-1 / 64.0) * mu.nsum(
      'Iij,Kkl,ijab,klcd,ac,bd->IK',
      su8.m_28_8_8, su8.m_28_8_8,
      spin8.gamma_vvss, spin8.gamma_vvss,
      cd6, numpy.eye(8))
  theta[28:, 105:] += (-1 / 64.0) * mu.nsum(
      'Iij,Kkl,ijab,klcd,ac,bd->IK',
      su8.m_28_8_8, su8.m_28_8_8,
      spin8.gamma_vvss, spin8.gamma_vvss,
      c * d6, numpy.eye(8))
  # Columns :35 couple to the 35-dimensional symmetric-traceless part.
  theta[:28, :35] += -(1 / 16.0) * mu.nsum(
      'Iij,Aab,ijcd,ac,bd->IA',
      su8.m_28_8_8,
      su8.m_35_8_8,
      spin8.gamma_vvss,
      numpy.eye(8), d6)
  theta[28:, :35] += -(1 / 16.0) * mu.nsum(
      'Iij,Aab,ijcd,ac,bd->IA',
      su8.m_28_8_8,
      su8.m_35_8_8,
      spin8.gamma_vvss,
      numpy.eye(8), c * d6)
  return theta
if __name__ == '__main__':
  # Pipeline driver: each stage below is toggled from the command line via
  # mu.arg_enabled (run the module with `all` to produce every artefact).
  target_dir = mu.home_relative('tmp/traj_so3n1')
  trajectory_npy_filename = os.path.join(
      target_dir, 'trajectory_so3n1.npy')
  # The 14-parameter (SL(2,R))^7 submanifold embedded into the 133 e7
  # directions; its first 70 columns span the scalar directions.
  sl2x7 = algebra.g.e7.sl2x7[:2, :, :].reshape(14, 133)
  subspace_an = sl2x7[:, :70].T
  sugra = dyonic.SO8c_SUGRA(subspace_an=subspace_an)
  ds_step = 0.003
  scan_boundary_gauging_num_samples = 50
  scan_file = os.path.join(target_dir, 'u4xr12_equilibria.csv')
  analyzed_file = os.path.join(target_dir, 'u4xr12_equilibria_analyzed.pytxt')
  os.makedirs(target_dir, exist_ok=True)
  if mu.arg_enabled(__name__, 'compute_trajectory'):
    print('# Computing SO(3) N=1 trajectory on SL2x7...')
    v14 = analyze_sl2x7.v14_from_7z(analyze_sl2x7.get_7z_from_bfp_z123(
        # Numbers match Eq. (4.31) in BFP, https://arxiv.org/abs/1909.10969
        (0.1696360+0.1415740j, 0.4833214+0.3864058j, -0.3162021-0.5162839j)))
    v70_so3n1 = subspace_an.dot(v14)
    # Check that we do have the correct equilibrium.
    pot, stat = sugra.potential_and_stationarity(v70_so3n1,
                                                 t_omega=mu.tff64(0.0))
    assert abs(-13.84096 - pot) < 1e-4 and stat < 1e-8
    # Follow the omega-deformation of this equilibrium in both directions.
    dyonic.analyze_omega_deformation(
        mu.home_relative(target_dir),
        v70_so3n1,
        ds=ds_step)
    glob_pos, glob_neg = (
        os.path.join(target_dir, f'S1384096/omega_0.0000_{tag}_*.log')
        for tag in ('pos', 'neg'))
    tdata = dyonic.collect_trajectory_logs(glob_pos, glob_neg)
    numpy.save(trajectory_npy_filename, tdata)
  if mu.arg_enabled(__name__, 'extrapolate_and_plot'):
    print('# Extrapolating trajectory and plotting...')
    tdata = numpy.load(trajectory_npy_filename)
    omega_min, omega_max = (-0.25 * numpy.pi), (0.5 * numpy.pi)
    pot_stat_zs_js_by_omega = (
        analyze_sl2x7.get_pot_stat_zs_js_by_omega_from_trajectory_data(tdata))
    trajectory_fn_zs = analyze_sl2x7.get_trajectory_fn_zs(
        sugra,
        {omega: psz[2] for omega, psz in pot_stat_zs_js_by_omega.items()},
        omega_min, omega_max)
    figs, singular_values = analyze_sl2x7.plot_trajectory(
        sugra,
        trajectory_fn_zs,
        numpy.linspace(omega_min, omega_max, 200),
        [(0, +1), (2, -1), (6, +1)], # z_selectors,
        z_styles=('#00cccc', '#0000ff', '#ff0000'),
        per_z_special_omegas=(
            [(0, '0', 0), (numpy.pi/8, r'$\pi/8$', 0)],
            [(0, '0', -0.07+0.04j), (numpy.pi/8, r'$\pi/8$', 0)],
            [(0, '0', 0),
             (numpy.pi/8, r'$\pi/8$', 0),
             (numpy.pi/2, r'$\pi/2$', -0.16-0.04j),
             (-numpy.pi/4, r'$-\pi/4$', -0.18-0.07j),
            ],
        ),
        refined_points=True,
        filename=os.path.join(target_dir, 'traj_so3n1_sl2z7.pdf'))
    for fig in figs:
      pyplot.close(fig)
  if mu.arg_enabled(__name__, 'get_boundary_gauging'):
    print('# Obtaining boundary gauging...')
    tdata = numpy.load(trajectory_npy_filename)
    pot_stat_zs_js_by_omega = (
        analyze_sl2x7.get_pot_stat_zs_js_by_omega_from_trajectory_data(tdata))
    boundary_gauging, v70_finite_lim, ext_pot_stat_zs_by_omega = (
        analyze_sl2x7.get_boundary_gauging(
            sugra,
            numpy.pi / 2,
            # Here, we have to trim off the already-added extrapolated points
            # again.
            {omega: pszj[:-1]
             for omega, pszj in sorted(pot_stat_zs_js_by_omega.items())[1:-1]}))
    boundary_gauging.save(os.path.join(target_dir, 'boundary_gauging.npz'))
    numpy.save(os.path.join(target_dir, 'v70_finite_lim.npy'), v70_finite_lim)
  if mu.arg_enabled(__name__, 'scan_u4xr12'):
    print('# Scan for equilibria in the (for now conjectured) boundary-gauging.')
    # Here, we only do a small scan. The specific solution used further down
    # was found with index #1715 in a deeper scan.
    mu.rm(scan_file)
    theta_sugra = gaugings.Dim4SUGRA(
        get_theta_u4xr12(c=1.0),
        gaugeability_atol=1e-10)
    sols = []
    # zip() against range(...) caps the (otherwise open-ended) scan.
    for nn, (pot, stat, params) in zip(
        range(scan_boundary_gauging_num_samples),
        theta_sugra.scan(
            x0s=theta_sugra.get_generator_x0s(seed=2, scale=0.25),
            minimize_kwargs=dict(default_maxiter=2000),
            verbosity='SF')):
      sol = nn, pot, stat, *params.tolist()
      sols.append(sol)
      print(f'### nn={nn} P={pot:.8f} S={stat:.6g}\n')
      # Append each solution immediately so partial scans are preserved.
      with open(scan_file, 'at') as h_out:
        print(','.join(map(repr, sol)), file=h_out)
  if mu.arg_enabled(__name__, 'analyze_u4xr12_scan'):
    # A deeper scan would reveal 24 different critical points.
    # There might be more.
    stationarity_limit = 1e-14
    theta_sugra = gaugings.Dim4SUGRA(
        get_theta_u4xr12(c=1.0),
        gaugeability_atol=1e-10)
    scanned = list(mu.csv_numdata(scan_file))
    analyzed = {}
    for row in scanned:
      num_row = int(row[0])
      if num_row % 10 == 0:
        print(f'Row {num_row}...')
      if row[2] > stationarity_limit:
        continue # Skip bad data.
      m_grav = theta_sugra.gravitino_masses_from_position(row[-70:])
      # Group solutions by potential value (6 decimals) -> gravitino spectra.
      key = f'{row[1]:.6f}'
      analyzed.setdefault(key, []).append(
          (num_row, ' '.join(f'{m:+.4f}' for m in m_grav)))
    with open(analyzed_file, 'wt') as h_out:
      h_out.write(pprint.pformat(analyzed, width=120))
  if mu.arg_enabled(__name__, 'align_u4xr12_equilibrium'):
    v70 = numpy.array(u4xr12_boundary_equilibrium.v70)
    theta_sugra = gaugings.Dim4SUGRA(
        get_theta_u4xr12(c=1.0),
        gaugeability_atol=1e-10,
        # Target mass spectrum (from anlytic-continuation boundary gauging):
        # (Details are discussed in u4xr12_boundary_equilibrium.py)
        stationarity_tweak=('M2G', [41/9] * 3 + [4.0] * 4 + [1.0]))
    eq_info = theta_sugra.find_equilibrium(
        v70, verbosity='S',
        minimize_kwargs=dict(default_gtol=1e-30, default_maxiter=10**4))
    phys = theta_sugra.get_physics(eq_info[-1], {})
    print('=== The U4xR12 Equilibrium ===')
    print(theta_sugra.show_physics_text(phys))
    with open(os.path.join(target_dir, 'physics_u4xr12.tex'), 'wt') as h_tex:
      h_tex.write(theta_sugra.show_physics_tex(phys)[0])
      h_tex.write('\n\n%%%\n\n')
      h_tex.write(theta_sugra.show_physics_tex(phys)[1])
    #
    gauging_bg = gaugings.Gauging.load(
        os.path.join(target_dir, 'boundary_gauging.npz'))
    theta_bg = gauging_bg.theta
    v70_finite_lim = numpy.load(
        os.path.join(target_dir, 'v70_finite_lim.npy'))
    sugra_bg = gaugings.Dim4SUGRA(theta_bg,
                                  check_gaugeability=False)
    opt_pot, opt_stat, opt_v70 = sugra_bg.find_equilibrium(
        # The way the boundary gauging was constructed,
        # we have to take the *negative* finite-part-limit-v70.
        -v70_finite_lim, verbosity='S')
    opt_phys = sugra_bg.get_physics(opt_v70, metadata={})
    print('=== The Boundary Gauging Equilibrium ===')
    print(sugra_bg.show_physics_text(opt_phys))
    # NOTE(review): the .tex output below uses theta_sugra while the text
    # output above uses sugra_bg -- confirm this asymmetry is intended.
    with open(os.path.join(target_dir, 'physics_bg.tex'), 'wt') as h_tex:
      h_tex.write(theta_sugra.show_physics_tex(opt_phys)[0])
      h_tex.write('\n\n%%%\n\n')
      h_tex.write(theta_sugra.show_physics_tex(opt_phys)[1])
  if mu.arg_enabled(__name__, 'plot_trajectory70'):
    print('# Plotting "70-parameters" trajectory...')
    tdata = numpy.load(trajectory_npy_filename)
    dyonic.plot_trajectory(
        tdata,
        filename=os.path.join(target_dir, 'traj_so3n1_70.pdf'))
  if mu.arg_enabled(__name__, 'get_trajectory_story'):
    print('# Getting trajectory story...')
    tdata = numpy.load(trajectory_npy_filename)
    story_dir = os.path.join(target_dir, 'traj_story')
    os.makedirs(story_dir, exist_ok=True)
    story = dyonic.trajectory_get_story(sugra, tdata, story_dir)
    with open(os.path.join(story_dir, 'story.pytxt'), 'wt') as h_out:
      h_out.write(repr(story))
| |
"""
Events instrumenting STS internals.
"""
import time
from hb_json_event import JsonEvent
from hb_json_event import AttributeCombiningMetaclass
from hb_utils import base64_decode_openflow
from hb_utils import base64_encode
from hb_utils import base64_encode_flow
from hb_utils import base64_encode_flow_list
from hb_utils import base64_encode_flow_table
from hb_utils import decode_flow_table
from hb_utils import decode_flow_mod
from hb_utils import decode_packet
from hb_utils import get_port_no
from hb_utils import ofp_type_to_str
from hb_utils import ofp_flow_removed_reason_to_str
from hb_utils import str_to_ofp_flow_removed_reason
class TraceSwitchEvent(JsonEvent):
  """Base class for switch-side trace events.

  JSON (de)serialization is driven by the _to_json_attrs/_from_json_attrs
  tables: each entry is either a plain attribute name or a
  (name, encoder/decoder) pair. Subclasses only set the attributes they use.
  """
  __metaclass__ = AttributeCombiningMetaclass
  _attr_combining_metaclass_args = ["_to_json_attrs"]
  #TODO(jm): clean up/remove unused ones, check which ones are actually used in hb_graph and hb_events and remove the ones that are not used.
  _to_json_attrs = ['dpid',
                    'controller_id', # socket.getpeername(), NOT the STS cid
                    'hid',
                    ('packet', base64_encode),
                    ('in_port', get_port_no),
                    ('out_port', get_port_no),
                    'buffer_id',
                    ('msg', base64_encode),
                    ('flow_table', lambda x: x if x is None else base64_encode_flow_table(x)),
                    ('flow_mod', base64_encode),
                    ('removed', base64_encode),
                    ('expired_flows', base64_encode_flow_list),
                    ('matched_flow', base64_encode),
                    ('touched_flow', base64_encode),
                    'touched_flow_bytes',
                    ('t', lambda fp: repr(fp)), # str() is not precise for floating point numbers in Python < v3.2
                    'duration_sec',
                    'duration_nsec',
                    ('reason', ofp_flow_removed_reason_to_str),
                    ]
  _from_json_attrs = {
      'eid': lambda x: x,
      'dpid': lambda x: x,
      'controller_id': lambda x: x, # socket.getpeername(), NOT the STS cid
      'hid': lambda x: x,
      'packet': decode_packet,
      'in_port': lambda x: x,
      'out_port': lambda x: x,
      'buffer_id': lambda x: x,
      'msg': base64_decode_openflow,
      'flow_table': lambda x: None if x is None else decode_flow_table(x),
      'flow_mod': decode_flow_mod,
      'removed': decode_flow_mod,
      'expired_flows': lambda flows: [decode_flow_mod(x) for x in flows],
      'matched_flow': decode_flow_mod,
      'touched_flow': decode_flow_mod,
      'touched_flow_bytes': lambda x: x,
      't': lambda x: float(x),
      'duration_sec': lambda x: x,
      'duration_nsec': lambda x: x,
      'reason': str_to_ofp_flow_removed_reason,
      }
  def __init__(self, t, eid=None):
    super(TraceSwitchEvent, self).__init__(eid=eid)
    # NOTE(review): `t or time.time()` replaces a timestamp of exactly 0
    # with the current time; `t if t is not None else time.time()` would be
    # stricter -- confirm a zero timestamp can never occur here.
    self.t = t or time.time()
class TraceAsyncSwitchFlowExpiryBegin(TraceSwitchEvent):
  # Marks the start of asynchronous flow-expiry processing on switch `dpid`.
  def __init__(self, dpid, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
class TraceAsyncSwitchFlowExpiryEnd(TraceSwitchEvent):
  # Marks the end of asynchronous flow-expiry processing on switch `dpid`.
  def __init__(self, dpid, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
class TraceSwitchPacketHandleBegin(TraceSwitchEvent):
  # A packet arrived on `in_port` of switch `dpid`; handling started.
  def __init__(self, dpid, packet, in_port, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
    self.packet = packet
    self.in_port = in_port
class TraceSwitchPacketHandleEnd(TraceSwitchEvent):
  # Handling of the packet announced by the matching ...Begin event is done.
  def __init__(self, dpid, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
# TODO(jm): remove this, and all uses of TraceSwitchMessageRx
class TraceSwitchMessageRx(TraceSwitchEvent):
  # OpenFlow message received by switch `dpid` from a controller; stored
  # both decoded (`msg`) and base64-encoded (`b64msg`).
  def __init__(self, dpid, controller_id, msg, b64msg, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
    self.controller_id = controller_id
    self.msg = msg
    self.b64msg = b64msg
class TraceSwitchMessageTx(TraceSwitchEvent):
  # OpenFlow message transmitted by switch `dpid` towards a controller.
  def __init__(self, dpid, controller_id, msg, b64msg, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
    self.controller_id = controller_id
    self.msg = msg
    self.b64msg = b64msg
class TraceSwitchMessageHandleBegin(TraceSwitchEvent):
  # Switch started handling a controller message of OpenFlow type `msg_type`.
  def __init__(self, dpid, controller_id, msg, msg_type, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
    self.controller_id = controller_id
    self.msg = msg
    self.msg_type = msg_type
  @property
  def msg_type_str(self):
    # Human-readable name of the OpenFlow message type.
    return ofp_type_to_str(self.msg_type)
class TraceSwitchMessageHandleEnd(TraceSwitchEvent):
  # Switch finished handling the message from the matching ...Begin event.
  def __init__(self, dpid, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
class TraceSwitchMessageSend(TraceSwitchEvent):
  # Switch sent `msg` to a controller; `cid` is the STS controller id,
  # `controller_id` the socket peername.
  def __init__(self, dpid, cid, controller_id, msg, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
    self.cid = cid
    self.controller_id = controller_id
    self.msg = msg
class TraceSwitchPacketSend(TraceSwitchEvent):
  # Switch emitted `packet` on `out_port`.
  def __init__(self, dpid, packet, out_port, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
    self.packet = packet
    self.out_port = out_port
class TraceSwitchFlowTableRead(TraceSwitchEvent):
  # Records a flow-table lookup for `packet`: the matched entry (`flow_mod`)
  # and, optionally, a snapshot of the whole flow table.
  def __init__(self, dpid, packet, in_port, flow_mod,
               touched_flow_bytes=None, touched_flow_now=None, flow_table=None, t=None, eid=None,
               make_copy=True):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
    self.packet = packet
    self.in_port = in_port
    if make_copy:
      # Snapshot via an encode/decode round-trip so later table mutations
      # cannot retroactively change this event; XIDs are zeroed so the
      # copies compare stably.
      self.flow_table = None if flow_table is None else decode_flow_table(base64_encode_flow_table(flow_table, set_zero_XID=True))
      self.flow_mod = decode_flow_mod(base64_encode_flow(flow_mod, set_zero_XID=True))
      self.entry = decode_flow_mod(base64_encode_flow(flow_mod, set_zero_XID=True)) #TODO(jm): unused
    else:
      self.flow_table = flow_table
      self.flow_mod = flow_mod
      self.entry = flow_mod
    self.touched_flow_bytes = touched_flow_bytes
    self.touched_flow_now = touched_flow_now
class TraceSwitchFlowTableWrite(TraceSwitchEvent):
  # A flow_mod was written into the flow table of switch `dpid`.
  def __init__(self, dpid, flow_mod, flow_table=None, t=None, eid=None, make_copy=True):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
    if make_copy:
      # Snapshot through an encode/decode round-trip (see FlowTableRead).
      self.flow_table = None if flow_table is None else decode_flow_table(base64_encode_flow_table(flow_table, set_zero_XID=True))
      # NOTE(review): unlike FlowTableRead/EntryExpiry, the flow_mod encode
      # here does NOT pass set_zero_XID=True -- confirm whether preserving
      # the XID is intended or an oversight.
      self.flow_mod = decode_flow_mod(base64_encode_flow(flow_mod))
    else:
      self.flow_table = flow_table
      self.flow_mod = flow_mod
class TraceSwitchFlowTableEntryExpiry(TraceSwitchEvent):
  # A flow entry was removed; `reason` is the OpenFlow removal reason and
  # duration_sec/duration_nsec give the entry's lifetime.
  def __init__(self, dpid, flow_mod, duration_sec, duration_nsec, reason, flow_table=None, t=None, eid=None, make_copy=True):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
    if make_copy:
      # Snapshot with zeroed XIDs, as in FlowTableRead.
      self.flow_table = None if flow_table is None else decode_flow_table(base64_encode_flow_table(flow_table, set_zero_XID=True))
      self.flow_mod = decode_flow_mod(base64_encode_flow(flow_mod, set_zero_XID=True))
    else:
      self.flow_table = flow_table
      self.flow_mod = flow_mod
    self.duration_sec = duration_sec
    self.duration_nsec = duration_nsec
    self.reason = reason
class TraceSwitchBarrier(TraceSwitchEvent):
  # Switch processed an OpenFlow barrier.
  def __init__(self, dpid, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
class TraceSwitchPacketDrop(TraceSwitchEvent):
  # `packet` was dropped by switch `dpid`; the flow table at drop time is
  # recorded alongside.
  def __init__(self, dpid, packet, in_port, flow_table, t=None, eid=None, make_copy=True):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
    self.packet = packet
    self.in_port = in_port
    if make_copy:
      # Snapshot the table via an encode/decode round-trip (XIDs zeroed).
      self.flow_table = decode_flow_table(base64_encode_flow_table(flow_table, set_zero_XID=True))
    else:
      self.flow_table = flow_table
class TraceSwitchNoOp(TraceSwitchEvent):
  # Explicit "nothing happened" marker for switch `dpid`.
  def __init__(self, dpid, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
class TraceSwitchBufferPut(TraceSwitchEvent):
  # `packet` was stored in the switch packet buffer under `buffer_id`.
  def __init__(self, dpid, packet, in_port, buffer_id, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
    self.packet = packet
    self.in_port = in_port
    self.buffer_id = buffer_id
class TraceSwitchBufferGet(TraceSwitchEvent):
  # `packet` was retrieved from the switch packet buffer (`buffer_id`).
  def __init__(self, dpid, packet, in_port, buffer_id, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
    self.packet = packet
    self.in_port = in_port
    self.buffer_id = buffer_id
class TraceSwitchPacketUpdateBegin(TraceSwitchEvent):
  # Switch started modifying `packet`.
  def __init__(self, dpid, packet, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
    self.packet = packet
class TraceSwitchPacketUpdateEnd(TraceSwitchEvent):
  # Switch finished modifying the packet; `packet` is the updated version.
  def __init__(self, dpid, packet, t=None, eid=None):
    TraceSwitchEvent.__init__(self, t=t, eid=eid)
    self.dpid = dpid
    self.packet = packet
class TraceHostEvent(JsonEvent):
  """Base class for host-side trace events; JSON mapping is given by the
  attribute tables below (same scheme as TraceSwitchEvent)."""
  __metaclass__ = AttributeCombiningMetaclass
  _attr_combining_metaclass_args = ["_to_json_attrs"]
  _to_json_attrs = [
      'hid',
      ('packet', base64_encode),
      ('in_port', get_port_no),
      ('out_port', get_port_no),
      ('t', lambda fp: repr(fp)),
      ]
  _from_json_attrs = {
      'eid': lambda x: x,
      'hid': lambda x: x,
      'packet': decode_packet,
      'in_port': lambda x: x,
      'out_port': lambda x: x,
      't': lambda x: float(x),
      }
  def __init__(self, t=None, eid=None):
    super(TraceHostEvent, self).__init__(eid=eid)
    # NOTE(review): unlike TraceSwitchEvent, no time.time() fallback is
    # applied here, so self.t may stay None -- confirm this is intended.
    self.t = t
class TraceHostPacketHandleBegin(TraceHostEvent):
  # Host `hid` started handling `packet` received on `in_port`.
  def __init__(self, hid, packet, in_port, t=None, eid=None):
    TraceHostEvent.__init__(self, t=t, eid=eid)
    self.hid = hid
    self.packet = packet
    self.in_port = in_port
class TraceHostPacketHandleEnd(TraceHostEvent):
  # Host `hid` finished handling the packet.
  def __init__(self, hid, t=None, eid=None):
    TraceHostEvent.__init__(self, t=t, eid=eid)
    self.hid = hid
class TraceHostPacketSend(TraceHostEvent):
  # Host `hid` sent `packet` out of `out_port`.
  def __init__(self, hid, packet, out_port, t=None, eid=None):
    TraceHostEvent.__init__(self, t=t, eid=eid)
    self.hid = hid
    self.packet = packet
    self.out_port = out_port
# Register every concrete event class with the JsonEvent registry so traces
# can be deserialized back from JSON by type name.
# NOTE(review): TraceSwitchEvent itself is not registered (abstract base)
# while TraceHostEvent is -- confirm the asymmetry is intended.
JsonEvent.register_type(TraceAsyncSwitchFlowExpiryBegin)
JsonEvent.register_type(TraceAsyncSwitchFlowExpiryEnd)
JsonEvent.register_type(TraceSwitchPacketHandleBegin)
JsonEvent.register_type(TraceSwitchPacketHandleEnd)
JsonEvent.register_type(TraceSwitchMessageRx)
JsonEvent.register_type(TraceSwitchMessageTx)
JsonEvent.register_type(TraceSwitchMessageHandleBegin)
JsonEvent.register_type(TraceSwitchMessageHandleEnd)
JsonEvent.register_type(TraceSwitchMessageSend)
JsonEvent.register_type(TraceSwitchPacketSend)
JsonEvent.register_type(TraceSwitchFlowTableRead)
JsonEvent.register_type(TraceSwitchFlowTableWrite)
JsonEvent.register_type(TraceSwitchFlowTableEntryExpiry)
JsonEvent.register_type(TraceSwitchBufferPut)
JsonEvent.register_type(TraceSwitchBufferGet)
JsonEvent.register_type(TraceSwitchPacketUpdateBegin)
JsonEvent.register_type(TraceSwitchPacketUpdateEnd)
JsonEvent.register_type(TraceHostEvent)
JsonEvent.register_type(TraceHostPacketHandleBegin)
JsonEvent.register_type(TraceHostPacketHandleEnd)
JsonEvent.register_type(TraceHostPacketSend)
JsonEvent.register_type(TraceSwitchBarrier)
JsonEvent.register_type(TraceSwitchPacketDrop)
JsonEvent.register_type(TraceSwitchNoOp)
| |
import random
import time
import math
import copy
from agents.agent import Agent
from util import info, opponent
class MonteCarloAgent(Agent):
    """Agent that chooses Reversi moves via Monte Carlo Tree Search."""
    def __init__(self, reversi, color, **kwargs):
        self.color = color
        self.reversi = reversi
        # Seconds to spend simulating per move (default 5).
        self.sim_time = kwargs.get('sim_time', 5)
        # map states to nodes for quick lookup
        self.tree_manager = TreeManager(self.reversi)
    def reset(self):
        # No per-game state to clear; the tree manager is reused.
        pass
    def observe_win(self, winner):
        # Game outcomes are not used by this agent.
        pass
def get_action(self, game_state, legal_moves):
"""
Interface from class Agent. Given a game state
and a set of legal moves, pick a legal move and return it.
This will be called by the Reversi game object. Does not mutate
the game state argument.
"""
if not legal_moves:
return None
# make a deep copy to keep the promise that we won't mutate
game_state = copy.deepcopy(game_state)
move = self.monte_carlo_search(game_state)
return move
    def monte_carlo_search(self, game_state):
        """
        Given a game state, return the best action decided by
        using Monte Carlo Tree Search with an Upper Confidence Bound.
        """
        root = self.tree_manager.get_node(game_state)
        # even if this is a "recycled" node we've already used,
        # remove its parent as it is now considered our root level node
        root.parent = None
        sim_count = 0
        now = time.time()
        # Keep simulating until the sim_time (seconds) budget is exhausted.
        while time.time() - now < self.sim_time:
            # pick move to simulate with UCT
            picked_node = self.tree_policy(root)
            # run the simulation and get the result
            result = self.simulate(picked_node.game_state)
            # back prop the result of this move up the tree
            self.back_prop(picked_node, result)
            sim_count += 1
        # the following is purely for printing information
        results = {}
        for child in root.children:
            wins, plays = child.get_wins_plays()
            position = child.move
            results[position] = (wins, plays)
        for position in sorted(results, key=lambda x: results[x][1]):
            wins, plays = results[position][0], results[position][1]
            # NOTE(review): under Python 2, wins / plays is integer division
            # unless wins is a float -- confirm the win-rate print is right.
            info('{}: ({}/{}) ({:.2f})'.format(position,
                wins, plays, wins / plays))
        info('{} simulations performed.'.format(sim_count))
        return self.best_action(root)
@staticmethod
def best_action(node):
"""
Returns the best action from this game state node.
In Monte Carlo Tree Search we pick the one that was
visited the most. We can break ties by picking
the state that won the most.
"""
most_plays = -float('inf')
best_wins = -float('inf')
best_actions = []
for child in node.children:
wins, plays = child.get_wins_plays()
if plays > most_plays:
most_plays = plays
best_actions = [child.move]
best_wins = wins
elif plays == most_plays:
# break ties with wins
if wins > best_wins:
best_wins = wins
best_actions = [child.move]
elif wins == best_wins:
best_actions.append(child.move)
return random.choice(best_actions)
@staticmethod
def back_prop(node, delta):
"""
Given a node and a delta value for wins,
propagate that information up the tree to the root.
"""
while node is not None:
node.plays += 1
node.wins += delta
node = node.parent
def tree_policy(self, root):
"""
Given a root node, determine which child to visit
using Upper Confidence Bound.
"""
# legal moves represent potential children of root node
legal_moves = root.legal_moves
if not legal_moves:
return root
elif legal_moves == [None]:
# if player must pass turn
next_state = self.reversi.next_state(root.game_state, None)
pass_node = self.tree_manager.add_node(next_state, None, root)
return pass_node
elif len(root.children) < len(legal_moves):
# we have not yet tried all the children for this node
untried = [
move for move in legal_moves
if move not in root.moves_tried
]
assert untried
# we have no information about these nodes at all, so pick randomly
move = random.choice(untried)
next_state = self.reversi.next_state(root.game_state, move)
root.moves_tried.add(move)
return self.tree_manager.add_node(next_state, move, root)
else:
# we have tried every child node at least once, so traverse tree
# with UCT
return self.tree_policy(self.best_child(root))
def best_child(self, node):
"""
UCT, used in the tree policy to determine
which child of the input node is the best to
simulate right now.
"""
enemy_turn = (node.game_state[1] != self.color)
C = 1 # 'exploration' value
values = {}
_, parent_plays = node.get_wins_plays()
for child in node.children:
wins, plays = child.get_wins_plays()
if enemy_turn:
# the enemy will play against us, not for us
wins = plays - wins
assert parent_plays > 0
values[child] = (wins / plays) + C * \
math.sqrt(2 * math.log(parent_plays) / plays)
best_choice = max(values, key=values.get)
return best_choice
def simulate(self, game_state):
"""
Starting from the given game state, simulate
a random game to completion, and return the profit value
(1 for a win, 0 for a loss)
"""
WIN_PRIZE = 1
LOSS_PRIZE = 0
state = copy.deepcopy(game_state)
while True:
board = state[0]
winner = self.reversi.winner(board)
if winner is not False:
black_count, white_count = board.get_stone_counts()
if black_count == white_count:
# we don't want to tie, we want to win!
return LOSS_PRIZE
elif winner == self.color:
return WIN_PRIZE
elif winner == opponent[self.color]:
return LOSS_PRIZE
else:
raise ValueError
moves = self.reversi.legal_moves(state)
if not moves:
# if no moves, turn passes to opponent
state = (state[0], opponent[state[1]])
moves = self.reversi.legal_moves(state)
picked = random.choice(moves)
state = self.reversi.apply_move(state, picked)
class TreeManager:
    """Builds and caches search-tree Nodes, keyed by game state."""

    def __init__(self, reversi):
        self.state_node = {}
        self.reversi = reversi

    def add_node(self, game_state, move, parent=None):
        """Create a Node for game_state, link it under parent, and cache it."""
        moves = self.reversi.legal_moves(game_state)
        game_over = self.reversi.winner(game_state[0]) is not False
        if len(moves) == 0 and not game_over:
            # the only action available is to pass the turn
            moves = [None]
        node = Node(game_state, move, moves)
        node.parent = parent
        if parent is not None:
            parent.add_child(node)
        self.state_node[game_state] = node
        return node

    def get_node(self, game_state):
        """
        Get the existing Node for this game_state.
        Creates one if it does not yet exist.
        """
        try:
            return self.state_node[game_state]
        except KeyError:
            return self.add_node(game_state, None)
class Node:
    """One node of the Monte Carlo search tree: a game state plus
    win/play counters and links to parent and children."""

    def __init__(self, game_state, move, legal_moves):
        self.game_state = game_state
        self.plays = 0
        self.wins = 0
        self.children = []   # child Nodes
        self.parent = None
        self.move = move     # move that led from parent to this child
        self.legal_moves = legal_moves
        self.moves_tried = set()
        # how many children have NOT been fully expanded (had their subtrees
        # completely searched)?
        self.amount_children_unexpanded = len(self.legal_moves)

    def add_child(self, child_node):
        """Attach child_node beneath this node."""
        child_node.parent = self
        self.children.append(child_node)

    def has_children(self):
        return bool(self.children)

    def get_wins_plays(self):
        """Return the (wins, plays) counters as a tuple."""
        return self.wins, self.plays

    def __hash__(self):
        return hash(self.game_state)

    def __repr__(self):
        return 'move: {} wins: {} plays: {}'.format(self.move, self.wins, self.plays)

    def __eq__(self, other):
        # nodes are equal iff they wrap the same game state
        return isinstance(other, Node) and self.game_state == other.game_state
| |
from .helpers import filingparams, cps_yr_idx, C_TAM_BENEFIT_TUPLES, CPS_BENEFIT_TUPLES
# (CPS variable name, Tax-Calculator variable name) pairs used to copy
# income amounts from a CPS record onto a TaxUnit. The head's values are
# also mirrored into "<tc_var>p" and a spouse share "<tc_var>s" is kept.
INCOME_TUPLES = [
    ("wsal_val", "e00200"),
    ("int_val", "interest"),
    ("semp_val", "e00900"),
    ("frse_val", "e02100"),
    ("div_val", "divs"),
    ("rnt_val", "rents"),
    ("rtm_val", "e01500"),
    ("alimony", "e00800"),
    ("ss_impute", "e02400"),
    ("UI_impute", "e02300"),
]
class TaxUnit:
    """A single tax-filing unit assembled from CPS person records.

    The unit is seeded from the head-of-unit record; spouses and dependents
    are merged in afterwards with ``add_spouse``/``add_dependent``, and
    ``output`` returns the accumulated Tax-Calculator-style attributes as a
    dictionary.
    """
    def __init__(
        self,
        data: dict,
        year: int,
        hh_inc: float = 0.0,
        dep_status: bool = False,
        ctam_benefits: bool = True,
    ):
        """
        Parameters
        ----------
        data: dictionary of data from the CPS
        year: tax year assigned to FLPDYR
        hh_inc: total household income
        dep_status: indicator for whether or not this is a
            dependent filer
        ctam_benefits: True when C-TAM benefit imputations are available;
            otherwise raw CPS benefit variables are used
        """
        # set benefit tuples based on if we have C-TAM imputations or not
        if ctam_benefits:
            self.BENEFIT_TUPLES = C_TAM_BENEFIT_TUPLES
        else:
            self.BENEFIT_TUPLES = CPS_BENEFIT_TUPLES
        self.ctam_benefits = ctam_benefits
        # counters for medicaid and medicare
        self.mcare_count = 0
        self.mcaid_count = 0
        # add attributes of the tax unit
        self.tot_inc = 0
        for cps_var, tc_var in INCOME_TUPLES:
            # without C-TAM imputations, fall back to raw CPS SS/UI values
            if cps_var == "ss_impute" and not ctam_benefits:
                val = data["ss_val"]
            elif cps_var == "UI_impute" and not ctam_benefits:
                val = data["uc_val"]
            else:
                val = data[cps_var]
            # head gets the full amount; the spouse share ("s") starts at 0
            setattr(self, tc_var, val)
            setattr(self, f"{tc_var}p", val)
            setattr(self, f"{tc_var}s", 0)
            self.tot_inc += val
        # add benefit data
        for cps_var, tc_var in self.BENEFIT_TUPLES:
            # count members with any medicaid/medicare benefit
            if tc_var == "mcaid_ben" and data[cps_var] != 0:
                self.mcaid_count += 1
            elif tc_var == "mcare_ben" and data[cps_var] != 0:
                self.mcare_count += 1
            setattr(self, tc_var, data[cps_var])
        # SNAP and housing benefits need to only be added for head of unit
        try:
            self.snap_ben = data["snap_impute"]
            self.housing_ben = data["housing_impute"]
        except KeyError:
            # record lacks imputations; use the raw CPS variables
            self.snap_ben = data["hfdval"]
            self.housing_ben = data["housing_val"]
        self.agi = data["agi"]
        self.age_head = data["a_age"]
        self.age_spouse = 0
        self.blind_head = data["pediseye"]
        self.fips = data["gestfips"]
        self.h_seq = data["hhid"]
        self.a_lineno = data["a_lineno"]
        self.ffpos = data["ffpos"]
        self.s006 = data["fsup_wgt"]
        self.FLPDYR = year
        self.EIC = 0
        self.dep_stat = 0
        if dep_status:
            # dependent filers don't count themselves in XTOT
            self.XTOT = 0
            self.dep_stat = 1
        self.mars = 1  # start with being single
        # update marital status based on CPS indication
        # note that to match the IRS PUF we include widowed people in this
        if data["a_maritl"] in [1, 2, 3]:
            self.mars = 2
        if data["filestat"] in [5, 6] and data["a_maritl"] == 3:
            # married but filing separately
            self.mars = 3
        self.XTOT = 1
        if self.mars == 2:
            self.XTOT = 2
        if data["filestat"] == 4:
            # head of household
            self.mars = 4
        self.hh_inc = hh_inc
        self.filer = 0
        # filestat == 6 marks a CPS nonfiler
        if data["filestat"] != 6:
            self.filer = 1
        # age data
        self.nu18 = 0
        self.n1820 = 0
        self.n21 = 0
        self.nu06 = 0
        self.nu13 = 0
        self.n24 = 0
        self.elderly_dependents = 0
        self.f2441 = 0
        self.check_age(data["a_age"])
        # home related data
        self.home_owner = 0
        if not dep_status and data["h_tenure"] == 1:
            self.home_owner = 1
        # property taxes
        self.prop_tax = data["prop_tax"]
        # state and local taxes
        self.statetax = max(0.0, data["statetax_ac"])
        # property value
        self.prop_value = data["hprop_val"]
        # presence of a mortgage
        self.mortgage_yn = 0
        if data["hpres_mort"] == 1:
            self.mortgage_yn = 1
        # list to hold line numbers of dependents and spouses
        self.deps_spouses = []
        self.depne = 0  # number of dependents
    def add_spouse(self, spouse: dict):
        """
        Add a spouse to the unit

        Adds the spouse's incomes and benefits to the unit totals, records
        the spouse share of each income variable, and updates age counters.
        Marks the spouse record with ``s_flag``.
        """
        for cps_var, tc_var in INCOME_TUPLES:
            if cps_var == "ss_impute" and not self.ctam_benefits:
                val = spouse["ss_val"]
            elif cps_var == "UI_impute" and not self.ctam_benefits:
                val = spouse["uc_val"]
            else:
                val = spouse[cps_var]
            self.tot_inc += val
            setattr(self, tc_var, getattr(self, tc_var) + val)
            setattr(self, f"{tc_var}s", val)
        for cps_var, tc_var in self.BENEFIT_TUPLES:
            if tc_var == "mcaid_ben" and spouse[cps_var] != 0:
                self.mcaid_count += 1
            elif tc_var == "mcare_ben" and spouse[cps_var] != 0:
                self.mcare_count += 1
            setattr(self, tc_var, getattr(self, tc_var) + spouse[cps_var])
        self.agi += spouse["agi"]
        setattr(self, "blind_spouse", spouse["pediseye"])
        self.deps_spouses.append(spouse["a_lineno"])
        setattr(self, "age_spouse", spouse["a_age"])
        # flag the spouse record as consumed by this unit
        spouse["s_flag"] = True
        self.check_age(spouse["a_age"])
        self.statetax += spouse["statetax_ac"]
    def add_dependent(self, dependent: dict, eic: int):
        """
        Add dependent to the unit

        Adds the dependent's benefits, updates the age/EIC counters, and
        marks the dependent record with ``d_flag`` and its claimer.
        """
        for cps_var, tc_var in self.BENEFIT_TUPLES:
            dep_val = dependent[cps_var]
            if tc_var == "mcaid_ben" and dep_val != 0:
                self.mcaid_count += 1
            elif tc_var == "mcare_ben" and dep_val != 0:
                self.mcare_count += 1
            setattr(self, tc_var, getattr(self, tc_var) + dep_val)
        self.check_age(dependent["a_age"], True)
        self.XTOT += 1
        self.EIC += eic
        self.deps_spouses.append(dependent["a_lineno"])
        dependent["d_flag"] = True
        dependent["claimer"] = self.a_lineno
        self.depne += 1
    def remove_dependent(self, dependent: dict):
        """
        Remove dependent from the tax unit

        Reverses everything ``add_dependent`` accumulated for this record:
        benefits, age counters, XTOT, and the dependent count.
        """
        for cps_var, tc_var in self.BENEFIT_TUPLES:
            dep_val = dependent[cps_var]
            if tc_var == "mcaid_ben" and dep_val != 0:
                self.mcaid_count -= 1
            elif tc_var == "mcare_ben" and dep_val != 0:
                self.mcare_count -= 1
            setattr(self, tc_var, getattr(self, tc_var) - dep_val)
        # mirror of check_age(..., dependent=True)
        if dependent["a_age"] < 6:
            self.nu06 -= 1
        if dependent["a_age"] < 13:
            self.nu13 -= 1
        if dependent["a_age"] < 17:
            self.n24 -= 1
        if dependent["a_age"] < 18:
            self.nu18 -= 1
        elif 18 <= dependent["a_age"] < 21:
            self.n1820 -= 1
        elif dependent["a_age"] >= 21:
            self.n21 -= 1
        if dependent["a_age"] >= filingparams.elderly_age[cps_yr_idx]:
            self.elderly_dependents -= 1
        self.depne -= 1
        self.XTOT -= 1
    def check_age(self, age: int, dependent: bool = False):
        """
        Modify the age variables in the tax unit

        Buckets ``age`` into nu18/n1820/n21 and, when ``dependent`` is True,
        also updates the child-credit (n24), under-6/under-13, f2441 and
        elderly-dependent counters.
        """
        if age < 18:
            self.nu18 += 1
        elif 18 <= age < 21:
            self.n1820 += 1
        elif age >= 21:
            self.n21 += 1
        if dependent:
            if age < 17:
                self.n24 += 1
            if age < 6:
                self.nu06 += 1
            if age < 13:
                self.nu13 += 1
                self.f2441 += 1
            if age >= filingparams.elderly_age[cps_yr_idx]:
                self.elderly_dependents += 1
    def output(self) -> dict:
        """
        Return tax attributes as a dictionary

        Also validates internal consistency (spouse incomes are zero for
        non-married units; XTOT covers all counted members) and adds the
        derived ``fam_size`` attribute.
        """
        # enforce that all spouse income variables are zero for non-married
        if self.mars != 2:
            for _, tc_var in INCOME_TUPLES:
                value = getattr(self, f"{tc_var}s")
                msg = f"{tc_var}s is not zero for household {self.h_seq}"
                assert value == 0, msg
        # add family size variable
        fam_size = 1 + self.depne
        if self.mars == 2:
            fam_size += 1
        setattr(self, "fam_size", fam_size)
        # NOTE(review): the message text says "!=" but the check below is
        # ">=", so the message can mislead when the assertion fires
        m = f"{self.XTOT} != {sum([self.nu18, self.n1820, self.n21])}"
        assert self.XTOT >= sum([self.nu18, self.n1820, self.n21]), m
        return self.__dict__
    # private methods
    def _must_file(self):
        """
        determine if this unit must file

        Sets ``self.filer`` to 1/0 by comparing total income against the
        gross-income threshold for the unit's marital status and age band.
        Raises ValueError for filing statuses other than 1, 2 or 4.
        """
        aidx = 0  # age index for filing parameters
        midx = 0  # marital index for filing parameters
        if self.mars == 1:
            if self.age_head >= filingparams.elderly_age[cps_yr_idx]:
                aidx = 1
        elif self.mars == 2:
            midx = 1
            if self.age_head >= filingparams.elderly_age[cps_yr_idx]:
                aidx = 1
                if self.age_spouse >= filingparams.elderly_age[cps_yr_idx]:
                    aidx = 2
            elif self.age_spouse >= filingparams.elderly_age[cps_yr_idx]:
                aidx = 1
        elif self.mars == 4:
            midx = 2
            if self.age_head >= filingparams.elderly_age[cps_yr_idx]:
                aidx = 1
        else:
            msg = (
                f"Filing status not in [1, 2, 4]. HHID: {self.h_seq} "
                f"a_lineno: {self.a_lineno}"
            )
            raise ValueError(msg)
        income_min = filingparams.gross_inc_thd[cps_yr_idx][midx][aidx]
        if self.tot_inc >= income_min:
            setattr(self, "filer", 1)
        else:
            setattr(self, "filer", 0)
| |
from multiprocessing import Process
from sklearn import mixture
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_score
import time
import numpy as np
import os
import math
import sys
# single gauss component
class Gauss:
    """One weighted Gaussian component of an SGMM."""

    def __init__(self, weight, mean, precision, determinant):
        self.weight_ = weight
        self.mean_ = mean              # component mean (3 values)
        self.precision_ = precision    # 3x3 precision (inverse covariance)
        self.determinant_ = determinant
# single bin in histogram
class Cluster:
    """One histogram bin: a cluster's probability, its representative
    sample value, and the Gaussian components fitted to it."""

    def __init__(self, probability, gauss_count, sample_value):
        self.probability_ = probability
        self.gauss_count_ = gauss_count
        self.sample_value_ = sample_value
        self.gausses_ = []

    def add_gauss(self, gauss):
        """Append one Gaussian component to this cluster."""
        self.gausses_.append(gauss)
# single block
class Block:
    """One volume block: its clusters and how many there are."""

    def __init__(self):
        self.clusters_ = []
        self.cluster_num_ = 1

    def add_cluster(self, cluster):
        """Append one Cluster to this block."""
        self.clusters_.append(cluster)
class Point3d:
    """Integer 3-D coordinate; defaults to the origin."""

    def __init__(self, xx=0, yy=0, zz=0):
        self.x = xx
        self.y = yy
        self.z = zz
# width = 480
# depth = 720
# height = 112
# data_source = ''
# result_disk_address = 'c:/train/'
# process_num = 16 # make sure it is a divisor of block number
#
# side = 16
# zero_block_threshold = 0.003
# ubg = 4 # max component number
# cluster_weights = [1.0, 1.0, 1.0, 1.0] # weights when clustering
# src_raw_name = result_disk_address + data_source+'.raw'
# restored_raw_name = result_disk_address+data_source+'_restored.raw'
# result_raw_name = result_disk_address+data_source+'_SGMM_Result.raw' # restored data
# training_done_signal = result_disk_address + 'Training_Done.txt'
#
# block_size = side * side * side
# width_num = width / side
# depth_num = depth / side
# height_num = height / side
# total_num = width_num * depth_num * height_num
# restore_raw = bytearray(width * depth * height)
# np.random.seed(1)
# stride = total_num / process_num
#
# f_all_data = open(src_raw_name, 'rb')
# f_all_data.seek(0, 0)
# all_data = bytearray(f_all_data.read())
# all_hit = [0] * width * depth * height
# read index th block data
# def read_block(index, all_data, width, depth, width_num, depth_num,block_size,side):
# height_index = index / (width_num * depth_num)
# depth_index = (index - height_index * width_num * depth_num) / width_num
# width_index = index - height_index * width_num * depth_num - depth_index * width_num
#
# result_data = [0] * block_size
# for z in range(0, side): # width
# for y in range(0, side): # depth
# for x in range(0, side): # height
# final_index_z = height_index * side + z
# final_index_y = depth_index * side + y
# final_index_x = width_index * side + x
# final_index = final_index_z * width * depth + final_index_y * width + final_index_x
# result_data[z * side * side + y * side + x] = all_data[final_index]
#
# return result_data
def read_block(min_point, max_point, all_data, width, depth):
    """Copy the voxels of the axis-aligned box [min_point, max_point) out of
    the flat volume ``all_data`` (x fastest, then y, then z) and return them
    as a flat list in block-local order."""
    dx = max_point.x - min_point.x
    dy = max_point.y - min_point.y
    dz = max_point.z - min_point.z
    block = [0] * (dx * dy * dz)
    dst = 0
    for z in range(min_point.z, min_point.z + dz):
        for y in range(min_point.y, min_point.y + dy):
            # flat offset of the start of this (y, z) row in the volume
            row_base = z * depth * width + y * width
            for x in range(min_point.x, min_point.x + dx):
                block[dst] = all_data[row_base + x]
                dst += 1
    return block
def CH(k_means, train_data):
    """Calinski-Harabasz index for a fitted KMeans clustering.

    k_means: fitted estimator exposing ``cluster_centers_`` and ``labels_``.
    train_data: the samples that were clustered.
    Returns (traceB / (k-1)) / (traceW / (n-k)); higher is better.
    """
    means_of_all_data = np.mean(train_data, axis=0)
    k = len(k_means.cluster_centers_)
    n = len(train_data)
    count = [0]*k
    for i in range(0, n):
        count[k_means.labels_[i]] += 1
    # between-cluster scatter: cluster sizes times squared distance of each
    # center from the global mean
    traceB = 0
    for i in range(0, k):
        traceB += count[i]*np.sum(np.square(means_of_all_data-k_means.cluster_centers_[i]))
    # within-cluster scatter: each sample against ITS OWN cluster center.
    # BUG FIX: the original summed every sample against every center,
    # inflating traceW by roughly a factor of k.
    traceW = 0
    for j in range(0, n):
        traceW += np.sum(np.square(k_means.cluster_centers_[k_means.labels_[j]] - train_data[j]))
    return (traceB/(k-1))/(traceW/(n-k))
# train index th block data
# def train_single_block(block_index, block_data, max_cluster_num, block_size, side, cluster_weights, ubg):
# # clustering by kmeans
#
# cluster_data = []
# count = [0] * max_cluster_num
# non_zero_count = 0
# for z in range(0, side):
# for y in range(0, side):
# for x in range(0, side):
# index = z * side * side + y * side + x
# cluster_data.append([cluster_weights[0]*x, cluster_weights[1]*y, cluster_weights[2]*z, cluster_weights[3] * block_data[index]])
# if block_data[index] != 0:
# non_zero_count += 1
#
#
# # here need a new standard to check the cluster number!
#
#
# # check if zero_count too big
# if non_zero_count > int(side * side * side * 0.003):
# # print('here3')
# # kmeans = KMeans(n_clusters=2, max_iter=1000).fit(cluster_data)
# # final_kmeans = kmeans
# # final_cluster_num = 2
# # label = kmeans.labels_
# # max_score = silhouette_score(cluster_data, label, metric='euclidean')
# # # max_score = CH(kmeans, cluster_data)
# # for cluster_index_num in range(3, max_cluster_num+1):
# # print('here4',cluster_index_num)
# # kmeans = KMeans(n_clusters=cluster_index_num, max_iter=1000).fit(cluster_data)
# # label = kmeans.labels_
# # present_score = silhouette_score(cluster_data, label, metric='euclidean')
# # # present_score = CH(kmeans, cluster_data)
# # # print(str(present_score)+' block index='+str(block_index))
# # if present_score > max_score:
# # final_kmeans = kmeans
# # final_cluster_num = cluster_index_num
# # max_score = present_score
#
# ############
# kmeans = KMeans(n_clusters=max_cluster_num, max_iter=3000).fit(cluster_data)
# final_kmeans = kmeans
# final_cluster_num = max_cluster_num
# else:
# final_cluster_num = 0
# print("block index = "+str(block_index)+", final_cluster_num = " + str(final_cluster_num))
# # train sgmm
# train_data = []
# for i in range(0, final_cluster_num):
# train_data.append([])
# if final_cluster_num != 0:
# for voxel_index in range(0, block_size):
# count[final_kmeans.labels_[voxel_index]] += 1
# train_data[final_kmeans.labels_[voxel_index]].append([cluster_data[voxel_index][0], cluster_data[voxel_index][1], cluster_data[voxel_index][2]])
#
# block = Block()
# block.cluster_num_ = final_cluster_num
# for cluster_index in range(0, final_cluster_num):
# g = mixture.GaussianMixture(n_components=1, tol=1e-5, max_iter=1000)
# g.fit(train_data[cluster_index])
# max_bic = g.bic(np.array(train_data[cluster_index]))
# final_g = g
# final_component_num = 1
# max_num = min(ubg, len(train_data[cluster_index]))
# for component_num in range(2, max_num+1):
# g = mixture.GaussianMixture(n_components=component_num, tol=1e-5, max_iter=1000)
# g.fit(train_data[cluster_index])
# bic_temp = g.bic(np.array(train_data[cluster_index]))
# if bic_temp < max_bic:
# final_g = g
# final_component_num = component_num
# max_bic = bic_temp
#
# # already got final SGMM for cluster_index
# cluster = Cluster(1.0 * count[cluster_index]/block_size, final_component_num, final_kmeans.cluster_centers_[cluster_index][3])
# for component_index in range(0, final_component_num):
# determinant = final_g.covariances_[component_index][0][0] * final_g.covariances_[component_index][1][1] * final_g.covariances_[component_index][2][2]
# + final_g.covariances_[component_index][0][1] * final_g.covariances_[component_index][1][2] * final_g.covariances_[component_index][2][0]
# + final_g.covariances_[component_index][0][2] * final_g.covariances_[component_index][1][0] * final_g.covariances_[component_index][2][1]
# - final_g.covariances_[component_index][0][2] * final_g.covariances_[component_index][1][1] * final_g.covariances_[component_index][2][0]
# - final_g.covariances_[component_index][0][1] * final_g.covariances_[component_index][1][0] * final_g.covariances_[component_index][2][2]
# - final_g.covariances_[component_index][0][0] * final_g.covariances_[component_index][1][2] * final_g.covariances_[component_index][2][1]
# gauss = Gauss(final_g.weights_[component_index], final_g.means_[component_index], final_g.precisions_[component_index], determinant)
# cluster.add_gauss(gauss)
# block.add_cluster(cluster)
#
# return block
#
#
# # train a part of original data
# def train_blocks(result_disk_address,data_source,index, stride, max_cluster_num, src_raw_name, all_data, width, depth, width_num, depth_num, block_size, side,cluster_weights,ubg):
#
# # print(index,stride,max_cluster_num,src_raw_name,width,depth,width_num,depth_num,block_size,side)
#
# block_sgmm = [Block()] * stride
# with open(src_raw_name, 'rb') as f_src:
# for i in range(0, stride):
# block_data = read_block(index * stride + i, all_data,width,depth,width_num, depth_num, block_size, side)
# block_sgmm[i] = train_single_block(index * stride + i, block_data, max_cluster_num,block_size,side,cluster_weights,ubg)
#
# sgmm_output = result_disk_address + data_source + '_SGMM_Result_Cluster_'+str(index)+'.txt' # only sgmm arguments
#
# # restore block_sgmm into txt file
# with open(sgmm_output, "w") as f_out:
# for i in range(0, stride):
# f_out.write(str(block_sgmm[i].cluster_num_)+'\n')
# for j in range(0, block_sgmm[i].cluster_num_):
# f_out.write(str(block_sgmm[i].clusters_[j].probability_)+' '+str(block_sgmm[i].clusters_[j].gauss_count_)+' '+str(block_sgmm[i].clusters_[j].sample_value_)+'\n')
# for k in range(0, block_sgmm[i].clusters_[j].gauss_count_):
# f_out.write(str(block_sgmm[i].clusters_[j].gausses_[k].weight_)+'\n')
# f_out.write(str(block_sgmm[i].clusters_[j].gausses_[k].determinant_) + '\n')
# f_out.write(str(block_sgmm[i].clusters_[j].gausses_[k].mean_[0])+' '+str(block_sgmm[i].clusters_[j].gausses_[k].mean_[1])+' '+str(block_sgmm[i].clusters_[j].gausses_[k].mean_[2])+'\n')
# f_out.write(str(block_sgmm[i].clusters_[j].gausses_[k].precision_[0][0])+' '+str(block_sgmm[i].clusters_[j].gausses_[k].precision_[0][1])+' '+str(block_sgmm[i].clusters_[j].gausses_[k].precision_[0][2])+'\n')
# f_out.write(str(block_sgmm[i].clusters_[j].gausses_[k].precision_[1][1])+' '+str(block_sgmm[i].clusters_[j].gausses_[k].precision_[1][2])+'\n')
# f_out.write(str(block_sgmm[i].clusters_[j].gausses_[k].precision_[2][2])+'\n')
#
# print("training and saving blocks from "+str(index*stride)+" to "+str((index+1)*stride)+" done")
def train_single_block(block_index, block_data, max_cluster_num, block_width, block_depth, block_height, cluster_weights, ubg):
    """Train the SGMM for one block of the volume.

    The block's voxels are turned into weighted (x, y, z, value) samples and
    clustered with KMeans (skipped when the block is almost entirely zero);
    each cluster then gets a GaussianMixture over (x, y, z) whose component
    count (1..ubg) is chosen by BIC.

    Returns a Block holding one Cluster per KMeans cluster (cluster_num_ is
    0 for near-empty blocks).
    """
    # build the 4-D samples [wx*x, wy*y, wz*z, wv*value] used for clustering
    cluster_data = []
    count = [0] * max_cluster_num
    non_zero_count = 0
    for z in range(0, block_height):
        for y in range(0, block_depth):
            for x in range(0, block_width):
                index = z * block_depth * block_width + y * block_width + x
                cluster_data.append([cluster_weights[0]*x, cluster_weights[1]*y, cluster_weights[2]*z, cluster_weights[3] * block_data[index]])
                if block_data[index] != 0:
                    non_zero_count += 1
    block_size = block_width*block_depth*block_height
    # skip clustering for blocks with <= 0.3% non-zero voxels
    if non_zero_count > int(block_size*0.003):
        kmeans = KMeans(n_clusters=max_cluster_num, max_iter=3000).fit(cluster_data)
        final_kmeans = kmeans
        final_cluster_num = max_cluster_num
    else:
        final_cluster_num = 0
    print("block index = "+str(block_index)+", final_cluster_num = " + str(final_cluster_num))
    # gather the spatial coordinates of each cluster's voxels
    train_data = []
    for i in range(0, final_cluster_num):
        train_data.append([])
    if final_cluster_num != 0:
        for voxel_index in range(0, block_size):
            count[final_kmeans.labels_[voxel_index]] += 1
            train_data[final_kmeans.labels_[voxel_index]].append([cluster_data[voxel_index][0], cluster_data[voxel_index][1], cluster_data[voxel_index][2]])
    block = Block()
    block.cluster_num_ = final_cluster_num
    for cluster_index in range(0, final_cluster_num):
        # pick the component count (1..ubg) with the lowest BIC
        g = mixture.GaussianMixture(n_components=1, tol=1e-5, max_iter=1000)
        g.fit(train_data[cluster_index])
        best_bic = g.bic(np.array(train_data[cluster_index]))
        final_g = g
        final_component_num = 1
        max_num = min(ubg, len(train_data[cluster_index]))
        for component_num in range(2, max_num+1):
            g = mixture.GaussianMixture(n_components=component_num, tol=1e-5, max_iter=1000)
            g.fit(train_data[cluster_index])
            bic_temp = g.bic(np.array(train_data[cluster_index]))
            if bic_temp < best_bic:
                final_g = g
                final_component_num = component_num
                best_bic = bic_temp
        # already got final SGMM for cluster_index
        cluster = Cluster(1.0 * count[cluster_index]/block_size, final_component_num, final_kmeans.cluster_centers_[cluster_index][3])
        for component_index in range(0, final_component_num):
            cov = final_g.covariances_[component_index]
            # determinant of the 3x3 covariance matrix (rule of Sarrus).
            # BUG FIX: the original spread this sum across six statements
            # without parentheses, so only the diagonal product was ever
            # assigned -- the other five lines were no-op unary expressions.
            determinant = (cov[0][0] * cov[1][1] * cov[2][2]
                           + cov[0][1] * cov[1][2] * cov[2][0]
                           + cov[0][2] * cov[1][0] * cov[2][1]
                           - cov[0][2] * cov[1][1] * cov[2][0]
                           - cov[0][1] * cov[1][0] * cov[2][2]
                           - cov[0][0] * cov[1][2] * cov[2][1])
            gauss = Gauss(final_g.weights_[component_index], final_g.means_[component_index], final_g.precisions_[component_index], determinant)
            cluster.add_gauss(gauss)
        block.add_cluster(cluster)
    return block
# scratch list for per-process debug output (only used by the commented-out
# debug code inside train_blocks)
debug_info = []
def train_blocks(result_disk_address, data_source, total_num, index, stride, block_info, max_cluster_num, src_raw_name, all_data, width, depth, cluster_weights, ubg):
    """Worker entry point (run in a separate Process): train SGMMs for blocks
    [index*stride, min((index+1)*stride, total_num)) and write their
    parameters to '<result_disk_address><data_source>_SGMM_Result_Cluster_<index>.txt'.

    block_info: list of [min_point, max_point] pairs (Point3d) per block.
    all_data: the whole raw volume as a flat byte sequence.
    """
    number_in_block = total_num - index*stride  # NOTE(review): computed but never used
    # placeholder list; every slot used is overwritten below, so the shared
    # Block() instance is never mutated
    block_sgmm = [Block()] * stride
    # f_debug=open(result_disk_address+data_source+str(index)+".pydbg","w")
    # debug_info=""
    # for i in range(0,stride):
    #     id = index*stride+i
    #     if id >= total_num:
    #         return
    #     xmin = block_info[id][0].x
    #     ymin = block_info[id][0].y
    #     zmin = block_info[id][0].z
    #     xmax = block_info[id][1].x
    #     ymax = block_info[id][1].y
    #     zmax = block_info[id][1].z
    #     debug_info =str(xmin)+" "+str(ymin)+" "+str(zmin)+" "+str(xmax)+" "+str(ymax)+" "+str(zmax)+" "+str(id)+str("\n")
    #     f_debug.write(debug_info)
    # f_debug.close()
    end_block = (index+1)*stride
    end_index = stride
    # NOTE(review): f_src is opened but never read; the data comes from
    # the already-loaded all_data
    with open(src_raw_name, 'rb') as f_src:
        for i in range(0, stride):
            # the last worker may get fewer than `stride` blocks
            if index * stride + i >= total_num:
                end_block = index*stride+i
                end_index = i
                print("the last block breaks at index:" + str(end_block) + ", file no." + str(index))
                break
            block_data = read_block(block_info[index*stride+i][0], block_info[index*stride+i][1], all_data, width, depth)
            block_width = block_info[index*stride+i][1].x - block_info[index*stride+i][0].x
            block_depth = block_info[index*stride+i][1].y - block_info[index*stride+i][0].y
            block_height = block_info[index*stride+i][1].z - block_info[index*stride+i][0].z
            block_sgmm[i] = train_single_block(index*stride+i,
                                               block_data,
                                               max_cluster_num,
                                               block_width=block_width,
                                               block_depth=block_depth,
                                               block_height=block_height,
                                               cluster_weights=cluster_weights,
                                               ubg=ubg)
    sgmm_output = result_disk_address + data_source + '_SGMM_Result_Cluster_'+str(index)+'.txt' # only sgmm arguments
    # serialize the trained SGMMs: per block the cluster count, then per
    # cluster its header line and per component the weight, determinant,
    # mean and upper-triangular precision entries
    with open(sgmm_output, "w") as f_out:
        for i in range(0, end_index):
            f_out.write(str(block_sgmm[i].cluster_num_)+'\n')
            for j in range(0, block_sgmm[i].cluster_num_):
                f_out.write(str(block_sgmm[i].clusters_[j].probability_)+' '+str(block_sgmm[i].clusters_[j].gauss_count_)+' '+str(block_sgmm[i].clusters_[j].sample_value_)+'\n')
                for k in range(0, block_sgmm[i].clusters_[j].gauss_count_):
                    f_out.write(str(block_sgmm[i].clusters_[j].gausses_[k].weight_)+'\n')
                    f_out.write(str(block_sgmm[i].clusters_[j].gausses_[k].determinant_) + '\n')
                    f_out.write(str(block_sgmm[i].clusters_[j].gausses_[k].mean_[0])+' '+str(block_sgmm[i].clusters_[j].gausses_[k].mean_[1])+' '+str(block_sgmm[i].clusters_[j].gausses_[k].mean_[2])+'\n')
                    f_out.write(str(block_sgmm[i].clusters_[j].gausses_[k].precision_[0][0])+' '+str(block_sgmm[i].clusters_[j].gausses_[k].precision_[0][1])+' '+str(block_sgmm[i].clusters_[j].gausses_[k].precision_[0][2])+'\n')
                    f_out.write(str(block_sgmm[i].clusters_[j].gausses_[k].precision_[1][1])+' '+str(block_sgmm[i].clusters_[j].gausses_[k].precision_[1][2])+'\n')
                    f_out.write(str(block_sgmm[i].clusters_[j].gausses_[k].precision_[2][2])+'\n')
    print("---------------IN FILE:"+str(index)+" training and saving blocks from "+str(index*stride)+" to "+str(end_block)+" done")
def read_block_info_data(path):
    """Read the block bounding boxes from the octree (.reoc) file at `path`.

    Each data line holds six integers: xmin ymin zmin xmax ymax zmax.
    Returns a list of [min_point, max_point] Point3d pairs.
    Exits the process if the file does not exist.
    """
    if not os.path.exists(path):
        print('oc file doesn\'t exist')
        exit(0)
    block_info = []
    # BUG FIX: the original left the file handle open; use a context manager
    with open(path) as f_oc:
        f_oc.readline()  # first line is the block count; derived from len() instead
        for line in f_oc:
            arr = line.split(' ')
            min_point = Point3d(int(arr[0]), int(arr[1]), int(arr[2]))
            max_point = Point3d(int(arr[3]), int(arr[4]), int(arr[5]))
            block_info.append([min_point, max_point])
    return block_info
# train all blocks in parallel, one Process per output file
# NOTE: this script is Python 2 (raw_input, print statements, time.clock)
if __name__ == '__main__':
    result_disk_address = 'e:/train/'
    process_num = 10 # make sure it is a divisor of block number
    data_source=""
    width = 0
    depth = 0
    height = 0
    process_num = 0  # NOTE(review): immediately discards the 10 assigned above
    # read the configuration interactively or from argv
    if len(sys.argv) == 1:
        result_disk_address = raw_input("input disk address:")
        data_source = raw_input('input the data name:')
        width = int(raw_input('weight:'))  # prompt typo: asks "weight" but reads the width
        depth = int(raw_input('depth:'))
        height = int(raw_input('height:'))
        process_num = int(raw_input('input the process num (must be the divisor of the block number):'))
    else:
        result_disk_address = sys.argv[1]
        data_source=sys.argv[2]
        width = int(sys.argv[3])
        depth = int(sys.argv[4])
        height = int(sys.argv[5])
        process_num = int(sys.argv[6])
    print("disk address:"+result_disk_address)
    print("data name:"+data_source)
    print("width:"+str(width)+" depth:"+str(depth)+" height:"+str(height))
    print("process num (file num):"+str(process_num))
    src_raw_name = result_disk_address + data_source+'.raw'
    restored_raw_name = result_disk_address+data_source+'_restored.raw'
    result_raw_name = result_disk_address+data_source+'_SGMM_Result.raw' # restored data
    training_done_signal = result_disk_address + 'Training_Done.txt'
    if not os.path.exists(result_disk_address+data_source+".raw"):
        print('raw file doesn\'t exist')
        exit(0)
    # bounding boxes of all blocks, produced by the octree pass
    block_info = read_block_info_data(result_disk_address+data_source+'.reoc')
    zero_block_threshold = 0.003
    ubg = 4 # max component number
    cluster_weights = [1.0, 1.0, 1.0, 1.0] # weights when clustering
    # block_size = side * side * side
    # width_num = width / side
    # depth_num = depth / side
    # height_num = height / side
    # total_num = width_num * depth_num * height_num
    total_num = len(block_info)
    print("block num:"+str(total_num))
    # ceiling division (Python 2 integer '/'): blocks per process
    stride = (total_num + process_num - 1) / process_num
    print("stride:" + str(stride))
    restore_raw = bytearray(width * depth * height)
    np.random.seed(1)
    # load the whole raw volume into memory once; workers index into it
    f_all_data = open(src_raw_name, 'rb')
    f_all_data.seek(0, 0)
    all_data = bytearray(f_all_data.read())
    print(len(all_data))
    all_hit = [0] * width * depth * height
    begin_time = time.localtime(time.time())
    cpu_time_begin = time.clock()  # time.clock was removed in Python 3.8
    # loop_index is passed to train_blocks as max_cluster_num
    for loop_index in [10]:
        print("loop_index = " + str(loop_index))
        proc_record = []
        for i in range(0, process_num): # a block / 3 seconds
            # Iprint('thread '+str(i))
            # p = Process(target=train_blocks, args=(result_disk_address, data_source, i, stride, loop_index, src_raw_name, all_data, width, depth, width_num, depth_num, block_size, side, cluster_weights,ubg))
            p = Process(target=train_blocks,args=(result_disk_address,
                                                  data_source,
                                                  total_num,
                                                  i,
                                                  stride,
                                                  block_info,
                                                  loop_index,
                                                  src_raw_name,
                                                  all_data,
                                                  width,
                                                  depth,
                                                  cluster_weights,
                                                  ubg))
            p.start()
            proc_record.append(p)
        # wait for every worker before reporting completion
        for p in proc_record:
            p.join()
        print("training SGMM done.")
    cpu_time_end = time.clock()
    print time.strftime('Training begin at %Y-%m-%d %H:%M:%S', begin_time)
    print time.strftime('Training done at %Y-%m-%d %H:%M:%S', time.localtime(time.time()))
    print("cpu time cost in python:" + str(cpu_time_end - cpu_time_begin)+".s")
    # single_block_data = read_block(5764)
    # train_single_block(5764, single_block_data, loop_index)
| |
from __future__ import unicode_literals
import xml.etree.ElementTree
from .common import InfoExtractor
from ..utils import (
ExtractorError,
int_or_none,
)
from ..compat import compat_HTTPError
class BBCCoUkIE(InfoExtractor):
    """Extractor for BBC iPlayer programmes, playlists and music clips."""
    IE_NAME = 'bbc.co.uk'
    IE_DESC = 'BBC iPlayer'
    _VALID_URL = r'https?://(?:www\.)?bbc\.co\.uk/(?:(?:(?:programmes|iplayer(?:/[^/]+)?/(?:episode|playlist))/)|music/clips[/#])(?P<id>[\da-z]{8})'

    _TESTS = [
        {
            'url': 'http://www.bbc.co.uk/programmes/b039g8p7',
            'info_dict': {
                'id': 'b039d07m',
                'ext': 'flv',
                'title': 'Kaleidoscope, Leonard Cohen',
                'description': 'The Canadian poet and songwriter reflects on his musical career.',
                'duration': 1740,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b00yng5w/The_Man_in_Black_Series_3_The_Printed_Name/',
            'info_dict': {
                'id': 'b00yng1d',
                'ext': 'flv',
                'title': 'The Man in Black: Series 3: The Printed Name',
                'description': "Mark Gatiss introduces Nicholas Pierpan's chilling tale of a writer's devilish pact with a mysterious man. Stars Ewan Bailey.",
                'duration': 1800,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Episode is no longer available on BBC iPlayer Radio',
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b03vhd1f/The_Voice_UK_Series_3_Blind_Auditions_5/',
            'info_dict': {
                'id': 'b00yng1d',
                'ext': 'flv',
                'title': 'The Voice UK: Series 3: Blind Auditions 5',
                'description': "Emma Willis and Marvin Humes present the fifth set of blind auditions in the singing competition, as the coaches continue to build their teams based on voice alone.",
                'duration': 5100,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
        },
        {
            'url': 'http://www.bbc.co.uk/iplayer/episode/p026c7jt/tomorrows-worlds-the-unearthly-history-of-science-fiction-2-invasion',
            'info_dict': {
                'id': 'b03k3pb7',
                'ext': 'flv',
                'title': "Tomorrow's Worlds: The Unearthly History of Science Fiction",
                'description': '2. Invasion',
                'duration': 3600,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'Currently BBC iPlayer TV programmes are available to play in the UK only',
        }, {
            'url': 'http://www.bbc.co.uk/programmes/b04v20dw',
            'info_dict': {
                'id': 'b04v209v',
                'ext': 'flv',
                'title': 'Pete Tong, The Essential New Tune Special',
                'description': "Pete has a very special mix - all of 2014's Essential New Tunes!",
                'duration': 10800,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        }, {
            'url': 'http://www.bbc.co.uk/music/clips/p02frcc3',
            'note': 'Audio',
            'info_dict': {
                'id': 'p02frcch',
                'ext': 'flv',
                'title': 'Pete Tong, Past, Present and Future Special, Madeon - After Hours mix',
                'description': 'French house superstar Madeon takes us out of the club and onto the after party.',
                'duration': 3507,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        }, {
            'url': 'http://www.bbc.co.uk/music/clips/p025c0zz',
            'note': 'Video',
            'info_dict': {
                'id': 'p025c103',
                'ext': 'flv',
                'title': 'Reading and Leeds Festival, 2014, Rae Morris - Closer (Live on BBC Three)',
                'description': 'Rae Morris performs Closer for BBC Three at Reading 2014',
                'duration': 226,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            }
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b054fn09/ad/natural-world-20152016-2-super-powered-owls',
            'info_dict': {
                'id': 'p02n76xf',
                'ext': 'flv',
                'title': 'Natural World, 2015-2016: 2. Super Powered Owls',
                'description': 'md5:e4db5c937d0e95a7c6b5e654d429183d',
                'duration': 3540,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'geolocation',
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/episode/b05zmgwn/royal-academy-summer-exhibition',
            'info_dict': {
                'id': 'b05zmgw1',
                'ext': 'flv',
                'description': 'Kirsty Wark and Morgan Quaintance visit the Royal Academy as it prepares for its annual artistic extravaganza, meeting people who have come together to make the show unique.',
                'title': 'Royal Academy Summer Exhibition',
                'duration': 3540,
            },
            'params': {
                # rtmp download
                'skip_download': True,
            },
            'skip': 'geolocation',
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/playlist/p01dvks4',
            'only_matching': True,
        }, {
            'url': 'http://www.bbc.co.uk/music/clips#p02frcc3',
            'only_matching': True,
        }, {
            'url': 'http://www.bbc.co.uk/iplayer/cbeebies/episode/b0480276/bing-14-atchoo',
            'only_matching': True,
        }
    ]

    def _extract_asx_playlist(self, connection, programme_id):
        """Return the list of media hrefs contained in an ASX playlist."""
        asx = self._download_xml(connection.get('href'), programme_id, 'Downloading ASX playlist')
        return [ref.get('href') for ref in asx.findall('./Entry/ref')]

    def _extract_connection(self, connection, programme_id):
        """Build format dicts for a single <connection> element (http or rtmp)."""
        formats = []
        protocol = connection.get('protocol')
        supplier = connection.get('supplier')
        if protocol == 'http':
            href = connection.get('href')
            # ASX playlist
            if supplier == 'asx':
                for i, ref in enumerate(self._extract_asx_playlist(connection, programme_id)):
                    formats.append({
                        'url': ref,
                        'format_id': 'ref%s_%s' % (i, supplier),
                    })
            # Direct link
            else:
                formats.append({
                    'url': href,
                    'format_id': supplier,
                })
        elif protocol == 'rtmp':
            application = connection.get('application', 'ondemand')
            auth_string = connection.get('authString')
            identifier = connection.get('identifier')
            server = connection.get('server')
            formats.append({
                'url': '%s://%s/%s?%s' % (protocol, server, application, auth_string),
                'play_path': identifier,
                # BBC's emp player is required as the RTMP SWF verifier.
                'app': '%s?%s' % (application, auth_string),
                'page_url': 'http://www.bbc.co.uk',
                'player_url': 'http://www.bbc.co.uk/emp/releases/iplayer/revisions/617463_618125_4/617463_618125_4_emp.swf',
                'rtmp_live': False,
                'ext': 'flv',
                'format_id': supplier,
            })
        return formats

    def _extract_items(self, playlist):
        """Return <item> elements from a legacy emp playlist document."""
        return playlist.findall('./{http://bbc.co.uk/2008/emp/playlist}item')

    def _extract_medias(self, media_selection):
        """Return <media> elements from a mediaselection document.

        Raises ExtractorError when the document carries an <error> element.
        """
        error = media_selection.find('./{http://bbc.co.uk/2008/mp/mediaselection}error')
        if error is not None:
            raise ExtractorError(
                '%s returned error: %s' % (self.IE_NAME, error.get('id')), expected=True)
        return media_selection.findall('./{http://bbc.co.uk/2008/mp/mediaselection}media')

    def _extract_connections(self, media):
        """Return <connection> elements of a <media> element."""
        return media.findall('./{http://bbc.co.uk/2008/mp/mediaselection}connection')

    def _extract_video(self, media, programme_id):
        """Build video format dicts for one <media> element."""
        formats = []
        # NOTE(review): assumes the mediaselector always supplies these
        # attributes; int() would raise on a missing one -- confirm upstream.
        vbr = int(media.get('bitrate'))
        vcodec = media.get('encoding')
        service = media.get('service')
        width = int(media.get('width'))
        height = int(media.get('height'))
        file_size = int(media.get('media_file_size'))
        for connection in self._extract_connections(media):
            conn_formats = self._extract_connection(connection, programme_id)
            for format in conn_formats:
                format.update({
                    'format_id': '%s_%s' % (service, format['format_id']),
                    'width': width,
                    'height': height,
                    'vbr': vbr,
                    'vcodec': vcodec,
                    'filesize': file_size,
                })
            formats.extend(conn_formats)
        return formats

    def _extract_audio(self, media, programme_id):
        """Build audio format dicts for one <media> element."""
        formats = []
        abr = int(media.get('bitrate'))
        acodec = media.get('encoding')
        service = media.get('service')
        for connection in self._extract_connections(media):
            conn_formats = self._extract_connection(connection, programme_id)
            for format in conn_formats:
                format.update({
                    'format_id': '%s_%s' % (service, format['format_id']),
                    'abr': abr,
                    'acodec': acodec,
                })
            formats.extend(conn_formats)
        return formats

    def _get_subtitles(self, media, programme_id):
        """Return a subtitles dict mapping language to TTML caption URLs."""
        subtitles = {}
        for connection in self._extract_connections(media):
            captions = self._download_xml(connection.get('href'), programme_id, 'Downloading captions')
            lang = captions.get('{http://www.w3.org/XML/1998/namespace}lang', 'en')
            subtitles[lang] = [
                {
                    'url': connection.get('href'),
                    'ext': 'ttml',
                },
            ]
        return subtitles

    def _download_media_selector(self, programme_id):
        """Download the media selection XML and return (formats, subtitles)."""
        try:
            media_selection = self._download_xml(
                'http://open.live.bbc.co.uk/mediaselector/5/select/version/2.0/mediaset/pc/vpid/%s' % programme_id,
                programme_id, 'Downloading media selection XML')
        except ExtractorError as ee:
            # A 403 still carries a parseable mediaselection error document.
            if isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 403:
                media_selection = xml.etree.ElementTree.fromstring(ee.cause.read().decode('utf-8'))
            else:
                raise
        formats = []
        subtitles = None
        for media in self._extract_medias(media_selection):
            kind = media.get('kind')
            if kind == 'audio':
                formats.extend(self._extract_audio(media, programme_id))
            elif kind == 'video':
                formats.extend(self._extract_video(media, programme_id))
            elif kind == 'captions':
                subtitles = self.extract_subtitles(media, programme_id)
        return formats, subtitles

    def _download_playlist(self, playlist_id):
        """Resolve a playlist id to (programme_id, title, description, duration,
        formats, subtitles), trying the JSON playlist first and falling back to
        the legacy XML playlist on 404."""
        try:
            playlist = self._download_json(
                'http://www.bbc.co.uk/programmes/%s/playlist.json' % playlist_id,
                playlist_id, 'Downloading playlist JSON')

            version = playlist.get('defaultAvailableVersion')
            if version:
                smp_config = version['smpConfig']
                title = smp_config['title']
                description = smp_config['summary']
                for item in smp_config['items']:
                    kind = item['kind']
                    if kind != 'programme' and kind != 'radioProgramme':
                        continue
                    programme_id = item.get('vpid')
                    duration = int(item.get('duration'))
                    formats, subtitles = self._download_media_selector(programme_id)
                    return programme_id, title, description, duration, formats, subtitles
        except ExtractorError as ee:
            if not (isinstance(ee.cause, compat_HTTPError) and ee.cause.code == 404):
                raise

        # fallback to legacy playlist
        playlist = self._download_xml(
            'http://www.bbc.co.uk/iplayer/playlist/%s' % playlist_id,
            playlist_id, 'Downloading legacy playlist XML')

        no_items = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}noItems')
        if no_items is not None:
            reason = no_items.get('reason')
            if reason == 'preAvailability':
                msg = 'Episode %s is not yet available' % playlist_id
            elif reason == 'postAvailability':
                msg = 'Episode %s is no longer available' % playlist_id
            elif reason == 'noMedia':
                msg = 'Episode %s is not currently available' % playlist_id
            else:
                msg = 'Episode %s is not available: %s' % (playlist_id, reason)
            raise ExtractorError(msg, expected=True)

        for item in self._extract_items(playlist):
            kind = item.get('kind')
            if kind != 'programme' and kind != 'radioProgramme':
                continue
            title = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}title').text
            description = playlist.find('./{http://bbc.co.uk/2008/emp/playlist}summary').text
            programme_id = item.get('identifier')
            duration = int(item.get('duration'))
            formats, subtitles = self._download_media_selector(programme_id)

        return programme_id, title, description, duration, formats, subtitles

    def _real_extract(self, url):
        group_id = self._match_id(url)

        webpage = self._download_webpage(url, group_id, 'Downloading video page')

        programme_id = None
        # Fix: initialize duration so the return dict below does not raise
        # NameError when the vpid is found via regex without player metadata.
        duration = None

        tviplayer = self._search_regex(
            r'mediator\.bind\(({.+?})\s*,\s*document\.getElementById',
            webpage, 'player', default=None)

        if tviplayer:
            player = self._parse_json(tviplayer, group_id).get('player', {})
            duration = int_or_none(player.get('duration'))
            programme_id = player.get('vpid')

        if not programme_id:
            programme_id = self._search_regex(
                r'"vpid"\s*:\s*"([\da-z]{8})"', webpage, 'vpid', fatal=False, default=None)

        if programme_id:
            formats, subtitles = self._download_media_selector(programme_id)
            title = self._og_search_title(webpage)
            description = self._search_regex(
                r'<p class="[^"]*medium-description[^"]*">([^<]+)</p>',
                webpage, 'description', fatal=False)
        else:
            programme_id, title, description, duration, formats, subtitles = self._download_playlist(group_id)

        self._sort_formats(formats)

        return {
            'id': programme_id,
            'title': title,
            'description': description,
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }
| |
"""TensorFlow ops for directed graphs."""
import tensorflow as tf
from syntaxnet.util import check
def ArcPotentialsFromTokens(source_tokens, target_tokens, weights):
  r"""Computes batched arc potentials via a bilinear form.

  Evaluates, for every batch element b and token pair (s,t),
    arc[b,s,t] =
        \sum_{i,j} source_tokens[b,s,i] * weights[i,j] * target_tokens[b,t,j],
  i.e. a bilinear score between source and target activations.  Extending the
  activations with a constant 1 yields the "biaffine" parameterization of
  Dozat and Manning (2017).

  Args:
    source_tokens: [B,N,S] batched activations for arc sources.
    target_tokens: [B,N,T] batched activations for arc targets.
    weights: [S,T] weight matrix; S,T must be statically known.  B,N may be
      dynamic.  All dtypes must be compatible.

  Returns:
    [B,N,N] tensor of arc potentials with the same dtype as the inputs.
    Diagonal entries correspond to self-loops and may be meaningless.
  """
  # Static structure checks: ranks first, then the activation dimensions.
  check.Eq(source_tokens.get_shape().ndims, 3, 'source_tokens must be rank 3')
  check.Eq(target_tokens.get_shape().ndims, 3, 'target_tokens must be rank 3')
  check.Eq(weights.get_shape().ndims, 2, 'weights must be a matrix')
  num_source_activations, num_target_activations = (
      weights.get_shape().as_list())
  check.NotNone(num_source_activations, 'unknown source activation dimension')
  check.NotNone(num_target_activations, 'unknown target activation dimension')
  check.Eq(source_tokens.get_shape().as_list()[2], num_source_activations,
           'dimension mismatch between weights and source_tokens')
  check.Eq(target_tokens.get_shape().as_list()[2], num_target_activations,
           'dimension mismatch between weights and target_tokens')
  check.Same([weights.dtype.base_dtype,
              source_tokens.dtype.base_dtype,
              target_tokens.dtype.base_dtype],
             'dtype mismatch')

  source_shape = tf.shape(source_tokens)
  target_shape = tf.shape(target_tokens)
  batch_size = source_shape[0]
  num_tokens = source_shape[1]
  with tf.control_dependencies([
      tf.assert_equal(batch_size, target_shape[0]),
      tf.assert_equal(num_tokens, target_shape[1])]):
    # Collapse [B,N] into one axis so a single matmul covers the whole batch.
    flat_targets = tf.reshape(target_tokens, [-1, num_target_activations])
    # transpose_b keeps both operands row-major-aligned, avoiding copies.
    flat_projected = tf.matmul(flat_targets, weights, transpose_b=True)
    # Restore the batch axis for the pairwise (per-batch-element) product.
    projected = tf.reshape(
        flat_projected, [batch_size, num_tokens, num_source_activations])
    # Batched matmul: one [N,S] x [S,N] product per batch element; the
    # pairwise nature of the score rules out a single flat multiplication.
    return tf.matmul(source_tokens, projected, transpose_b=True)
def ArcSourcePotentialsFromTokens(tokens, weights):
  r"""Computes arc potentials that depend only on the source token.

  For every batch element b and source token s,
    arc[b,s,:] = \sum_{i} weights[i] * tokens[b,s,i],
  so each row of the output repeats the same source score across all targets.

  Args:
    tokens: [B,N,S] batched source-token activations.
    weights: [S] weight vector; S must be statically known.  B,N may be
      dynamic.  All dtypes must be compatible.

  Returns:
    [B,N,N] tensor of arc potentials with the same dtype as the inputs.
    Diagonal entries correspond to self-loops and may be meaningless.
  """
  check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3')
  check.Eq(weights.get_shape().ndims, 1, 'weights must be a vector')
  num_source_activations = weights.get_shape().as_list()[0]
  check.NotNone(num_source_activations, 'unknown source activation dimension')
  check.Eq(tokens.get_shape().as_list()[2], num_source_activations,
           'dimension mismatch between weights and tokens')
  check.Same([weights.dtype.base_dtype,
              tokens.dtype.base_dtype],
             'dtype mismatch')

  dynamic_shape = tf.shape(tokens)
  batch_size, num_tokens = dynamic_shape[0], dynamic_shape[1]

  # One flat matmul scores every token at once; tiling then copies each
  # source score across all possible targets.
  flat_tokens = tf.reshape(tokens, [-1, num_source_activations])
  scores_bnx1 = tf.matmul(flat_tokens, tf.expand_dims(weights, 1))
  tiled_scores = tf.tile(scores_bnx1, [1, num_tokens])
  # Restore the batch axis in the output.
  return tf.reshape(tiled_scores, [batch_size, num_tokens, num_tokens])
def RootPotentialsFromTokens(root, tokens, weights):
  r"""Computes root-selection potentials via a bilinear form.

  For every batch element b and candidate root r,
    roots[b,r] = \sum_{i,j} root[i] * weights[i,j] * tokens[b,r,j].

  Args:
    root: [S] activation vector for the artificial root token.
    tokens: [B,N,T] batched activations for candidate root tokens.
    weights: [S,T] weight matrix; S,T must be statically known.  B,N may be
      dynamic.  All dtypes must be compatible.

  Returns:
    [B,N] matrix of root-selection potentials, same dtype as the inputs.
  """
  check.Eq(root.get_shape().ndims, 1, 'root must be a vector')
  check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3')
  check.Eq(weights.get_shape().ndims, 2, 'weights must be a matrix')
  num_source_activations, num_target_activations = (
      weights.get_shape().as_list())
  check.NotNone(num_source_activations, 'unknown source activation dimension')
  check.NotNone(num_target_activations, 'unknown target activation dimension')
  check.Eq(root.get_shape().as_list()[0], num_source_activations,
           'dimension mismatch between weights and root')
  check.Eq(tokens.get_shape().as_list()[2], num_target_activations,
           'dimension mismatch between weights and tokens')
  check.Same([weights.dtype.base_dtype,
              root.dtype.base_dtype,
              tokens.dtype.base_dtype],
             'dtype mismatch')

  dynamic_shape = tf.shape(tokens)
  batch_size, num_tokens = dynamic_shape[0], dynamic_shape[1]

  # Flatten the batch so two matmuls cover the whole computation.
  flat_tokens = tf.reshape(tokens, [-1, num_target_activations])
  weights_tokens = tf.matmul(flat_tokens, weights, transpose_b=True)
  scores_1xbn = tf.matmul(tf.expand_dims(root, 0), weights_tokens,
                          transpose_b=True)
  # Restore the batch axis in the output.
  return tf.reshape(scores_1xbn, [batch_size, num_tokens])
def CombineArcAndRootPotentials(arcs, roots):
  """Merges root potentials onto the diagonal of the arc potentials.

  Args:
    arcs: [B,N,N] batched arc potentials.
    roots: [B,N] batched root-selection potentials.

  Returns:
    [B,N,N] tensor P with P_{b,s,t} = roots[b,t] if s == t else arcs[b,s,t].
  """
  # Static structure and dtype checks.
  check.Eq(arcs.get_shape().ndims, 3, 'arcs must be rank 3')
  check.Eq(roots.get_shape().ndims, 2, 'roots must be a matrix')
  check.Same([arcs.dtype.base_dtype, roots.dtype.base_dtype],
             'dtype mismatch')

  roots_shape = tf.shape(roots)
  arcs_shape = tf.shape(arcs)
  batch_size, num_tokens = roots_shape[0], roots_shape[1]
  # Validate dynamic shape agreement before overwriting the diagonal.
  with tf.control_dependencies([
      tf.assert_equal(batch_size, arcs_shape[0]),
      tf.assert_equal(num_tokens, arcs_shape[1]),
      tf.assert_equal(num_tokens, arcs_shape[2])]):
    return tf.matrix_set_diag(arcs, roots)
def LabelPotentialsFromTokens(tokens, weights):
  r"""Computes per-token label potentials.

  For every batch element b, token t, and label l,
    labels[b,t,l] = \sum_{i} weights[l,i] * tokens[b,t,i].

  Args:
    tokens: [B,N,T] batched token activations.
    weights: [L,T] label weight matrix; L,T must be statically known.  B,N
      may be dynamic.  All dtypes must be compatible.

  Returns:
    [B,N,L] tensor of label potentials with the same dtype as the inputs.
  """
  check.Eq(tokens.get_shape().ndims, 3, 'tokens must be rank 3')
  check.Eq(weights.get_shape().ndims, 2, 'weights must be a matrix')
  num_labels, num_activations = weights.get_shape().as_list()
  check.NotNone(num_labels, 'unknown number of labels')
  check.NotNone(num_activations, 'unknown activation dimension')
  check.Eq(tokens.get_shape().as_list()[2], num_activations,
           'activation mismatch between weights and tokens')
  check.Same([tokens.dtype.base_dtype,
              weights.dtype.base_dtype],
             'dtype mismatch')

  dynamic_shape = tf.shape(tokens)
  batch_size, num_tokens = dynamic_shape[0], dynamic_shape[1]

  # Score all tokens with one flat matmul, then restore the batch axis.
  flat_tokens = tf.reshape(tokens, [-1, num_activations])
  flat_labels = tf.matmul(flat_tokens, weights, transpose_b=True)
  return tf.reshape(flat_labels, [batch_size, num_tokens, num_labels])
def LabelPotentialsFromTokenPairs(sources, targets, weights):
  r"""Computes label potentials for aligned source/target token pairs.

  For every batch element b, token t, and label l,
    labels[b,t,l] =
        \sum_{i,j} sources[b,t,i] * weights[l,i,j] * targets[b,t,j].

  Args:
    sources: [B,N,S] batched source-token activations.
    targets: [B,N,T] batched target-token activations.
    weights: [L,S,T] label weight tensor; L,S,T must be statically known.
      B,N may be dynamic.  All dtypes must be compatible.

  Returns:
    [B,N,L] tensor of label potentials with the same dtype as the inputs.
  """
  check.Eq(sources.get_shape().ndims, 3, 'sources must be rank 3')
  check.Eq(targets.get_shape().ndims, 3, 'targets must be rank 3')
  check.Eq(weights.get_shape().ndims, 3, 'weights must be rank 3')
  num_labels, num_source_activations, num_target_activations = (
      weights.get_shape().as_list())
  check.NotNone(num_labels, 'unknown number of labels')
  check.NotNone(num_source_activations, 'unknown source activation dimension')
  check.NotNone(num_target_activations, 'unknown target activation dimension')
  check.Eq(sources.get_shape().as_list()[2], num_source_activations,
           'activation mismatch between weights and source tokens')
  check.Eq(targets.get_shape().as_list()[2], num_target_activations,
           'activation mismatch between weights and target tokens')
  check.Same([sources.dtype.base_dtype,
              targets.dtype.base_dtype,
              weights.dtype.base_dtype],
             'dtype mismatch')

  sources_shape = tf.shape(sources)
  targets_shape = tf.shape(targets)
  batch_size = sources_shape[0]
  num_tokens = sources_shape[1]
  with tf.control_dependencies([tf.assert_equal(batch_size, targets_shape[0]),
                                tf.assert_equal(num_tokens,
                                                targets_shape[1])]):
    # Each token needs a vector-3tensor-vector product; there is no single op
    # for that, so it is assembled from reshape() and matmul().
    # Fold |weights| into a [L*S,T] matrix and apply the flattened targets.
    weights_matrix = tf.reshape(weights, [num_labels * num_source_activations,
                                          num_target_activations])
    flat_targets = tf.reshape(targets, [-1, num_target_activations])
    weights_targets = tf.matmul(flat_targets, weights_matrix,
                                transpose_b=True)
    # Unfold to [B,N,L,S] so source vectors can be applied per token.
    weights_targets_4d = tf.reshape(
        weights_targets,
        [batch_size, num_tokens, num_labels, num_source_activations])
    # Batched matmul of the trailing [L,S] matrices against the trailing [S]
    # vectors of the sources; squeeze drops the singleton result axis.
    expanded_sources = tf.expand_dims(sources, 2)
    products = tf.matmul(weights_targets_4d, expanded_sources,
                         transpose_b=True)
    return tf.squeeze(products, [3])
| |
"""Support for Bond lights."""
from __future__ import annotations
import logging
from typing import Any
from aiohttp.client_exceptions import ClientResponseError
from bond_api import Action, BPUPSubscriptions, DeviceType
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
SUPPORT_BRIGHTNESS,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import BondHub
from .const import (
ATTR_POWER_STATE,
BPUP_SUBS,
DOMAIN,
HUB,
SERVICE_SET_LIGHT_BRIGHTNESS_TRACKED_STATE,
SERVICE_SET_LIGHT_POWER_TRACKED_STATE,
)
from .entity import BondEntity
from .utils import BondDevice
_LOGGER = logging.getLogger(__name__)
# Entity-service names registered for every Bond light entity in
# async_setup_entry; each maps to an async_<service> coroutine on BondLight.
SERVICE_START_INCREASING_BRIGHTNESS = "start_increasing_brightness"
SERVICE_START_DECREASING_BRIGHTNESS = "start_decreasing_brightness"
SERVICE_STOP = "stop"
ENTITY_SERVICES = [
    SERVICE_START_INCREASING_BRIGHTNESS,
    SERVICE_START_DECREASING_BRIGHTNESS,
    SERVICE_STOP,
]
async def async_setup_entry(
    hass: HomeAssistant,
    entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Set up Bond light devices.

    Registers the entity services and creates one entity per Bond device
    (or sub-device) that exposes light or fireplace features.
    """
    data = hass.data[DOMAIN][entry.entry_id]
    hub: BondHub = data[HUB]
    bpup_subs: BPUPSubscriptions = data[BPUP_SUBS]
    # Fix: the platform was previously fetched twice; once is enough.
    platform = entity_platform.async_get_current_platform()
    for service in ENTITY_SERVICES:
        platform.async_register_entity_service(
            service,
            {},
            f"async_{service}",
        )

    # Fans with a single controllable light (not separate up/down lights).
    fan_lights: list[Entity] = [
        BondLight(hub, device, bpup_subs)
        for device in hub.devices
        if DeviceType.is_fan(device.type)
        and device.supports_light()
        and not (device.supports_up_light() and device.supports_down_light())
    ]

    fan_up_lights: list[Entity] = [
        BondUpLight(hub, device, bpup_subs, "up_light")
        for device in hub.devices
        if DeviceType.is_fan(device.type) and device.supports_up_light()
    ]

    fan_down_lights: list[Entity] = [
        BondDownLight(hub, device, bpup_subs, "down_light")
        for device in hub.devices
        if DeviceType.is_fan(device.type) and device.supports_down_light()
    ]

    # Fireplaces are exposed as lights whose brightness maps to flame level.
    fireplaces: list[Entity] = [
        BondFireplace(hub, device, bpup_subs)
        for device in hub.devices
        if DeviceType.is_fireplace(device.type)
    ]

    # A fireplace may additionally carry an attached light as a sub-device.
    fp_lights: list[Entity] = [
        BondLight(hub, device, bpup_subs, "light")
        for device in hub.devices
        if DeviceType.is_fireplace(device.type) and device.supports_light()
    ]

    lights: list[Entity] = [
        BondLight(hub, device, bpup_subs)
        for device in hub.devices
        if DeviceType.is_light(device.type)
    ]

    platform.async_register_entity_service(
        SERVICE_SET_LIGHT_BRIGHTNESS_TRACKED_STATE,
        {
            vol.Required(ATTR_BRIGHTNESS): vol.All(
                vol.Number(scale=0), vol.Range(0, 255)
            )
        },
        "async_set_brightness_belief",
    )
    platform.async_register_entity_service(
        SERVICE_SET_LIGHT_POWER_TRACKED_STATE,
        {vol.Required(ATTR_POWER_STATE): vol.All(cv.boolean)},
        "async_set_power_belief",
    )

    async_add_entities(
        fan_lights + fan_up_lights + fan_down_lights + fireplaces + fp_lights + lights,
        True,
    )
class BondBaseLight(BondEntity, LightEntity):
    """Base class for Bond lights; implements the belief-state services."""

    _attr_supported_features = 0

    async def async_set_brightness_belief(self, brightness: int) -> None:
        """Set the belief state of the light."""
        if not self._device.supports_set_brightness():
            raise HomeAssistantError("This device does not support setting brightness")
        if brightness == 0:
            # Zero brightness is equivalent to telling Bond the light is off.
            await self.async_set_power_belief(False)
            return
        # Convert HA's 0-255 brightness scale to Bond's 0-100 scale.
        bond_level = round((brightness * 100) / 255)
        try:
            await self._hub.bond.action(
                self._device.device_id,
                Action.set_brightness_belief(bond_level),
            )
        except ClientResponseError as ex:
            raise HomeAssistantError(
                f"The bond API returned an error calling set_brightness_belief for {self.entity_id}. Code: {ex.code} Message: {ex.message}"
            ) from ex

    async def async_set_power_belief(self, power_state: bool) -> None:
        """Set the belief state of the light."""
        try:
            await self._hub.bond.action(
                self._device.device_id, Action.set_light_state_belief(power_state)
            )
        except ClientResponseError as ex:
            raise HomeAssistantError(
                f"The bond API returned an error calling set_light_state_belief for {self.entity_id}. Code: {ex.code} Message: {ex.message}"
            ) from ex
class BondLight(BondBaseLight, BondEntity, LightEntity):
    """Representation of a Bond light."""

    def __init__(
        self,
        hub: BondHub,
        device: BondDevice,
        bpup_subs: BPUPSubscriptions,
        sub_device: str | None = None,
    ) -> None:
        """Create HA entity representing Bond light."""
        super().__init__(hub, device, bpup_subs, sub_device)
        # Advertise brightness support only when the device can set it.
        if device.supports_set_brightness():
            self._attr_supported_features = SUPPORT_BRIGHTNESS

    def _apply_state(self, state: dict) -> None:
        # Bond reports power as 0/1 and brightness as a 1-100 percentage.
        self._attr_is_on = state.get("light") == 1
        pct = state.get("brightness")
        self._attr_brightness = round(pct * 255 / 100) if pct else None

    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn on the light."""
        requested = kwargs.get(ATTR_BRIGHTNESS)
        if not requested:
            await self._hub.bond.action(self._device.device_id, Action.turn_light_on())
            return
        # Convert HA's 0-255 brightness scale to Bond's 0-100 scale.
        await self._hub.bond.action(
            self._device.device_id,
            Action.set_brightness(round((requested * 100) / 255)),
        )

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off the light."""
        await self._hub.bond.action(self._device.device_id, Action.turn_light_off())

    @callback
    def _async_has_action_or_raise(self, action: str) -> None:
        """Raise HomeAssistantError if the device does not support an action."""
        if not self._device.has_action(action):
            raise HomeAssistantError(f"{self.entity_id} does not support {action}")

    async def async_start_increasing_brightness(self) -> None:
        """Start increasing the light brightness."""
        self._async_has_action_or_raise(Action.START_INCREASING_BRIGHTNESS)
        await self._hub.bond.action(
            self._device.device_id, Action(Action.START_INCREASING_BRIGHTNESS)
        )

    async def async_start_decreasing_brightness(self) -> None:
        """Start decreasing the light brightness."""
        self._async_has_action_or_raise(Action.START_DECREASING_BRIGHTNESS)
        await self._hub.bond.action(
            self._device.device_id, Action(Action.START_DECREASING_BRIGHTNESS)
        )

    async def async_stop(self) -> None:
        """Stop all actions and clear the queue."""
        self._async_has_action_or_raise(Action.STOP)
        await self._hub.bond.action(self._device.device_id, Action(Action.STOP))
class BondDownLight(BondBaseLight, BondEntity, LightEntity):
    """Representation of a Bond fan's down light."""

    def _apply_state(self, state: dict) -> None:
        # On only when the down light is enabled AND overall light power is on.
        self._attr_is_on = bool(state.get("down_light") and state.get("light"))

    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn on the light."""
        action = Action(Action.TURN_DOWN_LIGHT_ON)
        await self._hub.bond.action(self._device.device_id, action)

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off the light."""
        action = Action(Action.TURN_DOWN_LIGHT_OFF)
        await self._hub.bond.action(self._device.device_id, action)
class BondUpLight(BondBaseLight, BondEntity, LightEntity):
    """Representation of a Bond fan's up light."""

    def _apply_state(self, state: dict) -> None:
        # On only when the up light is enabled AND overall light power is on.
        self._attr_is_on = bool(state.get("up_light") and state.get("light"))

    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn on the light."""
        action = Action(Action.TURN_UP_LIGHT_ON)
        await self._hub.bond.action(self._device.device_id, action)

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn off the light."""
        action = Action(Action.TURN_UP_LIGHT_OFF)
        await self._hub.bond.action(self._device.device_id, action)
class BondFireplace(BondEntity, LightEntity):
    """Representation of a Bond-controlled fireplace."""

    _attr_supported_features = SUPPORT_BRIGHTNESS

    def _apply_state(self, state: dict) -> None:
        power = state.get("power")
        flame = state.get("flame")
        is_on = power == 1
        self._attr_is_on = is_on
        # Bond reports flame as a 0-100 percentage; HA brightness is 0-255.
        self._attr_brightness = round(flame * 255 / 100) if flame else None
        self._attr_icon = "mdi:fireplace" if is_on else "mdi:fireplace-off"

    async def async_turn_on(self, **kwargs: Any) -> None:
        """Turn the fireplace on."""
        _LOGGER.debug("Fireplace async_turn_on called with: %s", kwargs)
        brightness = kwargs.get(ATTR_BRIGHTNESS)
        if not brightness:
            await self._hub.bond.action(self._device.device_id, Action.turn_on())
            return
        flame = round((brightness * 100) / 255)
        await self._hub.bond.action(self._device.device_id, Action.set_flame(flame))

    async def async_turn_off(self, **kwargs: Any) -> None:
        """Turn the fireplace off."""
        _LOGGER.debug("Fireplace async_turn_off called with: %s", kwargs)
        await self._hub.bond.action(self._device.device_id, Action.turn_off())

    async def async_set_brightness_belief(self, brightness: int) -> None:
        """Set the belief state of the light."""
        if not self._device.supports_set_brightness():
            raise HomeAssistantError("This device does not support setting brightness")
        if brightness == 0:
            await self.async_set_power_belief(False)
            return
        flame = round((brightness * 100) / 255)
        try:
            await self._hub.bond.action(
                self._device.device_id, Action.set_brightness_belief(flame)
            )
        except ClientResponseError as ex:
            raise HomeAssistantError(
                f"The bond API returned an error calling set_brightness_belief for {self.entity_id}. Code: {ex.code} Message: {ex.message}"
            ) from ex

    async def async_set_power_belief(self, power_state: bool) -> None:
        """Set the belief state of the light."""
        try:
            await self._hub.bond.action(
                self._device.device_id, Action.set_power_state_belief(power_state)
            )
        except ClientResponseError as ex:
            raise HomeAssistantError(
                f"The bond API returned an error calling set_power_state_belief for {self.entity_id}. Code: {ex.code} Message: {ex.message}"
            ) from ex
| |
"""
Processes for Species distribution
Author: Nils Hempelmann (nils.hempelmann@lsce.ipsl.fr)
"""
from pywps.Process import WPSProcess
from flyingpigeon.sdm import _SDMINDICES_
from flyingpigeon.log import init_process_logger
import logging
logger = logging.getLogger(__name__)
class sdmcsvProcess(WPSProcess):
    """WPS process: species distribution model driven by a GBIF CSV table.

    Fixes over the previous revision:
    * ``tempfile`` was used in the pdf-merge fallback without being
      imported, raising NameError exactly when error recovery ran.
    * ``prediction[mask is False] = nan`` never matched anything
      (``is False`` on an array element is always False); the intended
      boolean-mask assignment is now used.
    * bare ``except:`` clauses narrowed to ``except Exception:``.
    """

    def __init__(self):
        WPSProcess.__init__(
            self,
            identifier="sdm_csv",
            title="SDM -- CSV table",
            version="0.9",
            metadata=[
                {"title": "LWF",
                 "href": "http://www.lwf.bayern.de/"},
                {"title": "Doc",
                 "href": ("http://flyingpigeon.readthedocs.io/en/latest/"
                          "descriptions/index.html#species-distribution-model")},
                {"title": "Paper",
                 "href": "http://www.hindawi.com/journals/jcli/2013/787250/"},
                {"title": "Tutorial",
                 "href": ("http://flyingpigeon.readthedocs.io/en/latest/"
                          "tutorials/sdm.html")},
            ],
            abstract=("Species distribution model for tree species based on"
                      " GBIF presence/absence data and climate model data."
                      " Indices will be calculated while processing"),
            statusSupported=True,
            storeSupported=True
        )

        # Literal Input Data
        # ------------------
        self.resources = self.addComplexInput(
            identifier="resources",
            title="tas/pr files",
            abstract=("Raw climate model outputs as stored in netCDF files."
                      " archives (tar/zip) can also be provided"),
            minOccurs=1,
            maxOccurs=500,
            maxmegabites=50000,
            formats=[{"mimeType": "application/x-netcdf"},
                     {"mimeType": "application/x-tar"},
                     {"mimeType": "application/zip"}],
        )

        self.gbif = self.addLiteralInput(
            identifier="gbif",
            title="GBIF csv file",
            abstract=("GBIF table (csv) with tree occurence"
                      " (output of 'GBIF data fetch' process )"),
            type=type(''),
            minOccurs=1,
            maxOccurs=1,
            default='http://localhost:8090/wpsoutputs/flyingpigeon/output_csv-abe15f64-c30d-11e6-bf63-142d277ef1f3.csv'
        )

        self.input_indices = self.addLiteralInput(
            identifier="input_indices",
            title="Indices",
            abstract="Climate indices related to growth conditions",
            type=type(''),
            minOccurs=1,
            maxOccurs=10,
            allowedValues=_SDMINDICES_
        )

        self.period = self.addLiteralInput(
            identifier="period",
            title="Reference period",
            abstract=("Reference period for climate conditions"
                      " (all = entire timeseries)"),
            default="all",
            type=type(''),
            minOccurs=1,
            maxOccurs=1,
            allowedValues=['all', '1951-1980', '1961-1990',
                           '1971-2000', '1981-2010']
        )

        self.archive_format = self.addLiteralInput(
            identifier="archive_format",
            title="Archive format",
            abstract=("Result files will be compressed into archives."
                      " Choose an appropriate format"),
            default="tar",
            type=type(''),
            minOccurs=1,
            maxOccurs=1,
            allowedValues=['zip', 'tar']
        )

        ###########
        # OUTPUTS
        ###########
        self.output_gbif = self.addComplexOutput(
            identifier="output_gbif",
            title="Graphic of GBIF coordinates",
            abstract=("PNG graphic file showing the presence of tree species"
                      " according to the CSV file"),
            formats=[{"mimeType": "image/png"}],
            asReference=True,
        )

        self.output_PA = self.addComplexOutput(
            identifier="output_PA",
            title="Graphic of PA mask",
            abstract=("PNG graphic file showing PA mask generated based on"
                      " netCDF spatial increment"),
            formats=[{"mimeType": "image/png"}],
            asReference=True,
        )

        self.output_indices = self.addComplexOutput(
            identifier="output_indices",
            title="Climate indices for growth conditions over all timesteps",
            abstract="Archive (tar/zip) containing calculated climate indices",
            formats=[{"mimeType": "application/x-tar"},
                     {"mimeType": "application/zip"}],
            asReference=True,
        )

        self.output_reference = self.addComplexOutput(
            identifier="output_reference",
            title="Climate indices for growth conditions of reference period",
            abstract="Archive (tar/zip) containing calculated climate indices",
            formats=[{"mimeType": "application/x-tar"},
                     {"mimeType": "application/zip"}],
            asReference=True,
        )

        self.output_prediction = self.addComplexOutput(
            identifier="output_prediction",
            title="predicted growth conditions",
            abstract=("Archive containing files of the predicted"
                      " growth conditions"),
            formats=[{"mimeType": "application/x-tar"},
                     {"mimeType": "application/zip"}],
            asReference=True,
        )

        self.output_info = self.addComplexOutput(
            identifier="output_info",
            title="GAM statistics information",
            abstract="Graphics and information of the learning statistics",
            formats=[{"mimeType": "application/pdf"}],
            asReference=True,
        )

        self.output_log = self.addComplexOutput(
            identifier="output_log",
            title="Logging information",
            abstract="Collected logs during process run.",
            formats=[{"mimeType": "text/plain"}],
            asReference=True,
        )

    def execute(self):
        """Run the workflow: fetch CSV, compute indices, train GAM, predict.

        Failures in optional steps (plots, masks) are logged and the run
        continues best-effort; failures in essential steps re-raise.
        """
        init_process_logger('log.txt')
        self.output_log.setValue('log.txt')

        # fix: tempfile was referenced in the pdf-merge fallback below
        # without ever being imported.
        import tempfile
        from os.path import basename
        from flyingpigeon import sdm
        from flyingpigeon.utils import archive, archiveextract, download

        self.status.set('Start process', 0)

        try:
            logger.info('reading the arguments')
            resources_raw = self.getInputValues(identifier='resources')
            csv_url = self.getInputValues(identifier='gbif')[0]
            period = self.getInputValues(identifier='period')[0]
            indices = self.getInputValues(identifier='input_indices')
            archive_format = self.archive_format.getValue()
            logger.info('indices %s ' % indices)
            logger.debug('csv_url %s' % csv_url)
        except Exception as e:
            logger.error('failed to read in the arguments %s ' % e)
            raise

        try:
            logger.info('set up the environment')
            csv_file = download(csv_url)
            resources = archiveextract(resources_raw)
        except Exception as e:
            logger.error('failed to set up the environment %s ' % e)
            raise

        try:
            self.status.set('read in latlon coordinates', 10)
            latlon = sdm.latlon_gbifcsv(csv_file)
            logger.info('got occurence coordinates %s ' % csv_file)
        except Exception as e:
            logger.exception('failed to extract the latlon points from file: %s: %s' % (csv_file, e))

        try:
            self.status.set('plot map', 20)
            from flyingpigeon.visualisation import map_gbifoccurrences
            occurence_map = map_gbifoccurrences(latlon)
        except Exception as e:
            logger.exception('failed to plot occurence map %s' % e)

        #################################
        # calculate the climate indices
        #################################
        ncs_indices = None
        try:
            self.status.set('start calculation of climate indices for %s'
                            % indices, 30)
            ncs_indices = sdm.get_indices(resources=resources, indices=indices)
            logger.info('indice calculation done')
        except Exception:
            msg = 'failed to calculate indices'
            logger.exception(msg)
            raise Exception(msg)

        try:
            self.status.set('get domain', 30)
            domains = set()
            for resource in ncs_indices:
                # get_domain works only if metadata are set in a correct way
                domains = domains.union([basename(resource).split('_')[1]])
            if len(domains) == 1:
                domain = list(domains)[0]
                logger.debug('Domain %s found in indices files' % domain)
            else:
                logger.error('Not a single domain in indices files %s' % domains)
        except Exception as e:
            logger.exception('failed to get domains %s' % e)

        try:
            self.status.set('generating the PA mask', 20)
            PAmask = sdm.get_PAmask(coordinates=latlon, domain=domain)
            logger.info('PA mask sucessfully generated')
        except Exception as e:
            logger.exception('failed to generate the PA mask: %s' % e)

        try:
            self.status.set('Ploting PA mask', 25)
            from flyingpigeon.visualisation import map_PAmask
            PAmask_png = map_PAmask(PAmask)
        except Exception as e:
            logger.exception('failed to plot the PA mask: %s' % e)

        try:
            # sort indices per dataset so each one gets its own GAM/prediction
            indices_dic = None
            indices_dic = sdm.sort_indices(ncs_indices)
            logger.info('indice files sorted for %s Datasets' %
                        len(indices_dic.keys()))
        except Exception:
            msg = 'failed to sort indices'
            logger.exception(msg)
            raise Exception(msg)

        ncs_references = []
        species_files = []
        stat_infos = []

        for count, key in enumerate(indices_dic.keys()):
            try:
                status_nr = 40 + count * 10
                self.status.set('Start processing of %s' % key, status_nr)

                ncs = indices_dic[key]
                logger.info('with %s files' % len(ncs))

                try:
                    ncs_reference = sdm.get_reference(ncs_indices=ncs, period=period)
                    ncs_references.extend(ncs_reference)
                    logger.info('reference indice calculated %s '
                                % ncs_references)
                except Exception:
                    msg = 'failed to calculate the reference'
                    logger.exception(msg)
                    raise Exception(msg)

                try:
                    gam_model, predict_gam, gam_info = sdm.get_gam(ncs_reference, PAmask)
                    stat_infos.append(gam_info)
                    self.status.set('GAM sucessfully trained', status_nr + 5)
                except Exception as e:
                    msg = 'failed to train GAM for %s : %s' % (key, e)
                    logger.debug(msg)

                try:
                    prediction = sdm.get_prediction(gam_model, ncs)
                    self.status.set('prediction done', status_nr + 7)
                except Exception as e:
                    msg = 'failed to predict tree occurence %s' % e
                    logger.exception(msg)

                try:
                    self.status.set('land sea mask for predicted data', status_nr + 8)
                    from numpy import invert, isnan, nan, broadcast_arrays
                    mask = invert(isnan(PAmask))
                    mask = broadcast_arrays(prediction, mask)[1]
                    # fix: the previous ``prediction[mask is False] = nan``
                    # was a no-op; blank out predictions outside the mask.
                    prediction[~mask] = nan
                except Exception as e:
                    logger.debug('failed to mask predicted data: %s' % e)

                try:
                    species_files.append(sdm.write_to_file(ncs[0], prediction))
                    logger.info('Favourabillity written to file')
                except Exception as e:
                    msg = 'failed to write species file %s' % e
                    logger.debug(msg)
            except Exception as e:
                msg = 'failed to calculate reference indices. %s ' % e
                logger.exception(msg)
                raise Exception(msg)

        try:
            archive_indices = None
            archive_indices = archive(ncs_indices, format=archive_format)
            logger.info('indices added to archive')
        except Exception:
            msg = 'failed adding indices to archive'
            logger.exception(msg)
            raise Exception(msg)

        archive_references = None
        try:
            archive_references = archive(ncs_references, format=archive_format)
            logger.info('indices reference added to archive')
        except Exception:
            msg = 'failed adding reference indices to archive'
            logger.exception(msg)
            raise Exception(msg)

        archive_prediction = None
        try:
            archive_prediction = archive(species_files, format=archive_format)
            logger.info('species_files added to archive')
        except Exception:
            msg = 'failed adding species_files indices to archive'
            logger.exception(msg)
            raise Exception(msg)

        try:
            from flyingpigeon.visualisation import pdfmerge
            stat_infosconcat = pdfmerge(stat_infos)
            logger.info('stat infos pdfs merged')
        except Exception:
            logger.exception('failed to concat images')
            # fall back to an empty placeholder pdf so setValue below works
            _, stat_infosconcat = tempfile.mkstemp(suffix='.pdf', prefix='foobar-', dir='.')

        self.output_gbif.setValue(occurence_map)
        self.output_PA.setValue(PAmask_png)
        self.output_indices.setValue(archive_indices)
        self.output_reference.setValue(archive_references)
        self.output_prediction.setValue(archive_prediction)
        self.output_info.setValue(stat_infosconcat)
        self.status.set('done', 100)
| |
"""Tests for disk storage broker."""
import os
import pytz
import datetime
import time
import pytest
from . import tmp_dir_fixture # NOQA
from . import tmp_uri_fixture # NOQA
from . import TEST_SAMPLE_DATA
def test_initialise():
    """DiskStorageBroker can be constructed from a plain path."""
    from dtoolcore.storagebroker import DiskStorageBroker
    broker = DiskStorageBroker(uri='/a/path')  # NOQA
def test_create_structure(tmp_dir_fixture):  # NOQA
    """create_structure makes the dataset directory exactly once."""
    from dtoolcore.storagebroker import DiskStorageBroker
    from dtoolcore.storagebroker import StorageBrokerOSError

    # Creating structure over an already-existing directory is an error.
    with pytest.raises(StorageBrokerOSError):
        DiskStorageBroker(tmp_dir_fixture).create_structure()

    dataset_path = os.path.join(tmp_dir_fixture, 'my_proto_dataset')
    broker = DiskStorageBroker(dataset_path)
    assert not os.path.exists(dataset_path)
    broker.create_structure()
    assert os.path.isdir(dataset_path)

    # Intermediate directories are not created implicitly.
    nested_path = os.path.join(tmp_dir_fixture, 'sub', 'my_proto_dataset')
    with pytest.raises(OSError):
        DiskStorageBroker(nested_path).create_structure()
def test_store_and_retrieve_readme(tmp_dir_fixture):  # NOQA
    """put_readme round-trips through get_readme_content."""
    from dtoolcore.storagebroker import DiskStorageBroker
    broker = DiskStorageBroker(os.path.join(tmp_dir_fixture, 'my_proto_dataset'))
    broker.create_structure()
    broker.put_readme('Hello world')
    assert broker.get_readme_content() == 'Hello world'
def test_update_readme(tmp_dir_fixture):  # NOQA
    """update_readme archives each previous version of the readme."""
    from dtoolcore.storagebroker import DiskStorageBroker
    broker = DiskStorageBroker(os.path.join(tmp_dir_fixture, 'my_proto_dataset'))
    broker.create_structure()

    broker.put_readme('Hello world')
    assert broker.get_readme_content() == 'Hello world'
    assert len(broker._list_historical_readme_keys()) == 0

    broker.update_readme('Updated')
    assert broker.get_readme_content() == 'Updated'
    history = broker._list_historical_readme_keys()
    assert len(history) == 1
    with open(history[0]) as fh:
        assert fh.read() == 'Hello world'

    time.sleep(0.1)  # ensure the next historical key gets a distinct timestamp
    broker.update_readme('Updated again')
    assert broker.get_readme_content() == 'Updated again'
    assert len(broker._list_historical_readme_keys()) == 2
def test_store_and_retrieve_admin_metadata(tmp_dir_fixture):  # NOQA
    """Admin metadata written by one broker is visible to another."""
    from dtoolcore.storagebroker import DiskStorageBroker
    dataset_path = os.path.join(tmp_dir_fixture, 'my_proto_dataset')

    writer = DiskStorageBroker(dataset_path)
    writer.create_structure()
    admin_metadata = {'hello': 'world'}
    writer.put_admin_metadata(admin_metadata)

    reader = DiskStorageBroker(dataset_path)
    assert reader.get_admin_metadata() == admin_metadata
def test_put_item(tmp_dir_fixture):  # NOQA
    """put_item makes the item's relpath appear among the handles."""
    from dtoolcore.storagebroker import DiskStorageBroker
    broker = DiskStorageBroker(os.path.join(tmp_dir_fixture, 'my_proto_dataset'))
    broker.create_structure()

    broker.put_item(
        fpath=os.path.join(TEST_SAMPLE_DATA, 'tiny.png'),
        relpath='tiny.png'
    )

    assert 'tiny.png' in list(broker.iter_item_handles())
def test_store_and_retrieve_manifest(tmp_dir_fixture):  # NOQA
    """put_manifest round-trips and is serialized as indented JSON."""
    from dtoolcore.storagebroker import DiskStorageBroker
    broker = DiskStorageBroker(os.path.join(tmp_dir_fixture, 'my_proto_dataset'))
    broker.create_structure()

    manifest = {'a': 'b', 'c': [1, 2, 3]}
    broker.put_manifest(manifest)
    assert broker.get_manifest() == manifest

    # Check the pretty-printed formatting on disk.
    expected_lines = [
        '{',
        '  "a": "b",',
        '  "c": [',
        '    1,',
        '    2,',
        '    3',
        '  ]',
        '}'
    ]
    with open(broker.get_manifest_key()) as fh:
        actual_lines = [line.rstrip() for line in fh]
    assert actual_lines == expected_lines
def test_item_properties(tmp_dir_fixture):  # NOQA
    """item_properties reports size, timestamp, hash and relpath."""
    from dtoolcore.storagebroker import DiskStorageBroker
    broker = DiskStorageBroker(os.path.join(tmp_dir_fixture, 'my_proto_dataset'))
    broker.create_structure()

    input_file_path = os.path.join(TEST_SAMPLE_DATA, 'tiny.png')
    broker.put_item(fpath=input_file_path, relpath='tiny.png')

    handle = list(broker.iter_item_handles())[0]
    props = broker.item_properties(handle)

    # Size of the sample file.
    assert props['size_in_bytes'] == 276

    # Timestamp should be recent and timezone-aware (UTC).
    assert 'utc_timestamp' in props
    item_time = datetime.datetime.fromtimestamp(
        float(props['utc_timestamp']),
        tz=pytz.UTC
    )
    time.sleep(0.1)  # Make tests more robust on Windows.
    elapsed = datetime.datetime.now(tz=pytz.UTC) - item_time
    assert elapsed.days == 0
    assert elapsed.seconds < 20

    # Hash matches an independently computed md5.
    from dtoolcore.filehasher import md5sum_hexdigest
    assert props['hash'] == md5sum_hexdigest(input_file_path)

    assert props['relpath'] == 'tiny.png'
def test_store_and_retrieve_item_metadata(tmp_dir_fixture):  # NOQA
    """add_item_metadata entries are merged by get_item_metadata."""
    from dtoolcore.storagebroker import DiskStorageBroker
    broker = DiskStorageBroker(os.path.join(tmp_dir_fixture, 'my_proto_dataset'))
    broker.create_structure()

    handle = 'dummy'

    # Two metadata fragments under different keys.
    broker.add_item_metadata(handle=handle, key='foo', value='bar')
    broker.add_item_metadata(
        handle=handle,
        key='key',
        value={'subkey': 'subval', 'morekey': 'moreval'}
    )

    # Retrieval merges both fragments into a single dict.
    assert broker.get_item_metadata(handle) == {
        'foo': 'bar',
        'key': {'subkey': 'subval', 'morekey': 'moreval'}
    }
def test_store_and_retrieve_overlay(tmp_dir_fixture):  # NOQA
    """put_overlay round-trips through get_overlay.

    Renamed from ``test_store_and_retrieve_item_metadata``: that name
    duplicated the test above (flake8 F811), so pytest only collected
    one of the two and this overlay check was silently skipped.
    """
    from dtoolcore.storagebroker import DiskStorageBroker
    destination_path = os.path.join(tmp_dir_fixture, 'my_proto_dataset')
    storagebroker = DiskStorageBroker(destination_path)
    storagebroker.create_structure()

    example_overlay = {
        'abcdef': 1,
        'ghijkl': 2
    }

    storagebroker.put_overlay(
        overlay_name="example",
        overlay=example_overlay
    )

    retrieved_overlay = storagebroker.get_overlay('example')
    assert example_overlay == retrieved_overlay
def test_post_freeze_hook(tmp_dir_fixture):  # NOQA
    """post_freeze_hook removes the temporary metadata fragments dir."""
    from dtoolcore.storagebroker import DiskStorageBroker
    broker = DiskStorageBroker(os.path.join(tmp_dir_fixture, 'my_proto_dataset'))
    broker.create_structure()

    # Must not raise even though .dtool/tmp_fragments was never created.
    broker.post_freeze_hook()

    broker.add_item_metadata('dummy', key='foo', value='bar')
    assert os.path.isdir(broker._metadata_fragments_abspath)

    broker.post_freeze_hook()
    assert not os.path.isdir(broker._metadata_fragments_abspath)
def test_has_admin_metadata(tmp_dir_fixture):  # NOQA
    """has_admin_metadata flips to True only after put_admin_metadata."""
    from dtoolcore.storagebroker import DiskStorageBroker
    broker = DiskStorageBroker(os.path.join(tmp_dir_fixture, 'my_proto_dataset'))
    assert not broker.has_admin_metadata()

    broker.create_structure()
    assert not broker.has_admin_metadata()

    broker.put_admin_metadata({'hello': 'world'})
    assert broker.has_admin_metadata()
def test_list_dataset_uris(tmp_uri_fixture):  # NOQA
    """list_dataset_uris finds every dataset under a base URI."""
    import dtoolcore
    from dtoolcore.storagebroker import DiskStorageBroker

    # Nothing there yet.
    assert DiskStorageBroker.list_dataset_uris(
        base_uri=tmp_uri_fixture,
        config_path=None
    ) == []

    # Create two proto datasets under the base URI.
    expected_uris = []
    for name in ["test_ds_1", "test_ds_2"]:
        proto_dataset = dtoolcore.generate_proto_dataset(
            admin_metadata=dtoolcore.generate_admin_metadata(name),
            base_uri=tmp_uri_fixture,
        )
        proto_dataset.create()
        expected_uris.append(proto_dataset.uri)

    actual_uris = DiskStorageBroker.list_dataset_uris(
        base_uri=tmp_uri_fixture,
        config_path=None
    )
    assert set(actual_uris) == set(expected_uris)
def test_pre_freeze_hook(tmp_dir_fixture):  # NOQA
    """pre_freeze_hook rejects files outside the managed structure."""
    from dtoolcore.storagebroker import DiskStorageBroker
    dataset_path = os.path.join(tmp_dir_fixture, 'my_proto_dataset')
    broker = DiskStorageBroker(dataset_path)
    broker.create_structure()

    # A file inside the data directory is fine.
    with open(os.path.join(broker._data_abspath, "sample.txt"), "w") as fh:
        fh.write("some sample data")
    broker.pre_freeze_hook()

    # A rogue file at the dataset root triggers a validation warning.
    with open(os.path.join(dataset_path, "rogue.txt"), "w") as fh:
        fh.write("I should not be here")

    from dtoolcore.storagebroker import DiskStorageBrokerValidationWarning
    with pytest.raises(DiskStorageBrokerValidationWarning):
        broker.pre_freeze_hook()
def test_unix_relpaths_from_iter_item_handles(tmp_dir_fixture):  # NOQA
    """Item handles use forward slashes regardless of platform."""
    from dtoolcore.storagebroker import DiskStorageBroker
    broker = DiskStorageBroker(os.path.join(tmp_dir_fixture, 'my_proto_dataset'))
    broker.create_structure()

    # Add a data file nested one level down.
    data_subdir = os.path.join(broker._data_abspath, "level")
    os.mkdir(data_subdir)
    with open(os.path.join(data_subdir, "sample.txt"), "w") as fh:
        fh.write("some sample data")

    handles = list(broker.iter_item_handles())
    assert handles == ["level/sample.txt"]
def test_put_get_annotation(tmp_dir_fixture):  # NOQA
    """Annotations round-trip with their JSON types preserved."""
    from dtoolcore.storagebroker import DiskStorageBroker
    broker = DiskStorageBroker(os.path.join(tmp_dir_fixture, 'my_proto_dataset'))
    broker.create_structure()

    # No annotations initially.
    assert broker.list_annotation_names() == []

    # Strings, ints, bools, lists and dicts all round-trip.
    broker.put_annotation("project", "value")
    assert broker.get_annotation("project") == "value"

    broker.put_annotation("project", 1)
    retrieved = broker.get_annotation("project")
    assert retrieved == 1
    assert type(retrieved) is int

    broker.put_annotation("project", True)
    assert broker.get_annotation("project") is True

    broker.put_annotation("project", [1, 2, 3])
    assert broker.get_annotation("project") == [1, 2, 3]

    broker.put_annotation("project", {"a": 1})
    assert broker.get_annotation("project") == {"a": 1}

    # Repeated puts under the same name yield a single annotation.
    assert broker.list_annotation_names() == ["project"]
def test_put_text_creates_missing_subdirectories(tmp_dir_fixture):  # NOQA
    """put_text recreates any missing parent directories of its key."""
    from dtoolcore.storagebroker import DiskStorageBroker
    dataset_path = os.path.join(tmp_dir_fixture, 'my_proto_dataset')
    broker = DiskStorageBroker(dataset_path)
    assert not os.path.exists(dataset_path)
    broker.create_structure()
    assert os.path.isdir(dataset_path)
    assert os.path.isdir(broker._annotations_abspath)

    # Simulate a missing parent directory.
    os.rmdir(broker._annotations_abspath)
    assert not os.path.isdir(broker._annotations_abspath)

    annotation_key = os.path.join(broker._annotations_abspath, "a.json")
    broker.put_text(annotation_key, "{}")

    assert os.path.isdir(broker._annotations_abspath)
    assert os.path.isfile(annotation_key)
    assert not os.path.isdir(annotation_key)
| |
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
    """Font used in treemap hover labels."""

    # class properties
    # --------------------
    _parent_path_str = "treemap.hoverlabel"
    _path_str = "treemap.hoverlabel.font"
    _valid_props = {"color", "colorsrc", "family", "familysrc", "size", "sizesrc"}

    @property
    def color(self):
        """
        The 'color' property is a color and may be specified as a hex
        string (e.g. '#ff0000'), an rgb/rgba, hsl/hsla or hsv/hsva
        string, a named CSS color, or a list/array of any of the above.

        Returns
        -------
        str|numpy.ndarray
        """
        return self["color"]

    @color.setter
    def color(self, val):
        self["color"] = val

    @property
    def colorsrc(self):
        """
        Sets the source reference on Chart Studio Cloud for color.
        Must be specified as a string or a plotly.grid_objs.Column.

        Returns
        -------
        str
        """
        return self["colorsrc"]

    @colorsrc.setter
    def colorsrc(self, val):
        self["colorsrc"] = val

    @property
    def family(self):
        """
        HTML font family - the typeface applied by the web browser.
        Provide multiple comma-separated families to express a
        preference order. May be a non-empty string or a tuple, list,
        or one-dimensional numpy array of such strings.

        Returns
        -------
        str|numpy.ndarray
        """
        return self["family"]

    @family.setter
    def family(self, val):
        self["family"] = val

    @property
    def familysrc(self):
        """
        Sets the source reference on Chart Studio Cloud for family.
        Must be specified as a string or a plotly.grid_objs.Column.

        Returns
        -------
        str
        """
        return self["familysrc"]

    @familysrc.setter
    def familysrc(self, val):
        self["familysrc"] = val

    @property
    def size(self):
        """
        The 'size' property is a number in the interval [1, inf], or a
        tuple, list, or one-dimensional numpy array of such numbers.

        Returns
        -------
        int|float|numpy.ndarray
        """
        return self["size"]

    @size.setter
    def size(self, val):
        self["size"] = val

    @property
    def sizesrc(self):
        """
        Sets the source reference on Chart Studio Cloud for size.
        Must be specified as a string or a plotly.grid_objs.Column.

        Returns
        -------
        str
        """
        return self["sizesrc"]

    @sizesrc.setter
    def sizesrc(self, val):
        self["sizesrc"] = val

    # Self properties description
    # ---------------------------
    @property
    def _prop_descriptions(self):
        return """\
        color

        colorsrc
            Sets the source reference on Chart Studio Cloud for
            color .
        family
            HTML font family - the typeface that will be applied by
            the web browser. The web browser will only be able to
            apply a font if it is available on the system which it
            operates. Provide multiple font families, separated by
            commas, to indicate the preference in which to apply
            fonts if they aren't available on the system. The Chart
            Studio Cloud (at https://chart-studio.plotly.com or on-
            premise) generates images on a server, where only a
            select number of fonts are installed and supported.
            These include "Arial", "Balto", "Courier New", "Droid
            Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
            One", "Old Standard TT", "Open Sans", "Overpass", "PT
            Sans Narrow", "Raleway", "Times New Roman".
        familysrc
            Sets the source reference on Chart Studio Cloud for
            family .
        size

        sizesrc
            Sets the source reference on Chart Studio Cloud for
            size .
        """

    def __init__(
        self,
        arg=None,
        color=None,
        colorsrc=None,
        family=None,
        familysrc=None,
        size=None,
        sizesrc=None,
        **kwargs
    ):
        """
        Construct a new Font object

        Sets the font used in hover labels.

        Parameters
        ----------
        arg
            dict of properties compatible with this constructor or
            an instance of
            :class:`plotly.graph_objs.treemap.hoverlabel.Font`
        color
            Hover label text color.
        colorsrc
            Chart Studio Cloud source reference for color.
        family
            HTML font family applied by the web browser.
        familysrc
            Chart Studio Cloud source reference for family.
        size
            Font size, a number >= 1.
        sizesrc
            Chart Studio Cloud source reference for size.

        Returns
        -------
        Font
        """
        super(Font, self).__init__("font")

        # Short-circuit construction when re-parenting an existing object.
        if "_parent" in kwargs:
            self._parent = kwargs["_parent"]
            return

        # Normalize the positional `arg` into a plain dict we can pop from.
        if arg is None:
            arg = {}
        elif isinstance(arg, self.__class__):
            arg = arg.to_plotly_json()
        elif isinstance(arg, dict):
            arg = _copy.copy(arg)
        else:
            raise ValueError(
                """\
The first argument to the plotly.graph_objs.treemap.hoverlabel.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.treemap.hoverlabel.Font`"""
            )

        # Handle skip_invalid / _validate flags.
        self._skip_invalid = kwargs.pop("skip_invalid", False)
        self._validate = kwargs.pop("_validate", True)

        # Populate the data dict; explicit keyword arguments win over the
        # corresponding entries from `arg`.
        for prop, override in (
            ("color", color),
            ("colorsrc", colorsrc),
            ("family", family),
            ("familysrc", familysrc),
            ("size", size),
            ("sizesrc", sizesrc),
        ):
            value = arg.pop(prop, None)
            if override is not None:
                value = override
            if value is not None:
                self[prop] = value

        # Any remaining entries plus unknown kwargs go through the shared
        # unknown-property handling.
        self._process_kwargs(**dict(arg, **kwargs))

        # Reset skip_invalid
        self._skip_invalid = False
| |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import math
import os
import random
import tempfile
import unittest
from six.moves import range # pylint: disable=redefined-builtin
from tracing.metrics import compare_samples
# Possible values of the 'significance' field emitted by compare_samples:
# the hypothesis test either rejects the null hypothesis (a difference was
# detected), fails to reject it, or cannot decide with the given samples.
REJECT = 'REJECT'
FAIL_TO_REJECT = 'FAIL_TO_REJECT'
NEED_MORE_DATA = 'NEED_MORE_DATA'
def Mean(l):
  """Returns the arithmetic mean of |l|, or 0 for an empty sequence."""
  if not l:
    return 0
  return float(sum(l)) / len(l)
class CompareSamplesUnittest(unittest.TestCase):
  """Tests for compare_samples.CompareSamples.
  The helpers below write deterministic pseudo-random samples to temp files
  in the supported input formats (chartjson scalar, list_of_scalar_values,
  histogram, buildbot output); the tests assert on the 'significance'
  verdict returned by the comparison.
  """
  def setUp(self):
    # Files created by NewJsonTempfile, removed one by one in tearDown.
    self._tempfiles = []
    self._tempdir = tempfile.mkdtemp()
  def tearDown(self):
    # Best-effort cleanup: files may already be gone and the directory may
    # be non-empty if an individual removal failed.
    for tf in self._tempfiles:
      try:
        os.remove(tf)
      except OSError:
        pass
    try:
      os.rmdir(self._tempdir)
    except OSError:
      pass
  def NewJsonTempfile(self, jsonable_contents):
    """Dumps |jsonable_contents| as JSON to a new temp file; returns its path."""
    f_handle, new_json_file = tempfile.mkstemp(
        suffix='.json',
        dir=self._tempdir,
        text=True)
    os.close(f_handle)
    self._tempfiles.append(new_json_file)
    with open(new_json_file, 'w') as f:
      json.dump(jsonable_contents, f)
    return new_json_file
  def MakeMultipleChartJSONHistograms(self, metric, seed, mu, sigma, n, m):
    """Returns paths of |m| histogram files of |n| values each.
    The RNG is seeded once here so the m histograms form a single
    deterministic sequence of samples.
    """
    result = []
    random.seed(seed)
    for _ in range(m):
      result.append(self.MakeChartJSONHistogram(metric, mu, sigma, n))
    return result
  def MakeChartJSONHistogram(self, metric, mu, sigma, n):
    """Creates a histogram for a normally distributed pseudo-random sample.
    This function creates a deterministic pseudo-random sample and stores it in
    chartjson histogram format to facilitate the testing of the sample
    comparison logic.
    For simplicity we use sqrt(n) buckets with equal widths.
    The caller is responsible for seeding the RNG (see
    MakeMultipleChartJSONHistograms) so the sample is deterministic.
    Args:
      metric (str pair): name of chart, name of the trace.
      mu (float): desired mean for the sample
      sigma (float): desired standard deviation for the sample
      n (int): number of values to generate.
    """
    chart_name, trace_name = metric
    values = [random.gauss(mu, sigma) for _ in range(n)]
    bucket_count = int(math.ceil(math.sqrt(len(values))))
    # bucket_count - 1 widths span the value range, so the last bucket's
    # upper bound lies strictly above max(values) and every value lands
    # in some bucket below.
    width = (max(values) - min(values))/(bucket_count - 1)
    prev_bucket = min(values)
    buckets = []
    for _ in range(bucket_count):
      buckets.append({'low': prev_bucket,
                      'high': prev_bucket + width,
                      'count': 0})
      prev_bucket += width
    for value in values:
      for bucket in buckets:
        if value >= bucket['low'] and value < bucket['high']:
          bucket['count'] += 1
          break
    charts = {
        'charts': {
            chart_name: {
                trace_name: {
                    'type': 'histogram',
                    'buckets': buckets
                }
            }
        }
    }
    return self.NewJsonTempfile(charts)
  def MakeChart(self, metric, seed, mu, sigma, n, keys=None):
    """Creates a normally distributed pseudo-random sample. (continuous).
    This function creates a deterministic pseudo-random sample and stores it in
    chartjson format to facilitate the testing of the sample comparison logic.
    Args:
      metric (str pair): name of chart, name of the trace.
      seed (hashable obj): to make the sequences deterministic we seed the RNG.
      mu (float): desired mean for the sample
      sigma (float): desired standard deviation for the sample
      n (int): number of values to generate.
      keys (iterable, optional): grouping keys to attach to the trace.
    """
    chart_name, trace_name = metric
    random.seed(seed)
    values = [random.gauss(mu, sigma) for _ in range(n)]
    charts = {
        'charts': {
            chart_name: {
                trace_name: {
                    'type': 'list_of_scalar_values',
                    'values': values}
            }
        }
    }
    if keys:
      grouping_keys = dict(enumerate(keys))
      charts['charts'][chart_name][trace_name]['grouping_keys'] = grouping_keys
    return self.NewJsonTempfile(charts)
  def MakeNoneValuesChart(self, metric, keys=None):
    """Creates a chart with merged None values.
    Args:
      metric (str pair): name of chart, name of the trace.
      keys (iterable, optional): grouping keys to attach to the trace.
    """
    chart_name, trace_name = metric
    charts = {
        'charts': {
            chart_name: {
                trace_name: {
                    'type': 'list_of_scalar_values',
                    'values': None
                }
            }
        }
    }
    if keys:
      grouping_keys = dict(enumerate(keys))
      charts['charts'][chart_name][trace_name]['grouping_keys'] = grouping_keys
    return self.NewJsonTempfile(charts)
  def MakeCharts(self, metric, seed, mu, sigma, n, keys=None):
    """Returns paths of |n| single-scalar chartjson files (one per 'run')."""
    return [
        self.MakeChartJSONScalar(metric, seed + '%d' % i, mu, sigma, keys)
        for i in range(n)]
  def MakeChartJSONScalar(self, metric, seed, mu, sigma, keys=None):
    """Creates a normally distributed pseudo-random sample. (continuous).
    This function creates a deterministic pseudo-random sample and stores it in
    chartjson format to facilitate the testing of the sample comparison logic.
    Args:
      metric (str pair): name of chart, name of the trace.
      seed (hashable obj): to make the sequences deterministic we seed the RNG.
      mu (float): desired mean for the sample
      sigma (float): desired standard deviation for the sample
      keys (iterable, optional): grouping keys to attach to the trace.
    """
    chart_name, trace_name = metric
    random.seed(seed)
    charts = {
        'charts': {
            chart_name: {
                trace_name: {
                    'type': 'scalar',
                    'value': random.gauss(mu, sigma)}
            }
        }
    }
    if keys:
      grouping_keys = dict(enumerate(keys))
      charts['charts'][chart_name][trace_name]['grouping_keys'] = grouping_keys
    return self.NewJsonTempfile(charts)
  def testCompareClearRegressionListOfScalars(self):
    """Widely separated means must be reported as a significant change."""
    metric = ('some_chart', 'some_trace')
    lower_values = ','.join(self.MakeCharts(metric=metric, seed='lower',
                                            mu=10, sigma=1, n=10))
    higher_values = ','.join(self.MakeCharts(metric=metric, seed='higher',
                                             mu=20, sigma=2, n=10))
    result = json.loads(compare_samples.CompareSamples(
        lower_values, higher_values, '/'.join(metric)).stdout)
    self.assertEqual(result['result']['significance'], REJECT)
  def testCompareListOfScalarsWithNoneValue(self):
    """A chart whose values merged to None must not break the comparison."""
    metric = ('some_chart', 'some_trace')
    lower_values = ','.join(self.MakeCharts(metric=metric, seed='lower',
                                            mu=10, sigma=1, n=10))
    lower_values += ',' + self.MakeNoneValuesChart(metric=metric)
    higher_values = ','.join(self.MakeCharts(metric=metric, seed='higher',
                                             mu=20, sigma=2, n=10))
    result = json.loads(compare_samples.CompareSamples(
        lower_values, higher_values, '/'.join(metric)).stdout)
    self.assertEqual(result['result']['significance'], REJECT)
  def testCompareClearRegressionScalars(self):
    """Same as above but with 'scalar'-type charts (one value per file)."""
    metric = ('some_chart', 'some_trace')
    lower_values = ','.join(
        [self.MakeChartJSONScalar(
            metric=metric, seed='lower', mu=10, sigma=1) for _ in range(10)])
    higher_values = ','.join(
        [self.MakeChartJSONScalar(
            metric=metric, seed='higher', mu=20, sigma=2) for _ in range(10)])
    result = json.loads(compare_samples.CompareSamples(
        lower_values, higher_values, '/'.join(metric)).stdout)
    self.assertEqual(result['result']['significance'], REJECT)
  def testCompareUnlikelyRegressionWithMultipleRuns(self):
    """Nearly identical distributions must fail to reject the null."""
    metric = ('some_chart', 'some_trace')
    lower_values = ','.join(
        self.MakeCharts(
            metric=metric, seed='lower', mu=10, sigma=1, n=20))
    higher_values = ','.join(
        self.MakeCharts(
            metric=metric, seed='higher', mu=10.01, sigma=0.95, n=20))
    result = json.loads(compare_samples.CompareSamples(
        lower_values, higher_values, '/'.join(metric)).stdout)
    self.assertEqual(result['result']['significance'], FAIL_TO_REJECT)
  def testCompareGroupingLabel(self):
    """Metric paths of the form chart/label/trace map to 'label@@chart'."""
    parts = ('some_chart', 'some_label', 'some_trace')
    metric_name = ('%s@@%s' % (parts[1], parts[0]), parts[2])
    lower_values = ','.join(self.MakeCharts(
        metric=metric_name, seed='lower', mu=10, sigma=1, n=10))
    higher_values = ','.join(self.MakeCharts(
        metric=metric_name, seed='higher', mu=20, sigma=2, n=10))
    result = json.loads(compare_samples.CompareSamples(
        lower_values, higher_values, '/'.join(parts)).stdout)
    self.assertEqual(result['result']['significance'], REJECT)
  def testCompareGroupingLabelMissingSummary(self):
    """chart/label paths without a trace fall back to the 'summary' trace."""
    parts = ('some_chart', 'some_label')
    metric_name = ('%s@@%s' % (parts[1], parts[0]), 'summary')
    lower_values = ','.join(self.MakeCharts(
        metric=metric_name, seed='lower', mu=10, sigma=1, n=10))
    higher_values = ','.join(self.MakeCharts(
        metric=metric_name, seed='higher', mu=20, sigma=2, n=10))
    result = json.loads(compare_samples.CompareSamples(
        lower_values, higher_values, '/'.join(parts)).stdout)
    self.assertEqual(result['result']['significance'], REJECT)
  def testCompareInsufficientData(self):
    """Small overlapping samples cannot support a decision."""
    metric = ('some_chart', 'some_trace')
    lower_values = ','.join([self.MakeChart(metric=metric, seed='lower',
                                            mu=10, sigma=1, n=5)])
    higher_values = ','.join([self.MakeChart(metric=metric, seed='higher',
                                             mu=10.40, sigma=0.95, n=5)])
    result = json.loads(compare_samples.CompareSamples(
        lower_values, higher_values, '/'.join(metric)).stdout)
    self.assertEqual(result['result']['significance'], NEED_MORE_DATA)
  def testCompareMissingFile(self):
    """A nonexistent input file must raise rather than silently compare."""
    metric = ('some_chart', 'some_trace')
    lower_values = ','.join([self.MakeChart(metric=metric, seed='lower',
                                            mu=10, sigma=1, n=5)])
    higher_values = '/path/does/not/exist.json'
    with self.assertRaises(RuntimeError):
      compare_samples.CompareSamples(
          lower_values, higher_values, '/'.join(metric))
  def testCompareMissingMetric(self):
    """A metric absent from the charts yields NEED_MORE_DATA, not an error."""
    metric = ('some_chart', 'some_trace')
    lower_values = ','.join([self.MakeChart(metric=metric, seed='lower',
                                            mu=10, sigma=1, n=5)])
    higher_values = ','.join([self.MakeChart(metric=metric, seed='higher',
                                             mu=20, sigma=2, n=5)])
    metric = ('some_chart', 'missing_trace')
    result = json.loads(compare_samples.CompareSamples(
        lower_values, higher_values, '/'.join(metric)).stdout)
    self.assertEqual(result['result']['significance'], NEED_MORE_DATA)
  def testCompareBadChart(self):
    """Malformed chartjson yields NEED_MORE_DATA, not an error."""
    metric = ('some_chart', 'some_trace')
    lower_values = ','.join([self.MakeChart(metric=metric, seed='lower',
                                            mu=10, sigma=1, n=5)])
    higher_values = self.NewJsonTempfile(['obviously', 'not', 'a', 'chart]'])
    result = json.loads(compare_samples.CompareSamples(
        lower_values, higher_values, '/'.join(metric)).stdout)
    self.assertEqual(result['result']['significance'], NEED_MORE_DATA)
  def testCompareBuildbotOutput(self):
    """The 'buildbot' data format parses fixed values out of a log file."""
    bb = os.path.join(os.path.dirname(__file__),
                      'buildbot_output_for_compare_samples_test.txt')
    result = compare_samples.CompareSamples(
        bb, bb, 'DrawCallPerf_gl/score',
        data_format='buildbot')
    result = json.loads(result.stdout)
    self.assertEqual(result['result']['significance'], NEED_MORE_DATA)
    self.assertEqual(Mean(result['sampleA']), 4123)
    self.assertEqual(Mean(result['sampleB']), 4123)
  def testCompareChartJsonHistogram(self):
    """Histogram-typed traces are compared just like scalar lists."""
    metric = ('some_chart', 'some_trace')
    lower_values = ','.join(self.MakeMultipleChartJSONHistograms(
        metric=metric, seed='lower', mu=10, sigma=1, n=100, m=10))
    higher_values = ','.join(self.MakeMultipleChartJSONHistograms(
        metric=metric, seed='higher', mu=20, sigma=2, n=100, m=10))
    result = json.loads(compare_samples.CompareSamples(
        lower_values, higher_values, '/'.join(metric)).stdout)
    self.assertEqual(result['result']['significance'], REJECT)
  def testParseComplexMetricName(self):
    """Long TBMv2-style metric names with grouping keys parse correctly."""
    full_metric_name = ('memory:chrome:all_processes:reported_by_os:'
                        'system_memory:native_heap:'
                        'proportional_resident_size_avg/blank_about/'
                        'blank_about_blank')
    chart_name = ('blank_about@@memory:chrome:all_processes:reported_by_os:'
                  'system_memory:native_heap:proportional_resident_size_avg')
    trace_name = 'blank:about:blank'
    metric = chart_name, trace_name
    keys = 'blank', 'about'
    lower_values = ','.join(self.MakeCharts(metric=metric, seed='lower',
                                            mu=10, sigma=1, n=10, keys=keys))
    higher_values = ','.join(self.MakeCharts(metric=metric, seed='higher',
                                             mu=20, sigma=2, n=10, keys=keys))
    result = compare_samples.CompareSamples(
        lower_values, higher_values, full_metric_name).stdout
    print(result)
    result = json.loads(result)
    self.assertEqual(result['result']['significance'], REJECT)
| |
from numpy import array
from _core import BlenderModule, BlenderResource
__name__ = 'fauxton'
__all__ = ['Action', 'Prop', 'Scene', 'read_scene', 'write_scene']
#===============================================================================
# Private Symbols
#===============================================================================
bl_prop = BlenderModule('''
def create(type_, data):
prop = bpy.data.objects.new('', data)
prop['__type__'] = type_
return prop
def get_position(prop):
return list(prop.location)
def set_position(prop, position):
prop.location = position
def get_rotation(prop):
prop.rotation_mode = 'QUATERNION'
return list(prop.rotation_quaternion)
def set_rotation(prop, rotation):
prop.rotation_mode = 'QUATERNION'
prop.rotation_quaternion = rotation
def get_scale(prop):
return list(prop.scale)
def set_scale(prop, scale):
prop.scale = scale
def get_action(prop):
prop.rotation_mode = 'QUATERNION'
return prop.animation_data.action if prop.animation_data else None
def set_action(prop, action):
if prop.animation_data is None:
prop.animation_data_create()
prop.rotation_mode = 'QUATERNION'
prop.animation_data.action = action
''')
bl_action = BlenderModule('''
def create(type_):
action = bpy.data.actions.new('')
action['__type__'] = type_
return action
def get_position(action):
return action.get('position', [])
def set_position(action, position):
action['position'] = position
for curve in list(action.fcurves):
if curve.data_path == 'location':
action.fcurves.remove(curve)
for i in range(3):
curve = action.fcurves.new('location', i)
curve.keyframe_points.add(len(position))
for j, point in enumerate(position):
curve.keyframe_points[j].co = point[0], point[1 + i]
curve.keyframe_points[j].interpolation = 'LINEAR'
def get_rotation(action):
return action.get('rotation', [])
def set_rotation(action, rotation):
action['rotation'] = rotation
for curve in list(action.fcurves):
if curve.data_path == 'rotation_quaternion':
action.fcurves.remove(curve)
for i in range(4):
curve = action.fcurves.new('rotation_quaternion', i)
curve.keyframe_points.add(len(rotation))
for j, point in enumerate(rotation):
curve.keyframe_points[j].co = point[0], point[1 + i]
curve.keyframe_points[j].interpolation = 'LINEAR'
def get_scale(action):
return action.get('scale', [])
def set_scale(action, scale):
action['scale'] = scale
for curve in list(action.fcurves):
if curve.data_path == 'scale':
action.fcurves.remove(curve)
for i in range(3):
curve = action.fcurves.new('scale', i)
curve.keyframe_points.add(len(scale))
for j, point in enumerate(scale):
curve.keyframe_points[j].co = point[0], point[1 + i]
curve.keyframe_points[j].interpolation = 'LINEAR'
''')
# Blender-side helpers backing the Scene class (runs inside Blender, like
# bl_prop above).  Scenes keep a bidirectional name mapping: 'global_names'
# maps scene-local prop names to bpy object names, 'local_names' the inverse.
#
# Bug fixes in the embedded source:
# - add(): name generation used randint(0, 2*32), i.e. only 65 possible
#   names; once exhausted, unused_name() recursed forever.  Presumably
#   2**32 was intended; changed accordingly.
# - remove(): called the undefined remove_by_key() and used the Prop object
#   itself as a 'local_names' key, although set_by_name() keys that mapping
#   by prop.name.  Now calls remove_by_name() with the prop's local name.
bl_scene = BlenderModule('''
    from random import randint
    def create(type_):
        scene = bpy.data.scenes.new('')
        scene.world = bpy.data.worlds.new('')
        scene.world.horizon_color = (0, 0, 0)
        scene['__type__'] = type_
        scene['global_names'] = {}
        scene['local_names'] = {}
        return scene
    def get_size(scene):
        return len(scene.objects)
    def get_prop_names(scene):
        return scene['global_names'].keys()
    def contains(scene, name):
        return name in scene['global_names']
    def get_by_name(scene, name):
        global_name = scene['global_names'][name]
        return bpy.data.objects[global_name]
    def set_by_name(scene, name, prop):
        if contains(scene, name):
            scene.objects.unlink(get_by_name(scene, name))
        scene.objects.link(prop)
        scene['global_names'][name] = prop.name
        scene['local_names'][prop.name] = name
    def remove_by_name(scene, name):
        prop = get_by_name(scene, name)
        scene.objects.unlink(prop)
        del scene['global_names'][name]
        del scene['local_names'][prop.name]
    def add(scene, prop):
        def unused_name():
            name = str(randint(0, 2**32))
            return name if name not in scene['global_names'] else unused_name()
        set_by_name(scene, unused_name(), prop)
        return prop
    def remove(scene, prop):
        remove_by_name(scene, scene['local_names'][prop.name])
        return prop
    def get_time(scene):
        return scene.frame_current
    def set_time(scene, time):
        scene.frame_current = time
    def read(path):
        with bpy.data.libraries.load(path) as (src, dst):
            local_names = list(src.objects)
            dst.scenes = [src.scenes[0]]
            dst.objects = src.objects
        scene = dst.scenes[0]
        global_names = [o.name for o in dst.objects]
        scene['global_names'] = dict(zip(local_names, global_names))
        scene['local_names'] = dict(zip(global_names, local_names))
        return scene
    def write(path, scene):
        conflicting_scene = bpy.data.scenes.get('0', None)
        if conflicting_scene: conflicting_scene.name = ''
        old_scene_name = scene.name
        scene.name = '0'
        removed_objects = {}
        for s in bpy.data.scenes[1:]:
            removed_objects[s.name] = list(s.objects)
            [s.objects.unlink(o) for o in s.objects]
        old_object_names = {o: o.name for o in bpy.data.objects}
        for global_name, local_name in scene['local_names'].items():
            bpy.data.objects[global_name].name = local_name
        bpy.ops.wm.save_as_mainfile(filepath=path)
        for o, name in old_object_names.items():
            o.name = name
        for s in bpy.data.scenes[1:]:
            [s.objects.link(o) for o in removed_objects[s.name]]
        if conflicting_scene: conflicting_scene.name = '0'
        scene.name = old_scene_name
    ''')
#===============================================================================
# Public Symbols
#===============================================================================
class Prop(BlenderResource):
    r'''
    A graphical object that can be added to a ``Scene``.
    :param BlenderResource data: Resource to wrap.
    :param dict \**properties: Initial values of instance variables.
    :var numpy.ndarray position: 3D spatial location.
    :var numpy.ndarray rotation: 4D rotation quaternion.
    :var numpy.ndarray scale: 3D scale--1 component for each object-space axis.
    :var tuple pose: `(position, rotation, scale)`.
    :var Action action: Animation currently being performed.
    '''
    # Raw docstring: the original non-raw string made "\**" an invalid
    # escape sequence (a SyntaxWarning on modern Python).
    resource_type = 'Object'
    def __new__(cls, data=None, **properties):
        result = bl_prop.create(cls.resource_type, data)
        # Apply initial values through the descriptors below.  (Was a
        # side-effect list comprehension; a plain loop is the idiomatic form.)
        for key, value in properties.items():
            setattr(result, key, value)
        return result
    @property
    def position(self):
        return array(bl_prop.get_position(self))
    @position.setter
    def position(self, position):
        bl_prop.set_position(self, list(map(float, position)))
    @property
    def rotation(self):
        return array(bl_prop.get_rotation(self))
    @rotation.setter
    def rotation(self, rotation):
        bl_prop.set_rotation(self, list(map(float, rotation)))
    @property
    def scale(self):
        return array(bl_prop.get_scale(self))
    @scale.setter
    def scale(self, scale):
        bl_prop.set_scale(self, list(map(float, scale)))
    @property
    def pose(self):
        return self.position, self.rotation, self.scale
    @pose.setter
    def pose(self, pose):
        self.position, self.rotation, self.scale = pose
    @property
    def action(self):
        return bl_prop.get_action(self)
    @action.setter
    def action(self, action):
        # NOTE(review): Action.__new__ accepts keyword properties only, so
        # Action(action) would raise TypeError for any non-Action argument;
        # presumably Action(**action) was intended -- confirm before changing.
        if not isinstance(action, Action):
            action = Action(action)
        bl_prop.set_action(self, action)
class Action(BlenderResource):
    r'''
    A keyframe-based animation that can be applied to a ``Prop``.
    :param dict \**properties: Initial values of instance variables.
    :var numpy.ndarray position: Sequence of (t, x, y, z) keypoints.
    :var numpy.ndarray rotation: Sequence of (t, w, x, y, z) keypoints.
    :var numpy.ndarray scale: Sequence of (t, x, y, z) keypoints.
    '''
    # Raw docstring: the original non-raw string made "\**" an invalid
    # escape sequence (a SyntaxWarning on modern Python).
    resource_type = 'Action'
    def __new__(cls, **properties):
        result = bl_action.create(cls.resource_type)
        # Apply initial values through the descriptors below.  (Was a
        # side-effect list comprehension; a plain loop is the idiomatic form.)
        for key, value in properties.items():
            setattr(result, key, value)
        return result
    @property
    def position(self):
        return array(bl_action.get_position(self), 'f')
    @position.setter
    def position(self, position):
        bl_action.set_position(self, [list(map(float, e)) for e in position])
    @property
    def rotation(self):
        return array(bl_action.get_rotation(self), 'f')
    @rotation.setter
    def rotation(self, rotation):
        bl_action.set_rotation(self, [list(map(float, e)) for e in rotation])
    @property
    def scale(self):
        return array(bl_action.get_scale(self), 'f')
    @scale.setter
    def scale(self, scale):
        bl_action.set_scale(self, [list(map(float, e)) for e in scale])
class Scene(BlenderResource):
    r'''
    A collection of graphical objects.
    :param dict \**properties: Initial values of instance variables.
    Operations defined on a `Scene` `s`:
    ========== =============================================================
    `len(s)`   Return the number of props in `s`.
    `iter(s)`  Return an iterator over the names of props in `s`.
    `n in s`   Return whether a prop is stored in `s` under the name `n`.
    `s[n]`     Return the prop stored in `s` under the name `n`.
    `s[n] = p` Add the prop `p` to `s`, storing it under the name `n`.
    `del s[n]` Remove the prop stored under the name `n` from `s`.
    ========== =============================================================
    '''
    # Raw docstring: the original non-raw string made "\**" an invalid
    # escape sequence (a SyntaxWarning on modern Python).
    resource_type = 'Scene'
    def __new__(cls, **properties):
        result = bl_scene.create(cls.resource_type)
        # Apply initial values through the descriptors below.  (Was a
        # side-effect list comprehension; a plain loop is the idiomatic form.)
        for key, value in properties.items():
            setattr(result, key, value)
        return result
    def __len__(self):
        return bl_scene.get_size(self)
    def __iter__(self):
        return iter(bl_scene.get_prop_names(self))
    def __contains__(self, name):
        return bl_scene.contains(self, name)
    def __getitem__(self, name):
        return bl_scene.get_by_name(self, name)
    def __setitem__(self, name, prop):
        bl_scene.set_by_name(self, name, prop)
    def __delitem__(self, name):
        bl_scene.remove_by_name(self, name)
    @property
    def time(self):
        # Current animation time (Blender's current frame).
        return bl_scene.get_time(self)
    @time.setter
    def time(self, time):
        bl_scene.set_time(self, float(time))
    def add(self, prop):
        '''
        Generate a name for a prop, add it to the scene, then return it.
        :param Prop prop: Prop to add.
        :rtype: Prop
        '''
        return bl_scene.add(self, prop)
    def remove(self, prop):
        '''
        Remove a prop from the scene, then return it.
        :param Prop prop: Prop to remove.
        :rtype: Prop
        '''
        return bl_scene.remove(self, prop)
def read_scene(path):
    '''
    Read a scene from a ".blend" file into memory.
    :param str path: Location on the filesystem.
    :rtype: Scene
    '''
    # Delegates to the Blender-side loader, which also rebuilds the
    # scene's local/global name mappings.
    return bl_scene.read(path)
def write_scene(path, scene):
    '''
    Write a scene in memory to a ".blend" file.
    :param str path: Location on the filesystem.
    :param Scene scene: Scene to write.
    '''
    # Delegates to the Blender-side writer, which temporarily renames the
    # scene and its objects so the saved file uses the scene-local names.
    bl_scene.write(path, scene)
| |
#!/usr/bin/env python3
#
# $Id$
import os
import re
import sys
import subprocess
import argparse
import readchar
import textwrap
import json
import logging
from utils import mixrange
LOGGER = logging.getLogger("read-a-script")
LOGGER.addHandler(logging.StreamHandler())
# Default mapping of lower-cased role name -> macOS `say` voice name.
# The None entry is the fallback voice for roles with no explicit mapping.
# LineSpeaker.__init__ lower-cases the keys and guarantees a None key; main()
# may replace or extend this table from a JSON voices file.
VOICES = {
    # Spring and Port Wine
    "rafe": "tom",
    "arthur": "lee",
    "harold": "daniel",
    "wilfred": "oliver",
    "daisy": "fiona",
    "florence": "kate",
    "hilda": "serena",
    "betsy jane": "karen",
    None: "tom",
    "stage directions": "moira",
    "all": "daniel",
    # Joining The Club
    "tom": "tom",
    "jenny": "karen",
    # The Pigeon with the Silver Foot
    "waiter": "luca",
    "mary": "kate",
    "joanna": "serena",
    "bianca": "karen",
    "lover": "lee",
    "customer": "alice",
    "beggar": "alice",
    "single female voice": "allison",
}
class LineSpeaker(object):
    """Reads a play script aloud with macOS ``say``, one voice per role.
    The role being learned is treated specially: its lines are paused on
    (interactive prompting via readchar), muted, or displayed, depending
    on the constructor flags.
    """
    def __init__(
        self,
        role=None,
        debug=False,
        quiet=False,
        speed=150,
        mute=False,
        clear=False,
        scenes=(),
        display_role=False,
    ):
        """Configure the speaker.
        Args:
            role: lower-cased name of the role being learned.
            debug: enable debug logging.
            quiet: log lines instead of speaking them.
            speed: speech rate in words per minute.
            mute: mute audio (rather than pause) on the learned role's lines.
            clear: clear the terminal before each line.
            scenes: iterable of scene-number range strings (see mixrange);
                empty means all scenes.
            display_role: always print the learned role's line.
        """
        self.role = role
        if debug:
            LOGGER.setLevel(logging.DEBUG)
        self.quiet = quiet
        self.speed = speed
        LOGGER.debug("Speed = {}".format(speed))
        self.mute = mute
        self.clear = clear
        self.display_role = display_role
        self.scenes = set()
        for scene in scenes:
            for num in mixrange(scene):
                LOGGER.debug("Adding scene {}".format(num))
                self.scenes.add(num)
        LOGGER.debug("Scenes: {}".format(self.scenes))
        # Lines without a "ROLE:" prefix are attributed to the previous
        # speaker; start with stage directions.
        self._prev_role = "STAGE DIRECTIONS"
        self._current_scene = 0
        self._voices = {}
        for k, v in VOICES.items():
            try:
                self._voices[k.lower()] = v
            except AttributeError:
                # The None fallback key has no .lower(); keep it as-is.
                self._voices[k] = v
        if None not in self._voices:
            self._voices[None] = "tom"
        # Terminal size (rows, columns), used to wrap displayed lines.
        self._rows, self._columns = list(
            map(int, os.popen("stty size", "r").read().split())
        )
    # Matches "ROLE NAME: dialogue" at the start of a line.
    DIALOGUE_RE = re.compile(r"^([A-Z\s_,\'ac&]+):\s*(.*)")
    @property
    def current_scene(self):
        return self._current_scene
    @current_scene.setter
    def current_scene(self, value):
        LOGGER.debug("=== setting current scene to {}".format(value))
        self._current_scene = value
    def next_scene(self):
        """Advance to the next scene number."""
        self.current_scene += 1
    def perform_line(self, line):
        """Speak one script line, splitting out (parenthesised) directions."""
        if self.scenes and self.current_scene not in self.scenes:
            LOGGER.debug(
                "=== skipping (not in scene {}): {}".format(self.current_scene, line)
            )
            return
        LOGGER.debug("=== perform_line({})".format(line))
        matcher = self.DIALOGUE_RE.match(line)
        if matcher:
            role, line = matcher.groups()
            role_to_speak = self.find_role_to_use(role)
            LOGGER.debug("=== {} === {}".format(role, line))
            if line.strip() == "":
                self.speak_a_line(role.lower(), line, role_to_speak)
                self._prev_role = role.upper()
            else:
                # Speak embedded "(...)" stage directions in their own voice.
                for y in re.split(r"(\([^(]*\))", line):
                    y = y.strip()
                    if y != "":
                        if y.startswith("("):
                            self.speak_a_line("stage directions", y)
                        else:
                            self.speak_a_line(role.lower(), y, role_to_speak)
                            self._prev_role = role.upper()
        else:
            # No "ROLE:" prefix: a continuation of the previous speaker.
            self.perform_line("{}: {}".format(self._prev_role, line))
    def list_scenes_and_roles(self, scriptfile):
        """Print the known role->voice table and a numbered list of scenes."""
        print("\nROLES:\n")
        LOGGER.debug(self._voices)
        for role in sorted(
            self._voices.keys(),
            # Sort the None fallback key last.
            key=lambda x: x if isinstance(x, str) else chr(sys.maxunicode),
        ):
            if role:
                print(
                    "{0}: {1}{2}".format(
                        role.upper(),
                        self._voices[role][0].upper(),
                        self._voices[role][1:].lower(),
                    )
                )
        print("\nSCENES:\n")
        counter = 1
        for line in scriptfile:
            if line.startswith("{scene}"):
                print("{0}: {1}".format(counter, line[7:].strip()))
                counter += 1
    def find_role_to_use(self, role):
        """Resolve a (possibly comma-separated) role header to a voice key.
        Prefers the learned role, then the first listed role with a known
        voice, then None (the fallback voice).
        """
        LOGGER.debug("=== find_role_to_use({0})".format(role))
        # deal with multiple roles
        roles = role.split(",")
        roles = [x.lower().strip() for x in roles]
        if self.role in roles and self.role in self._voices:
            LOGGER.debug("=== using {0}".format(self.role))
            return self.role
        else:
            for r in roles:
                if r in self._voices:
                    LOGGER.debug("=== using {0}".format(r))
                    return r
            else:
                # for-else: no listed role had a voice.
                LOGGER.debug("=== using {0}".format(None))
                return None
    def speak_a_line(self, role, line, role_to_speak=None):
        """Display and/or speak one utterance.
        The learned role is handled interactively (unless muted): the user
        can reveal the line word by word (H), hear it (Y), or skip it.
        """
        if role_to_speak is None:
            role_to_speak = role
        if self.clear:
            subprocess.call(["/usr/bin/clear"])
        if role_to_speak in self._voices:
            voice = self._voices[role_to_speak]
        else:
            # Unknown speaker: use the fallback voice and announce who it is.
            voice = self._voices[None]
            line = "{} says: {}".format(role, line)
        sys.stdout.write("\n{}\n".format(role.upper()))
        if role == self.role and not self.mute:
            if self.display_role:
                sys.stdout.write("{}\n".format(textwrap.fill(line, self._columns)))
            while True:
                sys.stdout.flush()
                say_it = readchar.readchar().lower()
                LOGGER.debug(">>>{}<<<".format(say_it))
                if say_it == "\x03":
                    raise KeyboardInterrupt
                elif say_it == "\x04":
                    raise EOFError
                elif say_it == "?":
                    sys.stdout.write(
                        " Hit H for a hint, Y to read the whole line,\n"
                        " Ctrl-C or Ctrl-D to exit, or any key to move on to the next line\n"
                    )
                elif say_it == "h":
                    # Reveal and speak the line one word at a time.
                    if " " in line:
                        hint, line = re.split(r"\s+", line, 1)
                    else:
                        hint, line = line, None
                    if not self.display_role:
                        sys.stdout.write("{} ".format(hint))
                    self.vocalise(voice, hint, self.mute)
                    if line is None:
                        sys.stdout.write("\n")
                        return
                else:
                    break
        else:
            say_it = "y"
        if not (role == self.role and (self.mute or self.display_role)):
            sys.stdout.write("{}\n".format(textwrap.fill(line, self._columns)))
        if not say_it.lower().startswith("y"):
            return
        self.vocalise(voice, line, mute=(self.mute and role == self.role))
    def vocalise(self, voice, line, mute):
        """Hand the line to `say` (or just log it when quiet)."""
        if self.quiet:
            LOGGER.debug("--- say -v {} {}".format(voice, line))
        else:
            if mute:
                # Mute system audio around the call so the learner rehearses
                # the line silently at the spoken pace.
                subprocess.call(
                    ["/usr/bin/osascript", "-e", "set volume output muted true"]
                )
                # Bug fix: the rate here was hard-coded to "150", silently
                # ignoring the configured --speed for muted lines.
                subprocess.call(
                    ["/usr/bin/say", "-v", voice, "-r", str(self.speed),
                     "--interactive", line]
                )
                subprocess.call(
                    ["/usr/bin/osascript", "-e", "set volume output muted false"]
                )
            else:
                subprocess.call(
                    [
                        "/usr/bin/say",
                        "-v",
                        voice,
                        "-r",
                        str(self.speed),
                        textwrap.fill(line, self._columns),
                    ]
                )
def interactively_get_args(scriptfile):
    """
    It's too hard to remember all these arguments. Let the program do the heavy lifting.
    Asks a series of questions on stdin and returns an argv-style list of
    strings suitable for argparse.parse_args().
    """
    print("You are learning {0}".format(scriptfile.name))
    print("")
    role = None
    while not role:
        role = input("Which role are you learning? ")
    args = ["-r", role]
    no_arg_opts = [
        ["Suppress audio output", "-q"],
        ["Produce debugging output", "-d"],
        ["Mute while delivering the role's line", "-m"],
        ["Clear the screen before each line", "-c"],
        ["Always display the role's line", "--display"],
    ]
    for opt in no_arg_opts:
        val = input("{0}? [y|n, default=n] ".format(opt[0])).lower()
        if val and val[0] == "y":
            args.append(opt[1])
    speed = input("Spoken audio speed (wpm)? [default=180]")
    if speed:
        # Bug fix: int(speed) put an int into the argv list, which made the
        # " ".join(args) below raise TypeError.  Validate with int() but
        # keep the argument as a string.
        args.extend(["-s", str(int(speed))])
    print()
    print("I know the following scenes:")
    speaker = LineSpeaker(role)
    speaker.list_scenes_and_roles(scriptfile)
    print()
    print("Which scenes would you like to rehearse?")
    scenes = input(
        "Enter a set of scene numbers, such as 1,2,4-6 [default=all scenes]: "
    )
    if scenes:
        args.extend(["--scene", scenes])
    print()
    print("Thank you. Next time you could skip this introduction by just running:")
    print("  {0} {1}".format(sys.argv[0], " ".join(args),))
    args.append(scriptfile.name)
    return args
def main():
    """Parse the command line, assemble the voice table, and read the script."""
    parser = argparse.ArgumentParser(description="Learn a script")
    parser.add_argument(
        "-r", "--role", metavar="ROLE", type=str, nargs=1, help="Role you are learning"
    )
    parser.add_argument(
        "-q", "--quiet", action="store_true", help="Don't produce any audio output"
    )
    parser.add_argument(
        "-d", "--debug", action="store_true", help="Debugging output on"
    )
    parser.add_argument(
        "-s",
        "--speed",
        metavar="SPEED",
        type=int,
        default=180,
        help="Speed of speech in wpm (default=180)",
    )
    parser.add_argument(
        "-m",
        "--mute",
        action="store_true",
        help="Mute while delivering the role's line, rather than pausing",
    )
    parser.add_argument(
        "-c", "--clear", action="store_true", help="Clear the screen before each line"
    )
    parser.add_argument(
        "-v",
        "--voices",
        type=argparse.FileType("r"),
        nargs=1,
        help="JSON file containing voices",
    )
    parser.add_argument(
        "-S",
        "--scene",
        action="append",
        dest="scenes",
        default=[],
        help="Only read the specified scene numbers",
    )
    parser.add_argument(
        "--display", action="store_true", help="Always display the role's line"
    )
    parser.add_argument(
        "--list",
        action="store_true",
        help="List all known roles and all scenes by number and exit",
    )
    parser.add_argument(
        "-x",
        "--no-defaults",
        action="store_true",
        help="Ignore defaults; take all arguments from command line "
        "(NB: your voices definition file must include a definition for 'STAGE DIRECTIONS')",
    )
    parser.add_argument(
        "-i", "--interactive", action="store_true", help="Interactively set options"
    )
    parser.add_argument(
        "scriptfile", type=argparse.FileType("r"), help="File containing the script"
    )
    args = parser.parse_args()
    if args.debug:
        LOGGER.setLevel(logging.DEBUG)
        LOGGER.debug("testing debug")
    # The merged voice table is module-level so LineSpeaker picks it up.
    global VOICES
    if args.voices:
        if args.no_defaults:
            LOGGER.debug("Ignoring default voices")
            VOICES = {}
        VOICES.update(json.load(args.voices[0]))
    else:
        # No -v given: fall back to a voices.json next to the script file.
        default_voices = os.path.join(
            os.path.split(args.scriptfile.name)[0], "voices.json"
        )
        if os.path.exists(default_voices):
            print(
                "No voices.json found, but I found one at {0}, which I'm loading".format(
                    default_voices
                )
            )
            VOICES.update(json.load(open(default_voices)))
    if args.interactive:
        # Re-parse using the argv list gathered from an interactive Q&A.
        args = parser.parse_args(interactively_get_args(args.scriptfile))
    if args.role:
        role = args.role[0].lower()
    else:
        role = "_no_role"
    speaker = LineSpeaker(
        role,
        quiet=args.quiet,
        debug=args.debug,
        speed=args.speed,
        mute=args.mute,
        clear=args.clear,
        scenes=args.scenes,
        # voices=VOICES,
        display_role=args.display,
    )
    if args.list:
        speaker.list_scenes_and_roles(args.scriptfile)
        return
    print("You are learning {}".format(role))
    for line in args.scriptfile:
        line = line.strip()
        # "{scene}" lines delimit scenes; their remainder is read aloud as a
        # stage direction.
        if line.startswith("{scene}"):
            speaker.next_scene()
            speaker.perform_line("({})".format(line[7:]))
        elif line != "":
            speaker.perform_line(line)
# Allow use both as a script and as an importable module.
if __name__ == "__main__":
    main()
| |
import time
import datetime
from django.http import Http404
from django.core.paginator import Paginator, InvalidPage, EmptyPage
from django.views.generic.base import View, ContextMixin, TemplateResponseMixin
from blog.models import Post
from blog.settings import BLOG_PAGINATE_BY
__all__ = [
'BlogPostYearArchiveView',
'BlogPostMonthArchiveView',
'BlogPostWeekArchiveView',
'BlogPostWeekDayArchiveView',
'BlogPostDayArchiveView',
'BlogPostUpdatedArchiveView',
'BlogPostArchiveView'
]
class BlogPostArchiveView(TemplateResponseMixin, ContextMixin, View):
    """Top-level archive index: every year with published posts, mapped to
    the months of that year that contain posts."""
    template_name = 'blog/archive/archive.html'
    def get(self, request, **kwargs):
        posts = Post.objects.published()
        years = posts.dates('published', 'year')
        archive = {}
        for year in years:
            # NOTE(review): 'archvie_year' looks like a typo for
            # 'archive_year', but it must match the manager's actual method
            # name (it is used the same way in BlogPostYearArchiveView) --
            # confirm against the manager before renaming.
            archive[year] = Post.objects.archvie_year(year).dates('published', 'month')
        # Bug fix: the mixin context was previously discarded by rebinding
        # 'context' to a fresh dict, losing entries such as 'view'.
        context = self.get_context_data()
        context['archive'] = archive
        return self.render_to_response(context)
class BlogPostYearArchiveView(TemplateResponseMixin, ContextMixin, View):
    """Archive of published posts for a single year; 404 when the year is
    unparsable or has no posts."""
    template_name = "blog/archive/year.html"
    def get(self, request, year, *args, **kwargs):
        try:
            this_year = datetime.date(int(year), 1, 1)
        except ValueError:
            raise Http404
        posts = Post.objects.archvie_year(this_year).select_related()
        if not posts:
            raise Http404
        # Bug fix: the previous fixed +366/-365 day offsets drifted off
        # January 1st around leap years; compute the adjacent years exactly.
        next_year = this_year.replace(year=this_year.year + 1)
        prev_year = this_year.replace(year=this_year.year - 1)
        # Bug fix: merge into the mixin context instead of discarding it.
        context = self.get_context_data()
        context.update({
            'post_list': posts,
            'this_year': this_year,
            'next_year': next_year,
            'prev_year': prev_year,
        })
        return self.render_to_response(context)
class BlogPostMonthArchiveView(TemplateResponseMixin, ContextMixin, View):
    """Archive of published posts for a single month."""
    template_name = "blog/archive/month.html"

    def get(self, request, year, month, *args, **kwargs):
        """Render the month archive; Http404 on bad input or no posts.

        ``month`` is the abbreviated month name (strptime ``%b``).
        """
        try:
            date = datetime.date(*time.strptime(year+month, '%Y%b')[:3])
        except ValueError:
            raise Http404
        posts = Post.objects.archive_month(date).select_related()
        if not posts:
            raise Http404
        first_day = date.replace(day=1)
        # last_day is the first day of the *following* month.
        if first_day.month == 12:
            last_day = first_day.replace(year=first_day.year + 1, month=1)
        else:
            last_day = first_day.replace(month=first_day.month + 1)
        # NOTE(review): next_month lands on the 2nd of the following month
        # (last_day is already the 1st) — preserved as-is; confirm templates
        # only use its year/month parts.
        next_month = last_day + datetime.timedelta(days=1)
        # Bug fix: the original computed first_day - timedelta(days=-1),
        # which *adds* a day and stays inside the same month; prev_month must
        # step back across the month boundary.
        prev_month = first_day - datetime.timedelta(days=1)
        context = self.get_context_data(
            post_list=posts,
            this_month=date,
            next_month=next_month,
            prev_month=prev_month,
        )
        return self.render_to_response(context)
class BlogPostWeekArchiveView(TemplateResponseMixin, ContextMixin, View):
    """Archive of published posts for one week (``%U`` week number) of a year."""
    template_name = "blog/archive/week.html"

    def get(self, request, year, week, *args, **kwargs):
        """Render the week archive; Http404 on bad input or no posts."""
        # Resolve the Sunday ("%w" == 0) of week number ``week`` in ``year``.
        try:
            parsed = time.strptime("%s-0-%s" % (year, week), '%Y-%w-%U')
            first_day = datetime.date(*parsed[:3])
        except ValueError:
            raise Http404
        last_day = first_day + datetime.timedelta(days=7)
        posts = Post.objects.archive_week(first_day, last_day).select_related()
        if not posts:
            raise Http404
        one_day = datetime.timedelta(days=1)
        context = self.get_context_data()
        context = {
            'post_list': posts,
            'this_week': first_day,
            'next_week': last_day + one_day,
            'prev_week': first_day - one_day,
        }
        return self.render_to_response(context)
class BlogPostWeekDayArchiveView(TemplateResponseMixin, ContextMixin, View):
    """Archive of published posts for one weekday of a given week/year."""
    template_name = "blog/archive/weekday.html"

    def get(self, request, year, week, weekday, *args, **kwargs):
        """Render the weekday archive; Http404 on bad input or no posts.

        ``week`` is the ``%U`` week number and ``weekday`` the abbreviated
        day name (``%a``).
        """
        try:
            this_day = datetime.date(*time.strptime("%s-%s-%s" % (year, week, weekday), "%Y-%U-%a")[:3])
        except ValueError:
            raise Http404
        next_day = this_day + datetime.timedelta(days=1)
        # Bug fix: the original computed this_day - timedelta(days=-1), which
        # *adds* one day and made prev_day identical to next_day.
        prev_day = this_day - datetime.timedelta(days=1)
        posts = Post.objects.archive_day(this_day).select_related()
        if not posts:
            raise Http404
        context = self.get_context_data(
            post_list=posts,
            week_number=this_day.strftime("%U"),
            this_day=this_day,
            next_day=next_day,
            prev_day=prev_day,
        )
        return self.render_to_response(context)
class BlogPostDayArchiveView(TemplateResponseMixin, ContextMixin, View):
    """Archive of published posts for a single calendar day."""
    template_name = "blog/archive/day.html"

    def get(self, request, year, month, day, *args, **kwargs):
        """Render the day archive; Http404 on bad input or no posts.

        ``month`` is the abbreviated month name (``%b``), ``day`` the
        zero-padded day of month (``%d``).
        """
        try:
            this_day = datetime.date(*time.strptime(year+month+day, '%Y%b%d')[:3])
        except ValueError:
            raise Http404
        next_day = this_day + datetime.timedelta(days=1)
        # Bug fix: the original computed this_day - timedelta(days=-1), which
        # *adds* one day and made prev_day identical to next_day.
        prev_day = this_day - datetime.timedelta(days=1)
        posts = Post.objects.archive_day(this_day).select_related()
        if not posts:
            raise Http404
        context = self.get_context_data(
            post_list=posts,
            this_day=this_day,
            next_day=next_day,
            prev_day=prev_day,
        )
        return self.render_to_response(context)
class BlogPostUpdatedArchiveView(TemplateResponseMixin, ContextMixin, View):
    """Archive of recently updated posts."""
    template_name = "blog/archive/updated.html"

    def get(self, request, *args, **kwargs):
        """Render the updated-posts archive; Http404 when there are none."""
        context = self.get_context_data()
        updated_posts = Post.objects.updated()
        if not updated_posts:
            raise Http404
        context = {"post_list": updated_posts}
        return self.render_to_response(context)
| |
from django.core.urlresolvers import resolve
from django.http import HttpRequest
from django.http import QueryDict
from django.test import TestCase
from django.test import Client
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.conf.urls.static import static, settings
import json
from registrar.models import Course
from registrar.models import Teacher
from registrar.models import Student
from registrar.models import Exam
from registrar.models import ExamSubmission
from registrar.models import MultipleChoiceQuestion
from registrar.models import MultipleChoiceSubmission
from student.views import exam
# Credentials for the primary test user — the account that owns the records
# exercised by the tests below.
TEST_USER_EMAIL = "ledo@gah.com"
TEST_USER_USERNAME = "Ledo"
TEST_USER_PASSWORD = "ContinentalUnion"
# Credentials for a second, unrelated user, used to verify that one account
# cannot act on another account's records.
TEST_USER_EMAIL2 = "whalesquid@hideauze.com"
TEST_USER_USERNAME2 = "whalesquid"
TEST_USER_PASSWORD2 = "Evolvers"
class ExamTestCase(TestCase):
    """End-to-end tests for the student exam views (list, table, delete,
    answer submission, exam submission).

    setUp builds a shared fixture: two users (the second acts as an
    unrelated account for the access-control test), one course, one exam
    and one multiple-choice question.  Every test drives the URLs through
    Django's test Client as an AJAX client where the view requires it.
    """
    def tearDown(self):
        # Delete courses individually first, then all users, so dependent
        # rows are removed in a predictable order.
        courses = Course.objects.all()
        for course in courses:
            course.delete()
        User.objects.all().delete()
    def setUp(self):
        """Create users, course, exam and one question used by all tests."""
        # Create our Trudy student
        User.objects.create_user(
            email=TEST_USER_EMAIL2,
            username=TEST_USER_USERNAME2,
            password=TEST_USER_PASSWORD2
        )
        user = User.objects.get(email=TEST_USER_EMAIL2)
        teacher = Teacher.objects.create(user=user)
        Student.objects.create(user=user).save()
        # Create our Student.
        User.objects.create_user(
            email=TEST_USER_EMAIL,
            username=TEST_USER_USERNAME,
            password=TEST_USER_PASSWORD
        )
        user = User.objects.get(email=TEST_USER_EMAIL)
        teacher = Teacher.objects.create(user=user)
        student = Student.objects.create(user=user)
        # Create a test course.
        Course.objects.create(
            id=1,
            title="Comics Book Course",
            sub_title="The definitive course on comics!",
            category="",
            teacher=teacher,
        )
        course = Course.objects.get(id=1)
        # Create our assignment(s)
        Exam.objects.create(
            exam_id=1,
            exam_num=1,
            title="Hideauze",
            description="Anime related assignment.",
            worth=50,
            course=course,
        )
        exam = Exam.objects.get(exam_id=1)
        # Create questions
        # NOTE(review): several options are flagged correct (a, d, e) —
        # presumably a multi-answer question; confirm against the model.
        MultipleChoiceQuestion.objects.create(
            question_id=2,
            exam=exam,
            title="Hideauze",
            description="Who where the Hideauze?",
            a="Former Humans",
            a_is_correct=True,
            b="Aliens",
            b_is_correct=False,
            c="Magical or Supernatural Creatures",
            c_is_correct=False,
            d="Transhumanists",
            d_is_correct=True,
            e="Heavenly Creatures",
            e_is_correct=True,
        )
    def get_logged_in_client(self):
        # Fresh client authenticated as the primary test user.
        client = Client()
        client.login(
            username=TEST_USER_USERNAME,
            password=TEST_USER_PASSWORD
        )
        return client
    def test_url_resolves_to_exams_page_view(self):
        """/course/<id>/exams routes to exam.exams_page."""
        found = resolve('/course/1/exams')
        self.assertEqual(found.func, exam.exams_page)
    def test_exams_page_with_no_submissions(self):
        """Exams page renders the course title and exam links."""
        client = self.get_logged_in_client()
        response = client.post('/course/1/exams')
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'Comics Book Course',response.content)
        self.assertIn(b'view_exam(1);',response.content)
    def test_url_resolves_to_exams_table_view(self):
        """/course/<id>/exams_table routes to exam.exams_table."""
        found = resolve('/course/1/exams_table')
        self.assertEqual(found.func, exam.exams_table)
    def test_exams_table_returns_with_no_submissions(self):
        """Exams table partial renders exam rows."""
        client = self.get_logged_in_client()
        response = client.post('/course/1/exams_table')
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'view_exam(1);',response.content)
    def test_url_resolves_to_delete_exam(self):
        """/course/<id>/delete_exam routes to exam.delete_exam."""
        found = resolve('/course/1/delete_exam')
        self.assertEqual(found.func, exam.delete_exam)
    def test_delete_exam_with_no_submissions(self):
        # With no prior submission the view reports a missing record.
        kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
        client = self.get_logged_in_client()
        response = client.post('/course/1/delete_exam',{
            'exam_id': 1,
        }, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['status'], 'failed')
        self.assertEqual(array['message'], 'record does not exist')
    def test_delete_exam_with_submissions_and_correct_user(self):
        # Submitting the exam first creates the record, so the owner can
        # delete it.
        kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
        client = self.get_logged_in_client()
        response = client.post('/course/1/exam/1/submit_exam',{}, **kwargs)
        self.assertEqual(response.status_code, 200)
        response = client.post('/course/1/delete_exam',{
            'exam_id': 1,
        }, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['status'], 'success')
        self.assertEqual(array['message'], 'exam was deleted')
    def test_delete_exam_with_submissions_and_incorrect_user(self):
        # A different account must not see (or delete) the first user's
        # submission record.
        kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
        client = self.get_logged_in_client()
        response = client.post('/course/1/exam/1/submit_exam',{}, **kwargs)
        self.assertEqual(response.status_code, 200)
        client.logout()
        client.login(
            username=TEST_USER_USERNAME2,
            password=TEST_USER_PASSWORD2
        )
        response = client.post('/course/1/delete_exam',{
            'exam_id': 1,
        }, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['status'], 'failed')
        self.assertEqual(array['message'], 'record does not exist')
    def test_url_resolves_to_exam_page_view(self):
        """/course/<id>/exam/<id> routes to exam.exam_page."""
        found = resolve('/course/1/exam/1')
        self.assertEqual(found.func, exam.exam_page)
    def test_assignment_page(self):
        """Exam detail page renders the exam header."""
        client = self.get_logged_in_client()
        response = client.post('/course/1/exam/1')
        self.assertEqual(response.status_code, 200)
        self.assertIn(b'Exam #1',response.content)
    def test_submit_mc_exam_answer_with_submissions(self):
        """Submitting a multiple-choice answer succeeds."""
        kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
        client = self.get_logged_in_client()
        response = client.post('/course/1/exam/1/submit_mc_exam_answer',{
            'question_id': 2,
            'answer': 'A',
        }, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['status'], 'success')
        self.assertEqual(array['message'], 'submitted')
    def test_submit_exam_without_answering_questions(self):
        """Submitting the exam with no answers is still accepted."""
        kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
        client = self.get_logged_in_client()
        response = client.post('/course/1/exam/1/submit_exam',{}, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['message'], 'submitted')
        self.assertEqual(array['status'], 'success')
    def test_submit_quiz_with_answering_questions(self):
        """Answering (via the T/F endpoint) then submitting succeeds."""
        kwargs = {'HTTP_X_REQUESTED_WITH':'XMLHttpRequest'}
        client = self.get_logged_in_client()
        client.post('/course/1/exam/1/submit_tf_exam_answer',{
            'question_id': 1,
            'answer': 'A',
        }, **kwargs)
        response = client.post('/course/1/exam/1/submit_exam',{}, **kwargs)
        self.assertEqual(response.status_code, 200)
        json_string = response.content.decode(encoding='UTF-8')
        array = json.loads(json_string)
        self.assertEqual(array['message'], 'submitted')
        self.assertEqual(array['status'], 'success')
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
import copy
import threading
# Raised when a commit loses a version race: the store already holds a newer
# version of one of the keys being written back.
class ConcurrentUpdate(Exception): pass
# Raised when the store's lock could not be acquired without blocking; the
# caller is expected to retry the operation.
class BusyRetry(Exception): pass
class Value(object):
    """A versioned snapshot of one entry checked out from a Store.

    The snapshot records the version it was read at; commit() hands it
    back to the owning store, which accepts it only if no newer version
    has been stored in the meantime.
    """

    def __init__(self, version, value, store, key):
        self.version = version
        self.value = value
        self.store = store
        self.key = key

    def __repr__(self):
        # Rendered as e.g. Value(3, 'payload') -- version first, then value.
        return "Value%s" % repr((self.version, self.value))

    def set(self, value):
        """Replace the local snapshot; the store is untouched until commit()."""
        self.value = value

    def clone(self):
        """Return an independent snapshot: value deep-copied, store/key shared."""
        return Value(self.version, copy.deepcopy(self.value), self.store, self.key)

    def commit(self):
        """Write this snapshot back to the owning store under our key."""
        self.store.set(self.key, self)
class Collection(dict):
    """A dict of key -> Value snapshots committed back to a Store as one unit."""

    def set_store(self, store):
        # Remember which store the contained snapshots were checked out of.
        self.store = store

    def commit(self):
        # Hand every contained snapshot back to the owning store in one call.
        self.store.set_values(self)
class Store(object):
    """Key/value store with optimistic concurrency control.

    Readers check out *clones* of stored Values (the stored copy is never
    aliased); writers commit via set()/set_values(), which bump the version
    and raise ConcurrentUpdate if the store already holds a newer version.
    The internal lock is only ever polled (acquire(0)); when it is busy,
    BusyRetry tells the caller to try the whole operation again.
    """
    def __init__(self):
        # key -> Value; all mutation happens under self.lock.
        self.store = {}
        self.lock = threading.Lock()
    def get(self, key):
        # Hand out a clone so callers cannot mutate the stored Value in place.
        return self.store[key].clone()
    def set_values(self, D):
        """Commit every Value in the mapping D.

        Raises ConcurrentUpdate on the first version conflict and BusyRetry
        if the lock is contended.  NOTE(review): values committed before a
        conflicting key is reached are NOT rolled back, so a failing commit
        may apply partially.
        """
        Fail = False
        if self.lock.acquire(0):
            try:
                for key in D:
                    value = D[key]
                    # Accept only if the stored version is not newer than the
                    # version this snapshot was checked out at.
                    if not (self.store[key].version > value.version):
                        self.store[key] = Value(value.version+1, copy.deepcopy(value.value), self, key)
                        value.version= value.version+1
                    else:
                        Fail = True
                        break
            finally:
                self.lock.release()
        else:
            raise BusyRetry
        if Fail:
            raise ConcurrentUpdate
    def set(self, key, value):
        """Commit a single Value; same version rules as set_values()."""
        success = False
        if self.lock.acquire(0):
            try:
                if not (self.store[key].version > value.version):
                    self.store[key] = Value(value.version+1, copy.deepcopy(value.value), self, key)
                    value.version= value.version+1
                    success = True
            finally:
                self.lock.release()
        else:
            raise BusyRetry
        if not success:
            raise ConcurrentUpdate
    def using(self, *keys):
        """Check out several keys at once as a Collection, creating missing
        keys with version 0 and value None."""
        D = Collection()
        new = []
        # Grab the values that already exist
        for key in keys:
            if key in self.store:
                D[key] = self.store[key].clone()
            else:
                new.append(key)
        # Now add in the values that don't already exist
        if self.lock.acquire(0):
            try:
                for key in new:
                    self.store[key] = Value(0, None,self,key)
                    D[key] = self.store[key].clone()
            finally:
                self.lock.release()
        else:
            raise BusyRetry
        D.set_store(self)
        return D
    def usevar(self, key):
        """Check out a single key, creating it (version 0, None) if absent."""
        try:
            return self.get(key)
        except KeyError:
            if self.lock.acquire(0):
                try:
                    self.store[key] = Value(0, None,self,key)
                finally:
                    self.lock.release()
            else:
                raise BusyRetry
            return self.get(key)
    def dump(self):
        # Debug helper (Python 2 print statements) -- not part of the API.
        print "DEBUG: Store dump ------------------------------"
        for k in self.store:
            print "   ",k, ":", self.store[k]
        print
# ---------------------------------------------------------------------------
# Inline demo / self-test code (Python 2).  Each `if` guard enables one
# scenario; only the first is active.  The bank-account examples show that a
# stale checkout fails to commit with ConcurrentUpdate.
# ---------------------------------------------------------------------------
if 1:
    S = Store()
    D = S.using("account_one", "account_two", "myaccount")
    D["myaccount"].set(0)
    D["account_one"].set(50)
    D["account_two"].set(100)
    D.commit() # 1
    S.dump()
    D = S.using("account_one", "account_two", "myaccount")
    D["myaccount"].set(D["account_one"].value+D["account_two"].value)
    # A second, overlapping checkout commits first...
    E = S.using("account_one", "myaccount")
    E["myaccount"].set(E["myaccount"].value-100)
    E["account_one"].set(100)
    E.commit() # 2
    # ...so this stale commit is expected to raise ConcurrentUpdate.
    D["account_one"].set(0)
    D["account_two"].set(0)
    D.commit() # 3 - should fail
    S.dump()
if 0:
    # Uncontended variant: both commits come from fresh checkouts, so all
    # succeed.
    S = Store()
    D = S.using("account_one", "account_two", "myaccount")
    D["account_one"].set(50)
    D["account_two"].set(100)
    D.commit()
    S.dump()
    D = S.using("account_one", "account_two", "myaccount")
    D["myaccount"].set(D["account_one"].value+D["account_two"].value)
    D["account_one"].set(0)
    D["account_two"].set(0)
    D.commit()
    S.dump()
if 0:
    # Single-key variant via usevar(): the whole accounts dict is one Value,
    # so the third (stale) commit should conflict.
    S = Store()
    D = S.usevar("accounts")
    D.set({"account_one":50, "account_two":100, "myaccount":0})
    D.commit() # First
    S.dump()
    X = D.value
    X["myaccount"] = X["account_one"] + X["account_two"]
    X["account_one"] = 0
    E = S.usevar("accounts")
    Y = E.value
    Y["myaccount"] = Y["myaccount"]-100
    Y["account_one"]= 100
    E.set(Y)
    E.commit() # Second
    S.dump()
    X["account_two"] = 0
    D.set(X)
    D.commit() # Third
    S.dump()
    print "Committed", D.value["myaccount"]
if 0:
    # Smallest possible example: create, set, commit, re-read, re-commit.
    S = Store()
    greeting = S.usevar("hello")
    print repr(greeting.value)
    greeting.set("Hello World")
    greeting.commit()
    # ------------------------------------------------------
    print greeting
    S.dump()
    # ------------------------------------------------------
    par = S.usevar("hello")
    par.set("Woo")
    par.commit()
    # ------------------------------------------------------
    print greeting
    S.dump()
    # ------------------------------------------------------
    # Re-committing the stale `greeting` only works after its version has
    # been brought up to date (set() then commit()).
    greeting.set("Woo")
    greeting.commit()
    print repr(greeting), repr(greeting.value)
    S.dump()
| |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Analyse the results of subunit fitting.
"""
import sys
import tensorflow as tf
from absl import app
from absl import flags
from absl import gfile
import matplotlib
matplotlib.use("TkAgg")
from matplotlib import pylab
import matplotlib.pyplot as plt
import numpy as np, h5py
import scipy.io as sio
from scipy import ndimage
import random
import re # regular expression matching
# Command-line flags controlling model selection, regularisation strengths,
# RNG seeds, learning rates, data layout and input/output locations.  Several
# flag values are also baked into the checkpoint filename (see main()).
FLAGS = flags.FLAGS
flags.DEFINE_float('lam_w', 0.0001, 'sparsitiy regularization of w')
flags.DEFINE_float('lam_a', 0.0001, 'sparsitiy regularization of a')
flags.DEFINE_integer('ratio_SU', 7, 'ratio of subunits/cells')
flags.DEFINE_float('su_grid_spacing', 3, 'grid spacing')
flags.DEFINE_integer('np_randseed', 23, 'numpy RNG seed')
flags.DEFINE_integer('randseed', 65, 'python RNG seed')
flags.DEFINE_float('eta_w', 1e-3, 'learning rate for optimization functions')
flags.DEFINE_float('eta_a', 1e-2, 'learning rate for optimization functions')
flags.DEFINE_float('bias_init_scale', -1, 'bias initialized at scale*std')
flags.DEFINE_string('model_id', 'relu_window', 'which model to learn?');
flags.DEFINE_string('save_location',
                    '/home/bhaishahster/',
                    'where to store logs and outputs?');
flags.DEFINE_string('data_location',
                    '/home/bhaishahster/data_breakdown/',
                    'where to take data from?')
flags.DEFINE_integer('batchsz', 100, 'batch size for training')
flags.DEFINE_integer('n_chunks', 216, 'number of data chunks') # should be 216
flags.DEFINE_integer('n_b_in_c', 10, 'number of batches in one chunk of data')
flags.DEFINE_integer('window', 3, 'size of window for each subunit in relu_window model')
flags.DEFINE_integer('stride', 3, 'stride for relu_window')
flags.DEFINE_string('folder_name', 'experiment4', 'folder where to store all the data')
def main(argv):
  """Load the newest checkpoint of the selected subunit model and plot it.

  Plots: subunit-to-cell connection matrix `a`, individual subunit filters,
  the shared "mother" subunit (for models that have one), and per-cell
  comparisons of the model-derived STA against the true STA.

  Expects checkpoints under FLAGS.save_location/FLAGS.folder_name and a
  'data_details.mat' summary under FLAGS.data_location.  Assumes a 40x80
  pixel stimulus grid -- TODO confirm against the training pipeline.
  """
  #plt.ion() # interactive plotting
  window = FLAGS.window
  # Each subunit covers a (2*window+1)^2 pixel patch; dimx/dimy is the number
  # of window positions at the configured stride on the 40x80 grid.
  n_pix = (2* window + 1) ** 2
  dimx = np.floor(1 + ((40 - (2 * window + 1))/FLAGS.stride)).astype('int')
  dimy = np.floor(1 + ((80 - (2 * window + 1))/FLAGS.stride)).astype('int')
  nCells = 107
  # load model
  # load filename
  print(FLAGS.model_id)
  with tf.Session() as sess:
    # Recreate the variables for the chosen model so the checkpoint can be
    # restored; short_filename must match the name used at training time.
    if FLAGS.model_id == 'relu':
      # lam_c(X) = sum_s(a_cs relu(k_s.x)) , a_cs>0
      short_filename = ('data_model=' + str(FLAGS.model_id) +
                        '_lam_w=' + str(FLAGS.lam_w) +
                        '_lam_a='+str(FLAGS.lam_a) + '_ratioSU=' + str(FLAGS.ratio_SU) +
                        '_grid_spacing=' + str(FLAGS.su_grid_spacing) + '_normalized_bg')
      w = tf.Variable(np.array(np.random.randn(3200,749), dtype='float32'))
      a = tf.Variable(np.array(np.random.randn(749,107), dtype='float32'))
    if FLAGS.model_id == 'relu_window':
      short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
                        str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
      w = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32')) # exp 5
      a = tf.Variable(np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
    if FLAGS.model_id == 'relu_window_mother':
      short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
                        str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
      w_del = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
      w_mother = tf.Variable(np.array(np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
      a = tf.Variable(np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
    if FLAGS.model_id == 'relu_window_mother_sfm':
      short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
                        str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
      w_del = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
      w_mother = tf.Variable(np.array(np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
      a = tf.Variable(np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
    if FLAGS.model_id == 'relu_window_mother_sfm_exp':
      short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
                        str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
      w_del = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
      w_mother = tf.Variable(np.array(np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
      a = tf.Variable(np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
    if FLAGS.model_id == 'relu_window_exp':
      short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
                        str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
      w = tf.Variable(np.array(0.01+ 0.005*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
      a = tf.Variable(np.array(0.02+np.random.rand(dimx*dimy, nCells),dtype='float32'))
    if FLAGS.model_id == 'relu_window_mother_exp':
      short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
                        str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
      w_del = tf.Variable(np.array(0.1+ 0.05*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
      w_mother = tf.Variable(np.array(np.ones((2 * window + 1, 2 * window + 1, 1, 1)),dtype='float32'))
      a = tf.Variable(np.array(np.random.rand(dimx*dimy, nCells),dtype='float32'))
    if FLAGS.model_id == 'relu_window_a_support':
      short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
                        str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
      w = tf.Variable(np.array(0.001+ 0.0005*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
      a = tf.Variable(np.array(0.002*np.random.rand(dimx*dimy, nCells),dtype='float32'))
    if FLAGS.model_id == 'exp_window_a_support':
      short_filename = ('data_model=' + str(FLAGS.model_id) + '_window=' +
                        str(FLAGS.window) + '_stride=' + str(FLAGS.stride) + '_lam_w=' + str(FLAGS.lam_w) + '_bg')
      w = tf.Variable(np.array(0.001+ 0.0005*np.random.rand(dimx, dimy, n_pix),dtype='float32'))
      a = tf.Variable(np.array(0.002*np.random.rand(dimx*dimy, nCells),dtype='float32'))
    parent_folder = FLAGS.save_location + FLAGS.folder_name + '/'
    FLAGS.save_location = parent_folder +short_filename + '/'
    # get relevant files
    file_list = gfile.ListDirectory(FLAGS.save_location)
    save_filename = FLAGS.save_location + short_filename
    print('\nLoading: ', save_filename)
    bin_files = []
    meta_files = []
    # Split checkpoint files into .meta files and data files.
    for file_n in file_list:
      if re.search(short_filename + '.', file_n):
        if re.search('.meta', file_n):
          meta_files += [file_n]
        else:
          bin_files += [file_n]
    #print(bin_files)
    print(len(meta_files), len(bin_files), len(file_list))
    # get iteration numbers
    iterations = np.array([])
    for file_name in bin_files:
      try:
        iterations = np.append(iterations, int(file_name.split('/')[-1].split('-')[-1]))
      except:
        print('Could not load filename: ' + file_name)
    iterations.sort()
    print(iterations)
    # Restore from the latest (highest-iteration) checkpoint.
    iter_plot = iterations[-1]
    print(int(iter_plot))
    # load tensorflow variables
    saver_var = tf.train.Saver(tf.all_variables())
    restore_file = save_filename + '-' + str(int(iter_plot))
    saver_var.restore(sess, restore_file)
    # plot subunit - cell connections
    plt.figure()
    plt.cla()
    plt.imshow(a.eval(), cmap='gray', interpolation='nearest')
    print(np.shape(a.eval()))
    plt.title('Iteration: ' + str(int(iter_plot)))
    plt.show()
    plt.draw()
    # plot all subunits on 40x80 grid
    # NOTE(review): bare except blocks below deliberately absorb the absence
    # of model-specific variables (w / w_mother / w_del) -- each model type
    # defines only a subset of them.
    try:
      wts = w.eval()
      for isu in range(100):
        fig = plt.subplot(10, 10, isu+1)
        plt.imshow(np.reshape(wts[:, isu],[40, 80]), interpolation='nearest', cmap='gray')
        plt.title('Iteration: ' + str(int(iter_plot)))
        fig.axes.get_xaxis().set_visible(False)
        fig.axes.get_yaxis().set_visible(False)
    except:
      print('w full does not exist? ')
    # plot a few subunits - wmother + wdel
    try:
      wts = w.eval()
      print('wts shape:', np.shape(wts))
      icnt=1
      for idimx in np.arange(dimx):
        for idimy in np.arange(dimy):
          fig = plt.subplot(dimx, dimy, icnt)
          plt.imshow(np.reshape(np.squeeze(wts[idimx, idimy, :]), (2*window+1,2*window+1)), interpolation='nearest', cmap='gray')
          icnt = icnt+1
          fig.axes.get_xaxis().set_visible(False)
          fig.axes.get_yaxis().set_visible(False)
      plt.show()
      plt.draw()
    except:
      print('w does not exist?')
    # plot wmother
    try:
      w_mot = np.squeeze(w_mother.eval())
      print(w_mot)
      plt.imshow(w_mot, interpolation='nearest', cmap='gray')
      plt.title('Mother subunit')
      plt.show()
      plt.draw()
    except:
      print('w mother does not exist')
    # plot wmother + wdel
    try:
      w_mot = np.squeeze(w_mother.eval())
      w_del = np.squeeze(w_del.eval())
      # Effective per-position filter = mother subunit + local delta.
      wts = np.array(np.random.randn(dimx, dimy, (2*window +1)**2))
      for idimx in np.arange(dimx):
        print(idimx)
        for idimy in np.arange(dimy):
          wts[idimx, idimy, :] = np.ndarray.flatten(w_mot) + w_del[idimx, idimy, :]
    except:
      print('w mother + w delta do not exist? ')
    '''
    try:
      icnt=1
      for idimx in np.arange(dimx):
        for idimy in np.arange(dimy):
          fig = plt.subplot(dimx, dimy, icnt)
          plt.imshow(np.reshape(np.squeeze(wts[idimx, idimy, :]), (2*window+1,2*window+1)), interpolation='nearest', cmap='gray')
          fig.axes.get_xaxis().set_visible(False)
          fig.axes.get_yaxis().set_visible(False)
    except:
      print('w mother + w delta plotting error? ')
    # plot wdel
    try:
      w_del = np.squeeze(w_del.eval())
      icnt=1
      for idimx in np.arange(dimx):
        for idimy in np.arange(dimy):
          fig = plt.subplot(dimx, dimy, icnt)
          plt.imshow( np.reshape(w_del[idimx, idimy, :], (2*window+1,2*window+1)), interpolation='nearest', cmap='gray')
          icnt = icnt+1
          fig.axes.get_xaxis().set_visible(False)
          fig.axes.get_yaxis().set_visible(False)
    except:
      print('w delta do not exist? ')
    plt.suptitle('Iteration: ' + str(int(iter_plot)))
    plt.show()
    plt.draw()
    '''
    # select a cell, and show its subunits.
    #try:
    ## Load data summary, get mask
    filename = FLAGS.data_location + 'data_details.mat'
    summary_file = gfile.Open(filename, 'r')
    data_summary = sio.loadmat(summary_file)
    total_mask = np.squeeze(data_summary['totalMaskAccept_log']).T
    stas = data_summary['stas']
    print(np.shape(total_mask))
    # a is 2D
    a_eval = a.eval()
    print(np.shape(a_eval))
    # get softmax numpy
    # For softmax models, convert the raw a into normalized weights.
    if FLAGS.model_id == 'relu_window_mother_sfm' or FLAGS.model_id == 'relu_window_mother_sfm_exp':
      b = np.exp(a_eval) / np.sum(np.exp(a_eval),0)
    else:
      b = a_eval
    plt.figure();
    plt.imshow(b, interpolation='nearest', cmap='gray')
    plt.show()
    plt.draw()
    # plot subunits for multiple cells.
    n_cells = 10
    n_plots_max = 20
    plt.figure()
    for icell_cnt, icell in enumerate(np.arange(n_cells)):
      # Restrict plots to the cell's receptive-field bounding box.
      mask2D = np.reshape(total_mask[icell,: ], [40, 80])
      nz_idx = np.nonzero(mask2D)
      np.shape(nz_idx)
      print(nz_idx)
      ylim = np.array([np.min(nz_idx[0])-1, np.max(nz_idx[0])+1])
      xlim = np.array([np.min(nz_idx[1])-1, np.max(nz_idx[1])+1])
      icnt = -1
      # Keep only the strongest ~0.5% of subunit weights for this cell.
      a_thr = np.percentile(np.abs(b[:, icell]), 99.5)
      n_plots = np.sum(np.abs(b[:, icell]) > a_thr)
      nx = np.ceil(np.sqrt(n_plots)).astype('int')
      ny = np.ceil(np.sqrt(n_plots)).astype('int')
      ifig=0
      ww_sum = np.zeros((40,80))
      for idimx in np.arange(dimx):
        for idimy in np.arange(dimy):
          icnt = icnt + 1
          if(np.abs(b[icnt,icell]) > a_thr):
            ifig = ifig + 1
            fig = plt.subplot(n_cells, n_plots_max, icell_cnt*n_plots_max + ifig + 2)
            # Paste the weighted subunit filter into its window position on
            # the full 40x80 canvas.
            ww = np.zeros((40,80))
            ww[idimx*FLAGS.stride: idimx*FLAGS.stride + (2*window+1),
               idimy*FLAGS.stride: idimy*FLAGS.stride + (2*window+1)] = b[icnt, icell] * (np.reshape(wts[idimx, idimy, :],
                                                                                                     (2*window+1,2*window+1)))
            plt.imshow(ww, interpolation='nearest', cmap='gray')
            plt.ylim(ylim)
            plt.xlim(xlim)
            plt.title(b[icnt,icell])
            fig.axes.get_xaxis().set_visible(False)
            fig.axes.get_yaxis().set_visible(False)
            ww_sum = ww_sum + ww
      # Sum of weighted subunits approximates the cell's STA.
      fig = plt.subplot(n_cells, n_plots_max, icell_cnt*n_plots_max + 2)
      plt.imshow(ww_sum, interpolation='nearest', cmap='gray')
      plt.ylim(ylim)
      plt.xlim(xlim)
      fig.axes.get_xaxis().set_visible(False)
      fig.axes.get_yaxis().set_visible(False)
      plt.title('STA from model')
      fig = plt.subplot(n_cells, n_plots_max, icell_cnt*n_plots_max + 1)
      plt.imshow(np.reshape(stas[:, icell], [40, 80]), interpolation='nearest', cmap='gray')
      plt.ylim(ylim)
      plt.xlim(xlim)
      fig.axes.get_xaxis().set_visible(False)
      fig.axes.get_yaxis().set_visible(False)
      plt.title('True STA')
      plt.show()
      plt.draw()
    #except:
    #  print('a not 2D?')
    # using xlim and ylim, and plot the 'windows' which are relevant with their weights
    # sq_flat maps a (window-x, window-y) position to its flat subunit index.
    sq_flat = np.zeros((dimx, dimy))
    icnt = 0
    for idimx in np.arange(dimx):
      for idimy in np.arange(dimy):
        sq_flat[idimx, idimy] = icnt
        icnt = icnt + 1
    n_cells = 1
    n_plots_max = 10
    plt.figure()
    for icell_cnt, icell in enumerate(np.array([1, 2, 3, 4, 5])):#enumerate(np.arange(n_cells)):
      a_thr = np.percentile(np.abs(b[:, icell]), 99.5)
      mask2D = np.reshape(total_mask[icell,: ], [40, 80])
      nz_idx = np.nonzero(mask2D)
      np.shape(nz_idx)
      print(nz_idx)
      ylim = np.array([np.min(nz_idx[0])-1, np.max(nz_idx[0])+1])
      xlim = np.array([np.min(nz_idx[1])-1, np.max(nz_idx[1])+1])
      print(xlim, ylim)
      # Window index range overlapping the cell's bounding box.
      # NOTE(review): irow iterates idimy over win_startx..win_endx (and icol
      # over win_starty..win_endy) -- the x/y pairing looks swapped; confirm
      # against the intended grid orientation.
      win_startx = np.ceil((xlim[0] - (2*window+1)) / FLAGS.stride)
      win_endx = np.floor((xlim[1]-1) / FLAGS.stride )
      win_starty = np.ceil((ylim[0] - (2*window+1)) / FLAGS.stride)
      win_endy = np.floor((ylim[1]-1) / FLAGS.stride )
      dimx_plot = win_endx - win_startx + 1
      dimy_plot = win_endy - win_starty + 1
      ww_sum = np.zeros((40,80))
      for irow, idimy in enumerate(np.arange(win_startx, win_endx+1)):
        for icol, idimx in enumerate(np.arange(win_starty, win_endy+1)):
          fig = plt.subplot(dimx_plot+1, dimy_plot, (irow + 1) * dimy_plot + icol+1 )
          ww = np.zeros((40,80))
          ww[idimx*FLAGS.stride: idimx*FLAGS.stride + (2*window+1),
             idimy*FLAGS.stride: idimy*FLAGS.stride + (2*window+1)] = (np.reshape(wts[idimx, idimy, :],
                                                                                  (2*window+1,2*window+1)))
          plt.imshow(ww, interpolation='nearest', cmap='gray')
          plt.ylim(ylim)
          plt.xlim(xlim)
          # Green title: weight above threshold; red: below.
          if b[sq_flat[idimx, idimy],icell] > a_thr:
            plt.title(b[sq_flat[idimx, idimy],icell], fontsize=10, color='g')
          else:
            plt.title(b[sq_flat[idimx, idimy],icell], fontsize=10, color='r')
          fig.axes.get_xaxis().set_visible(False)
          fig.axes.get_yaxis().set_visible(False)
          ww_sum = ww_sum + ww * b[sq_flat[idimx, idimy],icell]
      fig = plt.subplot(dimx_plot+1, dimy_plot, 2)
      plt.imshow(ww_sum, interpolation='nearest', cmap='gray')
      plt.ylim(ylim)
      plt.xlim(xlim)
      fig.axes.get_xaxis().set_visible(False)
      fig.axes.get_yaxis().set_visible(False)
      plt.title('STA from model')
      fig = plt.subplot(dimx_plot+1, dimy_plot, 1)
      plt.imshow(np.reshape(stas[:, icell], [40, 80]), interpolation='nearest', cmap='gray')
      plt.ylim(ylim)
      plt.xlim(xlim)
      fig.axes.get_xaxis().set_visible(False)
      fig.axes.get_yaxis().set_visible(False)
      plt.title('True STA')
      plt.show()
      plt.draw()
# Entry point: absl parses the command-line FLAGS and then invokes main().
if __name__ == '__main__':
  app.run()
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import json
from threading import Thread
import publisher
from entity import *
from ..artifactmgt.git.agentgithandler import *
from ..artifactmgt.repository import Repository
from ..util import cartridgeagentutils
from ..util.log import LogFactory
# Carbon super-tenant ID and the repository sub-paths used when computing a
# tenant's local artifact checkout directory.
SUPER_TENANT_ID = "-1234"
SUPER_TENANT_REPO_PATH = "/repository/deployment/server/"
TENANT_REPO_PATH = "/repository/tenants/"
# Module-level logger shared by all event handlers in this module.
log = LogFactory().get_log(__name__)
"""
Event execution related logic
"""
def on_instance_started_event():
    """Handle the INSTANCE_STARTED event by running its event extendables."""
    log.debug("Processing instance started event...")
    # TODO: copy artifacts extension
    event_name = constants.INSTANCE_STARTED_EVENT
    execute_event_extendables(event_name, {})
def create_dummy_interface():
    """Create the LVS dummy network interface via event extendables.

    Config.lvs_virtual_ip is expected to hold "<virtual-ip>|<subnet-mask>"
    (pipe-separated); additional segments beyond the first two are ignored.
    """
    log.debug("Processing lvs dummy interface creation...")
    parts = Config.lvs_virtual_ip.split("|")
    virtual_ip = parts[0]
    subnet_mask = parts[1]
    log.debug("LVS dummy interface creation values %s %s " % (virtual_ip, subnet_mask))
    payload = {
        "EVENT": constants.CREATE_LVS_DUMMY_INTERFACE,
        "LVS_DUMMY_VIRTUAL_IP": virtual_ip,
        "LVS_SUBNET_MASK": subnet_mask,
    }
    execute_event_extendables(constants.CREATE_LVS_DUMMY_INTERFACE, payload)
def on_instance_activated_event():
    """Handle the INSTANCE_ACTIVATED event by running its event extendables."""
    log.debug("Processing instance activated event...")
    event_name = constants.INSTANCE_ACTIVATED_EVENT
    execute_event_extendables(event_name, {})
def on_artifact_updated_event(artifacts_updated_event):
    """Handle an artifact updated event.

    Validates the event against this member's payload, checks out the artifacts
    via the configured checkout plugin, runs the artifact-updated extensions, and
    schedules the periodic artifact update task when enabled.

    :param artifacts_updated_event: event carrying tenant/cluster/repository details
    :return: None
    """
    log.debug(
        "Processing artifact updated event for [tenant] %s [cluster] %s [status] %s"
        % (str(artifacts_updated_event.tenant_id), artifacts_updated_event.cluster_id, artifacts_updated_event.status))
    cluster_id_event = str(artifacts_updated_event.cluster_id).strip()
    cluster_id_payload = Config.cluster_id
    repo_url = str(artifacts_updated_event.repo_url).strip()
    # Guard clauses: drop events that are malformed or addressed to another cluster.
    if repo_url == "":
        log.error("Repository URL is empty. Failed to process artifact updated event.")
        return
    if cluster_id_payload is None or cluster_id_payload == "":
        log.error("Cluster ID in payload is empty. Failed to process artifact updated event.")
        return
    if cluster_id_payload != cluster_id_event:
        log.debug("Cluster ID in artifact updated event does not match. Skipping event handler.")
        return
    repo_password = None
    if artifacts_updated_event.repo_password is not None:
        secret = Config.cartridge_key
        repo_password = cartridgeagentutils.decrypt_password(artifacts_updated_event.repo_password, secret)
    if Config.app_path is None:
        log.error("Repository path is empty. Failed to process artifact updated event.")
        return
    if not validate_repo_path(Config.app_path):
        log.error(
            "Repository path cannot be accessed, or is invalid. Failed to process artifact updated event. [App Path] %s"
            % Config.app_path)
        return
    repo_username = artifacts_updated_event.repo_username
    tenant_id = artifacts_updated_event.tenant_id
    is_multitenant = Config.is_multiTenant
    commit_enabled = artifacts_updated_event.commit_enabled
    # create repo object
    local_repo_path = get_repo_path_for_tenant(str(tenant_id), Config.app_path, is_multitenant)
    repo_info = Repository(repo_url, repo_username, repo_password, local_repo_path, tenant_id, commit_enabled)
    log.info("Executing checkout job on artifact updated event...")
    try:
        Config.artifact_checkout_plugin.plugin_object.checkout(repo_info)
    except Exception as e:
        # Best-effort: a failed checkout is logged but does not abort the event.
        log.exception(
            "Checkout job on artifact updated event failed for tenant: %s %s" % (repo_info.tenant_id, e))
    # execute artifact updated extension
    plugin_values = {"ARTIFACT_UPDATED_CLUSTER_ID": artifacts_updated_event.cluster_id,
                     "ARTIFACT_UPDATED_TENANT_ID": artifacts_updated_event.tenant_id,
                     "ARTIFACT_UPDATED_REPO_URL": artifacts_updated_event.repo_url,
                     "ARTIFACT_UPDATED_REPO_PASSWORD": artifacts_updated_event.repo_password,
                     "ARTIFACT_UPDATED_REPO_USERNAME": artifacts_updated_event.repo_username,
                     "ARTIFACT_UPDATED_STATUS": artifacts_updated_event.status}
    try:
        execute_event_extendables(constants.ARTIFACT_UPDATED_EVENT, plugin_values)
    except Exception as e:
        log.exception("Could not execute plugins for artifact updated event: %s" % e)
    if not Config.activated:
        # publish instance activated event if not yet activated
        publisher.publish_instance_activated_event()
        on_instance_activated_event()
    update_artifacts = Config.read_property(constants.ENABLE_ARTIFACT_UPDATE, True)
    auto_commit = Config.is_commits_enabled
    auto_checkout = Config.is_checkout_enabled
    if update_artifacts:
        try:
            update_interval = int(Config.artifact_update_interval)
        except (ValueError, TypeError) as err:
            # BUG FIX: the original logged the ValueError *class* ("% ValueError")
            # instead of the caught exception, hiding the offending value; it also
            # crashed on a None interval (TypeError), now handled the same way.
            log.debug("Invalid artifact sync interval specified: %s, defaulting to 10 seconds" % err)
            update_interval = 10
        AgentGitHandler.schedule_artifact_update_task(
            repo_info,
            auto_checkout,
            auto_commit,
            update_interval)
def on_instance_cleanup_cluster_event():
    """Trigger the cleanup sequence for an instance cleanup cluster event."""
    log.debug("Processing instance cleanup cluster event...")
    cleanup(constants.INSTANCE_CLEANUP_CLUSTER_EVENT)
def on_instance_cleanup_member_event():
    """Trigger the cleanup sequence for an instance cleanup member event."""
    log.debug("Processing instance cleanup member event...")
    cleanup(constants.INSTANCE_CLEANUP_MEMBER_EVENT)
def on_member_activated_event(member_activated_event):
    """Run member activated extensions once the member is initialized in the topology."""
    svc = member_activated_event.service_name
    clu = member_activated_event.cluster_id
    mem = member_activated_event.member_id
    log.debug(
        "Processing Member activated event: [service] %r [cluster] %r [member] %r"
        % (svc, clu, mem))
    if not is_member_initialized_in_topology(svc, clu, mem):
        log.debug("Member has not initialized, failed to execute member activated event")
        return
    execute_event_extendables(constants.MEMBER_ACTIVATED_EVENT, {})
def on_complete_topology_event(complete_topology_event):
    """Mark the agent initialized when possible, then run complete-topology extensions.

    :param complete_topology_event: event carrying the full topology snapshot
    :raises Exception: when this member's service or cluster is absent from the snapshot
    """
    log.debug("Processing Complete topology event...")
    service_name = Config.service_name
    cluster_id = Config.cluster_id
    member_id = Config.member_id
    if not Config.initialized:
        if is_member_initialized_in_topology(service_name, cluster_id, member_id):
            # Member is present in the topology in Initialized state, so the
            # agent itself can now be flagged as initialized.
            Config.initialized = True
            log.info(
                "Member initialized [member id] %s, [cluster-id] %s, [service] %s"
                % (member_id, cluster_id, service_name))
        else:
            log.info("Member not initialized in topology.")
    topology = complete_topology_event.get_topology()
    service = topology.get_service(service_name)
    if service is None:
        raise Exception("Service not found in topology [service] %s" % service_name)
    cluster = service.get_cluster(cluster_id)
    if cluster is None:
        raise Exception("Cluster id not found in topology [cluster] %s" % cluster_id)
    execute_event_extendables(
        constants.COMPLETE_TOPOLOGY_EVENT,
        {"TOPOLOGY_JSON": json.dumps(topology.json_str),
         "MEMBER_LIST_JSON": json.dumps(cluster.member_list_json)})
def on_member_initialized_event(member_initialized_event):
    """
    Member initialized event is sent by cloud controller once volume attachment and
    ip address allocation is completed successfully
    :param member_initialized_event:
    :return:
    """
    log.debug("Processing Member initialized event...")
    service_name = Config.service_name
    cluster_id = Config.cluster_id
    member_id = Config.member_id
    if not Config.initialized and member_id == member_initialized_event.member_id:
        found = member_exists_in_topology(service_name, cluster_id, member_id)
        log.debug("Member exists: %s" % found)
        if not found:
            raise Exception("Member [member-id] %s not found in topology while processing member initialized "
                            "event. [Topology] %s" % (member_id, TopologyContext.get_topology()))
        Config.initialized = True
        mark_member_as_initialized(service_name, cluster_id, member_id)
        log.info("Instance marked as initialized on member initialized event")
    execute_event_extendables(constants.MEMBER_INITIALIZED_EVENT, {})
def on_complete_tenant_event(complete_tenant_event):
    """Run complete-tenant extensions with the full tenant list serialized as JSON."""
    log.debug("Processing Complete tenant event...")
    tenants = complete_tenant_event.tenant_list_json
    log.debug("Complete tenants:" + json.dumps(tenants))
    execute_event_extendables(
        constants.COMPLETE_TENANT_EVENT,
        {"TENANT_LIST_JSON": json.dumps(tenants)})
def on_member_terminated_event(member_terminated_event):
    """Run member terminated extensions once the member is initialized in the topology."""
    svc = member_terminated_event.service_name
    clu = member_terminated_event.cluster_id
    mem = member_terminated_event.member_id
    log.debug(
        "Processing Member terminated event: [service] %s [cluster] %s [member] %s"
        % (svc, clu, mem))
    if not is_member_initialized_in_topology(svc, clu, mem):
        log.debug("Member has not initialized, failed to execute member terminated event")
        return
    execute_event_extendables(constants.MEMBER_TERMINATED_EVENT, {})
def on_member_suspended_event(member_suspended_event):
    """Run member suspended extensions once the member is initialized in the topology."""
    svc = member_suspended_event.service_name
    clu = member_suspended_event.cluster_id
    mem = member_suspended_event.member_id
    log.debug(
        "Processing Member suspended event: [service] %s [cluster] %s [member] %s"
        % (svc, clu, mem))
    if not is_member_initialized_in_topology(svc, clu, mem):
        log.debug("Member has not initialized, failed to execute member suspended event")
        return
    execute_event_extendables(constants.MEMBER_SUSPENDED_EVENT, {})
def on_member_started_event(member_started_event):
    """Run member started extensions once the member is initialized in the topology."""
    svc = member_started_event.service_name
    clu = member_started_event.cluster_id
    mem = member_started_event.member_id
    log.debug(
        "Processing Member started event: [service] %s [cluster] %s [member] %s"
        % (svc, clu, mem))
    if not is_member_initialized_in_topology(svc, clu, mem):
        log.debug("Member has not initialized, failed to execute member started event")
        return
    execute_event_extendables(constants.MEMBER_STARTED_EVENT, {})
def start_server_extension():
    """Run StartServers extensions once this member is initialized in the topology."""
    log.debug("Processing start server extension...")
    initialized = is_member_initialized_in_topology(
        Config.service_name, Config.cluster_id, Config.member_id)
    if not initialized:
        log.debug("Member has not initialized, failed to execute start server event")
        return
    execute_event_extendables("StartServers", {})
def volume_mount_extension(persistence_mappings_payload):
    """Run VolumeMount extensions with the given persistence mappings payload."""
    log.debug("Processing volume mount extension...")
    execute_event_extendables("VolumeMount", persistence_mappings_payload)
def on_domain_mapping_added_event(domain_mapping_added_event):
    """Run domain-mapping-added extensions with the subscription details of the event."""
    tenant_domain = find_tenant_domain(domain_mapping_added_event.tenant_id)
    log.debug(
        "Processing Domain mapping added event: [tenant-id] " + str(domain_mapping_added_event.tenant_id) +
        " [tenant-domain] " + tenant_domain + " [domain-name] " + domain_mapping_added_event.domain_name +
        " [application-context] " + domain_mapping_added_event.application_context
    )
    values = {
        "SUBSCRIPTION_APPLICATION_ID": domain_mapping_added_event.application_id,
        "SUBSCRIPTION_SERVICE_NAME": domain_mapping_added_event.service_name,
        "SUBSCRIPTION_DOMAIN_NAME": domain_mapping_added_event.domain_name,
        "SUBSCRIPTION_CLUSTER_ID": domain_mapping_added_event.cluster_id,
        "SUBSCRIPTION_TENANT_ID": int(domain_mapping_added_event.tenant_id),
        "SUBSCRIPTION_TENANT_DOMAIN": tenant_domain,
        "SUBSCRIPTION_CONTEXT_PATH": domain_mapping_added_event.context_path,
    }
    execute_event_extendables(constants.DOMAIN_MAPPING_ADDED_EVENT, values)
def on_domain_mapping_removed_event(domain_mapping_removed_event):
    """Run domain-mapping-removed extensions with the subscription details of the event."""
    tenant_domain = find_tenant_domain(domain_mapping_removed_event.tenant_id)
    log.info(
        "Domain mapping removed event received: [tenant-id] " + str(domain_mapping_removed_event.tenant_id) +
        " [tenant-domain] " + tenant_domain + " [domain-name] " + domain_mapping_removed_event.domain_name
    )
    values = {
        "SUBSCRIPTION_APPLICATION_ID": domain_mapping_removed_event.application_id,
        "SUBSCRIPTION_SERVICE_NAME": domain_mapping_removed_event.service_name,
        "SUBSCRIPTION_DOMAIN_NAME": domain_mapping_removed_event.domain_name,
        "SUBSCRIPTION_CLUSTER_ID": domain_mapping_removed_event.cluster_id,
        "SUBSCRIPTION_TENANT_ID": int(domain_mapping_removed_event.tenant_id),
        "SUBSCRIPTION_TENANT_DOMAIN": tenant_domain,
    }
    execute_event_extendables(constants.DOMAIN_MAPPING_REMOVED_EVENT, values)
def on_copy_artifacts_extension(src, dest):
    """Run CopyArtifacts extensions to copy artifacts from *src* to *dest*."""
    log.debug("Processing Copy artifacts extension...")
    execute_event_extendables("CopyArtifacts", {"SOURCE": src, "DEST": dest})
def on_tenant_subscribed_event(tenant_subscribed_event):
    """Run tenant-subscribed extensions for the given event."""
    tenant_id = tenant_subscribed_event.tenant_id
    log.debug(
        "Processing Tenant subscribed event: [tenant] " + str(tenant_id) +
        " [service] " + tenant_subscribed_event.service_name + " [cluster] " + tenant_subscribed_event.cluster_ids
    )
    execute_event_extendables(constants.TENANT_SUBSCRIBED_EVENT, {})
def on_application_signup_removed_event(application_signup_removal_event):
    """Remove the tenant's local git repo when this application's signup is removed,
    then run the application-signup-removal extensions."""
    removed_tenant_id = application_signup_removal_event.tenantId
    removed_app_id = application_signup_removal_event.applicationId
    log.debug(
        "Processing Tenant unsubscribed event: [tenant] " + str(removed_tenant_id) +
        " [application ID] " + str(removed_app_id)
    )
    if Config.application_id == removed_app_id:
        AgentGitHandler.remove_repo(removed_tenant_id)
    execute_event_extendables(constants.APPLICATION_SIGNUP_REMOVAL_EVENT, {})
def cleanup(event):
    """Publish maintenance mode, run the clean extensions, then signal ready-to-shutdown."""
    log.debug("Executing cleanup extension for event %s..." % event)
    publisher.publish_maintenance_mode_event()
    execute_event_extendables("clean", {})
    publisher.publish_instance_ready_to_shutdown_event()
def execute_event_extendables(event, input_values):
    """ Execute the extensions and plugins related to the event
    :param event: The event name string
    :param input_values: the values to be passed to the plugin
    :return:
    """
    try:
        input_values = add_common_input_values(input_values)
    except Exception as e:
        # Best-effort: run the extendables even if the common values could not be added.
        log.error("Error while adding common input values for event extendables: %s" % e)
    input_values["EVENT"] = event
    log.debug("Executing extensions for [event] %s with [input values] %s" % (event, input_values))
    # Run the shell extension first, then any registered plugins.
    execute_extension_for_event(event, input_values)
    execute_plugins_for_event(event, input_values)
def execute_plugins_for_event(event, input_values):
    """ Run every plugin registered for *event*, one at a time, each on its own thread.
    :param str event: The event name string
    :param dict input_values: the values to be passed to the plugin
    :return:
    """
    try:
        registered = Config.plugins.get(event)
        if registered is None:
            log.debug("No plugins registered for event %s" % event)
            return
        for plugin_info in registered:
            log.debug("Executing plugin %s for event %s" % (plugin_info.name, event))
            executor = PluginExecutor(plugin_info, input_values)
            executor.setName("PluginExecutorThreadForPlugin%s" % plugin_info.name)
            log.debug("Starting a PluginExecutor Thread for event %s" % event.__class__.__name__)
            executor.start()
            # Wait for this plugin to finish before starting the next one.
            executor.join()
    except Exception as e:
        log.exception("Error while executing plugin for event %s: %s" % (event, e))
def execute_extension_for_event(event, extension_values):
    """ Run the configured extension executor for *event*, if one is registered.
    :param event: The event name string
    :param extension_values: the values to be passed to the plugin
    :return:
    """
    try:
        executor_info = Config.extension_executor
        if executor_info is None:
            log.debug("No extensions registered for event %s" % event)
            return
        log.debug("Executing extension for event [%s]" % event)
        ext_thread = PluginExecutor(executor_info, extension_values)
        ext_thread.setName("ExtensionExecutorThreadForExtension%s" % event.__class__.__name__)
        log.debug("Starting a PluginExecutor Thread for event extension %s" % event.__class__.__name__)
        ext_thread.start()
        # Block until the extension run completes.
        ext_thread.join()
    except OSError as e:
        log.warn("No extension was found for event %s: %s" % (event, e))
    except Exception as e:
        log.exception("Error while executing extension for event %s: %s" % (event, e))
def get_repo_path_for_tenant(tenant_id, git_local_repo_path, is_multitenant):
    """ Finds the repository path for tenant to clone from the remote repository
    :param tenant_id:
    :param git_local_repo_path:
    :param is_multitenant:
    :return:
    """
    if not is_multitenant:
        # not multi tenant: the app path is used as-is
        repo_path = git_local_repo_path
        log.debug("Repo path returned : %r" % repo_path)
        return repo_path
    if tenant_id == SUPER_TENANT_ID:
        # super tenant, /repository/deploy/server/
        configured = Config.super_tenant_repository_path
        if configured:
            # normalize the configured path to "/.../" form
            if not configured.startswith("/"):
                configured = "/" + configured
            if not configured.endswith("/"):
                configured = configured + "/"
            suffix = configured
        else:
            suffix = SUPER_TENANT_REPO_PATH
        # "app_path/repository/deploy/server/"
        repo_path = git_local_repo_path + suffix
    else:
        # normal tenant, /repository/tenants/tenant_id
        configured = Config.tenant_repository_path
        if configured:
            # normalize the configured path to "/.../" form
            if not configured.startswith("/"):
                configured = "/" + configured
            if not configured.endswith("/"):
                configured = configured + "/"
            suffix = configured + tenant_id
        else:
            suffix = TENANT_REPO_PATH + tenant_id
        # "app_path/repository/tenants/244653444"
        repo_path = git_local_repo_path + suffix
    log.debug("Repo path returned : %r" % repo_path)
    return repo_path
def is_member_initialized_in_topology(service_name, cluster_id, member_id):
    """Return True when the member exists in the topology AND its status is Initialized."""
    if member_exists_in_topology(service_name, cluster_id, member_id):
        topo = TopologyContext.get_topology()
        svc = topo.get_service(service_name)
        if svc is None:
            raise Exception("Service not found in topology [service] %s" % service_name)
        clu = svc.get_cluster(cluster_id)
        if clu is None:
            raise Exception("Cluster id not found in topology [cluster] %s" % cluster_id)
        mem = clu.get_member(member_id)
        if mem is None:
            raise Exception("Member id not found in topology [member] %s" % member_id)
        log.debug("Found member: " + mem.to_json())
        if mem.status == MemberStatus.Initialized:
            return True
    log.debug("Member doesn't exist in topology")
    return False
def member_exists_in_topology(service_name, cluster_id, member_id):
    """Return True when the member is present in the current topology, False otherwise.

    :raises Exception: when the service or cluster itself cannot be found
    """
    log.debug("Checking if member exists in topology : %s, %s, %s, " % (service_name, cluster_id, member_id))
    topo = TopologyContext.get_topology()
    svc = topo.get_service(service_name)
    if svc is None:
        raise Exception("Service not found in topology [service] %s" % service_name)
    clu = svc.get_cluster(cluster_id)
    if clu is None:
        raise Exception("Cluster id not found in topology [cluster] %s" % cluster_id)
    if clu.get_member(member_id) is None:
        log.debug("Member id not found in topology [member] %s" % member_id)
        return False
    return True
def mark_member_as_initialized(service_name, cluster_id, member_id):
    """Flip the member's status to Initialized in the in-memory topology.

    :raises Exception: when the service, cluster or member cannot be found
    """
    topo = TopologyContext.get_topology()
    svc = topo.get_service(service_name)
    if svc is None:
        raise Exception("Service not found in topology [service] %s" % service_name)
    clu = svc.get_cluster(cluster_id)
    if clu is None:
        raise Exception("Cluster id not found in topology [cluster] %s" % cluster_id)
    mem = clu.get_member(member_id)
    if mem is None:
        raise Exception("Member id not found in topology [member] %s" % member_id)
    mem.status = MemberStatus.Initialized
def add_common_input_values(plugin_values):
    """
    Adds the common parameters to be used by the extension scripts
    :param dict[str, str] plugin_values: Dictionary to be added
    :return: Dictionary with updated parameters
    :rtype: dict[str, str]
    """
    if plugin_values is None:
        plugin_values = {}
    elif type(plugin_values) is not dict:
        # non-dict payloads are wrapped under a single well-known key
        plugin_values = {"VALUE1": str(plugin_values)}
    # Values for the plugins to use in case they want to connect to the MB.
    plugin_values["MB_IP"] = Config.mb_ip
    plugin_values["APPLICATION_PATH"] = Config.app_path
    plugin_values["PARAM_FILE_PATH"] = Config.read_property(constants.PARAM_FILE_PATH, False)
    plugin_values["PERSISTENCE_MAPPINGS"] = Config.persistence_mappings
    private_ip, public_ip = get_lb_member_ip(Config.lb_cluster_id)
    # Prefer the LB member's IPs from the topology; fall back to payload values.
    plugin_values["LB_IP"] = private_ip if private_ip is not None else Config.lb_private_ip
    plugin_values["LB_PUBLIC_IP"] = public_ip if public_ip is not None else Config.lb_public_ip
    topology = TopologyContext.get_topology()
    if topology.initialized:
        service = topology.get_service(Config.service_name)
        if service is None:
            raise Exception("Service not found in topology [service] %s" % Config.service_name)
        cluster = service.get_cluster(Config.cluster_id)
        if cluster is None:
            raise Exception("Cluster id not found in topology [cluster] %s" % Config.cluster_id)
        member = cluster.get_member(Config.member_id)
        if member is None:
            raise Exception("Member id not found in topology [member] %s" % Config.member_id)
        add_properties(service.properties, plugin_values, "SERVICE_PROPERTY")
        add_properties(cluster.properties, plugin_values, "CLUSTER_PROPERTY")
        add_properties(member.properties, plugin_values, "MEMBER_PROPERTY")
    plugin_values.update(Config.get_payload_params())
    return clean_process_parameters(plugin_values)
def add_properties(properties, params, prefix):
    """
    Adds the given property list to the parameters list with given prefix in the parameter name
    :param dict[str, str] properties: service properties
    :param dict[str, str] params: target dictionary the prefixed entries are added to
    :param str prefix: prefix prepended (with an underscore) to each property name
    :return: None; *params* is updated in place
    """
    # The original guard also tested `properties.items() is None`, which is dead
    # code: dict.items() never returns None. A falsy check covers None and empty.
    if not properties:
        return
    for key, value in properties.items():
        params[prefix + "_" + key] = str(value)
def get_lb_member_ip(lb_cluster_id):
    """Scan the whole topology for a member belonging to *lb_cluster_id*.

    :return: (private ip, public ip) of the first matching member, or (None, None)
    """
    topology = TopologyContext.get_topology()
    for svc in topology.get_services():
        for clu in svc.get_clusters():
            for mem in clu.get_members():
                if mem.cluster_id == lb_cluster_id:
                    return mem.member_default_private_ip, mem.member_default_public_ip
    return None, None
def clean_process_parameters(params):
    """
    Removes any null valued parameters before passing them to the extension scripts
    :param dict params:
    :return: cleaned parameters (the same dict, mutated in place)
    :rtype: dict
    """
    # Collect keys first: deleting from a dict while iterating its live
    # .items() view raises RuntimeError on Python 3.
    null_keys = [key for key, value in params.items() if value is None]
    for key in null_keys:
        del params[key]
    return params
def find_tenant_domain(tenant_id):
    """Look up the tenant in the tenant context and return its domain.

    :raises RuntimeError: when no tenant exists for *tenant_id*
    """
    matched = TenantContext.get_tenant(tenant_id)
    if matched is None:
        raise RuntimeError("Tenant could not be found: [tenant-id] %s" % str(tenant_id))
    return matched.tenant_domain
def validate_repo_path(app_path):
    """Return True when *app_path* is an absolute filesystem path.

    app path would be ex: /var/www, or /opt/server/data
    """
    is_absolute = os.path.isabs(app_path)
    return is_absolute
class PluginExecutor(Thread):
    """Daemon thread that runs a plugin's entry method with a dictionary of values.

    The thread is marked daemon so a hung plugin cannot keep the agent alive.
    """
    def __init__(self, plugin_info, values):
        Thread.__init__(self)
        self._plugin_info = plugin_info
        self._values = values
        self._log = LogFactory().get_log(__name__)
        self.setDaemon(True)
    def run(self):
        """Invoke the plugin, logging (not propagating) any failure."""
        self._log.debug("Starting the PluginExecutor thread")
        try:
            self._plugin_info.plugin_object.run_plugin(self._values)
        except Exception as e:
            self._log.exception("Error while executing plugin %s: %s" % (self._plugin_info.name, e))
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models as _models
if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
    # ``cls`` callback type accepted by each operation: receives the pipeline
    # response, the deserialized body and the response headers dictionary.
    T = TypeVar('T')
    ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class PacketCapturesOperations(object):
"""PacketCapturesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_02_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
    def _create_initial(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        packet_capture_name,  # type: str
        parameters,  # type: "_models.PacketCapture"
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PacketCaptureResult"
        """Send the initial PUT that creates and starts a packet capture session.

        First leg of the long-running create operation; ``begin_create`` wraps it
        with polling. Raises HttpResponseError unless the service replies 201.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureResult"]
        # Map auth/not-found/conflict statuses to typed exceptions; callers may extend via kwargs.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-02-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json, text/json"
        # Construct URL
        url = self._create_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        # Serialize the request body and issue the PUT through the pipeline.
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PacketCapture')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 201 Created is the only expected success status for this initial request.
        if response.status_code not in [201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
        if cls:
            # Hand the raw response, the deserialized body and (empty) headers to the callback.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore
    def begin_create(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        packet_capture_name,  # type: str
        parameters,  # type: "_models.PacketCapture"
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.PacketCaptureResult"]
        """Create and start a packet capture on the specified VM.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :param parameters: Parameters that define the create packet capture operation.
        :type parameters: ~azure.mgmt.network.v2018_02_01.models.PacketCapture
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either PacketCaptureResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_02_01.models.PacketCaptureResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # No saved state: issue the initial PUT; ``cls=lambda`` keeps the raw
            # pipeline response so the poller can drive the LRO from it.
            raw_result = self._create_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        # These kwargs were only meaningful for the initial request.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Deserialize the final response of the LRO into the result model.
            deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Choose the polling strategy: default ARM polling, no polling, or caller-supplied.
        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its continuation token.
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore
    def get(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        packet_capture_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PacketCaptureResult"
        """Gets a packet capture session by name.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PacketCaptureResult, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2018_02_01.models.PacketCaptureResult
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureResult"]
        # Map auth/not-found/conflict statuses to typed exceptions; callers may extend via kwargs.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-02-01"
        accept = "application/json, text/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # Only 200 OK is a success for a point GET.
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PacketCaptureResult', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore
    def _delete_initial(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        packet_capture_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Issue the initial DELETE request for the delete LRO.

        Returns once the service has accepted the request (202) or the
        resource is already gone (204); polling is handled by the caller
        (``begin_delete``).
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-02-01"

        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers (no body and no Accept header for this call)
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        # 202 = accepted for asynchronous deletion, 204 = nothing to delete.
        if response.status_code not in [202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore
    def begin_delete(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        packet_capture_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Deletes the specified packet capture session.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Without a continuation token, kick the operation off now.
        if cont_token is None:
            raw_result = self._delete_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,  # hand the raw pipeline response to the poller
                **kwargs
            )

        # Consumed by the initial call; must not leak into the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final callback once polling completes (DELETE has no body).
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}'}  # type: ignore
    def _stop_initial(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        packet_capture_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> None
        """Issue the initial POST to the /stop endpoint for the stop LRO.

        Returns once the service has completed (200) or accepted (202) the
        stop request; polling is handled by ``begin_stop``.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-02-01"

        # Construct URL
        url = self._stop_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers (POST with empty body; no Accept header needed)
        header_parameters = {}  # type: Dict[str, Any]

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        if cls:
            return cls(pipeline_response, None, {})

    _stop_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'}  # type: ignore
    def begin_stop(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        packet_capture_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller[None]
        """Stops a specified packet capture session.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the network watcher.
        :type network_watcher_name: str
        :param packet_capture_name: The name of the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Without a continuation token, kick the operation off now.
        if cont_token is None:
            raw_result = self._stop_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,  # hand the raw pipeline response to the poller
                **kwargs
            )

        # Consumed by the initial call; must not leak into the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final callback once polling completes (stop has no body).
            if cls:
                return cls(pipeline_response, None, {})

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        if polling is True: polling_method = ARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_stop.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/stop'}  # type: ignore
    def _get_status_initial(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        packet_capture_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> "_models.PacketCaptureQueryStatusResult"
        """Issue the initial POST for the query-status LRO and deserialize
        the body the service returns; polling is handled by
        ``begin_get_status``.
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureQueryStatusResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-02-01"
        accept = "application/json, text/json"

        # Construct URL
        url = self._get_status_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

        request = self._client.post(url, query_parameters, header_parameters)
        pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response

        if response.status_code not in [200, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)

        # Both 200 (done) and 202 (accepted) carry the same body shape.
        if response.status_code == 200:
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)

        if response.status_code == 202:
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)

        if cls:
            return cls(pipeline_response, deserialized, {})

        return deserialized

    _get_status_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'}  # type: ignore
    def begin_get_status(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        packet_capture_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> LROPoller["_models.PacketCaptureQueryStatusResult"]
        """Query the status of a running packet capture session.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :param packet_capture_name: The name given to the packet capture session.
        :type packet_capture_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: By default, your polling method will be ARMPolling.
         Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.PollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of LROPoller that returns either PacketCaptureQueryStatusResult or the result of cls(response)
        :rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_02_01.models.PacketCaptureQueryStatusResult]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, PollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureQueryStatusResult"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Without a continuation token, kick the operation off now.
        if cont_token is None:
            raw_result = self._get_status_initial(
                resource_group_name=resource_group_name,
                network_watcher_name=network_watcher_name,
                packet_capture_name=packet_capture_name,
                cls=lambda x,y,z: x,  # hand the raw pipeline response to the poller
                **kwargs
            )

        # Consumed by the initial call; must not leak into the polling method.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response once polling completes.
            deserialized = self._deserialize('PacketCaptureQueryStatusResult', pipeline_response)

            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
            'packetCaptureName': self._serialize.url("packet_capture_name", packet_capture_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }

        # final-state-via location: the final GET goes to the Location header URL.
        if polling is True: polling_method = ARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        if cont_token:
            return LROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return LROPoller(self._client, raw_result, get_long_running_output, polling_method)

    begin_get_status.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures/{packetCaptureName}/queryStatus'}  # type: ignore
    def list(
        self,
        resource_group_name,  # type: str
        network_watcher_name,  # type: str
        **kwargs  # type: Any
    ):
        # type: (...) -> Iterable["_models.PacketCaptureListResult"]
        """Lists all packet capture sessions within the specified resource group.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param network_watcher_name: The name of the Network Watcher resource.
        :type network_watcher_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PacketCaptureListResult or the result of cls(response)
        :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_02_01.models.PacketCaptureListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PacketCaptureListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2018-02-01"
        accept = "application/json, text/json"

        def prepare_request(next_link=None):
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')

            if not next_link:
                # First page: build the URL from the operation's template.
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'networkWatcherName': self._serialize.url("network_watcher_name", network_watcher_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')

                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # Follow-up pages: next_link is already a complete URL.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        def extract_data(pipeline_response):
            deserialized = self._deserialize('PacketCaptureListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            # This operation returns a single page, so the next link is None.
            return None, iter(list_of_elem)

        def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return ItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/networkWatchers/{networkWatcherName}/packetCaptures'}  # type: ignore
| |
import os
import re
import sys
import time
from ..utils import (
encodeFilename,
timeconvert,
format_bytes,
)
class FileDownloader(object):
    """File Downloader class.

    File downloader objects are the ones responsible of downloading the
    actual video file and writing it to disk.

    File downloaders accept a lot of parameters. In order not to saturate
    the object constructor with arguments, it receives a dictionary of
    options instead.

    Available options:

    verbose:        Print additional info to stdout.
    quiet:          Do not print messages to stdout.
    ratelimit:      Download speed limit, in bytes/sec.
    retries:        Number of times to retry for HTTP error 5xx
    buffersize:     Size of download buffer in bytes.
    noresizebuffer: Do not automatically resize the download buffer.
    continuedl:     Try to continue downloads if possible.
    noprogress:     Do not print the progress bar.
    logtostderr:    Log messages to stderr instead of stdout.
    consoletitle:   Display progress in console window's titlebar.
    nopart:         Do not use temporary .part files.
    updatetime:     Use the Last-modified header to set output file timestamps.
    test:           Download only first bytes to test the downloader.
    min_filesize:   Skip files smaller than this size
    max_filesize:   Skip files larger than this size

    Subclasses of this one must re-define the real_download method.
    """

    # Options dictionary; assigned per-instance in __init__.
    params = None

    def __init__(self, ydl, params):
        """Create a FileDownloader object with the given options."""
        self.ydl = ydl
        self._progress_hooks = []
        self.params = params

    @staticmethod
    def format_seconds(seconds):
        """Format a duration in seconds as MM:SS or HH:MM:SS ('--:--:--' past 99h)."""
        (mins, secs) = divmod(seconds, 60)
        (hours, mins) = divmod(mins, 60)
        if hours > 99:
            return '--:--:--'
        if hours == 0:
            return '%02d:%02d' % (mins, secs)
        else:
            return '%02d:%02d:%02d' % (hours, mins, secs)

    @staticmethod
    def calc_percent(byte_counter, data_len):
        """Return download progress in percent, or None if the total is unknown."""
        if data_len is None:
            return None
        return float(byte_counter) / float(data_len) * 100.0

    @staticmethod
    def format_percent(percent):
        """Render a percentage right-aligned to 6 characters; '---.-%' if unknown."""
        if percent is None:
            return '---.-%'
        return '%6s' % ('%3.1f%%' % percent)

    @staticmethod
    def calc_eta(start, now, total, current):
        """Estimate remaining seconds from the average rate so far.

        Returns None when the total size is unknown or no rate can be
        computed yet (nothing downloaded, or under one millisecond elapsed).
        """
        if total is None:
            return None
        dif = now - start
        if current == 0 or dif < 0.001:  # One millisecond
            return None
        rate = float(current) / dif
        return int((float(total) - float(current)) / rate)

    @staticmethod
    def format_eta(eta):
        """Render an ETA in seconds as a clock string; '--:--' if unknown."""
        if eta is None:
            return '--:--'
        return FileDownloader.format_seconds(eta)

    @staticmethod
    def calc_speed(start, now, bytes):
        """Average speed in bytes/sec since start; None if not measurable yet."""
        dif = now - start
        if bytes == 0 or dif < 0.001:  # One millisecond
            return None
        return float(bytes) / dif

    @staticmethod
    def format_speed(speed):
        """Render a speed right-aligned to 10 characters; '---b/s' if unknown."""
        if speed is None:
            return '%10s' % '---b/s'
        return '%10s' % ('%s/s' % format_bytes(speed))

    @staticmethod
    def best_block_size(elapsed_time, bytes):
        """Choose the next read size from the last chunk's throughput.

        Moves towards the measured rate but at most doubles or halves the
        previous size, clamped to [1 byte, 4 MiB].
        """
        new_min = max(bytes / 2.0, 1.0)
        new_max = min(max(bytes * 2.0, 1.0), 4194304)  # Do not surpass 4 MB
        if elapsed_time < 0.001:
            return int(new_max)
        rate = bytes / elapsed_time
        if rate > new_max:
            return int(new_max)
        if rate < new_min:
            return int(new_min)
        return int(rate)

    @staticmethod
    def parse_bytes(bytestr):
        """Parse a string indicating a byte quantity into an integer.

        Accepts an optional suffix (k, M, G, ... interpreted as powers of
        1024). Returns None if the string does not parse.
        """
        matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
        if matchobj is None:
            return None
        number = float(matchobj.group(1))
        # An empty suffix indexes to 0, i.e. a multiplier of 1024**0 == 1.
        multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
        return int(round(number * multiplier))

    def to_screen(self, *args, **kargs):
        """Forward a message to the frontend's screen output."""
        self.ydl.to_screen(*args, **kargs)

    def to_stderr(self, message):
        # NOTE(review): routed through to_screen; the frontend decides the
        # actual stream (see the 'logtostderr' option) — confirm intended.
        self.ydl.to_screen(message)

    def to_console_title(self, message):
        """Forward a console-title update to the frontend."""
        self.ydl.to_console_title(message)

    def trouble(self, *args, **kargs):
        self.ydl.trouble(*args, **kargs)

    def report_warning(self, *args, **kargs):
        self.ydl.report_warning(*args, **kargs)

    def report_error(self, *args, **kargs):
        self.ydl.report_error(*args, **kargs)

    def slow_down(self, start_time, byte_counter):
        """Sleep if the download speed is over the rate limit."""
        rate_limit = self.params.get('ratelimit', None)
        if rate_limit is None or byte_counter == 0:
            return
        now = time.time()
        elapsed = now - start_time
        if elapsed <= 0.0:
            return
        speed = float(byte_counter) / elapsed
        if speed > rate_limit:
            # Sleep just long enough for the average speed to fall back
            # to the configured limit.
            time.sleep((byte_counter - rate_limit * (now - start_time)) / rate_limit)

    def temp_name(self, filename):
        """Returns a temporary filename for the given filename."""
        # No .part file for stdout, special files, or when explicitly disabled.
        if self.params.get('nopart', False) or filename == u'-' or \
                (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
            return filename
        return filename + u'.part'

    def undo_temp_name(self, filename):
        """Strip the '.part' suffix from a temporary filename, if present."""
        if filename.endswith(u'.part'):
            return filename[:-len(u'.part')]
        return filename

    def try_rename(self, old_filename, new_filename):
        """Rename the temp file to its final name, reporting (not raising) errors."""
        try:
            if old_filename == new_filename:
                return
            os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
        except (IOError, OSError) as err:
            self.report_error(u'unable to rename file: %s' % str(err))

    def try_utime(self, filename, last_modified_hdr):
        """Try to set the last-modified time of the given file.

        Best effort: returns the parsed timestamp on success, None otherwise.
        """
        if last_modified_hdr is None:
            return
        if not os.path.isfile(encodeFilename(filename)):
            return
        filetime = timeconvert(last_modified_hdr)
        if filetime is None:
            return filetime
        # Ignore obviously invalid dates
        if filetime == 0:
            return
        try:
            # NOTE(review): uses the raw filename, not encodeFilename(filename)
            # as the checks above do — confirm this is intentional.
            os.utime(filename, (time.time(), filetime))
        except Exception:
            # Best effort only: a failed utime must never abort a download.
            # (Was a bare `except:`, which also swallowed KeyboardInterrupt
            # and SystemExit.)
            pass
        return filetime

    def report_destination(self, filename):
        """Report destination filename."""
        self.to_screen(u'[download] Destination: ' + filename)

    def _report_progress_status(self, msg, is_last_line=False):
        """Write one progress line, overwriting the previous line in place
        unless the 'progress_with_newline' option is set."""
        fullmsg = u'[download] ' + msg
        if self.params.get('progress_with_newline', False):
            self.to_screen(fullmsg)
        else:
            if os.name == 'nt':
                # Windows consoles cannot erase to end-of-line, so pad the
                # message with spaces to blank out a longer previous line.
                prev_len = getattr(self, '_report_progress_prev_line_length',
                                   0)
                if prev_len > len(fullmsg):
                    fullmsg += u' ' * (prev_len - len(fullmsg))
                self._report_progress_prev_line_length = len(fullmsg)
                clear_line = u'\r'
            else:
                # ANSI "erase line" when attached to a terminal.
                clear_line = (u'\r\x1b[K' if sys.stderr.isatty() else u'\r')
            self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
        self.to_console_title(u'youtube-dl ' + msg)

    def report_progress(self, percent, data_len_str, speed, eta):
        """Report download progress."""
        if self.params.get('noprogress', False):
            return
        if eta is not None:
            eta_str = self.format_eta(eta)
        else:
            eta_str = 'Unknown ETA'
        if percent is not None:
            percent_str = self.format_percent(percent)
        else:
            percent_str = 'Unknown %'
        speed_str = self.format_speed(speed)

        msg = (u'%s of %s at %s ETA %s' %
               (percent_str, data_len_str, speed_str, eta_str))
        self._report_progress_status(msg)

    def report_progress_live_stream(self, downloaded_data_len, speed, elapsed):
        """Report progress for a live stream, where no total size exists."""
        if self.params.get('noprogress', False):
            return
        downloaded_str = format_bytes(downloaded_data_len)
        speed_str = self.format_speed(speed)
        elapsed_str = FileDownloader.format_seconds(elapsed)
        msg = u'%s at %s (%s)' % (downloaded_str, speed_str, elapsed_str)
        self._report_progress_status(msg)

    def report_finish(self, data_len_str, tot_time):
        """Report download finished."""
        if self.params.get('noprogress', False):
            self.to_screen(u'[download] Download completed')
        else:
            self._report_progress_status(
                (u'100%% of %s in %s' %
                 (data_len_str, self.format_seconds(tot_time))),
                is_last_line=True)

    def report_resuming_byte(self, resume_len):
        """Report attempt to resume at given byte."""
        self.to_screen(u'[download] Resuming download at byte %s' % resume_len)

    def report_retry(self, count, retries):
        """Report retry in case of HTTP error 5xx"""
        self.to_screen(u'[download] Got server HTTP error. Retrying (attempt %d of %d)...' % (count, retries))

    def report_file_already_downloaded(self, file_name):
        """Report file has already been fully downloaded."""
        try:
            self.to_screen(u'[download] %s has already been downloaded' % file_name)
        except UnicodeEncodeError:
            # The console encoding may not be able to represent the filename.
            self.to_screen(u'[download] The file has already been downloaded')

    def report_unable_to_resume(self):
        """Report it was impossible to resume download."""
        self.to_screen(u'[download] Unable to resume')

    def download(self, filename, info_dict):
        """Download to a filename using the info from info_dict
        Return True on success and False otherwise
        """
        # Check file already present
        if self.params.get('continuedl', False) and os.path.isfile(encodeFilename(filename)) and not self.params.get('nopart', False):
            self.report_file_already_downloaded(filename)
            # Still fire progress hooks so callers observe a 'finished' status.
            self._hook_progress({
                'filename': filename,
                'status': 'finished',
                'total_bytes': os.path.getsize(encodeFilename(filename)),
            })
            return True

        return self.real_download(filename, info_dict)

    def real_download(self, filename, info_dict):
        """Real download process. Redefine in subclasses."""
        raise NotImplementedError(u'This method must be implemented by subclasses')

    def _hook_progress(self, status):
        """Invoke every registered progress hook with the status dict."""
        for ph in self._progress_hooks:
            ph(status)

    def add_progress_hook(self, ph):
        """ ph gets called on download progress, with a dictionary with the entries
        * filename: The final filename
        * status: One of "downloading" and "finished"

        It can also have some of the following entries:

        * downloaded_bytes: Bytes on disks
        * total_bytes: Total bytes, None if unknown
        * tmpfilename: The filename we're currently writing to
        * eta: The estimated time in seconds, None if unknown
        * speed: The download speed in bytes/second, None if unknown

        Hooks are guaranteed to be called at least once (with status "finished")
        if the download is successful.
        """
        self._progress_hooks.append(ph)
| |
#-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
import sys
import threading
import types
from datetime import datetime
from dateutil import parser
from dateutil.tz import tzutc
from time import sleep, time
from wsgiref.handlers import format_date_time
from azure import (WindowsAzureData,
WindowsAzureError,
METADATA_NS,
url_quote,
xml_escape,
_create_entry,
_decode_base64_to_text,
_decode_base64_to_bytes,
_encode_base64,
_general_error_handler,
_list_of,
_parse_response_for_dict,
_sign_string,
_unicode_type,
_ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY,
_etree_entity_feed_namespaces,
_make_etree_ns_attr_name,
_get_etree_tag_name_without_ns,
_get_etree_text,
ETree,
_ETreeXmlToObject,
BLOB_SERVICE_HOST_BASE,
TABLE_SERVICE_HOST_BASE,
QUEUE_SERVICE_HOST_BASE,
)
# x-ms-version for storage service.
X_MS_VERSION = '2014-02-14'
class EnumResultsBase(object):
    '''Shared paging state for enumeration results (prefix/marker paging).'''

    def __init__(self):
        # Echoed request parameters plus the continuation marker, all empty
        # until a response is parsed into the object.
        for attr, value in (('prefix', u''),
                            ('marker', u''),
                            ('max_results', 0),
                            ('next_marker', u'')):
            setattr(self, attr, value)
class ContainerEnumResults(EnumResultsBase):
    '''Paged list of blob containers; behaves like a sequence of Container.'''

    def __init__(self):
        super(ContainerEnumResults, self).__init__()
        self.containers = _list_of(Container)

    def __iter__(self):
        '''Iterate directly over the contained Container objects.'''
        return iter(self.containers)

    def __len__(self):
        '''Number of containers in this page.'''
        return len(self.containers)

    def __getitem__(self, index):
        '''Index into the containers of this page.'''
        return self.containers[index]
class Container(WindowsAzureData):
    '''One blob container entry from a List Containers response.'''

    def __init__(self):
        self.name, self.url = u'', u''
        self.properties = Properties()
        self.metadata = {}
class Properties(WindowsAzureData):
    '''Last-modified timestamp and ETag carried by a blob container.'''

    def __init__(self):
        self.last_modified, self.etag = u'', u''
class RetentionPolicy(WindowsAzureData):
    ''' RetentionPolicy in service properties. '''

    def __init__(self):
        self.enabled = False
        # Stored through __dict__ on purpose: 'days' is a class-level data
        # descriptor (see the property below), so a plain attribute write
        # would route through set_days; direct storage keeps the raw value.
        self.__dict__['days'] = None

    def get_days(self):
        # convert days to int value
        return int(self.__dict__['days'])

    def set_days(self, value):
        ''' set default days if days is set to empty. '''
        self.__dict__['days'] = value

    # Expose 'days' as a property so reads always return an int while the
    # backing value can be whatever the XML parser produced.
    days = property(fget=get_days, fset=set_days)
class Logging(WindowsAzureData):
    '''Storage analytics logging settings in service properties.'''

    def __init__(self):
        self.version = u'1.0'
        # Which operation categories get logged; all off by default.
        self.delete, self.read, self.write = False, False, False
        self.retention_policy = RetentionPolicy()
class HourMetrics(WindowsAzureData):
    '''Hourly metrics settings in storage service properties.'''

    def __init__(self):
        self.version = u'1.0'
        self.enabled, self.include_apis = False, None
        self.retention_policy = RetentionPolicy()
class MinuteMetrics(WindowsAzureData):
    '''Per-minute metrics settings in storage service properties.'''

    def __init__(self):
        self.version = u'1.0'
        self.enabled, self.include_apis = False, None
        self.retention_policy = RetentionPolicy()
class StorageServiceProperties(WindowsAzureData):
    '''Top-level storage analytics settings: logging plus both metrics.'''

    def __init__(self):
        self.logging = Logging()
        self.hour_metrics = HourMetrics()
        self.minute_metrics = MinuteMetrics()

    @property
    def metrics(self):
        '''Deprecated alias kept for backwards compatibility; warns and
        returns hour_metrics.'''
        import warnings
        warnings.warn(
            'The metrics attribute has been deprecated. Use hour_metrics and minute_metrics instead.')
        return self.hour_metrics
class AccessPolicy(WindowsAzureData):
    '''Shared-access policy: validity window, permissions, and the
    partition/row key ranges used for table SAS.'''

    def __init__(self, start=u'', expiry=u'', permission=u'',
                 start_pk=u'', start_rk=u'', end_pk=u'', end_rk=u''):
        # Assign every constructor argument to the attribute of the same
        # name, preserving declaration order.
        values = (start, expiry, permission, start_pk, start_rk, end_pk, end_rk)
        names = ('start', 'expiry', 'permission',
                 'start_pk', 'start_rk', 'end_pk', 'end_rk')
        for attr, value in zip(names, values):
            setattr(self, attr, value)
class SignedIdentifier(WindowsAzureData):
    '''A stored access policy: an identifier plus its AccessPolicy.'''

    def __init__(self):
        self.id = u''
        self.access_policy = AccessPolicy()
class SignedIdentifiers(WindowsAzureData):
    '''Sequence wrapper around a list of SignedIdentifier entries.'''

    def __init__(self):
        self.signed_identifiers = _list_of(SignedIdentifier)

    def __iter__(self):
        '''Iterate directly over the signed identifiers.'''
        return iter(self.signed_identifiers)

    def __len__(self):
        '''Number of signed identifiers held.'''
        return len(self.signed_identifiers)

    def __getitem__(self, index):
        '''Index into the signed identifiers.'''
        return self.signed_identifiers[index]
class BlobEnumResults(EnumResultsBase):
    ''' Result of a list-blobs call: the blobs, plus (for hierarchical
    listings) the common-name prefixes and the delimiter used. Iteration,
    len() and indexing operate on the blobs only. '''

    def __init__(self):
        EnumResultsBase.__init__(self)
        self.blobs = _list_of(Blob)
        self.prefixes = _list_of(BlobPrefix)
        self.delimiter = ''

    def __iter__(self):
        return iter(self.blobs)

    def __len__(self):
        return len(self.blobs)

    def __getitem__(self, index):
        return self.blobs[index]
class BlobResult(bytes):
    ''' Blob content as a bytes object, with the blob's properties
    attached as an attribute. '''

    def __new__(cls, blob, properties):
        # bytes is immutable, so the payload must be supplied in __new__;
        # a falsy/None payload becomes an empty byte string.
        payload = blob if blob else b''
        return bytes.__new__(cls, payload)

    def __init__(self, blob, properties):
        # headers/properties that accompanied the downloaded content
        self.properties = properties
class Blob(WindowsAzureData):
    ''' A single blob listing entry. '''

    def __init__(self):
        self.name, self.snapshot, self.url = u'', u'', u''
        self.properties = BlobProperties()  # parsed system properties
        self.metadata = {}                  # user-defined metadata pairs
class BlobProperties(WindowsAzureData):
    ''' System properties reported for a blob. '''

    def __init__(self):
        # HTTP/content properties
        self.last_modified = u''
        self.etag = u''
        self.content_length = 0
        self.content_type = u''
        self.content_encoding = u''
        self.content_language = u''
        self.content_md5 = u''
        # blob-specific properties
        self.xms_blob_sequence_number = 0
        self.blob_type = u''
        # lease information
        self.lease_status = u''
        self.lease_state = u''
        self.lease_duration = u''
        # copy-operation status
        self.copy_id = u''
        self.copy_source = u''
        self.copy_status = u''
        self.copy_progress = u''
        self.copy_completion_time = u''
        self.copy_status_description = u''
class BlobPrefix(WindowsAzureData):
    ''' Common name prefix returned for a hierarchical blob listing. '''

    def __init__(self):
        # the shared prefix string
        self.name = ''
class BlobBlock(WindowsAzureData):
    ''' A single block of a block blob: its id and size in bytes. '''

    def __init__(self, id=None, size=None):
        # ``id`` intentionally shadows the builtin: it is part of the
        # public constructor signature used by callers.
        self.id = id
        self.size = size
class BlobBlockList(WindowsAzureData):
    ''' Committed and uncommitted block lists of a block blob. '''

    def __init__(self):
        # both lists hold BlobBlock-style entries
        self.committed_blocks, self.uncommitted_blocks = [], []
class PageRange(WindowsAzureData):
    ''' Byte range of a page-blob page. '''

    def __init__(self):
        # start/end offsets in bytes (ints are immutable, sharing is safe)
        self.start = self.end = 0
class PageList(object):
    ''' Sequence wrapper over the page ranges of a page blob. '''

    def __init__(self):
        # _list_of is a project helper (defined elsewhere) that builds the
        # backing list for PageRange items.
        self.page_ranges = _list_of(PageRange)

    def __iter__(self):
        return iter(self.page_ranges)

    def __len__(self):
        return len(self.page_ranges)

    def __getitem__(self, index):
        return self.page_ranges[index]
class QueueEnumResults(EnumResultsBase):
    ''' Result of a list-queues call; iteration, len() and indexing
    operate on the queue entries. '''

    def __init__(self):
        EnumResultsBase.__init__(self)
        self.queues = _list_of(Queue)

    def __iter__(self):
        return iter(self.queues)

    def __len__(self):
        return len(self.queues)

    def __getitem__(self, index):
        return self.queues[index]
class Queue(WindowsAzureData):
    ''' A queue listing entry: name, URL and metadata. '''

    def __init__(self):
        self.name, self.url = u'', u''
        self.metadata = {}  # user-defined metadata pairs
class QueueMessagesList(WindowsAzureData):
    ''' Sequence wrapper over a list of QueueMessage objects. '''

    def __init__(self):
        # _list_of is a project helper (defined elsewhere) that builds the
        # backing list for QueueMessage items.
        self.queue_messages = _list_of(QueueMessage)

    def __iter__(self):
        return iter(self.queue_messages)

    def __len__(self):
        return len(self.queue_messages)

    def __getitem__(self, index):
        return self.queue_messages[index]
class QueueMessage(WindowsAzureData):
    ''' A message retrieved from a queue; every field arrives as text and
    is kept as a string. '''

    def __init__(self):
        (self.message_id, self.insertion_time, self.expiration_time,
         self.pop_receipt, self.time_next_visible, self.dequeue_count,
         self.message_text) = (u'',) * 7
class Entity(WindowsAzureData):
    ''' Entity class. The attributes of entity will be created dynamically
    during deserialization rather than declared here. '''
    pass
class EntityProperty(WindowsAzureData):
    ''' Typed table-entity property: an EDM type name plus its value. '''

    def __init__(self, type=None, value=None):
        # ``type`` deliberately shadows the builtin: it is part of the
        # public constructor signature, e.g. EntityProperty('Edm.Binary', b).
        self.type, self.value = type, value
class Table(WindowsAzureData):
    ''' Only for IntelliSense and telling user the return type; attributes
    are attached dynamically during deserialization. '''
    pass
class ContainerSharedAccessPermissions(object):
    '''Permissions for a container.'''

    # Read the content, properties, metadata or block list of any blob in
    # the container. Use any blob in the container as the source of a
    # copy operation.
    READ = 'r'

    # For any blob in the container, create or write content, properties,
    # metadata, or block list. Snapshot or lease the blob. Resize the blob
    # (page blob only). Use the blob as the destination of a copy operation
    # within the same account. Note: this does not grant permission to read
    # or write container properties or metadata, nor to lease a container.
    WRITE = 'w'

    # Delete any blob in the container.
    DELETE = 'd'

    # List blobs in the container.
    LIST = 'l'
class BlobSharedAccessPermissions(object):
    '''Permissions for a blob.'''

    # Read the content, properties, metadata and block list. Use the blob
    # as the source of a copy operation.
    READ = 'r'

    # Create or write content, properties, metadata, or block list.
    # Snapshot or lease the blob. Resize the blob (page blob only). Use the
    # blob as the destination of a copy operation within the same account.
    WRITE = 'w'

    # Delete the blob.
    DELETE = 'd'
class TableSharedAccessPermissions(object):
    '''Permissions for a table.'''

    # Get entities and query entities.
    QUERY = 'r'

    # Add entities.
    ADD = 'a'

    # Update entities.
    UPDATE = 'u'

    # Delete entities.
    DELETE = 'd'
class QueueSharedAccessPermissions(object):
    '''Permissions for a queue.'''

    # Read metadata and properties, including message count.
    # Peek at messages.
    READ = 'r'

    # Add messages to the queue.
    ADD = 'a'

    # Update messages in the queue.
    UPDATE = 'u'

    # Get and delete messages from the queue.
    PROCESS = 'p'
def _parse_blob_enum_results_list(response):
    ''' Parse a list-blobs XML response body into a BlobEnumResults.

    Fills the blobs and prefixes lists from the <Blobs> element, then
    copies every other simple data member of BlobEnumResults from the
    matching top-level elements of the document.
    '''
    respbody = response.body
    return_obj = BlobEnumResults()
    enum_results = ETree.fromstring(respbody)

    for child in enum_results.findall('./Blobs/Blob'):
        return_obj.blobs.append(_ETreeXmlToObject.fill_instance_element(child, Blob))

    for child in enum_results.findall('./Blobs/BlobPrefix'):
        return_obj.prefixes.append(
            _ETreeXmlToObject.fill_instance_element(child, BlobPrefix))

    # every remaining attribute of the result object (marker, max results,
    # delimiter, ...) is looked up by name in the XML
    for name, value in vars(return_obj).items():
        if name == 'blobs' or name == 'prefixes':
            continue
        value = _ETreeXmlToObject.fill_data_member(enum_results, name, value)
        if value is not None:
            setattr(return_obj, name, value)

    return return_obj
def _update_storage_header(request):
    ''' Add the headers shared by all storage requests, in place.

    Appends Content-Length for body-bearing verbs, the x-ms-version
    header, and expands a packed 'x-ms-meta-name-values' dict header into
    individual 'x-ms-meta-*' headers. Returns the mutated request.
    '''
    if request.body:
        assert isinstance(request.body, bytes)

    # if it is PUT, POST, MERGE, DELETE, need to add content-length to header.
    # NOTE(review): this assumes request.body is bytes (possibly empty) for
    # these verbs; a None body would make len() raise TypeError — confirm.
    if request.method in ['PUT', 'POST', 'MERGE', 'DELETE']:
        request.headers.append(('Content-Length', str(len(request.body))))

    # append additional headers based on the service
    request.headers.append(('x-ms-version', X_MS_VERSION))

    # append x-ms-meta name, values to header; substring match finds the
    # packed metadata header regardless of any prefix on its name
    for name, value in request.headers:
        if 'x-ms-meta-name-values' in name and value:
            for meta_name, meta_value in value.items():
                request.headers.append(('x-ms-meta-' + meta_name, meta_value))
            request.headers.remove((name, value))
            # break immediately: the list was mutated during iteration
            break

    return request
def _update_storage_blob_header(request, authentication):
    ''' Add date and content-type headers for a blob request, sign it with
    the given authentication object, and return the final header list. '''
    request = _update_storage_header(request)
    # RFC 1123 date stamp, required for request signing
    current_time = format_date_time(time())
    request.headers.append(('x-ms-date', current_time))
    request.headers.append(
        ('Content-Type', 'application/octet-stream Charset=UTF-8'))
    authentication.sign_request(request)

    return request.headers
def _update_storage_queue_header(request, authentication):
    ''' Queue requests use exactly the same headers and signing as blob
    requests. '''
    return _update_storage_blob_header(request, authentication)
def _update_storage_table_header(request, content_type='application/atom+xml'):
    ''' Add the table-service headers (content type, OData versions, date
    stamps) and return the final header list. Unlike the blob/queue
    helpers, this does not sign the request. '''
    request = _update_storage_header(request)
    if content_type:
        # for/else: the default is only appended when no Content-Type
        # header is already present
        for name, _ in request.headers:
            if name.lower() == 'content-type':
                break
        else:
            request.headers.append(('Content-Type', content_type))
    request.headers.append(('DataServiceVersion', '2.0;NetFx'))
    request.headers.append(('MaxDataServiceVersion', '2.0;NetFx'))
    current_time = format_date_time(time())
    request.headers.append(('x-ms-date', current_time))
    request.headers.append(('Date', current_time))

    return request.headers
def _to_python_bool(value):
if value.lower() == 'true':
return True
return False
def _to_entity_int(data):
int_max = (2 << 30) - 1
if data > (int_max) or data < (int_max + 1) * (-1):
return 'Edm.Int64', str(data)
else:
return 'Edm.Int32', str(data)
def _to_entity_bool(value):
if value:
return 'Edm.Boolean', 'true'
return 'Edm.Boolean', 'false'
def _to_entity_datetime(value):
# Azure expects the date value passed in to be UTC.
# Azure will always return values as UTC.
# If a date is passed in without timezone info, it is assumed to be UTC.
if value.tzinfo:
value = value.astimezone(tzutc())
return 'Edm.DateTime', value.strftime('%Y-%m-%dT%H:%M:%SZ')
def _to_entity_float(value):
    ''' Serialize a float as an Edm.Double pair. '''
    return 'Edm.Double', str(value)
def _to_entity_property(value):
    ''' Serialize an EntityProperty using its declared EDM type: binary
    payloads are base64-encoded, everything else is stringified. '''
    if value.type == 'Edm.Binary':
        return value.type, _encode_base64(value.value)

    return value.type, str(value.value)
def _to_entity_none(value):
    ''' Serialize None: no EDM type and no content. '''
    return None, None
def _to_entity_str(value):
    ''' Serialize a string as an Edm.String pair, unchanged. '''
    return 'Edm.String', value
# Tables of conversions to and from entity types. We support specific
# datatypes, and beyond that the user can use an EntityProperty to get
# custom data type support.
def _from_entity_binary(value):
    ''' Deserialize a base64 Edm.Binary payload into an EntityProperty. '''
    return EntityProperty('Edm.Binary', _decode_base64_to_bytes(value))
def _from_entity_int(value):
    ''' Deserialize an Edm.Int32/Edm.Int64 string into an int. '''
    return int(value)
def _from_entity_datetime(value):
    ''' Parse an Edm.DateTime string. Azure always returns UTC, and the
    dateutil parser sets tzinfo on the returned datetime accordingly. '''
    return parser.parse(value)
# Maps EDM type name -> callable producing the Python value for that type.
# EDM types not listed here fall back to EntityProperty in the caller.
_ENTITY_TO_PYTHON_CONVERSIONS = {
    'Edm.Binary': _from_entity_binary,
    'Edm.Int32': _from_entity_int,
    'Edm.Int64': _from_entity_int,
    'Edm.Double': float,
    'Edm.Boolean': _to_python_bool,
    'Edm.DateTime': _from_entity_datetime,
}

# Conversion from Python type to a function which returns a tuple of the
# type string and content string.
_PYTHON_TO_ENTITY_CONVERSIONS = {
    int: _to_entity_int,
    bool: _to_entity_bool,
    datetime: _to_entity_datetime,
    float: _to_entity_float,
    EntityProperty: _to_entity_property,
    str: _to_entity_str,
}

# Python 2 has extra types that need explicit entries: long, NoneType
# (None is handled separately on Python 3) and unicode.
if sys.version_info < (3,):
    _PYTHON_TO_ENTITY_CONVERSIONS.update({
        long: _to_entity_int,
        types.NoneType: _to_entity_none,
        unicode: _to_entity_str,
    })
def _convert_entity_to_xml(source):
    ''' Converts an entity object (or plain dict) to the Atom entry XML to
    send to the table service.

    Each property becomes a <d:Name> element inside <m:properties>, with
    an m:type attribute when the EDM type is known and m:null="true" for
    null values, e.g.:

        <d:Age m:type="Edm.Int32">23</d:Age>
        <d:BinaryData m:type="Edm.Binary" m:null="true" />

    Raises WindowsAzureError for values of unsupported Python types.
    '''
    # construct the entity body included in <m:properties> and </m:properties>
    entity_body = '<m:properties xml:space="preserve">{properties}</m:properties>'

    if isinstance(source, WindowsAzureData):
        source = vars(source)

    properties_str = ''

    # set properties type for types we know if value has no type info.
    # if value has type info, then set the type to value.type
    for name, value in source.items():
        mtype = ''
        conv = _PYTHON_TO_ENTITY_CONVERSIONS.get(type(value))
        # on Python 3, None has no table entry and is handled explicitly
        if conv is None and sys.version_info >= (3,) and value is None:
            conv = _to_entity_none
        if conv is None:
            raise WindowsAzureError(
                _ERROR_CANNOT_SERIALIZE_VALUE_TO_ENTITY.format(
                    type(value).__name__))

        mtype, value = conv(value)

        # form the property node
        properties_str += ''.join(['<d:', name])
        if value is None:
            properties_str += ' m:null="true" />'
        else:
            if mtype:
                properties_str += ''.join([' m:type="', mtype, '"'])
            properties_str += ''.join(['>',
                                       xml_escape(value), '</d:', name, '>'])

    # Python 2: the request body must be bytes, so encode any unicode
    if sys.version_info < (3,):
        if isinstance(properties_str, unicode):
            properties_str = properties_str.encode('utf-8')

    # generate the entity_body
    entity_body = entity_body.format(properties=properties_str)
    xmlstr = _create_entry(entity_body)

    return xmlstr
def _convert_table_to_xml(table_name):
    '''
    Create xml to send for a given table name. Since the xml format for a
    table is the same as for an entity, and the only difference is that a
    table has a single property 'TableName', we just call
    _convert_entity_to_xml.

    table_name:
        the name of the table
    '''
    return _convert_entity_to_xml({'TableName': table_name})
def _convert_block_list_to_xml(block_id_list):
    '''
    Build the <BlockList> XML payload for put_block_list.

    block_id_list:
        a str list containing the block ids that are used in put_block_list.
        Every id is emitted as a <Latest> element, i.e. each block is taken
        from the most recent upload.
    Returns '' when block_id_list is None.
    '''
    if block_id_list is None:
        return ''
    parts = ['<?xml version="1.0" encoding="utf-8"?><BlockList>']
    parts.extend('<Latest>{0}</Latest>'.format(_encode_base64(block_id))
                 for block_id in block_id_list)
    parts.append('</BlockList>')
    return ''.join(parts)
def _convert_signed_identifiers_to_xml(signed_identifiers):
if signed_identifiers is None:
return ''
xml = '<?xml version="1.0" encoding="utf-8"?><SignedIdentifiers>'
for signed_identifier in signed_identifiers:
xml += '<SignedIdentifier>'
xml += '<Id>{0}</Id>'.format(signed_identifier.id)
xml += '<AccessPolicy>'
if signed_identifier.access_policy.start:
xml += '<Start>{0}</Start>'.format(signed_identifier.access_policy.start)
if signed_identifier.access_policy.expiry:
xml += '<Expiry>{0}</Expiry>'.format(signed_identifier.access_policy.expiry)
if signed_identifier.access_policy.permission:
xml += '<Permission>{0}</Permission>'.format(signed_identifier.access_policy.permission)
xml += '</AccessPolicy>'
xml += '</SignedIdentifier>'
return xml + '</SignedIdentifiers>'
def _create_blob_result(response):
    ''' Wrap a get-blob response body and its parsed headers into a
    BlobResult. '''
    blob_properties = _parse_response_for_dict(response)
    return BlobResult(response.body, blob_properties)
def _convert_block_etree_element_to_blob_block(block_element):
    ''' Convert a <Block> element into a BlobBlock (decoded id + size). '''
    block_id = _decode_base64_to_text(block_element.findtext('./Name', ''))
    # NOTE(review): findtext without a default returns None when <Size> is
    # missing, making int() raise TypeError — presumably the service always
    # includes <Size>; confirm.
    block_size = int(block_element.findtext('./Size'))

    return BlobBlock(block_id, block_size)
def _convert_response_to_block_list(response):
    '''
    Converts a get-block-list xml response to a BlobBlockList, filling the
    committed and uncommitted block lists separately.
    '''
    block_list = BlobBlockList()

    list_element = ETree.fromstring(response.body)

    for block_element in list_element.findall('./CommittedBlocks/Block'):
        block = _convert_block_etree_element_to_blob_block(block_element)
        block_list.committed_blocks.append(block)

    for block_element in list_element.findall('./UncommittedBlocks/Block'):
        block = _convert_block_etree_element_to_blob_block(block_element)
        block_list.uncommitted_blocks.append(block)

    return block_list
def _remove_prefix(name):
colon = name.find(':')
if colon != -1:
return name[colon + 1:]
return name
def _convert_response_to_entity(response):
    ''' Parse an HTTP response body into an Entity; None passes through
    unchanged. '''
    if response is None:
        return response

    root = ETree.fromstring(response.body)

    return _convert_etree_element_to_entity(root)
def _convert_etree_element_to_entity(entry_element):
    ''' Convert an Atom <entry> element into an Entity.

    Properties live under ./atom:content/m:properties; each <d:Name>
    element carries an optional m:type attribute (the EDM type) and an
    optional m:null="true" marker, e.g.:

        <d:Age m:type="Edm.Int32">23</d:Age>
        <d:BinaryData m:type="Edm.Binary" m:null="true" />

    Untyped non-null values are kept as plain strings; typed values go
    through _ENTITY_TO_PYTHON_CONVERSIONS or fall back to EntityProperty.
    The entry's etag is also attached to the returned entity.
    '''
    entity = Entity()

    properties = entry_element.findall('./atom:content/m:properties', _etree_entity_feed_namespaces)
    for prop in properties:
        for p in prop:
            name = _get_etree_tag_name_without_ns(p.tag)
            value = p.text or ''
            mtype = p.attrib.get(_make_etree_ns_attr_name(_etree_entity_feed_namespaces['m'], 'type'), None)
            isnull = p.attrib.get(_make_etree_ns_attr_name(_etree_entity_feed_namespaces['m'], 'null'), None)

            # if not isnull and no type info, then it is a string and we just
            # need the str type to hold the property.
            if not isnull and not mtype:
                _set_entity_attr(entity, name, value)
            elif isnull == 'true':
                if mtype:
                    property = EntityProperty(mtype, None)
                else:
                    property = EntityProperty('Edm.String', None)
            else:  # need an object to hold the property
                conv = _ENTITY_TO_PYTHON_CONVERSIONS.get(mtype)
                if conv is not None:
                    property = conv(value)
                else:
                    property = EntityProperty(mtype, value)
                # NOTE(review): only the typed (else) branch attaches the
                # property — null-valued properties built in the elif branch
                # above are never set on the entity. Looks like a latent
                # bug; confirm intended behavior before changing.
                _set_entity_attr(entity, name, property)

    # extract id, updated and name value from feed entry and set them of
    # rule.
    for name, value in _ETreeXmlToObject.get_entry_properties_from_element(
            entry_element, True).items():
        if name in ['etag']:
            _set_entity_attr(entity, name, value)

    return entity
def _set_entity_attr(entity, name, value):
    ''' Assign a property onto an entity, tolerating non-ASCII names. '''
    try:
        setattr(entity, name, value)
    except UnicodeEncodeError:
        # Python 2 doesn't support unicode attribute names, so we'll
        # add them and access them directly through the dictionary
        entity.__dict__[name] = value
def _convert_etree_element_to_table(entry_element):
    ''' Converts an Atom <entry> xml element to a Table object.

    The table name comes from the d:TableName property; entry-level
    metadata (id, updated, ...) is copied onto the object as attributes.
    '''
    table = Table()

    name_element = entry_element.find('./atom:content/m:properties/d:TableName', _etree_entity_feed_namespaces)
    if name_element is not None:
        table.name = name_element.text

    for name_element, value in _ETreeXmlToObject.get_entry_properties_from_element(
            entry_element, False).items():
        setattr(table, name_element, value)

    return table
class _BlobChunkDownloader(object):
    ''' Downloads a blob in fixed-size chunks, optionally from several
    threads in parallel.

    When *parallel* is true, stream and progress updates are serialized
    with locks and each chunk write seeks relative to the stream's
    starting position; when false, chunks are written sequentially with
    no seeking.
    '''

    def __init__(self, blob_service, container_name, blob_name, blob_size,
                 chunk_size, stream, parallel, max_retries, retry_wait,
                 progress_callback):
        self.blob_service = blob_service      # service used for get_blob calls
        self.container_name = container_name
        self.blob_name = blob_name
        self.blob_size = blob_size            # total bytes expected
        self.chunk_size = chunk_size          # bytes requested per ranged GET
        self.stream = stream                  # destination file-like object
        # remember where the stream started so parallel writers can seek
        self.stream_start = stream.tell() if parallel else None
        self.stream_lock = threading.Lock() if parallel else None
        self.progress_callback = progress_callback
        self.progress_total = 0               # bytes downloaded so far
        self.progress_lock = threading.Lock() if parallel else None
        self.max_retries = max_retries
        self.retry_wait = retry_wait          # seconds between retry attempts

    def get_chunk_offsets(self):
        ''' Yield the starting byte offset of every chunk. '''
        index = 0
        while index < self.blob_size:
            yield index
            index += self.chunk_size

    def process_chunk(self, chunk_offset):
        ''' Download one chunk, write it to the stream, report progress. '''
        chunk_data = self._download_chunk_with_retries(chunk_offset)
        length = len(chunk_data)
        if length > 0:
            self._write_to_stream(chunk_data, chunk_offset)
            self._update_progress(length)

    def _update_progress(self, length):
        # the lock only exists in parallel mode; otherwise update directly
        if self.progress_callback is not None:
            if self.progress_lock is not None:
                with self.progress_lock:
                    self.progress_total += length
                    total = self.progress_total
            else:
                self.progress_total += length
                total = self.progress_total
            self.progress_callback(total, self.blob_size)

    def _write_to_stream(self, chunk_data, chunk_offset):
        # parallel mode must seek to the chunk's position under the lock;
        # sequential mode relies on chunks arriving in order
        if self.stream_lock is not None:
            with self.stream_lock:
                self.stream.seek(self.stream_start + chunk_offset)
                self.stream.write(chunk_data)
        else:
            self.stream.write(chunk_data)

    def _download_chunk_with_retries(self, chunk_offset):
        ''' Ranged GET for one chunk, retried up to max_retries times. '''
        range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_offset + self.chunk_size - 1)

        retries = self.max_retries
        while True:
            try:
                return self.blob_service.get_blob(
                    self.container_name,
                    self.blob_name,
                    x_ms_range=range_id
                )
            except Exception:
                if retries > 0:
                    retries -= 1
                    sleep(self.retry_wait)
                else:
                    raise
class _BlobChunkUploader(object):
    ''' Base class for chunked blob uploads; subclasses implement
    _upload_chunk for the specific blob type.

    When *parallel* is true, stream reads and progress updates are
    serialized with locks and reads seek relative to the stream's
    starting position.
    '''

    def __init__(self, blob_service, container_name, blob_name, blob_size,
                 chunk_size, stream, parallel, max_retries, retry_wait,
                 progress_callback, x_ms_lease_id):
        self.blob_service = blob_service      # service used for put calls
        self.container_name = container_name
        self.blob_name = blob_name
        self.blob_size = blob_size            # total bytes, or None if unknown
        self.chunk_size = chunk_size          # bytes uploaded per chunk
        self.stream = stream                  # source file-like object
        # remember where the stream started so parallel readers can seek
        self.stream_start = stream.tell() if parallel else None
        self.stream_lock = threading.Lock() if parallel else None
        self.progress_callback = progress_callback
        self.progress_total = 0               # bytes uploaded so far
        self.progress_lock = threading.Lock() if parallel else None
        self.max_retries = max_retries
        self.retry_wait = retry_wait          # seconds between retry attempts
        self.x_ms_lease_id = x_ms_lease_id

    def get_chunk_offsets(self):
        ''' Yield the starting byte offset of every chunk. '''
        index = 0
        if self.blob_size is None:
            # we don't know the size of the stream, so we have no
            # choice but to seek
            while True:
                # probe one byte at the offset to detect end of stream
                data = self._read_from_stream(index, 1)
                if not data:
                    break
                yield index
                index += self.chunk_size
        else:
            while index < self.blob_size:
                yield index
                index += self.chunk_size

    def process_chunk(self, chunk_offset):
        ''' Read one chunk from the stream and upload it; returns the
        range/block id produced by the subclass. '''
        size = self.chunk_size
        if self.blob_size is not None:
            # clamp the final chunk to the remaining bytes
            size = min(size, self.blob_size - chunk_offset)
        chunk_data = self._read_from_stream(chunk_offset, size)
        return self._upload_chunk_with_retries(chunk_offset, chunk_data)

    def process_all_unknown_size(self):
        ''' Sequentially upload chunks until the stream is exhausted;
        only valid in non-parallel mode (reads with offset=None). '''
        assert self.stream_lock is None
        range_ids = []
        index = 0
        while True:
            data = self._read_from_stream(None, self.chunk_size)
            if data:
                index += len(data)
                # NOTE(review): index is incremented before the upload, so
                # the offset passed here is the end of the chunk, not its
                # start — apparently only used for id generation; confirm.
                range_id = self._upload_chunk_with_retries(index, data)
                range_ids.append(range_id)
            else:
                break

        return range_ids

    def _read_from_stream(self, offset, count):
        # parallel mode seeks to the chunk's position under the lock
        if self.stream_lock is not None:
            with self.stream_lock:
                self.stream.seek(self.stream_start + offset)
                data = self.stream.read(count)
        else:
            data = self.stream.read(count)
        return data

    def _update_progress(self, length):
        # the lock only exists in parallel mode; otherwise update directly
        if self.progress_callback is not None:
            if self.progress_lock is not None:
                with self.progress_lock:
                    self.progress_total += length
                    total = self.progress_total
            else:
                self.progress_total += length
                total = self.progress_total
            self.progress_callback(total, self.blob_size)

    def _upload_chunk_with_retries(self, chunk_offset, chunk_data):
        ''' Upload one chunk, retried up to max_retries times; progress is
        only reported after a successful upload. '''
        retries = self.max_retries
        while True:
            try:
                range_id = self._upload_chunk(chunk_offset, chunk_data)
                self._update_progress(len(chunk_data))
                return range_id
            except Exception:
                if retries > 0:
                    retries -= 1
                    sleep(self.retry_wait)
                else:
                    raise
class _BlockBlobChunkUploader(_BlobChunkUploader):
    ''' Uploads chunks as blocks of a block blob. '''

    def _upload_chunk(self, chunk_offset, chunk_data):
        ''' put_block with a base64 block id derived from the offset;
        returns the block id for the subsequent put_block_list call. '''
        # zero-padded to 32 digits so block ids sort in offset order
        range_id = url_quote(_encode_base64('{0:032d}'.format(chunk_offset)))
        self.blob_service.put_block(
            self.container_name,
            self.blob_name,
            chunk_data,
            range_id,
            x_ms_lease_id=self.x_ms_lease_id
        )
        return range_id
class _PageBlobChunkUploader(_BlobChunkUploader):
    ''' Uploads chunks as page updates of a page blob. '''

    def _upload_chunk(self, chunk_offset, chunk_data):
        ''' put_page with an inclusive byte range; returns the range id. '''
        range_id = 'bytes={0}-{1}'.format(chunk_offset, chunk_offset + len(chunk_data) - 1)
        self.blob_service.put_page(
            self.container_name,
            self.blob_name,
            chunk_data,
            range_id,
            'update',
            x_ms_lease_id=self.x_ms_lease_id
        )
        return range_id
def _download_blob_chunks(blob_service, container_name, blob_name,
                          blob_size, block_size, stream, max_connections,
                          max_retries, retry_wait, progress_callback):
    ''' Download a blob into *stream* in block_size chunks.

    Uses a thread pool of max_connections workers when max_connections > 1,
    otherwise downloads sequentially on the calling thread.
    '''
    downloader = _BlobChunkDownloader(
        blob_service,
        container_name,
        blob_name,
        blob_size,
        block_size,
        stream,
        max_connections > 1,
        max_retries,
        retry_wait,
        progress_callback,
    )

    # report 0% up front so callers see the transfer start
    if progress_callback is not None:
        progress_callback(0, blob_size)

    if max_connections > 1:
        import concurrent.futures
        # The with-block joins and releases the worker threads even when a
        # chunk raises (the original never shut the executor down); list()
        # forces evaluation so any worker exception propagates here.
        with concurrent.futures.ThreadPoolExecutor(max_connections) as executor:
            list(executor.map(downloader.process_chunk,
                              downloader.get_chunk_offsets()))
    else:
        for range_start in downloader.get_chunk_offsets():
            downloader.process_chunk(range_start)
def _upload_blob_chunks(blob_service, container_name, blob_name,
blob_size, block_size, stream, max_connections,
max_retries, retry_wait, progress_callback,
x_ms_lease_id, uploader_class):
uploader = uploader_class(
blob_service,
container_name,
blob_name,
blob_size,
block_size,
stream,
max_connections > 1,
max_retries,
retry_wait,
progress_callback,
x_ms_lease_id,
)
if progress_callback is not None:
progress_callback(0, blob_size)
if max_connections > 1:
import concurrent.futures
executor = concurrent.futures.ThreadPoolExecutor(max_connections)
range_ids = list(executor.map(uploader.process_chunk, uploader.get_chunk_offsets()))
else:
if blob_size is not None:
range_ids = [uploader.process_chunk(start) for start in uploader.get_chunk_offsets()]
else:
range_ids = uploader.process_all_unknown_size()
return range_ids
def _storage_error_handler(http_error):
    ''' Simple error handler for storage service: delegates to the shared
    general error handler. '''
    return _general_error_handler(http_error)
class StorageSASAuthentication(object):
    ''' Authenticates requests by appending a shared access signature
    token to the request query string; no headers are signed. '''

    def __init__(self, sas_token):
        self.sas_token = sas_token

    def sign_request(self, request):
        ''' Append the SAS token to request.path in place. '''
        # join with '&' when a query string already exists, '?' otherwise
        separator = '&' if '?' in request.path else '?'
        request.path += separator + self.sas_token
class _StorageSharedKeyAuthentication(object):
def __init__(self, account_name, account_key):
self.account_name = account_name
self.account_key = account_key
def _get_headers(self, request, headers_to_sign):
headers = {
name.lower() : value for name, value in request.headers if value
}
return '\n'.join(headers.get(x, '') for x in headers_to_sign) + '\n'
def _get_verb(self, request):
return request.method + '\n'
def _get_canonicalized_resource(self, request):
uri_path = request.path.split('?')[0]
return '/' + self.account_name + uri_path
def _get_canonicalized_headers(self, request):
string_to_sign = ''
x_ms_headers = []
for name, value in request.headers:
if name.startswith('x-ms-'):
x_ms_headers.append((name.lower(), value))
x_ms_headers.sort()
for name, value in x_ms_headers:
if value:
string_to_sign += ''.join([name, ':', value, '\n'])
return string_to_sign
def _add_authorization_header(self, request, string_to_sign):
signature = _sign_string(self.account_key, string_to_sign)
auth_string = 'SharedKey ' + self.account_name + ':' + signature
request.headers.append(('Authorization', auth_string))
class StorageSharedKeyAuthentication(_StorageSharedKeyAuthentication):
    ''' SharedKey signing for the blob and queue services. '''

    def sign_request(self, request):
        ''' Build the blob/queue string-to-sign and attach the
        Authorization header to the request. '''
        string_to_sign = \
            self._get_verb(request) + \
            self._get_headers(
                request,
                [
                    'content-encoding', 'content-language', 'content-length',
                    'content-md5', 'content-type', 'date', 'if-modified-since',
                    'if-match', 'if-none-match', 'if-unmodified-since', 'range'
                ]
            ) + \
            self._get_canonicalized_headers(request) + \
            self._get_canonicalized_resource(request) + \
            self._get_canonicalized_resource_query(request)

        self._add_authorization_header(request, string_to_sign)

    def _get_canonicalized_resource_query(self, request):
        ''' Canonicalize the query parameters: sorted by name, one
        "\\nname:value" entry per name, with additional values for the
        same name appended as "\\n,value". Empty values are skipped. '''
        # sorted() makes a copy: the original called request.query.sort(),
        # mutating the caller's request as a hidden side effect.
        query_to_sign = sorted(request.query)

        string_to_sign = ''
        current_name = ''
        for name, value in query_to_sign:
            if value:
                if current_name != name:
                    string_to_sign += '\n' + name + ':' + value
                    current_name = name
                else:
                    string_to_sign += '\n' + ',' + value

        return string_to_sign
class StorageTableSharedKeyAuthentication(_StorageSharedKeyAuthentication):
    ''' SharedKey signing for the table service: fewer signed headers than
    blob/queue, and only the 'comp' query parameter is canonicalized. '''

    def sign_request(self, request):
        ''' Build the table string-to-sign and attach the Authorization
        header to the request. '''
        string_to_sign = \
            self._get_verb(request) + \
            self._get_headers(
                request,
                ['content-md5', 'content-type', 'date'],
            ) + \
            self._get_canonicalized_resource(request) + \
            self._get_canonicalized_resource_query(request)

        self._add_authorization_header(request, string_to_sign)

    def _get_canonicalized_resource_query(self, request):
        ''' Only a '?comp=...' parameter participates in the signature. '''
        for name, value in request.query:
            if name == 'comp':
                return '?comp=' + value
        return ''
class StorageNoAuthentication(object):
    ''' No-op authentication: leaves the request unsigned. '''

    def sign_request(self, request):
        ''' Intentionally does nothing. '''
        pass
class StorageConnectionParameters(object):
    '''
    Extract connection parameters from a connection string.
    This is based on http://azure.microsoft.com/en-us/documentation/articles/storage-configure-connection-string/ .
    NOTE "(Blob|Table|Queue|File)Endpoint" are not supported.
         "SharedAccessSignature" is not supported.
         dev_host, timeout, and sas_token cannot be specified with a
         connection string.
    '''
    def __init__(self, connection_string = ''):
        # 'Key=Value;Key=Value' -> dict; values may themselves contain '='
        # (e.g. base64 account keys), hence maxsplit=1.
        # NOTE(review): an empty or malformed connection string makes dict()
        # raise ValueError here — presumably callers always pass a
        # well-formed string; confirm.
        connection_params = dict(s.split('=',1) for s in connection_string.split(';'))

        self.account_name = connection_params.get('AccountName', None)
        self.account_key = connection_params.get('AccountKey', None)
        # https unless the connection string says otherwise
        self.protocol = connection_params.get('DefaultEndpointsProtocol', 'https').lower()
        # a custom endpoint suffix (e.g. sovereign clouds) overrides the
        # default per-service host bases
        endpoint_suffix = connection_params.get('EndpointSuffix', None)
        self.host_base_blob = BLOB_SERVICE_HOST_BASE if endpoint_suffix is None \
            else ".blob.{}".format(endpoint_suffix)
        self.host_base_table = TABLE_SERVICE_HOST_BASE if endpoint_suffix is None \
            else ".table.{}".format(endpoint_suffix)
        self.host_base_queue = QUEUE_SERVICE_HOST_BASE if endpoint_suffix is None \
            else ".queue.{}".format(endpoint_suffix)
# make these available just from storage.
from azure.storage.blobservice import BlobService
from azure.storage.queueservice import QueueService
from azure.storage.tableservice import TableService
from azure.storage.cloudstorageaccount import CloudStorageAccount
from azure.storage.sharedaccesssignature import (
SharedAccessSignature,
SharedAccessPolicy,
)
# ---- end of storage module; unrelated stats script follows ----
from textwrap import dedent as dd
# Display names of all character stats, in on-screen order.
names = (
    'Intelligence',
    'Will',
    'Strength',
    'Endurance',
    'Dexterity',
    'Agility',
    'Speed',
    'Eyesight',
    'Hearing',
    'Smell/Taste',
    'Touch',
    'Height',
    'Weight',
    'Physique'
)
# The same stats bucketed into categories: mental, physical/senses,
# body measurements, and physique on its own.
groups = (
    ('Intelligence', 'Will'),
    ('Strength',
     'Endurance',
     'Dexterity',
     'Agility',
     'Speed',
     'Eyesight',
     'Hearing',
     'Smell/Taste',
     'Touch'),
    ('Height', 'Weight'),
    ('Physique',)
)
# TODO: Figure out how to determine chosen race and give stat info
# http://cloud-3.steamusercontent.com/ugc/320124788920141475/83A5568F35B91FC2BD926876D7757487797911CF/
extra_info = {
'Intelligence': dd(
'''
Intelligence primarily affects how quickly you train skills.
The following skills are governed by this stat:
Agriculture,
Building,
Herblore,
Physician,
Ritual,
Trapping,
Weatherlore
'''
),
'Will': dd(
'''
Will affects your ability to stay awake when exhausted and your abiliity
to force yourself to eat raw meat and fish (and then throwing up).
The following skills are governed by this stat:
Agriculture,
Fishing,
Physician,
Ritual,
Stealth
'''
),
'Strength': dd(
'''
Strength does NOT affect your carrying capacity.
The following skills are governed by this stat:
Club*,
Building,
Timbercraft,
Climbing,
Swimming,
Shield,
Sword,
Axe,
Flail,
Spear,
Bow,
Unarmed
* Club skill level accounts for 2x your Strength level.
((2*Strength + Dexterity)/3 * skill points)
'''
),
'Endurance': dd(
'''
Endurance affects your Encumberance penalty (and thus Mobility) via
reduced Fatigue gain and increased recovery speed. Fatigue acts as
flat reduction to skill levels. It also affects your pain tolerance,
or resistance to being hit and losing consciousness.
The following skills are governed by this stat:
Agriculture,
Swimming
'''
),
'Dexterity': dd(
'''
Dexterity affects the likelyhood of fumbling when in combat.
The following skills are governed by this stat:
Agriculture,
Carpentry,
Fishing,
Hideworking*,
Timbercraft,
Trapping,
Climbing,
Skiing,
Knife,
Sword,
Shield,
Flail,
Bow,
Crossbow
* Hideworking skill level accounts for 2x your Dexterity level.
'''
),
'Agility': dd(
'''
Agility is one of the few stats that governs your Dodge skill,
*supposedly* with a higher than average multiplier as well.
It also helps when standing up in the heat of combat.
The following skills are governed by this stat:
Dodge,
Timbercraft,
Climbing,
Skiing*,
Swimming,
Stealth,
Shield,
Unarmed,
Knife,
Sword,
Axe,
Spear*,
Club,
Flail
* Skiing and Spear skill levels account for 2x your Agility level.
'''
),
'Speed': dd(
'''
Speed affects your base Mobility value (5x Speed). This means
walking, running, paddling a raft/punt, swimming hiding and crawling.
*Supposedly* Speed also affects how long it takes to rest.
The following skills are governed by this stat:
Dodge,
Unarmed
'''
),
'Eyesight': dd(
'''
Eyesight affects how far you can see on the maps, and how close you have to be
to spot the outline of a beast or man in the distance on the wilderness map.
The following skills are governed by this stat:
Weatherlore,
Tracking*,
Dodge,
Shield,
Bow,
Crossbow
* Tracking skill level accounts for 2x your Eyesight level.
'''
),
'Hearing': dd(
'''
Hearing affects your ability to locate your prey on a partly obscured map, as well as
warn you of danger outside your line of sight. A deaf character will -potentially-
be in danger of being eaten alive by squirrels while sleeping outdoors. Or by wolves.
The following skills are governed by this stat:
Ritual,
Tracking
'''
),
'Smell/Taste': dd(
'''
Smell/Taste affects your ability to differentiate between cow milk and bull milk. :^)
The following skills are governed by this stat:
Cookery*,
Herblore,
Weatherlore,
Hideworking,
Tracking
* Cookery skill level accounts for 2x your Smell/Taste level.
'''
),
'Touch': dd(
'''
The following skills are governed by this stat:
Building,
Carpentry,
Cookery,
Fishing,
Herblore,
Hideworking,
Physician*,
Trapping,
Weatherlore,
Climbing,
Stealth,
Knife,
Flail,
Crossbow
* Physician skill level accounts for 2x your Touch level.
'''
),
'Height': dd(
'''
Indirectly affects your Weight.
'''
),
'Weight': dd(
'''
Weight is a combination of height and physique (or was at least).
Worn armor and clothes cause an Encumberance penalty when their total
weight exceeds 10% of your weight. *Supposedly* your weight limit
is calculated as such:
weight_limit = character_weight * 1.5
'''
),
'Physique': dd(
'''
Physique has since been removed from the game but still seems to exist in
memory. It is unknown if this mechanic is still in the game since its
removal from the stats screen during character creation.
Physique used to affect your weight limit and ability to pick up
items, i.e. a large physique would allow you to pick up large items with ease
while a small one would mean trouble picking up a punt. Physique may have
also affected how well you dealt with starvation. Speed and stealth were
also supposedly affected by having a small physique.
'''
),
}
class _stat:
    """Record tying a stat's display name to its in-memory buffer name.

    Also carries the optional long-form help text from ``extra_info``.
    """

    def __init__(self, name, buffername=None):
        self.name = name
        # Buffer names default to the upper-cased display name.
        base = buffername if buffername is not None else self.name.upper()
        self.buffername = base + '_STAT_BUFFER'
        self.extra_info = extra_info.get(self.name, None)

    def format(self, value):
        """Render *value* with the Stats formatter registered for this stat."""
        return Stats.format(self.name, value)

    def __repr__(self):
        return f'<_stat {self.name} ({self.buffername}), extra_info={bool(self.extra_info)}>'
class Stats:
    """Registry of character stats plus the text formatters that render them.

    Each class attribute is a ``_stat`` record linking a display name to its
    memory buffer name.  ``format`` dispatches to a per-stat
    ``_format_<name>`` classmethod when one exists, otherwise it renders the
    generic bar gauge.
    """
    intelligence = _stat('Intelligence')
    will = _stat('Will')
    strength = _stat('Strength')
    endurance = _stat('Endurance')
    dexterity = _stat('Dexterity')
    agility = _stat('Agility')
    speed = _stat('Speed')
    eyesight = _stat('Eyesight')
    hearing = _stat('Hearing')
    smelltaste = _stat('Smell/Taste', buffername='SMELLTASTE')
    touch = _stat('Touch')
    height = _stat('Height')
    weight = _stat('Weight')
    physique = _stat('Physique')
    rerolls = _stat('Rerolls')

    # Render templates: generic bar, height (inches + ft'in"), weight
    # (lbs + kg), physique type gauge, plain reroll counter.
    _fmt = '{stat:<13} {val:>2} [{size:<18}]'
    _hfmt = '{stat:<13} {val:>2}" ({fval}\'{ival}")'
    _wfmt = '{stat:<13} {val:>2} lbs ({kval} kg)'
    _pfmt = '{stat:<13} Type {val} [ {size} ]'
    _rfmt = '{stat:<13} {val}'

    @classmethod
    def get(cls, name, *, group=None):
        """Return the stat whose display name equals *name*, or None.

        *group* is a callable returning the stats to search; it defaults
        to ``all_stats``.
        """
        if group is None:
            group = cls.all_stats
        try:
            return next(filter(lambda s: s.name == name, group()))
        except StopIteration:
            pass

    @classmethod
    def get_name(cls, buffname, *, group=None):
        """Return the stat whose buffer name equals *buffname*, or None."""
        if group is None:
            group = cls.all_stats
        try:
            return next(filter(lambda s: s.buffername == buffname, group()))
        except StopIteration:
            pass

    @classmethod
    def all_stats(cls):
        """
        Return a tuple of all stats with memory locations.
        """
        return (
            cls.intelligence, cls.will, cls.strength, cls.endurance,
            cls.dexterity, cls.agility, cls.speed, cls.eyesight,
            cls.hearing, cls.smelltaste, cls.touch, cls.height,
            cls.weight, cls.physique, cls.rerolls)

    @classmethod
    def all_real_stats(cls):
        """
        Return a tuple of all stats, not including rerolls.
        """
        return (
            cls.intelligence, cls.will, cls.strength, cls.endurance,
            cls.dexterity, cls.agility, cls.speed, cls.eyesight,
            cls.hearing, cls.smelltaste, cls.touch, cls.height,
            cls.weight, cls.physique)

    @classmethod
    def all_normal_stats(cls):
        """
        Return a tuple of all stats that can have values of 1 to 18.
        """
        return (
            cls.intelligence, cls.will, cls.strength, cls.endurance,
            cls.dexterity, cls.agility, cls.speed, cls.eyesight,
            cls.hearing, cls.smelltaste, cls.touch)

    @classmethod
    def format(cls, stat, value):
        """Render *value* for *stat*, preferring a specialized formatter.

        Stats with no ``_format_<stat>`` classmethod (including names such
        as 'Smell/Taste' that cannot form an attribute name) fall back to
        the generic bar gauge.
        """
        try:
            return getattr(cls, '_format_%s' % stat.lower())(value)
        except AttributeError:
            return cls._fmt.format(
                stat=stat + ':', val=value, size='='*value)

    @classmethod
    def _format_height(cls, value):
        """Render a height in inches as both total inches and feet/inches."""
        f, i = divmod(value, 12)
        return cls._hfmt.format(
            stat=cls.height.name + ':', val=value, fval=f, ival=i)

    @classmethod
    def _format_weight(cls, value):
        """Render a weight in pounds with its metric conversion."""
        k = round(value * 0.4535, 1)
        return cls._wfmt.format(
            stat=cls.weight.name + ':', val=value, kval=k)

    @classmethod
    def _format_physique(cls, value):
        """Render the physique type as a five-position gauge.

        Out-of-range types leave the gauge unmarked.  The previous
        ``try/except IndexError`` guard let values below 1 slip through and
        mark the gauge from the right via negative indexing, so bounds are
        now checked explicitly instead.
        """
        things = list('-----')
        if 1 <= value <= len(things):
            things[value - 1] = '@'
        size = ' '.join(things)
        return cls._pfmt.format(
            stat=cls.physique.name + ':', val=value, size=size)

    @classmethod
    def _format_rerolls(cls, value):
        """Render the plain reroll counter."""
        return cls._rfmt.format(stat=cls.rerolls.name + ':', val=value)
| |
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from alembic.ddl import base as alembic_ddl
from alembic import script as alembic_script
from contextlib import contextmanager
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_db.sqlalchemy import test_migrations
from oslotest import base as oslotest_base
import six
import sqlalchemy
from sqlalchemy import event
from sqlalchemy.sql import ddl as sqla_ddl
import subprocess
from neutron.db.migration.alembic_migrations import external
from neutron.db.migration import cli as migration
from neutron.db.migration.models import head as head_models
from neutron.tests.unit import testlib_api
# Ensure the core_plugin option is registered before tests configure it.
cfg.CONF.import_opt('core_plugin', 'neutron.conf.common')

# DDL clause types that create schema elements, keyed by the layer that
# emits them (plain SQLAlchemy DDL vs. alembic's own DDL constructs).
# Contract-branch migrations must not emit any of these (see test_branches).
CREATION_OPERATIONS = {
    'sqla': (sqla_ddl.CreateIndex,
             sqla_ddl.CreateTable,
             sqla_ddl.CreateColumn,
             ),
    'alembic': (alembic_ddl.AddColumn,
                )
}

# DDL clause types that drop schema elements; expand-branch migrations
# must not emit any of these (see test_branches).
DROP_OPERATIONS = {
    'sqla': (sqla_ddl.DropConstraint,
             sqla_ddl.DropIndex,
             sqla_ddl.DropTable,
             ),
    'alembic': (alembic_ddl.DropColumn,
                )
}
def upgrade(engine, alembic_config, branch_name='heads'):
    """Run an alembic 'upgrade' to *branch_name* against *engine*."""
    # Point the db layer at the engine under test before invoking alembic.
    cfg.CONF.set_override('connection', engine.url, group='database')
    migration.do_alembic_command(alembic_config, 'upgrade', branch_name)
class _TestModelsMigrations(test_migrations.ModelsMigrationsSync):
    '''Test for checking of equality models state and migrations.

    For the opportunistic testing you need to set up a db named
    'openstack_citest' with user 'openstack_citest' and password
    'openstack_citest' on localhost.
    The test will then use that db and user/password combo to run the tests.

    For PostgreSQL on Ubuntu this can be done with the following commands::

        sudo -u postgres psql
        postgres=# create user openstack_citest with createdb login password
                  'openstack_citest';
        postgres=# create database openstack_citest with owner
                   openstack_citest;

    For MySQL on Ubuntu this can be done with the following commands::

        mysql -u root
        >create database openstack_citest;
        >grant all privileges on openstack_citest.* to
        openstack_citest@localhost identified by 'openstack_citest';

    Output is a list that contains information about differences between db and
    models. Output example::

       [('add_table',
         Table('bat', MetaData(bind=None),
               Column('info', String(), table=<bat>), schema=None)),
        ('remove_table',
         Table(u'bar', MetaData(bind=None),
               Column(u'data', VARCHAR(), table=<bar>), schema=None)),
        ('add_column',
         None,
         'foo',
         Column('data', Integer(), table=<foo>)),
        ('remove_column',
         None,
         'foo',
         Column(u'old_data', VARCHAR(), table=None)),
        [('modify_nullable',
          None,
          'foo',
          u'x',
          {'existing_server_default': None,
           'existing_type': INTEGER()},
          True,
          False)]]

    * ``remove_*`` means that there is extra table/column/constraint in db;

    * ``add_*`` means that it is missing in db;

    * ``modify_*`` means that on column in db is set wrong
      type/nullable/server_default. Element contains information:

        - what should be modified,
        - schema,
        - table,
        - column,
        - existing correct column parameters,
        - right value,
        - wrong value.

    This class also contains tests for branches, like that correct operations
    are used in contract and expand branches.
    '''

    BUILD_SCHEMA = False

    def setUp(self):
        super(_TestModelsMigrations, self).setUp()
        self.cfg = self.useFixture(config_fixture.Config())
        self.cfg.config(core_plugin='ml2')
        self.alembic_config = migration.get_neutron_config()
        self.alembic_config.neutron_config = cfg.CONF

    def db_sync(self, engine):
        """Hook used by ModelsMigrationsSync to build the schema under test."""
        upgrade(engine, self.alembic_config)

    def get_engine(self):
        return self.engine

    def get_metadata(self):
        return head_models.get_metadata()

    def include_object(self, object_, name, type_, reflected, compare_to):
        """Exclude alembic's bookkeeping table and externally-owned tables."""
        if type_ == 'table' and (name == 'alembic_version'
                                 or name in external.TABLES):
            return False

        return super(_TestModelsMigrations, self).include_object(
            object_, name, type_, reflected, compare_to)

    def filter_metadata_diff(self, diff):
        return list(filter(self.remove_unrelated_errors, diff))

    # Remove some difference that are not mistakes just specific of
    # dialects, etc
    def remove_unrelated_errors(self, element):
        """Return False for diff entries that are dialect artifacts.

        MySQL implicitly creates indexes for foreign keys and primary keys,
        so 'remove_index' entries matching those are not real mismatches.
        Likewise MySQL reports defaults on primary-key columns differently.
        """
        insp = sqlalchemy.engine.reflection.Inspector.from_engine(
            self.get_engine())
        dialect = self.get_engine().dialect.name
        if isinstance(element, tuple):
            if dialect == 'mysql' and element[0] == 'remove_index':
                table_name = element[1].table.name
                for fk in insp.get_foreign_keys(table_name):
                    if fk['name'] == element[1].name:
                        return False
                cols = [c.name for c in element[1].expressions]
                for col in cols:
                    if col in insp.get_pk_constraint(
                            table_name)['constrained_columns']:
                        return False
        else:
            for modified, _, table, column, _, _, new in element:
                if modified == 'modify_default' and dialect == 'mysql':
                    constrained = insp.get_pk_constraint(table)
                    if column in constrained['constrained_columns']:
                        return False
        return True

    def test_upgrade_expand_branch(self):
        # Verify that "command neutron-db-manage upgrade --expand" works
        # without errors. Check this for both MySQL and PostgreSQL.
        upgrade(self.engine, self.alembic_config,
                branch_name='%s@head' % migration.EXPAND_BRANCH)

    def test_upgrade_contract_branch(self):
        # Verify that "command neutron-db-manage upgrade --contract" works
        # without errors. Check this for both MySQL and PostgreSQL.
        upgrade(self.engine, self.alembic_config,
                branch_name='%s@head' % migration.CONTRACT_BRANCH)

    @contextmanager
    def _listener(self, engine, listener_func):
        """Attach *listener_func* to engine's before_execute for the scope."""
        try:
            event.listen(engine, 'before_execute', listener_func)
            yield
        finally:
            event.remove(engine, 'before_execute',
                         listener_func)

    def test_branches(self):
        """Verify expand migrations never drop and contract never create."""

        drop_exceptions = collections.defaultdict(list)
        creation_exceptions = collections.defaultdict(list)

        def find_migration_exceptions():
            # Due to some misunderstandings and some conscious decisions,
            # there may be some expand migrations which drop elements and
            # some contract migrations which create elements. These excepted
            # elements must be returned by a method in the script itself.
            # The names of the method must be 'contract_creation_exceptions'
            # or 'expand_drop_exceptions'. The methods must have a docstring
            # explaining the reason for the exception.
            #
            # Here we build lists of the excepted elements and verify that
            # they are documented.
            script = alembic_script.ScriptDirectory.from_config(
                self.alembic_config)
            for m in list(script.walk_revisions(base='base', head='heads')):
                branches = m.branch_labels or []
                if migration.CONTRACT_BRANCH in branches:
                    method_name = 'contract_creation_exceptions'
                    exceptions_dict = creation_exceptions
                elif migration.EXPAND_BRANCH in branches:
                    method_name = 'expand_drop_exceptions'
                    exceptions_dict = drop_exceptions
                else:
                    continue
                get_excepted_elements = getattr(m.module, method_name, None)
                if not get_excepted_elements:
                    continue
                # Functions always have a __doc__ attribute (None when no
                # docstring is written), so getattr's default never applied
                # and len(None) raised TypeError instead of the intended
                # self.fail below.  Normalize None to "" explicitly.
                explanation = getattr(get_excepted_elements,
                                      '__doc__', "") or ""
                if len(explanation) < 1:
                    self.fail("%s() requires docstring with explanation" %
                              '.'.join([m.module.__name__,
                                        get_excepted_elements.__name__]))
                for sa_type, elements in get_excepted_elements().items():
                    exceptions_dict[sa_type].extend(elements)

        def is_excepted_sqla(clauseelement, exceptions):
            """Identify excepted operations that are allowed for the branch."""
            element = clauseelement.element
            element_name = element.name
            if isinstance(element, sqlalchemy.Index):
                element_name = element.table.name
            for sa_type_, excepted_names in exceptions.items():
                if isinstance(element, sa_type_):
                    if element_name in excepted_names:
                        return True

        def is_excepted_alembic(clauseelement, exceptions):
            """Identify excepted operations that are allowed for the branch."""
            # For alembic the clause is AddColumn or DropColumn
            column = clauseelement.column.name
            table = clauseelement.column.table.name
            element_name = '.'.join([table, column])
            for alembic_type, excepted_names in exceptions.items():
                if alembic_type == sqlalchemy.Column:
                    if element_name in excepted_names:
                        return True

        def is_allowed(clauseelement, exceptions, disallowed_ops):
            if (isinstance(clauseelement, disallowed_ops['sqla']) and
                    hasattr(clauseelement, 'element')):
                return is_excepted_sqla(clauseelement, exceptions)
            if isinstance(clauseelement, disallowed_ops['alembic']):
                return is_excepted_alembic(clauseelement, exceptions)
            return True

        def check_expand_branch(conn, clauseelement, multiparams, params):
            if not is_allowed(clauseelement, drop_exceptions, DROP_OPERATIONS):
                self.fail("Migration in expand branch contains drop command")

        def check_contract_branch(conn, clauseelement, multiparams, params):
            if not is_allowed(clauseelement, creation_exceptions,
                              CREATION_OPERATIONS):
                self.fail("Migration in contract branch contains create "
                          "command")

        find_migration_exceptions()
        engine = self.engine
        cfg.CONF.set_override('connection', engine.url, group='database')
        with engine.begin() as connection:
            self.alembic_config.attributes['connection'] = connection
            migration.do_alembic_command(self.alembic_config, 'upgrade',
                                         'kilo')

            with self._listener(engine, check_expand_branch):
                migration.do_alembic_command(
                    self.alembic_config, 'upgrade',
                    '%s@head' % migration.EXPAND_BRANCH)

            with self._listener(engine, check_contract_branch):
                migration.do_alembic_command(
                    self.alembic_config, 'upgrade',
                    '%s@head' % migration.CONTRACT_BRANCH)

    def _test_has_offline_migrations(self, revision, expected):
        """Upgrade to *revision*, then compare has_offline_migrations()."""
        engine = self.get_engine()
        cfg.CONF.set_override('connection', engine.url, group='database')
        migration.do_alembic_command(self.alembic_config, 'upgrade', revision)
        self.assertEqual(expected,
                         migration.has_offline_migrations(self.alembic_config,
                                                          'unused'))

    def test_has_offline_migrations_pending_contract_scripts(self):
        self._test_has_offline_migrations('kilo', True)

    def test_has_offline_migrations_all_heads_upgraded(self):
        self._test_has_offline_migrations('heads', False)
class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin,
                                _TestModelsMigrations,
                                testlib_api.SqlTestCaseLight):

    def test_check_mysql_engine(self):
        """Ensure every table created by the migrations uses InnoDB."""
        engine = self.get_engine()
        cfg.CONF.set_override('connection', engine.url, group='database')
        with engine.begin() as connection:
            self.alembic_config.attributes['connection'] = connection
            migration.do_alembic_command(self.alembic_config, 'upgrade',
                                         'heads')
            insp = sqlalchemy.engine.reflection.Inspector.from_engine(engine)
            # Test that table creation on MySQL only builds InnoDB tables
            tables = insp.get_table_names()
            self.assertTrue(len(tables) > 0,
                            "No tables found. Wrong schema?")
            res = []
            for table in tables:
                if table == 'alembic_version':
                    continue
                if insp.get_table_options(table)['mysql_engine'] != 'InnoDB':
                    res.append(table)
            self.assertEqual(0, len(res), "%s non InnoDB tables created" % res)
class TestModelsMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin,
                               _TestModelsMigrations,
                               testlib_api.SqlTestCaseLight):
    # PostgreSQL flavour of the models/migrations sync suite; all test
    # methods are inherited from _TestModelsMigrations.
    pass
class TestSanityCheck(testlib_api.SqlTestCaseLight):
    """Exercise the pre-upgrade sanity checks shipped with specific
    migration scripts: each test builds the table the script inspects,
    seeds it with an invalid (duplicate) state, and asserts that the
    script's check_sanity() raises its dedicated exception.
    """

    BUILD_SCHEMA = False

    def setUp(self):
        super(TestSanityCheck, self).setUp()
        self.alembic_config = migration.get_neutron_config()
        self.alembic_config.neutron_config = cfg.CONF

    def test_check_sanity_1df244e556f5(self):
        # Duplicate (router_id, l3_agent_id) bindings must be rejected.
        ha_router_agent_port_bindings = sqlalchemy.Table(
            'ha_router_agent_port_bindings', sqlalchemy.MetaData(),
            sqlalchemy.Column('port_id', sqlalchemy.String(36)),
            sqlalchemy.Column('router_id', sqlalchemy.String(36)),
            sqlalchemy.Column('l3_agent_id', sqlalchemy.String(36)))

        with self.engine.connect() as conn:
            ha_router_agent_port_bindings.create(conn)
            conn.execute(ha_router_agent_port_bindings.insert(), [
                {'port_id': '1234', 'router_id': '12345',
                 'l3_agent_id': '123'},
                {'port_id': '12343', 'router_id': '12345',
                 'l3_agent_id': '123'}
            ])
            script_dir = alembic_script.ScriptDirectory.from_config(
                self.alembic_config)
            script = script_dir.get_revision("1df244e556f5").module
            self.assertRaises(script.DuplicateL3HARouterAgentPortBinding,
                              script.check_sanity, conn)

    def test_check_sanity_030a959ceafa(self):
        # Duplicate port_id rows in routerports must be rejected.
        routerports = sqlalchemy.Table(
            'routerports', sqlalchemy.MetaData(),
            sqlalchemy.Column('router_id', sqlalchemy.String(36)),
            sqlalchemy.Column('port_id', sqlalchemy.String(36)),
            sqlalchemy.Column('port_type', sqlalchemy.String(255)))

        with self.engine.connect() as conn:
            routerports.create(conn)
            conn.execute(routerports.insert(), [
                {'router_id': '1234', 'port_id': '12345',
                 'port_type': '123'},
                {'router_id': '12343', 'port_id': '12345',
                 'port_type': '1232'}
            ])
            script_dir = alembic_script.ScriptDirectory.from_config(
                self.alembic_config)
            script = script_dir.get_revision("030a959ceafa").module
            self.assertRaises(script.DuplicatePortRecordinRouterPortdatabase,
                              script.check_sanity, conn)

    def test_check_sanity_6b461a21bcfc(self):
        # Two floating IPs for the same fixed IP/port must be rejected.
        floatingips = sqlalchemy.Table(
            'floatingips', sqlalchemy.MetaData(),
            sqlalchemy.Column('floating_network_id', sqlalchemy.String(36)),
            sqlalchemy.Column('fixed_port_id', sqlalchemy.String(36)),
            sqlalchemy.Column('fixed_ip_address', sqlalchemy.String(64)))

        with self.engine.connect() as conn:
            floatingips.create(conn)
            conn.execute(floatingips.insert(), [
                {'floating_network_id': '12345',
                 'fixed_port_id': '1234567',
                 'fixed_ip_address': '12345678'},
                {'floating_network_id': '12345',
                 'fixed_port_id': '1234567',
                 'fixed_ip_address': '12345678'}
            ])
            script_dir = alembic_script.ScriptDirectory.from_config(
                self.alembic_config)
            script = script_dir.get_revision("6b461a21bcfc").module
            self.assertRaises(script.DuplicateFloatingIPforOneFixedIP,
                              script.check_sanity, conn)
class TestWalkDowngrade(oslotest_base.BaseTestCase):

    def setUp(self):
        super(TestWalkDowngrade, self).setUp()
        self.alembic_config = migration.get_neutron_config()
        self.alembic_config.neutron_config = cfg.CONF

    def test_no_downgrade(self):
        """Fail if any migration script still defines a downgrade() hook."""
        script_dir = alembic_script.ScriptDirectory.from_config(
            self.alembic_config)
        failed_revisions = []
        for version in script_dir.walk_revisions(base='base', head='heads'):
            if hasattr(version.module, 'downgrade'):
                failed_revisions.append(version.revision)

        if failed_revisions:
            self.fail('Migrations %s have downgrade' % failed_revisions)
        return True
class _TestWalkMigrations(object):
    '''Framework for testing schema migrations on different backends.'''

    BUILD_SCHEMA = False

    def execute_cmd(self, cmd=None):
        """Run *cmd* in a shell and fail the test on a non-zero exit."""
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, shell=True)
        output = proc.communicate()[0]
        self.assertEqual(0, proc.returncode, 'Command failed with '
                         'output:\n%s' % output)

    def _get_alembic_config(self, uri):
        """Build an alembic config pointed at the database *uri*."""
        db_config = migration.get_neutron_config()
        self.script_dir = alembic_script.ScriptDirectory.from_config(db_config)
        db_config.neutron_config = cfg.CONF
        db_config.neutron_config.set_override('connection',
                                              six.text_type(uri),
                                              group='database')
        return db_config

    def _revisions(self):
        """Provides revisions and its parent revisions.

        :return: List of tuples. Every tuple contains revision and its parent
        revision.
        """
        ordered = reversed(list(self.script_dir.walk_revisions("base",
                                                               "heads")))
        for rev in ordered:
            # Destination, current
            yield rev.revision, rev.down_revision

    def _migrate_up(self, config, engine, dest, curr, with_data=False):
        """Upgrade to *dest*, optionally running the pre/post data hooks."""
        data = None
        if with_data:
            pre_upgrade = getattr(
                self, "_pre_upgrade_%s" % dest, None)
            if pre_upgrade:
                data = pre_upgrade(engine)

        migration.do_alembic_command(config, 'upgrade', dest)

        if with_data:
            check = getattr(self, "_check_%s" % dest, None)
            if check and data:
                check(engine, data)

    def test_walk_versions(self):
        """Test migrations ability to upgrade and downgrade.

        """
        config = self._get_alembic_config(self.engine.url)
        for dest, curr in self._revisions():
            self._migrate_up(config, self.engine, dest, curr, with_data=True)
class TestWalkMigrationsMysql(testlib_api.MySQLTestCaseMixin,
                              _TestWalkMigrations,
                              testlib_api.SqlTestCaseLight):
    # MySQL flavour of the walk-migrations suite; all tests are inherited.
    pass
class TestWalkMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin,
                             _TestWalkMigrations,
                             testlib_api.SqlTestCaseLight):
    # PostgreSQL flavour of the walk-migrations suite; all tests are
    # inherited.
    pass
| |
"""
Todo: cross-check the F-value with stats model
"""
import itertools
import numpy as np
from scipy import stats, sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_not_in
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils import safe_mask
from sklearn.datasets.samples_generator import (make_classification,
make_regression)
from sklearn.feature_selection import (chi2, f_classif, f_oneway, f_regression,
SelectPercentile, SelectKBest,
SelectFpr, SelectFdr, SelectFwe,
GenericUnivariateSelect)
##############################################################################
# Test the score functions
def test_f_oneway_vs_scipy_stats():
    """Check that our f_oneway agrees with the scipy.stats implementation."""
    rng = np.random.RandomState(0)
    sample_a = rng.randn(10, 3)
    sample_b = 1 + rng.randn(10, 3)
    f_scipy, p_scipy = stats.f_oneway(sample_a, sample_b)
    f_ours, p_ours = f_oneway(sample_a, sample_b)
    assert_true(np.allclose(f_scipy, f_ours))
    assert_true(np.allclose(p_scipy, p_ours))
def test_f_oneway_ints():
    # Smoke test f_oneway on integers: check that it does not raise casting
    # errors with recent numpys, and that it matches the float code path.
    rng = np.random.RandomState(0)
    X = rng.randint(10, size=(10, 10))
    y = np.arange(10)
    fint, pint = f_oneway(X, y)

    # test that is gives the same result as with float
    # (np.float was a deprecated alias of the builtin float, removed in
    # NumPy 1.24 -- use np.float64 explicitly)
    f, p = f_oneway(X.astype(np.float64), y)
    assert_array_almost_equal(f, fint, decimal=4)
    assert_array_almost_equal(p, pint, decimal=4)
def test_f_classif():
    """The F test should score informative features of a simulated
    classification problem above the noise features, identically for
    dense and sparse input.
    """
    data_params = dict(n_samples=200, n_features=20, n_informative=3,
                       n_redundant=2, n_repeated=0, n_classes=8,
                       n_clusters_per_class=1, flip_y=0.0, class_sep=10,
                       shuffle=False, random_state=0)
    X, y = make_classification(**data_params)

    F_dense, pv_dense = f_classif(X, y)
    F_sparse, pv_sparse = f_classif(sparse.csr_matrix(X), y)
    assert_true((F_dense > 0).all())
    assert_true((pv_dense > 0).all())
    assert_true((pv_dense < 1).all())
    assert_true((pv_dense[:5] < 0.05).all())
    assert_true((pv_dense[5:] > 1.e-4).all())
    assert_array_almost_equal(F_sparse, F_dense)
    assert_array_almost_equal(pv_sparse, pv_dense)
def test_f_regression():
    """The F test should score informative features of a simulated
    regression problem above the noise features.
    """
    X, y = make_regression(n_samples=200, n_features=20, n_informative=5,
                           shuffle=False, random_state=0)

    F_dense, pv_dense = f_regression(X, y)
    assert_true((F_dense > 0).all())
    assert_true((pv_dense > 0).all())
    assert_true((pv_dense < 1).all())
    assert_true((pv_dense[:5] < 0.05).all())
    assert_true((pv_dense[5:] > 1.e-4).all())

    # Without centering, the sparse and dense code paths must agree.
    F_dense, pv_dense = f_regression(X, y, center=False)
    F_sparse, pv_sparse = f_regression(sparse.csr_matrix(X), y, center=False)
    assert_array_almost_equal(F_sparse, F_dense)
    assert_array_almost_equal(pv_sparse, pv_dense)
def test_f_regression_input_dtype():
    """
    Test whether f_regression returns the same value
    for any numeric data_type
    """
    rng = np.random.RandomState(0)
    X = rng.rand(10, 20)
    # np.int / np.float were deprecated aliases of the builtin types
    # (removed in NumPy 1.24); use the explicit sized dtypes instead.
    y = np.arange(10).astype(np.int64)

    F1, pv1 = f_regression(X, y)
    F2, pv2 = f_regression(X, y.astype(np.float64))
    assert_array_almost_equal(F1, F2, 5)
    assert_array_almost_equal(pv1, pv2, 5)
def test_f_regression_center():
    """f_regression must adjust degrees of freedom per the 'center' flag.

    With two centered variates there is a simple closed-form relation
    between the F-score with and without centering.
    """
    # Toy design with zero mean.
    X = np.arange(-5, 6).reshape(-1, 1)
    n_samples = X.size
    Y = np.ones(n_samples)
    Y[::2] *= -1.
    Y[0] = 0.  # make Y zero-mean as well

    F_centered, _ = f_regression(X, Y, center=True)
    F_raw, _ = f_regression(X, Y, center=False)
    assert_array_almost_equal(
        F_centered * (n_samples - 1.) / (n_samples - 2.), F_raw)
    assert_almost_equal(F_raw[0], 0.232558139)  # value from statsmodels OLS
def test_f_classif_multi_class():
    """The F test should give meaningful scores on a simulated
    multi-class problem.
    """
    data_params = dict(n_samples=200, n_features=20, n_informative=3,
                       n_redundant=2, n_repeated=0, n_classes=8,
                       n_clusters_per_class=1, flip_y=0.0, class_sep=10,
                       shuffle=False, random_state=0)
    X, y = make_classification(**data_params)

    F, pv = f_classif(X, y)
    assert_true((F > 0).all())
    assert_true((pv > 0).all())
    assert_true((pv < 1).all())
    assert_true((pv[:5] < 0.05).all())
    assert_true((pv[5:] > 1.e-4).all())
def test_select_percentile_classif():
    """Percentile-based selection should recover the informative features
    of a simple classification problem.
    """
    data_params = dict(n_samples=200, n_features=20, n_informative=3,
                       n_redundant=2, n_repeated=0, n_classes=8,
                       n_clusters_per_class=1, flip_y=0.0, class_sep=10,
                       shuffle=False, random_state=0)
    X, y = make_classification(**data_params)

    filt = SelectPercentile(f_classif, percentile=25)
    X_sel = filt.fit(X, y).transform(X)
    X_sel_generic = GenericUnivariateSelect(f_classif, mode='percentile',
                                            param=25).fit(X, y).transform(X)
    assert_array_equal(X_sel, X_sel_generic)
    expected_support = np.array([1] * 5 + [0] * 15)
    assert_array_equal(filt.get_support(), expected_support)
def test_select_percentile_classif_sparse():
    """Percentile-based selection must behave identically on sparse input,
    and inverse_transform must keep the sparse format and support mask.
    """
    data_params = dict(n_samples=200, n_features=20, n_informative=3,
                       n_redundant=2, n_repeated=0, n_classes=8,
                       n_clusters_per_class=1, flip_y=0.0, class_sep=10,
                       shuffle=False, random_state=0)
    X, y = make_classification(**data_params)
    X = sparse.csr_matrix(X)

    filt = SelectPercentile(f_classif, percentile=25)
    X_sel = filt.fit(X, y).transform(X)
    X_sel_generic = GenericUnivariateSelect(f_classif, mode='percentile',
                                            param=25).fit(X, y).transform(X)
    assert_array_equal(X_sel.toarray(), X_sel_generic.toarray())
    support = filt.get_support()
    expected_support = np.array([1] * 5 + [0] * 15)
    assert_array_equal(support, expected_support)

    X_inv = filt.inverse_transform(X_sel_generic)
    assert_true(sparse.issparse(X_inv))
    support_mask = safe_mask(X_inv, support)
    assert_equal(X_inv.shape, X.shape)
    assert_array_equal(X_inv[:, support_mask].toarray(), X_sel.toarray())
    # Columns outside the support must stay empty.
    assert_equal(X_inv.getnnz(), X_sel.getnnz())
##############################################################################
# Test univariate selection in classification settings
def test_select_kbest_classif():
    """k-best selection should recover the informative features of a
    simple classification problem.
    """
    data_params = dict(n_samples=200, n_features=20, n_informative=3,
                       n_redundant=2, n_repeated=0, n_classes=8,
                       n_clusters_per_class=1, flip_y=0.0, class_sep=10,
                       shuffle=False, random_state=0)
    X, y = make_classification(**data_params)

    filt = SelectKBest(f_classif, k=5)
    X_sel = filt.fit(X, y).transform(X)
    X_sel_generic = GenericUnivariateSelect(
        f_classif, mode='k_best', param=5).fit(X, y).transform(X)
    assert_array_equal(X_sel, X_sel_generic)
    expected_support = np.array([1] * 5 + [0] * 15)
    assert_array_equal(filt.get_support(), expected_support)
def test_select_kbest_all():
    """k='all' must act as the identity transform."""
    X, y = make_classification(n_samples=20, n_features=10,
                               shuffle=False, random_state=0)
    selector = SelectKBest(f_classif, k='all')
    assert_array_equal(X, selector.fit(X, y).transform(X))
def test_select_kbest_zero():
    """k=0 must select no features and warn when transforming."""
    X, y = make_classification(n_samples=20, n_features=10,
                               shuffle=False, random_state=0)
    selector = SelectKBest(f_classif, k=0)
    selector.fit(X, y)
    assert_array_equal(selector.get_support(), np.zeros(10, dtype=bool))
    X_selected = assert_warns_message(UserWarning, 'No features were selected',
                                      selector.transform, X)
    assert_equal(X_selected.shape, (20, 0))
def test_select_fpr_classif():
    """FPR-based selection should recover the informative features of a
    simple classification problem.
    """
    data_params = dict(n_samples=200, n_features=20, n_informative=3,
                       n_redundant=2, n_repeated=0, n_classes=8,
                       n_clusters_per_class=1, flip_y=0.0, class_sep=10,
                       shuffle=False, random_state=0)
    X, y = make_classification(**data_params)

    filt = SelectFpr(f_classif, alpha=0.0001)
    X_sel = filt.fit(X, y).transform(X)
    X_sel_generic = GenericUnivariateSelect(
        f_classif, mode='fpr', param=0.0001).fit(X, y).transform(X)
    assert_array_equal(X_sel, X_sel_generic)
    expected_support = np.array([1] * 5 + [0] * 15)
    assert_array_equal(filt.get_support(), expected_support)
def test_select_fdr_classif():
    """
    Test whether the relative univariate feature selection
    gets the correct items in a simple classification problem
    with the fdr heuristic
    """
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    univariate_filter = SelectFdr(f_classif, alpha=0.0001)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(
        f_classif, mode='fdr', param=0.0001).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    # Only the 5 informative features should survive FDR control.
    support = univariate_filter.get_support()
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    assert_array_equal(support, gtruth)
def test_select_fwe_classif():
    """
    Test whether the relative univariate feature selection
    gets the correct items in a simple classification problem
    with the fwe heuristic
    """
    X, y = make_classification(n_samples=200, n_features=20,
                               n_informative=3, n_redundant=2,
                               n_repeated=0, n_classes=8,
                               n_clusters_per_class=1, flip_y=0.0,
                               class_sep=10, shuffle=False, random_state=0)
    univariate_filter = SelectFwe(f_classif, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(
        f_classif, mode='fwe', param=0.01).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    gtruth = np.zeros(20)
    gtruth[:5] = 1
    # The support is a 0/1 mask, so exact comparison is appropriate here
    # (and consistent with the sibling tests above).
    assert_array_equal(support, gtruth)
##############################################################################
# Test univariate selection in regression settings
def assert_best_scores_kept(score_filter):
    """Check that the selector kept exactly the highest-scoring features."""
    scores = score_filter.scores_
    support = score_filter.get_support()
    n_kept = support.sum()
    assert_array_equal(np.sort(scores[support]),
                       np.sort(scores)[-n_kept:])
def test_select_percentile_regression():
    """Percentile-based selection should recover the 5 informative
    features of a simple regression problem, and inverse_transform
    should map selected columns back in place.
    """
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)

    filt = SelectPercentile(f_regression, percentile=25)
    X_sel = filt.fit(X, y).transform(X)
    assert_best_scores_kept(filt)
    X_sel_generic = GenericUnivariateSelect(
        f_regression, mode='percentile', param=25).fit(X, y).transform(X)
    assert_array_equal(X_sel, X_sel_generic)
    support = filt.get_support()
    expected_support = np.array([1] * 5 + [0] * 15)
    assert_array_equal(support, expected_support)

    X_masked = X.copy()
    X_masked[:, np.logical_not(support)] = 0
    assert_array_equal(X_masked, filt.inverse_transform(X_sel))
    # Check inverse_transform respects dtype
    assert_array_equal(X_masked.astype(bool),
                       filt.inverse_transform(X_sel.astype(bool)))
def test_select_percentile_regression_full():
    """percentile=100 must keep every feature."""
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)

    filt = SelectPercentile(f_regression, percentile=100)
    X_sel = filt.fit(X, y).transform(X)
    assert_best_scores_kept(filt)
    X_sel_generic = GenericUnivariateSelect(
        f_regression, mode='percentile', param=100).fit(X, y).transform(X)
    assert_array_equal(X_sel, X_sel_generic)
    assert_array_equal(filt.get_support(), np.ones(20))
def test_invalid_percentile():
    """Out-of-range percentiles must be rejected by both selectors."""
    X, y = make_regression(n_samples=10, n_features=20,
                           n_informative=2, shuffle=False, random_state=0)

    for bad_percentile in (-1, 101):
        assert_raises(ValueError,
                      SelectPercentile(percentile=bad_percentile).fit, X, y)
        assert_raises(ValueError,
                      GenericUnivariateSelect(mode='percentile',
                                              param=bad_percentile).fit, X, y)
def test_select_kbest_regression():
    """Check SelectKBest(k=5) keeps exactly the 5 informative regression
    features (first 5 columns, since shuffle=False), and that
    GenericUnivariateSelect in 'k_best' mode agrees.
    """
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)
    selector = SelectKBest(f_regression, k=5)
    X_reduced = selector.fit(X, y).transform(X)
    assert_best_scores_kept(selector)
    X_reduced_generic = GenericUnivariateSelect(
        f_regression, mode='k_best', param=5).fit(X, y).transform(X)
    assert_array_equal(X_reduced, X_reduced_generic)
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(selector.get_support(), expected_support)
def test_select_fpr_regression():
    """
    Test whether the relative univariate feature selection
    gets the correct items in a simple regression problem
    with the fpr heuristic
    """
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)
    univariate_filter = SelectFpr(f_regression, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(
        f_regression, mode='fpr', param=0.01).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    # The 5 informative features must always be kept; FPR only bounds the
    # false-positive rate, so allow a few false discoveries among the rest.
    # Use the builtin `bool`: the `np.bool` alias is deprecated and was
    # removed in NumPy 1.24.
    assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
    assert_less(np.sum(support[5:] == 1), 3)
def test_select_fdr_regression():
    """Check SelectFdr on regression data: at alpha=0.01 only the 5
    informative features (first 5 columns) should survive, and the
    GenericUnivariateSelect 'fdr' mode must agree.
    """
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)
    selector = SelectFdr(f_regression, alpha=0.01)
    X_reduced = selector.fit(X, y).transform(X)
    X_reduced_generic = GenericUnivariateSelect(
        f_regression, mode='fdr', param=0.01).fit(X, y).transform(X)
    assert_array_equal(X_reduced, X_reduced_generic)
    expected_support = np.zeros(20)
    expected_support[:5] = 1
    assert_array_equal(selector.get_support(), expected_support)
def test_select_fwe_regression():
    """
    Test whether the relative univariate feature selection
    gets the correct items in a simple regression problem
    with the fwe heuristic
    """
    X, y = make_regression(n_samples=200, n_features=20,
                           n_informative=5, shuffle=False, random_state=0)
    univariate_filter = SelectFwe(f_regression, alpha=0.01)
    X_r = univariate_filter.fit(X, y).transform(X)
    X_r2 = GenericUnivariateSelect(
        f_regression, mode='fwe', param=0.01).fit(X, y).transform(X)
    assert_array_equal(X_r, X_r2)
    support = univariate_filter.get_support()
    # The 5 informative features must always be kept; FWE control allows at
    # most a very small number of false discoveries among the rest.
    # Use the builtin `bool`: the `np.bool` alias is deprecated and was
    # removed in NumPy 1.24.
    assert_array_equal(support[:5], np.ones((5, ), dtype=bool))
    assert_less(np.sum(support[5:] == 1), 2)
def test_selectkbest_tiebreaking():
    """Test whether SelectKBest actually selects k features in case of ties.
    Prior to 0.11, SelectKBest would return more features than requested.
    """
    # Each X is used as a single sample; the dummy scorer returns the feature
    # values themselves, so repeated 0s/1s produce tied scores.
    Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]
    dummy_score = lambda X, y: (X[0], X[0])
    for X in Xs:
        sel = SelectKBest(dummy_score, k=1)
        X1 = ignore_warnings(sel.fit_transform)([X], y)
        # Exactly k columns must be kept despite the tied scores.
        assert_equal(X1.shape[1], 1)
        assert_best_scores_kept(sel)
        sel = SelectKBest(dummy_score, k=2)
        X2 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X2.shape[1], 2)
        assert_best_scores_kept(sel)
def test_selectpercentile_tiebreaking():
    """Test if SelectPercentile selects the right n_features in case of ties.
    """
    # Each X is used as a single sample; the dummy scorer returns the feature
    # values themselves, so repeated 0s/1s produce tied scores.
    Xs = [[0, 1, 1], [0, 0, 1], [1, 0, 0], [1, 1, 0]]
    y = [1]
    dummy_score = lambda X, y: (X[0], X[0])
    for X in Xs:
        # 34% of 3 features rounds to exactly 1 feature.
        sel = SelectPercentile(dummy_score, percentile=34)
        X1 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X1.shape[1], 1)
        assert_best_scores_kept(sel)
        # 67% of 3 features rounds to exactly 2 features.
        sel = SelectPercentile(dummy_score, percentile=67)
        X2 = ignore_warnings(sel.fit_transform)([X], y)
        assert_equal(X2.shape[1], 2)
        assert_best_scores_kept(sel)
def test_tied_pvalues():
    """Test whether k-best and percentiles work with tied pvalues from chi2."""
    # chi2 assigns identical p-values to these features while their scores
    # differ, so selection must fall back on the scores and drop the
    # lowest-scoring feature (9998) regardless of column order.
    X0 = np.array([[10000, 9999, 9998], [1, 1, 1]])
    y = [0, 1]
    for perm in itertools.permutations((0, 1, 2)):
        X = X0[:, perm]
        for selector in (SelectKBest(chi2, k=2),
                         SelectPercentile(chi2, percentile=67)):
            Xt = selector.fit_transform(X, y)
            assert_equal(Xt.shape, (2, 2))
            assert_not_in(9998, Xt)
def test_tied_scores():
    """Test for stable sorting in k-best with tied scores."""
    X_train = np.array([[0, 0, 0], [1, 1, 1]])
    y_train = [0, 1]
    for n_features in [1, 2, 3]:
        sel = SelectKBest(chi2, k=n_features).fit(X_train, y_train)
        # NOTE(review): transform is handed a 1-D list here; this relies on
        # the sklearn version reshaping it to a single sample -- modern
        # versions require 2-D input. Confirm against the pinned version.
        X_test = sel.transform([0, 1, 2])
        # With all-equal scores, ties must break toward the last features.
        assert_array_equal(X_test[0], np.arange(3)[-n_features:])
def test_nans():
    """Assert that SelectKBest and SelectPercentile can handle NaNs."""
    # First feature has zero variance to confuse f_classif (ANOVA) and
    # make it return a NaN.
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    for select in (SelectKBest(f_classif, 2),
                   SelectPercentile(f_classif, percentile=67)):
        ignore_warnings(select.fit)(X, y)
        # The NaN-scored feature (column 0) must be dropped, keeping 1 and 2.
        assert_array_equal(select.get_support(indices=True), np.array([1, 2]))
def test_score_func_error():
    """A non-callable score_func must raise TypeError at fit time for every
    univariate selector class."""
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    selector_classes = [SelectKBest, SelectPercentile, SelectFwe,
                        SelectFdr, SelectFpr, GenericUnivariateSelect]
    for selector_class in selector_classes:
        assert_raises(TypeError, selector_class(score_func=10).fit, X, y)
def test_invalid_k():
    """k outside [0, n_features] must raise ValueError for both APIs."""
    X = [[0, 1, 0], [0, -1, -1], [0, .5, .5]]
    y = [1, 0, 1]
    for bad_k in (-1, 4):
        assert_raises(ValueError, SelectKBest(k=bad_k).fit, X, y)
        assert_raises(ValueError,
                      GenericUnivariateSelect(mode='k_best',
                                              param=bad_k).fit, X, y)
def test_f_classif_constant_feature():
    """Test that f_classif warns if a feature is constant throughout."""
    # Seed the data generation so the test is deterministic; the warning is
    # triggered by the constant column regardless of the other features.
    X, y = make_classification(n_samples=10, n_features=5, random_state=0)
    X[:, 0] = 2.0
    assert_warns(UserWarning, f_classif, X, y)
def test_no_feature_selected():
    """Selectors that reject everything must warn and return 0 columns."""
    rng = np.random.RandomState(0)
    # Generate random uncorrelated data: a strict univariate test should
    # rejects all the features
    X = rng.rand(40, 10)
    y = rng.randint(0, 4, size=40)
    strict_selectors = [
        SelectFwe(alpha=0.01).fit(X, y),
        SelectFdr(alpha=0.01).fit(X, y),
        SelectFpr(alpha=0.01).fit(X, y),
        SelectPercentile(percentile=0).fit(X, y),
        SelectKBest(k=0).fit(X, y),
    ]
    for selector in strict_selectors:
        # No feature may be marked as selected...
        assert_array_equal(selector.get_support(), np.zeros(10))
        # ...and transform must warn while returning an empty feature matrix.
        X_selected = assert_warns_message(
            UserWarning, 'No features were selected', selector.transform, X)
        assert_equal(X_selected.shape, (40, 0))
| |
# -*- coding: utf-8 -*-
"""
Parser is the base class for generated parsers and for the bootstrap parser
(the parser that parses Grako grammars).
Parser does memoization at the rule invocation level, and provides the
decorators, context managers, and iterators needed to make generated parsers
simple.
Parser is also in charge of dealing with comments, with the help of
the .buffering module.
Parser.parse() will take the text to parse directly, or an instance of the
.buffering.Buffer class.
"""
from __future__ import print_function, division, absolute_import, unicode_literals
import functools
from . import buffering
from .contexts import ParseContext, ParseInfo
from .exceptions import (FailedParse,
FailedToken,
FailedPattern,
FailedRef,
FailedSemantics,
MissingSemanticFor)
class CheckSemanticsMixin(object):
    """Mixin that makes missing semantic rules a hard error.

    By default a parser proceeds when no semantic handler is defined for a
    rule; mixing this class in raises MissingSemanticFor instead.
    """

    def _find_semantic_rule(self, name):
        # Delegate the lookup, but refuse to continue without a handler.
        result = super(CheckSemanticsMixin, self)._find_semantic_rule(name)
        if result is None:
            raise MissingSemanticFor(name)
        return result
class Parser(ParseContext):
    """Base class for generated parsers and for the bootstrap parser.

    Adds rule-level memoization, token/pattern matching primitives, AST/CST
    bookkeeping and tracing on top of ParseContext.
    """

    def parse(self,
              text,
              rule_name,
              filename=None,
              semantics=None,
              comments_re=None,
              **kwargs):
        """Parse `text` starting from `rule_name` and return the result.

        `text` may be a plain string or an already-constructed
        buffering.Buffer; in the latter case `filename`/`comments_re` are
        ignored.  Extra keyword arguments are forwarded to the Buffer
        constructor (minus `parseinfo`/`trace`, which are consumed here).
        """
        try:
            if isinstance(text, buffering.Buffer):
                buffer = text
            else:
                if comments_re is None:
                    comments_re = self.comments_re
                buffer = buffering.Buffer(text,
                                          filename=filename,
                                          comments_re=comments_re,
                                          **kwargs)
            self.parseinfo = kwargs.pop('parseinfo', self.parseinfo)
            self.trace = kwargs.pop('trace', self.trace)
            self._reset_context(buffer, semantics=semantics)
            self._push_ast()
            rule = self._find_rule(rule_name)
            result = rule()
            self.ast[rule_name] = result
            return result
        finally:
            # Memoized entries are keyed on buffer positions and are only
            # valid for a single parse, so always drop the cache.
            self._memoization_cache = dict()

    @classmethod
    def rule_list(cls):
        """Return the names of the rule methods (methods named `_name_`,
        with an alphanumeric name between the underscores)."""
        import inspect
        methods = inspect.getmembers(cls, predicate=inspect.ismethod)
        result = []
        for m in methods:
            name = m[0]
            if name[0] != '_' or name[-1] != '_':
                continue
            if not name[1:-1].isalnum():
                continue
            result.append(name[1:-1])
        return result

    def result(self):
        """Return the AST built by the last parse."""
        return self.ast

    def _call(self, rule):
        """Invoke `rule` with tracing, CST bookkeeping and backtracking."""
        name = rule.__name__.strip('_')
        self._rule_stack.append(name)
        pos = self._pos
        try:
            self._trace_event('ENTER ')
            self._last_node = None
            node, newpos = self._invoke_rule(pos, rule, name)
            self._goto(newpos)
            self._trace_event('SUCCESS')
            self._add_cst_node(node)
            self._last_node = node
            return node
        except FailedParse:
            # On failure restore the input position so alternatives can
            # be attempted from the same spot.
            self._trace_event('FAILED')
            self._goto(pos)
            raise
        finally:
            self._rule_stack.pop()

    def _invoke_rule(self, pos, rule, name):
        """Run `rule` at `pos`, memoizing both successes and failures.

        Returns a (node, new_position) tuple; re-raises a cached exception
        when the same rule already failed at the same position.
        """
        key = (pos, rule)
        cache = self._memoization_cache
        if key in cache:
            result = cache[key]
            if isinstance(result, Exception):
                raise result
            return result
        self._push_ast()
        try:
            # Lower-case rule names skip leading whitespace/comments first.
            if name[0].islower():
                self._next_token()
            rule(self)
            node = self.ast
            if not node:
                # No named elements were captured; fall back to the CST.
                node = self.cst
            elif '@' in node:
                node = node['@']  # override the AST
            elif self.parseinfo:
                node.add('parseinfo', ParseInfo(self._buffer, name, pos, self._pos))
            semantic_rule = self._find_semantic_rule(name)
            if semantic_rule:
                try:
                    node = semantic_rule(node)
                except FailedSemantics as e:
                    self._error(str(e), FailedParse)
            cache[key] = result = (node, self._pos)
            return result
        except Exception as e:
            # Negative memoization: remember the failure too.
            cache[key] = e
            raise
        finally:
            self._pop_ast()

    def _token(self, token, node_name=None, force_list=False):
        """Match `token` exactly or raise FailedToken."""
        self._next_token()
        if self._buffer.match(token) is None:
            raise FailedToken(self._buffer, token)
        self._trace_match(token, node_name)
        self._add_ast_node(node_name, token, force_list)
        self._add_cst_node(token)
        self._last_node = token
        return token

    def _try_token(self, token, node_name=None, force_list=False):
        """Match `token` if possible; return None (no exception) on failure."""
        p = self._pos
        self._next_token()
        self._last_node = None
        if self._buffer.match(token) is None:
            self._goto(p)
            return None
        self._trace_match(token, node_name)
        self._add_ast_node(node_name, token, force_list)
        self._add_cst_node(token)
        self._last_node = token
        return token

    def _pattern(self, pattern, node_name=None, force_list=False):
        """Match the regular expression `pattern` or raise FailedPattern."""
        token = self._buffer.matchre(pattern)
        if token is None:
            raise FailedPattern(self._buffer, pattern)
        self._trace_match(token, pattern)
        self._add_ast_node(node_name, token, force_list)
        self._add_cst_node(token)
        self._last_node = token
        return token

    def _try_pattern(self, pattern, node_name=None, force_list=False):
        """Match `pattern` if possible; return None (no exception) on failure."""
        p = self._pos
        token = self._buffer.matchre(pattern)
        self._last_node = None
        if token is None:
            self._goto(p)
            return None
        self._trace_match(token)
        self._add_ast_node(node_name, token, force_list)
        self._add_cst_node(token)
        self._last_node = token
        return token

    def _find_rule(self, name):
        """Look up the bound rule method called `name` or raise FailedRef."""
        rule = getattr(self, name, None)
        if rule is None or not isinstance(rule, type(self._find_rule)):
            raise FailedRef(self._buffer, name)
        return rule

    def _eof(self):
        """True when the input buffer is exhausted."""
        return self._buffer.atend()

    def _eol(self):
        """True at end of the current line."""
        return self._buffer.ateol()

    def _check_eof(self):
        """Fail unless all input (modulo trailing whitespace/comments) was
        consumed."""
        self._next_token()
        if not self._buffer.atend():
            raise FailedParse(self._buffer, 'Expecting end of text.')
# decorator
def rule_def(rule):
    """Wrap a grammar rule method so that invoking it routes through
    Parser._call, which supplies memoization, tracing and AST bookkeeping."""
    @functools.wraps(rule)
    def invoke(self):
        return self._call(rule)
    return invoke
| |
#!/usr/bin/env python
"""Tests for config_lib classes."""
import os
from grr.lib import config_lib
from grr.lib import flags
from grr.lib import test_lib
class YamlConfigTest(test_lib.GRRBaseTest):
    """Test the Yaml config file support."""

    def testParsing(self):
        """Options, lists, interpolation and nested contexts parse correctly."""
        conf = config_lib.GrrConfigManager()
        conf.DEFINE_list("Section1.test_list", ["a", "b"], "A test integer.")
        conf.DEFINE_integer("Section1.test", 0, "An integer")
        # NOTE(review): the YAML nesting below is reconstructed -- the
        # assertions require "Windows Context" both nested inside
        # "Client Context" (value 10) and at top level (value 5); confirm
        # against the original file.
        conf.Initialize(parser=config_lib.YamlParser, data="""
# Configuration options can be written as long hand, dot separated parameters.
Section1.test: 2
Section1.test_list: x,y
Section2.test: 3%(Section1.test)

Client Context:
  Section1.test: 6
  Section1.test2: 1

  Windows Context:
    Section1.test: 10

Windows Context:
  Section1.test: 5
  Section1.test2: 2
""")
        self.assertEqual(conf["Section1.test"], 2)
        # Test interpolation works.
        self.assertEqual(conf["Section2.test"], "32")
        self.assertEqual(conf["Section1.test_list"], ["x", "y"])
        self.assertEqual(conf.Get("Section1.test_list",
                                  context=["Client Context", "Windows Context"]),
                         ["x", "y"])
        # Test that contexts affect option selection.
        self.assertEqual(
            conf.Get("Section1.test", context=["Client Context"]), 6)
        self.assertEqual(
            conf.Get("Section1.test", context=["Windows Context"]), 5)
        context = ["Client Context", "Windows Context"]
        # With both contexts active the nested (more specific) value wins.
        self.assertEqual(
            conf.Get("Section1.test", context=context), 10)
        context = ["Windows Context", "Client Context"]
        # Order of the context parameters should not matter.
        self.assertEqual(
            conf.Get("Section1.test", context=context), 10)

    def testConflictingContexts(self):
        """Test that conflicting contexts are resolved by precedence."""
        conf = config_lib.GrrConfigManager()
        conf.DEFINE_integer("Section1.test", 0, "An integer")
        conf.Initialize(parser=config_lib.YamlParser, data="""
Section1.test: 2

Client Context:
  Section1.test: 6

Platform:Windows:
  Section1.test: 10

Extra Context:
  Section1.test: 15
""")
        # Without contexts.
        self.assertEqual(conf.Get("Section1.test"), 2)
        # When running in the client context only.
        self.assertEqual(conf.Get("Section1.test", context=["Client Context"]), 6)
        # Later defined contexts (i.e. with later calls to AddContext()) are
        # stronger than earlier contexts. For example, contexts set the command line
        # --context option are stronger than contexts set by the running binary,
        # since they are added last.
        self.assertEqual(
            conf.Get("Section1.test",
                     context=["Client Context", "Platform:Windows"]),
            10)
        self.assertEqual(
            conf.Get("Section1.test",
                     context=["Platform:Windows", "Client Context"]),
            6)

    def testBackslashes(self):
        """Backslashes in YAML block literals survive parsing/interpolation."""
        conf = config_lib.GrrConfigManager()
        conf.DEFINE_string("Section1.parameter", "", "A test.")
        conf.DEFINE_string("Section1.parameter2", "", "A test.")
        conf.DEFINE_string("Section1.parameter3", "", "A test.")
        conf.Initialize(parser=config_lib.YamlParser, data=r"""
Section1.parameter: |
  a\\b\\c\\d

Section1.parameter2: |
  %(parameter)\\e

Section1.parameter3: |
  \%(a\\b\\c\\d\)
""")
        self.assertEqual(conf.Get("Section1.parameter"), "a\\b\\c\\d")
        self.assertEqual(conf.Get("Section1.parameter2"), "a\\b\\c\\d\\e")
        self.assertEqual(conf.Get("Section1.parameter3"), "%(a\\b\\c\\d)")
class ConfigLibTest(test_lib.GRRBaseTest):
    """Tests for config functionality."""

    def testInit(self):
        """Testing initialization of a ConfigManager."""
        conf = config_lib.CONFIG
        # Check that the linux client have a different value from the windows
        # client.
        self.assertEqual(conf.Get("MemoryDriver.device_path",
                                  context=("Client", "Platform:Linux")),
                         "/dev/pmem")
        self.assertEqual(conf.Get("MemoryDriver.device_path",
                                  context=("Client", "Platform:Windows")),
                         r"\\.\pmem")

    def testSet(self):
        """Test setting options."""
        # Test access methods.
        conf = config_lib.GrrConfigManager()
        conf.DEFINE_string("NewSection1.new_option1", "Default Value", "Help")
        conf.Set("NewSection1.new_option1", "New Value1")
        self.assertEqual(conf["NewSection1.new_option1"], "New Value1")

    def testSave(self):
        """Save the config and ensure it still works."""
        conf = config_lib.GrrConfigManager()
        config_file = os.path.join(self.temp_dir, "writeback.yaml")
        conf.SetWriteBack(config_file)
        conf.DEFINE_string("NewSection1.new_option1", "Default Value", "Help")
        conf.Set("NewSection1.new_option1", "New Value1")
        conf.Write()
        # Reload from disk and verify the value survived the round trip.
        new_conf = config_lib.GrrConfigManager()
        new_conf.Initialize(config_file)
        self.assertEqual(new_conf["NewSection1.new_option1"], "New Value1")

    def testErrorDetection(self):
        """Check that invalid config files are detected immediately."""
        test_conf = """
[Section1]
test = val2"""
        conf = config_lib.GrrConfigManager()
        # Define test as an integer.
        conf.DEFINE_integer("Section1.test", 54, "A test integer.")
        conf.Initialize(data=test_conf)
        # This should raise since the config file is incorrect.
        errors = conf.Validate("Section1")
        self.assertTrue(
            "Invalid value val2 for Integer" in str(errors["Section1.test"]))

    def testEmptyClientPrivateKey(self):
        """Check an empty client private_key passes."""
        # Clone a test config object from the global config so it knows about Client
        # options.
        conf = config_lib.CONFIG.MakeNewConfig()
        conf.Initialize(data="""
[Client]
private_key =
driver_signing_public_key = -----BEGIN PUBLIC KEY-----
    MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALnfFW1FffeKPs5PLUhFOSkNrr9TDCOD
    QAI3WluLh0sW7/ro93eoIZ0FbipnTpzGkPpriONbSOXmxWNTo0b9ma8CAwEAAQ==
    -----END PUBLIC KEY-----
executable_signing_public_key = -----BEGIN PUBLIC KEY-----
    MFwwDQYJKoZIhvcNAQEBBQADSwAwSAJBALnfFW1FffeKPs5PLUhFOSkNrr9TDCOD
    QAI3WluLh0sW7/ro93eoIZ0FbipnTpzGkPpriONbSOXmxWNTo0b9ma8CAwEAAQ==
    -----END PUBLIC KEY-----
""")
        errors = conf.Validate(["Client"])
        self.assertItemsEqual(errors.keys(), [])

    def testEmptyClientKeys(self):
        """Check that empty other keys fail."""
        conf = config_lib.CONFIG.MakeNewConfig()
        conf.Initialize(data="""
[Client]
private_key =
driver_signing_public_key =
executable_signing_public_key =
certificate =
""")
        errors = conf.Validate(["Client"])
        # Only the two signing keys are required to be non-empty.
        self.assertItemsEqual(errors.keys(),
                              ["Client.driver_signing_public_key",
                               "Client.executable_signing_public_key"])

    def testAddOption(self):
        """Test that we can add options."""
        conf = config_lib.GrrConfigManager()
        conf.DEFINE_string("Section1.foobar", "test", "A test string.")
        conf.DEFINE_string("Section1.test", "test", "A test string.")
        conf.DEFINE_string("Section1.interpolated", "", "An interpolated string.")
        # This entry is not correct - the default is invalid.
        conf.DEFINE_integer("Section1.test_int", "string", "A test integer.")
        # The default value is invalid.
        errors = conf.Validate("Section1")
        self.assertTrue(
            "Invalid value string for Integer" in str(errors["Section1.test_int"]))
        conf.DEFINE_string("Section1.system", None, "The basic operating system.")
        conf.DEFINE_integer("Section1.test_int", 54, "A test integer.")
        conf.DEFINE_list("Section1.test_list", ["a", "b"], "A test integer.")
        conf.DEFINE_list("Section1.test_list2", ["a", "b"], "A test integer.")
        conf.Initialize(data="""
[Section1]
foobar = X
test_list = x,y

[Section2]
test_int = 34
interpolated = %(Section1.foobar)Y

[Section3]
test_int = 1
interpolated = %(%(Section1.foobar)|lower)Y
""")
        # Section not specified:
        self.assertRaises(config_lib.UnknownOption, conf.__getitem__, "a")
        # Test direct access.
        self.assertEqual(conf["Section1.foobar"], "X")
        self.assertEqual(conf["Section1.test_list"], ["x", "y"])
        self.assertEqual(conf["Section1.test_list2"], ["a", "b"])
        # Test default access.
        self.assertEqual(conf["Section1.test"], "test")
        # Test interpolation with full section name.
        self.assertEqual(conf["Section2.interpolated"], "XY")
        # Check that default values are typed.
        self.assertEqual(conf["Section1.test_int"], 54)
        # Test filter functions.
        self.assertEqual(conf["Section3.interpolated"], "xY")

    def testUnbalancedParenthesis(self):
        """Interpolation syntax errors and literal blocks behave as documented."""
        conf = config_lib.GrrConfigManager()
        conf.Initialize(data=r"""
[Section1]
foobar = X
foo = %(Section1.foobar)
foo1 = %(foo

# Unbalanced parenthesis
foo2 = foo)

# Unbalanced parenthesis is ok if escaped.
foo3 = foo\)

# Or if enclosed in a literal block.
foo6 = %{foo)}

foo4 = %{%(hello)}
foo5 = %{hello

# Literal blocks can also appear inside filter interpolations to prevent
# automatic expansions.

# This pull the environment variable "sectionX"
interpolation1 = %(section%(Section1.foobar)|env)

# But this means literally section%(Section1.foo):
interpolation2 = %(section%{%(Section1.foo)}|env)

literal = %{aff4:/C\.(?P<path>.\{1,16\}?)($|/.*)}
""")
        # Test direct access.
        self.assertEqual(conf["Section1.foo"], "X")
        self.assertRaises(config_lib.ConfigFormatError,
                          conf.__getitem__, "Section1.foo1")
        self.assertRaises(config_lib.ConfigFormatError,
                          conf.__getitem__, "Section1.foo2")
        self.assertEqual(conf["Section1.foo3"], "foo)")
        # Test literal expansion.
        self.assertEqual(conf["Section1.foo4"], "%(hello)")
        self.assertRaises(config_lib.ConfigFormatError,
                          conf.__getitem__, "Section1.foo5")
        self.assertEqual(conf["Section1.foo6"], "foo)")
        # The Env filter forces uppercase on args.
        os.environ["sectionX".upper()] = "1"
        os.environ["section%(Section1.foo)".upper()] = "2"
        self.assertEqual(conf["Section1.interpolation1"], "1")
        self.assertEqual(conf["Section1.interpolation2"], "2")
        # Test that Set() escapes - i.e. reading the value back will return exactly
        # the same as we wrote:
        conf.Set("Section1.foo6", "%(Section1.foo3)")
        self.assertEqual(conf["Section1.foo6"], "%(Section1.foo3)")
        self.assertEqual(conf.GetRaw("Section1.foo6"), r"\%(Section1.foo3\)")
        # OTOH when we write it raw, reading it back will interpolate:
        conf.SetRaw("Section1.foo6", "%(Section1.foo3)")
        self.assertEqual(conf["Section1.foo6"], "foo)")
        # A complex regex which gets literally expanded.
        self.assertEqual(
            conf["Section1.literal"], r"aff4:/C\.(?P<path>.{1,16}?)($|/.*)")

    def testDataTypes(self):
        """Validate() must report type errors and normalize typed options."""
        conf = config_lib.GrrConfigManager()
        conf.DEFINE_float("Section1.float", 0, "A float")
        conf.Initialize(parser=config_lib.YamlParser, data="Section1.float: abc")
        errors = conf.Validate("Section1")
        self.assertTrue(
            "Invalid value abc for Float" in str(errors["Section1.float"]))
        self.assertRaises(config_lib.ConfigFormatError, conf.Get, "Section1.float")
        conf.Initialize(parser=config_lib.YamlParser, data="Section1.float: 2")
        # Should have no errors now. Validate should normalize the value to a float.
        self.assertEqual(conf.Validate("Section1"), {})
        self.assertEqual(type(conf.Get("Section1.float")), float)
        conf.DEFINE_integer("Section1.int", 0, "An integer")
        conf.Initialize(parser=config_lib.YamlParser, data="Section1.int: 2.0")
        errors = conf.Validate("Section1")
        # Floats can not be coerced to an int because that will lose data.
        self.assertTrue(
            "Invalid value 2.0 for Integer" in str(errors["Section1.int"]))
        # A string can be coerced to an int if it makes sense:
        conf.Initialize(parser=config_lib.YamlParser, data="Section1.int: '2'")
        errors = conf.Validate("Section1")
        # NOTE: Python 2 code -- integers normalize to the `long` type.
        self.assertEqual(type(conf.Get("Section1.int")), long)
        conf.DEFINE_list("Section1.list", default=[], help="A list")
        self.assertEqual(type(conf.Get("Section1.list")), list)
        self.assertEqual(conf.Get("Section1.list"), [])
        conf.DEFINE_list("Section1.list2", default=["a", "2"], help="A list")
        self.assertEqual(type(conf.Get("Section1.list2")), list)
        self.assertEqual(conf.Get("Section1.list2"), ["a", "2"])
def main(argv):
    """Entry point: run this module's tests under the GRR test runner."""
    test_lib.GrrTestProgram(argv=argv)


if __name__ == "__main__":
    flags.StartMain(main)
| |
"""
Bulk rearrange locations.
This includes support for changing location types, changing locations' parents,
deleting things, and so on. See the spec doc for specifics:
https://docs.google.com/document/d/1gZFPP8yXjPazaJDP9EmFORi88R-jSytH6TTgMxTGQSk/
"""
import copy
from collections import Counter, defaultdict
from decimal import Decimal, InvalidOperation
from django.core.exceptions import ValidationError
from django.db import transaction
from django.utils.functional import cached_property
from django.utils.text import format_lazy
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from attr import attrib, attrs
from memoized import memoized
from dimagi.utils.chunked import chunked
from dimagi.utils.parsing import string_to_boolean
from corehq.apps.domain.models import Domain
from .const import (
LOCATION_SHEET_HEADERS,
LOCATION_SHEET_HEADERS_BASE,
LOCATION_SHEET_HEADERS_OPTIONAL,
LOCATION_TYPE_SHEET_HEADERS,
ROOT_LOCATION_TYPE,
)
from .models import LocationType, SQLLocation
from .tree_utils import BadParentError, CycleError, assert_no_cycles
from .util import get_location_data_model
class LocationExcelSheetError(Exception):
    """Raised when an uploaded workbook has missing or malformed sheets."""
    pass
class LocationUploadResult(object):
    """Accumulates messages, warnings and errors produced by an upload."""

    def __init__(self):
        self.messages, self.errors, self.warnings = [], [], []

    @property
    def success(self):
        # The upload succeeded iff no errors were recorded.
        return len(self.errors) == 0
def to_boolean(val):
    """Interpret a spreadsheet cell as a boolean; blank cells mean False."""
    if val == '':
        return False
    return string_to_boolean(val)
def strip(val):
    """Coerce a cell value to text and drop surrounding whitespace."""
    text = str(val)
    return text.strip()
@attrs(frozen=True)
class LocationTypeData(object):
    """read-only representation of location type attributes specified in an upload"""
    name = attrib(type=str, converter=strip)
    code = attrib(type=str, converter=strip)
    # Blank parent codes are normalized to the ROOT_LOCATION_TYPE sentinel.
    parent_code = attrib(type=str, converter=lambda code: code or ROOT_LOCATION_TYPE)
    do_delete = attrib(type=bool, converter=to_boolean)
    shares_cases = attrib(type=bool, converter=to_boolean)
    view_descendants = attrib(type=bool, converter=to_boolean)
    # Position of the row in the upload sheet (useful for error reporting).
    index = attrib(type=int)
class LocationTypeStub(object):
    """Pairs an uploaded LocationTypeData row with the existing LocationType
    (matched by code, if any) and decides what needs to be written."""

    # Simple attributes copied verbatim from the upload row onto the model.
    meta_data_attrs = ['name', 'code', 'shares_cases', 'view_descendants']

    def __init__(self, new_data, old_collection):
        self.new_data = new_data
        self.code = new_data.code
        self.parent_code = new_data.parent_code
        self.do_delete = new_data.do_delete
        self.old_collection = old_collection
        self.domain = old_collection.domain_name
        # The type is "new" when no existing type shares its code.
        self.old_object = self.old_collection.types_by_code.get(self.code)
        self.is_new = self.old_object is None

    @property
    @memoized
    def db_object(self):
        """The LocationType to save: a fresh instance for new types, a copy
        of the existing row otherwise, with uploaded attributes applied."""
        if self.is_new:
            obj = LocationType(domain=self.domain)
        else:
            obj = copy.copy(self.old_object)
        for attr in self.meta_data_attrs:
            setattr(obj, attr, getattr(self.new_data, attr))
        return obj

    @property
    @memoized
    def needs_save(self):
        """Whether this row requires a database write."""
        if self.is_new or self.do_delete:
            return True
        # check if any attributes are being updated
        for attr in self.meta_data_attrs:
            if getattr(self.old_object, attr) != getattr(self.new_data, attr):
                return True
        # check if the parent is being updated
        old_parent_code = self.old_object.parent_type.code \
            if self.old_object.parent_type else ROOT_LOCATION_TYPE
        if old_parent_code != self.parent_code:
            return True
        return False
def lowercase_string(val):
    """Normalize a cell value to stripped, lower-case text (site codes
    are matched case-insensitively)."""
    text = str(val)
    return text.strip().lower()
def maybe_decimal(val):
    """Parse a coordinate cell into a Decimal.

    Blank/falsy cells yield None; values that cannot be parsed are
    returned unchanged so validation can report them later.
    """
    if not val:
        return None
    try:
        parsed = Decimal(val)
    except InvalidOperation:
        return val  # invalid - caught later during validation
    return parsed
@attrs(frozen=True)
class LocationData(object):
    """read-only representation of location attributes specified in an upload"""
    name = attrib(type=str, converter=strip)
    # Site codes are matched case-insensitively, hence the lower-casing.
    site_code = attrib(type=str, converter=lowercase_string)
    location_type = attrib(type=str, converter=strip)
    # Blank parent codes are normalized to the ROOT_LOCATION_TYPE sentinel.
    parent_code = attrib(type=str,
                         converter=lambda val: lowercase_string(val) if val else ROOT_LOCATION_TYPE)
    location_id = attrib(type=str, converter=strip)
    do_delete = attrib(type=bool, converter=to_boolean)
    external_id = attrib(type=str, converter=strip)
    # Decimal when valid, None when blank, or the raw value when
    # unparseable (flagged later during validation).
    latitude = attrib(converter=maybe_decimal)
    longitude = attrib(converter=maybe_decimal)
    # This can be a dict or 'NOT_PROVIDED_IN_EXCEL'
    custom_data = attrib()
    delete_uncategorized_data = attrib(type=bool, converter=to_boolean)
    # Position of the row in the upload sheet (useful for error reporting).
    index = attrib(type=int)
class LocationStub(object):
    """Pairs an uploaded LocationData row with the existing SQLLocation
    (matched by location_id, if any) and decides what needs to be written."""

    titles = LOCATION_SHEET_HEADERS
    # Sentinel used when the custom-data columns were absent from the sheet.
    NOT_PROVIDED = 'NOT_PROVIDED_IN_EXCEL'
    # Simple attributes copied verbatim from the upload row onto the model.
    meta_data_attrs = ['name', 'site_code', 'latitude', 'longitude', 'external_id']

    def __init__(self, new_data, location_data_model, old_collection):
        self.new_data = new_data
        self.site_code = new_data.site_code
        self.parent_code = new_data.parent_code
        self.index = new_data.index
        self.location_type = new_data.location_type
        self.do_delete = new_data.do_delete
        self.location_id = new_data.location_id
        self.data_model = location_data_model
        self.old_collection = old_collection
        self.domain = old_collection.domain_name
        # If a location ID is not provided, the location is presumed to be new
        self.is_new = not self.new_data.location_id

    @cached_property
    def old_object(self):
        """The existing SQLLocation for this row, or None for new rows."""
        if self.is_new:
            return None
        return self.old_collection.locations_by_id[self.location_id]

    @cached_property
    def db_object(self):
        # The SQLLocation object, either an unsaved or the actual database
        # object depending on whether 'is_new' is True or not
        if self.is_new:
            db_object = SQLLocation(domain=self.domain)
        else:
            db_object = copy.copy(self.old_object)
        for attr in self.meta_data_attrs:
            setattr(db_object, attr, getattr(self.new_data, attr))
        db_object.metadata = self.custom_data
        return db_object

    @cached_property
    def custom_data(self):
        # This just compiles the custom location data, the validation is done in _custom_data_errors()
        data_provided = self.new_data.custom_data != self.NOT_PROVIDED
        if data_provided:
            metadata = {key: str(value) for key, value in self.new_data.custom_data.items()}
        elif self.is_new:
            metadata = {}
        else:
            metadata = copy.copy(self.old_object.metadata)
        # Split into fields known to the data model vs. uncategorized keys.
        metadata, unknown = self.data_model.get_model_and_uncategorized(metadata)
        if data_provided and not self.new_data.delete_uncategorized_data:
            # add back uncategorized data to new metadata
            metadata.update(unknown)
        return metadata

    @cached_property
    def needs_save(self):
        """Whether this row requires a database write."""
        if self.is_new or self.do_delete:
            return True
        for attr in self.meta_data_attrs:
            old_value = getattr(self.old_object, attr)
            new_value = getattr(self.new_data, attr)
            # Only flag a change when at least one side is truthy and they
            # differ (so None vs '' does not force a save).
            if (old_value or new_value) and old_value != new_value:
                # attributes are being updated
                return True
        if self._metadata_needs_save():
            return True
        if self.location_type != self.old_object.location_type.code:
            # foreign-key refs are being updated
            return True
        if self.old_object.parent_id is not None:
            old_parent_code = self.old_collection.locations_by_pk[self.old_object.parent_id].site_code
        else:
            old_parent_code = ROOT_LOCATION_TYPE
        return old_parent_code != self.new_data.parent_code

    def _metadata_needs_save(self):
        # Save only for meaningful changes - not just to add empty fields
        old_metadata, new_metadata = self.old_object.metadata, self.db_object.metadata
        return (
            # data is added or modified
            any(old_metadata.get(k, '') != new_value
                for k, new_value in new_metadata.items())
            # data is removed
            or any(k not in new_metadata for k in old_metadata.keys())
        )
class UnexpectedState(Exception):
    """Internal error signaling an inconsistent state during the upload."""
    pass
class LocationCollection(object):
    """
    Simple wrapper to lookup types and locations in a domain
    """

    def __init__(self, domain_obj):
        self.domain_name = domain_obj.name
        self.types = domain_obj.location_types
        # Only active (non-archived) locations participate in the upload.
        self.locations = list(SQLLocation.objects.filter(domain=self.domain_name, is_archived=False))

    @property
    @memoized
    def locations_by_pk(self):
        # Keyed by database primary key (used to resolve parent_id refs).
        return {l.id: l for l in self.locations}

    @property
    @memoized
    def locations_by_id(self):
        # Keyed by the location_id string from the upload.
        return {l.location_id: l for l in self.locations}

    @property
    @memoized
    def locations_by_site_code(self):
        return {l.site_code: l for l in self.locations}

    @property
    @memoized
    def locations_by_parent_code(self):
        """Map each parent's site_code ('' for roots) to its child locations."""
        locs_by_pk = self.locations_by_pk
        locs_by_parent = defaultdict(list)
        for loc in self.locations:
            if loc.parent_id is not None:
                parent_code = locs_by_pk[loc.parent_id].site_code
            else:
                parent_code = ''
            locs_by_parent[parent_code].append(loc)
        return locs_by_parent

    @property
    @memoized
    def types_by_code(self):
        return {lt.code: lt for lt in self.types}

    @property
    @memoized
    def custom_data_validator(self):
        # Imported here to avoid a circular import with .views.
        from .views import LocationFieldsView
        return LocationFieldsView.get_validator(self.domain_name)
class LocationExcelValidator(object):
types_sheet_title = "types"
    def __init__(self, domain, excel_importer):
        self.domain = domain
        self.excel_importer = excel_importer
        # Custom location data model for this domain, used to classify
        # per-location metadata while parsing the sheets.
        self.data_model = get_location_data_model(self.domain)
def validate_and_parse_data_from_excel(self):
# This validates format of the uploaded excel file and coverts excel rows into stubs
sheets_by_title = {ws.title: ws for ws in self.excel_importer.worksheets}
# excel file should contain 'types' sheet
if self.types_sheet_title not in sheets_by_title:
raise LocationExcelSheetError("'types' sheet is required")
# 'types' sheet should have correct headers
type_sheet_reader = sheets_by_title[self.types_sheet_title]
actual = set(type_sheet_reader.headers)
expected = set(LOCATION_TYPE_SHEET_HEADERS.values())
if actual != expected:
missing = ", ".join(expected - actual)
extra = ", ".join(actual - expected)
message = ugettext_lazy("'types' sheet should contain headers '{expected}'. {missing}{extra}")
raise LocationExcelSheetError(message.format(
expected=", ".join(expected),
missing=ugettext_lazy("'{}' are missing. ").format(missing) if missing else '',
extra=ugettext_lazy("'{}' are not recognized. ").format(extra) if extra else '',
))
type_data = [self._get_type_data(index, row)
for index, row in enumerate(type_sheet_reader)]
# all locations sheets should have correct headers
location_data = []
optional_headers = list(LOCATION_SHEET_HEADERS_OPTIONAL.values())
for sheet_name, sheet_reader in sheets_by_title.items():
if sheet_name != self.types_sheet_title:
actual = set(sheet_reader.fieldnames) - set(optional_headers)
expected = set(LOCATION_SHEET_HEADERS_BASE.values())
if actual != expected:
missing = ", ".join(expected - actual)
extra = ", ".join(actual - expected)
message = ugettext_lazy("Locations sheet with title '{name}' should contain exactly "
"'{expected}' as the sheet headers. {missing}{extra}")
raise LocationExcelSheetError(message.format(
name=sheet_name,
expected=", ".join(expected),
missing=ugettext_lazy("'{}' are missing. ").format(missing) if missing else '',
extra=ugettext_lazy("'{}' are not recognized. ").format(extra) if extra else '',
))
location_data.extend([
self._get_location_data(index, row, sheet_name)
for index, row in enumerate(sheet_reader)
])
return type_data, location_data
@staticmethod
def _get_type_data(index, row):
titles = LOCATION_TYPE_SHEET_HEADERS
return LocationTypeData(
name=row.get(titles['name']),
code=row.get(titles['code']),
parent_code=row.get(titles['parent_code']),
do_delete=row.get(titles['do_delete']),
shares_cases=row.get(titles['shares_cases']),
view_descendants=row.get(titles['view_descendants']),
index=index,
)
@staticmethod
def _get_location_data(index, row, location_type):
titles = LOCATION_SHEET_HEADERS
def _optional_attr(attr):
if titles[attr] in row:
val = row.get(titles[attr])
if isinstance(val, dict) and '' in val:
# when excel header is 'data: ', the value is parsed as {'': ''}, but it should be {}
val.pop('')
return val
else:
return LocationStub.NOT_PROVIDED
return LocationData(
name=row.get(titles['name']),
site_code=row.get(titles['site_code']),
location_type=location_type,
parent_code=row.get(titles['parent_code']),
location_id=row.get(titles['location_id']),
do_delete=row.get(titles['do_delete']),
external_id=row.get(titles['external_id']),
latitude=row.get(titles['latitude']),
longitude=row.get(titles['longitude']),
custom_data=_optional_attr('custom_data'),
delete_uncategorized_data=row.get(titles['delete_uncategorized_data']),
index=index,
)
class NewLocationImporter(object):
    """
    This takes location type and location stubs, validates data and the tree
    and saves the changes in a transaction.
    """

    def __init__(self, domain, type_data, location_data, user, excel_importer=None, chunk_size=100):
        self.domain = domain
        self.domain_obj = Domain.get_by_name(domain)
        # snapshot of the domain's current types/locations, used for diffing
        self.old_collection = LocationCollection(self.domain_obj)
        self.type_stubs = [LocationTypeStub(data, self.old_collection) for data in type_data]
        data_model = get_location_data_model(self.domain)
        self.location_stubs = [LocationStub(data, data_model, self.old_collection)
                               for data in location_data]
        self.user = user
        self.result = LocationUploadResult()
        self.excel_importer = excel_importer  # excel_importer is used for providing progress feedback
        self.chunk_size = chunk_size

    def run(self):
        """Validate the upload and commit it only when there are no errors.

        Returns the LocationUploadResult with errors/warnings/messages populated.
        """
        tree_validator = LocationTreeValidator(self.type_stubs, self.location_stubs,
                                               self.old_collection, self.user)
        self.result.errors = tree_validator.errors
        self.result.warnings = tree_validator.warnings
        # any validation error aborts the whole upload -- nothing is saved
        if self.result.errors:
            return self.result

        self.bulk_commit(self.type_stubs, self.location_stubs)
        return self.result

    def bulk_commit(self, type_stubs, location_stubs):
        # Types are saved first so locations can point at (possibly new) LocationType rows
        type_objects = save_types(type_stubs, self.excel_importer)
        save_locations(location_stubs, type_objects, self.old_collection,
                       self.excel_importer, self.chunk_size)
        # Since we updated LocationType objects in bulk, some of the post-save logic
        # that occurs inside LocationType.save needs to be explicitly called here
        for lt in type_stubs:
            if (not lt.do_delete and lt.needs_save):
                obj = type_objects[lt.code]
                if not lt.is_new:
                    # supply_points would have been synced while SQLLocation.save() already
                    obj.sync_administrative_status(sync_supply_points=False)

        # summary counters for the user-facing result messages
        update_count = lambda items: sum(l.needs_save and not l.do_delete and not l.is_new for l in items)
        delete_count = lambda items: sum(l.do_delete for l in items)
        new_count = lambda items: sum(l.is_new for l in items)

        self.result.messages.extend([
            _("Created {} new location types").format(new_count(type_stubs)),
            _("Updated {} existing location types").format(update_count(type_stubs)),
            _("Deleted {} existing location types").format(delete_count(type_stubs)),
            _("Created {} new locations").format(new_count(location_stubs)),
            _("Updated {} existing locations").format(update_count(location_stubs)),
            _("Deleted {} existing locations").format(delete_count(location_stubs)),
        ])
class LocationTreeValidator(object):
    """Validates the given type and location stubs

    All types and location stubs are linked with a corresponding
    db_object.

    :param type_stubs: List of `LocationTypeStub` objects.
    :param location_stubs: List of `LocationStub` objects.
    :param old_collection: `LocationCollection`.
    :param user: The user performing the upload
    """

    def __init__(self, type_stubs, location_stubs, old_collection, user):
        _to_be_deleted = lambda items: [i for i in items if i.do_delete]
        _not_to_be_deleted = lambda items: [i for i in items if not i.do_delete]

        self.user = user
        self.domain = old_collection.domain_name

        self.all_listed_types = type_stubs
        self.location_types = _not_to_be_deleted(type_stubs)
        self.types_to_be_deleted = _to_be_deleted(type_stubs)

        self.all_listed_locations = location_stubs
        self.locations = _not_to_be_deleted(location_stubs)
        self.locations_to_be_deleted = _to_be_deleted(location_stubs)

        self.old_collection = old_collection

        self.types_by_code = {lt.code: lt for lt in self.location_types}
        self.locations_by_code = {l.site_code: l for l in location_stubs}

        # validation runs eagerly at construction; results exposed as attributes
        self.errors = self._get_errors()
        self.warnings = self._get_warnings()

    def _get_warnings(self):
        # deleting a location that doesn't exist is treated as a no-op warning
        return [
            _("Location deletion in sheet '{type}', row '{i}' is ignored, "
              "as the location does not exist")
            .format(type=loc.location_type, i=loc.index)
            for loc in self.all_listed_locations
            if loc.is_new and loc.do_delete
        ]

    def _get_errors(self):
        # We want to find as many errors as possible up front, but some high
        # level errors make it unrealistic to keep validating
        basic_errors = (
            self._check_location_restriction() +
            self._check_unique_type_codes() +
            self._check_unique_location_codes() +
            self._check_unique_location_ids() +
            self._check_new_site_codes_available() +
            self._check_unlisted_type_codes() +
            self._check_unknown_location_ids() +
            self._validate_geodata()
        )

        if basic_errors:
            # it doesn't make sense to try to validate a tree when you can't
            # uniquely determine the relationships
            return basic_errors

        # Make sure the location types make sense
        type_errors = self._validate_types_tree()
        if type_errors:
            return type_errors

        # Check each location's position in the tree
        errors = self._validate_location_tree() + self._check_required_locations_missing()

        errors.extend(self._custom_data_errors())

        # Location names must be unique among siblings
        errors.extend(self._check_location_names())

        # Model field validation must pass
        errors.extend(self._check_model_validation())

        return errors

    def _check_location_restriction(self):
        """Location-restricted users may only touch locations they can access,
        and may never add or modify location types."""
        if self.user.has_permission(self.domain, 'access_all_locations'):
            return []
        errors = []
        if any(lt.needs_save for lt in self.all_listed_types):
            errors.append(_('You do not have permission to add or modify location types'))
        accessible_site_codes = set(SQLLocation.active_objects
                                    .accessible_to_user(self.domain, self.user)
                                    .values_list('site_code', flat=True))
        for loc_stub in self.all_listed_locations:
            if not loc_stub.needs_save:
                # Allow users to include any loc, as long as there are no changes
                continue
            if loc_stub.is_new and loc_stub.parent_code == ROOT_LOCATION_TYPE:
                errors.append(_("You do not have permission to add top level locations"))
            elif loc_stub.is_new:
                # This checks parent_exists to allow users to create multiple
                # levels at once. Somewhere up the chain, they must have a
                # parent that exists - if it isn't accessible, they'll get an
                # error there, if not, the newly created locs are accessible by
                # extension
                parent_exists = loc_stub.parent_code in self.old_collection.locations_by_site_code
                if parent_exists and loc_stub.parent_code not in accessible_site_codes:
                    errors.append(_("You do not have permission to add locations in '{}'")
                                  .format(loc_stub.parent_code))
            else:
                if loc_stub.site_code not in accessible_site_codes:
                    errors.append(_("You do not have permission to edit '{}'")
                                  .format(loc_stub.site_code))
        return errors

    def _validate_geodata(self):
        """Latitude/longitude, when provided, must have parsed as Decimal."""
        errors = []
        for loc_stub in self.all_listed_locations:
            l = loc_stub.new_data
            if (l.latitude and not isinstance(l.latitude, Decimal)
                    or l.longitude and not isinstance(l.longitude, Decimal)):
                errors.append(l)
        return [
            _("latitude/longitude 'lat-{lat}, lng-{lng}' for location in sheet '{type}' "
              "at index {index} should be valid decimal numbers.")
            .format(type=l.location_type, index=l.index, lat=l.latitude, lng=l.longitude)
            for l in errors
        ]

    def _check_required_locations_missing(self):
        """A deleted location's children must all be listed in the upload."""
        if not self.locations_to_be_deleted:
            # skip this check if no old locations or no location to be deleted
            return []

        old_locs_by_parent = self.old_collection.locations_by_parent_code

        missing_locs = []
        listed_sites = {l.site_code for l in self.all_listed_locations}
        for loc in self.locations_to_be_deleted:
            required_locs = old_locs_by_parent[loc.site_code]
            missing = set([l.site_code for l in required_locs]) - listed_sites
            if missing:
                missing_locs.append((missing, loc))

        return [
            _("Location '{code}' in sheet '{type}' at index {index} is being deleted, so all its "
              "child locations must be present in the upload, but child locations '{locs}' are missing")
            .format(code=parent.site_code, type=parent.location_type, index=parent.index, locs=', '.join(old_locs))
            for (old_locs, parent) in missing_locs
        ]

    def _check_unique_type_codes(self):
        """Type codes may appear only once in the upload."""
        counts = list(Counter(lt.code for lt in self.all_listed_types).items())
        return [
            _("Location type code '{}' is used {} times - they should be unique")
            .format(code, count)
            for code, count in counts if count > 1
        ]

    def _check_unique_location_codes(self):
        """Location site_codes may appear only once in the upload."""
        counts = list(Counter(l.site_code for l in self.all_listed_locations).items())
        return [
            _("Location site_code '{}' is used {} times - they should be unique")
            .format(code, count)
            for code, count in counts if count > 1
        ]

    def _check_unique_location_ids(self):
        """Each non-empty location_id may be listed only once."""
        counts = list(Counter(l.location_id for l in self.all_listed_locations if l.location_id).items())
        return [
            _("Location location_id '{}' is listed {} times - they should be listed once")
            .format(location_id, count)
            for location_id, count in counts if count > 1
        ]

    def _check_new_site_codes_available(self):
        """A listed location may not claim a site_code already owned by a
        different existing location that isn't part of this upload."""
        updated_location_ids = {l.location_id for l in self.all_listed_locations
                                if l.location_id}
        # These site codes belong to locations in the db, but not the upload
        unavailable_site_codes = {l.site_code for l in self.old_collection.locations
                                  if l.location_id not in updated_location_ids}
        return [
            _("Location site_code '{code}' is in use by another location. "
              "All site_codes must be unique").format(code=l.site_code)
            for l in self.all_listed_locations
            if l.site_code in unavailable_site_codes
        ]

    def _check_unlisted_type_codes(self):
        # count types not listed in excel but are present in the domain now
        old_codes = [lt.code for lt in self.old_collection.types]
        listed_codes = [lt.code for lt in self.all_listed_types]
        unlisted_codes = set(old_codes) - set(listed_codes)

        return [
            _("Location type code '{}' is not listed in the excel. All types should be listed")
            .format(code)
            for code in unlisted_codes
        ]

    def _check_unknown_location_ids(self):
        # count location_ids listed in the excel that are not found in the domain
        old = self.old_collection.locations_by_id
        listed = {l.location_id: l for l in self.all_listed_locations if l.location_id}
        unknown = set(listed.keys()) - set(old.keys())

        return [
            _("Location 'id: {id}' is not found in your domain. It's listed in the sheet {type} at row {index}")
            .format(id=l_id, type=listed[l_id].location_type, index=listed[l_id].index)
            for l_id in unknown
        ]

    def _custom_data_errors(self):
        """Run the domain's custom-data validator over every provided custom_data dict."""
        validator = self.old_collection.custom_data_validator
        return [
            _("Problem with custom data for location '{site_code}', in sheet '{type}', at index '{i}' - '{er}'")
            .format(site_code=l.site_code, type=l.location_type, i=l.index, er=validator(l.custom_data))
            for l in self.all_listed_locations
            if l.custom_data is not LocationStub.NOT_PROVIDED and validator(l.custom_data)
        ]

    def _validate_types_tree(self):
        """Type parents must exist and must not form cycles."""
        type_pairs = [(lt.code, lt.parent_code) for lt in self.location_types]
        try:
            assert_no_cycles(type_pairs)
        except BadParentError as e:
            return [
                _("Location Type '{}' refers to a parent which doesn't exist").format(code)
                for code in e.affected_nodes
            ]
        except CycleError as e:
            return [
                _("Location Type '{}' has a parentage that loops").format(code)
                for code in e.affected_nodes
            ]
        # implicitly returns None (falsy) when the type tree is valid

    def _validate_location_tree(self):
        """Each location's parent must match its type's expected parent type."""
        errors = []

        def _validate_location(location):
            loc_type = self.types_by_code.get(location.location_type)
            if not loc_type:
                # if no location_type is set
                return (_(
                    "Location '{}' in sheet points to a nonexistent or to be deleted location-type '{}'")
                    .format(location.site_code, location.location_type))

            if loc_type.parent_code == ROOT_LOCATION_TYPE:
                # if top location then it shouldn't have a parent
                if location.parent_code != ROOT_LOCATION_TYPE:
                    return _("Location '{}' is a '{}' and should not have a parent").format(
                        location.site_code, location.location_type)
                else:
                    return
            else:
                # if not top location, its actual parent location type should match what it is set in excel
                parent = self.locations_by_code.get(location.parent_code)
                if not parent:
                    # check old_collection if it's not listed in current excel
                    parent = self.old_collection.locations_by_site_code.get(location.parent_code)
                    if not parent:
                        return _("Location '{}' does not have a parent set or its parent "
                                 "is being deleted").format(location.site_code)
                    else:
                        actual_parent_type = parent.location_type.code
                else:
                    actual_parent_type = parent.location_type
                    if parent.do_delete and not location.do_delete:
                        return _("Location points to a location that's being deleted")

                if actual_parent_type != loc_type.parent_code:
                    return _("Location '{}' is a '{}', so it should have a parent that is a '{}'").format(
                        location.site_code, location.location_type, loc_type.parent_code)

        for location in self.locations:
            error = _validate_location(location)
            if error:
                errors.append(error)

        return errors

    def _check_location_names(self):
        """Sibling locations (same parent) must have distinct names."""
        locs_by_parent = defaultdict(list)
        for loc in self.locations:
            locs_by_parent[loc.parent_code].append(loc)
        errors = []
        for parent, siblings in locs_by_parent.items():
            counts = list(Counter(l.new_data.name for l in siblings).items())
            for name, count in counts:
                if count > 1:
                    errors.append(
                        (_("There are {} locations with the name '{}' under the parent '{}'")
                         .format(count, name, parent))
                    )
        return errors

    def _check_model_validation(self):
        """Do model validation"""
        errors = []
        for location in self.locations:
            exclude_fields = ["location_type"]  # Skip foreign key validation
            if not location.db_object.location_id:
                # Don't validate location_id if its blank because SQLLocation.save() will add it
                exclude_fields.append("location_id")
            try:
                location.db_object.full_clean(exclude=exclude_fields)
            except ValidationError as e:
                for field, issues in e.message_dict.items():
                    for issue in issues:
                        errors.append(_(
                            "Error with location in sheet '{}', at row {}. {}: {}").format(
                                location.location_type, location.index, field, issue
                        ))
        return errors
def new_locations_import(domain, excel_importer, user):
    """Validate an uploaded workbook and run the location import.

    Sheet-format problems are reported as errors on the returned
    LocationUploadResult rather than raised to the caller.
    """
    try:
        validator = LocationExcelValidator(domain, excel_importer)
        type_data, location_data = validator.validate_and_parse_data_from_excel()
    except LocationExcelSheetError as err:
        failed_result = LocationUploadResult()
        failed_result.errors = [str(err)]
        return failed_result

    return NewLocationImporter(domain, type_data, location_data, user, excel_importer).run()
def save_types(type_stubs, excel_importer=None):
    """
    Given a list of LocationTypeStub objects, saves them to SQL as LocationType objects

    :param type_stubs: (list) list of LocationType objects with meta-data attributes and
          `needs_save`, 'is_new', 'db_object' set correctly
    :param excel_importer: Used for providing progress feedback. Disabled on None

    :returns: (dict) a dict of {object.code: object for all type objects}
    """
    # This proceeds in 3 steps
    # 1. Lookup all to be deleted types and 'bulk_delete' them
    # 2. Lookup all new types and 'bulk_create' the SQL objects, but don't set ForeignKey attrs like
    #    'parent' yet
    # 3. Lookup all to be updated types. Set foreign key attrs on these and new objects, and
    #    'bulk_update' the objects

    # step 1
    to_be_deleted_types = [lt.db_object for lt in type_stubs if lt.do_delete]
    LocationType.bulk_delete(to_be_deleted_types)
    if excel_importer:
        excel_importer.add_progress(len(to_be_deleted_types))
    # step 2
    new_type_objects = LocationType.bulk_create([lt.db_object for lt in type_stubs if lt.is_new])
    if excel_importer:
        excel_importer.add_progress(len(new_type_objects))
    # step 3
    type_objects_by_code = {lt.code: lt for lt in new_type_objects}
    # ROOT_LOCATION_TYPE maps to None so top-level types get parent_type = None below
    type_objects_by_code.update({ROOT_LOCATION_TYPE: None})
    type_objects_by_code.update({
        lt.code: lt.db_object
        for lt in type_stubs
        if not lt.is_new and not lt.do_delete
    })
    to_bulk_update = []
    for lt in type_stubs:
        if (lt.needs_save or lt.is_new) and not lt.do_delete:
            # lookup foreign key attributes from stub and set them on objects
            type_object = type_objects_by_code[lt.code]
            type_object.parent_type = type_objects_by_code[lt.parent_code]
            to_bulk_update.append(type_object)

    LocationType.bulk_update(to_bulk_update)
    if excel_importer:
        excel_importer.add_progress(len(to_bulk_update))
    # return every surviving type object (updated, new, and unchanged) keyed by code
    all_objs_by_code = {lt.code: lt for lt in to_bulk_update}
    all_objs_by_code.update({
        lt.code: lt.db_object
        for lt in type_stubs
        if not lt.needs_save
    })
    return all_objs_by_code
def save_locations(location_stubs, types_by_code, old_collection,
                   excel_importer=None, chunk_size=100):
    """
    :param location_stubs: (list) List of LocationStub objects with
        attributes like 'db_object', 'needs_save', 'do_delete' set
    :param types_by_code: (dict) Mapping of 'code' to LocationType SQL objects
    :param excel_importer: Used for providing progress feedback. Disabled on None

    This recursively saves tree top to bottom.
    """

    def order_by_location_type():
        # returns locations in the order from top to bottom
        # (parents must be saved before children so FK assignment works)
        types_by_parent = defaultdict(list)
        for _type in types_by_code.values():
            key = _type.parent_type.code if _type.parent_type else ROOT_LOCATION_TYPE
            types_by_parent[key].append(_type)

        location_stubs_by_type = defaultdict(list)
        for l in location_stubs:
            location_stubs_by_type[l.location_type].append(l)

        top_to_bottom_locations = []

        def append_at_bottom(parent_type):
            # depth-first walk of the type tree, collecting locations per level
            top_to_bottom_locations.extend(location_stubs_by_type[parent_type.code])
            for child_type in types_by_parent[parent_type.code]:
                append_at_bottom(child_type)

        for top_type in types_by_parent[ROOT_LOCATION_TYPE]:
            append_at_bottom(top_type)

        return top_to_bottom_locations

    # Go through all locations and either flag for deletion or save
    location_stubs_by_code = {stub.site_code: stub for stub in location_stubs}
    to_delete = []
    for stubs in chunked(order_by_location_type(), chunk_size):
        # each chunk is saved atomically
        with transaction.atomic():
            for loc in stubs:
                if loc.do_delete:
                    if loc.is_new:
                        # deleting a location that never existed is a no-op
                        if excel_importer:
                            excel_importer.add_progress()
                    else:
                        # deletions are deferred until all saves are done
                        to_delete.append(loc)
                    continue
                if excel_importer:
                    excel_importer.add_progress()
                if loc.needs_save:
                    # attach location type and parent to location, then save
                    loc_object = loc.db_object
                    loc_object.location_type = types_by_code.get(loc.location_type)
                    parent_code = loc.parent_code
                    if parent_code == ROOT_LOCATION_TYPE:
                        loc_object.parent = None
                    elif parent_code:
                        # prefer the (possibly just-saved) upload object; fall
                        # back to the pre-existing location from the db snapshot
                        if parent_code in location_stubs_by_code:
                            loc_object.parent = location_stubs_by_code[parent_code].db_object
                        else:
                            loc_object.parent = old_collection.locations_by_site_code[parent_code]
                    loc_object.save()

    _delete_locations(to_delete, old_collection, excel_importer, chunk_size)
def _delete_locations(to_delete, old_collection, excel_importer, chunk_size):
    """Delete location stubs in chunks, leaf nodes first.

    Also assembles the ancestor location_ids affected by each chunk so
    SQLLocation.bulk_delete can update them -- without repeating an
    ancestor across chunks.
    """
    seen_pks = set()

    def iter_unprocessed_ancestor_ids(stubs):
        # Yield the location_id of each stub's db object and all of its
        # ancestors, skipping any pk already yielded by a previous call.
        for stub in stubs:
            if stub.is_new:
                continue
            pk = stub.db_object.pk
            while pk is not None and pk not in seen_pks:
                seen_pks.add(pk)
                ancestor = old_collection.locations_by_pk[pk]
                yield ancestor.location_id
                pk = ancestor.parent_id

    # reverse -> delete leaf nodes first
    for chunk in chunked(reversed(to_delete), chunk_size):
        db_objects = [stub.db_object for stub in chunk]
        ancestor_ids = list(iter_unprocessed_ancestor_ids(chunk))
        with transaction.atomic():
            SQLLocation.bulk_delete(db_objects, ancestor_ids)
        if excel_importer:
            excel_importer.add_progress(len(db_objects))
| |
# sudo pip install hungarian
# sudo easy_install statsmodels
#sudo apt-get install python-pandas
import pandas as pd
import numpy as np
import hungarian
import math
import argparse
from multiprocessing import Pool
from scipy.stats import ks_2samp
from scipy.stats import ttest_ind
from scipy.stats import chisquare
from sklearn import linear_model
import sys
# Module-level state shared by the matching functions below.
df_full = None  # full input dataset (population + sample condition rows)
df_columns = None  # column-definition dataframe (row 0 holds the type label per column)
column_parameters = {}  # per-column kwargs passed to its diff function
column_funs = {}  # per-column diff function (diff_ordinal, diff_real, diff_nominal, diff_ignore)
column_weights = {}  # per-column weight applied to differences
def print_report():
#generate an output report
print "Column weights used:"
for attribute in sorted(column_weights.keys()):
print "\t" + attribute.ljust(30) + "\t" + str(column_weights[attribute])
print""
print "Two-sample Kolmogorov-Smirnov:"
ks_vals = ks(df_sample_condition, df_matches)
for attribute in sorted(ks_vals.keys()):
if ks_vals[attribute][1] < 0.1:
print "*",
print "\t" + attribute.ljust(30) + "\tD={:.4f}\tp={:.4f}".format(*ks_vals[attribute])
print ""
print "Independent two-sample t-test:"
for attribute in sorted(df_columns.columns):
if df_columns.ix[0][attribute] == "ignore":
try:
t, p = ttest_ind(df_sample_condition[attribute], df_matches[attribute])
print "\t" + attribute.ljust(30) + "\tt={:.4f}\tp={:.4f}".format(t, p)
except:
pass #oops, must not have been an integer value!
def ks(df_sample_condition, df_match):
    """For every column that has not been set to ignore and is not the match_attribute
    this function will perform a two sample Kolmogorov-Smirnov test
    """
    results = {}
    for column in df_columns:
        # skip columns excluded from matching
        if df_columns.ix[0][column] == "ignore":
            continue
        results[column] = ks_2samp(df_sample_condition[column], df_match[column])
    return results
def write_output(output, df_sample_condition, df_matches, side_by_side=False):
    """Writes the sample condition and matches to a single csv file.

    If side_by_side then the sample condition is written to the left of the
    matches, with pairs matched appropriately. Otherwise, the sample
    condition is written above the matches, with the matched pairs in order.

    :param output: path or buffer handed to DataFrame.to_csv.
        BUG FIX: the original ignored this parameter and wrote to the
        module-level global ``file_output`` instead (the call site passes
        ``file_output``, so existing behavior is unchanged).
    :param df_sample_condition: dataframe of the sample-condition rows
    :param df_matches: dataframe of the matched population rows
    :param side_by_side: join pairs horizontally instead of stacking
    """
    if side_by_side:
        columns_to_rename = {}
        for column in df_sample_condition.columns:
            columns_to_rename[column] = "matched_" + str(column)
        df_output = df_matches.rename(columns=columns_to_rename)
        # rename the index we will join on back
        df_output = df_output.rename(columns={"_index": "index"})
        df_output = df_sample_condition.join(df_output)
    else:
        df_output = pd.concat([df_sample_condition, df_matches])
    df_output.to_csv(output, encoding='utf-8')
def run_hungarian(matrix, df_population, df_sample_condition):
    """Runs the hungarian linear assignment problem solver from the hungarian package.

    Takes in a matrix of datavalues and dataframes for the df_population and the
    df_sample_condition. Returns the matches as a new dataframe with the same
    structure as the df_population.
    """
    _row_assigns, col_assigns = hungarian.lap(matrix)
    # one assignment per sample-condition row -> indices into the population
    matched_indices = [col_assigns[i] for i in range(len(df_sample_condition))]
    return df_population.ix[matched_indices]
def discover_weightings(df_full):
    """Returns a dict of weightings for every column in file_column_definitions
    which is not labeled as ignore, and is not the match_attribute. This function
    requires that there are no null/None/NaN values in the columns.

    TODO: Remove the requirement for no null/None/NaN values.
    """
    candidates = [
        column_name for column_name in df_columns.columns
        if df_columns[column_name][0] != "ignore" and column_name != match_attribute
    ]
    # fit match_attribute ~ candidate columns; coefficients become the weights
    clf = linear_model.LinearRegression()
    clf.fit(df_full[candidates], df_full[match_attribute])
    return dict(zip(candidates, clf.coef_))
def diff_two_rows(x, y):
    """Returns difference over all columns in column_definitions between two rows
    in a pandas dataframe as a tuple: (average difference, dictionary of column differences)

    Columns whose diff function returns None (ignored columns) are excluded.
    """
    diffs = {}
    for column_name in df_columns.columns:
        difference = diff(x[column_name], y[column_name], column_name)
        # identity check, not equality: was `!= None`
        if difference is not None:
            diffs[column_name] = difference
    # BUG FIX: np.mean(dict.values()) fails on Python 3 dict views; wrap in list()
    return (np.mean(list(diffs.values())), diffs)
def diff_ignore(one, two):
    """Ignored columns contribute no difference at all."""
    return None
def diff_nominal(one, two):
    """0 when the two categorical values compare equal, 1 otherwise."""
    return 0 if one == two else 1
def diff_ordinal(one, two, sorted_range):
    """Normalized rank distance between two ordinal values.

    Both values must be present in sorted_range; the result lies in [0, 1].
    (math.fabs keeps the division a float division on Python 2 as well.)
    """
    rank_one = sorted_range.index(one)
    rank_two = sorted_range.index(two)
    return math.fabs(rank_one - rank_two) / (len(sorted_range) - 1)
def diff_real(one, two, min, max):
    """Normalized absolute difference of two real values over the column range.

    NOTE: the `min`/`max` parameter names shadow builtins, but they are part
    of the interface -- callers pass them as keyword args via
    column_parameters, so they cannot be renamed.
    """
    top = float(max - min)
    one = float(one)
    # BUG FIX: was `two - float(two)`, which computed and discarded the
    # conversion (and raised TypeError for string inputs).
    two = float(two)
    return math.fabs((one / top) - (two / top))
def diff(one, two, column_name):
    """Dispatch to the difference function configured for column_name,
    passing along that column's stored keyword parameters."""
    fun = column_funs[column_name]
    kwargs = column_parameters[column_name]
    return fun(one, two, **kwargs)
def load_data():
    """Load the population CSV and the column-definition CSV into the
    module globals df_full and df_columns."""
    global df_full
    global df_columns
    na_markers = ["", " ", "NULL"]
    df_full = pd.read_csv(file_input, na_values=na_markers)
    df_columns = pd.read_csv(file_columns, na_values=na_markers)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Subsamples a large population based on the characteristics of a small population.')
    parser.add_argument('-i', '--input', help='Input filename as a CSV')
    parser.add_argument('-o', '--output', help='Output filename for matches')
    parser.add_argument('-c', '--column_definitions', help='The CSV of column definitions')
    parser.add_argument('-m', '--match', help='The attribute to match one')
    parser.add_argument('-w', '--weights', help='Whether weights should be automatically discovered or evenly applied')
    args = parser.parse_args()

    # stash CLI arguments into the module-level names the helpers read
    file_columns = args.column_definitions
    file_input = args.input
    file_output = args.output
    match_attribute = args.match
    weights = args.weights

    #load datafiles into pandas dataframes
    load_data()

    #separate the general population for matching and the subsample of interest based on the match_attribute
    df_population = df_full[df_full[match_attribute] == 0]
    df_sample_condition = df_full[df_full[match_attribute] == 1]
    df_population = df_population.reset_index(drop=True)
    df_sample_condition = df_sample_condition.reset_index(drop=True)

    #make sure sample condition and population and column definitions are all the same size
    assert len(df_population.columns) == len(df_columns.columns) == len(
        df_sample_condition.columns), "All data files must have the same number of columns"
    #make sure that population file has at least one free choice in it
    assert len(df_population) > len(
        df_sample_condition), "The population file must have more items in it than the sample condition file"

    column_weights = {}
    if weights == "auto":
        #run logistic regression to discover the weightings for each column
        # NOTE(review): discover_weightings actually fits a *linear* regression
        column_weights = discover_weightings(df_full)
    else:
        #set all columns to 1 except those that will not be used
        for column_name in df_columns.columns:
            if df_columns[column_name][0] != "ignore":
                column_weights[column_name] = 1.0
            else:
                column_weights[column_name] = 0.0

    #go through and build shorthand variables for the columns based on the data in the definitions file
    # (fills column_parameters and column_funs used by diff())
    for column_name in df_columns:
        if df_columns[column_name][0] == "ordinal":
            # collect the union of observed values to define the ordinal scale
            items = []
            items.extend(df_sample_condition[column_name].unique().tolist())
            items.extend(df_population[column_name].unique().tolist())
            s = set()
            for item in items:
                s.add(item)
            #print "Setting ordinal " + str(column_name)
            newlist = sorted(s)
            column_parameters[column_name] = {"sorted_range": newlist}
            column_funs[column_name] = diff_ordinal
        elif df_columns[column_name][0] == "real":
            # range over both datasets so differences normalize into [0, 1]
            mymin = np.min(df_sample_condition[column_name])
            if np.min(df_population[column_name]) < mymin:
                mymin = np.min(df_population[column_name])
            mymax = np.max(df_sample_condition[column_name])
            if np.max(df_population[column_name]) > mymax:
                mymax = np.max(df_population[column_name])
            #print "Setting real " + str(column_name)
            column_parameters[column_name] = {"min": mymin, "max": mymax}
            column_funs[column_name] = diff_real
        elif df_columns[column_name][0] == "ignore":
            #print "Setting unique " + str(column_name)
            column_parameters[column_name] = {}
            column_funs[column_name] = diff_ignore
        elif df_columns[column_name][0] == "nominal":
            #print "Setting nominal " + str(column_name)
            column_parameters[column_name] = {}
            column_funs[column_name] = diff_nominal
        #add a zero index for anything that does not already have a weight
        if column_name not in column_weights.keys():
            column_weights[column_name] = 0

    #create a matrix filled with ones (worst match value)
    # NOTE: square (population x population) as required by the LAP solver
    matrix = np.ones((len(df_population), len(df_population)), dtype=np.float32)

    print "Building difference tables matrix of size (" + str(len(df_population)) + "x" + str(
        len(df_population)) + "):",
    x_i = 0
    for x in df_population.iterrows():
        y_i = 0
        for y in df_sample_condition.iterrows():
            diffs = []
            for column_name in df_columns.columns:
                difference = diff(x[1][column_name], y[1][column_name], column_name)
                #todo: right now if a value is missing we maximize it, setting it to totally different at 1, is this reasonable?
                #todo: instead should we just ignore this? or should it be a sort of special value?
                if difference != None and np.isnan(difference):
                    # NOTE(review): the NaN case sets difference = 1 but never
                    # appends it to diffs, so it is effectively dropped --
                    # confirm whether it was meant to be weighted and appended
                    difference = 1
                elif difference != None:
                    diffs.append(difference * column_weights[column_name])
            matrix[x_i][y_i] = np.sum(diffs)
            y_i += 1
        x_i += 1
        print ".",
        sys.stdout.flush()
    print""

    #run lap solver
    print "Running the LAP using the hungarian method."
    df_matches = run_hungarian(matrix, df_population, df_sample_condition)
    df_matches = df_matches.reset_index(drop=True)

    #write output files
    write_output(file_output, df_sample_condition, df_matches)

    #write output report
    print_report()
"""
#Alternative matches
import time
before = int(round(time.time() * 1000))
print before
matches={}
for rowiter in df_deltas.iterrows():
diff_value=np.mean(rowiter[1])
curr_sample_val=rowiter[0]
candidates=[]
for popiter in df_population.iterrows():
difference_value=diff_two_rows(df_sample_condition.ix[curr_sample_val], df_population.ix[popiter[0]])[0]
if difference_value <= diff_value:
candidates.append( (popiter[0], difference_value) )
matches[curr_sample_val]=candidates
after = int(round(time.time() * 1000))
print after
print str(after-before)
"""
#non_regression=df_john[["GENDER_CODE","BIRTH_YEAR","ABORIGINAL_ANCESTRY_IND","SELF_REPORTED_DISABILITY_IND","cu_year1","entrance_average"]]
#clf.fit(non_regression,regression_val)
#clf.coef_
| |
# -*- coding: utf-8 -*-
# Copyright 2021 ProjectQ-Framework (www.projectq.ch)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Back-end to run quantum program on AWS Braket supported devices.
This backend requires the official AWS SDK for Python, Boto3.
The installation is very simple
> pip install boto3
"""
import getpass
import json
import re
import signal
import time
import boto3
import botocore
from .._exceptions import DeviceOfflineError, DeviceTooSmall, RequestTimeoutError
class AWSBraket:
    """Manage a session between ProjectQ and the AWS Braket service."""

    def __init__(self):
        """Initialize a session with the AWS Braket Web APIs."""
        self.backends = {}
        self.timeout = 5.0
        self._credentials = {}
        self._s3_folder = []

    def authenticate(self, credentials=None):
        """
        Authenticate with AWSBraket Web APIs.

        Args:
            credentials (dict): mapping the AWS key credentials as the AWS_ACCESS_KEY_ID and AWS_SECRET_KEY.
                If None, the user is prompted interactively for both values.
        """
        if credentials is None:  # pragma: no cover
            # BUGFIX: the original code assigned into the None object itself
            # (TypeError); build a fresh dict before filling it in.
            credentials = {}
            credentials['AWS_ACCESS_KEY_ID'] = getpass.getpass(prompt="Enter AWS_ACCESS_KEY_ID: ")
            credentials['AWS_SECRET_KEY'] = getpass.getpass(prompt="Enter AWS_SECRET_KEY: ")
        self._credentials = credentials

    def get_s3_folder(self, s3_folder=None):
        """
        Get the S3 bucket that contains the results.

        Args:
            s3_folder (list): contains the S3 bucket and directory to store the results.
                If None, the user is prompted interactively for both values.
        """
        if s3_folder is None:  # pragma: no cover
            s3_bucket = input("Enter the S3 Bucket configured in Braket: ")
            s3_directory = input("Enter the Directory created in the S3 Bucket: ")
            s3_folder = [s3_bucket, s3_directory]
        self._s3_folder = s3_folder

    def get_list_devices(self, verbose=False):
        """
        Get the list of available devices with their basic properties.

        Args:
            verbose (bool): print the returned dictionary if True

        Returns:
            (dict) backends dictionary by deviceName, containing the qubit size 'nq', the coupling map
            'coupling_map' if applicable (IonQ Device as an ion device is having full connectivity) and the
            Schema Header version 'version', because it seems that no device version is available by now
        """
        # TODO: refresh region_names if more regions get devices available
        self.backends = {}
        region_names = ['us-west-1', 'us-east-1']
        for region in region_names:
            client = boto3.client(
                'braket',
                region_name=region,
                aws_access_key_id=self._credentials['AWS_ACCESS_KEY_ID'],
                aws_secret_access_key=self._credentials['AWS_SECRET_KEY'],
            )
            devicelist = client.search_devices(filters=[])
            for result in devicelist['devices']:
                if result['deviceType'] not in ('QPU', 'SIMULATOR'):
                    continue
                device_capabilities = json.loads(
                    client.get_device(deviceArn=result['deviceArn'])['deviceCapabilities']
                )
                # Unfortunately the capabilities schemas are not homogeneous for real
                # devices and simulators: only the coupling map and the location differ.
                if result['deviceType'] == 'QPU':
                    coupling_map = device_capabilities['paradigm']['connectivity']['connectivityGraph']
                    location = region  # device_capabilities['service']['deviceLocation']
                else:  # SIMULATOR: full connectivity, hosted in us-east-1
                    coupling_map = {}
                    location = 'us-east-1'
                self.backends[result['deviceName']] = {
                    'nq': device_capabilities['paradigm']['qubitCount'],
                    'coupling_map': coupling_map,
                    'version': device_capabilities['braketSchemaHeader']['version'],
                    'location': location,
                    'deviceArn': result['deviceArn'],
                    'deviceParameters': device_capabilities['deviceParameters']['properties']['braketSchemaHeader'][
                        'const'
                    ],
                    'deviceModelParameters': device_capabilities['deviceParameters']['definitions'][
                        'GateModelParameters'
                    ]['properties']['braketSchemaHeader']['const'],
                }
        if verbose:
            print('- List of AWSBraket devices available:')
            print(list(self.backends))
        return self.backends

    def is_online(self, device):
        """
        Check if the device is in the list of available backends.

        Args:
            device (str): name of the device to check

        Returns:
            (bool) True if device is available, False otherwise
        """
        # TODO: Add info for the device if it is actually ONLINE
        return device in self.backends

    def can_run_experiment(self, info, device):
        """
        Check if the device is big enough to run the code.

        Args:
            info (dict): dictionary sent by the backend containing the code to run
            device (str): name of the device to use

        Returns:
            (tuple): (bool) True if device is big enough, False otherwise,
                (int) maximum number of qubits available on the device,
                (int) number of qubits needed for the circuit
        """
        nb_qubit_max = self.backends[device]['nq']
        nb_qubit_needed = info['nq']
        return nb_qubit_needed <= nb_qubit_max, nb_qubit_max, nb_qubit_needed

    def run(self, info, device):
        """
        Run the quantum code on the AWS Braket selected device.

        Args:
            info (dict): dictionary sent by the backend containing the code to run
            device (str): name of the device to use

        Returns:
            task_arn (str): The Arn of the task
        """
        argument = {
            'circ': info['circuit'],
            's3_folder': self._s3_folder,
            'shots': info['shots'],
        }
        region_name = self.backends[device]['location']
        device_parameters = {
            'braketSchemaHeader': self.backends[device]['deviceParameters'],
            'paradigmParameters': {
                'braketSchemaHeader': self.backends[device]['deviceModelParameters'],
                'qubitCount': info['nq'],
                'disableQubitRewiring': False,
            },
        }
        device_parameters = json.dumps(device_parameters)
        client_braket = boto3.client(
            'braket',
            region_name=region_name,
            aws_access_key_id=self._credentials['AWS_ACCESS_KEY_ID'],
            aws_secret_access_key=self._credentials['AWS_SECRET_KEY'],
        )
        response = client_braket.create_quantum_task(
            action=argument['circ'],
            deviceArn=self.backends[device]['deviceArn'],
            deviceParameters=device_parameters,
            outputS3Bucket=argument['s3_folder'][0],
            outputS3KeyPrefix=argument['s3_folder'][1],
            shots=argument['shots'],
        )
        return response['quantumTaskArn']

    def get_result(self, execution_id, num_retries=30, interval=1, verbose=False):  # pylint: disable=too-many-locals
        """
        Get the result of an execution by polling AWS Braket.

        Args:
            execution_id (str): Arn of the quantum task to poll.
            num_retries (int): maximum number of polling attempts. NOTE: AWS bills
                per API call, therefore this is deliberately kept small by default.
            interval (int): number of seconds between two polling attempts.
            verbose (bool): if True, print progress information.

        Returns:
            (dict) measurement probabilities of the job.

        Raises:
            Exception: if the task FAILED or received a CANCEL operation.
            RequestTimeoutError: if the task did not complete within num_retries polls.
        """
        if verbose:
            print("Waiting for results. [Job Arn: {}]".format(execution_id))
        original_sigint_handler = signal.getsignal(signal.SIGINT)

        def _handle_sigint_during_get_result(*_):  # pragma: no cover
            raise Exception("Interrupted. The Arn of your submitted job is {}.".format(execution_id))

        def _calculate_measurement_probs(measurements):
            """
            Calculate the measurement probabilities.

            Calculate the measurement probabilities based on the list of measurements for a job sent to a SV1
            Braket simulator.

            Args:
                measurements (list): list of measurements

            Returns:
                measurements_probabilities (dict): The measurements with their probabilities
            """
            total_mes = len(measurements)
            # Deduplicate the shots (lists are unhashable, so go through tuples)
            unique_mes = [list(x) for x in {tuple(x) for x in measurements}]
            measurements_probabilities = {}
            for mes in unique_mes:
                strqubits = ''.join(str(qubit) for qubit in mes)
                measurements_probabilities[strqubits] = measurements.count(mes) / total_mes
            return measurements_probabilities

        # The region_name is obtained from the task_arn itself
        region_name = re.split(':', execution_id)[3]
        client_braket = boto3.client(
            'braket',
            region_name=region_name,
            aws_access_key_id=self._credentials['AWS_ACCESS_KEY_ID'],
            aws_secret_access_key=self._credentials['AWS_SECRET_KEY'],
        )
        # BUGFIX: initialize status so the timeout message below cannot raise a
        # NameError when num_retries == 0.
        status = None
        try:
            signal.signal(signal.SIGINT, _handle_sigint_during_get_result)
            for _ in range(num_retries):
                quantum_task = client_braket.get_quantum_task(quantumTaskArn=execution_id)
                status = quantum_task['status']
                bucket = quantum_task['outputS3Bucket']
                directory = quantum_task['outputS3Directory']
                results_object_name = directory + '/results.json'
                if status == 'COMPLETED':
                    # Get the device type to obtain the correct measurement structure
                    devicetype_used = client_braket.get_device(deviceArn=quantum_task['deviceArn'])['deviceType']
                    # The results and state are written in the results.json file in the
                    # S3 Bucket and do not depend on the status of the device.
                    client_s3 = boto3.client(
                        's3',
                        aws_access_key_id=self._credentials['AWS_ACCESS_KEY_ID'],
                        aws_secret_access_key=self._credentials['AWS_SECRET_KEY'],
                    )
                    s3result = client_s3.get_object(Bucket=bucket, Key=results_object_name)
                    if verbose:
                        print("Results obtained. [Status: {}]".format(status))
                    result_content = json.loads(s3result['Body'].read())
                    if devicetype_used == 'QPU':
                        # Real devices already report probabilities
                        return result_content['measurementProbabilities']
                    if devicetype_used == 'SIMULATOR':
                        # Simulators report raw shots; aggregate them ourselves
                        return _calculate_measurement_probs(result_content['measurements'])
                if status == 'FAILED':
                    raise Exception(
                        "Error while running the code: {}. "
                        "The failure reason was: {}.".format(status, quantum_task['failureReason'])
                    )
                if status == 'CANCELLING':
                    raise Exception("The job received a CANCEL operation: {}.".format(status))
                time.sleep(interval)
            # For QPU devices the job is always queued and there are some working
            # hours available, so a timeout here is not necessarily an error.
        finally:
            if original_sigint_handler is not None:
                signal.signal(signal.SIGINT, original_sigint_handler)
        raise RequestTimeoutError(
            "Timeout. "
            "The Arn of your submitted job is {} and the status "
            "of the job is {}.".format(execution_id, status)
        )
def show_devices(credentials=None, verbose=False):
    """
    Access the list of available devices and their properties (ex: for setup configuration).

    Args:
        credentials (dict): Dictionary storing the AWS credentials with keys AWS_ACCESS_KEY_ID and AWS_SECRET_KEY.
        verbose (bool): If True, additional information is printed

    Returns:
        (list) list of available devices and their properties
    """
    # Open a fresh Braket session, log in with the supplied credentials and
    # hand back whatever device listing the service reports.
    session = AWSBraket()
    session.authenticate(credentials=credentials)
    return session.get_list_devices(verbose=verbose)
# TODO: Create a Show Online properties per device
def retrieve(credentials, task_arn, num_retries=30, interval=1, verbose=False):
    """
    Retrieve a job/task by its Arn.

    Args:
        credentials (dict): Dictionary storing the AWS credentials with keys AWS_ACCESS_KEY_ID and AWS_SECRET_KEY.
        task_arn (str): The Arn of the task to retrieve.
        num_retries (int): Number of times to poll for the result before giving up.
        interval (int): Number of seconds between two polling attempts.
        verbose (bool): If True, additional information is printed.

    Returns:
        (dict) measurement probabilities from the result stored in the S3 folder

    Raises:
        botocore.exceptions.ClientError: re-raised after logging the AWS error code.
    """
    try:
        awsbraket_session = AWSBraket()
        if verbose:
            print("- Authenticating...")
            if credentials is not None:
                # SECURITY FIX: never echo the secret key to stdout/logs; only the
                # (non-secret) access key id is shown.
                print("AWS credentials: " + credentials['AWS_ACCESS_KEY_ID'] + ", <hidden>")
        awsbraket_session.authenticate(credentials=credentials)
        res = awsbraket_session.get_result(task_arn, num_retries=num_retries, interval=interval, verbose=verbose)
        return res
    except botocore.exceptions.ClientError as error:
        error_code = error.response['Error']['Code']
        if error_code == 'ResourceNotFoundException':
            print("- Unable to locate the job with Arn ", task_arn)
        print(error, error_code)
        raise
def send(  # pylint: disable=too-many-branches,too-many-arguments,too-many-locals
    info, device, credentials, s3_folder, num_retries=30, interval=1, verbose=False
):
    """
    Send circuit through the Boto3 SDK and run the quantum circuit.

    Args:
        info (dict): Contains representation of the circuit to run.
        device (str): name of the AWS Braket device.
        credentials (dict): Dictionary storing the AWS credentials with keys AWS_ACCESS_KEY_ID and AWS_SECRET_KEY.
        s3_folder (list): Contains the S3 bucket and directory to store the results.
        num_retries (int): Number of times to poll for the result before giving up.
        interval (int): Number of seconds between two polling attempts.
        verbose (bool): If True, additional information is printed, such as measurement statistics. Otherwise, the
            backend simply registers one measurement result (same behavior as the projectq Simulator).

    Returns:
        (list) samples from the AWS Braket device

    Raises:
        DeviceOfflineError: if the requested device is not in the list of available backends.
        DeviceTooSmall: if the circuit needs more qubits than the device provides.
        botocore.exceptions.ClientError: re-raised after logging the AWS error code.
    """
    try:
        awsbraket_session = AWSBraket()
        if verbose:
            print("- Authenticating...")
            if credentials is not None:
                # SECURITY FIX: never echo the secret key to stdout/logs; only the
                # (non-secret) access key id is shown.
                print("AWS credentials: " + credentials['AWS_ACCESS_KEY_ID'] + ", <hidden>")
        awsbraket_session.authenticate(credentials=credentials)
        awsbraket_session.get_s3_folder(s3_folder=s3_folder)
        # check if the device is online/is available
        awsbraket_session.get_list_devices(verbose)
        online = awsbraket_session.is_online(device)
        if online:
            print("The job will be queued in any case, please take this into account")
        else:
            print("The device is not available. Use the simulator instead or try another device.")
            raise DeviceOfflineError("Device is not available.")
        # check if the device has enough qubits to run the code
        runnable, qmax, qneeded = awsbraket_session.can_run_experiment(info, device)
        if not runnable:
            print(
                (
                    "The device is too small ({} qubits available) for the code "
                    + "requested({} qubits needed) Try to look for another "
                    + "device with more qubits"
                ).format(qmax, qneeded)
            )
            raise DeviceTooSmall("Device is too small.")
        if verbose:
            print("- Running code: {}".format(info))
        task_arn = awsbraket_session.run(info, device)
        print("Your task Arn is: {}. Make note of that for future reference".format(task_arn))
        if verbose:
            print("- Waiting for results...")
        res = awsbraket_session.get_result(task_arn, num_retries=num_retries, interval=interval, verbose=verbose)
        if verbose:
            print("- Done.")
        return res
    except botocore.exceptions.ClientError as error:
        error_code = error.response['Error']['Code']
        # Map the most common Braket error codes to human-readable messages
        # (typos in the original messages fixed).
        if error_code == 'AccessDeniedException':
            print("- There was an error: the access to Braket was denied")
        if error_code == 'DeviceOfflineException':
            print("- There was an error: the device is offline")
        if error_code == 'InternalServiceException':
            print("- There was an internal Braket service error")
        if error_code == 'ServiceQuotaExceededException':
            print("- There was an error: the quota on Braket was exceeded")
        if error_code == 'ValidationException':
            print("- There was a Validation error")
        print(error, error_code)
        raise
| |
from unittest import TestCase
from ..siptransport import SimulatedSIPTransport
from ..siptransport import SimulatedSIPTransportConnection
from ..siptransport import SimulatedNetwork
from abstractTransportConnectionTestCase import AbstractTransportConnectionTestCase
class TestSimulatedTransportConnection(AbstractTransportConnectionTestCase):
    """Exercise SimulatedSIPTransport end-to-end on the in-memory SimulatedNetwork.

    The run_NN_* steps are intentionally order-dependent: each one builds on the
    network state left behind by the previous step, so they are driven from a
    single test() method rather than as independent tests.
    """
    # TODO: will push most of this up.
    def setUp(self):
        # Reset the shared simulated network and (re)create three transports,
        # wiring transport1 with handlers that record every event for later
        # assertions. bindAddress*/bindPort* come from the abstract base class.
        SimulatedNetwork.clear()
        self.hasBound = False
        self.bindHasFailed = False
        self.connectedConnections = []
        self.notConnectedAddressesAndPorts = []
        self.receivedRequests = []
        self.receivedResponses = []
        self.transport1 = SimulatedSIPTransport(self.bindAddress1, self.bindPort1)
        self.transport1.when_event_do("bound", self.bound_event_handler)
        self.transport1.when_event_do("bindFailed", self.bind_failed_event_handler)
        self.transport1.when_event_do("madeConnection", self.made_connection_event_handler)
        self.transport1.when_event_do("couldNotMakeConnection", self.could_not_make_connection_event_handler)
        self.transport1.when_event_do("lostConnection", self.lost_connection_event_handler)
        self.transport1.when_event_do("receivedValidConnectedRequest", self.received_valid_connected_request_event_handler)
        self.transport1.when_event_do("receivedValidConnectedResponse", self.received_valid_connected_response_event_handler)
        self.transport2 = SimulatedSIPTransport(self.bindAddress2, self.bindPort2)
        self.transport3 = SimulatedSIPTransport(self.bindAddress3, self.bindPort3)
    def test(self):
        """Drive the ordered scenario steps; each step depends on the previous one."""
        self.run_00_initialSanityCheck()
        self.run_01_bind()
        self.run_02_makeOutboundConnection()
        self.run_03_makeInboundConnection()
        self.run_04_attemptSecondBind()
        self.run_05_attemptConnectToBogusAddressAndPort()
        self.run_06_attemptConnectToOwnAddressAndPort()
        self.run_07_sendRequestsVerifyReceipt()
        self.run_08_sendResponsesVerifyReceipt()
    def run_00_initialSanityCheck(self):
        """Before any bind: transports exist, report SIM/reliable, and nothing is connected."""
        self.assertIsInstance(self.transport1, SimulatedSIPTransport)
        self.assertEqual(0, len(self.transport1.connections))
        self.assertEqual(0, len(self.connectedConnections))
        self.assertEqual(0, len(self.receivedRequests))
        self.assertEqual(0, len(self.receivedResponses))
        self.assertTrue(self.transport1.is_reliable)
        self.assertEqual('SIM', self.transport1.transport_parameter_name)
        self.assertEqual(self.bindAddress1, self.transport1.bind_address)
        self.assertEqual(self.bindPort1, self.transport1.bind_port)
        self.assertFalse(self.hasBound)
        self.assertFalse(self.bindHasFailed)
        self.assertIsInstance(self.transport2, SimulatedSIPTransport)
        self.assertEqual(0, len(self.transport2.connections))
        self.assertTrue(self.transport2.is_reliable)
        self.assertEqual('SIM', self.transport2.transport_parameter_name)
        self.assertEqual(self.bindAddress2, self.transport2.bind_address)
        self.assertEqual(self.bindPort2, self.transport2.bind_port)
        self.assertIsInstance(self.transport3, SimulatedSIPTransport)
        self.assertEqual(0, len(self.transport3.connections))
        self.assertTrue(self.transport3.is_reliable)
        self.assertEqual('SIM', self.transport3.transport_parameter_name)
        self.assertEqual(self.bindAddress3, self.transport3.bind_address)
        self.assertEqual(self.bindPort3, self.transport3.bind_port)
        self.assertEqual(0, len(SimulatedNetwork.instance.boundTransports))
    def run_01_bind(self):
        """Bind all three transports; the network registers them in bind order."""
        self.transport1.bind()
        self.assertEqual(1, len(SimulatedNetwork.instance.boundTransports))
        self.assertTrue(self.hasBound)
        self.assertFalse(self.bindHasFailed)
        self.assertEqual(0, len(self.transport1.connections))
        self.assertEqual(0, len(self.connectedConnections))
        self.transport2.bind()
        self.assertEqual(2, len(SimulatedNetwork.instance.boundTransports))
        self.transport3.bind()
        self.assertEqual(3, len(SimulatedNetwork.instance.boundTransports))
        self.assertEqual(self.transport1, SimulatedNetwork.instance.boundTransports[0])
        self.assertEqual(self.transport2, SimulatedNetwork.instance.boundTransports[1])
        self.assertEqual(self.transport3, SimulatedNetwork.instance.boundTransports[2])
        self.assertIs(self.transport1, SimulatedNetwork.instance.bound_transport_with_address_and_port(self.bindAddress1, self.bindPort1))
        self.assertIs(self.transport2, SimulatedNetwork.instance.bound_transport_with_address_and_port(self.bindAddress2, self.bindPort2))
        self.assertIs(self.transport3, SimulatedNetwork.instance.bound_transport_with_address_and_port(self.bindAddress3, self.bindPort3))
    def run_02_makeOutboundConnection(self):
        """Connect transport1 -> transport2; both ends get a connection object."""
        # Connect transport1 to transport2
        self.transport1.connect_to_address_and_port(self.bindAddress2, self.bindPort2)
        self.assertEqual(1, len(self.transport1.connections))
        self.assertEqual(1, len(self.connectedConnections))
        self.assertIs(self.connectedConnections[0], self.transport1.connections[0])
        self.assertEqual(self.bindAddress2, self.transport1.connections[0].remoteAddress)
        self.assertIsInstance(self.transport1.connections[0].bind_port, int)
        self.assertIsInstance(self.transport1.connections[0].id, basestring)
        self.assertEqual(self.bindPort2, self.transport1.connections[0].remotePort)
        self.assertEqual(1, len(self.transport2.connections))
        self.assertEqual(0, len(self.transport3.connections))
        self.assertEqual(self.bindAddress1, self.transport2.connections[0].remoteAddress)
        self.assertIsInstance(self.transport2.connections[0].remotePort, int)
        self.assertIsInstance(self.transport2.connections[0].id, basestring)
        self.assertEqual(self.bindPort2, self.transport2.connections[0].bind_port)
    def run_03_makeInboundConnection(self):
        """Connect transport3 -> transport1; transport1 now holds two connections."""
        # Connect transport3 to transport1
        self.transport3.connect_to_address_and_port(self.bindAddress1, self.bindPort1)
        self.assertEqual(2, len(self.transport1.connections))
        self.assertEqual(1, len(self.transport2.connections))
        self.assertEqual(1, len(self.transport3.connections))
        self.assertEqual(2, len(self.connectedConnections))
        self.assertIs(self.connectedConnections[1], self.transport1.connections[1])
        self.assertEqual(self.bindAddress3, self.transport1.connections[1].remoteAddress)
        self.assertIsInstance(self.transport3.connections[0].bind_port, int)
        self.assertIsInstance(self.transport3.connections[0].id, basestring)
        self.assertEqual(self.bindPort1, self.transport1.connections[0].bind_port)
        self.assertEqual(self.bindAddress1, self.transport3.connections[0].remoteAddress)
        self.assertIsInstance(self.transport1.connections[0].remotePort, int)
        self.assertEqual(self.bindPort1, self.transport3.connections[0].remotePort)
    def run_04_attemptSecondBind(self):
        """Binding a second transport to an already-used address/port must fail."""
        self.assertFalse(self.bindHasFailed)
        transport = SimulatedSIPTransport(self.bindAddress1, self.bindPort1)
        transport.when_event_do("bindFailed", self.bind_failed_event_handler)
        transport.bind()
        self.assertTrue(self.bindHasFailed)
    def run_05_attemptConnectToBogusAddressAndPort(self):
        """Connecting to an unbound address or port reports couldNotMakeConnection."""
        self.assertEqual(0, len(self.notConnectedAddressesAndPorts))
        self.assertEqual(2, len(self.transport1.connections))
        self.transport1.connect_to_address_and_port('192.168.4.254', 5060)
        self.assertEqual(1, len(self.notConnectedAddressesAndPorts))
        self.assertEqual(2, len(self.transport1.connections))
        self.assertEqual(('192.168.4.254', 5060), self.notConnectedAddressesAndPorts[0])
        self.transport1.connect_to_address_and_port(self.bindAddress2, 5555)
        self.assertEqual(2, len(self.notConnectedAddressesAndPorts))
        self.assertEqual(2, len(self.transport1.connections))
        self.assertEqual((self.bindAddress2, 5555), self.notConnectedAddressesAndPorts[1])
    def run_06_attemptConnectToOwnAddressAndPort(self):
        """A transport must not be able to connect to itself."""
        self.assertEqual(2, len(self.notConnectedAddressesAndPorts))
        self.assertEqual(2, len(self.transport1.connections))
        self.transport1.connect_to_address_and_port(self.bindAddress1, self.bindPort1)
        self.assertEqual(3, len(self.notConnectedAddressesAndPorts))
        self.assertEqual(2, len(self.transport1.connections))
        self.assertEqual((self.bindAddress1, self.bindPort1), self.notConnectedAddressesAndPorts[2])
    def run_07_sendRequestsVerifyReceipt(self):
        """Requests sent from transport2/transport3 arrive at transport1 intact."""
        self.assertTrue(self.sampleRequest.is_request)
        self.assertTrue(self.sampleRequest2.is_request)
        self.assertEqual(0, len(self.receivedRequests))
        self.assertEqual(0, len(self.receivedResponses))
        self.transport2.connections[0].send_message(self.sampleRequest)
        self.assertEqual(1, len(self.receivedRequests))
        self.assertEqual(0, len(self.receivedResponses))
        self.assertIs(self.sampleRequest.__class__, self.receivedRequests[0].sip_message.__class__)
        self.assertEqual(self.sampleRequest.raw_string, self.receivedRequests[0].sip_message.raw_string)
        self.transport3.connections[0].send_message(self.sampleRequest2)
        self.assertEqual(2, len(self.receivedRequests))
        self.assertEqual(0, len(self.receivedResponses))
        self.assertIs(self.sampleRequest2.__class__, self.receivedRequests[1].sip_message.__class__)
        self.assertEqual(self.sampleRequest2.raw_string, self.receivedRequests[1].sip_message.raw_string)
    def run_08_sendResponsesVerifyReceipt(self):
        """Responses sent from transport2/transport3 arrive at transport1 intact."""
        self.assertTrue(self.sampleResponse.is_response)
        self.assertTrue(self.sampleResponse2.is_response)
        self.assertEqual(2, len(self.receivedRequests))
        self.assertEqual(0, len(self.receivedResponses))
        self.transport2.connections[0].send_message(self.sampleResponse)
        self.assertEqual(2, len(self.receivedRequests))
        self.assertEqual(1, len(self.receivedResponses))
        self.assertIs(self.sampleResponse.__class__, self.receivedResponses[0].sip_message.__class__)
        self.assertEqual(self.sampleResponse.raw_string, self.receivedResponses[0].sip_message.raw_string)
        self.transport3.connections[0].send_message(self.sampleResponse2)
        self.assertEqual(2, len(self.receivedRequests))
        self.assertEqual(2, len(self.receivedResponses))
        self.assertIs(self.sampleResponse2.__class__, self.receivedResponses[1].sip_message.__class__)
        self.assertEqual(self.sampleResponse2.raw_string, self.receivedResponses[1].sip_message.raw_string)
    def bound_event_handler(self):
        """Record that a transport successfully bound."""
        self.hasBound = True
    def bind_failed_event_handler(self):
        """Record that a bind attempt failed."""
        self.bindHasFailed = True
    def made_connection_event_handler(self, a_simulated_sip_transport_connection):
        """Record each new connection as it is made."""
        self.connectedConnections.append(a_simulated_sip_transport_connection)
    def could_not_make_connection_event_handler(self, bind_address_and_port):
        """Record the (address, port) pair of each failed connection attempt."""
        address_and_port = bind_address_and_port
        self.notConnectedAddressesAndPorts.append(address_and_port)
    def lost_connection_event_handler(self, a_simulated_sip_transport_connection):
        """Forget a connection when the transport reports it lost."""
        if a_simulated_sip_transport_connection in self.connectedConnections:
            self.connectedConnections.remove(a_simulated_sip_transport_connection)
    def received_valid_connected_request_event_handler(self, a_connected_aip_message):
        """Collect every valid SIP request received over a connection."""
        print("received_valid_connected_request_event_handler")
        self.receivedRequests.append(a_connected_aip_message)
    def received_valid_connected_response_event_handler(self, a_connected_aip_message):
        """Collect every valid SIP response received over a connection."""
        print("received_valid_connected_response_event_handler")
        self.receivedResponses.append(a_connected_aip_message)
| |
#!/usr/bin/env python
"""
This application presents a 'console' prompt to the user asking for commands.
For 'read' commands it will create ReadPropertyRequest PDUs, then lines up the
coorresponding ReadPropertyACK and prints the value. For 'write' commands it
will create WritePropertyRequst PDUs and prints out a simple acknowledgement.
"""
import sys
from bacpypes.debugging import bacpypes_debugging, ModuleLogger
from bacpypes.consolelogging import ConfigArgumentParser
from bacpypes.consolecmd import ConsoleCmd
from bacpypes.core import run, deferred, enable_sleeping
from bacpypes.iocb import IOCB
from bacpypes.pdu import Address
from bacpypes.object import get_datatype
from bacpypes.apdu import SimpleAckPDU, \
ReadPropertyRequest, ReadPropertyACK, WritePropertyRequest
from bacpypes.primitivedata import Null, Atomic, Boolean, Unsigned, Integer, \
Real, Double, OctetString, CharacterString, BitString, Date, Time, ObjectIdentifier
from bacpypes.constructeddata import Array, Any, AnyAtomic
from bacpypes.app import BIPSimpleApplication
from bacpypes.local.device import LocalDeviceObject
# some debugging: set _debug to a nonzero value to enable the _debug traces below
_debug = 0
_log = ModuleLogger(globals())

# globals: the BIPSimpleApplication instance, created in main() and used by the console commands
this_application = None
#
# ReadWritePropertyConsoleCmd
#
@bacpypes_debugging
class ReadWritePropertyConsoleCmd(ConsoleCmd):
def do_read(self, args):
"""read <addr> <objid> <prop> [ <indx> ]"""
args = args.split()
if _debug: ReadWritePropertyConsoleCmd._debug("do_read %r", args)
try:
addr, obj_id, prop_id = args[:3]
obj_id = ObjectIdentifier(obj_id).value
if prop_id.isdigit():
prop_id = int(prop_id)
datatype = get_datatype(obj_id[0], prop_id)
if not datatype:
raise ValueError("invalid property for object type")
# build a request
request = ReadPropertyRequest(
objectIdentifier=obj_id,
propertyIdentifier=prop_id,
)
request.pduDestination = Address(addr)
if len(args) == 4:
request.propertyArrayIndex = int(args[3])
if _debug: ReadWritePropertyConsoleCmd._debug(" - request: %r", request)
# make an IOCB
iocb = IOCB(request)
if _debug: ReadWritePropertyConsoleCmd._debug(" - iocb: %r", iocb)
# give it to the application
deferred(this_application.request_io, iocb)
# wait for it to complete
iocb.wait()
# do something for success
if iocb.ioResponse:
apdu = iocb.ioResponse
# should be an ack
if not isinstance(apdu, ReadPropertyACK):
if _debug: ReadWritePropertyConsoleCmd._debug(" - not an ack")
return
# find the datatype
datatype = get_datatype(apdu.objectIdentifier[0], apdu.propertyIdentifier)
if _debug: ReadWritePropertyConsoleCmd._debug(" - datatype: %r", datatype)
if not datatype:
raise TypeError("unknown datatype")
# special case for array parts, others are managed by cast_out
if issubclass(datatype, Array) and (apdu.propertyArrayIndex is not None):
if apdu.propertyArrayIndex == 0:
value = apdu.propertyValue.cast_out(Unsigned)
else:
value = apdu.propertyValue.cast_out(datatype.subtype)
else:
value = apdu.propertyValue.cast_out(datatype)
if _debug: ReadWritePropertyConsoleCmd._debug(" - value: %r", value)
sys.stdout.write(str(value) + '\n')
if hasattr(value, 'debug_contents'):
value.debug_contents(file=sys.stdout)
sys.stdout.flush()
# do something for error/reject/abort
if iocb.ioError:
sys.stdout.write(str(iocb.ioError) + '\n')
except Exception as error:
ReadWritePropertyConsoleCmd._exception("exception: %r", error)
def do_write(self, args):
"""write <addr> <objid> <prop> <value> [ <indx> ] [ <priority> ]"""
args = args.split()
ReadWritePropertyConsoleCmd._debug("do_write %r", args)
try:
addr, obj_id, prop_id = args[:3]
obj_id = ObjectIdentifier(obj_id).value
value = args[3]
indx = None
if len(args) >= 5:
if args[4] != "-":
indx = int(args[4])
if _debug: ReadWritePropertyConsoleCmd._debug(" - indx: %r", indx)
priority = None
if len(args) >= 6:
priority = int(args[5])
if _debug: ReadWritePropertyConsoleCmd._debug(" - priority: %r", priority)
# get the datatype
datatype = get_datatype(obj_id[0], prop_id)
if _debug: ReadWritePropertyConsoleCmd._debug(" - datatype: %r", datatype)
# change atomic values into something encodeable, null is a special case
if (value == 'null'):
value = Null()
elif issubclass(datatype, AnyAtomic):
dtype, dvalue = value.split(':', 1)
if _debug: ReadWritePropertyConsoleCmd._debug(" - dtype, dvalue: %r, %r", dtype, dvalue)
datatype = {
'b': Boolean,
'u': lambda x: Unsigned(int(x)),
'i': lambda x: Integer(int(x)),
'r': lambda x: Real(float(x)),
'd': lambda x: Double(float(x)),
'o': OctetString,
'c': CharacterString,
'bs': BitString,
'date': Date,
'time': Time,
'id': ObjectIdentifier,
}[dtype]
if _debug: ReadWritePropertyConsoleCmd._debug(" - datatype: %r", datatype)
value = datatype(dvalue)
if _debug: ReadWritePropertyConsoleCmd._debug(" - value: %r", value)
elif issubclass(datatype, Atomic):
if datatype is Integer:
value = int(value)
elif datatype is Real:
value = float(value)
elif datatype is Unsigned:
value = int(value)
value = datatype(value)
elif issubclass(datatype, Array) and (indx is not None):
if indx == 0:
value = Integer(value)
elif issubclass(datatype.subtype, Atomic):
value = datatype.subtype(value)
elif not isinstance(value, datatype.subtype):
raise TypeError("invalid result datatype, expecting %s" % (datatype.subtype.__name__,))
elif not isinstance(value, datatype):
raise TypeError("invalid result datatype, expecting %s" % (datatype.__name__,))
if _debug: ReadWritePropertyConsoleCmd._debug(" - encodeable value: %r %s", value, type(value))
# build a request
request = WritePropertyRequest(
objectIdentifier=obj_id,
propertyIdentifier=prop_id
)
request.pduDestination = Address(addr)
# save the value
request.propertyValue = Any()
try:
request.propertyValue.cast_in(value)
except Exception as error:
ReadWritePropertyConsoleCmd._exception("WriteProperty cast error: %r", error)
# optional array index
if indx is not None:
request.propertyArrayIndex = indx
# optional priority
if priority is not None:
request.priority = priority
if _debug: ReadWritePropertyConsoleCmd._debug(" - request: %r", request)
# make an IOCB
iocb = IOCB(request)
if _debug: ReadWritePropertyConsoleCmd._debug(" - iocb: %r", iocb)
# give it to the application
deferred(this_application.request_io, iocb)
# wait for it to complete
iocb.wait()
# do something for success
if iocb.ioResponse:
# should be an ack
if not isinstance(iocb.ioResponse, SimpleAckPDU):
if _debug: ReadWritePropertyConsoleCmd._debug(" - not an ack")
return
sys.stdout.write("ack\n")
# do something for error/reject/abort
if iocb.ioError:
sys.stdout.write(str(iocb.ioError) + '\n')
except Exception as error:
ReadWritePropertyConsoleCmd._exception("exception: %r", error)
def do_rtn(self, args):
    """rtn <addr> <net> ... """
    tokens = args.split()
    if _debug: ReadWritePropertyConsoleCmd._debug("do_rtn %r", tokens)

    # first token is the router address, the remaining tokens are the
    # network numbers reachable through that router
    router = Address(tokens[0])
    networks = [int(tok) for tok in tokens[1:]]

    # hand the router reference list to the network service access point
    this_application.nsap.update_router_references(None, router, networks)
#
# __main__
#
def main():
    """Build the local device/application, run the console until exit."""
    global this_application

    # parse the command line arguments
    parsed_args = ConfigArgumentParser(description=__doc__).parse_args()

    if _debug: _log.debug("initialization")
    if _debug: _log.debug("    - args: %r", parsed_args)

    # make a device object
    local_device = LocalDeviceObject(ini=parsed_args.ini)
    if _debug: _log.debug("    - this_device: %r", local_device)

    # make a simple application
    this_application = BIPSimpleApplication(local_device, parsed_args.ini.address)

    # make a console
    console = ReadWritePropertyConsoleCmd()
    if _debug: _log.debug("    - this_console: %r", console)

    # enable sleeping will help with threads
    enable_sleeping()

    _log.debug("running")
    run()
    _log.debug("fini")


if __name__ == "__main__":
    main()
| |
# Copyright (c) 2014 The Johns Hopkins University/Applied Physics Laboratory
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Key manager implementation for Barbican
"""
import array
import base64
import binascii
from barbicanclient import client as barbican_client
from barbicanclient.common import auth
from keystoneclient.v2_0 import client as keystone_client
from oslo.config import cfg
from cinder import exception
from cinder.i18n import _
from cinder.keymgr import key as keymgr_key
from cinder.keymgr import key_mgr
from cinder.openstack.common import excutils
from cinder.openstack.common import log as logging
# Global oslo.config handle; the two endpoint options are declared in
# cinder.keymgr.key_mgr and pulled into the [keymgr] group here so this
# module can read them without redeclaring them.
CONF = cfg.CONF
CONF.import_opt('encryption_auth_url', 'cinder.keymgr.key_mgr', group='keymgr')
CONF.import_opt('encryption_api_url', 'cinder.keymgr.key_mgr', group='keymgr')

# Module-level logger.
LOG = logging.getLogger(__name__)
class BarbicanKeyManager(key_mgr.KeyManager):
    """Key Manager Interface that wraps the Barbican client API.

    Every public method builds a fresh Barbican connection from the
    caller's context, performs one operation, and re-raises any failure
    after logging it (via oslo's save_and_reraise_exception).
    """

    def _create_connection(self, ctxt):
        """Creates a connection to the Barbican service.

        :param ctxt: the user context for authentication
        :return: a Barbican Connection object
        :throws NotAuthorized: if the ctxt is None
        """
        # Confirm context is provided, if not raise not authorized
        if not ctxt:
            msg = _("User is not authorized to use key manager.")
            LOG.error(msg)
            raise exception.NotAuthorized(msg)

        try:
            endpoint = CONF.keymgr.encryption_auth_url
            # Re-use the caller's token against the configured keystone
            # endpoint rather than re-authenticating with credentials.
            keystone = keystone_client.Client(token=ctxt.auth_token,
                                              endpoint=endpoint)
            keystone_auth = auth.KeystoneAuthV2(keystone=keystone)
            # NOTE(review): pokes a private attribute of the auth plugin to
            # force the Barbican endpoint instead of catalog lookup.
            keystone_auth._barbican_url = CONF.keymgr.encryption_api_url

            connection = barbican_client.Client(auth_plugin=keystone_auth)
            return connection
        except Exception as e:
            # Log, then re-raise the original exception unchanged.
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error creating Barbican client: %s"), (e))

    def create_key(self, ctxt, expiration=None, name='Cinder Volume Key',
                   payload_content_type='application/octet-stream', mode='CBC',
                   algorithm='AES', length=256):
        """Creates a key.

        :param ctxt: contains information of the user and the environment
                     for the request (cinder/context.py)
        :param expiration: the date the key will expire
        :param name: a friendly name for the secret
        :param payload_content_type: the format/type of the secret data
        :param mode: the algorithm mode (e.g. CBC or CTR mode)
        :param algorithm: the algorithm associated with the secret
        :param length: the bit length of the secret
        :return: the UUID of the new key
        :throws Exception: if key creation fails
        """
        connection = self._create_connection(ctxt)
        try:
            # Ask Barbican to generate the secret server-side (an "order"),
            # then resolve the order to find the secret it produced.
            order_ref = connection.orders.create(name, payload_content_type,
                                                 algorithm, length, mode,
                                                 expiration)
            order = connection.orders.get(order_ref)
            # The UUID is the last path segment of the secret URL.
            secret_uuid = order.secret_ref.rpartition('/')[2]
            return secret_uuid
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error creating key: %s"), (e))

    def store_key(self, ctxt, key, expiration=None, name='Cinder Volume Key',
                  payload_content_type='application/octet-stream',
                  payload_content_encoding='base64', algorithm='AES',
                  bit_length=256, mode='CBC', from_copy=False):
        """Stores (i.e., registers) a key with the key manager.

        :param ctxt: contains information of the user and the environment for
                     the request (cinder/context.py)
        :param key: the unencrypted secret data. Known as "payload" to the
                    barbicanclient api
        :param expiration: the expiration time of the secret in ISO 8601
                           format
        :param name: a friendly name for the key
        :param payload_content_type: the format/type of the secret data
        :param payload_content_encoding: the encoding of the secret data
        :param algorithm: the algorithm associated with this secret key
        :param bit_length: the bit length of this secret key
        :param mode: the algorithm mode used with this secret key
        :param from_copy: establishes whether the function is being used
                          to copy a key. In case of the latter, it does not
                          try to decode the key
        :returns: the UUID of the stored key
        :throws Exception: if key storage fails
        """
        connection = self._create_connection(ctxt)
        try:
            # Prefer the key's own algorithm over the default argument.
            if key.get_algorithm():
                algorithm = key.get_algorithm()
            if payload_content_type == 'text/plain':
                # Plain text is sent unencoded.
                payload_content_encoding = None
                encoded_key = key.get_encoded()
            elif (payload_content_type == 'application/octet-stream' and
                  not from_copy):
                # get_encoded() presumably returns a sequence of byte values
                # here; render them as hex, pack to raw bytes, then base64 for
                # the wire format Barbican expects.
                key_list = key.get_encoded()
                string_key = ''.join(map(lambda byte: "%02x" % byte, key_list))
                encoded_key = base64.b64encode(binascii.unhexlify(string_key))
            else:
                # Copies arrive already base64-encoded (see copy_key /
                # _get_secret_data), so pass the payload through as-is.
                encoded_key = key.get_encoded()
            secret_ref = connection.secrets.store(name, encoded_key,
                                                  payload_content_type,
                                                  payload_content_encoding,
                                                  algorithm, bit_length, mode,
                                                  expiration)
            secret_uuid = secret_ref.rpartition('/')[2]
            return secret_uuid
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error storing key: %s"), (e))

    def copy_key(self, ctxt, key_id):
        """Copies (i.e., clones) a key stored by barbican.

        :param ctxt: contains information of the user and the environment for
                     the request (cinder/context.py)
        :param key_id: the UUID of the key to copy
        :return: the UUID of the key copy
        :throws Exception: if key copying fails
        """
        connection = self._create_connection(ctxt)
        try:
            secret_ref = self._create_secret_ref(key_id, connection)
            meta = self._get_secret_metadata(ctxt, secret_ref)
            con_type = meta.content_types['default']
            secret_data = self._get_secret_data(ctxt, secret_ref,
                                                payload_content_type=con_type)
            key = keymgr_key.SymmetricKey(meta.algorithm, secret_data)
            # Re-store under the same metadata; from_copy=True keeps
            # store_key from re-encoding the already-encoded payload.
            copy_uuid = self.store_key(ctxt, key, meta.expiration,
                                       meta.name, con_type,
                                       'base64',
                                       meta.algorithm, meta.bit_length,
                                       meta.mode, True)
            return copy_uuid
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error copying key: %s"), (e))

    def _create_secret_ref(self, key_id, connection):
        """Creates the URL required for accessing a secret.

        :param key_id: the UUID of the key to copy
        :param connection: barbican key manager object
        :return: the URL of the requested secret
        """
        return connection.base_url + "/secrets/" + key_id

    def _get_secret_data(self, ctxt, secret_ref,
                         payload_content_type='application/octet-stream'):
        """Retrieves the secret data given a secret_ref and content_type.

        :param ctxt: contains information of the user and the environment for
                     the request (cinder/context.py)
        :param secret_ref: URL to access the secret
        :param payload_content_type: the format/type of the secret data
        :returns: the secret data (base64-encoded for octet-stream secrets,
                  raw otherwise)
        :throws Exception: if data cannot be retrieved
        """
        connection = self._create_connection(ctxt)
        try:
            generated_data = connection.secrets.decrypt(secret_ref,
                                                        payload_content_type)
            if payload_content_type == 'application/octet-stream':
                # Binary secrets are returned base64-encoded to callers.
                secret_data = base64.b64encode(generated_data)
            else:
                secret_data = generated_data
            return secret_data
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error getting secret data: %s"), (e))

    def _get_secret_metadata(self, ctxt, secret_ref):
        """Creates the URL required for accessing a secret's metadata.

        :param ctxt: contains information of the user and the environment for
                     the request (cinder/context.py)
        :param secret_ref: URL to access the secret
        :return: the secret's metadata
        :throws Exception: if there is an error retrieving the data
        """
        connection = self._create_connection(ctxt)
        try:
            return connection.secrets.get(secret_ref)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error getting secret metadata: %s"), (e))

    def get_key(self, ctxt, key_id,
                payload_content_type='application/octet-stream'):
        """Retrieves the specified key.

        :param ctxt: contains information of the user and the environment for
                     the request (cinder/context.py)
        :param key_id: the UUID of the key to retrieve
        :param payload_content_type: The format/type of the secret data
        :return: SymmetricKey representation of the key
        :throws Exception: if key retrieval fails
        """
        connection = self._create_connection(ctxt)
        try:
            secret_ref = self._create_secret_ref(key_id, connection)
            secret_data = self._get_secret_data(ctxt, secret_ref,
                                                payload_content_type)
            if payload_content_type == 'application/octet-stream':
                # convert decoded string to list of unsigned ints for each byte
                secret = array.array('B',
                                     base64.b64decode(secret_data)).tolist()
            else:
                secret = secret_data
            meta = self._get_secret_metadata(ctxt, secret_ref)
            key = keymgr_key.SymmetricKey(meta.algorithm, secret)
            return key
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error getting key: %s"), (e))

    def delete_key(self, ctxt, key_id):
        """Deletes the specified key.

        :param ctxt: contains information of the user and the environment for
                     the request (cinder/context.py)
        :param key_id: the UUID of the key to delete
        :throws Exception: if key deletion fails
        """
        connection = self._create_connection(ctxt)
        try:
            secret_ref = self._create_secret_ref(key_id, connection)
            connection.secrets.delete(secret_ref)
        except Exception as e:
            with excutils.save_and_reraise_exception():
                LOG.error(_("Error deleting key: %s"), (e))
| |
import logging
from uliweb.utils.coloredlog import ColoredStreamHandler
import cPickle
from cmd import Cmd
# Module-level logger with colorized console output.
log = logging.getLogger('redbreast.daemon')
log.addHandler(ColoredStreamHandler())
log.setLevel(logging.DEBUG)

# Version tag shown in the client's banner.
__daemon_client_version__ = 'v0.0.1'
class DaemonMsg(object):
    """Base class for messages exchanged between daemon client and server.

    Messages travel over a socket as pickled payloads; the receiver keeps
    reading until the accumulated bytes form a complete pickle.
    """

    def serialize(self):
        """Return this message pickled with the highest protocol."""
        return cPickle.dumps(self, cPickle.HIGHEST_PROTOCOL)

    @classmethod
    def load(cls, s):
        """Unpickle a message previously produced by serialize()/dump()."""
        return cPickle.loads(s)

    @classmethod
    def load_from_socket(cls, sock):
        """Read from `sock` until a complete pickled message arrives.

        :param sock: a connected socket-like object with recv().
        :returns: the deserialized message.
        :raises: whatever cPickle raises if the peer closes the connection
                 before a complete pickle has been received.
        """
        chunks = []
        while True:
            s = sock.recv(1024)
            if not s:
                # Peer closed the connection: fall through to one final
                # decode attempt so a truncated stream raises instead of
                # being silently dropped.
                break
            chunks.append(s)
            try:
                # Return as soon as the buffer deserializes cleanly —
                # the original code broke out and then deserialized the
                # same bytes a second time.
                return cls.load("".join(chunks))
            except Exception:
                # Pickle is still incomplete; keep reading.
                pass
        return cls.load("".join(chunks))

    @classmethod
    def dump(cls, v):
        """Pickle an arbitrary value with the highest protocol."""
        return cPickle.dumps(v, cPickle.HIGHEST_PROTOCOL)
class DaemonRequest(DaemonMsg):
    """A command sent from the client to the daemon.

    Carries the command name plus an optional message and data payload.
    """

    def __init__(self, command, msg=None, data=None):
        super(DaemonRequest, self).__init__()
        self.command, self.msg, self.data = command, msg, data
class DaemonResponse(DaemonMsg):
    """The daemon's reply: a success flag, a message, optional data."""

    def __init__(self, success, msg, data=None):
        super(DaemonResponse, self).__init__()
        self.success = success
        self.msg = msg
        self.data = data

    def info(self):
        """Render the message, prefixed with an error marker on failure."""
        template = "%s" if self.success else "[ERROR] %s"
        return template % self.msg
class DaemonRequestDef(object):
    """Metadata describing one command the daemon understands.

    The handler method name is derived as ``handle_<handle-or-command>``;
    ``inner`` commands are hidden from the help listing.
    """

    def __init__(self, command, usage, handle=None, inner=False):
        self.command = command
        self.usage = usage
        self.long_usage = usage
        handler_suffix = handle if handle else command
        self.handle = "handle_%s" % handler_suffix
        self.inner = inner
class GenericClient(Cmd, object):
intro = "Daemon Client %s" % __daemon_client_version__ + "\n" + \
"Type 'help', 'server' for more information."
prompt = ">>> "
def __init__(self, **kwargs):
super(GenericClient, self).__init__()
self.port = kwargs.get('port', 4202)
self.host = kwargs.get('host', '')
self.stdout = ColoredStreamHandler(self.stdout).stream
def prints(self, s):
self.stdout.write("%s\n" % s)
def start(self):
return self.cmdloop()
def default(self, line):
if line in ("exit", "quit", "bye", "EOF"):
print "Bye ..."
return True
cmd, arg, line = self.parseline(line)
msg = None
data = None
args = arg.split(" ")
if len(args)>0:
msg = args[0]
if len(args)>1:
data = " ".join(args[1:])
self.send_to_server(cmd, msg, data)
return False
def send_to_server(self, command, msg, data=None):
from socket import error
req = DaemonRequest(command, msg, data)
from gevent.socket import create_connection
try:
sock = create_connection((self.host, self.port))
sock.send(req.serialize())
response = DaemonMsg.load_from_socket(sock)
if response.success:
self.prints(response.msg)
else:
self.prints("{{white|red:[ERROR]}}: %s" % response.msg)
sock.close()
except error, e:
if e.errno == 10061:
self.prints("{{white|red:[ERROR]}}: cannot connect to the server at %s:%s"% (self.host, self.port))
else:
self.prints(e)
def emptyline(self):
return False
def print_client_help(self):
self.prints("\nsupported commands of client:")
self.prints(" %-10s %s" % ("host", "connect to another host"))
self.prints(" %-10s %s" % ("port", "change to connect to another port"))
self.prints(" %-10s %s" % ("exit", "exit this client"))
def do_host(self, args):
if not args:
self.prints("current server is (%s, %s)," % (self.host, self.port))
self.prints(" use 'host [HOSTNAME]' to update hostname.")
else:
old = self.host
self.host = args
self.prints("host changed from %s to %s" % (old, self.host))
def do_port(self, args):
if not args:
self.prints("current server is (%s, %s)," % (self.host, self.port))
self.prints(" use 'port [PORT]' to change port.")
else:
oldport = self.port
self.port = args
self.prints("port changed from %s to %s" % (oldport, self.port))
def do_help(self, args):
self.send_to_server("help", None)
self.print_client_help()
class GenericDaemon(object):
def __init__(self, **kwargs):
super(GenericDaemon, self).__init__()
self.port = kwargs.get('port', 5000)
self.host = kwargs.get('host', '')
self.sleep_seconds = kwargs.get('sleep', 0.5)
self.supported_requests = {}
self.register_request('server', usage="get server information.")
self.register_request('help', usage="get supportted command list.")
self.register_request('shutdown', usage="shut server down.")
self.register_request('echo', usage="echo from server.")
self.register_request('debug', inner=True)
self.strftime = "%Y-%m-%d %H:%M:%S"
self.debug = False
self.stopMainLoop = False
def gettimestamp(self):
from time import gmtime, strftime
return strftime(self.strftime)
def get_server_info(self):
return "Generic Daemon"
def prints(self, value):
if isinstance(value, str):
values = value.split("\n")
for s in values:
#log.info(s)
print s
elif isinstance(value, list):
for s in value:
#log.info(s)
print s
def get_address(self):
return (self.host, self.port)
def register_request(self, cmd, usage="", inner=False):
msg = None
if isinstance(cmd, str):
msg = DaemonRequestDef(cmd, usage, inner=inner)
elif isinstance(cmd, DaemonRequestDef):
msg = cmd
if msg:
self.supported_requests[msg.command] = msg
def is_supported_request(self, msg):
return self.supported_requests.has_key(msg.command)
def get_request_handle(self, msg):
return self.supported_requests[msg.command].handle
def mainLoop(self, cmd=None):
pass
def startMainLoop(self):
import gevent
print "..."
while True:
gevent.sleep(self.sleep_seconds)
if self.stopMainLoop:
self.prints(">>> MainLoop is stoped at %s" % self.gettimestamp())
break
self.mainLoop(cmd="shutdown")
def startOperationServer(self):
from gevent.server import StreamServer
server = None
def send_error_response(socket, msg):
response = DaemonResponse(False, msg)
socket.send(response.serialize())
socket.close()
def handle(socket, address):
self.prints(">>> # Call from: %s-%s" % address)
req = DaemonMsg.load_from_socket(socket)
self.prints(">>> %s" % req.command)
if self.debug:
self.prints(">>> [DEBUG] cmd:%s" % req.command)
self.prints(">>> [DEBUG] msg:%s" % req.msg)
self.prints(">>> [DEBUG] data:%s" % req.data)
if req.data:
if isinstance(req.data, str):
self.prints(">> - %s" % req.data)
if not self.is_supported_request(req):
send_error_response(socket, "Unsupported command: %s"% req.command)
return False
handle_name = self.get_request_handle(req)
if not hasattr(self, handle_name):
send_error_response(socket, "Unsupported command: %s"% req.command)
return False
response = getattr(self, handle_name)(req)
socket.send(response.serialize())
socket.close()
if req.command == "shutdown":
server.stop()
return True
self.print_daemon_head()
server = StreamServer(self.get_address(), handle)
server.serve_forever()
self.prints("The stream server has been shutdown at %s" % self.gettimestamp())
def print_daemon_head(self):
self.prints("---------------------------------------------")
self.prints(self.get_server_info())
self.prints("---------------------------------------------")
def start(self):
import gevent
gevent.joinall([
gevent.spawn(self.startMainLoop),
gevent.spawn(self.startOperationServer)
])
def handle_server(self, req):
msg = []
info = self.get_server_info()
if isinstance(info, list):
msg.extend(info)
else:
msg.append(info)
msg = "\n".join(msg)
return DaemonResponse(True, msg)
def handle_help(self, req):
msg = []
msg.append("supported commands of server:")
for cmd in sorted(self.supported_requests.keys()):
req_def = self.supported_requests[cmd]
if not req_def.inner:
msg.append(" %-10s %s" % (cmd, req_def.usage))
msg = "\n".join(msg)
return DaemonResponse(True, msg)
def handle_echo(self, req):
return DaemonResponse(True, req.msg)
def handle_shutdown(self, req):
self.stopMainLoop = True
return DaemonResponse(True, "The server is shutting down.")
def handle_debug(self, req):
if req.msg == "on":
self.debug = True
debug_mode = "ON"
else:
self.debug = False
debug_mode = "OFF"
return DaemonResponse(True, "Debug mode is %s" % debug_mode)
def create_response(self, success, msg, data=None):
return DaemonResponse(success, msg, data=data)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for head.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
# pylint: disable=g-bad-todo,g-import-not-at-top
import numpy as np
import six
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import model_fn
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.ops.losses import losses as losses_lib
from tensorflow.python.platform import test
# pylint: enable=g-bad-todo,g-import-not-at-top
def _assert_variables(test_case,
                      expected_global=None,
                      expected_model=None,
                      expected_trainable=None):
  """Asserts the graph's global/model/trainable variable names match."""
  checks = (
      (expected_global, variables.global_variables()),
      (expected_model, variables.model_variables()),
      (expected_trainable, variables.trainable_variables()),
  )
  for expected, actual_vars in checks:
    test_case.assertItemsEqual(
        tuple(expected) if expected is not None else (),
        tuple(v.name for v in actual_vars))
def _assert_no_variables(test_case):
  """Asserts the graph contains no global, model, or trainable variables."""
  _assert_variables(test_case, expected_global=None, expected_model=None,
                    expected_trainable=None)
# This must be called from within a tf.Session.
def _assert_metrics(test_case, expected_loss, expected_eval_metrics,
                    model_fn_ops):
  """Checks loss and every expected metric's update and value tensors."""
  test_case.assertAlmostEqual(expected_loss, model_fn_ops.loss.eval(), places=4)
  # every expected metric must actually be produced
  for name in six.iterkeys(expected_eval_metrics):
    test_case.assertIn(name, six.iterkeys(model_fn_ops.eval_metric_ops))
  variables.initialize_local_variables().run()
  for name, expected in six.iteritems(expected_eval_metrics):
    value_op, update_op = model_fn_ops.eval_metric_ops[name]
    actual_update = update_op.eval()
    test_case.assertAlmostEqual(
        expected,
        actual_update,
        places=4,
        msg="%s: update, expected %s, got %s." % (name, expected,
                                                  actual_update))
    actual_value = value_op.eval()
    test_case.assertAlmostEqual(
        expected,
        actual_value,
        places=4,
        msg="%s: value, expected %s, got %s." % (name, expected, actual_value))
# This must be called from within a tf.Session.
def _assert_summary_tags(test_case, expected_tags=None):
  """Asserts the graph's summary ops produce exactly `expected_tags`."""
  tags = []
  for summary_op in ops.get_collection(ops.GraphKeys.SUMMARIES):
    summary = summary_pb2.Summary()
    summary.ParseFromString(summary_op.eval())
    tags.append(summary.value[0].tag)
  test_case.assertItemsEqual(expected_tags or [], tags)
def _sigmoid(x):
return 1. / (1. + math.exp(-1 * x))
class PoissonHeadTest(test.TestCase):
  """Tests for the Poisson regression head."""

  def _assert_output_alternatives(self, model_fn_ops):
    # the default (None) alternative must be tagged as linear regression
    alternatives = {
        key: alt[0]
        for key, alt in six.iteritems(model_fn_ops.output_alternatives)
    }
    self.assertEquals({None: constants.ProblemType.LINEAR_REGRESSION},
                      alternatives)

  def _log_poisson_loss(self, logits, labels):
    """Reference log-Poisson loss with the Stirling approximation term."""
    x = np.array([row[0] for row in logits])
    z = np.array([row[0] for row in labels])
    lpl = np.exp(x) - z * x
    stirling = z * np.log(z) - z + 0.5 * np.log(2. * np.pi * z)
    # the Stirling correction only applies where the label exceeds one
    lpl += np.ma.masked_array(stirling, mask=(z <= 1)).filled(0.)
    return sum(lpl) / len(lpl)

  def testPoissonWithLogits(self):
    head = head_lib.poisson_regression_head()
    labels = ((0.,), (1.,), (1.,))
    logits = ((0.,), (-1.,), (3.,))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_summary_tags(self, ["loss"])
      _assert_no_variables(self)
      loss = self._log_poisson_loss(logits, labels)
      _assert_metrics(self, loss, {"loss": loss}, model_fn_ops)
class RegressionHeadTest(test.TestCase):
  """Tests for the plain (squared-error) regression head."""

  def _assert_output_alternatives(self, model_fn_ops):
    # the default (None) alternative must be tagged as linear regression
    self.assertEquals({
        None: constants.ProblemType.LINEAR_REGRESSION
    }, {
        k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
    })

  # TODO(zakaria): test multilabel regression.
  def testRegressionWithLogits(self):
    head = head_lib.regression_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=((0.,), (1.,), (1.,)),
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1.,), (1.,), (3.,)))
      self._assert_output_alternatives(model_fn_ops)
      _assert_summary_tags(self, ["loss"])
      _assert_no_variables(self)
      # squared errors are (1, 0, 4) -> mean 5/3
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

  def testRegressionWithInvalidLogits(self):
    # 2-dimensional logits against 1-dimensional labels must be rejected
    head = head_lib.regression_head()
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
        head.create_model_fn_ops(
            {},
            labels=((0.,), (1.,), (1.,)),
            mode=model_fn.ModeKeys.TRAIN,
            train_op_fn=head_lib.no_op_train_fn,
            logits=((1., 1.), (1., 1.), (3., 1.)))

  def testRegressionWithLogitsInput(self):
    # logits_input makes the head build its own logits layer (weights+bias)
    head = head_lib.regression_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=((0.,), (1.,), (1.,)),
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits_input=((0., 0.), (0., 0.), (0., 0.)))
      self._assert_output_alternatives(model_fn_ops)
      w = ("regression_head/logits/weights:0",
           "regression_head/logits/biases:0")
      _assert_variables(
          self, expected_global=w, expected_model=w, expected_trainable=w)
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss"])
      # zero inputs -> zero predictions; squared errors (0, 1, 1) -> 2/3
      _assert_metrics(self, 2. / 3, {"loss": 2. / 3}, model_fn_ops)

  def testRegressionWithLogitsAndLogitsInput(self):
    # supplying both logits and logits_input is ambiguous and must fail
    head = head_lib.regression_head()
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "Both logits and logits_input supplied"):
        head.create_model_fn_ops(
            {},
            labels=((0.,), (1.,), (1.,)),
            mode=model_fn.ModeKeys.TRAIN,
            train_op_fn=head_lib.no_op_train_fn,
            logits_input=((0., 0.), (0., 0.), (0., 0.)),
            logits=((1.,), (1.,), (3.,)))

  def testRegressionEvalMode(self):
    head = head_lib.regression_head()
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=((1.,), (1.,), (3.,)),
          mode=model_fn.ModeKeys.EVAL,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((0.,), (1.,), (1.,)))
      self._assert_output_alternatives(model_fn_ops)
      # EVAL mode must not build a train op
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

  def testRegressionWithLabelName(self):
    # labels may be passed as a dict keyed by the configured label name
    label_name = "my_label"
    head = head_lib.regression_head(label_name=label_name)
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels={label_name: ((0.,), (1.,), (1.,))},
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1.,), (1.,), (3.,)))
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

  def testRegressionWithWeights(self):
    head = head_lib.regression_head(weight_column_name="label_weight")
    with ops.Graph().as_default(), session.Session():
      weights = ((2.,), (5.,), (0.,))
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weights},
          labels=((0.,), (1.,), (1.,)),
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1.,), (1.,), (3.,)))
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # weighted squared errors: 2*1 + 5*0 + 0*4 = 2; training loss divides
      # by the example count, the eval metric divides by the weight sum
      _assert_metrics(self, 2. / len(weights), {"loss": 2. / np.sum(weights)},
                      model_fn_ops)

  def testRegressionWithCenteredBias(self):
    head = head_lib.regression_head(enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels=((0.,), (1.,), (1.,)),
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1.,), (1.,), (3.,)))
      self._assert_output_alternatives(model_fn_ops)
      # centered bias introduces a bias variable plus its Adagrad slot
      _assert_variables(
          self,
          expected_global=(
              "regression_head/centered_bias_weight:0",
              "regression_head/regression_head/centered_bias_weight/Adagrad:0",
          ),
          expected_trainable=("regression_head/centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(
          self, ["loss", "regression_head/centered_bias/bias_0"])
      _assert_metrics(self, 5. / 3, {"loss": 5. / 3}, model_fn_ops)

  def testRegressionErrorInSparseTensorLabels(self):
    head = head_lib.regression_head()
    with ops.Graph().as_default():
      labels = sparse_tensor.SparseTensorValue(
          indices=((0, 0), (1, 0), (2, 0)),
          values=(0., 1., 1.),
          dense_shape=(3, 1))
      # sparse labels are not supported by the regression head
      with self.assertRaisesRegexp(ValueError,
                                   "SparseTensor is not supported"):
        head.create_model_fn_ops(
            {},
            labels=labels,
            mode=model_fn.ModeKeys.TRAIN,
            train_op_fn=head_lib.no_op_train_fn,
            logits=((1.,), (1.,), (3.,)))
class MultiLabelHeadTest(test.TestCase):
def _assert_output_alternatives(self, model_fn_ops):
  # the default (None) alternative must be tagged as classification
  alternatives = {
      key: alt[0]
      for key, alt in six.iteritems(model_fn_ops.output_alternatives)
  }
  self.assertEquals({None: constants.ProblemType.CLASSIFICATION},
                    alternatives)
def setUp(self):
  # Shared fixture: a single example with three classes where only
  # class 2 is active, and logits that favor class 0.
  self._logits = ((1., 0., 0.),)
  self._labels = ((0, 0, 1),)
def _expected_eval_metrics(self, expected_loss):
  # Expected metric values for the setUp fixture; per-class entries are
  # derived directly from self._labels / self._logits.
  return {
      "accuracy": 1. / 3,
      "auc": 1. / 4,
      "loss": expected_loss,
      "auc/class0": 1.,
      "auc/class1": 1.,
      "auc/class2": 0.,
      "labels/actual_label_mean/class0": self._labels[0][0],
      "labels/actual_label_mean/class1": self._labels[0][1],
      "labels/actual_label_mean/class2": self._labels[0][2],
      "labels/logits_mean/class0": self._logits[0][0],
      "labels/logits_mean/class1": self._logits[0][1],
      "labels/logits_mean/class2": self._logits[0][2],
      "labels/prediction_mean/class0": self._logits[0][0],
      "labels/prediction_mean/class1": self._logits[0][1],
      "labels/prediction_mean/class2": self._logits[0][2],
      "labels/probability_mean/class0": _sigmoid(self._logits[0][0]),
      "labels/probability_mean/class1": _sigmoid(self._logits[0][1]),
      "labels/probability_mean/class2": _sigmoid(self._logits[0][2]),
  }
def testMultiLabelWithLogits(self):
  n_classes = 3
  head = head_lib.multi_label_head(
      n_classes=n_classes, metric_class_ids=range(n_classes))
  with ops.Graph().as_default(), session.Session():
    # build the TRAIN-mode ops directly from precomputed logits
    model_fn_ops = head.create_model_fn_ops(
        {},
        mode=model_fn.ModeKeys.TRAIN,
        labels=self._labels,
        train_op_fn=head_lib.no_op_train_fn,
        logits=self._logits)
    self._assert_output_alternatives(model_fn_ops)
    _assert_no_variables(self)
    _assert_summary_tags(self, ["loss"])
    expected_loss = .89985204
    _assert_metrics(self, expected_loss,
                    self._expected_eval_metrics(expected_loss), model_fn_ops)
def testMultiLabelTwoClasses(self):
  # Two-class variant with its own fixture (class 1 active, logits favor
  # class 0), so every metric below is spelled out explicitly.
  n_classes = 2
  labels = ((0, 1),)
  logits = ((1., 0.),)
  head = head_lib.multi_label_head(
      n_classes=n_classes, metric_class_ids=range(n_classes))
  with ops.Graph().as_default(), session.Session():
    model_fn_ops = head.create_model_fn_ops(
        {}, model_fn.ModeKeys.TRAIN, labels=labels,
        train_op_fn=head_lib.no_op_train_fn, logits=logits)
    self._assert_output_alternatives(model_fn_ops)
    _assert_no_variables(self)
    _assert_summary_tags(self, ["loss"])
    expected_loss = 1.00320443
    _assert_metrics(self, expected_loss, {
        "accuracy": 0.,
        "auc": 0.,
        "loss": expected_loss,
        "auc/class0": 1.,
        "auc/class1": 0.,
        "labels/actual_label_mean/class0": labels[0][0],
        "labels/actual_label_mean/class1": labels[0][1],
        "labels/logits_mean/class0": logits[0][0],
        "labels/logits_mean/class1": logits[0][1],
        "labels/prediction_mean/class0": logits[0][0],
        "labels/prediction_mean/class1": logits[0][1],
        "labels/probability_mean/class0": _sigmoid(logits[0][0]),
        "labels/probability_mean/class1": _sigmoid(logits[0][1]),
    }, model_fn_ops)
def testMultiLabelWithInvalidLogits(self):
  # ask for one more class than the logits provide -> shape mismatch
  head = head_lib.multi_label_head(n_classes=len(self._labels[0]) + 1)
  with ops.Graph().as_default(), session.Session():
    with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
      head.create_model_fn_ops(
          {},
          mode=model_fn.ModeKeys.TRAIN,
          labels=self._labels,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
def testMultiLabelWithLogitsInput(self):
  # With logits_input the head builds its own logits layer; zero inputs
  # give zero logits, hence probability .5 everywhere below.
  n_classes = 3
  head = head_lib.multi_label_head(
      n_classes=n_classes, metric_class_ids=range(n_classes))
  with ops.Graph().as_default(), session.Session():
    model_fn_ops = head.create_model_fn_ops(
        {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
        logits_input=((0., 0.),))
    self._assert_output_alternatives(model_fn_ops)
    # the implicit logits layer contributes weights and biases
    w = ("multi_label_head/logits/weights:0",
         "multi_label_head/logits/biases:0")
    _assert_variables(
        self, expected_global=w, expected_model=w, expected_trainable=w)
    variables.global_variables_initializer().run()
    _assert_summary_tags(self, ["loss"])
    expected_loss = .69314718
    _assert_metrics(self, expected_loss, {
        "accuracy": 2. / 3,
        "auc": 2. / 4,
        "loss": expected_loss,
        "auc/class0": 1.,
        "auc/class1": 1.,
        "auc/class2": 0.,
        "labels/actual_label_mean/class0": self._labels[0][0],
        "labels/actual_label_mean/class1": self._labels[0][1],
        "labels/actual_label_mean/class2": self._labels[0][2],
        "labels/logits_mean/class0": 0.,
        "labels/logits_mean/class1": 0.,
        "labels/logits_mean/class2": 0.,
        "labels/prediction_mean/class0": 0.,
        "labels/prediction_mean/class1": 0.,
        "labels/prediction_mean/class2": 0.,
        "labels/probability_mean/class0": .5,
        "labels/probability_mean/class1": .5,
        "labels/probability_mean/class2": .5,
    }, model_fn_ops)
def testMultiLabelWithLogitsAndLogitsInput(self):
n_classes = 3
head = head_lib.multi_label_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(
ValueError, "Both logits and logits_input supplied"):
head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
logits_input=((0., 0.),), logits=self._logits)
  def testMultiLabelEvalMode(self):
    """In EVAL mode no train_op or variables are created; loss matches TRAIN."""
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
def testMultiClassEvalModeWithLargeLogits(self):
n_classes = 3
head = head_lib.multi_label_head(
n_classes=n_classes, metric_class_ids=range(n_classes))
logits = ((2., 0., -1),)
with ops.Graph().as_default(), session.Session():
# logloss: z:label, x:logit
# z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
model_fn_ops = head.create_model_fn_ops(
{}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,
logits=logits)
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = 1.377779
expected_eval_metrics = {
"accuracy": 1. / 3,
"auc": 9.99999e-07,
"loss": expected_loss,
"auc/class0": 1.,
"auc/class1": 1.,
"auc/class2": 0.,
"labels/actual_label_mean/class0": 0. / 1,
"labels/actual_label_mean/class1": 0. / 1,
"labels/actual_label_mean/class2": 1. / 1,
"labels/logits_mean/class0": logits[0][0],
"labels/logits_mean/class1": logits[0][1],
"labels/logits_mean/class2": logits[0][2],
"labels/prediction_mean/class0": 1,
"labels/prediction_mean/class1": 0,
"labels/prediction_mean/class2": 0,
"labels/probability_mean/class0": _sigmoid(logits[0][0]),
"labels/probability_mean/class1": _sigmoid(logits[0][1]),
"labels/probability_mean/class2": _sigmoid(logits[0][2]),
}
_assert_metrics(self, expected_loss,
expected_eval_metrics, model_fn_ops)
  def testMultiLabelWithLabelName(self):
    """Labels passed as a dict keyed by label_name produce identical results."""
    n_classes = 3
    label_name = "my_label"
    head = head_lib.multi_label_head(
        n_classes=n_classes,
        label_name=label_name,
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, {label_name: self._labels},
          head_lib.no_op_train_fn, logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # Same loss as the plain-labels test: label_name only changes lookup.
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
  def testMultiLabelWithWeight(self):
    """A scalar example weight of .1 scales the training loss by .1."""
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": .1},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # Training loss is weighted (.1 * unweighted); the eval-metric loss
      # passed below (2.69956) is the weighted sum divided by sum of weights.
      _assert_metrics(self, .089985214,
                      self._expected_eval_metrics(2.69956), model_fn_ops)
  def testMultiLabelWithCustomLoss(self):
    """A user-supplied loss_fn replaces the default sigmoid cross-entropy."""
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes),
        loss_fn=_sigmoid_cross_entropy)
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": .1},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      _assert_metrics(self, 0.089985214,
                      self._expected_eval_metrics(0.089985214), model_fn_ops)
  def testMultiLabelWithCenteredBias(self):
    """enable_centered_bias creates a trainable bias var plus Adagrad slot."""
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes,
        enable_centered_bias=True,
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      # Only the bias weight is trainable; its Adagrad accumulator is
      # global-but-not-trainable.
      _assert_variables(
          self,
          expected_global=(
              "multi_label_head/centered_bias_weight:0",
              ("multi_label_head/multi_label_head/centered_bias_weight/"
               "Adagrad:0"),),
          expected_trainable=("multi_label_head/centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      # One bias summary per class, in addition to the loss.
      _assert_summary_tags(self, (
          "loss",
          "multi_label_head/centered_bias/bias_0",
          "multi_label_head/centered_bias/bias_1",
          "multi_label_head/centered_bias/bias_2"
      ))
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
  def testMultiLabelSparseTensorLabels(self):
    """Sparse class-id labels are accepted and match the dense-label loss."""
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      # One example whose single label is class 2 — equivalent to the dense
      # self._labels used by the other tests.
      labels = sparse_tensor.SparseTensorValue(
          indices=((0, 0),),
          values=(2,),
          dense_shape=(1, 1))
      model_fn_ops = head.create_model_fn_ops(
          features={},
          mode=model_fn.ModeKeys.TRAIN,
          labels=labels,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .89985204
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
  def testMultiLabelSparseTensorLabelsTooFewClasses(self):
    """Sparse labels require num_classes >= 2; a 1-wide head must fail."""
    n_classes = 3
    head = head_lib.multi_label_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    # Set _logits_dimension (n_classes) to a lower value; if it's set to 1
    # upfront, the class throws an error during initialization.
    head._logits_dimension = 1
    with ops.Graph().as_default(), session.Session():
      labels = sparse_tensor.SparseTensorValue(
          indices=((0, 0),),
          values=(2,),
          dense_shape=(1, 1))
      with self.assertRaisesRegexp(ValueError,
                                   "Must set num_classes >= 2 when passing"):
        head.create_model_fn_ops(
            features={},
            labels=labels,
            mode=model_fn.ModeKeys.TRAIN,
            train_op_fn=head_lib.no_op_train_fn,
            logits=[0.])
class BinaryClassificationHeadTest(test.TestCase):
  """Tests for the binary logistic head (multi_class_head with n_classes=2).

  Fixes in this revision: renamed
  testBinaryClassificationInferMode_withWightColumn ->
  testBinaryClassificationInferMode_withWeightColumn (typo), corrected the
  "htere" comment typo, and corrected "# softmax" comments on values that
  are sigmoid outputs of the single binary logit.
  """

  def _assert_output_alternatives(self, model_fn_ops):
    """Asserts the single unnamed output alternative is LOGISTIC_REGRESSION."""
    self.assertEquals({
        None: constants.ProblemType.LOGISTIC_REGRESSION
    }, {
        k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
    })

  def setUp(self):
    # Two examples: logit 1. with label 1. (correct), logit 1. with label 0.
    self._logits = ((1.,), (1.,))
    self._labels = ((1.,), (0.,))

  def _expected_eval_metrics(self, expected_loss):
    """Metrics shared by the tests that use the setUp logits/labels."""
    label_mean = np.mean(self._labels)
    return {
        "accuracy": 1. / 2,
        "accuracy/baseline_label_mean": label_mean,
        "accuracy/threshold_0.500000_mean": 1. / 2,
        "auc": 1. / 2,
        "labels/actual_label_mean": label_mean,
        "labels/prediction_mean": .731059,  # sigmoid(1.)
        "loss": expected_loss,
        "precision/positive_threshold_0.500000_mean": 1. / 2,
        "recall/positive_threshold_0.500000_mean": 1. / 1,
    }

  def testBinaryClassificationWithLogits(self):
    """TRAIN mode with explicit logits: loss and metrics match hand-math."""
    n_classes = 2
    head = head_lib.multi_class_head(n_classes=n_classes)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .81326175
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testBinaryClassificationWithInvalidLogits(self):
    """Logits of the wrong width must be rejected at graph-build time."""
    head = head_lib.multi_class_head(n_classes=len(self._labels) + 1)
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
        head.create_model_fn_ops(
            {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
            logits=self._logits)

  def testBinaryClassificationWithLogitsInput(self):
    """Head builds its own logits layer when given logits_input."""
    n_classes = 2
    head = head_lib.multi_class_head(n_classes=n_classes)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
          logits_input=((0., 0.), (0., 0.)))
      self._assert_output_alternatives(model_fn_ops)
      w = ("binary_logistic_head/logits/weights:0",
           "binary_logistic_head/logits/biases:0")
      _assert_variables(
          self, expected_global=w, expected_model=w, expected_trainable=w)
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss"])
      # Zero inputs + zero-initialized layer => p = .5, loss = -ln(.5).
      expected_loss = .69314718
      label_mean = np.mean(self._labels)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1. / 2,
          "accuracy/baseline_label_mean": label_mean,
          "accuracy/threshold_0.500000_mean": 1. / 2,
          "auc": 1. / 2,
          "labels/actual_label_mean": label_mean,
          "labels/prediction_mean": .5,  # sigmoid(0.)
          "loss": expected_loss,
          "precision/positive_threshold_0.500000_mean": 0. / 2,
          "recall/positive_threshold_0.500000_mean": 0. / 1,
      }, model_fn_ops)

  def testBinaryClassificationWithLogitsAndLogitsInput(self):
    """Supplying both logits and logits_input is a hard error."""
    head = head_lib.multi_class_head(n_classes=2)
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "Both logits and logits_input supplied"):
        head.create_model_fn_ops(
            {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
            logits_input=((0., 0.), (0., 0.)), logits=self._logits)

  def testBinaryClassificationEvalMode(self):
    """EVAL mode: no train_op, no variables; loss matches TRAIN."""
    n_classes = 2
    head = head_lib.multi_class_head(n_classes=n_classes)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .81326175
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testBinaryClassificationInferMode(self):
    """INFER mode produces predictions only — no train_op, no variables."""
    n_classes = 2
    head = head_lib.multi_class_head(n_classes=n_classes)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.INFER, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)

  def testBinaryClassificationInferMode_withWeightColumn(self):
    """INFER works even when features lack the configured weight column."""
    n_classes = 2
    head = head_lib.multi_class_head(n_classes=n_classes,
                                     weight_column_name="label_weight")
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          # This is what is being tested, features should not have weight for
          # inference.
          {}, model_fn.ModeKeys.INFER, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)

  def testErrorInSparseTensorLabels(self):
    """The binary head does not accept SparseTensor labels."""
    n_classes = 2
    head = head_lib.multi_class_head(n_classes=n_classes)
    with ops.Graph().as_default():
      labels = sparse_tensor.SparseTensorValue(
          indices=((0, 0), (1, 0), (2, 0)),
          values=(0, 1, 1),
          dense_shape=(3, 1))
      with self.assertRaisesRegexp(ValueError,
                                   "SparseTensor is not supported"):
        head.create_model_fn_ops(
            {},
            model_fn.ModeKeys.TRAIN,
            labels,
            head_lib.no_op_train_fn,
            logits=((1.,), (1.,), (3.,)))

  def testBinaryClassificationWithLabelName(self):
    """Labels passed as a dict keyed by label_name give identical results."""
    label_name = "my_label"
    head = head_lib.multi_class_head(n_classes=2, label_name=label_name)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {},
          labels={label_name: self._labels},
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = .81326175
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)

  def testBinaryClassificationWithWeights(self):
    """A zero weight drops the second example from loss and metrics."""
    n_classes = 2
    head = head_lib.multi_class_head(
        n_classes=n_classes, weight_column_name="label_weight")
    with ops.Graph().as_default(), session.Session():
      weights = ((1.,), (0.,))
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weights},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_total_loss = .31326166
      _assert_metrics(
          self,
          # Training loss is the weighted sum averaged over all examples.
          expected_total_loss / len(weights),
          {
              "accuracy": 1. / 1,
              "accuracy/baseline_label_mean": 1. / 1,
              "accuracy/threshold_0.500000_mean": 1. / 1,
              "auc": 0. / 1,
              "labels/actual_label_mean": 1. / 1,
              "labels/prediction_mean": .731059,  # sigmoid(1.)
              # eval loss is weighted loss divided by sum of weights.
              "loss": expected_total_loss,
              "precision/positive_threshold_0.500000_mean": 1. / 1,
              "recall/positive_threshold_0.500000_mean": 1. / 1,
          },
          model_fn_ops)

  def testBinaryClassificationWithCustomLoss(self):
    """A custom loss_fn is combined with the example weights."""
    head = head_lib.multi_class_head(
        n_classes=2, weight_column_name="label_weight",
        loss_fn=_sigmoid_cross_entropy)
    with ops.Graph().as_default(), session.Session():
      weights = ((.2,), (0.,))
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weights},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      # expected_loss is (total_weighted_loss)/1 since there is 1 nonzero
      # weight.
      expected_loss = 0.062652342
      _assert_metrics(
          self,
          expected_loss,
          {
              "accuracy": 1. / 1,
              "accuracy/baseline_label_mean": 1. / 1,
              "accuracy/threshold_0.500000_mean": 1. / 1,
              "auc": 0. / 1,
              "labels/actual_label_mean": 1. / 1,
              "labels/prediction_mean": .731059,  # sigmoid(1.)
              "loss": expected_loss,
              "precision/positive_threshold_0.500000_mean": 1. / 1,
              "recall/positive_threshold_0.500000_mean": 1. / 1,
          },
          model_fn_ops)

  def testBinaryClassificationWithCenteredBias(self):
    """enable_centered_bias creates the bias variable and its summary."""
    head = head_lib.multi_class_head(n_classes=2, enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_variables(
          self,
          expected_global=(
              "binary_logistic_head/centered_bias_weight:0",
              ("binary_logistic_head/binary_logistic_head/centered_bias_weight/"
               "Adagrad:0"),),
          expected_trainable=("binary_logistic_head/centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(
          self, ["loss", "binary_logistic_head/centered_bias/bias_0"])
      expected_loss = .81326175
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
class MultiClassHeadTest(test.TestCase):
  """Tests for the softmax multi-class head (multi_class_head, n_classes>2).

  NOTE(review): some tests construct the head via `head_lib.multi_class_head`
  and others via `head_lib._multi_class_head` — presumably public alias and
  private factory of the same head; confirm both remain exported.
  """
  def _assert_output_alternatives(self, model_fn_ops):
    """Asserts the single unnamed output alternative is CLASSIFICATION."""
    self.assertEquals({
        None: constants.ProblemType.CLASSIFICATION
    }, {
        k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
    })
  def setUp(self):
    # One example, three classes; true class is 2 but class 0 has the
    # largest logit, so accuracy is 0 everywhere below.
    self._logits = ((1., 0., 0.),)
    self._labels = (2,)
  def _expected_eval_metrics(self, expected_loss):
    """Metrics shared by the tests that use the setUp logits/labels."""
    return {
        "accuracy": 0.,
        "loss": expected_loss,
        "labels/actual_label_mean/class0": 0. / 1,
        "labels/actual_label_mean/class1": 0. / 1,
        "labels/actual_label_mean/class2": 1. / 1,
        "labels/logits_mean/class0": self._logits[0][0],
        "labels/logits_mean/class1": self._logits[0][1],
        "labels/logits_mean/class2": self._logits[0][2],
        "labels/prediction_mean/class0": self._logits[0][0],
        "labels/prediction_mean/class1": self._logits[0][1],
        "labels/prediction_mean/class2": self._logits[0][2],
        "labels/probability_mean/class0": 0.576117,  # softmax
        "labels/probability_mean/class1": 0.211942,  # softmax
        "labels/probability_mean/class2": 0.211942,  # softmax
    }
  def testMultiClassWithLogits(self):
    """TRAIN mode with explicit logits: loss and metrics match hand-math."""
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514447
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
  def testMultiClassWithInvalidLogits(self):
    """Logits of the wrong width must be rejected at graph-build time."""
    head = head_lib.multi_class_head(n_classes=len(self._logits[0]) + 1)
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
        head.create_model_fn_ops(
            {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
            logits=self._logits)
  def testMultiClassWithNoneTrainOpFnInTrain(self):
    """TRAIN mode requires a train_op_fn."""
    head = head_lib.multi_class_head(n_classes=3)
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "train_op_fn can not be None in TRAIN mode"):
        head.create_model_fn_ops(
            {}, model_fn.ModeKeys.TRAIN, self._labels,
            train_op_fn=None,
            logits=self._logits)
  def testMultiClassWithLogitsInput(self):
    """Head builds its own logits layer when given logits_input."""
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
          logits_input=((0., 0.),))
      self._assert_output_alternatives(model_fn_ops)
      w = ("multi_class_head/logits/weights:0",
           "multi_class_head/logits/biases:0")
      _assert_variables(
          self, expected_global=w, expected_model=w, expected_trainable=w)
      variables.global_variables_initializer().run()
      _assert_summary_tags(self, ["loss"])
      # Uniform probabilities (1/3 each) => loss = ln(3).
      expected_loss = 1.0986123
      _assert_metrics(self, expected_loss, {
          "accuracy": 0.,
          "loss": expected_loss,
          "labels/actual_label_mean/class0": 0. / 1,
          "labels/actual_label_mean/class1": 0. / 1,
          "labels/actual_label_mean/class2": 1. / 1,
          "labels/logits_mean/class0": 0.,
          "labels/logits_mean/class1": 0.,
          "labels/logits_mean/class2": 0.,
          "labels/prediction_mean/class0": 1.,
          "labels/prediction_mean/class1": 0.,
          "labels/prediction_mean/class2": 0.,
          "labels/probability_mean/class0": 0.333333,  # softmax
          "labels/probability_mean/class1": 0.333333,  # softmax
          "labels/probability_mean/class2": 0.333333,  # softmax
      }, model_fn_ops)
  def testMultiClassWithLogitsAndLogitsInput(self):
    """Supplying both logits and logits_input is a hard error."""
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      with self.assertRaisesRegexp(
          ValueError, "Both logits and logits_input supplied"):
        head.create_model_fn_ops(
            {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
            logits_input=((0., 0.),), logits=self._logits)
  def testMultiClassEnableCenteredBias(self):
    """enable_centered_bias creates one bias per class plus Adagrad slot."""
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes, enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_variables(
          self,
          expected_global=(
              "multi_class_head/centered_bias_weight:0",
              ("multi_class_head/multi_class_head/centered_bias_weight/"
               "Adagrad:0"),
          ),
          expected_trainable=("multi_class_head/centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(self,
                           ["loss",
                            "multi_class_head/centered_bias/bias_0",
                            "multi_class_head/centered_bias/bias_1",
                            "multi_class_head/centered_bias/bias_2"])
  def testMultiClassEvalMode(self):
    """EVAL mode: no train_op, no variables; loss matches TRAIN."""
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514447
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
  def testMultiClassEvalModeWithLargeLogits(self):
    """EVAL-mode metrics with large-magnitude logits."""
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes, metric_class_ids=range(n_classes))
    logits = ((2., 0., -1),)
    with ops.Graph().as_default(), session.Session():
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          {}, model_fn.ModeKeys.EVAL, self._labels, head_lib.no_op_train_fn,
          logits=logits)
      self._assert_output_alternatives(model_fn_ops)
      self.assertIsNone(model_fn_ops.train_op)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 3.1698461
      expected_eval_metrics = {
          "accuracy": 0.,
          "loss": expected_loss,
          "labels/actual_label_mean/class0": 0. / 1,
          "labels/actual_label_mean/class1": 0. / 1,
          "labels/actual_label_mean/class2": 1. / 1,
          "labels/logits_mean/class0": logits[0][0],
          "labels/logits_mean/class1": logits[0][1],
          "labels/logits_mean/class2": logits[0][2],
          "labels/prediction_mean/class0": 1,
          "labels/prediction_mean/class1": 0,
          "labels/prediction_mean/class2": 0,
          "labels/probability_mean/class0": 0.843795,  # softmax
          "labels/probability_mean/class1": 0.114195,  # softmax
          "labels/probability_mean/class2": 0.0420101,  # softmax
      }
      _assert_metrics(self, expected_loss,
                      expected_eval_metrics, model_fn_ops)
  def testMultiClassWithWeight(self):
    """A scalar example weight of .1 scales the training loss by .1."""
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes))
    with ops.Graph().as_default(), session.Session():
      weight = .1
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weight},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514447
      _assert_metrics(self, expected_loss * weight,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
  def testMultiClassWithCustomLoss(self):
    """A user-supplied loss_fn replaces the default softmax cross-entropy."""
    n_classes = 3
    head = head_lib.multi_class_head(
        n_classes=n_classes,
        weight_column_name="label_weight",
        metric_class_ids=range(n_classes),
        loss_fn=losses_lib.sparse_softmax_cross_entropy)
    with ops.Graph().as_default(), session.Session():
      weight = .1
      # logloss: z:label, x:logit
      # z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
      model_fn_ops = head.create_model_fn_ops(
          features={"label_weight": weight},
          labels=self._labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._logits)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_loss = 1.5514447 * weight
      _assert_metrics(self, expected_loss,
                      self._expected_eval_metrics(expected_loss), model_fn_ops)
  def testMultiClassInfer(self):
    """INFER mode: class predictions and serving output alternatives."""
    n_classes = 3
    head = head_lib._multi_class_head(
        n_classes=n_classes,
        head_name="head_name")
    with ops.Graph().as_default():
      model_fn_ops = head.create_model_fn_ops(
          features={},
          mode=model_fn.ModeKeys.INFER,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1., 0., 0.), (0., 0., 1.),))
      with session.Session():
        data_flow_ops.tables_initializer().run()
        self.assertAllEqual(
            [0, 2],
            model_fn_ops.predictions["classes"].eval())
        self.assertItemsEqual(
            ["head_name"], six.iterkeys(model_fn_ops.output_alternatives))
        self.assertEqual(
            constants.ProblemType.CLASSIFICATION,
            model_fn_ops.output_alternatives["head_name"][0])
        predictions_for_serving = (
            model_fn_ops.output_alternatives["head_name"][1])
        self.assertIn("classes", six.iterkeys(predictions_for_serving))
        self.assertAllEqual(
            [[0, 1, 2], [0, 1, 2]],
            predictions_for_serving["classes"].eval())
        self.assertIn("probabilities", six.iterkeys(predictions_for_serving))
        self.assertAllClose(
            [[0.576117, 0.2119416, 0.2119416],
             [0.2119416, 0.2119416, 0.576117]],
            predictions_for_serving["probabilities"].eval())
  def testInvalidNClasses(self):
    """n_classes of None, negative, 0 or 1 is rejected."""
    for n_classes in (None, -1, 0, 1):
      with self.assertRaisesRegexp(ValueError, "n_classes must be > 1"):
        head_lib.multi_class_head(n_classes=n_classes)
  def testMultiClassWithLabelKeysInvalidShape(self):
    """label_keys must have exactly n_classes entries."""
    with self.assertRaisesRegexp(
        ValueError, "Length of label_keys must equal n_classes"):
      head_lib._multi_class_head(
          n_classes=3, label_keys=("key0", "key1"))
  def testMultiClassWithLabelKeysTwoClasses(self):
    """label_keys is unsupported for the binary (n_classes=2) head."""
    with self.assertRaisesRegexp(
        ValueError, "label_keys is not supported for n_classes=2"):
      head_lib._multi_class_head(
          n_classes=2, label_keys=("key0", "key1"))
  def testMultiClassWithLabelKeysInfer(self):
    """With label_keys, INFER predictions are the string keys, not ints."""
    n_classes = 3
    label_keys = ("key0", "key1", "key2")
    head = head_lib._multi_class_head(
        n_classes=n_classes, label_keys=label_keys,
        metric_class_ids=range(n_classes),
        head_name="head_name")
    with ops.Graph().as_default():
      model_fn_ops = head.create_model_fn_ops(
          features={},
          mode=model_fn.ModeKeys.INFER,
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1., 0., 0.), (0., 0., 1.),))
      with session.Session():
        data_flow_ops.tables_initializer().run()
        self.assertAllEqual(
            [b"key0", b"key2"],
            model_fn_ops.predictions["classes"].eval())
        self.assertItemsEqual(
            ["head_name"], six.iterkeys(model_fn_ops.output_alternatives))
        self.assertEqual(
            constants.ProblemType.CLASSIFICATION,
            model_fn_ops.output_alternatives["head_name"][0])
        predictions_for_serving = (
            model_fn_ops.output_alternatives["head_name"][1])
        self.assertIn("classes", six.iterkeys(predictions_for_serving))
        self.assertAllEqual(
            [[b"key0", b"key1", b"key2"], [b"key0", b"key1", b"key2"]],
            predictions_for_serving["classes"].eval())
        self.assertIn("probabilities", six.iterkeys(predictions_for_serving))
        self.assertAllClose(
            [[0.576117, 0.2119416, 0.2119416],
             [0.2119416, 0.2119416, 0.576117]],
            predictions_for_serving["probabilities"].eval())
  def testMultiClassWithLabelKeysEvalAccuracy0(self):
    """String labels compare against predicted keys: here a miss (acc 0)."""
    n_classes = 3
    label_keys = ("key0", "key1", "key2")
    head = head_lib._multi_class_head(
        n_classes=n_classes,
        label_keys=label_keys)
    with ops.Graph().as_default():
      model_fn_ops = head.create_model_fn_ops(
          features={},
          mode=model_fn.ModeKeys.EVAL,
          labels=("key2",),
          train_op_fn=head_lib.no_op_train_fn,
          logits=((1., 0., 0.),))
      with session.Session():
        data_flow_ops.tables_initializer().run()
        self.assertIsNone(model_fn_ops.train_op)
        _assert_no_variables(self)
        _assert_summary_tags(self, ["loss"])
        expected_loss = 1.5514447
        expected_eval_metrics = {
            "accuracy": 0.,
            "loss": expected_loss,
        }
        _assert_metrics(self, expected_loss,
                        expected_eval_metrics, model_fn_ops)
  def testMultiClassWithLabelKeysEvalAccuracy1(self):
    """String labels compare against predicted keys: here a hit (acc 1)."""
    n_classes = 3
    label_keys = ("key0", "key1", "key2")
    head = head_lib._multi_class_head(
        n_classes=n_classes,
        label_keys=label_keys)
    with ops.Graph().as_default():
      model_fn_ops = head.create_model_fn_ops(
          features={},
          mode=model_fn.ModeKeys.EVAL,
          labels=("key2",),
          train_op_fn=head_lib.no_op_train_fn,
          logits=((0., 0., 1.),))
      with session.Session():
        data_flow_ops.tables_initializer().run()
        self.assertIsNone(model_fn_ops.train_op)
        _assert_no_variables(self)
        _assert_summary_tags(self, ["loss"])
        expected_loss = 0.5514447
        expected_eval_metrics = {
            "accuracy": 1.,
            "loss": expected_loss,
        }
        _assert_metrics(self, expected_loss,
                        expected_eval_metrics, model_fn_ops)
class BinarySvmHeadTest(test.TestCase):
def _assert_output_alternatives(self, model_fn_ops):
self.assertEquals({
None: constants.ProblemType.LOGISTIC_REGRESSION
}, {
k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
})
def setUp(self):
# Prediction for first example is in the right side of the hyperplane
# (i.e., < 0) but it is within the [-1,1] margin. There is a 0.5 loss
# incurred by this example. The 2nd prediction is outside the margin so it
# incurs no loss at all.
self._predictions = ((-.5,), (1.2,))
self._labels = (0, 1)
self._expected_losses = (.5, 0.)
def testBinarySVMWithLogits(self):
head = head_lib.binary_svm_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
model_fn.ModeKeys.TRAIN,
self._labels,
head_lib.no_op_train_fn,
logits=self._predictions)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,
"loss": expected_loss,
}, model_fn_ops)
def testBinarySVMWithInvalidLogits(self):
head = head_lib.binary_svm_head()
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(ValueError, "Dimensions.*not compatible"):
head.create_model_fn_ops(
{}, model_fn.ModeKeys.TRAIN, self._labels, head_lib.no_op_train_fn,
logits=np.ones((2, 2)))
def testBinarySVMWithLogitsInput(self):
head = head_lib.binary_svm_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
model_fn.ModeKeys.TRAIN,
self._labels,
head_lib.no_op_train_fn,
logits_input=((0., 0.), (0., 0.)))
self._assert_output_alternatives(model_fn_ops)
w = ("binary_svm_head/logits/weights:0",
"binary_svm_head/logits/biases:0")
_assert_variables(
self, expected_global=w, expected_model=w, expected_trainable=w)
variables.global_variables_initializer().run()
_assert_summary_tags(self, ["loss"])
expected_loss = 1.
_assert_metrics(self, expected_loss, {
"accuracy": .5,
"loss": expected_loss,
}, model_fn_ops)
def testBinarySVMWithLogitsAndLogitsInput(self):
head = head_lib.binary_svm_head()
with ops.Graph().as_default(), session.Session():
with self.assertRaisesRegexp(
ValueError, "Both logits and logits_input supplied"):
head.create_model_fn_ops(
{},
model_fn.ModeKeys.TRAIN,
self._labels,
head_lib.no_op_train_fn,
logits_input=((0., 0.), (0., 0.)),
logits=self._predictions)
def testBinarySVMEvalMode(self):
head = head_lib.binary_svm_head()
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
model_fn.ModeKeys.EVAL,
self._labels,
head_lib.no_op_train_fn,
logits=self._predictions)
self._assert_output_alternatives(model_fn_ops)
self.assertIsNone(model_fn_ops.train_op)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,
"loss": expected_loss,
}, model_fn_ops)
def testBinarySVMWithLabelName(self):
label_name = "my_label"
head = head_lib.binary_svm_head(label_name=label_name)
with ops.Graph().as_default(), session.Session():
model_fn_ops = head.create_model_fn_ops(
{},
model_fn.ModeKeys.TRAIN,
{label_name: self._labels},
head_lib.no_op_train_fn,
logits=self._predictions)
self._assert_output_alternatives(model_fn_ops)
_assert_no_variables(self)
_assert_summary_tags(self, ["loss"])
expected_loss = np.average(self._expected_losses)
_assert_metrics(self, expected_loss, {
"accuracy": 1.,
"loss": expected_loss,
}, model_fn_ops)
  def testBinarySVMWithWeights(self):
    """Per-example weights scale the loss; note the two loss normalizations.

    The returned loss divides the weighted sum by the number of examples,
    while the "loss" metric divides it by the sum of the weights.
    """
    head = head_lib.binary_svm_head(weight_column_name="weights")
    with ops.Graph().as_default(), session.Session():
      weights = (7., 11.)
      model_fn_ops = head.create_model_fn_ops(
          features={"weights": weights},
          mode=model_fn.ModeKeys.TRAIN,
          labels=self._labels,
          train_op_fn=head_lib.no_op_train_fn,
          logits=self._predictions)
      self._assert_output_alternatives(model_fn_ops)
      _assert_no_variables(self)
      _assert_summary_tags(self, ["loss"])
      expected_weighted_sum = np.sum(
          np.multiply(weights, self._expected_losses))
      _assert_metrics(self, expected_weighted_sum / len(weights), {
          "accuracy": 1.,
          "loss": expected_weighted_sum / np.sum(weights),
      }, model_fn_ops)
  def testBinarySVMWithCenteredBias(self):
    """enable_centered_bias creates bias (and Adagrad slot) variables."""
    head = head_lib.binary_svm_head(enable_centered_bias=True)
    with ops.Graph().as_default(), session.Session():
      model_fn_ops = head.create_model_fn_ops(
          {},
          model_fn.ModeKeys.TRAIN,
          self._labels,
          head_lib.no_op_train_fn,
          logits=self._predictions)
      self._assert_output_alternatives(model_fn_ops)
      # Only the centered-bias weight is trainable; the Adagrad accumulator
      # is a global (non-trainable) variable.
      _assert_variables(
          self,
          expected_global=(
              "binary_svm_head/centered_bias_weight:0",
              ("binary_svm_head/binary_svm_head/centered_bias_weight/"
               "Adagrad:0"),
          ),
          expected_trainable=("binary_svm_head/centered_bias_weight:0",))
      variables.global_variables_initializer().run()
      _assert_summary_tags(
          self, ["loss", "binary_svm_head/centered_bias/bias_0"])
      expected_loss = np.average(self._expected_losses)
      _assert_metrics(self, expected_loss, {
          "accuracy": 1.,
          "loss": expected_loss,
      }, model_fn_ops)
class MultiHeadTest(test.TestCase):
  """Tests for multi_head, which combines several named heads into one."""
  def testInvalidHeads(self):
    """Every head combined by multi_head must have a head_name."""
    named_head = head_lib.multi_class_head(
        n_classes=3, label_name="label", head_name="head1")
    unnamed_head = head_lib.multi_class_head(
        n_classes=4, label_name="label")
    with self.assertRaisesRegexp(ValueError, "must have names"):
      head_lib.multi_head((named_head, unnamed_head))
  def testTrainWithNoneTrainOpFn(self):
    """TRAIN mode requires a non-None train_op_fn."""
    head1 = head_lib.multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib.multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib.multi_head((head1, head2))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    with self.assertRaisesRegexp(
        ValueError, "train_op_fn can not be None in TRAIN mode"):
      head.create_model_fn_ops(
          features={"weights": (2.0, 10.0)},
          labels=labels,
          mode=model_fn.ModeKeys.TRAIN,
          train_op_fn=None,
          logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
  def testTrain_withNoHeadWeights(self):
    """Without head weights the per-head losses are combined unweighted."""
    head1 = head_lib.multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib.multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib.multi_head((head1, head2))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    # A single logits tensor is split across heads: 3 columns for head1,
    # 4 columns for head2.
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=head_lib.no_op_train_fn,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
    self.assertIsNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNotNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)
    with session.Session() as sess:
      self.assertAlmostEqual(2.224, sess.run(model_fn_ops.loss), places=3)
  def testTrain_withHeadWeights(self):
    """Head weights (1, .5) scale each head's loss contribution."""
    head1 = head_lib.multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib.multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib.multi_head((head1, head2), (1, .5))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=head_lib.no_op_train_fn,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
    self.assertIsNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNotNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)
    with session.Session() as sess:
      self.assertAlmostEqual(1.531, sess.run(model_fn_ops.loss), places=3)
  def testTrain_withDictLogits(self):
    """Logits may be supplied per head as a dict keyed by head_name."""
    head1 = head_lib.multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib.multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib.multi_head((head1, head2))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.TRAIN,
        train_op_fn=head_lib.no_op_train_fn,
        logits={head1.head_name: ((-0.7, 0.2, .1),),
                head2.head_name: ((.1, .1, .1, .1),)})
    self.assertIsNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNotNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)
    with session.Session() as sess:
      # Same expected loss as the single-tensor-logits case above.
      self.assertAlmostEqual(2.224, sess.run(model_fn_ops.loss), places=3)
  def testInfer(self):
    """INFER mode: per-head predictions and output alternatives, no loss."""
    head1 = head_lib.multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib.multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib.multi_head((head1, head2), (1, .5))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.INFER,
        train_op_fn=head_lib.no_op_train_fn,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    self.assertFalse(model_fn_ops.eval_metric_ops)
    # Tests predictions keys.
    self.assertItemsEqual((
        ("head1", prediction_key.PredictionKey.LOGITS),
        ("head1", prediction_key.PredictionKey.PROBABILITIES),
        ("head1", prediction_key.PredictionKey.CLASSES),
        ("head2", prediction_key.PredictionKey.LOGITS),
        ("head2", prediction_key.PredictionKey.PROBABILITIES),
        ("head2", prediction_key.PredictionKey.CLASSES),
    ), model_fn_ops.predictions.keys())
    # Tests output alternative.
    self.assertEquals({
        "head1": constants.ProblemType.CLASSIFICATION,
        "head2": constants.ProblemType.CLASSIFICATION,
    }, {
        k: v[0] for k, v in six.iteritems(model_fn_ops.output_alternatives)
    })
    self.assertItemsEqual((
        prediction_key.PredictionKey.PROBABILITIES,
        prediction_key.PredictionKey.CLASSES,
    ), model_fn_ops.output_alternatives["head1"][1].keys())
    self.assertItemsEqual((
        prediction_key.PredictionKey.PROBABILITIES,
        prediction_key.PredictionKey.CLASSES,
    ), model_fn_ops.output_alternatives["head2"][1].keys())
  def testEval(self):
    """EVAL mode: loss and per-head metrics set, no train_op."""
    head1 = head_lib.multi_class_head(
        n_classes=3, label_name="label1", head_name="head1")
    head2 = head_lib.multi_class_head(
        n_classes=4, label_name="label2", head_name="head2")
    head = head_lib.multi_head((head1, head2), (1, .5))
    labels = {
        "label1": (1,),
        "label2": (1,)
    }
    model_fn_ops = head.create_model_fn_ops(
        features={"weights": (2.0, 10.0)},
        labels=labels,
        mode=model_fn.ModeKeys.EVAL,
        train_op_fn=head_lib.no_op_train_fn,
        logits=((-0.7, 0.2, .1, .1, .1, .1, .1),))
    self.assertIsNotNone(model_fn_ops.predictions)
    self.assertIsNotNone(model_fn_ops.loss)
    self.assertIsNone(model_fn_ops.train_op)
    self.assertIsNotNone(model_fn_ops.eval_metric_ops)
    self.assertIsNone(model_fn_ops.output_alternatives)
    metric_ops = model_fn_ops.eval_metric_ops
    # Tests eval keys.
    self.assertIn("accuracy/head1", metric_ops.keys())
    self.assertIn("accuracy/head2", metric_ops.keys())
def _sigmoid_cross_entropy(labels, logits, weights):
  """Test helper: thin wrapper around losses_lib.sigmoid_cross_entropy."""
  return losses_lib.sigmoid_cross_entropy(labels, logits, weights)
if __name__ == "__main__":
  # Run all tests in this module.
  test.main()
| |
from browser import document, html, alert, local_storage
import ui, ui.dialog
import json
from ui_menu import Menu, BarItem, MenuListItem
ui.add_stylesheet()
storage = local_storage.storage  # persistent key/value store (browser localStorage)
current_cell = None  # cell currently hosting an edit <INPUT>, or None
current_cell_info = None  # NOTE(review): declared global in load() but never assigned here — possibly vestigial
def entry_keydown(ev):
    """Handle keydown inside a cell's edit <INPUT>.

    Navigation keys (tab/arrows/enter) commit the entry and move the
    selection; Escape aborts the edit and restores the cell's saved value.
    """
    global current_cell
    _input = ev.target
    cell = _input.parent
    is_arrow = ev.keyCode in [9, #tab
                              37, # left
                              39, # right
                              38, #up
                              40, #down
                              13 # CR
                              ]
    if is_arrow:
        update(cell)
        current_cell = None
        move_sel(ev)
        #document.bind('keydown', doc_keydown)
    elif ev.keyCode == 27: # escape
        # restore the value saved by make_input before editing started
        update_current(cell.info['entry'])
        cell.remove(_input)
        cell.text = cell.value
        current_cell = None
        #document.bind('keydown', doc_keydown)
    ev.stopPropagation()
def entry_keyup(ev):
    """Mirror the edited cell's <INPUT> content into the top cell editor."""
    update_current(current_cell.get(selector='INPUT')[0].value)
def update_current(data):
    """Show *data* in the cell-editor input (element id 'current')."""
    document['current'].value = data
def entry_click(ev):
    """Swallow clicks inside a cell's <INPUT> so they don't re-select the cell."""
    ev.stopPropagation()
# callbacks for cell editor
def enter_editor(ev):
    """Click on the cell editor: open an <INPUT> on the selected cell and
    sync the editor's current text into it."""
    make_input(selected)
    ev.target.focus()
    current_cell.get(selector='INPUT')[0].value = ev.target.value
    ev.stopPropagation()
def editor_keydown(ev):
    """Keep keydowns in the cell editor from reaching the document handler."""
    ev.stopPropagation()
def update_from_editor(ev):
    """Keyup in the cell editor: sync text into the edited cell's <INPUT>;
    Enter commits the edit, Escape aborts and restores the saved value."""
    global current_cell
    current_cell.get(selector='INPUT')[0].value = ev.target.value
    if ev.keyCode == 13: # CR
        update(current_cell)
        current_cell = None
        ev.target.blur()
    elif ev.keyCode == 27: # escape
        update_current(current_cell.info['entry'])
        current_cell.clear()
        current_cell.text = current_cell.value
        current_cell = None
        ev.target.blur()
    ev.stopPropagation()
selected = None  # currently highlighted cell, or None
def update(cell):
    """Commit the content of *cell*'s edit <INPUT> into the cell.

    Entries starting with '=' are treated as formulas and evaluated;
    anything else is displayed verbatim.
    """
    # update the table, based on the last entry in specified cell
    content = cell.get(selector='INPUT')[0].value
    cell.info['entry'] = content
    if content.startswith('='):
        # NOTE(review): eval() executes arbitrary Python from the cell entry.
        # This is the formula mechanism of this client-side demo, but it is
        # worth keeping in mind if entries ever come from an untrusted source.
        cell.text = eval(content[1:])
    else:
        cell.text = content
def doc_keydown(ev):
    """Document-level keydown: navigation keys move the selection,
    any other (non-zero) key opens an editor on the selected cell."""
    nav_keys = (9,   # tab
                37,  # left
                39,  # right
                38,  # up
                40,  # down
                13)  # CR
    if ev.keyCode in nav_keys:
        move_sel(ev)
    elif ev.keyCode != 0:
        make_input(selected)
def move_sel(ev):
    """Move the selection to a neighbouring cell based on the pressed key.

    Column 0 and row 0 of the table are headers, so data cells start at
    index 1; the `>1` guards keep the selection from leaving the grid.
    """
    cell = selected
    row = cell.parent
    cell_num = row.children.index(cell)
    row_num = row.parent.children.index(row)
    # jump to next cell
    if ev.keyCode==39 or (ev.keyCode==9 and not ev.shiftKey) or ev.keyCode==13:
        if cell_num<len(row.children)-1:
            next_cell = row.children[cell_num+1]
            mark_selected(next_cell)
    elif ev.keyCode==37 or (ev.keyCode==9 and ev.shiftKey):
        if cell_num>1:
            next_cell = row.children[cell_num-1]
            mark_selected(next_cell)
    elif ev.keyCode == 40:
        if row_num<len(row.parent.children)-1:
            next_cell = row.parent.children[row_num+1].children[cell_num]
            mark_selected(next_cell)
    elif ev.keyCode == 38:
        if row_num>1:
            next_cell = row.parent.children[row_num-1].children[cell_num]
            mark_selected(next_cell)
    ev.preventDefault()
    ev.stopPropagation()
def select(ev):
    """Click on a cell: commit any pending edit, then highlight the cell."""
    global current_cell
    pending = current_cell
    if pending is not None:
        update(pending)
        current_cell = None
    mark_selected(ev.target)
def mark_selected(cell):
    """Highlight *cell* as the current selection and show its entry in the
    cell editor; the previously selected cell gets its border restored."""
    global selected
    if selected is not None:
        selected.style.borderColor = '#000'
        selected.style.borderWidth = '1px'
    cell.style.borderColor = 'blue'
    cell.style.borderWidth = '2px'
    selected = cell
    update_current(cell.info['entry'])
def deselect():
    """Drop the current selection, restoring the cell's normal border."""
    global selected
    cell = selected
    if cell is not None:
        cell.style.borderColor = '#000'
        cell.style.borderWidth = '1px'
    selected = None
def entry(ev):
    """Double-click on a cell: open an editor pre-filled with its entry."""
    make_input(ev.target, True)
def make_input(cell, keep_value=False):
    """Replace *cell*'s content with an <INPUT> element for editing.

    If another cell was being edited, its current input value is first
    flushed back into that cell's display. When *keep_value* is true the
    input is pre-filled with the cell's stored entry (double-click path).
    """
    global current_cell
    if current_cell is not None:
        # flush the previously edited cell back to plain text
        value = current_cell.get(selector='INPUT')[0].value
        current_cell.clear()
        current_cell.text = value
    # (fix: removed dead assignment `value = cell.text.strip()` — the result
    # was never used)
    # save value in case editing the cell is aborted by Escape
    cell.value = cell.text
    cell.clear()
    _input = html.INPUT(style={'padding':'0px'})
    if keep_value:
        _input.value = cell.info['entry']
    _input.style.width = '%spx' %100
    cell <= _input
    _input.bind('keydown', entry_keydown)
    _input.bind('keyup', entry_keyup)
    _input.bind('click', entry_click)
    document['current'].value = cell.info['entry']
    _input.focus()
    current_cell = cell
    mark_selected(cell)
# Functions to open/save spreadsheets
prefix = 'brython_spreadsheet'
def sheet_names():
    """Return the names of all sheets saved in local storage."""
    names = []
    for key in storage.keys():
        if key.startswith(prefix):
            names.append(key[len(prefix):])
    return names
def select_sheet(ev):
    """'Open...' menu entry: show a sheet picker, or an error dialog if no
    sheet has been saved yet."""
    menu_file.close()
    names = sheet_names()
    names.sort()
    if names:
        d = ui.dialog.SelectDialog("Open sheet...",
            "Sheet name",names, open_sheet)
    else:
        d = ui.dialog.Dialog()
        d.set_title("Error")
        d.set_body("No sheet found")
    return
def open_sheet(sheet_name):
    """Load sheet *sheet_name* from local storage and rebuild the grid.

    The stored payload is a JSON list of [row, column, entry] triples
    (written by save_sheet_content); formula entries starting with '='
    are re-evaluated for display.
    """
    # fix: build the storage key from the shared `prefix` constant instead of
    # repeating the literal, and drop the leftover debug print(data)
    data = json.loads(storage[prefix + sheet_name])
    document['panel'].clear()
    load(sheet_name)
    # collect the data cells (skip the header row and header column handled
    # by get(selector='TD'))
    cells = []
    for row in document['sheet_table'].get(selector='TR')[1:]:
        cells.append([])
        for cell in row.get(selector='TD'):
            cells[-1].append(cell)
    for row, column, entry in data:
        cell = cells[row][column]
        cell.info = {'entry':entry}
        if not entry.startswith('='):
            cell.text = entry
        else:
            # formula: evaluate the expression after '='
            cell.text = eval(entry[1:])
def save_as(ev):
    """'Save as...' menu entry: prompt for a sheet name, then save."""
    d = ui.dialog.EntryDialog("Save sheet as...","Name",save_sheet)
    # keep keystrokes in the dialog from triggering the document handler
    d.entry.bind('keydown', lambda ev: ev.stopPropagation())
def confirm_override(widget):
    """Yes-callback of the override dialog: save under the stored name."""
    save_sheet_content(widget.sheet_name)
def save_sheet(sheet_name):
    """Validate *sheet_name* and save the sheet, asking for confirmation
    before overwriting an existing sheet of the same name."""
    if not sheet_name.strip():
        d = ui.dialog.Dialog()
        d.set_title("Error")
        d.set_body("No sheet name provided")
        return
    if sheet_name in sheet_names():
        d = ui.dialog.YesNoDialog("Save sheet",
            "A sheet named %s already exists. Override ?" %sheet_name,
            confirm_override,
            None)
        # stash the name on the dialog for confirm_override
        d.sheet_name = sheet_name
        return
    save_sheet_content(sheet_name)
def save_sheet_content(sheet_name):
    """Serialize all non-empty cells as [row, column, entry] triples and
    store them (JSON) under the sheet's local-storage key."""
    info = []
    table = document['sheet_table']
    # skip the header row; TD selector skips the header column
    for i,row in enumerate(table.get(selector="TR")[1:]):
        # fix: removed leftover debug print(row)
        for j, cell in enumerate(row.get(selector='TD')):
            if cell.info['entry']:
                info.append([i, j, cell.info['entry']])
    # fix: build the key from the shared `prefix` constant (same value as the
    # previous hard-coded literal)
    storage[prefix + sheet_name] = json.dumps(info)
    document['sheet_name'].text = sheet_name
current_menu = None  # currently open menu, if any
def stop_menu(*args):
    """Close the open menu; bound to clicks anywhere on the document."""
    global current_menu
    if current_menu:
        current_menu.close()
        current_menu = None
# global handlers: keyboard navigation and click-away menu closing
document.bind('keydown', doc_keydown)
document.bind('click', stop_menu)
menu_file = None  # "File" menu bar item, created in load()
def load(sheet_name=None):
    """Build the spreadsheet UI inside the 'panel' element.

    Creates the title, the File menu, the cell-editor input and a
    rows x cols grid of cells, then selects the first cell.

    Args:
        sheet_name: title to display; defaults to 'New document'.
    """
    global current_cell_info,menu_file
    if sheet_name is None:
        sheet_name = 'New document'
    panel = document['panel']
    title = html.DIV(style=dict(width='auto'))
    title <= html.H2(sheet_name, id="sheet_name")
    panel <= title
    menu = Menu()
    menu_file = BarItem(menu, 'File')
    MenuListItem(menu_file, 'New')
    MenuListItem(menu_file, 'Open...', select_sheet)
    MenuListItem(menu_file, 'Save as...', save_as)
    panel <= menu
    panel <= html.BR()
    cell_editor = html.INPUT(style=dict(width="200px"), Id="current")
    cell_editor.bind('click', enter_editor)
    cell_editor.bind('keydown', editor_keydown)
    cell_editor.bind('keyup', update_from_editor)
    panel <= cell_editor
    t = html.TABLE(Id="sheet_table")
    srow = -1
    rows, cols = 20, 20
    # NOTE(review): range(rows) here looks like it should be range(cols);
    # harmless while rows == cols, but worth confirming.
    col_widths = [100 for i in range(rows)]
    # header row: empty corner + one TH per column, named A, B, C, ...
    line = html.TR()
    line <= html.TH()
    for i in range(cols):
        col_name = chr(65+i)
        line <= html.TH(col_name, style={'min-width':'%spx' %col_widths[i]})
    t <= line
    # grid cells: a new TR (with its row-number TH) each time the row changes
    for i in range(rows*cols):
        row, column = divmod(i, cols)
        if row>srow:
            line = html.TR()
            line <= html.TH(row+1)
            t <= line
            srow = row
        cell = html.TD('',id='c%s_%s' %(row,column),style=dict(padding='2px'))
        cell.bind('click', select)
        cell.bind('dblclick', entry)
        cell.info = {'entry':''}
        line <= cell
    panel <= html.DIV(t,style=dict(float='left'))
    mark_selected(t.get(selector='TD')[0])
load()  # build the initial empty spreadsheet on import
| |
from typing import TypeVar, NamedTuple, Dict
from plenum.common.constants import BATCH, BLACKLIST, REQACK, REQNACK, REJECT, \
POOL_LEDGER_TXNS, ORDERED, PROPAGATE, PREPREPARE, PREPARE, COMMIT, CHECKPOINT, \
REPLY, INSTANCE_CHANGE, LEDGER_STATUS, CONSISTENCY_PROOF, CATCHUP_REQ, CATCHUP_REP, \
VIEW_CHANGE_DONE, CURRENT_STATE, MESSAGE_REQUEST, MESSAGE_RESPONSE, OBSERVED_DATA, \
BATCH_COMMITTED, OPERATION_SCHEMA_IS_STRICT, BACKUP_INSTANCE_FAULTY, VIEW_CHANGE_START, \
PROPOSED_VIEW_NO, VIEW_CHANGE_CONTINUE, VIEW_CHANGE, VIEW_CHANGE_ACK, NEW_VIEW, \
OLD_VIEW_PREPREPARE_REQ, OLD_VIEW_PREPREPARE_REP
from plenum.common.messages.client_request import ClientMessageValidator
from plenum.common.messages.fields import NonNegativeNumberField, IterableField, \
SerializedValueField, SignatureField, AnyValueField, TimestampField, \
LedgerIdField, MerkleRootField, Base58Field, LedgerInfoField, AnyField, ChooseField, AnyMapField, \
LimitedLengthStringField, BlsMultiSignatureField, ProtocolVersionField, BooleanField, \
IntegerField, BatchIDField, ViewChangeField, MapField, StringifiedNonNegativeNumberField
from plenum.common.messages.message_base import MessageBase
from plenum.common.types import f
from plenum.config import NAME_FIELD_LIMIT, DIGEST_FIELD_LIMIT, SENDER_CLIENT_FIELD_LIMIT, HASH_FIELD_LIMIT, \
SIGNATURE_FIELD_LIMIT, BLS_SIG_LIMIT
# TODO set of classes are not hashable but MessageBase expects that
from plenum.server.consensus.batch_id import BatchID
class Batch(MessageBase):
    """A batch of serialized messages sent together, with a signature."""
    typename = BATCH
    schema = (
        (f.MSGS.nm, IterableField(SerializedValueField())),
        (f.SIG.nm, SignatureField(max_length=SIGNATURE_FIELD_LIMIT)),
    )
# TODO implement actual rules
class BlacklistMsg(MessageBase):
    """Reports a node (by name) for blacklisting with a suspicion code."""
    typename = BLACKLIST
    schema = (
        (f.SUSP_CODE.nm, AnyValueField()),
        (f.NODE_NAME.nm, AnyValueField()),
    )
# TODO implement actual rules
class RequestAck(MessageBase):
    """Acknowledges receipt of a client request (identifier + request id)."""
    typename = REQACK
    schema = (
        (f.IDENTIFIER.nm, AnyValueField()),
        (f.REQ_ID.nm, AnyValueField())
    )
# TODO implement actual rules
class RequestNack(MessageBase):
    """Negative acknowledgement of a client request, with a reason."""
    typename = REQNACK
    schema = (
        (f.IDENTIFIER.nm, AnyValueField()),
        (f.REQ_ID.nm, AnyValueField()),
        (f.REASON.nm, AnyValueField()),
        (f.CODE.nm, IntegerField(optional=True))
    )
# TODO implement actual rules
class Reject(MessageBase):
    """Rejection of a client request, with a reason and optional code."""
    typename = REJECT
    schema = (
        (f.IDENTIFIER.nm, AnyValueField()),
        (f.REQ_ID.nm, AnyValueField()),
        (f.REASON.nm, AnyValueField()),
        (f.CODE.nm, IntegerField(optional=True,
                                 nullable=True))
    )
# TODO implement actual rules
class PoolLedgerTxns(MessageBase):
    """Carries a single pool-ledger transaction."""
    typename = POOL_LEDGER_TXNS
    schema = (
        (f.TXN.nm, AnyValueField()),
    )
class Ordered(MessageBase):
    """Describes a batch that has been ordered: the valid/invalid request
    digests plus the state/txn/audit roots and node metadata for the batch."""
    typename = ORDERED
    schema = (
        (f.INST_ID.nm, NonNegativeNumberField()),
        (f.VIEW_NO.nm, NonNegativeNumberField()),
        (f.VALID_REQ_IDR.nm, IterableField(LimitedLengthStringField(
            max_length=DIGEST_FIELD_LIMIT))),
        (f.INVALID_REQ_IDR.nm, IterableField(LimitedLengthStringField(
            max_length=DIGEST_FIELD_LIMIT))),
        (f.PP_SEQ_NO.nm, NonNegativeNumberField()),
        (f.PP_TIME.nm, TimestampField()),
        (f.LEDGER_ID.nm, LedgerIdField()),
        (f.STATE_ROOT.nm, MerkleRootField(nullable=True)),
        (f.TXN_ROOT.nm, MerkleRootField(nullable=True)),
        (f.AUDIT_TXN_ROOT_HASH.nm, MerkleRootField(nullable=True)),
        (f.PRIMARIES.nm, IterableField(LimitedLengthStringField(
            max_length=NAME_FIELD_LIMIT))),
        (f.NODE_REG.nm, IterableField(LimitedLengthStringField(
            max_length=NAME_FIELD_LIMIT))),
        (f.ORIGINAL_VIEW_NO.nm, NonNegativeNumberField()),
        (f.DIGEST.nm, LimitedLengthStringField(max_length=DIGEST_FIELD_LIMIT)),
        (f.PLUGIN_FIELDS.nm, AnyMapField(optional=True, nullable=True))
    )
class Propagate(MessageBase):
    """Wraps a validated client request together with the sending client id."""
    typename = PROPAGATE
    schema = (
        (f.REQUEST.nm, ClientMessageValidator(
            operation_schema_is_strict=OPERATION_SCHEMA_IS_STRICT)),
        (f.SENDER_CLIENT.nm, LimitedLengthStringField(max_length=SENDER_CLIENT_FIELD_LIMIT, nullable=True)),
    )
class PrePrepare(MessageBase):
    """First message of the three-phase protocol: a proposed batch of
    requests with its roots, sequencing info and optional BLS multi-sigs."""
    schema = (
        (f.INST_ID.nm, NonNegativeNumberField()),
        (f.VIEW_NO.nm, NonNegativeNumberField()),
        (f.PP_SEQ_NO.nm, NonNegativeNumberField()),
        (f.PP_TIME.nm, TimestampField()),
        (f.REQ_IDR.nm, IterableField(LimitedLengthStringField(
            max_length=DIGEST_FIELD_LIMIT))),
        (f.DISCARDED.nm, SerializedValueField(nullable=True)),
        (f.DIGEST.nm, LimitedLengthStringField(max_length=DIGEST_FIELD_LIMIT)),
        (f.LEDGER_ID.nm, LedgerIdField()),
        (f.STATE_ROOT.nm, MerkleRootField(nullable=True)),
        (f.TXN_ROOT.nm, MerkleRootField(nullable=True)),
        (f.SUB_SEQ_NO.nm, NonNegativeNumberField()),
        (f.FINAL.nm, BooleanField()),
        (f.POOL_STATE_ROOT_HASH.nm, MerkleRootField(optional=True,
                                                    nullable=True)),
        (f.AUDIT_TXN_ROOT_HASH.nm, MerkleRootField(optional=True,
                                                   nullable=True)),
        # TODO: support multiple multi-sigs for multiple previous batches
        (f.BLS_MULTI_SIG.nm, BlsMultiSignatureField(optional=True,
                                                    nullable=True)),
        (f.BLS_MULTI_SIGS.nm, IterableField(optional=True,
                                            inner_field_type=BlsMultiSignatureField(optional=True, nullable=True))),
        (f.ORIGINAL_VIEW_NO.nm, NonNegativeNumberField(optional=True,
                                                       nullable=True)),
        (f.PLUGIN_FIELDS.nm, AnyMapField(optional=True, nullable=True)),
    )
    typename = PREPREPARE
    def _post_process(self, input_as_dict: Dict) -> Dict:
        """Convert the validated list fields to tuples so the message is
        hashable (see the module-level TODO about MessageBase hashability)."""
        # make validated input hashable
        input_as_dict[f.REQ_IDR.nm] = tuple(input_as_dict[f.REQ_IDR.nm])
        bls = input_as_dict.get(f.BLS_MULTI_SIG.nm, None)
        if bls is not None:
            input_as_dict[f.BLS_MULTI_SIG.nm] = (bls[0], tuple(bls[1]), tuple(bls[2]))
        bls_sigs = input_as_dict.get(f.BLS_MULTI_SIGS.nm, None)
        if bls_sigs is not None:
            sub = []
            for sig in bls_sigs:
                sub.append((sig[0], tuple(sig[1]), tuple(sig[2])))
            input_as_dict[f.BLS_MULTI_SIGS.nm] = tuple(sub)
        return input_as_dict
# TODO: use generic MessageReq mechanism once it's separated into an independent service
class OldViewPrePrepareRequest(MessageBase):
    """Requests PrePrepares from a previous view, identified by batch ids."""
    typename = OLD_VIEW_PREPREPARE_REQ
    schema = (
        (f.INST_ID.nm, NonNegativeNumberField()),
        (f.BATCH_IDS.nm, IterableField(BatchIDField())),
    )
class OldViewPrePrepareReply(MessageBase):
    """Reply to OldViewPrePrepareRequest carrying the requested PrePrepares."""
    typename = OLD_VIEW_PREPREPARE_REP
    schema = (
        (f.INST_ID.nm, NonNegativeNumberField()),
        (f.PREPREPARES.nm, IterableField(AnyField())),
    )
class Prepare(MessageBase):
    """Second message of the three-phase protocol, echoing the batch digest
    and roots for agreement."""
    typename = PREPARE
    schema = (
        (f.INST_ID.nm, NonNegativeNumberField()),
        (f.VIEW_NO.nm, NonNegativeNumberField()),
        (f.PP_SEQ_NO.nm, NonNegativeNumberField()),
        (f.PP_TIME.nm, TimestampField()),
        (f.DIGEST.nm, LimitedLengthStringField(max_length=DIGEST_FIELD_LIMIT)),
        (f.STATE_ROOT.nm, MerkleRootField(nullable=True)),
        (f.TXN_ROOT.nm, MerkleRootField(nullable=True)),
        (f.AUDIT_TXN_ROOT_HASH.nm, MerkleRootField(optional=True,
                                                   nullable=True)),
        (f.PLUGIN_FIELDS.nm, AnyMapField(optional=True, nullable=True))
    )
class Commit(MessageBase):
    """Third message of the three-phase protocol, with optional BLS sigs."""
    typename = COMMIT
    schema = (
        (f.INST_ID.nm, NonNegativeNumberField()),
        (f.VIEW_NO.nm, NonNegativeNumberField()),
        (f.PP_SEQ_NO.nm, NonNegativeNumberField()),
        (f.BLS_SIG.nm, LimitedLengthStringField(max_length=BLS_SIG_LIMIT,
                                                optional=True)),
        (f.BLS_SIGS.nm, MapField(optional=True,
                                 key_field=StringifiedNonNegativeNumberField(),
                                 value_field=LimitedLengthStringField(max_length=BLS_SIG_LIMIT))),
        # PLUGIN_FIELDS is not used in Commit as of now but adding for
        # consistency
        (f.PLUGIN_FIELDS.nm, AnyMapField(optional=True, nullable=True)),
    )
class Checkpoint(MessageBase):
    """Periodic checkpoint of ordered batches up to SEQ_NO_END."""
    typename = CHECKPOINT
    schema = (
        (f.INST_ID.nm, NonNegativeNumberField()),
        (f.VIEW_NO.nm, NonNegativeNumberField()),  # This will no longer be used soon
        (f.SEQ_NO_START.nm, NonNegativeNumberField()),  # This is no longer used and must always be 0
        (f.SEQ_NO_END.nm, NonNegativeNumberField()),
        (f.DIGEST.nm, MerkleRootField(nullable=True)),  # This is actually audit ledger merkle root
    )
# TODO implement actual rules
class Reply(MessageBase):
    """Reply to a client carrying the result of its request."""
    typename = REPLY
    schema = (
        (f.RESULT.nm, AnyValueField()),
    )
class InstanceChange(MessageBase):
    """Vote to change to the given view, with a numeric reason code."""
    typename = INSTANCE_CHANGE
    schema = (
        (f.VIEW_NO.nm, NonNegativeNumberField()),
        (f.REASON.nm, NonNegativeNumberField())
    )
class BackupInstanceFaulty(MessageBase):
    """Reports backup protocol instances considered faulty, with a reason."""
    typename = BACKUP_INSTANCE_FAULTY
    schema = (
        (f.VIEW_NO.nm, NonNegativeNumberField()),
        (f.INSTANCES.nm, IterableField(NonNegativeNumberField())),
        (f.REASON.nm, NonNegativeNumberField())
    )
#
# class CheckpointsList(IterableField):
#
# def __init__(self, min_length=None, max_length=None, **kwargs):
# super().__init__(AnyField(), min_length, max_length, **kwargs)
#
# def _specific_validation(self, val):
# result = super()._specific_validation(val)
# if result is not None:
# return result
# for chk in val:
# if not isinstance(chk, Checkpoint):
# return "Checkpoints list contains not Checkpoint objects"
class ViewChange(MessageBase):
    """View-change vote carrying the node's prepared/preprepared batches and
    its checkpoints; list fields are (de)serialized to/from dicts."""
    typename = VIEW_CHANGE
    schema = (
        (f.VIEW_NO.nm, NonNegativeNumberField()),
        (f.STABLE_CHECKPOINT.nm, NonNegativeNumberField()),
        (f.PREPARED.nm, IterableField(BatchIDField())),  # list of tuples (view_no, pp_view_no, pp_seq_no, pp_digest)
        (f.PREPREPARED.nm, IterableField(BatchIDField())),  # list of tuples (view_no, pp_view_no, pp_seq_no, pp_digest)
        (f.CHECKPOINTS.nm, IterableField(AnyField()))  # list of Checkpoints TODO: should we change to tuples?
    )
    def __init__(self, *args, **kwargs):
        """Deserialize dict-shaped checkpoints/prepared/preprepared entries
        into Checkpoint / BatchID objects."""
        super().__init__(*args, **kwargs)
        # NOTE(review): if `checkpoints` mixes dicts and Checkpoint objects,
        # the reassignment below keeps only the converted dicts — confirm
        # whether a mixed list can occur in practice.
        checkpoints = []
        for chk in self.checkpoints:
            if isinstance(chk, dict):
                checkpoints.append(Checkpoint(**chk))
        if checkpoints:
            self.checkpoints = checkpoints
        # The field `prepared` can be a list of BatchIDs or of dicts.
        # If it's a list of dicts then we need to deserialize it.
        if self.prepared and isinstance(self.prepared[0], dict):
            self.prepared = [BatchID(**bid)
                             for bid in self.prepared
                             if isinstance(bid, dict)]
        # The field `preprepared` can be a list of BatchIDs or of dicts.
        # If it's a list of dicts then we need to deserialize it.
        if self.preprepared and isinstance(self.preprepared[0], dict):
            self.preprepared = [BatchID(**bid)
                                for bid in self.preprepared
                                if isinstance(bid, dict)]
    def _asdict(self):
        """Serialize Checkpoint / BatchID members back to plain dicts."""
        result = super()._asdict()
        checkpoints = []
        for chk in self.checkpoints:
            if isinstance(chk, dict):
                continue
            checkpoints.append(chk._asdict())
        if checkpoints:
            result[f.CHECKPOINTS.nm] = checkpoints
        # The field `prepared` can be a list of BatchIDs or of dicts.
        # If it's a list of BatchID then we need to serialize it.
        if self.prepared and isinstance(self.prepared[0], BatchID):
            result[f.PREPARED.nm] = [bid._asdict()
                                     for bid in self.prepared]
        # The field `preprepared` can be a list of BatchIDs or of dicts.
        # If it's a list of BatchID then we need to serialize it.
        if self.preprepared and isinstance(self.preprepared[0], BatchID):
            result[f.PREPREPARED.nm] = [bid._asdict()
                                        for bid in self.preprepared]
        return result
class ViewChangeAck(MessageBase):
    """Acknowledges a ViewChange from the named node, by digest."""
    typename = VIEW_CHANGE_ACK
    schema = (
        (f.VIEW_NO.nm, NonNegativeNumberField()),
        (f.NAME.nm, LimitedLengthStringField(max_length=NAME_FIELD_LIMIT)),
        (f.DIGEST.nm, LimitedLengthStringField(max_length=DIGEST_FIELD_LIMIT))
    )
class NewView(MessageBase):
    """Announces the new view: the view-change votes it is based on, the
    stable checkpoint, and the batches to be re-ordered in the new view."""
    typename = NEW_VIEW
    schema = (
        (f.VIEW_NO.nm, NonNegativeNumberField()),
        (f.VIEW_CHANGES.nm, IterableField(ViewChangeField())),  # list of tuples (node_name, view_change_digest)
        (f.CHECKPOINT.nm, AnyField()),  # Checkpoint to be selected as stable (TODO: or tuple?)
        (f.BATCHES.nm, IterableField(BatchIDField())),  # list of tuples (view_no, pp_view_no, pp_seq_no, pp_digest)
        (f.PRIMARY.nm, LimitedLengthStringField(optional=True))
        # that should get into new view
    )
    def __init__(self, *args, **kwargs):
        """Deserialize dict-shaped checkpoint and batches into objects."""
        super().__init__(*args, **kwargs)
        if isinstance(self.checkpoint, dict):
            self.checkpoint = Checkpoint(**self.checkpoint)
        # The field `batches` can be a list of BatchIDs or of dicts.
        # If it's not a list of dicts then we don't need to deserialize it.
        if not self.batches or not isinstance(self.batches[0], dict):
            return
        self.batches = [BatchID(**bid)
                        for bid in self.batches
                        if isinstance(bid, dict)]
    def _asdict(self):
        """Serialize Checkpoint / BatchID members back to plain dicts."""
        result = super()._asdict()
        chk = self.checkpoint
        if not isinstance(chk, dict):
            result[f.CHECKPOINT.nm] = chk._asdict()
        # The field `batches` can be a list of BatchIDs or of dicts.
        # If it's a list of dicts then we don't need to serialize it.
        if not self.batches or not isinstance(self.batches[0], BatchID):
            return result
        result[f.BATCHES.nm] = [bid._asdict()
                                for bid in self.batches]
        return result
class LedgerStatus(MessageBase):
    """
    Purpose: spread the status of a ledger copy on a specific node.
    When a node receives this message and sees that its own ledger has a
    different status, it should reply with a LedgerStatus containing its
    status.
    """
    typename = LEDGER_STATUS
    schema = (
        (f.LEDGER_ID.nm, LedgerIdField()),
        (f.TXN_SEQ_NO.nm, NonNegativeNumberField()),
        (f.VIEW_NO.nm, NonNegativeNumberField(nullable=True)),
        (f.PP_SEQ_NO.nm, NonNegativeNumberField(nullable=True)),
        (f.MERKLE_ROOT.nm, MerkleRootField()),
        (f.PROTOCOL_VERSION.nm, ProtocolVersionField())
    )
class ConsistencyProof(MessageBase):
    """Merkle consistency proof between two ledger sizes, used in catchup."""
    typename = CONSISTENCY_PROOF
    schema = (
        (f.LEDGER_ID.nm, LedgerIdField()),
        (f.SEQ_NO_START.nm, NonNegativeNumberField()),
        (f.SEQ_NO_END.nm, NonNegativeNumberField()),
        (f.VIEW_NO.nm, NonNegativeNumberField()),
        (f.PP_SEQ_NO.nm, NonNegativeNumberField()),
        (f.OLD_MERKLE_ROOT.nm, MerkleRootField()),
        (f.NEW_MERKLE_ROOT.nm, MerkleRootField()),
        (f.HASHES.nm, IterableField(LimitedLengthStringField(max_length=HASH_FIELD_LIMIT))),
    )
class CatchupReq(MessageBase):
    """Requests a range of transactions from a ledger during catchup."""
    typename = CATCHUP_REQ
    schema = (
        (f.LEDGER_ID.nm, LedgerIdField()),
        (f.SEQ_NO_START.nm, NonNegativeNumberField()),
        (f.SEQ_NO_END.nm, NonNegativeNumberField()),
        (f.CATCHUP_TILL.nm, NonNegativeNumberField()),
    )
class CatchupRep(MessageBase):
    """Reply to CatchupReq with the transactions and a consistency proof."""
    typename = CATCHUP_REP
    schema = (
        (f.LEDGER_ID.nm, LedgerIdField()),
        # TODO: turn on validation, the cause is INDY-388
        # (f.TXNS.nm, MapField(key_field=StringifiedNonNegativeNumberField(),
        #                      value_field=ClientMessageValidator(operation_schema_is_strict=False))),
        (f.TXNS.nm, AnyValueField()),
        (f.CONS_PROOF.nm, IterableField(Base58Field(byte_lengths=(32,)))),
    )
class ViewChangeDone(MessageBase):
    """
    Node sends this kind of message when view change steps are done and it is
    ready to switch to the new primary.
    In contrast to the 'Primary' message this one does not imply election.
    """
    typename = VIEW_CHANGE_DONE
    schema = (
        # name is nullable because this message can be sent when
        # there were no view changes and instance has no primary yet
        (f.VIEW_NO.nm, NonNegativeNumberField()),
        (f.NAME.nm, LimitedLengthStringField(max_length=NAME_FIELD_LIMIT,
                                             nullable=True)),
        (f.LEDGER_INFO.nm, IterableField(LedgerInfoField()))
    )
class CurrentState(MessageBase):
    """
    Node sends this kind of message to nodes which
    suddenly reconnected (lagged). It contains information about the current
    pool state, like view no, primary etc.
    """
    typename = CURRENT_STATE
    schema = (
        (f.VIEW_NO.nm, NonNegativeNumberField()),
        (f.PRIMARY.nm, IterableField(AnyField())),  # ViewChangeDone
    )
"""
The choice to do a generic 'request message' feature instead of a specific
one was debated. It has some pros and some cons. We wrote up the analysis in
http://bit.ly/2uxf6Se. This decision can and should be revisited if we feel a
lot of ongoing dissonance about it. Lovesh, Alex, and Daniel, July 2017
"""
class MessageReq(MessageBase):
    """
    Purpose: ask a node for any message of one of the allowed types.
    """
    # message typenames that may be requested via this mechanism
    allowed_types = {LEDGER_STATUS, CONSISTENCY_PROOF, PREPREPARE, PREPARE,
                     COMMIT, PROPAGATE, VIEW_CHANGE, NEW_VIEW}
    typename = MESSAGE_REQUEST
    schema = (
        (f.MSG_TYPE.nm, ChooseField(values=allowed_types)),
        (f.PARAMS.nm, AnyMapField())
    )
class MessageRep(MessageBase):
    """
    Purpose: respond to a node for any requested message.
    """
    # TODO: support a setter for `msg` to create an instance of a type
    # according to `msg_type`
    typename = MESSAGE_RESPONSE
    schema = (
        (f.MSG_TYPE.nm, ChooseField(values=MessageReq.allowed_types)),
        (f.PARAMS.nm, AnyMapField()),
        (f.MSG.nm, AnyField())
    )
# Message classes participating in the three-phase commit protocol.
ThreePhaseType = (PrePrepare, Prepare, Commit)
ThreePhaseMsg = TypeVar("3PhaseMsg", *ThreePhaseType)
# Key identifying a three-phase batch: (view_no, pp_seq_no).
ThreePhaseKey = NamedTuple("ThreePhaseKey", [
    f.VIEW_NO,
    f.PP_SEQ_NO
])
class BatchCommitted(MessageBase):
    """
    Purpose: pass to Observable after each batch is committed
    (so that Observable can propagate the data to Observers using ObservedData msg)
    """
    typename = BATCH_COMMITTED
    schema = (
        (f.REQUESTS.nm,
         IterableField(ClientMessageValidator(
             operation_schema_is_strict=OPERATION_SCHEMA_IS_STRICT))),
        (f.LEDGER_ID.nm, LedgerIdField()),
        (f.INST_ID.nm, NonNegativeNumberField()),
        (f.VIEW_NO.nm, NonNegativeNumberField()),
        (f.PP_SEQ_NO.nm, NonNegativeNumberField()),
        (f.PP_TIME.nm, TimestampField()),
        (f.STATE_ROOT.nm, MerkleRootField()),
        (f.TXN_ROOT.nm, MerkleRootField()),
        (f.SEQ_NO_START.nm, NonNegativeNumberField()),
        (f.SEQ_NO_END.nm, NonNegativeNumberField()),
        (f.AUDIT_TXN_ROOT_HASH.nm, MerkleRootField(nullable=True)),
        (f.PRIMARIES.nm, IterableField(LimitedLengthStringField(
            max_length=NAME_FIELD_LIMIT))),
        (f.NODE_REG.nm, IterableField(LimitedLengthStringField(
            max_length=NAME_FIELD_LIMIT))),
        (f.ORIGINAL_VIEW_NO.nm, NonNegativeNumberField()),
        (f.DIGEST.nm, LimitedLengthStringField(max_length=DIGEST_FIELD_LIMIT)),
    )
class ObservedData(MessageBase):
    """
    Purpose: propagate data from Validators to Observers
    """
    # TODO: support other types
    # TODO: support validation of Msg according to the type
    allowed_types = {BATCH}
    typename = OBSERVED_DATA
    schema = (
        (f.MSG_TYPE.nm, ChooseField(values=allowed_types)),
        (f.MSG.nm, AnyValueField())
    )
    def _validate_message(self, dct):
        """Check that the wrapped msg is (or deserializes to) BatchCommitted;
        raise via _raise_invalid_fields otherwise."""
        msg = dct[f.MSG.nm]
        # TODO: support other types
        expected_type_cls = BatchCommitted
        if isinstance(msg, expected_type_cls):
            return None
        if isinstance(msg, dict):
            # constructing the message performs its field validation
            expected_type_cls(**msg)
            return None
        self._raise_invalid_fields(
            f.MSG.nm, msg,
            "The message type must be {} ".format(expected_type_cls.typename))
class FutureViewChangeDone:
    """
    Purpose: sent from Node to ViewChanger to indicate that other nodes finished ViewChange to one of the next view
    In particular, it's sent when CURRENT_STATE (with primary propagation) is processed.
    """

    def __init__(self, vcd_msg: ViewChangeDone) -> None:
        # The ViewChangeDone message being forwarded to the ViewChanger.
        self.vcd_msg = vcd_msg
class ViewChangeStartMessage(MessageBase):
    """Internal message asking to start a view change to the proposed view number."""
    typename = VIEW_CHANGE_START
    schema = (
        (PROPOSED_VIEW_NO, IntegerField()),
    )
class ViewChangeContinueMessage(MessageBase):
    """Internal message asking to continue an in-progress view change to the proposed view number."""
    typename = VIEW_CHANGE_CONTINUE
    schema = (
        (PROPOSED_VIEW_NO, IntegerField()),
    )
| |
import base64
import datetime
import logging
import urllib
from functools import wraps
from io import BytesIO
from typing import Callable, Dict, Optional, Sequence, Tuple, TypeVar, Union, cast, overload
import django_otp
from django.conf import settings
from django.contrib.auth import REDIRECT_FIELD_NAME
from django.contrib.auth import login as django_login
from django.contrib.auth.decorators import user_passes_test as django_user_passes_test
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth.views import redirect_to_login
from django.http import HttpRequest, HttpResponse, HttpResponseRedirect, QueryDict
from django.http.multipartparser import MultiPartParser
from django.shortcuts import resolve_url
from django.template.response import SimpleTemplateResponse, TemplateResponse
from django.utils.timezone import now as timezone_now
from django.utils.translation import gettext as _
from django.views.decorators.csrf import csrf_exempt
from django_otp import user_has_device
from two_factor.utils import default_device
from zerver.lib.exceptions import (
AccessDeniedError,
ErrorCode,
InvalidAPIKeyError,
InvalidAPIKeyFormatError,
InvalidJSONError,
JsonableError,
OrganizationAdministratorRequired,
OrganizationMemberRequired,
OrganizationOwnerRequired,
RateLimited,
RealmDeactivatedError,
UnsupportedWebhookEventType,
UserDeactivatedError,
)
from zerver.lib.queue import queue_json_publish
from zerver.lib.rate_limiter import RateLimitedIPAddr, RateLimitedUser
from zerver.lib.request import REQ, RequestNotes, has_request_variables
from zerver.lib.response import json_method_not_allowed, json_success, json_unauthorized
from zerver.lib.subdomains import get_subdomain, user_matches_subdomain
from zerver.lib.timestamp import datetime_to_timestamp, timestamp_to_datetime
from zerver.lib.types import ViewFuncT
from zerver.lib.utils import has_api_key_format, statsd
from zerver.models import Realm, UserProfile, get_client, get_user_profile_by_api_key
# Zilencer (push-notification bouncer) models exist only when the feature is
# enabled in settings, so guard the import.
if settings.ZILENCER_ENABLED:
    from zilencer.models import (
        RateLimitedRemoteZulipServer,
        RemoteZulipServer,
        get_remote_server_by_uuid,
    )
# Module-level loggers for rate limiting and webhook error reporting.
rate_limiter_logger = logging.getLogger("zerver.lib.rate_limiter")
webhook_logger = logging.getLogger("zulip.zerver.webhooks")
webhook_unsupported_events_logger = logging.getLogger("zulip.zerver.webhooks.unsupported")
FuncT = TypeVar("FuncT", bound=Callable[..., object])


def cachify(method: FuncT) -> FuncT:
    """Memoize `method`, caching results keyed on its (hashable) positional
    arguments.

    The cache is unbounded and lives for the process lifetime, so only use
    this on functions with a small, stable key space.  Keyword arguments are
    not supported.
    """
    dct: Dict[Tuple[object, ...], object] = {}

    # @wraps preserves __name__/__doc__/etc. of the wrapped function; the
    # original version lost that metadata.
    @wraps(method)
    def cache_wrapper(*args: object) -> object:
        tup = tuple(args)
        if tup in dct:
            return dct[tup]
        result = method(*args)
        dct[tup] = result
        return result

    return cast(FuncT, cache_wrapper)  # https://github.com/python/mypy/issues/1927
def update_user_activity(
    request: HttpRequest, user_profile: UserProfile, query: Optional[str]
) -> None:
    """Enqueue a user_activity event recording this request for user_profile.

    `query` overrides the logged query name; otherwise the request notes'
    query, then the URL path, is used.  Presence requests are skipped.
    """
    # update_active_status also pushes to RabbitMQ, and it seems
    # redundant to log that here as well.
    if request.META["PATH_INFO"] == "/json/users/me/presence":
        return
    request_notes = RequestNotes.get_notes(request)
    if query is not None:
        pass
    elif request_notes.query is not None:
        query = request_notes.query
    else:
        query = request.META["PATH_INFO"]
    # The client must have been resolved (process_client) before this point.
    assert request_notes.client is not None
    event = {
        "query": query,
        "user_profile_id": user_profile.id,
        "time": datetime_to_timestamp(timezone_now()),
        "client_id": request_notes.client.id,
    }
    queue_json_publish("user_activity", event, lambda event: None)
# Based on django.views.decorators.http.require_http_methods
def require_post(func: ViewFuncT) -> ViewFuncT:
    """Decorator: reject any non-POST request with a 405, rendering the error
    as JSON or HTML depending on the request's error format."""

    @wraps(func)
    def wrapper(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        if request.method != "POST":
            err_method = request.method
            logging.warning(
                "Method Not Allowed (%s): %s",
                err_method,
                request.path,
                extra={"status_code": 405, "request": request},
            )
            if RequestNotes.get_notes(request).error_format == "JSON":
                return json_method_not_allowed(["POST"])
            else:
                return TemplateResponse(
                    request, "404.html", context={"status_code": 405}, status=405
                )
        return func(request, *args, **kwargs)

    return cast(ViewFuncT, wrapper)  # https://github.com/python/mypy/issues/1927
def require_realm_owner(func: ViewFuncT) -> ViewFuncT:
    """Decorator: only organization owners may call the wrapped view."""

    @wraps(func)
    def wrapper(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        if user_profile.is_realm_owner:
            return func(request, user_profile, *args, **kwargs)
        raise OrganizationOwnerRequired()

    return cast(ViewFuncT, wrapper)  # https://github.com/python/mypy/issues/1927
def require_realm_admin(func: ViewFuncT) -> ViewFuncT:
    """Decorator: only organization administrators may call the wrapped view."""

    @wraps(func)
    def wrapper(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        if user_profile.is_realm_admin:
            return func(request, user_profile, *args, **kwargs)
        raise OrganizationAdministratorRequired()

    return cast(ViewFuncT, wrapper)  # https://github.com/python/mypy/issues/1927
def require_organization_member(func: ViewFuncT) -> ViewFuncT:
    """Decorator: require at least full-member privileges (admins included)."""

    @wraps(func)
    def wrapper(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        # Role values numerically above ROLE_MEMBER are less privileged
        # (e.g. guests), so they are rejected here.
        if user_profile.role > UserProfile.ROLE_MEMBER:
            raise OrganizationMemberRequired()
        return func(request, user_profile, *args, **kwargs)

    return cast(ViewFuncT, wrapper)  # https://github.com/python/mypy/issues/1927
def require_billing_access(func: ViewFuncT) -> ViewFuncT:
    """Decorator: only users with billing access (per
    UserProfile.has_billing_access) may call the wrapped view."""

    @wraps(func)
    def wrapper(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        if not user_profile.has_billing_access:
            raise JsonableError(_("Must be a billing administrator or an organization owner"))
        return func(request, user_profile, *args, **kwargs)

    return cast(ViewFuncT, wrapper)  # https://github.com/python/mypy/issues/1927
def process_client(
    request: HttpRequest,
    user: Union[UserProfile, AnonymousUser],
    *,
    is_browser_view: bool = False,
    client_name: Optional[str] = None,
    skip_update_user_activity: bool = False,
    query: Optional[str] = None,
) -> None:
    """Resolve and record the Client for this request, and (for authenticated
    users) log a user_activity event.

    `client_name` defaults to the name already parsed into the request notes;
    browser views are normalized to the "website" client unless the client
    identifies itself as a Zulip app.
    """
    request_notes = RequestNotes.get_notes(request)
    if client_name is None:
        client_name = request_notes.client_name
    assert client_name is not None
    # We could check for a browser's name being "Mozilla", but
    # e.g. Opera and MobileSafari don't set that, and it seems
    # more robust to just key off whether it was a browser view
    if is_browser_view and not client_name.startswith("Zulip"):
        # Avoid changing the client string for browsers, but let
        # the Zulip desktop apps be themselves.
        client_name = "website"
    request_notes.client = get_client(client_name)
    if not skip_update_user_activity and user.is_authenticated:
        update_user_activity(request, user, query)
class InvalidZulipServerError(JsonableError):
    """Raised when a remote Zulip server's UUID ("role") is not registered."""
    code = ErrorCode.INVALID_ZULIP_SERVER
    data_fields = ["role"]

    def __init__(self, role: str) -> None:
        # The UUID the remote server authenticated with.
        self.role: str = role

    @staticmethod
    def msg_format() -> str:
        return "Zulip server auth failure: {role} is not registered"
class InvalidZulipServerKeyError(InvalidZulipServerError):
    """Raised when a registered remote server supplies the wrong API key."""

    @staticmethod
    def msg_format() -> str:
        return "Zulip server auth failure: key does not match role {role}"
def validate_api_key(
    request: HttpRequest,
    role: Optional[str],
    api_key: str,
    allow_webhook_access: bool = False,
    client_name: Optional[str] = None,
) -> Union[UserProfile, "RemoteZulipServer"]:
    """Authenticate an API request by (role, api_key).

    `role` may be a user email, a remote Zulip server UUID (when Zilencer is
    enabled), or None to look the user up by API key alone.  On success the
    authenticated principal is attached to `request.user` and the request's
    client info is recorded; on failure a JsonableError subclass is raised.
    """
    # Remove whitespace to protect users from trivial errors.
    api_key = api_key.strip()
    if role is not None:
        role = role.strip()
    # If `role` doesn't look like an email, it might be a uuid.
    if settings.ZILENCER_ENABLED and role is not None and "@" not in role:
        try:
            remote_server = get_remote_server_by_uuid(role)
        except RemoteZulipServer.DoesNotExist:
            raise InvalidZulipServerError(role)
        if api_key != remote_server.api_key:
            raise InvalidZulipServerKeyError(role)
        # Remote servers may only authenticate on the root domain.
        if get_subdomain(request) != Realm.SUBDOMAIN_FOR_ROOT_DOMAIN:
            raise JsonableError(_("Invalid subdomain for push notifications bouncer"))
        request.user = remote_server
        remote_server.rate_limits = ""
        # Skip updating UserActivity, since remote_server isn't actually a UserProfile object.
        process_client(request, remote_server, skip_update_user_activity=True)
        return remote_server
    user_profile = access_user_by_api_key(request, api_key, email=role)
    if user_profile.is_incoming_webhook and not allow_webhook_access:
        raise JsonableError(_("This API is not available to incoming webhook bots."))
    request.user = user_profile
    process_client(request, user_profile, client_name=client_name)
    return user_profile
def validate_account_and_subdomain(request: HttpRequest, user_profile: UserProfile) -> None:
    """Raise unless user_profile is active, its realm is active, and the
    request arrived on the realm's subdomain (or via local Tornado spoofing)."""
    if user_profile.realm.deactivated:
        raise RealmDeactivatedError()
    if not user_profile.is_active:
        raise UserDeactivatedError()
    # Either the subdomain matches, or we're accessing Tornado from
    # and to localhost (aka spoofing a request as the user).
    if not user_matches_subdomain(get_subdomain(request), user_profile) and not (
        settings.RUNNING_INSIDE_TORNADO
        and request.META["SERVER_NAME"] == "127.0.0.1"
        and request.META["REMOTE_ADDR"] == "127.0.0.1"
    ):
        logging.warning(
            "User %s (%s) attempted to access API on wrong subdomain (%s)",
            user_profile.delivery_email,
            user_profile.realm.subdomain,
            get_subdomain(request),
        )
        raise JsonableError(_("Account is not associated with this subdomain"))
def access_user_by_api_key(
    request: HttpRequest, api_key: str, email: Optional[str] = None
) -> UserProfile:
    """Look up the UserProfile for api_key, optionally verifying that the
    supplied email matches, and validate the account/subdomain.  Raises
    InvalidAPIKey(Format)Error or the account-validation errors."""
    if not has_api_key_format(api_key):
        raise InvalidAPIKeyFormatError()
    try:
        user_profile = get_user_profile_by_api_key(api_key)
    except UserProfile.DoesNotExist:
        raise InvalidAPIKeyError()
    if email is not None and email.lower() != user_profile.delivery_email.lower():
        # This covers the case that the API key is correct, but for a
        # different user. We may end up wanting to relaxing this
        # constraint or give a different error message in the future.
        raise InvalidAPIKeyError()
    validate_account_and_subdomain(request, user_profile)
    return user_profile
def log_exception_to_webhook_logger(
    summary: str,
    unsupported_event: bool,
) -> None:
    """Record the current exception (with stack info) to the appropriate
    webhook logger, segregating unsupported-event errors."""
    logger = webhook_unsupported_events_logger if unsupported_event else webhook_logger
    logger.exception(summary, stack_info=True)
def full_webhook_client_name(raw_client_name: Optional[str] = None) -> Optional[str]:
    """Map a short webhook integration name (e.g. "GitHub") to the full
    client string ("ZulipGitHubWebhook"); None passes through unchanged."""
    if raw_client_name is not None:
        return "Zulip{}Webhook".format(raw_client_name)
    return None
# Use this for webhook views that don't get an email passed in.
def webhook_view(
    webhook_client_name: str,
    notify_bot_owner_on_invalid_json: bool = True,
    all_event_types: Optional[Sequence[str]] = None,
) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]:
    """Decorator factory for incoming-webhook views.

    Authenticates via the `api_key` request variable, rate-limits by user,
    and routes exceptions to the webhook loggers (optionally notifying the
    bot owner about invalid JSON payloads).
    """
    # Unfortunately, callback protocols are insufficient for this:
    # https://mypy.readthedocs.io/en/stable/protocols.html#callback-protocols
    # Variadic generics are necessary: https://github.com/python/typing/issues/193
    def _wrapped_view_func(view_func: Callable[..., HttpResponse]) -> Callable[..., HttpResponse]:
        @csrf_exempt
        @has_request_variables
        @wraps(view_func)
        def _wrapped_func_arguments(
            request: HttpRequest, api_key: str = REQ(), *args: object, **kwargs: object
        ) -> HttpResponse:
            # Webhook bots are explicitly allowed here (allow_webhook_access).
            user_profile = validate_api_key(
                request,
                None,
                api_key,
                allow_webhook_access=True,
                client_name=full_webhook_client_name(webhook_client_name),
            )
            if settings.RATE_LIMITING:
                rate_limit_user(request, user_profile, domain="api_by_user")
            try:
                return view_func(request, user_profile, *args, **kwargs)
            except Exception as err:
                if isinstance(err, InvalidJSONError) and notify_bot_owner_on_invalid_json:
                    # NOTE: importing this at the top of file leads to a
                    # cyclic import; correct fix is probably to move
                    # notify_bot_owner_about_invalid_json to a smaller file.
                    from zerver.lib.webhooks.common import notify_bot_owner_about_invalid_json

                    notify_bot_owner_about_invalid_json(user_profile, webhook_client_name)
                elif isinstance(err, JsonableError) and not isinstance(
                    err, UnsupportedWebhookEventType
                ):
                    # Expected client errors are not logged here.
                    pass
                else:
                    if isinstance(err, UnsupportedWebhookEventType):
                        err.webhook_name = webhook_client_name
                    log_exception_to_webhook_logger(
                        summary=str(err),
                        unsupported_event=isinstance(err, UnsupportedWebhookEventType),
                    )
                # Always re-raise so the normal error response machinery runs.
                raise err

        # Exposed for API documentation / event-type tooling.
        _wrapped_func_arguments._all_event_types = all_event_types
        return _wrapped_func_arguments

    return _wrapped_view_func
def zulip_redirect_to_login(
    request: HttpRequest,
    login_url: Optional[str] = None,
    redirect_field_name: str = REDIRECT_FIELD_NAME,
) -> HttpResponseRedirect:
    """Redirect to the login page, preserving the requested path as the
    "next" parameter when it is safe (same scheme/host) and non-trivial."""
    path = request.build_absolute_uri()
    resolved_login_url = resolve_url(login_url or settings.LOGIN_URL)
    # If the login URL is the same scheme and net location then just
    # use the path as the "next" url.
    login_scheme, login_netloc = urllib.parse.urlparse(resolved_login_url)[:2]
    current_scheme, current_netloc = urllib.parse.urlparse(path)[:2]
    if (not login_scheme or login_scheme == current_scheme) and (
        not login_netloc or login_netloc == current_netloc
    ):
        path = request.get_full_path()
    # TODO: Restore testing for this case; it was removed when
    # we enabled web-public stream testing on /.
    if path == "/":  # nocoverage
        # Don't add ?next=/, to keep our URLs clean
        return HttpResponseRedirect(resolved_login_url)
    return redirect_to_login(path, resolved_login_url, redirect_field_name)
# From Django 2.2, modified to pass the request rather than just the
# user into test_func; this is useful so that we can revalidate the
# subdomain matches the user's realm. It is likely that we could make
# the subdomain validation happen elsewhere and switch to using the
# stock Django version.
def user_passes_test(
    test_func: Callable[[HttpRequest], bool],
    login_url: Optional[str] = None,
    redirect_field_name: str = REDIRECT_FIELD_NAME,
) -> Callable[[ViewFuncT], ViewFuncT]:
    """
    Decorator for views that checks that the user passes the given test,
    redirecting to the log-in page if necessary. The test should be a callable
    that takes the HttpRequest object and returns True if the user passes.
    """

    def decorator(view_func: ViewFuncT) -> ViewFuncT:
        @wraps(view_func)
        def _wrapped_view(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
            if test_func(request):
                return view_func(request, *args, **kwargs)
            return zulip_redirect_to_login(request, login_url, redirect_field_name)

        return cast(ViewFuncT, _wrapped_view)  # https://github.com/python/mypy/issues/1927

    return decorator
def logged_in_and_active(request: HttpRequest) -> bool:
    """True when the requester is an authenticated, active user whose realm
    is active and matches the request's subdomain."""
    user = request.user
    # Short-circuiting matters: AnonymousUser has no .realm attribute.
    return bool(
        user.is_authenticated
        and user.is_active
        and not user.realm.deactivated
        and user_matches_subdomain(get_subdomain(request), user)
    )
def do_two_factor_login(request: HttpRequest, user_profile: UserProfile) -> None:
    """Mark the session as 2FA-verified using the user's default OTP device,
    if they have one configured."""
    device = default_device(user_profile)
    if device:
        django_otp.login(request, device)
def do_login(request: HttpRequest, user_profile: UserProfile) -> None:
    """Creates a session, logging in the user, using the Django method,
    and also adds helpful data needed by our server logs.
    """
    django_login(request, user_profile)
    RequestNotes.get_notes(request).requestor_for_logs = user_profile.format_requestor_for_logs()
    process_client(request, user_profile, is_browser_view=True)
    if settings.TWO_FACTOR_AUTHENTICATION_ENABLED:
        # Log in with two factor authentication as well.
        do_two_factor_login(request, user_profile)
def log_view_func(view_func: ViewFuncT) -> ViewFuncT:
    """Decorator recording the view function's name in the request notes for
    server-log attribution."""

    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        notes = RequestNotes.get_notes(request)
        notes.query = view_func.__name__
        return view_func(request, *args, **kwargs)

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
def add_logging_data(view_func: ViewFuncT) -> ViewFuncT:
    """Decorator: record client info for logging, then run the view behind
    the standard rate limiter."""

    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        process_client(request, request.user, is_browser_view=True, query=view_func.__name__)
        return rate_limit()(view_func)(request, *args, **kwargs)

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
def human_users_only(view_func: ViewFuncT) -> ViewFuncT:
    """Decorator: reject requests made by bot users."""

    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        assert request.user.is_authenticated
        if request.user.is_bot:
            raise JsonableError(_("This endpoint does not accept bot requests."))
        return view_func(request, *args, **kwargs)

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
@overload
def zulip_login_required(
    function: ViewFuncT,
    redirect_field_name: str = REDIRECT_FIELD_NAME,
    login_url: str = settings.HOME_NOT_LOGGED_IN,
) -> ViewFuncT:
    ...


@overload
def zulip_login_required(
    function: None,
    redirect_field_name: str = REDIRECT_FIELD_NAME,
    login_url: str = settings.HOME_NOT_LOGGED_IN,
) -> Callable[[ViewFuncT], ViewFuncT]:
    ...


# Based on Django 1.8's @login_required
def zulip_login_required(
    function: Optional[ViewFuncT] = None,
    redirect_field_name: str = REDIRECT_FIELD_NAME,
    login_url: str = settings.HOME_NOT_LOGGED_IN,
) -> Union[Callable[[ViewFuncT], ViewFuncT], ViewFuncT]:
    """Like Django's login_required, but the test also enforces that the user
    is active and on the right subdomain, applies 2FA verification, and
    records logging data."""

    def actual_decorator(function: ViewFuncT) -> ViewFuncT:
        # Innermost: logging; then 2FA verification; outermost: the
        # logged-in-and-active check that redirects to the login page.
        with_otp = zulip_otp_required(
            redirect_field_name=redirect_field_name,
            login_url=login_url,
        )(add_logging_data(function))
        return user_passes_test(
            logged_in_and_active,
            login_url=login_url,
            redirect_field_name=redirect_field_name,
        )(with_otp)

    if function:
        return actual_decorator(function)
    return actual_decorator  # nocoverage # We don't use this without a function
def web_public_view(
    view_func: ViewFuncT,
    redirect_field_name: str = REDIRECT_FIELD_NAME,
    login_url: str = settings.HOME_NOT_LOGGED_IN,
) -> Union[Callable[[ViewFuncT], ViewFuncT], ViewFuncT]:
    """
    This wrapper adds client info for unauthenticated users but
    forces authenticated users to go through 2fa.

    NOTE: This function == zulip_login_required in a production environment as
    web_public_view path has only been enabled for development purposes
    currently.
    """
    if not settings.DEVELOPMENT:
        # Coverage disabled because DEVELOPMENT is always true in development.
        return zulip_login_required(view_func, redirect_field_name, login_url)  # nocoverage
    # In development: skip the login requirement, but still apply 2FA (for
    # authenticated users) and logging.
    actual_decorator = lambda view_func: zulip_otp_required(
        redirect_field_name=redirect_field_name, login_url=login_url
    )(add_logging_data(view_func))
    return actual_decorator(view_func)
def require_server_admin(view_func: ViewFuncT) -> ViewFuncT:
    """Decorator for browser views restricted to server (Django staff)
    administrators; non-staff users are redirected to the login page."""

    @zulip_login_required
    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        if not request.user.is_staff:
            return HttpResponseRedirect(settings.HOME_NOT_LOGGED_IN)
        return add_logging_data(view_func)(request, *args, **kwargs)

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
def require_server_admin_api(view_func: ViewFuncT) -> ViewFuncT:
    """Decorator for API-style views restricted to server (Django staff)
    administrators; unlike require_server_admin, the view already receives
    user_profile and failures produce a JSON error rather than a redirect."""

    @zulip_login_required
    @wraps(view_func)
    def _wrapped_view_func(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        if not user_profile.is_staff:
            # Fixed grammar in the user-facing error: "an server" -> "a server".
            raise JsonableError(_("Must be a server administrator"))
        return view_func(request, user_profile, *args, **kwargs)

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
def require_non_guest_user(view_func: ViewFuncT) -> ViewFuncT:
    """Decorator: reject requests from guest users."""

    @wraps(view_func)
    def _wrapped_view_func(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        if user_profile.is_guest:
            raise JsonableError(_("Not allowed for guest users"))
        return view_func(request, user_profile, *args, **kwargs)

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
def require_member_or_admin(view_func: ViewFuncT) -> ViewFuncT:
    """Decorator: reject requests from guest users and bots."""

    @wraps(view_func)
    def _wrapped_view_func(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        if user_profile.is_guest:
            raise JsonableError(_("Not allowed for guest users"))
        if user_profile.is_bot:
            raise JsonableError(_("This endpoint does not accept bot requests."))
        return view_func(request, user_profile, *args, **kwargs)

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
def require_user_group_edit_permission(view_func: ViewFuncT) -> ViewFuncT:
    """Decorator: require user-group edit permission (and, via
    require_member_or_admin, reject guests and bots)."""

    @require_member_or_admin
    @wraps(view_func)
    def _wrapped_view_func(
        request: HttpRequest, user_profile: UserProfile, *args: object, **kwargs: object
    ) -> HttpResponse:
        if not user_profile.can_edit_user_groups():
            raise JsonableError(_("Insufficient permission"))
        return view_func(request, user_profile, *args, **kwargs)

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
# This API endpoint is used only for the mobile apps.  It is part of a
# workaround for the fact that React Native doesn't support setting
# HTTP basic authentication headers.
def authenticated_uploads_api_view(
    skip_rate_limiting: bool = False,
) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]:
    """Decorator factory authenticating uploads requests via an `api_key`
    request variable instead of HTTP basic auth."""

    def _wrapped_view_func(view_func: Callable[..., HttpResponse]) -> Callable[..., HttpResponse]:
        @csrf_exempt
        @has_request_variables
        @wraps(view_func)
        def _wrapped_func_arguments(
            request: HttpRequest, api_key: str = REQ(), *args: object, **kwargs: object
        ) -> HttpResponse:
            # Webhook bots are not allowed here (allow_webhook_access=False).
            user_profile = validate_api_key(request, None, api_key, False)
            if not skip_rate_limiting:
                limited_func = rate_limit()(view_func)
            else:
                limited_func = view_func
            return limited_func(request, user_profile, *args, **kwargs)

        return _wrapped_func_arguments

    return _wrapped_view_func
# A more REST-y authentication decorator, using, in particular, HTTP basic
# authentication.
#
# If webhook_client_name is specific, the request is a webhook view
# with that string as the basis for the client string.
def authenticated_rest_api_view(
    *,
    webhook_client_name: Optional[str] = None,
    allow_webhook_access: bool = False,
    skip_rate_limiting: bool = False,
) -> Callable[[Callable[..., HttpResponse]], Callable[..., HttpResponse]]:
    """Decorator factory for REST API views authenticated by HTTP basic auth
    (username = email or server UUID, password = API key)."""
    if webhook_client_name is not None:
        allow_webhook_access = True

    def _wrapped_view_func(view_func: Callable[..., HttpResponse]) -> Callable[..., HttpResponse]:
        @csrf_exempt
        @wraps(view_func)
        def _wrapped_func_arguments(
            request: HttpRequest, *args: object, **kwargs: object
        ) -> HttpResponse:
            # First try block attempts to get the credentials we need to do authentication
            try:
                # Grab the base64-encoded authentication string, decode it, and split it into
                # the email and API key
                auth_type, credentials = request.META["HTTP_AUTHORIZATION"].split()
                # case insensitive per RFC 1945
                if auth_type.lower() != "basic":
                    raise JsonableError(_("This endpoint requires HTTP basic authentication."))
                role, api_key = base64.b64decode(credentials).decode().split(":")
            except ValueError:
                # Raised by a malformed header, bad base64, or a credential
                # string without exactly one ":".
                return json_unauthorized(_("Invalid authorization header for basic auth"))
            except KeyError:
                return json_unauthorized(_("Missing authorization header for basic auth"))
            # Now we try to do authentication or die
            try:
                # profile is a Union[UserProfile, RemoteZulipServer]
                profile = validate_api_key(
                    request,
                    role,
                    api_key,
                    allow_webhook_access=allow_webhook_access,
                    client_name=full_webhook_client_name(webhook_client_name),
                )
            except JsonableError as e:
                return json_unauthorized(e.msg)
            try:
                if not skip_rate_limiting:
                    # Apply rate limiting
                    target_view_func = rate_limit()(view_func)
                else:
                    target_view_func = view_func
                return target_view_func(request, profile, *args, **kwargs)
            except Exception as err:
                if not webhook_client_name:
                    raise err
                if isinstance(err, JsonableError) and not isinstance(
                    err, UnsupportedWebhookEventType
                ):  # nocoverage
                    raise err
                # For webhook views, log unexpected errors (and unsupported
                # event types) to the webhook loggers before re-raising.
                if isinstance(err, UnsupportedWebhookEventType):
                    err.webhook_name = webhook_client_name
                log_exception_to_webhook_logger(
                    summary=str(err),
                    unsupported_event=isinstance(err, UnsupportedWebhookEventType),
                )
                raise err

        return _wrapped_func_arguments

    return _wrapped_view_func
def process_as_post(view_func: ViewFuncT) -> ViewFuncT:
    """Decorator: populate request.POST (and FILES) from the request body for
    non-POST methods such as PATCH/PUT, which Django leaves unparsed."""

    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        # Adapted from django/http/__init__.py.
        # So by default Django doesn't populate request.POST for anything besides
        # POST requests. We want this dict populated for PATCH/PUT, so we have to
        # do it ourselves.
        #
        # This will not be required in the future, a bug will be filed against
        # Django upstream.
        if not request.POST:
            # Only take action if POST is empty.
            if request.META.get("CONTENT_TYPE", "").startswith("multipart"):
                # Note that request._files is just the private attribute that backs the
                # FILES property, so we are essentially setting request.FILES here. (In
                # Django 1.5 FILES was still a read-only property.)
                request.POST, request._files = MultiPartParser(
                    request.META,
                    BytesIO(request.body),
                    request.upload_handlers,
                    request.encoding,
                ).parse()
            else:
                request.POST = QueryDict(request.body, encoding=request.encoding)
        return view_func(request, *args, **kwargs)

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
def authenticate_log_and_execute_json(
    request: HttpRequest,
    view_func: ViewFuncT,
    *args: object,
    skip_rate_limiting: bool = False,
    allow_unauthenticated: bool = False,
    **kwargs: object,
) -> HttpResponse:
    """Shared implementation for authenticated_json_view: validate the
    session user (or permit anonymous access when allow_unauthenticated),
    record client info, and run the (optionally rate-limited) view."""
    if not skip_rate_limiting:
        limited_view_func = rate_limit()(view_func)
    else:
        limited_view_func = view_func

    if not request.user.is_authenticated:
        if not allow_unauthenticated:
            return json_unauthorized()

        # Anonymous access: record the client but skip user-activity logging.
        process_client(
            request,
            request.user,
            is_browser_view=True,
            skip_update_user_activity=True,
            query=view_func.__name__,
        )
        return limited_view_func(request, request.user, *args, **kwargs)

    user_profile = request.user
    validate_account_and_subdomain(request, user_profile)

    if user_profile.is_incoming_webhook:
        raise JsonableError(_("Webhook bots can only access webhooks"))

    process_client(request, user_profile, is_browser_view=True, query=view_func.__name__)
    return limited_view_func(request, user_profile, *args, **kwargs)
# Checks if the user is logged in.  If not, return an error (the
# @login_required behavior of redirecting to a login page doesn't make
# sense for json views)
def authenticated_json_view(
    view_func: Callable[..., HttpResponse],
    skip_rate_limiting: bool = False,
    allow_unauthenticated: bool = False,
) -> Callable[..., HttpResponse]:
    """Decorator for JSON views using session authentication; delegates to
    authenticate_log_and_execute_json."""

    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        return authenticate_log_and_execute_json(
            request,
            view_func,
            *args,
            skip_rate_limiting=skip_rate_limiting,
            allow_unauthenticated=allow_unauthenticated,
            **kwargs,
        )

    return _wrapped_view_func
def is_local_addr(addr: str) -> bool:
    """True for the loopback addresses (IPv4 or IPv6)."""
    return addr in {"127.0.0.1", "::1"}
# These views are used by the main Django server to notify the Tornado server
# of events.  We protect them from the outside world by checking a shared
# secret, and also the originating IP (for now).
def authenticate_notify(request: HttpRequest) -> bool:
    """True when the request comes from localhost and carries the shared secret."""
    return (
        is_local_addr(request.META["REMOTE_ADDR"])
        and request.POST.get("secret") == settings.SHARED_SECRET
    )
def client_is_exempt_from_rate_limiting(request: HttpRequest) -> bool:
    """True for requests from the "internal" client originating on localhost
    (or with DEBUG_RATE_LIMITING set)."""
    # Don't rate limit requests from Django that come from our own servers,
    # and don't rate-limit dev instances
    client = RequestNotes.get_notes(request).client
    return (client is not None and client.name.lower() == "internal") and (
        is_local_addr(request.META["REMOTE_ADDR"]) or settings.DEBUG_RATE_LIMITING
    )
def internal_notify_view(
    is_tornado_view: bool,
) -> Callable[[ViewFuncT], Callable[..., HttpResponse]]:
    # The typing here could be improved by using the extended Callable types:
    # https://mypy.readthedocs.io/en/stable/additional_features.html#extended-callable-types
    """Used for situations where something running on the Zulip server
    needs to make a request to the (other) Django/Tornado processes running on
    the server."""

    def _wrapped_view_func(view_func: ViewFuncT) -> Callable[..., HttpResponse]:
        @csrf_exempt
        @require_post
        @wraps(view_func)
        def _wrapped_func_arguments(
            request: HttpRequest, *args: object, **kwargs: object
        ) -> HttpResponse:
            # Shared-secret + localhost check; this is the actual security gate.
            if not authenticate_notify(request):
                raise AccessDeniedError()
            request_notes = RequestNotes.get_notes(request)
            is_tornado_request = request_notes.tornado_handler is not None
            # These next 2 are not security checks; they are internal
            # assertions to help us find bugs.
            if is_tornado_view and not is_tornado_request:
                raise RuntimeError("Tornado notify view called with no Tornado handler")
            if not is_tornado_view and is_tornado_request:
                raise RuntimeError("Django notify view called with Tornado handler")
            request_notes.requestor_for_logs = "internal"
            return view_func(request, *args, **kwargs)

        return _wrapped_func_arguments

    return _wrapped_view_func
def to_utc_datetime(timestamp: str) -> datetime.datetime:
    """Parse a string UNIX timestamp (seconds) into a datetime."""
    seconds = float(timestamp)
    return timestamp_to_datetime(seconds)
def statsd_increment(counter: str, val: int = 1) -> Callable[[FuncT], FuncT]:
    """Increments a statsd counter on completion of the
    decorated function.

    Pass the name of the counter to this decorator-returning function."""

    def wrapper(func: FuncT) -> FuncT:
        @wraps(func)
        def wrapped_func(*args: object, **kwargs: object) -> object:
            ret = func(*args, **kwargs)
            # Incremented only on successful (non-raising) completion.
            statsd.incr(counter, val)
            return ret

        return cast(FuncT, wrapped_func)  # https://github.com/python/mypy/issues/1927

    return wrapper
def rate_limit_user(request: HttpRequest, user: UserProfile, domain: str) -> None:
    """Returns whether or not a user was rate limited. Will raise a RateLimited exception
    if the user has been rate limited, otherwise returns and modifies request to contain
    the rate limit information"""
    RateLimitedUser(user, domain=domain).rate_limit_request(request)
def rate_limit_ip(request: HttpRequest, ip_addr: str, domain: str) -> None:
    """Rate-limit by IP address; raises RateLimited when the limit is exceeded."""
    RateLimitedIPAddr(ip_addr, domain=domain).rate_limit_request(request)
def rate_limit_request_by_ip(request: HttpRequest, domain: str) -> None:
    """Rate-limit this request by its client IP address."""
    # REMOTE_ADDR is set by SetRemoteAddrFromRealIpHeader in conjunction
    # with the nginx configuration to guarantee this to be *the* correct
    # IP address to use - without worrying we'll grab the IP of a proxy.
    ip_addr = request.META["REMOTE_ADDR"]
    assert ip_addr
    rate_limit_ip(request, ip_addr, domain=domain)
def rate_limit_remote_server(
    request: HttpRequest, remote_server: "RemoteZulipServer", domain: str
) -> None:
    """Rate-limit a remote Zulip server (push bouncer client), logging a
    warning before re-raising RateLimited."""
    try:
        RateLimitedRemoteZulipServer(remote_server, domain=domain).rate_limit_request(request)
    except RateLimited as e:
        rate_limiter_logger.warning(
            "Remote server %s exceeded rate limits on domain %s", remote_server, domain
        )
        raise e
def rate_limit() -> Callable[[ViewFuncT], ViewFuncT]:
    """Rate-limits a view. Returns a decorator"""

    def wrapper(func: ViewFuncT) -> ViewFuncT:
        @wraps(func)
        def wrapped_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
            # It is really tempting to not even wrap our original function
            # when settings.RATE_LIMITING is False, but it would make
            # for awkward unit testing in some situations.
            if not settings.RATE_LIMITING:
                return func(request, *args, **kwargs)

            if client_is_exempt_from_rate_limiting(request):
                return func(request, *args, **kwargs)

            user = request.user

            # Pick the rate-limit key by principal type: IP for anonymous,
            # remote-server for bouncer clients, user otherwise.
            if isinstance(user, AnonymousUser):
                rate_limit_request_by_ip(request, domain="api_by_ip")
                return func(request, *args, **kwargs)
            elif settings.ZILENCER_ENABLED and isinstance(user, RemoteZulipServer):
                rate_limit_remote_server(request, user, domain="api_by_remote_server")
            else:
                assert isinstance(user, UserProfile)
                rate_limit_user(request, user, domain="api_by_user")

            return func(request, *args, **kwargs)

        return cast(ViewFuncT, wrapped_func)  # https://github.com/python/mypy/issues/1927

    return wrapper
def return_success_on_head_request(view_func: ViewFuncT) -> ViewFuncT:
    """Decorator: answer HEAD requests with a generic success response,
    only invoking the wrapped view for all other methods."""

    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        if request.method != "HEAD":
            return view_func(request, *args, **kwargs)
        return json_success()

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
def zulip_otp_required(
    redirect_field_name: str = "next",
    login_url: str = settings.HOME_NOT_LOGGED_IN,
) -> Callable[[ViewFuncT], ViewFuncT]:
    """
    The reason we need to create this function is that the stock
    otp_required decorator doesn't play well with tests. We cannot
    enable/disable if_configured parameter during tests since the decorator
    retains its value due to closure.

    Similar to :func:`~django.contrib.auth.decorators.login_required`, but
    requires the user to be :term:`verified`. By default, this redirects users
    to :setting:`OTP_LOGIN_URL`.
    """

    def test(user: UserProfile) -> bool:
        """Return True when *user* may proceed without (further) 2FA.

        When TWO_FACTOR_AUTHENTICATION_ENABLED is off, everyone passes.
        Otherwise only users who are 2FA-verified, unauthenticated
        (spectator) users, or users with no configured OTP device pass;
        a user with a device who is not verified fails and is redirected.
        """
        if_configured = settings.TWO_FACTOR_AUTHENTICATION_ENABLED
        if not if_configured:
            return True
        # User has completed 2FA verification
        if user.is_verified():
            return True
        # This request is unauthenticated (logged-out) access; 2FA is
        # not required or possible.
        #
        # TODO: Add a test for 2FA-enabled with web-public views.
        if not user.is_authenticated:  # nocoverage
            return True
        # If the user doesn't have 2FA set up, we can't enforce 2FA.
        if not user_has_device(user):
            return True
        # User has configured 2FA and is not verified, so the user
        # fails the test (and we should redirect to the 2FA view).
        return False

    decorator = django_user_passes_test(
        test, login_url=login_url, redirect_field_name=redirect_field_name
    )
    return decorator
def add_google_analytics_context(context: Dict[str, object]) -> None:
    """Inject the configured Google Analytics ID into context['page_params']."""
    ga_id = settings.GOOGLE_ANALYTICS_ID
    if ga_id is None:  # nocoverage
        return
    page_params = context.setdefault("page_params", {})
    assert isinstance(page_params, dict)
    page_params["google_analytics_id"] = ga_id
def add_google_analytics(view_func: ViewFuncT) -> ViewFuncT:
    """Decorator: add Google Analytics page_params to template responses.

    Raises TypeError if a successful (200) response is not a
    SimpleTemplateResponse, since there is no context to augment.
    """

    @wraps(view_func)
    def _wrapped_view_func(request: HttpRequest, *args: object, **kwargs: object) -> HttpResponse:
        response = view_func(request, *args, **kwargs)
        if not isinstance(response, SimpleTemplateResponse):
            if response.status_code == 200:  # nocoverage
                raise TypeError("add_google_analytics requires a TemplateResponse")
            return response
        if response.context_data is None:
            response.context_data = {}
        add_google_analytics_context(response.context_data)
        return response

    return cast(ViewFuncT, _wrapped_view_func)  # https://github.com/python/mypy/issues/1927
| |
# =============================================================================
# Copyright (c) 2016, Cisco Systems, Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
# =============================================================================
from database import DBSession
from models import logger
from models import Log
from models import SystemOption
from models import InventoryJob
from models import InventoryJobHistory
from models import InstallJobHistory
from models import DownloadJobHistory
from models import CreateTarJob
from constants import get_log_directory
from constants import JobStatus
import threading
import sched
import datetime
import time
import shutil
class InventoryManagerScheduler(threading.Thread):
    """Background thread that runs the daily inventory cycle.

    Once a day, at the hour configured in SystemOption.inventory_hour,
    it flags every InventoryJob for submission and performs housekeeping:
    trimming the system Log table and pruning old job-history records.
    """

    def __init__(self, name):
        threading.Thread.__init__(self, name=name)

    def run(self):
        """Compute the first run time and start the recurring scheduler."""
        db_session = DBSession()
        try:
            system_option = SystemOption.get(db_session)
            inventory_hour = system_option.inventory_hour
            db_session.close()
            # Build a scheduler object that will look at absolute times
            scheduler = sched.scheduler(time.time, time.sleep)
            current_hour = datetime.datetime.now().hour
            # Put task for today at the designated hour.
            daily_time = datetime.time(inventory_hour)
            # If the scheduled time already passed, schedule it for tomorrow
            if current_hour > inventory_hour:
                first_time = datetime.datetime.combine(datetime.datetime.now() + datetime.timedelta(days=1), daily_time)
            else:
                first_time = datetime.datetime.combine(datetime.datetime.now(), daily_time)
            scheduler.enterabs(time.mktime(first_time.timetuple()), 1, self.scheduling, (scheduler, daily_time,))
            scheduler.run()
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # are no longer swallowed.
            logger.exception('InventoryManagerScheduler hit exception')
            db_session.close()

    def scheduling(self, scheduler, daily_time):
        """Daily tick: flag inventory jobs and run housekeeping."""
        # First, re-set up the scheduler for the next day the same time. It is important to have
        # this logic on the top so that if any error encountered below, the scheduling still works.
        t = datetime.datetime.combine(datetime.datetime.now() + datetime.timedelta(days=1), daily_time)
        scheduler.enterabs(time.mktime(t.timetuple()), 1, self.scheduling, (scheduler, daily_time,))
        db_session = DBSession()
        try:
            system_option = SystemOption.get(db_session)
            # If software inventory is enabled, submit the inventory jobs
            if system_option.enable_inventory:
                inventory_jobs = db_session.query(InventoryJob).all()
                if len(inventory_jobs) > 0:
                    for inventory_job in inventory_jobs:
                        inventory_job.pending_submit = True
                    db_session.commit()
            # Check if there is any housekeeping work to do
            self.perform_housekeeping_tasks(db_session, system_option)
        except Exception:
            logger.exception('InventoryManagerScheduler hit exception')
        finally:
            db_session.close()

    def perform_housekeeping_tasks(self, db_session, system_option):
        """Trim the Log table and prune old job-history records.

        History tables are kept to at most N records per host (or per user
        for downloads), keeping the newest and deleting the rest along with
        their on-disk session log directories.
        """
        inventory_history_per_host = system_option.inventory_history_per_host
        install_history_per_host = system_option.install_history_per_host
        download_history_per_user = system_option.download_history_per_user
        total_system_logs = system_option.total_system_logs

        current_system_logs_count = db_session.query(Log).count()
        system_logs_threshold = int(total_system_logs * 1.1)
        # If the current system logs count > the threshold (10% more than total_system_logs),
        # trim the log table back to the total_system_logs
        if current_system_logs_count > system_logs_threshold:
            num_records_to_purge = current_system_logs_count - total_system_logs
            # Select the logs by created_time in ascending order (older logs)
            logs = db_session.query(Log).order_by(Log.created_time.asc()).limit(num_records_to_purge)
            for log in logs:
                db_session.delete(log)
            db_session.commit()

        # Scanning the InventoryJobHistory table for records that should be deleted.
        # Rows arrive grouped by host (ordered newest-first within a host),
        # so skip_count counts how many we have kept for the current host.
        skip_count = 0
        current_host_id = -1
        inventory_jobs = db_session.query(InventoryJobHistory) \
            .order_by(InventoryJobHistory.host_id, InventoryJobHistory.created_time.desc())
        for inventory_job in inventory_jobs:
            if inventory_job.host_id != current_host_id:
                current_host_id = inventory_job.host_id
                skip_count = 0
            if skip_count >= inventory_history_per_host:
                # Delete the session log directory
                try:
                    if inventory_job.session_log is not None:
                        shutil.rmtree(get_log_directory() + inventory_job.session_log)
                except Exception:
                    logger.exception('InventoryManagerScheduler hit exception- inventory job = %s', inventory_job.id)
                db_session.delete(inventory_job)
            skip_count += 1
        db_session.commit()

        # Scanning the InstallJobHistory table for records that should be deleted.
        skip_count = 0
        current_host_id = -1
        install_jobs = db_session.query(InstallJobHistory) \
            .order_by(InstallJobHistory.host_id, InstallJobHistory.created_time.desc())
        for install_job in install_jobs:
            if install_job.host_id != current_host_id:
                current_host_id = install_job.host_id
                skip_count = 0
            if skip_count >= install_history_per_host:
                # Delete the session log directory
                try:
                    if install_job.session_log is not None:
                        shutil.rmtree(get_log_directory() + install_job.session_log)
                except Exception:
                    logger.exception('InventoryManagerScheduler hit exception - install job = %s', install_job.id)
                db_session.delete(install_job)
            skip_count += 1
        db_session.commit()

        # Scanning the DownloadJobHistory table for records that should be deleted.
        skip_count = 0
        current_user_id = -1
        download_jobs = db_session.query(DownloadJobHistory) \
            .order_by(DownloadJobHistory.user_id, DownloadJobHistory.created_time.desc())
        for download_job in download_jobs:
            if download_job.user_id != current_user_id:
                current_user_id = download_job.user_id
                skip_count = 0
            if skip_count >= download_history_per_user:
                db_session.delete(download_job)
            skip_count += 1
        db_session.commit()

        # Deleting old CreateTarJobs.
        # BUG FIX: this previously iterated `query(CreateTarJob).all` (the
        # bound method object) instead of calling `.all()`, which raised
        # TypeError and skipped the cleanup entirely.
        create_tar_jobs = db_session.query(CreateTarJob).all()
        for create_tar_job in create_tar_jobs:
            if create_tar_job.status == JobStatus.COMPLETED or create_tar_job.status == JobStatus.FAILED:
                db_session.delete(create_tar_job)
        db_session.commit()
if __name__ == '__main__':
    # This module is meant to be imported (the scheduler thread is started
    # elsewhere); there is nothing to do when run directly.
    pass
| |
"""KDDCUP 99 dataset.
A classic dataset for anomaly detection.
The dataset page is available from UCI Machine Learning Repository
https://archive.ics.uci.edu/ml/machine-learning-databases/kddcup99-mld/kddcup.data.gz
"""
import sys
import errno
from gzip import GzipFile
from io import BytesIO
import logging
import os
from os.path import exists, join
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
import numpy as np
from .base import get_data_home
from ..utils import Bunch
from ..externals import joblib, six
from ..utils import check_random_state
from ..utils import shuffle as shuffle_method
# UCI mirror of the 10%-sample archive of the KDD Cup '99 data.
URL10 = ('http://archive.ics.uci.edu/ml/'
         'machine-learning-databases/kddcup99-mld/kddcup.data_10_percent.gz')

# UCI mirror of the full KDD Cup '99 data (~4.9M rows).
URL = ('http://archive.ics.uci.edu/ml/'
       'machine-learning-databases/kddcup99-mld/kddcup.data.gz')

# NOTE(review): this fetches the *root* logger (no __name__ argument), so
# messages inherit whatever configuration the application installed on the
# root logger - confirm this is intentional.
logger = logging.getLogger()
def fetch_kddcup99(subset=None, shuffle=False, random_state=None,
                   percent10=True, download_if_missing=True):
    """Load and return the kddcup 99 dataset (classification).

    The KDD Cup '99 dataset was created by processing the tcpdump portions
    of the 1998 DARPA Intrusion Detection System (IDS) Evaluation dataset,
    created by MIT Lincoln Lab [1]. The artificial data was generated using
    a closed network and hand-injected attacks to produce a large number of
    different types of attack with normal activity in the background.

    As the initial goal was to produce a large training set for supervised
    learning algorithms, there is a large proportion (80.1%) of abnormal
    data which is unrealistic in real world, and inappropriate for
    unsupervised anomaly detection which aims at detecting 'abnormal' data,
    ie 1) qualitatively different from normal data 2) in large minority
    among the observations.

    We thus transform the KDD Data set into two different data sets: SA and SF.

    - SA is obtained by simply selecting all the normal data, and a small
      proportion of abnormal data to gives an anomaly proportion of 1%.
    - SF is obtained as in [2] by simply picking up the data whose attribute
      logged_in is positive, thus focusing on the intrusion attack, which
      gives a proportion of 0.3% of attack.
    - http and smtp are two subsets of SF corresponding with third feature
      equal to 'http' (resp. to 'smtp')

    Dataset shapes (n_samples x n_features): full 4898431 x 41,
    SA 976158 x 41, SF 699691 x 4, http 619052 x 3, smtp 95373 x 3.
    Features are discrete (int) or continuous (float); targets are str,
    'normal.' or the name of the anomaly type.

    .. versionadded:: 0.18

    Parameters
    ----------
    subset : None, 'SA', 'SF', 'http', 'smtp'
        To return the corresponding classical subsets of kddcup 99.
        If None, return the entire kddcup 99 dataset.

    shuffle : bool, default=False
        Whether to shuffle dataset.

    random_state : int, RandomState instance or None, optional (default=None)
        Random state used for shuffling the dataset and for sampling the
        abnormal portion of the 'SA' subset.
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    percent10 : bool, default=True
        Whether to load only 10 percent of the data.

    download_if_missing : bool, default=True
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    Returns
    -------
    data : Bunch
        Dictionary-like object, the interesting attributes are:
        'data', the data to learn and 'target', the classification target
        for each sample.

    References
    ----------
    .. [1] Analysis and Results of the 1999 DARPA Off-Line Intrusion
           Detection Evaluation Richard Lippmann, Joshua W. Haines,
           David J. Fried, Jonathan Korba, Kumar Das

    .. [2] K. Yamanishi, J.-I. Takeuchi, G. Williams, and P. Milne. Online
           unsupervised outlier detection using finite mixtures with
           discounting learning algorithms. In Proceedings of the sixth
           ACM SIGKDD international conference on Knowledge discovery
           and data mining, pages 320-324. ACM Press, 2000.
    """
    # BUG FIX: forward random_state so that shuffle=True is reproducible.
    # Previously the shuffle inside _fetch_brute_kddcup99 always ran with
    # its default random_state=None, silently ignoring the caller's seed.
    kddcup99 = _fetch_brute_kddcup99(shuffle=shuffle, percent10=percent10,
                                     random_state=random_state,
                                     download_if_missing=download_if_missing)

    data = kddcup99.data
    target = kddcup99.target

    if subset == 'SA':
        # Keep every normal sample plus a fixed-size (3377) random sample
        # of the abnormal ones (~1% anomaly proportion).
        s = target == b'normal.'
        t = np.logical_not(s)
        normal_samples = data[s, :]
        normal_targets = target[s]
        abnormal_samples = data[t, :]
        abnormal_targets = target[t]

        n_samples_abnormal = abnormal_samples.shape[0]
        # selected abnormal samples:
        random_state = check_random_state(random_state)
        r = random_state.randint(0, n_samples_abnormal, 3377)
        abnormal_samples = abnormal_samples[r]
        abnormal_targets = abnormal_targets[r]

        data = np.r_[normal_samples, abnormal_samples]
        target = np.r_[normal_targets, abnormal_targets]

    if subset == 'SF' or subset == 'http' or subset == 'smtp':
        # select all samples with positive logged_in attribute:
        s = data[:, 11] == 1
        # Drop the logged_in column (index 11) itself.
        data = np.c_[data[s, :11], data[s, 12:]]
        target = target[s]

        # Log-transform the heavy-tailed continuous features.
        data[:, 0] = np.log((data[:, 0] + 0.1).astype(float))
        data[:, 4] = np.log((data[:, 4] + 0.1).astype(float))
        data[:, 5] = np.log((data[:, 5] + 0.1).astype(float))

        if subset == 'http':
            s = data[:, 2] == b'http'
            data = data[s]
            target = target[s]
            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]

        if subset == 'smtp':
            s = data[:, 2] == b'smtp'
            data = data[s]
            target = target[s]
            data = np.c_[data[:, 0], data[:, 4], data[:, 5]]

        if subset == 'SF':
            data = np.c_[data[:, 0], data[:, 2], data[:, 4], data[:, 5]]

    return Bunch(data=data, target=target)
def _fetch_brute_kddcup99(subset=None, data_home=None,
                          download_if_missing=True, random_state=None,
                          shuffle=False, percent10=True):
    """Load the kddcup99 dataset, downloading it if necessary.

    Parameters
    ----------
    subset : None, 'SA', 'SF', 'http', 'smtp'
        To return the corresponding classical subsets of kddcup 99.
        If None, return the entire kddcup 99 dataset.

    data_home : string, optional
        Specify another download and cache folder for the datasets. By default
        all scikit learn data is stored in '~/scikit_learn_data' subfolders.

    download_if_missing : boolean, default=True
        If False, raise a IOError if the data is not locally available
        instead of trying to download the data from the source site.

    random_state : int, RandomState instance or None, optional (default=None)
        Random state for shuffling the dataset.
        If int, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used
        by `np.random`.

    shuffle : bool, default=False
        Whether to shuffle dataset.

    percent10 : bool, default=True
        Whether to load only 10 percent of the data.

    Returns
    -------
    dataset : dict-like object with the following attributes:
        dataset.data : numpy array of shape (494021, 41)
            Each row corresponds to the 41 features in the dataset.
        dataset.target : numpy array of shape (494021,)
            Each value corresponds to one of the 21 attack types or to the
            label 'normal.'.
        dataset.DESCR : string
            Description of the kddcup99 dataset.
    """
    data_home = get_data_home(data_home=data_home)
    if sys.version_info[0] == 3:
        # The zlib compression format use by joblib is not compatible when
        # switching from Python 2 to Python 3, let us use a separate folder
        # under Python 3:
        dir_suffix = "-py3"
    else:
        # Backward compat for Python 2 users
        dir_suffix = ""
    if percent10:
        kddcup_dir = join(data_home, "kddcup99_10" + dir_suffix)
    else:
        kddcup_dir = join(data_home, "kddcup99" + dir_suffix)
    samples_path = join(kddcup_dir, "samples")
    targets_path = join(kddcup_dir, "targets")
    available = exists(samples_path)

    # The original implementation relied on catching a NameError raised by
    # referencing possibly-undefined X/y to decide whether to load the
    # cache; the three cases are now explicit.
    if available:
        # Cached copy on disk: just load it.
        X = joblib.load(samples_path)
        y = joblib.load(targets_path)
    elif download_if_missing:
        _mkdirp(kddcup_dir)
        URL_ = URL10 if percent10 else URL
        logger.warning("Downloading %s" % URL_)
        f = BytesIO(urlopen(URL_).read())

        # Per-column dtypes of the 41 features plus the trailing label.
        dt = [('duration', int),
              ('protocol_type', 'S4'),
              ('service', 'S11'),
              ('flag', 'S6'),
              ('src_bytes', int),
              ('dst_bytes', int),
              ('land', int),
              ('wrong_fragment', int),
              ('urgent', int),
              ('hot', int),
              ('num_failed_logins', int),
              ('logged_in', int),
              ('num_compromised', int),
              ('root_shell', int),
              ('su_attempted', int),
              ('num_root', int),
              ('num_file_creations', int),
              ('num_shells', int),
              ('num_access_files', int),
              ('num_outbound_cmds', int),
              ('is_host_login', int),
              ('is_guest_login', int),
              ('count', int),
              ('srv_count', int),
              ('serror_rate', float),
              ('srv_serror_rate', float),
              ('rerror_rate', float),
              ('srv_rerror_rate', float),
              ('same_srv_rate', float),
              ('diff_srv_rate', float),
              ('srv_diff_host_rate', float),
              ('dst_host_count', int),
              ('dst_host_srv_count', int),
              ('dst_host_same_srv_rate', float),
              ('dst_host_diff_srv_rate', float),
              ('dst_host_same_src_port_rate', float),
              ('dst_host_srv_diff_host_rate', float),
              ('dst_host_serror_rate', float),
              ('dst_host_srv_serror_rate', float),
              ('dst_host_rerror_rate', float),
              ('dst_host_srv_rerror_rate', float),
              ('labels', 'S16')]
        DT = np.dtype(dt)

        file_ = GzipFile(fileobj=f, mode='r')
        Xy = []
        for line in file_.readlines():
            if six.PY3:
                line = line.decode()
            Xy.append(line.replace('\n', '').split(','))
        file_.close()
        print('extraction done')
        Xy = np.asarray(Xy, dtype=object)
        for j in range(42):
            Xy[:, j] = Xy[:, j].astype(DT[j])

        X = Xy[:, :-1]
        y = Xy[:, -1]
        # XXX bug when compress!=0:
        # (error: 'Incorrect data length while decompressing[...] the file
        # could be corrupted.')
        joblib.dump(X, samples_path, compress=0)
        joblib.dump(y, targets_path, compress=0)
    else:
        raise IOError("Data not found and `download_if_missing` is False")

    if shuffle:
        X, y = shuffle_method(X, y, random_state=random_state)

    return Bunch(data=X, target=y, DESCR=__doc__)
def _mkdirp(d):
"""Ensure directory d exists (like mkdir -p on Unix)
No guarantee that the directory is writable.
"""
try:
os.makedirs(d)
except OSError as e:
if e.errno != errno.EEXIST:
raise
| |
"""Unit tests of commenting objects."""
import pytest
from ..utilities.general import is_never_authz, is_no_authz, uses_cataloging, uses_filesystem_only
from dlkit.abstract_osid.commenting import objects as ABCObjects
from dlkit.abstract_osid.id.primitives import Id as ABC_Id
from dlkit.abstract_osid.locale.primitives import DisplayText as ABC_DisplayText
from dlkit.abstract_osid.osid import errors
from dlkit.abstract_osid.osid.objects import OsidCatalog
from dlkit.json_.id.objects import IdList
from dlkit.json_.osid.metadata import Metadata
from dlkit.primordium.id.primitives import Id
from dlkit.primordium.locale.primitives import DisplayText
from dlkit.primordium.type.primitives import Type
from dlkit.runtime import PROXY_SESSION, proxy_example
from dlkit.runtime.managers import Runtime
# Proxy plumbing shared by every fixture below: a fake HTTP request is
# attached to a proxy condition, which yields the PROXY handed to Runtime.
REQUEST = proxy_example.SimpleRequest()
CONDITION = PROXY_SESSION.get_proxy_condition()
CONDITION.set_http_request(REQUEST)
PROXY = PROXY_SESSION.get_proxy(CONDITION)

# Generic Type and Agent Id values used when creating test objects.
DEFAULT_TYPE = Type(**{'identifier': 'DEFAULT', 'namespace': 'DEFAULT', 'authority': 'DEFAULT'})
AGENT_ID = Id(**{'identifier': 'jane_doe', 'namespace': 'osid.agent.Agent', 'authority': 'MIT-ODL'})
@pytest.fixture(scope="class",
                params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def comment_class_fixture(request):
    """Class-scoped fixture: one Book catalog plus one Comment per service config.

    Skips object creation for the never-authz config, where writes would fail.
    """
    request.cls.service_config = request.param
    request.cls.svc_mgr = Runtime().get_service_manager(
        'COMMENTING',
        proxy=PROXY,
        implementation=request.cls.service_config)
    if not is_never_authz(request.cls.service_config):
        create_form = request.cls.svc_mgr.get_book_form_for_create([])
        create_form.display_name = 'Test catalog'
        create_form.description = 'Test catalog description'
        request.cls.catalog = request.cls.svc_mgr.create_book(create_form)

        # The Comment under test references a resource Id that does not
        # actually exist (several tests depend on that).
        form = request.cls.catalog.get_comment_form_for_create(
            Id('resource.Resource%3A1%40ODL.MIT.EDU'),
            [])
        form.display_name = 'Test object'
        request.cls.object = request.cls.catalog.create_comment(form)

    def class_tear_down():
        # Delete every comment, then the catalog itself.
        if not is_never_authz(request.cls.service_config):
            for obj in request.cls.catalog.get_comments():
                request.cls.catalog.delete_comment(obj.ident)
            request.cls.svc_mgr.delete_book(request.cls.catalog.ident)

    request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def comment_test_fixture(request):
    # Per-test hook; all state is created once per class in comment_class_fixture.
    pass
@pytest.mark.usefixtures("comment_class_fixture", "comment_test_fixture")
class TestComment(object):
    """Tests for Comment"""

    @pytest.mark.skip('unimplemented test')
    def test_get_reference_id(self):
        """Tests get_reference_id"""
        pass

    @pytest.mark.skip('unimplemented test')
    def test_get_commentor_id(self):
        """Tests get_commentor_id"""
        pass

    def test_get_commentor(self):
        """Tests get_commentor"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            # get_commentor is expected to raise Unimplemented here.
            with pytest.raises(errors.Unimplemented):
                self.object.get_commentor()

    def test_get_commenting_agent_id(self):
        """Tests get_commenting_agent_id"""
        if not is_never_authz(self.service_config):
            result = self.object.get_commenting_agent_id()
            assert isinstance(result, Id)
            # The commenting agent should be the proxy's effective agent.
            assert str(result) == str(self.catalog._proxy.get_effective_agent_id())

    def test_get_commenting_agent(self):
        """Tests get_commenting_agent"""
        if not is_never_authz(self.service_config):
            # because the resource doesn't actually exist
            with pytest.raises(errors.OperationFailed):
                self.object.get_commenting_agent()

    def test_get_text(self):
        """Tests get_text"""
        if not is_never_authz(self.service_config):
            result = self.object.get_text()
            assert isinstance(result, DisplayText)
            # The fixture never set any text, so the default is empty.
            assert result.text == ''

    def test_has_rating(self):
        """Tests has_rating"""
        # From test_templates/resources.py::Resource::has_avatar_template
        if not is_never_authz(self.service_config):
            assert isinstance(self.object.has_rating(), bool)

    def test_get_rating_id(self):
        """Tests get_rating_id"""
        # From test_templates/resources.py::Resource::get_avatar_id_template
        if not is_never_authz(self.service_config):
            # No rating has been set, so reading it is an IllegalState.
            pytest.raises(errors.IllegalState,
                          self.object.get_rating_id)

    def test_get_rating(self):
        """Tests get_rating"""
        # From test_templates/resources.py::Resource::get_avatar_template
        if not is_never_authz(self.service_config):
            pytest.raises(errors.IllegalState,
                          self.object.get_rating)

    def test_get_comment_record(self):
        """Tests get_comment_record"""
        if is_never_authz(self.service_config):
            pass  # no object to call the method on?
        else:
            # A bogus record type should be rejected as Unsupported.
            with pytest.raises(errors.Unsupported):
                self.object.get_comment_record(True)
@pytest.fixture(scope="class",
                params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def comment_form_class_fixture(request):
    """Class-scoped fixture: a Book catalog and a blank CommentForm per config."""
    request.cls.service_config = request.param
    request.cls.svc_mgr = Runtime().get_service_manager(
        'COMMENTING',
        proxy=PROXY,
        implementation=request.cls.service_config)
    if not is_never_authz(request.cls.service_config):
        create_form = request.cls.svc_mgr.get_book_form_for_create([])
        create_form.display_name = 'Test Book'
        create_form.description = 'Test Book for CommentForm tests'
        request.cls.catalog = request.cls.svc_mgr.create_book(create_form)
        # The form under test; never submitted, so no comment is created.
        request.cls.form = request.cls.catalog.get_comment_form_for_create(AGENT_ID, [])

    def class_tear_down():
        if not is_never_authz(request.cls.service_config):
            for catalog in request.cls.svc_mgr.get_books():
                request.cls.svc_mgr.delete_book(catalog.ident)

    request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def comment_form_test_fixture(request):
    # Per-test hook; the shared form lives in comment_form_class_fixture.
    pass
@pytest.mark.usefixtures("comment_form_class_fixture", "comment_form_test_fixture")
class TestCommentForm(object):
    """Tests for CommentForm"""

    def test_get_text_metadata(self):
        """Tests get_text_metadata"""
        # From test_templates/resource.py::ResourceForm::get_group_metadata_template
        if not is_never_authz(self.service_config):
            mdata = self.form.get_text_metadata()
            assert isinstance(mdata, Metadata)
            assert isinstance(mdata.get_element_id(), ABC_Id)
            assert isinstance(mdata.get_element_label(), ABC_DisplayText)
            assert isinstance(mdata.get_instructions(), ABC_DisplayText)
            assert mdata.get_syntax() == 'STRING'
            assert not mdata.is_array()
            assert isinstance(mdata.is_required(), bool)
            assert isinstance(mdata.is_read_only(), bool)
            assert isinstance(mdata.is_linked(), bool)

    def test_set_text(self):
        """Tests set_text"""
        # From test_templates/repository.py::AssetForm::set_title_template
        if not is_never_authz(self.service_config):
            default_value = self.form.get_text_metadata().get_default_string_values()[0]
            assert self.form._my_map['text'] == default_value
            self.form.set_text('String')
            assert self.form._my_map['text']['text'] == 'String'
            # Non-string values must be rejected.
            with pytest.raises(errors.InvalidArgument):
                self.form.set_text(42)

    def test_clear_text(self):
        """Tests clear_text"""
        # From test_templates/repository.py::AssetForm::clear_title_template
        if not is_never_authz(self.service_config):
            self.form.set_text('A String to Clear')
            assert self.form._my_map['text']['text'] == 'A String to Clear'
            self.form.clear_text()
            # Clearing restores the metadata-provided default.
            assert self.form._my_map['text'] == self.form.get_text_metadata().get_default_string_values()[0]

    def test_get_rating_metadata(self):
        """Tests get_rating_metadata"""
        # From test_templates/resource.py::ResourceForm::get_avatar_metadata_template
        if not is_never_authz(self.service_config):
            mdata = self.form.get_rating_metadata()
            assert isinstance(mdata, Metadata)
            assert isinstance(mdata.get_element_id(), ABC_Id)
            assert isinstance(mdata.get_element_label(), ABC_DisplayText)
            assert isinstance(mdata.get_instructions(), ABC_DisplayText)
            assert mdata.get_syntax() == 'ID'
            assert not mdata.is_array()
            assert isinstance(mdata.is_required(), bool)
            assert isinstance(mdata.is_read_only(), bool)
            assert isinstance(mdata.is_linked(), bool)

    def test_set_rating(self):
        """Tests set_rating"""
        # From test_templates/resource.py::ResourceForm::set_avatar_template
        if not is_never_authz(self.service_config):
            assert self.form._my_map['ratingId'] == ''
            self.form.set_rating(Id('repository.Asset%3Afake-id%40ODL.MIT.EDU'))
            assert self.form._my_map['ratingId'] == 'repository.Asset%3Afake-id%40ODL.MIT.EDU'
            # Non-Id values must be rejected.
            with pytest.raises(errors.InvalidArgument):
                self.form.set_rating(True)

    def test_clear_rating(self):
        """Tests clear_rating"""
        # From test_templates/resource.py::ResourceForm::clear_avatar_template
        if not is_never_authz(self.service_config):
            self.form.set_rating(Id('repository.Asset%3Afake-id%40ODL.MIT.EDU'))
            assert self.form._my_map['ratingId'] == 'repository.Asset%3Afake-id%40ODL.MIT.EDU'
            self.form.clear_rating()
            assert self.form._my_map['ratingId'] == self.form.get_rating_metadata().get_default_id_values()[0]

    def test_get_comment_form_record(self):
        """Tests get_comment_form_record"""
        if not is_never_authz(self.service_config):
            with pytest.raises(errors.Unsupported):
                self.form.get_comment_form_record(Type('osid.Osid%3Afake-record%40ODL.MIT.EDU'))
            # Here check for a real record?
@pytest.fixture(scope="class",
                params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def comment_list_class_fixture(request):
    """Class-scoped fixture: a Book catalog for the CommentList tests."""
    request.cls.service_config = request.param
    request.cls.svc_mgr = Runtime().get_service_manager(
        'COMMENTING',
        proxy=PROXY,
        implementation=request.cls.service_config)
    if not is_never_authz(request.cls.service_config):
        create_form = request.cls.svc_mgr.get_book_form_for_create([])
        create_form.display_name = 'Test Book'
        create_form.description = 'Test Book for CommentForm tests'
        request.cls.catalog = request.cls.svc_mgr.create_book(create_form)
        # NOTE(review): this form appears unused by the CommentList tests -
        # the per-test fixture builds its own forms; confirm before removing.
        request.cls.form = request.cls.catalog.get_comment_form_for_create(AGENT_ID, [])

    def class_tear_down():
        # Delete every comment in every book, then the books themselves.
        if not is_never_authz(request.cls.service_config):
            for catalog in request.cls.svc_mgr.get_books():
                for comment in catalog.get_comments():
                    catalog.delete_comment(comment.ident)
                request.cls.svc_mgr.delete_book(catalog.ident)

    request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def comment_list_test_fixture(request):
    # Build a fresh CommentList of exactly two comments for every test.
    from dlkit.json_.commenting.objects import CommentList
    request.cls.comment_list = list()
    request.cls.comment_ids = list()
    if not is_never_authz(request.cls.service_config):
        for num in [0, 1]:
            form = request.cls.catalog.get_comment_form_for_create(AGENT_ID, [])
            obj = request.cls.catalog.create_comment(form)
            request.cls.comment_list.append(obj)
            request.cls.comment_ids.append(obj.ident)
        # Wrap the plain list in the CommentList iterator under test.
        request.cls.comment_list = CommentList(request.cls.comment_list)
@pytest.mark.usefixtures("comment_list_class_fixture", "comment_list_test_fixture")
class TestCommentList(object):
    """Tests for CommentList"""

    def test_get_next_comment(self):
        """Tests get_next_comment"""
        # From test_templates/resource.py::ResourceList::get_next_resource_template
        from dlkit.abstract_osid.commenting.objects import Comment
        if not is_never_authz(self.service_config):
            assert isinstance(self.comment_list.get_next_comment(), Comment)

    def test_get_next_comments(self):
        """Tests get_next_comments"""
        # From test_templates/resource.py::ResourceList::get_next_resources_template
        from dlkit.abstract_osid.commenting.objects import CommentList, Comment
        if not is_never_authz(self.service_config):
            # The fixture created exactly two comments, so requesting the
            # next 2 drains the list.
            new_list = self.comment_list.get_next_comments(2)
            assert isinstance(new_list, CommentList)
            for item in new_list:
                assert isinstance(item, Comment)
@pytest.fixture(scope="class",
                params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def book_class_fixture(request):
    """Class-level setup for Book tests: bind a COMMENTING service manager."""
    # From test_templates/resource.py::Bin::init_template
    request.cls.service_config = request.param
    request.cls.svc_mgr = Runtime().get_service_manager(
        'COMMENTING',
        proxy=PROXY,
        implementation=request.cls.service_config)

    def class_tear_down():
        # Nothing class-scoped to clean up; books are created per-test.
        pass

    request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def book_test_fixture(request):
    """Create a throwaway Book for each test and delete it afterwards."""
    # From test_templates/resource.py::Bin::init_template
    if not is_never_authz(request.cls.service_config):
        book_form = request.cls.svc_mgr.get_book_form_for_create([])
        book_form.display_name = 'for testing'
        request.cls.object = request.cls.svc_mgr.create_book(book_form)

    def test_tear_down():
        if not is_never_authz(request.cls.service_config):
            request.cls.svc_mgr.delete_book(request.cls.object.ident)

    request.addfinalizer(test_tear_down)
@pytest.mark.usefixtures("book_class_fixture", "book_test_fixture")
class TestBook(object):
    """Tests for Book"""

    def test_get_book_record(self):
        """Tests get_book_record"""
        if is_never_authz(self.service_config):
            return  # no object to call the method on
        if uses_cataloging(self.service_config):
            return  # cannot call the _get_record() methods on catalogs
        with pytest.raises(errors.Unimplemented):
            self.object.get_book_record(True)
@pytest.fixture(scope="class",
                params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def book_form_class_fixture(request):
    """Class-level setup for BookForm tests: bind a COMMENTING service manager."""
    # From test_templates/resource.py::BinForm::init_template
    request.cls.service_config = request.param
    request.cls.svc_mgr = Runtime().get_service_manager(
        'COMMENTING',
        proxy=PROXY,
        implementation=request.cls.service_config)

    def class_tear_down():
        # Nothing class-scoped to clean up.
        pass

    request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def book_form_test_fixture(request):
    """Provide a fresh create-BookForm for each test."""
    # From test_templates/resource.py::BinForm::init_template
    if not is_never_authz(request.cls.service_config):
        request.cls.object = request.cls.svc_mgr.get_book_form_for_create([])

    def test_tear_down():
        # Forms are transient; nothing to delete.
        pass

    request.addfinalizer(test_tear_down)
@pytest.mark.usefixtures("book_form_class_fixture", "book_form_test_fixture")
class TestBookForm(object):
    """Tests for BookForm"""

    def test_get_book_form_record(self):
        """Tests get_book_form_record"""
        if is_never_authz(self.service_config):
            return  # no object to call the method on
        if uses_cataloging(self.service_config):
            return  # cannot call the _get_record() methods on catalogs
        with pytest.raises(errors.Unimplemented):
            self.object.get_book_form_record(True)
@pytest.fixture(scope="class",
                params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def book_list_class_fixture(request):
    """Class-level setup for BookList tests: manager, a parent catalog, id list."""
    # Implemented from init template for BinList
    request.cls.service_config = request.param
    request.cls.svc_mgr = Runtime().get_service_manager(
        'COMMENTING',
        proxy=PROXY,
        implementation=request.cls.service_config)
    if not is_never_authz(request.cls.service_config):
        form = request.cls.svc_mgr.get_book_form_for_create([])
        form.display_name = 'Test Book'
        form.description = 'Test Book for BookList tests'
        request.cls.catalog = request.cls.svc_mgr.create_book(form)
        request.cls.book_ids = list()

    def class_tear_down():
        if not is_never_authz(request.cls.service_config):
            for book_id in request.cls.book_ids:
                request.cls.svc_mgr.delete_book(book_id)
            request.cls.svc_mgr.delete_book(request.cls.catalog.ident)

    request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def book_list_test_fixture(request):
    """Per-test setup: wrap two newly created books in a BookList."""
    # Implemented from init template for BinList
    from dlkit.json_.commenting.objects import BookList
    request.cls.book_list = []
    if not is_never_authz(request.cls.service_config):
        for num in range(2):
            form = request.cls.svc_mgr.get_book_form_for_create([])
            form.display_name = 'Test Book ' + str(num)
            form.description = 'Test Book for BookList tests'
            book = request.cls.svc_mgr.create_book(form)
            request.cls.book_list.append(book)
            request.cls.book_ids.append(book.ident)
        request.cls.book_list = BookList(request.cls.book_list)
@pytest.mark.usefixtures("book_list_class_fixture", "book_list_test_fixture")
class TestBookList(object):
    """Tests for BookList"""

    def test_get_next_book(self):
        """Tests get_next_book"""
        # From test_templates/resource.py::ResourceList::get_next_resource_template
        from dlkit.abstract_osid.commenting.objects import Book
        if is_never_authz(self.service_config):
            return
        assert isinstance(self.book_list.get_next_book(), Book)

    def test_get_next_books(self):
        """Tests get_next_books"""
        # From test_templates/resource.py::ResourceList::get_next_resources_template
        from dlkit.abstract_osid.commenting.objects import BookList, Book
        if is_never_authz(self.service_config):
            return
        batch = self.book_list.get_next_books(2)
        assert isinstance(batch, BookList)
        for entry in batch:
            assert isinstance(entry, Book)
@pytest.fixture(scope="class",
                params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def book_node_class_fixture(request):
    """Class-level setup for BookNode tests: manager, a parent catalog, id list."""
    # Implemented from init template for BinNode
    request.cls.service_config = request.param
    request.cls.svc_mgr = Runtime().get_service_manager(
        'COMMENTING',
        proxy=PROXY,
        implementation=request.cls.service_config)
    if not is_never_authz(request.cls.service_config):
        form = request.cls.svc_mgr.get_book_form_for_create([])
        form.display_name = 'Test Book'
        form.description = 'Test Book for BookNode tests'
        request.cls.catalog = request.cls.svc_mgr.create_book(form)
        request.cls.book_ids = list()

    def class_tear_down():
        if not is_never_authz(request.cls.service_config):
            request.cls.svc_mgr.delete_book(request.cls.catalog.ident)

    request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def book_node_test_fixture(request):
    """Per-test setup: two books arranged root->child, exposed as BookNodes."""
    # Implemented from init template for BinNode
    from dlkit.json_.commenting.objects import BookNode
    request.cls.book_list = []
    if not is_never_authz(request.cls.service_config):
        for num in range(2):
            form = request.cls.svc_mgr.get_book_form_for_create([])
            form.display_name = 'Test Book ' + str(num)
            form.description = 'Test Book for BookNode tests'
            book = request.cls.svc_mgr.create_book(form)
            request.cls.book_list.append(BookNode(
                book.object_map,
                runtime=request.cls.svc_mgr._runtime,
                proxy=request.cls.svc_mgr._proxy))
            request.cls.book_ids.append(book.ident)
        # Arrange the catalogs in a hierarchy: book 0 is the root, book 1 its child.
        request.cls.svc_mgr.add_root_book(request.cls.book_list[0].ident)
        request.cls.svc_mgr.add_child_book(
            request.cls.book_list[0].ident,
            request.cls.book_list[1].ident)
        request.cls.object = request.cls.svc_mgr.get_book_nodes(
            request.cls.book_list[0].ident, 0, 5, False)

    def test_tear_down():
        # Unwind the hierarchy before deleting the books themselves.
        if not is_never_authz(request.cls.service_config):
            request.cls.svc_mgr.remove_child_book(
                request.cls.book_list[0].ident,
                request.cls.book_list[1].ident)
            request.cls.svc_mgr.remove_root_book(request.cls.book_list[0].ident)
            for node in request.cls.book_list:
                request.cls.svc_mgr.delete_book(node.ident)

    request.addfinalizer(test_tear_down)
@pytest.mark.usefixtures("book_node_class_fixture", "book_node_test_fixture")
class TestBookNode(object):
    """Tests for BookNode"""
    def test_get_book(self):
        """Tests get_book"""
        # from test_templates/resource.py::BinNode::get_bin_template
        # NOTE(review): `Book` is imported but the isinstance check uses
        # OsidCatalog (a module-level import) -- presumably get_book() returns
        # an OsidCatalog subclass; confirm against the template.
        from dlkit.abstract_osid.commenting.objects import Book
        if not is_never_authz(self.service_config):
            assert isinstance(self.book_list[0].get_book(), OsidCatalog)
            assert str(self.book_list[0].get_book().ident) == str(self.book_list[0].ident)
    def test_get_parent_book_nodes(self):
        """Tests get_parent_book_nodes"""
        # from test_templates/resource.py::BinNode::get_parent_bin_nodes
        from dlkit.abstract_osid.commenting.objects import BookNodeList
        if not is_never_authz(self.service_config):
            # One ancestor level, zero descendant levels, starting at the child.
            node = self.svc_mgr.get_book_nodes(
                self.book_list[1].ident,
                1,
                0,
                False)
            assert isinstance(node.get_parent_book_nodes(), BookNodeList)
            assert node.get_parent_book_nodes().available() == 1
            assert str(node.get_parent_book_nodes().next().ident) == str(self.book_list[0].ident)
    def test_get_child_book_nodes(self):
        """Tests get_child_book_nodes"""
        # from test_templates/resource.py::BinNode::get_child_bin_nodes_template
        from dlkit.abstract_osid.commenting.objects import BookNodeList
        if not is_never_authz(self.service_config):
            # Zero ancestor levels, one descendant level, starting at the root.
            node = self.svc_mgr.get_book_nodes(
                self.book_list[0].ident,
                0,
                1,
                False)
            assert isinstance(node.get_child_book_nodes(), BookNodeList)
            assert node.get_child_book_nodes().available() == 1
            assert str(node.get_child_book_nodes().next().ident) == str(self.book_list[1].ident)
@pytest.fixture(scope="class",
                params=['TEST_SERVICE', 'TEST_SERVICE_ALWAYS_AUTHZ', 'TEST_SERVICE_NEVER_AUTHZ', 'TEST_SERVICE_CATALOGING', 'TEST_SERVICE_FILESYSTEM', 'TEST_SERVICE_MEMCACHE'])
def book_node_list_class_fixture(request):
    """Class-level setup for BookNodeList tests: manager, parent catalog, id list."""
    # Implemented from init template for BinNodeList
    request.cls.service_config = request.param
    request.cls.svc_mgr = Runtime().get_service_manager(
        'COMMENTING',
        proxy=PROXY,
        implementation=request.cls.service_config)
    if not is_never_authz(request.cls.service_config):
        create_form = request.cls.svc_mgr.get_book_form_for_create([])
        create_form.display_name = 'Test Book'
        create_form.description = 'Test Book for BookNodeList tests'
        request.cls.catalog = request.cls.svc_mgr.create_book(create_form)
        request.cls.book_node_ids = list()

    def class_tear_down():
        if not is_never_authz(request.cls.service_config):
            for obj in request.cls.book_node_ids:
                request.cls.svc_mgr.delete_book(obj)
            request.cls.svc_mgr.delete_book(request.cls.catalog.ident)

    # BUG FIX: class_tear_down was defined but never registered, so the books
    # created by this fixture were never deleted. Register the finalizer the
    # same way every sibling class fixture in this module does.
    request.addfinalizer(class_tear_down)
@pytest.fixture(scope="function")
def book_node_list_test_fixture(request):
    """Per-test setup: two books in a root->child hierarchy, wrapped in a BookNodeList."""
    # Implemented from init template for BinNodeList
    from dlkit.json_.commenting.objects import BookNodeList, BookNode
    request.cls.book_node_list = []
    if not is_never_authz(request.cls.service_config):
        for num in range(2):
            form = request.cls.svc_mgr.get_book_form_for_create([])
            form.display_name = 'Test BookNode ' + str(num)
            form.description = 'Test BookNode for BookNodeList tests'
            book = request.cls.svc_mgr.create_book(form)
            request.cls.book_node_list.append(BookNode(book.object_map))
            request.cls.book_node_ids.append(book.ident)
        # Arrange the catalogs in a hierarchy: node 0 is the root, node 1 its child.
        request.cls.svc_mgr.add_root_book(request.cls.book_node_list[0].ident)
        request.cls.svc_mgr.add_child_book(
            request.cls.book_node_list[0].ident,
            request.cls.book_node_list[1].ident)
        request.cls.book_node_list = BookNodeList(request.cls.book_node_list)
@pytest.mark.usefixtures("book_node_list_class_fixture", "book_node_list_test_fixture")
class TestBookNodeList(object):
    """Tests for BookNodeList"""

    def test_get_next_book_node(self):
        """Tests get_next_book_node"""
        # From test_templates/resource.py::ResourceList::get_next_resource_template
        from dlkit.abstract_osid.commenting.objects import BookNode
        if is_never_authz(self.service_config):
            return
        assert isinstance(self.book_node_list.get_next_book_node(), BookNode)

    def test_get_next_book_nodes(self):
        """Tests get_next_book_nodes"""
        # From test_templates/resource.py::ResourceList::get_next_resources_template
        from dlkit.abstract_osid.commenting.objects import BookNodeList, BookNode
        if is_never_authz(self.service_config):
            return
        batch = self.book_node_list.get_next_book_nodes(2)
        assert isinstance(batch, BookNodeList)
        for entry in batch:
            assert isinstance(entry, BookNode)
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.compiler.jit."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
# TODO(keveman): #6568 Remove this hack that makes dlopen() not crash.
if hasattr(sys, "getdlopenflags") and hasattr(sys, "setdlopenflags"):
import ctypes # pylint: disable=g-import-not-at-top
sys.setdlopenflags(sys.getdlopenflags() | ctypes.RTLD_GLOBAL)
# pylint: disable=g-import-not-at-top
from tensorflow.contrib.compiler import jit
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import function
from tensorflow.python.framework import op_def_registry
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import gradients
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
# pylint: enable=g-import-not-at-top
_REGISTERED_OPS = op_def_registry.get_registered_ops()
def enable_jit_nonstateful(node_def):
  """JIT predicate: compile every op except stateful ones (RNG, variables)."""
  op_def = _REGISTERED_OPS.get(node_def.op)
  if op_def is None:
    raise ValueError("Unregistered op being created: %s" % node_def)
  return not op_def.is_stateful
class JITTest(test.TestCase):
  """Tests that `jit.experimental_jit_scope` stamps _XlaCompile/_XlaScope."""

  def compute(self, use_jit, compute_fn):
    """Build `compute_fn()` inside a jit scope and evaluate it.

    Args:
      use_jit: bool or callable, forwarded to `jit.experimental_jit_scope`.
      compute_fn: zero-arg callable that builds and returns a tensor.

    Returns:
      A `(tensor, value)` pair: the built tensor and its evaluated result.
    """
    # Fixed graph seed so repeated builds are comparable across jit modes.
    random_seed.set_random_seed(1234)
    with self.test_session(graph=ops.Graph()) as sess:
      with jit.experimental_jit_scope(use_jit):
        r = compute_fn()
      sess.run(variables.global_variables_initializer())
      return (r, sess.run(r))

  def testJITCreateOpsLambda(self):
    """Test several ways of customizing the compilation attribute."""
    def create_ops():
      with variable_scope.variable_scope(
          "root",
          initializer=init_ops.random_uniform_initializer(
              -0.1, 0.1, seed=2)):
        inputs = random_ops.random_uniform((1,), seed=1)
        return inputs
    # use_jit=False, use_jit=callable-predicate, use_jit=True variants.
    v_false_1_t, v_false_1 = self.compute(False, create_ops)
    _, v_false_2 = self.compute(False, create_ops)
    v_true_1_t, v_true_1 = self.compute(enable_jit_nonstateful, create_ops)
    _, v_true_2 = self.compute(enable_jit_nonstateful, create_ops)
    v_all_true_t, _ = self.compute(True, create_ops)
    self.assertEqual(False, v_false_1_t.op.get_attr("_XlaCompile"))
    v_true_1_t_sampler_op = v_true_1_t.graph.get_operation_by_name(
        "root/random_uniform/RandomUniform")
    v_all_true_t_sampler_op = v_all_true_t.graph.get_operation_by_name(
        "root/random_uniform/RandomUniform")
    # The stateful RandomUniform sampler is excluded by the predicate but
    # included when use_jit=True.
    self.assertEqual(False, v_true_1_t_sampler_op.get_attr("_XlaCompile"))
    self.assertEqual(True, v_all_true_t_sampler_op.get_attr("_XlaCompile"))
    self.assertEqual(True, v_true_1_t.op.get_attr("_XlaCompile"))
    self.assertEqual(True, v_all_true_t.op.get_attr("_XlaCompile"))
    # Additionally ensure that where no JIT compilation happens on the
    # random_uniform op, the output values are identical to the case
    # where no JIT compilation happens anywhere.
    self.assertAllClose(v_false_1, v_false_2)
    self.assertAllClose(v_true_1, v_true_2)
    self.assertAllClose(v_false_1, v_true_1)

  def testJITXlaScope(self):
    """Sibling jit scopes get distinct _XlaScope values; nesting reuses one."""
    with self.test_session(graph=ops.Graph()):
      with jit.experimental_jit_scope(True):
        # XlaScope 0
        a1 = constant_op.constant(1)
      with jit.experimental_jit_scope(True):
        # XlaScope 1
        a2 = constant_op.constant(1)
        with jit.experimental_jit_scope(True):
          # XlaScope still 1, depth 1
          a3 = constant_op.constant(1)
          with jit.experimental_jit_scope(True):
            # XlaScope still 1, depth 2
            a4 = constant_op.constant(1)
          # XlaScope still 1, depth 1
          a5 = constant_op.constant(1)
      with jit.experimental_jit_scope(True):
        # XlaScope now 2, depth 0
        a6 = constant_op.constant(1)
    self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
    self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
    self.assertEqual(b"jit_scope_1", a3.op.get_attr("_XlaScope"))
    self.assertEqual(b"jit_scope_1", a4.op.get_attr("_XlaScope"))
    self.assertEqual(b"jit_scope_1", a5.op.get_attr("_XlaScope"))
    self.assertEqual(b"jit_scope_2", a6.op.get_attr("_XlaScope"))

  def testJITVariableSeed(self):
    """Test that the stateful initializer is not marked for compilation.

    XLA does not currently support seeded initialization and XLA initializers
    therefore return different values than non-XLA counterparts. Here
    we ensure that if we can disable JIT compilation for the initializers and
    get the same variable values as if no JIT compilation happened.
    """
    def create_ops():
      with variable_scope.variable_scope(
          "root",
          initializer=init_ops.random_uniform_initializer(
              -0.1, 0.1, seed=2)):
        inputs = variable_scope.get_variable("var", (1,))
        return inputs
    _, v_false_1 = self.compute(False, create_ops)
    _, v_false_2 = self.compute(False, create_ops)
    _, v_true_1 = self.compute(enable_jit_nonstateful, create_ops)
    _, v_true_2 = self.compute(enable_jit_nonstateful, create_ops)
    self.assertAllClose(v_false_1, v_false_2)
    self.assertAllClose(v_true_1, v_true_2)
    self.assertAllClose(v_false_1, v_true_1)
class CompilationEnabledInGradientTest(test.TestCase):
  """Tests that gradient ops inherit the jit-scope compilation attributes."""

  def testCompilationInGradient(self):
    """Gradient ops of a compiled forward op are themselves marked compiled."""
    with self.test_session():
      x = constant_op.constant(3)
      y_nc = math_ops.add(x, x, name="not_compiled")
      with jit.experimental_jit_scope():
        y_c = math_ops.add(y_nc, y_nc, name="compiled")
      x_grads = gradients.gradients([y_c], [x])[0]
      operations = x_grads.graph.get_operations()
      c_grad_ops = [
          op for op in operations if "gradients/compiled" in op.name]
      nc_grad_ops = [
          op for op in operations if "gradients/not_compiled" in op.name]
      self.assertGreater(len(c_grad_ops), 0)
      self.assertGreater(len(nc_grad_ops), 0)
      for cg in c_grad_ops:
        self.assertEqual(True, cg.get_attr("_XlaCompile"))
      for ncg in nc_grad_ops:
        # Ops outside the jit scope must not carry the attribute at all.
        with self.assertRaisesRegexp(ValueError, "No attr named"):
          ncg.get_attr("_XlaCompile")
      # d/dx (4 * x)
      self.assertAllClose(4, x_grads.eval())

  def testCompilationGradientScopeNames(self):
    """Gradient _XlaScope is derived from the forward scope plus grad name."""
    with self.test_session(graph=ops.Graph()):
      with jit.experimental_jit_scope(True):
        # XlaScope 0
        a1 = constant_op.constant(1)
        a1t = a1 + a1
      with jit.experimental_jit_scope(True):
        # XlaScope 1
        a2 = constant_op.constant(1)
        a2t = a2 + a2
      self.assertEqual(b"jit_scope_0", a1.op.get_attr("_XlaScope"))
      self.assertEqual(b"jit_scope_1", a2.op.get_attr("_XlaScope"))
      grad_a1 = gradients.gradients(a1t, a1, name="GA")[0]
      grad_a2 = gradients.gradients(a2t, a2, name="GB")[0]
      # Step to the actual gradient-producing op (input of the returned op).
      grad_a1 = grad_a1.op.inputs[0]
      grad_a2 = grad_a2.op.inputs[0]
      self.assertEqual(True, grad_a1.op.get_attr("_XlaCompile"))
      self.assertEqual(True, grad_a2.op.get_attr("_XlaCompile"))
      self.assertEqual(b"jit_scope_0_grad_GA",
                       grad_a1.op.get_attr("_XlaScope"))
      self.assertEqual(b"jit_scope_1_grad_GB",
                       grad_a2.op.get_attr("_XlaScope"))

  def testPlaysNicelyWithDefun(self):
    """Defun(compiled=True) controls compilation; the outer scope is ignored."""
    with self.test_session(graph=ops.Graph()) as sess:
      with jit.experimental_jit_scope(True):  # This should be ignored
        @function.Defun(compiled=True, noinline=True)
        def mulop(x1, x2):
          return x1 * x2
        x = constant_op.constant(1.0)
        r = mulop(x, x)
        g_r = gradients.gradients(r, x, name="GA")[0]
      # Ensure the forward function is compiled
      graph_def = r.graph.as_graph_def()
      func_attrs = graph_def.library.function[0].attr
      self.assertTrue(func_attrs["_XlaCompile"].b)
      self.assertEqual(b"function_mulop", func_attrs["_XlaScope"].s)
      # Ensure the gradient (SymbolicGradient) is compiled
      grad_op = g_r.op.inputs[0].op
      self.assertTrue(grad_op.get_attr("_XlaCompile"))
      self.assertEqual(b"function_mulop_grad_GA",
                       grad_op.get_attr("_XlaScope"))
      # Ensure the ops run
      # grad(x1*x1) = 2*x1
      self.assertAllClose([1.0, 1.0, 2.0], sess.run([x, r, g_r]))
# Standard TensorFlow test entry point: run every TestCase in this module.
if __name__ == "__main__":
  test.main()
| |
# Copyright 2012 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# W0603: Using the global statement
# W0621: Redefining name %s from outer scope
# pylint: disable=W0603,W0621
from __future__ import print_function
import getpass
import inspect
import os
import sys
import textwrap
from oslo_utils import encodeutils
from oslo_utils import strutils
import prettytable
import six
from six import moves
from kingclient.openstack.common._i18n import _
class MissingArgs(Exception):
    """Supplied arguments are not sufficient for calling a function."""

    def __init__(self, missing):
        # Keep the raw list of missing argument names for programmatic access.
        self.missing = missing
        super(MissingArgs, self).__init__(
            _("Missing arguments: %s") % ", ".join(missing))
def validate_args(fn, *args, **kwargs):
    """Check that the supplied args are sufficient for calling a function.

    >>> validate_args(lambda a: None)
    Traceback (most recent call last):
    ...
    MissingArgs: Missing arguments: a
    >>> validate_args(lambda a, b, c, d: None, 0, c=1)
    Traceback (most recent call last):
    ...
    MissingArgs: Missing arguments: b, d

    :param fn: the function to check
    :param args: the positional arguments supplied
    :param kwargs: the keyword arguments supplied
    :raises MissingArgs: if any required argument is not supplied
    """
    # BUG FIX: inspect.getargspec() was removed in Python 3.11. Prefer
    # getfullargspec() when it exists (Python 3) and fall back for Python 2.
    getspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    argspec = getspec(fn)
    num_defaults = len(argspec.defaults or [])
    # Arguments without defaults are the required ones.
    required_args = argspec.args[:len(argspec.args) - num_defaults]

    def isbound(method):
        return getattr(method, '__self__', None) is not None

    if isbound(fn):
        # Bound methods receive their first argument (self/cls) implicitly.
        required_args.pop(0)

    # Names supplied as keywords are satisfied; of the remainder, the first
    # len(args) are covered positionally.
    missing = [arg for arg in required_args if arg not in kwargs]
    missing = missing[len(args):]
    if missing:
        raise MissingArgs(missing)
def arg(*args, **kwargs):
    """Decorator for CLI args.

    Example:

    >>> @arg("name", help="Name of the new entity")
    ... def entity_create(args):
    ...     pass
    """
    def _bind(func):
        add_arg(func, *args, **kwargs)
        return func
    return _bind
def env(*args, **kwargs):
    """Returns the first environment variable set.

    If all are empty, defaults to '' or keyword arg `default`.
    """
    for name in args:
        val = os.environ.get(name)
        if val:
            return val
    return kwargs.get('default', '')
def add_arg(func, *args, **kwargs):
    """Bind CLI arguments to a shell.py `do_foo` function."""
    arguments = getattr(func, 'arguments', None)
    if arguments is None:
        arguments = func.arguments = []
    # Skip duplicates, which can occur when the module is shared across tests.
    if (args, kwargs) not in arguments:
        # Decorators apply bottom-up; prepending keeps positional options in
        # their declared (top-down) order.
        arguments.insert(0, (args, kwargs))
def unauthenticated(func):
    """Adds 'unauthenticated' attribute to decorated function.

    Usage:

    >>> @unauthenticated
    ... def mymethod(f):
    ...     pass
    """
    setattr(func, 'unauthenticated', True)
    return func
def isunauthenticated(func):
    """Checks if the function does not require authentication.

    Mark such functions with the `@unauthenticated` decorator.

    :returns: bool
    """
    if hasattr(func, 'unauthenticated'):
        return func.unauthenticated
    return False
def print_list(objs, fields, formatters=None, sortby_index=0,
               mixed_case_fields=None, field_labels=None):
    """Print a list of objects as a table, one row per object.

    :param objs: iterable of :class:`Resource`
    :param fields: attributes that correspond to columns, in order
    :param formatters: `dict` of callables for field formatting
    :param sortby_index: index of the field for sorting table rows
    :param mixed_case_fields: fields corresponding to object attributes that
        have mixed case names (e.g., 'serverId')
    :param field_labels: Labels to use in the heading of the table, default to
        fields.
    :raises ValueError: if field_labels does not match fields in length
    """
    formatters = formatters or {}
    mixed_case_fields = mixed_case_fields or []
    field_labels = field_labels or fields
    if len(field_labels) != len(fields):
        # BUG FIX: the mapping was previously passed as a second ValueError
        # argument, leaving the %(...)s placeholders unexpanded; interpolate it.
        raise ValueError(_("Field labels list %(labels)s has different number "
                           "of elements than fields list %(fields)s") %
                         {'labels': field_labels, 'fields': fields})
    if sortby_index is None:
        kwargs = {}
    else:
        kwargs = {'sortby': field_labels[sortby_index]}
    pt = prettytable.PrettyTable(field_labels)
    pt.align = 'l'
    for o in objs:
        row = []
        for field in fields:
            if field in formatters:
                row.append(formatters[field](o))
            else:
                # Mixed-case attributes keep their case; everything else is
                # lower-cased, with spaces mapped to underscores.
                if field in mixed_case_fields:
                    field_name = field.replace(' ', '_')
                else:
                    field_name = field.lower().replace(' ', '_')
                data = getattr(o, field_name, '')
                row.append(data)
        pt.add_row(row)
    if six.PY3:
        print(encodeutils.safe_encode(pt.get_string(**kwargs)).decode())
    else:
        print(encodeutils.safe_encode(pt.get_string(**kwargs)))
def print_dict(dct, dict_property="Property", wrap=0, dict_value='Value'):
    """Print a `dict` as a table of two columns.

    :param dct: `dict` to print
    :param dict_property: name of the first column
    :param wrap: wrapping for the second column
    :param dict_value: header label for the value (second) column
    """
    pt = prettytable.PrettyTable([dict_property, dict_value])
    pt.align = 'l'
    for k, v in sorted(dct.items()):
        # convert dict to str to check length
        if isinstance(v, dict):
            v = six.text_type(v)
        if wrap > 0:
            v = textwrap.fill(six.text_type(v), wrap)
        # if value has a newline, add in multiple rows
        # e.g. fault with stacktrace
        # NOTE: r'\n' is the literal two-character sequence backslash + n (as
        # found in repr()-style text), not an actual newline character.
        if v and isinstance(v, six.string_types) and r'\n' in v:
            lines = v.strip().split(r'\n')
            col1 = k
            for line in lines:
                pt.add_row([col1, line])
                # only show the key on the first of the continuation rows
                col1 = ''
        else:
            pt.add_row([k, v])
    if six.PY3:
        print(encodeutils.safe_encode(pt.get_string()).decode())
    else:
        print(encodeutils.safe_encode(pt.get_string()))
def get_password(max_password_prompts=3):
    """Read password from TTY.

    Prompts up to *max_password_prompts* times; when the OS_VERIFY_PASSWORD
    environment variable is truthy, each attempt must be typed twice.

    :param max_password_prompts: maximum number of prompt attempts
    :returns: the password, or None when stdin is not a TTY, the attempts are
        exhausted, or the user sends EOF (Ctrl-D)
    """
    verify = strutils.bool_from_string(env("OS_VERIFY_PASSWORD"))
    pw = None
    if hasattr(sys.stdin, "isatty") and sys.stdin.isatty():
        # Check for Ctrl-D
        try:
            for __ in moves.range(max_password_prompts):
                pw1 = getpass.getpass("OS Password: ")
                if verify:
                    pw2 = getpass.getpass("Please verify: ")
                else:
                    pw2 = pw1
                # accept only a non-empty, matching pair
                if pw1 == pw2 and pw1:
                    pw = pw1
                    break
        except EOFError:
            pass
    return pw
def service_type(stype):
    """Adds 'service_type' attribute to decorated function.

    Usage:

    .. code-block:: python

       @service_type('volume')
       def mymethod(f):
           ...
    """
    def _tag(f):
        f.service_type = stype
        return f
    return _tag
def get_service_type(f):
    """Retrieves service type from function."""
    try:
        return f.service_type
    except AttributeError:
        return None
def pretty_choice_list(l):
    """Render an iterable as a comma-separated list of single-quoted choices."""
    return ', '.join(["'%s'" % item for item in l])
def exit(msg=''):
    """Print *msg* to stderr (when non-empty) and terminate with status 1."""
    if msg:
        print(msg, file=sys.stderr)
    sys.exit(1)
| |
import re
import sys
import copy
import socket
from functools import wraps
from datetime import datetime
import warnings
from collections import Mapping, Container
if sys.version_info[0] == 3:
_str_type = str
_int_types = (int,)
else:
_str_type = basestring
_int_types = (int, long)
class SchemaError(ValueError):
    """
    errors encountered in processing a schema (subclass of :class:`ValueError`)
    """
class ValidationError(ValueError):
    """
    validation errors encountered during validation (subclass of
    :class:`ValueError`)
    """
def _generate_datetime_validator(format_option, dateformat_string):
    """Build a format validator that checks values against *dateformat_string*.

    The returned callable appends to ``validator.error_list`` when the value
    cannot be parsed with the given strptime format.

    NOTE: the error message is interpolated with ``% locals()``, so the inner
    parameter names (value, fieldname, format_option) are load-bearing.
    """
    def validate_format_datetime(validator, fieldname, value, format_option):
        try:
            datetime.strptime(value, dateformat_string)
        except ValueError:
            validator.error_list.append(
                "Value %(value)r of field '%(fieldname)s' is not in "
                "'%(format_option)s' format" % locals())
    return validate_format_datetime
# Pre-built validators for the standard JSON-schema date/time formats.
validate_format_date_time = _generate_datetime_validator('date-time',
                                                         '%Y-%m-%dT%H:%M:%SZ')
validate_format_date = _generate_datetime_validator('date', '%Y-%m-%d')
validate_format_time = _generate_datetime_validator('time', '%H:%M:%S')
def validate_format_utc_millisec(validator, fieldname, value, format_option):
    """Format validator for 'utc-millisec': the value must be a positive number.

    Appends error strings to ``validator.error_list`` instead of raising.
    """
    if not isinstance(value, (int, float)):
        validator.error_list.append("Value %(value)r of field '%(fieldname)s' is "
                                    "not a number" % locals())
        # BUG FIX: bail out here -- comparing a non-number with 0 raises
        # TypeError on Python 3, and a "not positive" error would be redundant.
        return
    if not value > 0:
        validator.error_list.append("Value %(value)r of field '%(fieldname)s' is "
                                    "not a positive number" % locals())
def validate_format_ip_address(validator, fieldname, value, format_option):
    """Format validator for 'ip-address': the value must be a dotted-quad IPv4.

    Appends error strings to ``validator.error_list`` instead of raising.
    """
    try:
        socket.inet_aton(value)
        # Make sure we expect "X.X.X.X" as socket.inet_aton() converts "1"
        # to "0.0.0.1"
        ip = len(value.split('.')) == 4
    # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # any ordinary failure (non-string, unparsable) simply means "not an IP".
    except Exception:
        ip = False
    if not ip:
        validator.error_list.append("Value %(value)r of field '%(fieldname)s' is "
                                    "not a ip-address" % locals())
# Registry of built-in 'format' validators, keyed by the JSON-schema format
# name; SchemaValidator copies this as its starting set (see __init__).
DEFAULT_FORMAT_VALIDATORS = {
    'date-time': validate_format_date_time,
    'date': validate_format_date,
    'time': validate_format_time,
    'utc-millisec': validate_format_utc_millisec,
    'ip-address': validate_format_ip_address,
}
class MetaSchemaValidator(type):
    ''' A metaclass that helps keeping track of the fields path

    Every ``validate_*`` method (except the plain ``validate_type_*``
    predicates) is wrapped so that the validator's current_field /
    current_object stacks reflect the dotted path of the field being
    validated while the method runs.
    '''
    def __new__(cls, name, bases, dct):
        for method_name, method in list(dct.items()):
            if method_name.startswith('validate_') and not method_name.startswith('validate_type_') and callable(method):
                # make_wrapper is needed to bind method_name/method per
                # iteration (avoids the late-binding closure pitfall).
                def make_wrapper(method_name, method):
                    @wraps(method)
                    def wrapper(self, x, fieldname, schema, *a, **kw):
                        # Integer fieldnames are list indices; render as [i].
                        name = fieldname if not isinstance(fieldname, int) else '[%d]' % fieldname
                        if self.current_object and self.current_object[-1] is x:
                            # Same container as the enclosing call: just update
                            # the leaf name instead of pushing a new level.
                            pop_at_end = False
                            self.current_field[-1] = name
                        else:
                            pop_at_end = True
                            self.current_object.append(x)
                            self.current_field.append(name)
                        res = method(self, x, fieldname, schema, *a, **kw)
                        if pop_at_end:
                            self.current_field.pop()
                            self.current_object.pop()
                        return res
                    return wrapper
                dct[method_name] = make_wrapper(method_name, method)
        return super(MetaSchemaValidator, cls).__new__(cls, name, bases, dct)
class SchemaValidator(object):
'''
Validator largely based upon the JSON Schema proposal but useful for
validating arbitrary python data structures.
:param format_validators: optional dictionary of custom format validators
:param required_by_default: defaults to True, set to False to make
``required`` schema attribute False by default.
:param blank_by_default: defaults to False, set to True to make ``blank``
schema attribute True by default.
'''
__metaclass__ = MetaSchemaValidator
    @property
    def current_field_name(self):
        """Dotted path of the field currently being validated, or None at root."""
        current = u'.'.join(self.current_field[1:])  # we remove the first _data.
        return current if current else None
    def __init__(self, format_validators=None, required_by_default=True,
                 blank_by_default=False):
        if format_validators is None:
            # Copy so register_format_validator() never mutates the shared default.
            format_validators = DEFAULT_FORMAT_VALIDATORS.copy()
        self._format_validators = format_validators
        self.required_by_default = required_by_default
        self.blank_by_default = blank_by_default
        # Errors collected during the current validation pass.
        self.error_list = []
        # Saved error lists for push/pop_error_stack (speculative validation).
        self.error_stack = []
        # Parallel stacks maintained by the MetaSchemaValidator wrappers to
        # track the dotted path of the field currently being validated.
        self.current_field = []
        self.current_object = []
def get(self, x, field, default=None):
try:
return x[field]
except KeyError:
return default
except IndexError:
return default
    def push_error_stack(self):
        """Start a fresh error list, saving the current one for later restore."""
        self.error_stack.append(self.error_list)
        self.error_list = []

    def pop_error_stack(self):
        """Restore the saved error list; return the errors collected since push."""
        last_error = self.error_list
        self.error_list = self.error_stack.pop()
        return last_error

    def register_format_validator(self, format_name, format_validator_fun):
        """Register a custom validator callable for the given 'format' name."""
        self._format_validators[format_name] = format_validator_fun
    # -- validate_type_* predicates: one boolean check per JSON-schema type.
    # These are deliberately NOT wrapped by MetaSchemaValidator (no field-path
    # tracking) because of their validate_type_ prefix.
    def validate_type_string(self, val):
        return isinstance(val, _str_type)

    def validate_type_integer(self, val):
        # exact type() check so that bool does not count as an integer
        return type(val) in _int_types

    def validate_type_number(self, val):
        # exact type() check so that bool does not count as a number
        return type(val) in _int_types + (float,)

    def validate_type_boolean(self, val):
        return type(val) == bool

    def validate_type_object(self, val):
        return isinstance(val, Mapping)

    def validate_type_array(self, val):
        return isinstance(val, (list, tuple))

    def validate_type_null(self, val):
        return val is None

    def validate_type_any(self, val):
        return True

    def _error(self, code, message=None, suppl=None):
        # Record a structured error: (code, dotted field path, message, extra).
        self.error_list.append((code, self.current_field_name, message, suppl))
    def validate_type(self, x, fieldname, schema, fieldtype=None):
        '''
        Validates that the fieldtype specified is correct for the given
        data.

        ``fieldtype`` may be a simple type name (dispatched to a
        ``validate_type_<name>`` predicate), a list/tuple of alternatives
        (a union: matching any one suffices), or a nested schema dict.
        '''
        # We need to know if the field exists or if it's just Null
        fieldexists = True
        try:
            value = x[fieldname]
        except KeyError:
            fieldexists = False
            value = None
        if fieldtype and fieldexists:
            if isinstance(fieldtype, (list, tuple)):
                # Match if type matches any one of the types in the list.
                # Each alternative is tried in an isolated error scope so
                # failed attempts leave no trace.
                datavalid = False
                for eachtype in fieldtype:
                    self.push_error_stack()
                    self.validate_type(x, fieldname, eachtype, eachtype)
                    errs = self.pop_error_stack()
                    if not errs:
                        datavalid = True
                        break
                if not datavalid:
                    self._error('incorrect-type', fieldtype, self.get(x, fieldname))
                return
            elif isinstance(fieldtype, dict):
                # Nested schema: recurse, merging any errors it produces.
                self.push_error_stack()
                self.__validate(fieldname, x, fieldtype)
                errs = self.pop_error_stack()
                if errs:
                    self.error_list += errs
            else:
                # Simple type name: dispatch to the matching predicate.
                try:
                    type_checker = getattr(self, 'validate_type_%s' % fieldtype)
                except AttributeError:
                    raise SchemaError("Field type '%s' is not supported." %
                                      fieldtype)
                if not type_checker(value):
                    self._error('incorrect-type', fieldtype, self.get(x, fieldname))
def validate_properties(self, x, fieldname, schema, properties=None):
'''
Validates properties of a JSON object by processing the object's
schema recursively
'''
value = self.get(x, fieldname)
if value is not None:
if isinstance(value, dict):
if isinstance(properties, dict):
for eachProp in properties:
self.__validate(eachProp, value, properties.get(eachProp))
else:
raise SchemaError("Properties definition of field '%s' is "
"not an object" % fieldname)
    def validate_items(self, x, fieldname, schema, items=None):
        '''
        Validates that all items in the list for the given field match the
        given schema.

        ``items`` may be a positional list/tuple of schemas (one per
        element) or a single schema dict applied to every element.
        '''
        value = self.get(x, fieldname)
        if value is not None:
            if isinstance(value, (list, tuple)):
                if isinstance(items, (list, tuple)):
                    if len(items) != len(value):
                        # resolve defaults now: missing trailing positions
                        # are filled from each sub-schema's 'default'
                        for i, item in enumerate(items):
                            try:
                                value[i]
                            except IndexError:
                                # position is absent from the value; append
                                # the declared default when there is one
                                if 'default' in item:
                                    value.append(item['default'])
                        if not 'additionalItems' in schema and len(items) != len(value):
                            self._error('incorrect-item-length')
                            return
                    else:
                        # Lengths match: validate each position against its
                        # own schema.
                        for itemIndex in range(len(items)):
                            self.__validate(itemIndex, value, items[itemIndex])
                elif isinstance(items, dict):
                    # Single schema: every element must satisfy it; each is
                    # checked in its own error scope and errors are merged.
                    for i, eachItem in enumerate(value):
                        self.push_error_stack()
                        self.__validate(i, value, items)
                        errs = self.pop_error_stack()
                        if errs:
                            self.error_list += errs
                else:
                    raise SchemaError("Properties definition of field '%s' is "
                                      "not a list or an object" % fieldname)
def validate_required(self, x, fieldname, schema, required):
'''
Validates that the given field is present if required is True
'''
# Make sure the field is present
if fieldname not in x and required:
self._error('missing-required')
def validate_blank(self, x, fieldname, schema, blank=False):
'''
Validates that the given field is not blank if blank=False
'''
value = self.get(x, fieldname)
if isinstance(value, _str_type) and not blank and not value:
self._error('blank')
def validate_patternProperties(self, x, fieldname, schema,
patternproperties=None):
if patternproperties == None:
patternproperties = {}
value_obj = self.get(x, fieldname, {})
for pattern, schema in patternproperties.items():
for key, value in value_obj.items():
if re.match(pattern, key):
self.__validate(key, value_obj, schema)
    def validate_additionalItems(self, x, fieldname, schema,
                                 additionalItems=False):
        '''
        Validates list items beyond those covered by a positional ``items``
        schema: False forbids extras, True allows them, and a schema dict
        validates every extra item against itself.
        '''
        value = self.get(x, fieldname)
        if not isinstance(value, (list, tuple)):
            return
        if isinstance(additionalItems, bool):
            if additionalItems or 'items' not in schema:
                # Extras explicitly allowed, or no positional schema to
                # measure against.
                return
            elif len(value) != len(schema['items']):
                self._error('incorrect-list-length')
            return
        # additionalItems is itself a schema: validate the trailing items
        # (those past the positional `items` list) against it.
        remaining = value[len(schema['items']):]
        if len(remaining) > 0:
            self._validate(remaining, {'items': additionalItems})
    def validate_additionalProperties(self, x, fieldname, schema, additionalProperties=None):
        '''
        Validates additional properties of a JSON object that were not
        specifically defined by the properties property OR the patternProperties
        object.
        By default, the validator behaves like True was passed to additional,
        which means that we mostly want to use it with False or a schema.
        '''
        # Shouldn't be validating additionalProperties on non-dicts
        value = self.get(x, fieldname)
        if not isinstance(value, dict):
            return
        # If additionalProperties is the boolean value True then we accept
        # any additional properties.
        if isinstance(additionalProperties, bool) and additionalProperties:
            return
        if isinstance(additionalProperties, (dict, bool)):
            properties = schema.get("properties", [])
            patternProperties = schema.get('patternProperties', [])
            if properties is None:
                properties = {}
            if value is None:
                value = {}
            for eachProperty in value:
                # Properties covered by an explicit declaration are handled
                # by validate_properties, not here.
                if eachProperty in properties:
                    continue
                # Check if the property matches a patternProperty
                matched = False
                for pattern in patternProperties:
                    if re.match(pattern, eachProperty):
                        matched = True
                        break
                if matched:
                    continue
                # If additionalProperties is the boolean value False
                # then we don't accept any additional properties.
                if (isinstance(additionalProperties, bool) and not additionalProperties):
                    self._error('forbidden-property', eachProperty)
                else:
                    # If it's an object, then we try to validate the value
                    # on the schema.
                    # NOTE(review): this passes the whole object, not just the
                    # extra property's value — confirm this is intentional.
                    self.validate(value, additionalProperties)
        else:
            raise SchemaError("additionalProperties schema definition for "
                              "field '%s' is not an object" % fieldname)
    def validate_dependencies(self, x, fieldname, schema, dependencies=None):
        '''
        Validates that fields required by the presence of this field also
        exist.  ``dependencies`` may be a single field name, a list of
        field names, or a dict mapping keys to further required keys.
        Only checked when the field itself has a (non-None) value.
        '''
        if self.get(x, fieldname) is not None:
            # handle cases where dependencies is a string or list of strings
            if isinstance(dependencies, _str_type):
                dependencies = [dependencies]
            if isinstance(dependencies, (list, tuple)):
                for dependency in dependencies:
                    if dependency not in x:
                        # Stop at the first missing dependency.
                        self._error('dependency', dependency)
                        return
            elif isinstance(dependencies, dict):
                # NOTE: the version 3 spec is really unclear on what this means
                # based on the meta-schema I'm assuming that it should check
                # that if a key exists, the appropriate value exists
                for k, v in dependencies.items():
                    if k in x and v not in x:
                        self._error('dependency', k, v)
                        return
            else:
                raise SchemaError("'dependencies' must be a string, "
                                  "list of strings, or dict")
def validate_minimum(self, x, fieldname, schema, minimum=None):
'''
Validates that the field is longer than or equal to the minimum
length if specified
'''
exclusive = schema.get('exclusiveMinimum', False)
value = self.get(x, fieldname)
if value is not None:
if value is not None:
if (type(value) in (int, float) and
(not exclusive and value < minimum) or
(exclusive and value <= minimum)):
self._error('less-than-minimum', minimum, value)
def validate_maximum(self, x, fieldname, schema, maximum=None):
'''
Validates that the field is shorter than or equal to the maximum
length if specified.
'''
exclusive = schema.get('exclusiveMaximum', False)
value = self.get(x, fieldname)
if value is not None:
if (type(value) in (int, float) and
(not exclusive and value > maximum) or
(exclusive and value >= maximum)):
self._error('more-than-maximum', maximum, value)
def validate_maxLength(self, x, fieldname, schema, length=None):
'''
Validates that the value of the given field is shorter than or equal
to the specified length
'''
value = self.get(x, fieldname)
if isinstance(value, (_str_type, list, tuple)) and len(value) > length:
self._error('too-long', length, len(value))
def validate_minLength(self, x, fieldname, schema, length=None):
'''
Validates that the value of the given field is longer than or equal
to the specified length
'''
value = self.get(x, fieldname)
if isinstance(value, (_str_type, list, tuple)) and len(value) < length:
self._error('too-short', length, len(value))
validate_minItems = validate_minLength
validate_maxItems = validate_maxLength
def validate_format(self, x, fieldname, schema, format_option=None):
'''
Validates the format of primitive data types
'''
value = self.get(x, fieldname)
format_validator = self._format_validators.get(format_option, None)
if format_validator and value:
format_validator(self, fieldname, value, format_option)
# TODO: warn about unsupported format ?
def validate_pattern(self, x, fieldname, schema, pattern=None):
'''
Validates that the given field, if a string, matches the given
regular expression.
'''
value = self.get(x, fieldname)
if isinstance(value, _str_type):
if not re.match(pattern, value):
self._error('pattern-mismatch', pattern, fieldname)
def validate_uniqueItems(self, x, fieldname, schema, uniqueItems=False):
'''
Validates that all items in an array instance MUST be unique
(contains no two identical values).
'''
# If additionalProperties is the boolean value True then we accept
# any additional properties.
if isinstance(uniqueItems, bool) and not uniqueItems:
return
values = self.get(x, fieldname)
if not isinstance(values, (list, tuple)):
return
hashables = set()
unhashables = []
for value in values:
if isinstance(value, (list, dict)):
container, add = unhashables, unhashables.append
else:
container, add = hashables, hashables.add
if value in container:
self._error('not-unique', value)
else:
add(value)
def validate_enum(self, x, fieldname, schema, options=None):
'''
Validates that the value of the field is equal to one of the
specified option values
'''
value = self.get(x, fieldname)
if value is not None:
if not isinstance(options, Container):
raise SchemaError("Enumeration %r for field '%s' must be a "
"container", (options, fieldname))
if value not in options:
self._error('not-in-enumeration', options, value)
    def validate_title(self, x, fieldname, schema, title=None):
        """Schema sanity check: ``title``, when given, must be a string."""
        if not isinstance(title, (_str_type, type(None))):
            raise SchemaError("The title for field '%s' must be a string" %
                              fieldname)

    def validate_description(self, x, fieldname, schema, description=None):
        """Schema sanity check: ``description``, when given, must be a string."""
        if not isinstance(description, (_str_type, type(None))):
            raise SchemaError("The description for field '%s' must be a string"
                              % fieldname)
def validate_divisibleBy(self, x, fieldname, schema, divisibleBy=None):
value = self.get(x, fieldname)
if not self.validate_type_number(value):
return
if divisibleBy == 0:
raise SchemaError("'%r' <- divisibleBy can not be 0" % schema)
if value % divisibleBy != 0:
self._error('not-divisible-by', divisibleBy, value)
def validate_extends(self, x, fieldname, schema, extends=None):
''' Kind of an inheritance for schema validation : the
field is to be checked against the provided schema
in the extends property.
'''
self.validate_type(x, fieldname, schema, extends)
def validate_disallow(self, x, fieldname, schema, disallow=None):
'''
Validates that the value of the given field does not match the
disallowed type.
'''
self.push_error_stack()
self.validate_type(x, fieldname, schema, disallow)
errs = self.pop_error_stack()
if len(errs) > 1:
return
self._error('disallowed-type', disallow, self.get(x, fieldname))
def validate(self, data, schema):
'''
Validates a piece of json data against the provided json-schema.
Returns the validated data.
'''
result = self._validate(data, schema)
if self.error_list:
raise ValidationError(self.error_list)
return result
def _validate(self, data, schema):
return self.__validate("_data", {"_data": data}, schema).get('_data')
    def __validate(self, fieldname, data, schema):
        '''
        Core dispatcher: applies every recognized schema attribute of
        *schema* to ``data[fieldname]`` and returns *data*.
        '''
        if schema is not None:
            if not isinstance(schema, dict):
                raise SchemaError("Schema structure is invalid.")
            # Shallow copy so attributes can be popped/overridden without
            # mutating the caller's schema.
            newschema = copy.copy(schema)
            # Materialize a declared default before any checks run.
            if isinstance(data, dict) and fieldname not in data and 'default' in schema:
                data[fieldname] = schema['default']
            if 'optional' in schema:
                raise SchemaError('The "optional" attribute has been replaced'
                                  ' by "required"')
            if 'requires' in schema:
                raise SchemaError('The "requires" attribute has been replaced'
                                  ' by "dependencies"')
            if 'blank' not in schema:
                newschema['blank'] = self.blank_by_default
            # 'required' is popped so the generic loop below does not run it
            # a second time.
            self.validate_required(data, fieldname, newschema,
                                   newschema.pop('required', self.required_by_default))
            if 'type' in schema:
                self.push_error_stack()
                self.validate_type(data, fieldname, newschema, newschema.pop('type'))
                errs = self.pop_error_stack()
                if errs:
                    # do not keep validating an object if its type was not correct !
                    self.error_list += errs
                    return data
            # Generic dispatch: each remaining schema attribute maps to a
            # validate_<attr> method when one exists; unknown attributes
            # are silently ignored.
            for schemaprop in newschema:
                validatorname = "validate_" + schemaprop
                validator = getattr(self, validatorname, None)
                if validator:
                    validator(data, fieldname, newschema, newschema.get(schemaprop))
        return data
__all__ = ['SchemaValidator']
| |
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Session Handling for SQLAlchemy backend.
Recommended ways to use sessions within this framework:
* Don't use them explicitly; this is like running with ``AUTOCOMMIT=1``.
`model_query()` will implicitly use a session when called without one
supplied. This is the ideal situation because it will allow queries
to be automatically retried if the database connection is interrupted.
.. note:: Automatic retry will be enabled in a future patch.
It is generally fine to issue several queries in a row like this. Even though
they may be run in separate transactions and/or separate sessions, each one
will see the data from the prior calls. If needed, undo- or rollback-like
functionality should be handled at a logical level. For an example, look at
the code around quotas and `reservation_rollback()`.
Examples:
.. code:: python
def get_foo(context, foo):
return (model_query(context, models.Foo).
filter_by(foo=foo).
first())
def update_foo(context, id, newfoo):
(model_query(context, models.Foo).
filter_by(id=id).
update({'foo': newfoo}))
def create_foo(context, values):
foo_ref = models.Foo()
foo_ref.update(values)
foo_ref.save()
return foo_ref
* Within the scope of a single method, keep all the reads and writes within
the context managed by a single session. In this way, the session's
`__exit__` handler will take care of calling `flush()` and `commit()` for
you. If using this approach, you should not explicitly call `flush()` or
`commit()`. Any error within the context of the session will cause the
session to emit a `ROLLBACK`. Database errors like `IntegrityError` will be
raised in `session`'s `__exit__` handler, and any try/except within the
context managed by `session` will not be triggered. And catching other
non-database errors in the session will not trigger the ROLLBACK, so
exception handlers should always be outside the session, unless the
developer wants to do a partial commit on purpose. If the connection is
dropped before this is possible, the database will implicitly roll back the
transaction.
.. note:: Statements in the session scope will not be automatically retried.
If you create models within the session, they need to be added, but you
do not need to call `model.save()`:
.. code:: python
def create_many_foo(context, foos):
session = sessionmaker()
with session.begin():
for foo in foos:
foo_ref = models.Foo()
foo_ref.update(foo)
session.add(foo_ref)
def update_bar(context, foo_id, newbar):
session = sessionmaker()
with session.begin():
foo_ref = (model_query(context, models.Foo, session).
filter_by(id=foo_id).
first())
(model_query(context, models.Bar, session).
filter_by(id=foo_ref['bar_id']).
update({'bar': newbar}))
.. note:: `update_bar` is a trivially simple example of using
``with session.begin``. Whereas `create_many_foo` is a good example of
when a transaction is needed, it is always best to use as few queries as
possible.
The two queries in `update_bar` can be better expressed using a single query
which avoids the need for an explicit transaction. It can be expressed like
so:
.. code:: python
def update_bar(context, foo_id, newbar):
subq = (model_query(context, models.Foo.id).
filter_by(id=foo_id).
limit(1).
subquery())
(model_query(context, models.Bar).
filter_by(id=subq.as_scalar()).
update({'bar': newbar}))
For reference, this emits approximately the following SQL statement:
.. code:: sql
UPDATE bar SET bar = ${newbar}
WHERE id=(SELECT bar_id FROM foo WHERE id = ${foo_id} LIMIT 1);
.. note:: `create_duplicate_foo` is a trivially simple example of catching an
   exception while using ``with session.begin``. Here we create two duplicate
   instances with the same primary key; the exception must be caught outside
   the context managed by a single session:
.. code:: python
def create_duplicate_foo(context):
foo1 = models.Foo()
foo2 = models.Foo()
foo1.id = foo2.id = 1
session = sessionmaker()
try:
with session.begin():
session.add(foo1)
session.add(foo2)
except exception.DBDuplicateEntry as e:
handle_error(e)
* Passing an active session between methods. Sessions should only be passed
to private methods. The private method must use a subtransaction; otherwise
SQLAlchemy will throw an error when you call `session.begin()` on an existing
transaction. Public methods should not accept a session parameter and should
not be involved in sessions within the caller's scope.
Note that this incurs more overhead in SQLAlchemy than the above means
due to nesting transactions, and it is not possible to implicitly retry
failed database operations when using this approach.
This also makes code somewhat more difficult to read and debug, because a
single database transaction spans more than one method. Error handling
becomes less clear in this situation. When this is needed for code clarity,
it should be clearly documented.
.. code:: python
def myfunc(foo):
session = sessionmaker()
with session.begin():
# do some database things
bar = _private_func(foo, session)
return bar
def _private_func(foo, session=None):
if not session:
session = sessionmaker()
with session.begin(subtransaction=True):
# do some other database things
return bar
There are some things which it is best to avoid:
* Don't keep a transaction open any longer than necessary.
This means that your ``with session.begin()`` block should be as short
as possible, while still containing all the related calls for that
transaction.
* Avoid ``with_lockmode('UPDATE')`` when possible.
In MySQL/InnoDB, when a ``SELECT ... FOR UPDATE`` query does not match
any rows, it will take a gap-lock. This is a form of write-lock on the
"gap" where no rows exist, and prevents any other writes to that space.
This can effectively prevent any INSERT into a table by locking the gap
at the end of the index. Similar problems will occur if the SELECT FOR UPDATE
has an overly broad WHERE clause, or doesn't properly use an index.
One idea proposed at ODS Fall '12 was to use a normal SELECT to test the
number of rows matching a query, and if only one row is returned,
then issue the SELECT FOR UPDATE.
The better long-term solution is to use
``INSERT .. ON DUPLICATE KEY UPDATE``.
However, this can not be done until the "deleted" columns are removed and
proper UNIQUE constraints are added to the tables.
Enabling soft deletes:
* To use/enable soft-deletes, the `SoftDeleteMixin` must be added
to your model class. For example:
.. code:: python
class NovaBase(models.SoftDeleteMixin, models.ModelBase):
pass
Efficient use of soft deletes:
* There are two possible ways to mark a record as deleted:
`model.soft_delete()` and `query.soft_delete()`.
The `model.soft_delete()` method works with a single already-fetched entry.
`query.soft_delete()` makes only one db request for all entries that
correspond to the query.
* In almost all cases you should use `query.soft_delete()`. Some examples:
.. code:: python
def soft_delete_bar():
count = model_query(BarModel).find(some_condition).soft_delete()
if count == 0:
raise Exception("0 entries were soft deleted")
def complex_soft_delete_with_synchronization_bar(session=None):
if session is None:
session = sessionmaker()
with session.begin(subtransactions=True):
count = (model_query(BarModel).
find(some_condition).
soft_delete(synchronize_session=True))
# Here synchronize_session is required, because we
# don't know what is going on in outer session.
if count == 0:
raise Exception("0 entries were soft deleted")
* There is only one situation where `model.soft_delete()` is appropriate: when
you fetch a single record, work with it, and mark it as deleted in the same
transaction.
.. code:: python
def soft_delete_bar_model():
session = sessionmaker()
with session.begin():
bar_ref = model_query(BarModel).find(some_condition).first()
# Work with bar_ref
bar_ref.soft_delete(session=session)
However, if you need to work with all entries that correspond to query and
then soft delete them you should use the `query.soft_delete()` method:
.. code:: python
def soft_delete_multi_models():
session = sessionmaker()
with session.begin():
query = (model_query(BarModel, session=session).
find(some_condition))
model_refs = query.all()
# Work with model_refs
query.soft_delete(synchronize_session=False)
# synchronize_session=False should be set if there is no outer
# session and these entries are not used after this.
When working with many rows, it is very important to use query.soft_delete,
which issues a single query. Using `model.soft_delete()`, as in the following
example, is very inefficient.
.. code:: python
for bar_ref in bar_refs:
bar_ref.soft_delete(session=session)
# This will produce count(bar_refs) db requests.
"""
import functools
import logging
import re
import time
import six
from sqlalchemy import exc as sqla_exc
from sqlalchemy.interfaces import PoolListener
import sqlalchemy.orm
from sqlalchemy.pool import NullPool, StaticPool
from sqlalchemy.sql.expression import literal_column
from ironic.openstack.common.db import exception
from ironic.openstack.common.gettextutils import _LE, _LW, _LI
from ironic.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class SqliteForeignKeysListener(PoolListener):
    """Pool listener that turns on foreign key enforcement for SQLite.

    SQLite ships with foreign key constraints disabled; this listener
    issues the enabling pragma on every new database connection so the
    constraints are actually enforced.
    """
    def connect(self, dbapi_con, con_record):
        # Runs once per raw DB-API connection handed out by the pool.
        dbapi_con.execute('pragma foreign_keys=ON')
# note(boris-42): In current versions of DB backends unique constraint
# violation messages follow the structure:
#
# sqlite:
# 1 column - (IntegrityError) column c1 is not unique
# N columns - (IntegrityError) column c1, c2, ..., N are not unique
#
# sqlite since 3.7.16:
# 1 column - (IntegrityError) UNIQUE constraint failed: tbl.k1
#
# N columns - (IntegrityError) UNIQUE constraint failed: tbl.k1, tbl.k2
#
# postgres:
# 1 column - (IntegrityError) duplicate key value violates unique
# constraint "users_c1_key"
# N columns - (IntegrityError) duplicate key value violates unique
# constraint "name_of_our_constraint"
#
# mysql:
# 1 column - (IntegrityError) (1062, "Duplicate entry 'value_of_c1' for key
# 'c1'")
# N columns - (IntegrityError) (1062, "Duplicate entry 'values joined
# with -' for key 'name_of_our_constraint'")
#
# ibm_db_sa:
# N columns - (IntegrityError) SQL0803N One or more values in the INSERT
# statement, UPDATE statement, or foreign key update caused by a
# DELETE statement are not valid because the primary key, unique
# constraint or unique index identified by "2" constrains table
# "NOVA.KEY_PAIRS" from having duplicate values for the index
# key.
# Per-dialect regexes matching duplicate-key IntegrityError messages (see the
# message-format survey in the comment block above); group(1), where present,
# captures the offending column specification.
_DUP_KEY_RE_DB = {
    "sqlite": (re.compile(r"^.*columns?([^)]+)(is|are)\s+not\s+unique$"),
               re.compile(r"^.*UNIQUE\s+constraint\s+failed:\s+(.+)$")),
    "postgresql": (re.compile(r"^.*duplicate\s+key.*\"([^\"]+)\"\s*\n.*$"),),
    "mysql": (re.compile(r"^.*\(1062,.*'([^\']+)'\"\)$"),),
    "ibm_db_sa": (re.compile(r"^.*SQL0803N.*$"),),
}
def _raise_if_duplicate_entry_error(integrity_error, engine_name):
    """Raise exception if two entries are duplicated.

    In this function will be raised DBDuplicateEntry exception if integrity
    error wrap unique constraint violation.
    """
    def get_columns_from_uniq_cons_or_name(columns):
        # note(vsergeyev): UniqueConstraint name convention: "uniq_t0c10c2"
        #                  where `t` it is table name and columns `c1`, `c2`
        #                  are in UniqueConstraint.
        uniqbase = "uniq_"
        if not columns.startswith(uniqbase):
            if engine_name == "postgresql":
                # Strip the "<table>_" prefix and "_key" suffix postgres
                # appends to default constraint names.
                return [columns[columns.index("_") + 1:columns.rindex("_")]]
            return [columns]
        # Named constraint: split "t0c10c2" on "0"; element 0 is the table,
        # the rest are the column names.
        return columns[len(uniqbase):].split("0")[1:]
    # Only these engines have known duplicate-key message formats.
    if engine_name not in ["ibm_db_sa", "mysql", "sqlite", "postgresql"]:
        return
    # FIXME(johannes): The usage of the .message attribute has been
    # deprecated since Python 2.6. However, the exceptions raised by
    # SQLAlchemy can differ when using unicode() and accessing .message.
    # An audit across all three supported engines will be necessary to
    # ensure there are no regressions.
    for pattern in _DUP_KEY_RE_DB[engine_name]:
        match = pattern.match(integrity_error.message)
        if match:
            break
    else:
        # No pattern matched: not a duplicate-entry error.
        return
    # NOTE(mriedem): The ibm_db_sa integrity error message doesn't provide the
    # columns so we have to omit that from the DBDuplicateEntry error.
    columns = ''
    if engine_name != 'ibm_db_sa':
        columns = match.group(1)
    if engine_name == "sqlite":
        # sqlite reports "tbl.col, tbl.col2" — keep only the column names.
        columns = [c.split('.')[-1] for c in columns.strip().split(", ")]
    else:
        columns = get_columns_from_uniq_cons_or_name(columns)
    raise exception.DBDuplicateEntry(columns, integrity_error)
# NOTE(comstud): In current versions of DB backends, Deadlock violation
# messages follow the structure:
#
# mysql:
# (OperationalError) (1213, 'Deadlock found when trying to get lock; try '
# 'restarting transaction') <query_str> <query_args>
# Per-dialect regexes recognizing deadlock OperationalError messages (format
# surveyed in the comment above); only MySQL is currently handled.
_DEADLOCK_RE_DB = {
    "mysql": re.compile(r"^.*\(1213, 'Deadlock.*")
}
def _raise_if_deadlock_error(operational_error, engine_name):
    """Raise exception on deadlock condition.

    Raise DBDeadlock exception if OperationalError contains a Deadlock
    condition.
    """
    # Renamed the local from `re` to `pattern`: it shadowed the module-level
    # `re` import, which is confusing and blocks any later use of the module
    # in this function.
    pattern = _DEADLOCK_RE_DB.get(engine_name)
    if pattern is None:
        # No known deadlock message format for this engine.
        return
    # FIXME(johannes): The usage of the .message attribute has been
    # deprecated since Python 2.6. However, the exceptions raised by
    # SQLAlchemy can differ when using unicode() and accessing .message.
    # An audit across all three supported engines will be necessary to
    # ensure there are no regressions.
    m = pattern.match(operational_error.message)
    if not m:
        return
    raise exception.DBDeadlock(operational_error)
def _wrap_db_error(f):
    """Decorator: translate driver-level exceptions raised by *f* into the
    portable ``exception.DB*`` hierarchy."""
    #TODO(rpodolyaka): in a subsequent commit make this a class decorator to
    # ensure it can only applied to Session subclasses instances (as we use
    # Session instance bind attribute below)
    @functools.wraps(f)
    def _wrap(self, *args, **kwargs):
        try:
            return f(self, *args, **kwargs)
        except UnicodeEncodeError:
            raise exception.DBInvalidUnicodeParameter()
        except sqla_exc.OperationalError as e:
            # Connection loss and deadlocks get dedicated exception types;
            # anything else propagates unchanged.
            _raise_if_db_connection_lost(e, self.bind)
            _raise_if_deadlock_error(e, self.bind.dialect.name)
            # NOTE(comstud): A lot of code is checking for OperationalError
            # so let's not wrap it for now.
            raise
        # note(boris-42): We should catch unique constraint violation and
        # wrap it by our own DBDuplicateEntry exception. Unique constraint
        # violation is wrapped by IntegrityError.
        except sqla_exc.IntegrityError as e:
            # note(boris-42): SqlAlchemy doesn't unify errors from different
            # DBs so we must do this. Also in some tables (for example
            # instance_types) there are more than one unique constraint. This
            # means we should get names of columns, which values violate
            # unique constraint, from error message.
            _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
            raise exception.DBError(e)
        except Exception as e:
            # Catch-all boundary: log, then wrap generically.
            LOG.exception(_LE('DB exception wrapped.'))
            raise exception.DBError(e)
    return _wrap
def _synchronous_switch_listener(dbapi_conn, connection_rec):
"""Switch sqlite connections to non-synchronous mode."""
dbapi_conn.execute("PRAGMA synchronous = OFF")
def _add_regexp_listener(dbapi_con, con_record):
"""Add REGEXP function to sqlite connections."""
def regexp(expr, item):
reg = re.compile(expr)
return reg.search(six.text_type(item)) is not None
dbapi_con.create_function('regexp', 2, regexp)
def _thread_yield(dbapi_con, con_record):
"""Ensure other greenthreads get a chance to be executed.
If we use eventlet.monkey_patch(), eventlet.greenthread.sleep(0) will
execute instead of time.sleep(0).
Force a context switch. With common database backends (eg MySQLdb and
sqlite), there is no implicit yield caused by network I/O since they are
implemented by C libraries that eventlet cannot monkey patch.
"""
time.sleep(0)
def _ping_listener(engine, dbapi_conn, connection_rec, connection_proxy):
"""Ensures that MySQL and DB2 connections are alive.
Borrowed from:
http://groups.google.com/group/sqlalchemy/msg/a4ce563d802c929f
"""
cursor = dbapi_conn.cursor()
try:
ping_sql = 'select 1'
if engine.name == 'ibm_db_sa':
# DB2 requires a table expression
ping_sql = 'select 1 from (values (1)) AS t1'
cursor.execute(ping_sql)
except Exception as ex:
if engine.dialect.is_disconnect(ex, dbapi_conn, cursor):
msg = _LW('Database server has gone away: %s') % ex
LOG.warning(msg)
raise sqla_exc.DisconnectionError(msg)
else:
raise
def _set_mode_traditional(dbapi_con, connection_rec, connection_proxy):
    """Force the MySQL session into TRADITIONAL sql_mode.

    Without a strict mode, MySQL silently truncates over-long strings on
    INSERT/UPDATE (raising only a warning), which risks data corruption.
    """
    _set_session_sql_mode(dbapi_con, connection_rec,
                          connection_proxy, sql_mode='TRADITIONAL')
def _set_session_sql_mode(dbapi_con, connection_rec,
                          connection_proxy, sql_mode=None):
    """Apply and report the MySQL ``sql_mode`` session variable.

    MySQL supports several server modes (TRADITIONAL, ANSI, the STRICT_*
    family, ...). Passing '' clears the session's SQL mode, overriding any
    server default; passing None (the default) leaves the server-side
    setting in effect and makes the SET a no-op.
    """
    cursor = dbapi_con.cursor()
    if sql_mode is not None:
        cursor.execute("SET SESSION sql_mode = %s", [sql_mode])
    # Read back the effective SQL mode: even when we set nothing, the server
    # configuration may impose one.
    cursor.execute("SHOW VARIABLES LIKE 'sql_mode'")
    row = cursor.fetchone()
    if row is None:
        LOG.warning(_LW('Unable to detect effective SQL mode'))
        return
    realmode = row[1]
    LOG.info(_LI('MySQL server mode set to %s') % realmode)
    # 'TRADITIONAL' implies several other strict modes, hence the substring
    # test rather than an equality check.
    upper = realmode.upper()
    if 'TRADITIONAL' not in upper and 'STRICT_ALL_TABLES' not in upper:
        LOG.warning(_LW("MySQL SQL mode is '%s', "
                        "consider enabling TRADITIONAL or STRICT_ALL_TABLES")
                    % realmode)
def _is_db_connection_error(args):
"""Return True if error in connecting to db."""
# NOTE(adam_g): This is currently MySQL specific and needs to be extended
# to support Postgres and others.
# For the db2, the error code is -30081 since the db2 is still not ready
conn_err_codes = ('2002', '2003', '2006', '2013', '-30081')
for err_code in conn_err_codes:
if args.find(err_code) != -1:
return True
return False
def _raise_if_db_connection_lost(error, engine):
# NOTE(vsergeyev): Function is_disconnect(e, connection, cursor)
# requires connection and cursor in incoming parameters,
# but we have no possibility to create connection if DB
# is not available, so in such case reconnect fails.
# But is_disconnect() ignores these parameters, so it
# makes sense to pass to function None as placeholder
# instead of connection and cursor.
if engine.dialect.is_disconnect(error, None, None):
raise exception.DBConnectionError(error)
def create_engine(sql_connection, sqlite_fk=False, mysql_sql_mode=None,
                  mysql_traditional_mode=False, idle_timeout=3600,
                  connection_debug=0, max_pool_size=None, max_overflow=None,
                  pool_timeout=None, sqlite_synchronous=True,
                  connection_trace=False, max_retries=10, retry_interval=10):
    """Return a new SQLAlchemy engine.

    :param sql_connection: SQLAlchemy connection URL
    :param sqlite_fk: enable foreign keys in SQLite
    :param mysql_sql_mode: SQL mode applied on each MySQL checkout
    :param mysql_traditional_mode: legacy flag; when True it overrides
        mysql_sql_mode with 'TRADITIONAL'
    :param idle_timeout: seconds before idle connections are recycled
    :param connection_debug: 0..100, mapped onto the 'sqlalchemy.engine'
        logger level (>=100 DEBUG, >=50 INFO, else WARNING)
    :param max_pool_size: pool size (ignored for SQLite)
    :param max_overflow: pool overflow (ignored for SQLite)
    :param pool_timeout: pool checkout timeout (ignored for SQLite)
    :param sqlite_synchronous: if False, switch SQLite out of synchronous mode
    :param connection_trace: embed Python stack traces into MySQL queries
        as SQL comments (MySQLdb driver only)
    :param max_retries: initial-connection retries; -1 retries forever
    :param retry_interval: seconds between initial-connection retries
    """
    connection_dict = sqlalchemy.engine.url.make_url(sql_connection)
    engine_args = {
        "pool_recycle": idle_timeout,
        'convert_unicode': True,
    }
    logger = logging.getLogger('sqlalchemy.engine')
    # Map SQL debug level to Python log level
    if connection_debug >= 100:
        logger.setLevel(logging.DEBUG)
    elif connection_debug >= 50:
        logger.setLevel(logging.INFO)
    else:
        logger.setLevel(logging.WARNING)
    if "sqlite" in connection_dict.drivername:
        if sqlite_fk:
            engine_args["listeners"] = [SqliteForeignKeysListener()]
        engine_args["poolclass"] = NullPool
        # An in-memory SQLite DB must keep a single shared connection,
        # otherwise each "connection" would see an empty database.
        if sql_connection == "sqlite://":
            engine_args["poolclass"] = StaticPool
            engine_args["connect_args"] = {'check_same_thread': False}
    else:
        # Pool tuning knobs only make sense for real, pooled backends.
        if max_pool_size is not None:
            engine_args['pool_size'] = max_pool_size
        if max_overflow is not None:
            engine_args['max_overflow'] = max_overflow
        if pool_timeout is not None:
            engine_args['pool_timeout'] = pool_timeout
    engine = sqlalchemy.create_engine(sql_connection, **engine_args)
    sqlalchemy.event.listen(engine, 'checkin', _thread_yield)
    if engine.name in ['mysql', 'ibm_db_sa']:
        # Verify connections are alive on checkout (see _ping_listener).
        ping_callback = functools.partial(_ping_listener, engine)
        sqlalchemy.event.listen(engine, 'checkout', ping_callback)
        if engine.name == 'mysql':
            # mysql_traditional_mode is the deprecated spelling; it wins
            # over an explicitly passed mysql_sql_mode.
            if mysql_traditional_mode:
                mysql_sql_mode = 'TRADITIONAL'
            if mysql_sql_mode:
                mode_callback = functools.partial(_set_session_sql_mode,
                                                  sql_mode=mysql_sql_mode)
                sqlalchemy.event.listen(engine, 'checkout', mode_callback)
    elif 'sqlite' in connection_dict.drivername:
        if not sqlite_synchronous:
            sqlalchemy.event.listen(engine, 'connect',
                                    _synchronous_switch_listener)
        sqlalchemy.event.listen(engine, 'connect', _add_regexp_listener)
    if connection_trace and engine.dialect.dbapi.__name__ == 'MySQLdb':
        _patch_mysqldb_with_stacktrace_comments()
    try:
        engine.connect()
    except sqla_exc.OperationalError as e:
        # Only connection failures are retried; any other operational
        # error propagates immediately.
        if not _is_db_connection_error(e.args[0]):
            raise
        remaining = max_retries
        # The string 'infinite' acts as a sentinel that disables the
        # countdown (max_retries == -1 means retry forever).
        if remaining == -1:
            remaining = 'infinite'
        while True:
            msg = _LW('SQL connection failed. %s attempts left.')
            LOG.warning(msg % remaining)
            if remaining != 'infinite':
                remaining -= 1
            time.sleep(retry_interval)
            try:
                engine.connect()
                break
            except sqla_exc.OperationalError as e:
                if (remaining != 'infinite' and remaining == 0) or \
                        not _is_db_connection_error(e.args[0]):
                    raise
    return engine
class Query(sqlalchemy.orm.query.Query):
    """Subclass of sqlalchemy.query with soft_delete() method."""

    def soft_delete(self, synchronize_session='evaluate'):
        # Soft deletion marks matched rows instead of removing them:
        # 'deleted' takes the row's own id, and 'deleted_at' records when
        # the soft delete happened. 'updated_at' is kept as-is.
        values = {'deleted': literal_column('id'),
                  'updated_at': literal_column('updated_at'),
                  'deleted_at': timeutils.utcnow()}
        return self.update(values, synchronize_session=synchronize_session)
class Session(sqlalchemy.orm.session.Session):
    """Custom Session class to avoid SqlAlchemy Session monkey patching.

    Each override simply delegates to the parent implementation; the
    _wrap_db_error decorator (defined elsewhere in this module — presumably
    translating driver-level errors into the library's exception types,
    confirm against its definition) is applied here at subclass level so
    the stock sqlalchemy Session is left untouched.
    """
    @_wrap_db_error
    def query(self, *args, **kwargs):
        return super(Session, self).query(*args, **kwargs)

    @_wrap_db_error
    def flush(self, *args, **kwargs):
        return super(Session, self).flush(*args, **kwargs)

    @_wrap_db_error
    def execute(self, *args, **kwargs):
        return super(Session, self).execute(*args, **kwargs)
def get_maker(engine, autocommit=True, expire_on_commit=False):
    """Build a SQLAlchemy sessionmaker bound to *engine*.

    Sessions produced by the maker use this module's Session subclass
    (error wrapping) and Query subclass (soft_delete support).
    """
    maker_kwargs = {
        'bind': engine,
        'class_': Session,
        'autocommit': autocommit,
        'expire_on_commit': expire_on_commit,
        'query_cls': Query,
    }
    return sqlalchemy.orm.sessionmaker(**maker_kwargs)
def _patch_mysqldb_with_stacktrace_comments():
    """Adds current stack trace as a comment in queries.

    Patches MySQLdb.cursors.BaseCursor._do_query.
    """
    import MySQLdb.cursors
    import traceback

    old_mysql_do_query = MySQLdb.cursors.BaseCursor._do_query

    def _do_query(self, q):
        # extract_stack() yields (filename, lineno, name, source_line)
        # tuples; collect one formatted entry per interesting frame.
        frames = []
        for fname, lineno, func_name, src_line in traceback.extract_stack():
            # Skip the usual plumbing frames so the trace stays readable.
            if fname.endswith('session.py') and func_name == '_do_query':
                continue
            if fname.endswith('api.py') and func_name == 'wrapper':
                continue
            if fname.endswith('utils.py') and func_name == '_inner':
                continue
            if fname.endswith('exception.py') and func_name == '_wrap':
                continue
            # db/api is just a wrapper around db/sqlalchemy/api
            if fname.endswith('db/api.py'):
                continue
            # Only keep frames from within the ironic tree.
            idx = fname.rfind('ironic')
            if idx == -1:
                continue
            frames.append("File:%s:%s Method:%s() Line:%s"
                          % (fname[idx:], lineno, func_name, src_line))
        if frames:
            # Append the trace to the query as a SQL comment.
            qq = "%s /* %s */" % (q, " | ".join(frames))
        else:
            qq = q
        old_mysql_do_query(self, qq)

    setattr(MySQLdb.cursors.BaseCursor, '_do_query', _do_query)
class EngineFacade(object):
    """A helper class for removing of global engine instances from ironic.db.

    As a library, ironic.db can't decide where to store/when to create engine
    and sessionmaker instances, so this must be left for a target application.

    On the other hand, in order to simplify the adoption of ironic.db changes,
    we'll provide a helper class, which creates engine and sessionmaker
    on its instantiation and provides get_engine()/get_session() methods
    that are compatible with corresponding utility functions that currently
    exist in target projects, e.g. in Nova.

    engine/sessionmaker instances will still be global (and they are meant to
    be global), but they will be stored in the app context, rather that in the
    ironic.db context.

    Note: using of this helper is completely optional and you are encouraged to
    integrate engine/sessionmaker instances into your apps any way you like
    (e.g. one might want to bind a session to a request context). Two important
    things to remember:

    1. An Engine instance is effectively a pool of DB connections, so it's
       meant to be shared (and it's thread-safe).
    2. A Session instance is not meant to be shared and represents a DB
       transactional context (i.e. it's not thread-safe). sessionmaker is
       a factory of sessions.

    """

    def __init__(self, sql_connection,
                 sqlite_fk=False, mysql_sql_mode=None,
                 autocommit=True, expire_on_commit=False, **kwargs):
        """Initialize engine and sessionmaker instances.

        :param sqlite_fk: enable foreign keys in SQLite
        :type sqlite_fk: bool

        :param mysql_sql_mode: set SQL mode in MySQL
        :type mysql_sql_mode: string

        :param autocommit: use autocommit mode for created Session instances
        :type autocommit: bool

        :param expire_on_commit: expire session objects on commit
        :type expire_on_commit: bool

        Keyword arguments:

        :keyword idle_timeout: timeout before idle sql connections are reaped
                               (defaults to 3600)
        :keyword connection_debug: verbosity of SQL debugging information.
                                   0=None, 100=Everything (defaults to 0)
        :keyword max_pool_size: maximum number of SQL connections to keep open
                                in a pool (defaults to SQLAlchemy settings)
        :keyword max_overflow: if set, use this value for max_overflow with
                               sqlalchemy (defaults to SQLAlchemy settings)
        :keyword pool_timeout: if set, use this value for pool_timeout with
                               sqlalchemy (defaults to SQLAlchemy settings)
        :keyword sqlite_synchronous: if True, SQLite uses synchronous mode
                                     (defaults to True)
        :keyword connection_trace: add python stack traces to SQL as comment
                                   strings (defaults to False)
        :keyword max_retries: maximum db connection retries during startup.
                              (setting -1 implies an infinite retry count)
                              (defaults to 10)
        :keyword retry_interval: interval between retries of opening a sql
                                 connection (defaults to 10)

        """
        super(EngineFacade, self).__init__()

        self._engine = create_engine(
            sql_connection=sql_connection,
            sqlite_fk=sqlite_fk,
            mysql_sql_mode=mysql_sql_mode,
            idle_timeout=kwargs.get('idle_timeout', 3600),
            connection_debug=kwargs.get('connection_debug', 0),
            max_pool_size=kwargs.get('max_pool_size'),
            max_overflow=kwargs.get('max_overflow'),
            pool_timeout=kwargs.get('pool_timeout'),
            sqlite_synchronous=kwargs.get('sqlite_synchronous', True),
            connection_trace=kwargs.get('connection_trace', False),
            max_retries=kwargs.get('max_retries', 10),
            retry_interval=kwargs.get('retry_interval', 10))
        self._session_maker = get_maker(
            engine=self._engine,
            autocommit=autocommit,
            expire_on_commit=expire_on_commit)

    def get_engine(self):
        """Get the engine instance (note, that it's shared)."""
        return self._engine

    def get_session(self, **kwargs):
        """Get a Session instance.

        If passed, keyword arguments values override the ones used when the
        sessionmaker instance was created.

        :keyword autocommit: use autocommit mode for created Session instances
        :type autocommit: bool

        :keyword expire_on_commit: expire session objects on commit
        :type expire_on_commit: bool

        """
        # NOTE: deleting keys from a dict while iterating over it raises
        # "RuntimeError: dictionary changed size during iteration", so any
        # unexpected kwarg used to crash this method. Iterate over a
        # snapshot of the keys instead and drop the unsupported ones.
        for arg in list(kwargs):
            if arg not in ('autocommit', 'expire_on_commit'):
                del kwargs[arg]

        return self._session_maker(**kwargs)
| |
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `DatasetCreator` with `Model.fit` across usages and strategies."""
import tensorflow.compat.v2 as tf
import os
from absl.testing import parameterized
import numpy as np
import keras
from keras import callbacks as callbacks_lib
from keras.engine import sequential
from keras.layers import core as core_layers
from keras.layers.preprocessing import string_lookup
from keras.optimizers.optimizer_v2 import gradient_descent
from keras.utils import dataset_creator
from tensorflow.python.platform import tf_logging as logging
class DatasetCreatorModelFitTestBase(tf.test.TestCase, parameterized.TestCase):
  """The base class for DatasetCreator with Model.fit tests."""

  def _get_dataset_fn(self, use_lookup_layer):
    """Return a dataset_fn suitable for `dataset_creator.DatasetCreator`.

    Args:
      use_lookup_layer: if True, the dataset maps string features through a
        `StringLookup` layer whose vocabulary file is written into the
        test's temp dir; otherwise it yields random numeric data.
    """
    if use_lookup_layer:
      filepath = os.path.join(self.get_temp_dir(), "vocab")
      with open(filepath, "w") as f:
        f.write("\n".join(["earth", "wind", "and", "fire"]))

      def dataset_fn(input_context):
        del input_context
        lookup_layer = string_lookup.StringLookup(
            num_oov_indices=1, vocabulary=filepath)
        x = np.array([["earth", "wind", "and", "fire"],
                      ["fire", "and", "earth", "michigan"]])
        y = np.array([0, 1])
        map_fn = lambda x, y: (lookup_layer(x), y)
        return tf.data.Dataset.from_tensor_slices(
            (x, y)).shuffle(10).repeat().batch(2).map(map_fn)
    else:

      def dataset_fn(input_context):
        del input_context
        x = tf.random.uniform((10, 10))
        y = tf.random.uniform((10,))
        return tf.data.Dataset.from_tensor_slices(
            (x, y)).shuffle(10).repeat().batch(2)

    return dataset_fn

  def _model_compile(self,
                     strategy,
                     steps_per_execution=1,
                     run_eagerly=False,
                     with_normalization_layer=False,
                     use_lookup_layer=False):
    """Build and compile a small sequential model under `strategy`.

    Returns:
      A `(model, callbacks)` tuple; `callbacks` holds a
      ResultAssertingCallback that checks epoch ordering and loss type.
    """

    class ResultAssertingCallback(callbacks_lib.Callback):
      """A callback that asserts the result of the tests."""

      def __init__(self):
        self._prev_epoch = -1

      def on_epoch_end(self, epoch, logs=None):
        logging.info("testModelFit: epoch=%r, logs=%r", epoch, logs)
        if epoch <= self._prev_epoch:
          raise RuntimeError("Epoch is supposed to be larger than previous.")
        self._prev_epoch = epoch
        is_loss_float = (
            logs.get("loss", None) is not None and
            isinstance(logs["loss"], (float, np.floating)))
        if not is_loss_float:
          raise RuntimeError("loss is supposed to be in the logs and float.")

    with strategy.scope():
      model = sequential.Sequential([core_layers.Dense(10)])
      if with_normalization_layer:
        norm = keras.layers.BatchNormalization(
            axis=-1, input_shape=(4, 4, 3), momentum=0.8)
        model.add(norm)
      model.add(core_layers.Dense(1, activation="sigmoid"))
      self._accuracy_metric = keras.metrics.Accuracy()

      model.compile(
          gradient_descent.SGD(),
          loss="binary_crossentropy",
          metrics=[self._accuracy_metric],
          steps_per_execution=steps_per_execution,
          run_eagerly=run_eagerly)
    return model, [ResultAssertingCallback()]

  def _model_fit(self,
                 strategy,
                 steps_per_execution=1,
                 validation_data=None,
                 x=None,
                 y=None,
                 shuffle=True,
                 batch_size=None,
                 steps_per_epoch=10,
                 run_eagerly=False,
                 with_normalization_layer=False,
                 callbacks=None,
                 use_lookup_layer=False,
                 use_dataset_creator=True,
                 verbose="auto"):
    """Compile and fit a model for 10 epochs, returning the model.

    When `x`/`validation_data` are not given they are built from
    `_get_dataset_fn`, wrapped in a `DatasetCreator` unless
    `use_dataset_creator` is False.
    """
    if callbacks is None:
      callbacks = []
    model, default_callbacks = self._model_compile(strategy,
                                                   steps_per_execution,
                                                   run_eagerly,
                                                   with_normalization_layer,
                                                   use_lookup_layer)
    # Concatenate rather than `+=` so a caller-supplied callbacks list is
    # not mutated in place.
    callbacks = callbacks + default_callbacks

    if x is None:
      if use_dataset_creator:
        x = dataset_creator.DatasetCreator(
            self._get_dataset_fn(use_lookup_layer))
      else:
        x = self._get_dataset_fn(use_lookup_layer)(None)

    if validation_data is None:
      if use_dataset_creator:
        validation_data = dataset_creator.DatasetCreator(
            self._get_dataset_fn(use_lookup_layer))
      else:
        validation_data = self._get_dataset_fn(use_lookup_layer)(None)

    model.fit(
        x,
        y,
        shuffle=shuffle,
        batch_size=batch_size,
        epochs=10,
        steps_per_epoch=steps_per_epoch,
        callbacks=callbacks,
        validation_data=validation_data,
        validation_steps=steps_per_epoch,
        verbose=verbose)
    return model

  def _model_evaluate(self,
                      strategy,
                      steps_per_execution=1,
                      x=None,
                      y=None,
                      batch_size=None,
                      steps=10,
                      run_eagerly=False,
                      with_normalization_layer=False,
                      callbacks=None,
                      use_dataset_creator=True):
    """Compile and evaluate a model, returning the model."""
    if callbacks is None:
      callbacks = []
    model, default_callbacks = self._model_compile(
        strategy,
        steps_per_execution,
        run_eagerly,
        with_normalization_layer,
    )
    # Concatenate rather than `+=` so a caller-supplied callbacks list is
    # not mutated in place.
    callbacks = callbacks + default_callbacks

    def dataset_fn(input_context):
      del input_context
      x = tf.random.uniform((10, 10))
      y = tf.random.uniform((10, 1))
      return tf.data.Dataset.from_tensor_slices(
          (x, y)).shuffle(10).repeat().batch(8)

    if x is None:
      if use_dataset_creator:
        x = dataset_creator.DatasetCreator(dataset_fn)
      else:
        x = dataset_fn(None)
    model.evaluate(
        x=x, y=y, steps=steps, callbacks=callbacks, batch_size=batch_size)
    return model

  def _model_predict(
      self,
      strategy,
      model=None,
      steps_per_execution=1,
      test_data=None,
      steps=10,
      with_normalization_layer=False,
  ):
    """Run `model.predict`, returning (model, predictions rounded to 4dp)."""
    callbacks = []
    if model is None:
      model, default_callbacks = self._model_compile(
          strategy,
          steps_per_execution,
          with_normalization_layer=with_normalization_layer,
      )
      callbacks = callbacks + default_callbacks

    def create_test_data():
      x = tf.constant([[1.], [2.], [3.], [1.], [5.], [1.]])
      return tf.data.Dataset.from_tensor_slices(x).repeat().batch(2)

    if test_data is None:
      test_data = create_test_data()
    predictions = model.predict(x=test_data, steps=steps, callbacks=callbacks)
    predictions = np.around(predictions, 4)
    return model, predictions
| |
# Copyright 2014 DreamHost, LLC
#
# Author: DreamHost, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import unittest
import mock
from akanda.rug.common.linux import ip_lib
NETNS_SAMPLE = [
'12345678-1234-5678-abcd-1234567890ab',
'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
'cccccccc-cccc-cccc-cccc-cccccccccccc']
LINK_SAMPLE = [
'1: lo: <LOOPBACK,UP,LOWER_UP> mtu 16436 qdisc noqueue state UNKNOWN \\'
'link/loopback 00:00:00:00:00:00 brd 00:00:00:00:00:00',
'2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP '
'qlen 1000\ link/ether cc:dd:ee:ff:ab:cd brd ff:ff:ff:ff:ff:ff'
'\ alias openvswitch',
'3: br-int: <BROADCAST,MULTICAST> mtu 1500 qdisc noop state DOWN '
'\ link/ether aa:bb:cc:dd:ee:ff brd ff:ff:ff:ff:ff:ff',
'4: gw-ddc717df-49: <BROADCAST,MULTICAST> mtu 1500 qdisc noop '
'state DOWN \ link/ether fe:dc:ba:fe:dc:ba brd ff:ff:ff:ff:ff:ff']
ADDR_SAMPLE = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 brd 172.16.77.255 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
ADDR_SAMPLE2 = ("""
2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc mq state UP qlen 1000
link/ether dd:cc:aa:b9:76:ce brd ff:ff:ff:ff:ff:ff
inet 172.16.77.240/24 scope global eth0
inet6 2001:470:9:1224:5595:dd51:6ba2:e788/64 scope global temporary dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 2001:470:9:1224:fd91:272:581e:3a32/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:4508:b885:5fb:740b/64 scope global temporary """
"""deprecated dynamic
valid_lft 14187sec preferred_lft 0sec
inet6 2001:470:9:1224:dfcc:aaff:feb9:76ce/64 scope global dynamic
valid_lft 14187sec preferred_lft 3387sec
inet6 fe80::dfcc:aaff:feb9:76ce/64 scope link
valid_lft forever preferred_lft forever
""")
GATEWAY_SAMPLE1 = ("""
default via 10.35.19.254 metric 100
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE2 = ("""
default via 10.35.19.254 metric 100
""")
GATEWAY_SAMPLE3 = ("""
10.35.16.0/22 proto kernel scope link src 10.35.17.97
""")
GATEWAY_SAMPLE4 = ("""
default via 10.35.19.254
""")
DEVICE_ROUTE_SAMPLE = ("10.0.0.0/24 scope link src 10.0.0.2")
SUBNET_SAMPLE1 = ("10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1\n"
"10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2")
SUBNET_SAMPLE2 = ("10.0.0.0/24 dev tap1d7888a7-10 scope link src 10.0.0.2\n"
"10.0.0.0/24 dev qr-23380d11-d2 scope link src 10.0.0.1")
class TestSubProcessBase(unittest.TestCase):
    """Checks how SubProcessBase builds `ip` command lines and uses sudo."""

    def setUp(self):
        super(TestSubProcessBase, self).setUp()
        # Patch the low-level executor so no real `ip` command ever runs.
        patcher = mock.patch('akanda.rug.common.linux.utils.execute')
        self.execute_p = patcher
        self.execute = patcher.start()
        self.addCleanup(patcher.stop)

    def test_execute_wrapper(self):
        ip_lib.SubProcessBase._execute('o', 'link', ('list',), 'sudo')
        self.execute.assert_called_once_with(
            ['ip', '-o', 'link', 'list'], root_helper='sudo')

    def test_execute_wrapper_int_options(self):
        # Integer options (e.g. address family 4) get a '-' prefix too.
        ip_lib.SubProcessBase._execute([4], 'link', ('list',))
        self.execute.assert_called_once_with(
            ['ip', '-4', 'link', 'list'], root_helper=None)

    def test_execute_wrapper_no_options(self):
        ip_lib.SubProcessBase._execute([], 'link', ('list',))
        self.execute.assert_called_once_with(
            ['ip', 'link', 'list'], root_helper=None)

    def test_run_no_namespace(self):
        wrapper = ip_lib.SubProcessBase('sudo')
        wrapper._run([], 'link', ('list',))
        # Plain _run without a namespace does not escalate privileges.
        self.execute.assert_called_once_with(
            ['ip', 'link', 'list'], root_helper=None)

    def test_run_namespace(self):
        wrapper = ip_lib.SubProcessBase('sudo', 'ns')
        wrapper._run([], 'link', ('list',))
        # Namespaced execution wraps the command in `ip netns exec`.
        self.execute.assert_called_once_with(
            ['ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list'],
            root_helper='sudo')

    def test_as_root_namespace(self):
        wrapper = ip_lib.SubProcessBase('sudo', 'ns')
        wrapper._as_root([], 'link', ('list',))
        self.execute.assert_called_once_with(
            ['ip', 'netns', 'exec', 'ns', 'ip', 'link', 'list'],
            root_helper='sudo')

    def test_as_root_no_root_helper(self):
        wrapper = ip_lib.SubProcessBase()
        self.assertRaisesRegexp(Exception,
                                'Sudo is required to run this command',
                                wrapper._as_root,
                                [], 'link', ('list',))
class TestIpWrapper(unittest.TestCase):
    """Tests for IPWrapper: device listing, namespace management, tuntap/veth.

    IPWrapper._execute is mocked out, so each test drives the wrapper with
    canned command output and asserts the exact argument tuple it passed
    down rather than touching a real `ip` binary.
    """
    def setUp(self):
        super(TestIpWrapper, self).setUp()
        # Patch at the class level so instance creation stays untouched.
        self.execute_p = mock.patch.object(ip_lib.IPWrapper, '_execute')
        self.execute = self.execute_p.start()
        self.addCleanup(self.execute_p.stop)

    def test_get_devices(self):
        self.execute.return_value = '\n'.join(LINK_SAMPLE)
        retval = ip_lib.IPWrapper('sudo').get_devices()
        self.assertEqual(retval,
                         [ip_lib.IPDevice('lo'),
                          ip_lib.IPDevice('eth0'),
                          ip_lib.IPDevice('br-int'),
                          ip_lib.IPDevice('gw-ddc717df-49')])
        self.execute.assert_called_once_with('o', 'link', ('list',),
                                             'sudo', None)

    def test_get_devices_malformed_line(self):
        # A line that doesn't parse as a device entry is silently skipped.
        self.execute.return_value = '\n'.join(LINK_SAMPLE + ['gibberish'])
        retval = ip_lib.IPWrapper('sudo').get_devices()
        self.assertEqual(retval,
                         [ip_lib.IPDevice('lo'),
                          ip_lib.IPDevice('eth0'),
                          ip_lib.IPDevice('br-int'),
                          ip_lib.IPDevice('gw-ddc717df-49')])
        self.execute.assert_called_once_with('o', 'link', ('list',),
                                             'sudo', None)

    def test_get_namespaces(self):
        self.execute.return_value = '\n'.join(NETNS_SAMPLE)
        retval = ip_lib.IPWrapper.get_namespaces('sudo')
        self.assertEqual(retval,
                         ['12345678-1234-5678-abcd-1234567890ab',
                          'bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb',
                          'cccccccc-cccc-cccc-cccc-cccccccccccc'])
        self.execute.assert_called_once_with('', 'netns', ('list',),
                                             root_helper='sudo')

    def test_add_tuntap(self):
        ip_lib.IPWrapper('sudo').add_tuntap('tap0')
        self.execute.assert_called_once_with('', 'tuntap',
                                             ('add', 'tap0', 'mode', 'tap'),
                                             'sudo', None)

    def test_add_veth(self):
        ip_lib.IPWrapper('sudo').add_veth('tap0', 'tap1')
        self.execute.assert_called_once_with('', 'link',
                                             ('add', 'tap0', 'type', 'veth',
                                              'peer', 'name', 'tap1'),
                                             'sudo', None)

    def test_get_device(self):
        # device() hands its own root helper and namespace to the IPDevice.
        dev = ip_lib.IPWrapper('sudo', 'ns').device('eth0')
        self.assertEqual(dev.root_helper, 'sudo')
        self.assertEqual(dev.namespace, 'ns')
        self.assertEqual(dev.name, 'eth0')

    def test_ensure_namespace(self):
        with mock.patch.object(ip_lib, 'IPDevice') as ip_dev:
            ip = ip_lib.IPWrapper('sudo')
            with mock.patch.object(ip.netns, 'exists') as ns_exists:
                ns_exists.return_value = False
                ip.ensure_namespace('ns')
                # Missing namespace: created, then its loopback brought up.
                self.execute.assert_has_calls(
                    [mock.call([], 'netns', ('add', 'ns'), 'sudo', None)])
                ip_dev.assert_has_calls([mock.call('lo', 'sudo', 'ns'),
                                         mock.call().link.set_up()])

    def test_ensure_namespace_existing(self):
        with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd:
            ip_ns_cmd.exists.return_value = True
            ns = ip_lib.IPWrapper('sudo').ensure_namespace('ns')
            # Existing namespace: no commands are executed at all.
            self.assertFalse(self.execute.called)
            self.assertEqual(ns.namespace, 'ns')

    def test_namespace_is_empty_no_devices(self):
        ip = ip_lib.IPWrapper('sudo', 'ns')
        with mock.patch.object(ip, 'get_devices') as get_devices:
            get_devices.return_value = []
            self.assertTrue(ip.namespace_is_empty())
            get_devices.assert_called_once_with(exclude_loopback=True)

    def test_namespace_is_empty(self):
        ip = ip_lib.IPWrapper('sudo', 'ns')
        with mock.patch.object(ip, 'get_devices') as get_devices:
            get_devices.return_value = [mock.Mock()]
            self.assertFalse(ip.namespace_is_empty())
            get_devices.assert_called_once_with(exclude_loopback=True)

    def test_garbage_collect_namespace_does_not_exist(self):
        with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
            ip_ns_cmd_cls.return_value.exists.return_value = False
            ip = ip_lib.IPWrapper('sudo', 'ns')
            with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
                # Nothing to collect: no emptiness check, no delete.
                self.assertFalse(ip.garbage_collect_namespace())
                ip_ns_cmd_cls.assert_has_calls([mock.call().exists('ns')])
                self.assertNotIn(mock.call().delete('ns'),
                                 ip_ns_cmd_cls.return_value.mock_calls)
                self.assertEqual(mock_is_empty.mock_calls, [])

    def test_garbage_collect_namespace_existing_empty_ns(self):
        with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
            ip_ns_cmd_cls.return_value.exists.return_value = True
            ip = ip_lib.IPWrapper('sudo', 'ns')
            with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
                mock_is_empty.return_value = True
                # Empty, existing namespace gets deleted.
                self.assertTrue(ip.garbage_collect_namespace())
                mock_is_empty.assert_called_once_with()
                expected = [mock.call().exists('ns'),
                            mock.call().delete('ns')]
                ip_ns_cmd_cls.assert_has_calls(expected)

    def test_garbage_collect_namespace_existing_not_empty(self):
        lo_device = mock.Mock()
        lo_device.name = 'lo'
        tap_device = mock.Mock()
        tap_device.name = 'tap1'
        with mock.patch.object(ip_lib, 'IpNetnsCommand') as ip_ns_cmd_cls:
            ip_ns_cmd_cls.return_value.exists.return_value = True
            ip = ip_lib.IPWrapper('sudo', 'ns')
            with mock.patch.object(ip, 'namespace_is_empty') as mock_is_empty:
                mock_is_empty.return_value = False
                # Non-empty namespace is left alone (no delete call).
                self.assertFalse(ip.garbage_collect_namespace())
                mock_is_empty.assert_called_once_with()
                expected = [mock.call(ip),
                            mock.call().exists('ns')]
                self.assertEqual(ip_ns_cmd_cls.mock_calls, expected)
                self.assertNotIn(mock.call().delete('ns'),
                                 ip_ns_cmd_cls.mock_calls)

    def test_add_device_to_namespace(self):
        dev = mock.Mock()
        ip_lib.IPWrapper('sudo', 'ns').add_device_to_namespace(dev)
        dev.assert_has_calls([mock.call.link.set_netns('ns')])

    def test_add_device_to_namespace_is_none(self):
        # Without a namespace the device is untouched.
        dev = mock.Mock()
        ip_lib.IPWrapper('sudo').add_device_to_namespace(dev)
        self.assertEqual(dev.mock_calls, [])
class TestIPDevice(unittest.TestCase):
    """Equality semantics and string conversion of IPDevice."""

    def test_eq_same_name(self):
        self.assertEqual(ip_lib.IPDevice('tap0'), ip_lib.IPDevice('tap0'))

    def test_eq_diff_name(self):
        self.assertNotEqual(ip_lib.IPDevice('tap0'), ip_lib.IPDevice('tap1'))

    def test_eq_same_namespace(self):
        self.assertEqual(ip_lib.IPDevice('tap0', 'ns1'),
                         ip_lib.IPDevice('tap0', 'ns1'))

    def test_eq_diff_namespace(self):
        self.assertNotEqual(ip_lib.IPDevice('tap0', 'sudo', 'ns1'),
                            ip_lib.IPDevice('tap0', 'sudo', 'ns2'))

    def test_eq_other_is_none(self):
        # Comparing against None must not blow up, just be unequal.
        self.assertNotEqual(ip_lib.IPDevice('tap0', 'sudo', 'ns1'), None)

    def test_str(self):
        self.assertEqual(str(ip_lib.IPDevice('tap0')), 'tap0')
class TestIPCommandBase(unittest.TestCase):
    """IpCommandBase should delegate to its parent wrapper's _run/_as_root."""

    def setUp(self):
        super(TestIPCommandBase, self).setUp()
        self.ip = mock.Mock()
        self.ip.root_helper = 'sudo'
        self.ip.namespace = 'namespace'
        self.ip_cmd = ip_lib.IpCommandBase(self.ip)
        self.ip_cmd.COMMAND = 'foo'

    def test_run(self):
        self.ip_cmd._run('link', 'show')
        expected = mock.call._run([], 'foo', ('link', 'show'))
        self.ip.assert_has_calls([expected])

    def test_run_with_options(self):
        self.ip_cmd._run('link', options='o')
        expected = mock.call._run('o', 'foo', ('link', ))
        self.ip.assert_has_calls([expected])

    def test_as_root(self):
        self.ip_cmd._as_root('link')
        expected = mock.call._as_root([], 'foo', ('link', ), False)
        self.ip.assert_has_calls([expected])

    def test_as_root_with_options(self):
        self.ip_cmd._as_root('link', options='o')
        expected = mock.call._as_root('o', 'foo', ('link', ), False)
        self.ip.assert_has_calls([expected])
class TestIPDeviceCommandBase(unittest.TestCase):
    """IpDeviceCommandBase should expose its parent device's name."""

    def setUp(self):
        super(TestIPDeviceCommandBase, self).setUp()
        self.ip_dev = mock.Mock()
        self.ip_dev.name = 'eth0'
        self.ip_dev.root_helper = 'sudo'
        self.ip_dev._execute = mock.Mock(return_value='executed')
        self.ip_cmd = ip_lib.IpDeviceCommandBase(self.ip_dev)
        self.ip_cmd.COMMAND = 'foo'

    def test_name_property(self):
        self.assertEqual('eth0', self.ip_cmd.name)
class TestIPCmdBase(unittest.TestCase):
    """Shared fixture for the per-command test classes below.

    Subclasses set self.command and use the _assert_* helpers to verify
    how their command object invoked the (mocked) parent device.
    """

    def setUp(self):
        super(TestIPCmdBase, self).setUp()
        self.parent = mock.Mock()
        self.parent.name = 'eth0'
        self.parent.root_helper = 'sudo'

    def _assert_call(self, options, args):
        # The command should have gone through parent._run (no root).
        expected = mock.call._run(options, self.command, args)
        self.parent.assert_has_calls([expected])

    def _assert_sudo(self, options, args, force_root_namespace=False):
        # The command should have escalated via parent._as_root.
        expected = mock.call._as_root(options, self.command, args,
                                      force_root_namespace)
        self.parent.assert_has_calls([expected])
class TestIpLinkCommand(TestIPCmdBase):
    """Tests for IpLinkCommand: `ip link set ...` and attribute parsing."""

    def setUp(self):
        super(TestIpLinkCommand, self).setUp()
        # Property tests parse the eth0 entry from the -o link sample.
        self.parent._run.return_value = LINK_SAMPLE[1]
        self.command = 'link'
        self.link_cmd = ip_lib.IpLinkCommand(self.parent)

    def test_set_address(self):
        self.link_cmd.set_address('aa:bb:cc:dd:ee:ff')
        self._assert_sudo([], ('set', 'eth0', 'address', 'aa:bb:cc:dd:ee:ff'))

    def test_set_mtu(self):
        self.link_cmd.set_mtu(1500)
        self._assert_sudo([], ('set', 'eth0', 'mtu', 1500))

    def test_set_up(self):
        self.link_cmd.set_up()
        self._assert_sudo([], ('set', 'eth0', 'up'))

    def test_set_down(self):
        self.link_cmd.set_down()
        self._assert_sudo([], ('set', 'eth0', 'down'))

    def test_set_netns(self):
        self.link_cmd.set_netns('foo')
        self._assert_sudo([], ('set', 'eth0', 'netns', 'foo'))
        # Moving the device also updates the parent's notion of namespace.
        self.assertEqual(self.parent.namespace, 'foo')

    def test_set_name(self):
        self.link_cmd.set_name('tap1')
        self._assert_sudo([], ('set', 'eth0', 'name', 'tap1'))
        # Renaming the device also updates the parent's name.
        self.assertEqual(self.parent.name, 'tap1')

    def test_set_alias(self):
        self.link_cmd.set_alias('openvswitch')
        self._assert_sudo([], ('set', 'eth0', 'alias', 'openvswitch'))

    def test_delete(self):
        self.link_cmd.delete()
        self._assert_sudo([], ('delete', 'eth0'))

    def test_address_property(self):
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(self.link_cmd.address, 'cc:dd:ee:ff:ab:cd')

    def test_mtu_property(self):
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(self.link_cmd.mtu, 1500)

    def test_qdisc_property(self):
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(self.link_cmd.qdisc, 'mq')

    def test_qlen_property(self):
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(self.link_cmd.qlen, 1000)

    def test_alias_property(self):
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(self.link_cmd.alias, 'openvswitch')

    def test_state_property(self):
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(self.link_cmd.state, 'UP')

    def test_settings_property(self):
        # attributes should expose every parsed field of the sample entry.
        expected = {'mtu': 1500,
                    'qlen': 1000,
                    'state': 'UP',
                    'qdisc': 'mq',
                    'brd': 'ff:ff:ff:ff:ff:ff',
                    'link/ether': 'cc:dd:ee:ff:ab:cd',
                    'alias': 'openvswitch'}
        self.parent._execute = mock.Mock(return_value=LINK_SAMPLE[1])
        self.assertEqual(self.link_cmd.attributes, expected)
        self._assert_call('o', ('show', 'eth0'))
class TestIpAddrCommand(TestIPCmdBase):
    """Tests for IpAddrCommand: address add/delete/flush and list parsing."""

    def setUp(self):
        super(TestIpAddrCommand, self).setUp()
        self.parent.name = 'tap0'
        self.command = 'addr'
        self.addr_cmd = ip_lib.IpAddrCommand(self.parent)

    def test_add_address(self):
        self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255')
        # Default scope is 'global'.
        self._assert_sudo([4],
                          ('add', '192.168.45.100/24', 'brd', '192.168.45.255',
                           'scope', 'global', 'dev', 'tap0'))

    def test_add_address_scoped(self):
        self.addr_cmd.add(4, '192.168.45.100/24', '192.168.45.255',
                          scope='link')
        self._assert_sudo([4],
                          ('add', '192.168.45.100/24', 'brd', '192.168.45.255',
                           'scope', 'link', 'dev', 'tap0'))

    def test_del_address(self):
        self.addr_cmd.delete(4, '192.168.45.100/24')
        self._assert_sudo([4],
                          ('del', '192.168.45.100/24', 'dev', 'tap0'))

    def test_flush(self):
        self.addr_cmd.flush()
        self._assert_sudo([], ('flush', 'tap0'))

    def test_list(self):
        # Both address samples must parse to the same structured result.
        expected = [
            dict(ip_version=4, scope='global',
                 dynamic=False, cidr='172.16.77.240/24',
                 broadcast='172.16.77.255'),
            dict(ip_version=6, scope='global',
                 dynamic=True, cidr='2001:470:9:1224:5595:dd51:6ba2:e788/64',
                 broadcast='::'),
            dict(ip_version=6, scope='global',
                 dynamic=True, cidr='2001:470:9:1224:fd91:272:581e:3a32/64',
                 broadcast='::'),
            dict(ip_version=6, scope='global',
                 dynamic=True, cidr='2001:470:9:1224:4508:b885:5fb:740b/64',
                 broadcast='::'),
            dict(ip_version=6, scope='global',
                 dynamic=True, cidr='2001:470:9:1224:dfcc:aaff:feb9:76ce/64',
                 broadcast='::'),
            dict(ip_version=6, scope='link',
                 dynamic=False, cidr='fe80::dfcc:aaff:feb9:76ce/64',
                 broadcast='::')]
        test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
        for test_case in test_cases:
            self.parent._run = mock.Mock(return_value=test_case)
            self.assertEqual(self.addr_cmd.list(), expected)
            self._assert_call([], ('show', 'tap0'))

    def test_list_filtered(self):
        # Only the first 4 sample lines are fed in, so just the IPv4
        # address should come back when filtering on scope/permanent.
        expected = [
            dict(ip_version=4, scope='global',
                 dynamic=False, cidr='172.16.77.240/24',
                 broadcast='172.16.77.255')]
        test_cases = [ADDR_SAMPLE, ADDR_SAMPLE2]
        for test_case in test_cases:
            output = '\n'.join(test_case.split('\n')[0:4])
            self.parent._run.return_value = output
            self.assertEqual(self.addr_cmd.list('global',
                             filters=['permanent']), expected)
            self._assert_call([], ('show', 'tap0', 'permanent', 'scope',
                              'global'))
class TestIpRouteCommand(TestIPCmdBase):
    """Checks the "ip route ..." argument tuples built by IpRouteCommand."""
    def setUp(self):
        super(TestIpRouteCommand, self).setUp()
        self.parent.name = 'eth0'
        self.command = 'route'
        self.route_cmd = ip_lib.IpRouteCommand(self.parent)
    def test_add_gateway(self):
        gateway = '192.168.45.100'
        metric = 100
        self.route_cmd.add_gateway(gateway, metric)
        # 'replace' keeps the call idempotent when a default route exists.
        self._assert_sudo([],
                          ('replace', 'default', 'via', gateway,
                           'metric', metric,
                           'dev', self.parent.name))
    def test_del_gateway(self):
        gateway = '192.168.45.100'
        self.route_cmd.delete_gateway(gateway)
        self._assert_sudo([],
                          ('del', 'default', 'via', gateway,
                           'dev', self.parent.name))
    def test_get_gateway(self):
        # Samples cover: default route with metric (twice), no default
        # route at all, and a default route without a metric.
        test_cases = [{'sample': GATEWAY_SAMPLE1,
                       'expected': {'gateway': '10.35.19.254',
                                    'metric': 100}},
                      {'sample': GATEWAY_SAMPLE2,
                       'expected': {'gateway': '10.35.19.254',
                                    'metric': 100}},
                      {'sample': GATEWAY_SAMPLE3,
                       'expected': None},
                      {'sample': GATEWAY_SAMPLE4,
                       'expected': {'gateway': '10.35.19.254'}}]
        for test_case in test_cases:
            self.parent._run = mock.Mock(return_value=test_case['sample'])
            self.assertEqual(self.route_cmd.get_gateway(),
                             test_case['expected'])
    def test_pullup_route(self):
        # interface is not the first in the list - requires
        # deleting and creating existing entries
        output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE1]
        def pullup_side_effect(self, *args):
            # First call returns the device route, second the subnet list.
            result = output.pop(0)
            return result
        self.parent._run = mock.Mock(side_effect=pullup_side_effect)
        self.route_cmd.pullup_route('tap1d7888a7-10')
        self._assert_sudo([], ('del', '10.0.0.0/24', 'dev', 'qr-23380d11-d2'))
        self._assert_sudo([], ('append', '10.0.0.0/24', 'proto', 'kernel',
                               'src', '10.0.0.1', 'dev', 'qr-23380d11-d2'))
    def test_pullup_route_first(self):
        # interface is first in the list - no changes
        output = [DEVICE_ROUTE_SAMPLE, SUBNET_SAMPLE2]
        def pullup_side_effect(self, *args):
            result = output.pop(0)
            return result
        self.parent._run = mock.Mock(side_effect=pullup_side_effect)
        self.route_cmd.pullup_route('tap1d7888a7-10')
        # Check two calls - device get and subnet get
        self.assertEqual(len(self.parent._run.mock_calls), 2)
class TestIpNetnsCommand(TestIPCmdBase):
    """Checks namespace add/delete/exists/execute handling of IpNetnsCommand."""
    def setUp(self):
        super(TestIpNetnsCommand, self).setUp()
        self.command = 'netns'
        self.netns_cmd = ip_lib.IpNetnsCommand(self.parent)
    def test_add_namespace(self):
        ns = self.netns_cmd.add('ns')
        # Namespace management must always run in the root namespace.
        self._assert_sudo([], ('add', 'ns'), force_root_namespace=True)
        self.assertEqual(ns.namespace, 'ns')
    def test_delete_namespace(self):
        with mock.patch('akanda.rug.common.linux.utils.execute'):
            self.netns_cmd.delete('ns')
            self._assert_sudo([], ('delete', 'ns'), force_root_namespace=True)
    def test_namespace_exists(self):
        retval = '\n'.join(NETNS_SAMPLE)
        self.parent._as_root.return_value = retval
        self.assertTrue(
            self.netns_cmd.exists('bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb'))
        self._assert_sudo('o', ('list',), force_root_namespace=True)
    def test_namespace_doest_not_exist(self):
        retval = '\n'.join(NETNS_SAMPLE)
        self.parent._as_root.return_value = retval
        self.assertFalse(
            self.netns_cmd.exists('bbbbbbbb-1111-2222-3333-bbbbbbbbbbbb'))
        self._assert_sudo('o', ('list',), force_root_namespace=True)
    def test_execute(self):
        self.parent.namespace = 'ns'
        with mock.patch('akanda.rug.common.linux.utils.execute') as execute:
            self.netns_cmd.execute(['ip', 'link', 'list'])
            execute.assert_called_once_with(['ip', 'netns', 'exec', 'ns', 'ip',
                                             'link', 'list'],
                                            root_helper='sudo',
                                            check_exit_code=True)
    def test_execute_env_var_prepend(self):
        self.parent.namespace = 'ns'
        with mock.patch('akanda.rug.common.linux.utils.execute') as execute:
            # Env vars are prepended as VAR=value tokens, order preserved.
            env = collections.OrderedDict([('FOO', 1), ('BAR', 2)])
            self.netns_cmd.execute(['ip', 'link', 'list'], env)
            execute.assert_called_once_with(
                ['FOO=1', 'BAR=2', 'ip', 'netns', 'exec', 'ns', 'ip', 'link',
                 'list'],
                root_helper='sudo', check_exit_code=True)
class TestDeviceExists(unittest.TestCase):
    """Behaviour of the module-level ip_lib.device_exists() helper."""

    def test_device_exists(self):
        # A successful "ip link show" means the device is present.
        patcher = mock.patch.object(ip_lib.IPDevice, '_execute')
        mocked_exec = patcher.start()
        self.addCleanup(patcher.stop)
        mocked_exec.return_value = LINK_SAMPLE[1]
        self.assertTrue(ip_lib.device_exists('eth0'))
        mocked_exec.assert_called_once_with('o', 'link', ('show', 'eth0'))

    def test_device_does_not_exist(self):
        # A RuntimeError from "ip link show" is treated as "device missing".
        patcher = mock.patch.object(ip_lib.IPDevice, '_execute')
        mocked_exec = patcher.start()
        self.addCleanup(patcher.stop)
        mocked_exec.return_value = ''
        mocked_exec.side_effect = RuntimeError
        self.assertFalse(ip_lib.device_exists('eth0'))
| |
# Copyright: See the LICENSE file.
"""Tests for factory_boy/SQLAlchemy interactions."""
import unittest
from unittest import mock
import sqlalchemy
import factory
from factory.alchemy import SQLAlchemyModelFactory
from .alchemyapp import models
class StandardFactory(SQLAlchemyModelFactory):
    """Factory for the basic model, bound to the shared test session."""
    class Meta:
        model = models.StandardModel
        sqlalchemy_session = models.session
    id = factory.Sequence(lambda n: n)
    foo = factory.Sequence(lambda n: 'foo%d' % n)
class NonIntegerPkFactory(SQLAlchemyModelFactory):
    """Factory whose primary key is a string ('foo0', 'foo1', ...)."""
    class Meta:
        model = models.NonIntegerPk
        sqlalchemy_session = models.session
    id = factory.Sequence(lambda n: 'foo%d' % n)
class NoSessionFactory(SQLAlchemyModelFactory):
    """Factory deliberately configured without a session (create() must fail)."""
    class Meta:
        model = models.StandardModel
        sqlalchemy_session = None
    id = factory.Sequence(lambda n: n)
class MultifieldModelFactory(SQLAlchemyModelFactory):
    """get_or_create keyed on 'slug'; commits after each creation."""
    class Meta:
        model = models.MultiFieldModel
        sqlalchemy_get_or_create = ('slug',)
        sqlalchemy_session = models.session
        sqlalchemy_session_persistence = 'commit'
    id = factory.Sequence(lambda n: n)
    foo = factory.Sequence(lambda n: 'foo%d' % n)
class WithGetOrCreateFieldFactory(SQLAlchemyModelFactory):
    """get_or_create keyed on 'foo' for the basic model."""
    class Meta:
        model = models.StandardModel
        sqlalchemy_get_or_create = ('foo',)
        sqlalchemy_session = models.session
        sqlalchemy_session_persistence = 'commit'
    id = factory.Sequence(lambda n: n)
    foo = factory.Sequence(lambda n: 'foo%d' % n)
class WithMultipleGetOrCreateFieldsFactory(SQLAlchemyModelFactory):
    """get_or_create keyed on the ('slug', 'text') pair."""
    class Meta:
        model = models.MultifieldUniqueModel
        sqlalchemy_get_or_create = ("slug", "text",)
        sqlalchemy_session = models.session
        sqlalchemy_session_persistence = 'commit'
    id = factory.Sequence(lambda n: n)
    slug = factory.Sequence(lambda n: "slug%s" % n)
    text = factory.Sequence(lambda n: "text%s" % n)
class SQLAlchemyPkSequenceTestCase(unittest.TestCase):
    """Sequence counters and primary-key values are independent concerns."""
    def setUp(self):
        super().setUp()
        # Start the shared counter at 1 and drop any uncommitted rows.
        StandardFactory.reset_sequence(1)
        NonIntegerPkFactory._meta.sqlalchemy_session.rollback()
    def test_pk_first(self):
        std = StandardFactory.build()
        self.assertEqual('foo1', std.foo)
    def test_pk_many(self):
        std1 = StandardFactory.build()
        std2 = StandardFactory.build()
        self.assertEqual('foo1', std1.foo)
        self.assertEqual('foo2', std2.foo)
    def test_pk_creation(self):
        std1 = StandardFactory.create()
        self.assertEqual('foo1', std1.foo)
        self.assertEqual(1, std1.id)
        # reset_sequence() with no argument restarts from 0.
        StandardFactory.reset_sequence()
        std2 = StandardFactory.create()
        self.assertEqual('foo0', std2.foo)
        self.assertEqual(0, std2.id)
    def test_pk_force_value(self):
        std1 = StandardFactory.create(id=10)
        self.assertEqual('foo1', std1.foo)  # sequence and pk are unrelated
        self.assertEqual(10, std1.id)
        StandardFactory.reset_sequence()
        std2 = StandardFactory.create()
        self.assertEqual('foo0', std2.foo)  # Sequence doesn't care about pk
        self.assertEqual(0, std2.id)
class SQLAlchemyGetOrCreateTests(unittest.TestCase):
    """Behaviour of sqlalchemy_get_or_create with a single key field."""
    def setUp(self):
        models.session.rollback()
    def test_simple_call(self):
        # Same key twice -> the second call returns the existing row.
        obj1 = WithGetOrCreateFieldFactory(foo='foo1')
        obj2 = WithGetOrCreateFieldFactory(foo='foo1')
        self.assertEqual(obj1, obj2)
    def test_missing_arg(self):
        # The get_or_create key has no declared default, so it is required.
        with self.assertRaises(factory.FactoryError):
            MultifieldModelFactory()
    def test_raises_exception_when_existing_objs(self):
        # Two pre-existing matches make the lookup ambiguous.
        StandardFactory.create_batch(2, foo='foo')
        with self.assertRaises(sqlalchemy.orm.exc.MultipleResultsFound):
            WithGetOrCreateFieldFactory(foo='foo')
    def test_multicall(self):
        # Six calls over two alternating slugs create only two rows.
        objs = MultifieldModelFactory.create_batch(
            6,
            slug=factory.Iterator(['main', 'alt']),
        )
        self.assertEqual(6, len(objs))
        self.assertEqual(2, len(set(objs)))
        self.assertEqual(
            list(
                obj.slug for obj in models.session.query(
                    models.MultiFieldModel.slug
                )
            ),
            ["alt", "main"],
        )
class MultipleGetOrCreateFieldsTest(unittest.TestCase):
    """Behaviour of sqlalchemy_get_or_create with a composite key."""
    def setUp(self):
        models.session.rollback()
    def test_one_defined(self):
        # Reusing only one key field still matches the existing row
        # because the other field keeps its sequence value.
        obj1 = WithMultipleGetOrCreateFieldsFactory()
        obj2 = WithMultipleGetOrCreateFieldsFactory(slug=obj1.slug)
        self.assertEqual(obj1, obj2)
    def test_both_defined(self):
        # Same slug but different text collides with the unique constraint.
        obj1 = WithMultipleGetOrCreateFieldsFactory()
        with self.assertRaises(sqlalchemy.exc.IntegrityError):
            WithMultipleGetOrCreateFieldsFactory(slug=obj1.slug, text="alt")
    def test_unique_field_not_in_get_or_create(self):
        # 'title' is unique but not a lookup key, so the second call
        # tries to INSERT a duplicate and fails.
        WithMultipleGetOrCreateFieldsFactory(title='Title')
        with self.assertRaises(sqlalchemy.exc.IntegrityError):
            WithMultipleGetOrCreateFieldsFactory(title='Title')
class SQLAlchemySessionPersistenceTestCase(unittest.TestCase):
    """The sqlalchemy_session_persistence option drives flush/commit calls."""
    def setUp(self):
        super().setUp()
        # A spec'd mock session records flush()/commit() without touching a DB.
        self.mock_session = mock.NonCallableMagicMock(spec=models.session)
    def test_flushing(self):
        class FlushingPersistenceFactory(StandardFactory):
            class Meta:
                sqlalchemy_session = self.mock_session
                sqlalchemy_session_persistence = 'flush'
        self.mock_session.commit.assert_not_called()
        self.mock_session.flush.assert_not_called()
        FlushingPersistenceFactory.create()
        self.mock_session.commit.assert_not_called()
        self.mock_session.flush.assert_called_once_with()
    def test_committing(self):
        class CommittingPersistenceFactory(StandardFactory):
            class Meta:
                sqlalchemy_session = self.mock_session
                sqlalchemy_session_persistence = 'commit'
        self.mock_session.commit.assert_not_called()
        self.mock_session.flush.assert_not_called()
        CommittingPersistenceFactory.create()
        self.mock_session.commit.assert_called_once_with()
        self.mock_session.flush.assert_not_called()
    def test_noflush_nocommit(self):
        class InactivePersistenceFactory(StandardFactory):
            class Meta:
                sqlalchemy_session = self.mock_session
                sqlalchemy_session_persistence = None
        self.mock_session.commit.assert_not_called()
        self.mock_session.flush.assert_not_called()
        InactivePersistenceFactory.create()
        self.mock_session.commit.assert_not_called()
        self.mock_session.flush.assert_not_called()
    def test_type_error(self):
        # Invalid persistence values are rejected at class-definition time.
        with self.assertRaises(TypeError):
            class BadPersistenceFactory(StandardFactory):
                class Meta:
                    sqlalchemy_session_persistence = 'invalid_persistence_option'
                    model = models.StandardModel
class SQLAlchemyNonIntegerPkTestCase(unittest.TestCase):
    """Sequences also drive string primary keys ('foo0', 'foo1', ...)."""
    def setUp(self):
        super().setUp()
        NonIntegerPkFactory.reset_sequence()
        NonIntegerPkFactory._meta.sqlalchemy_session.rollback()
    def test_first(self):
        nonint = NonIntegerPkFactory.build()
        self.assertEqual('foo0', nonint.id)
    def test_many(self):
        nonint1 = NonIntegerPkFactory.build()
        nonint2 = NonIntegerPkFactory.build()
        self.assertEqual('foo0', nonint1.id)
        self.assertEqual('foo1', nonint2.id)
    def test_creation(self):
        nonint1 = NonIntegerPkFactory.create()
        self.assertEqual('foo0', nonint1.id)
        NonIntegerPkFactory.reset_sequence()
        nonint2 = NonIntegerPkFactory.build()
        self.assertEqual('foo0', nonint2.id)
    def test_force_pk(self):
        # Forcing a pk value does not advance the sequence.
        nonint1 = NonIntegerPkFactory.create(id='foo10')
        self.assertEqual('foo10', nonint1.id)
        NonIntegerPkFactory.reset_sequence()
        nonint2 = NonIntegerPkFactory.create()
        self.assertEqual('foo0', nonint2.id)
class SQLAlchemyNoSessionTestCase(unittest.TestCase):
    """A factory with sqlalchemy_session=None can build() but not create()."""

    def test_create_raises_exception_when_no_session_was_set(self):
        # create() needs a session to persist the object.
        self.assertRaises(RuntimeError, NoSessionFactory.create)

    def test_build_does_not_raises_exception_when_no_session_was_set(self):
        # build() never touches the session, so it must keep working.
        NoSessionFactory.reset_sequence()  # Make sure we start at test ID 0
        instances = [NoSessionFactory.build() for _ in range(2)]
        self.assertEqual([obj.id for obj in instances], [0, 1])
class NameConflictTests(unittest.TestCase):
    """Regression test for `TypeError: _save() got multiple values for argument 'session'`

    See #775.
    """
    def test_no_name_conflict_on_save(self):
        # A model field literally named 'session' must not shadow the
        # session argument passed internally to _save().
        class SpecialFieldWithSaveFactory(SQLAlchemyModelFactory):
            class Meta:
                model = models.SpecialFieldModel
                sqlalchemy_session = models.session
            id = factory.Sequence(lambda n: n)
            session = ''
        saved_child = SpecialFieldWithSaveFactory()
        self.assertEqual(saved_child.session, "")
    def test_no_name_conflict_on_get_or_create(self):
        # Same conflict, but through the _get_or_create() code path.
        class SpecialFieldWithGetOrCreateFactory(SQLAlchemyModelFactory):
            class Meta:
                model = models.SpecialFieldModel
                sqlalchemy_get_or_create = ('session',)
                sqlalchemy_session = models.session
            id = factory.Sequence(lambda n: n)
            session = ''
        get_or_created_child = SpecialFieldWithGetOrCreateFactory()
        self.assertEqual(get_or_created_child.session, "")
| |
#! /usr/bin/env python
# scapy.contrib.description = Cisco Discovery Protocol
# scapy.contrib.status = loads
#############################################################################
## ##
## cdp.py --- Cisco Discovery Protocol (CDP) extension for Scapy ##
## ##
## Copyright (C) 2006 Nicolas Bareil <nicolas.bareil AT eads DOT net> ##
## Arnaud Ebalard <arnaud.ebalard AT eads DOT net> ##
## EADS/CRC security team ##
## ##
## This file is part of Scapy ##
## Scapy is free software: you can redistribute it and/or modify it ##
## under the terms of the GNU General Public License version 2 as ##
## published by the Free Software Foundation; version 2. ##
## ##
## This program is distributed in the hope that it will be useful, but ##
## WITHOUT ANY WARRANTY; without even the implied warranty of ##
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ##
## General Public License for more details. ##
## ##
#############################################################################
from __future__ import absolute_import
from scapy.packet import *
from scapy.fields import *
from scapy.layers.inet6 import *
from scapy.modules.six.moves import range
#####################################################################
# Helpers and constants
#####################################################################
# CDP TLV classes keyed by type
# Dispatch table: TLV type -> name of the Packet subclass that parses it.
# Commented-out entries are types with no dedicated class yet; they fall
# back to CDPMsgGeneric in _CDPGuessPayloadClass.
_cdp_tlv_cls = { 0x0001: "CDPMsgDeviceID",
                 0x0002: "CDPMsgAddr",
                 0x0003: "CDPMsgPortID",
                 0x0004: "CDPMsgCapabilities",
                 0x0005: "CDPMsgSoftwareVersion",
                 0x0006: "CDPMsgPlatform",
                 0x0007: "CDPMsgIPPrefix",
                 0x0008: "CDPMsgProtoHello",
                 0x0009: "CDPMsgVTPMgmtDomain", # CDPv2
                 0x000a: "CDPMsgNativeVLAN",    # CDPv2
                 0x000b: "CDPMsgDuplex",        #
#                0x000c: "CDPMsgGeneric",
#                0x000d: "CDPMsgGeneric",
                 0x000e: "CDPMsgVoIPVLANReply",
                 0x000f: "CDPMsgVoIPVLANQuery",
                 0x0010: "CDPMsgPower",
                 0x0011: "CDPMsgMTU",
                 0x0012: "CDPMsgTrustBitmap",
                 0x0013: "CDPMsgUntrustedPortCoS",
#                0x0014: "CDPMsgSystemName",
#                0x0015: "CDPMsgSystemOID",
                 0x0016: "CDPMsgMgmtAddr",
#                0x0017: "CDPMsgLocation",
                 0x0019: "CDPMsgUnknown19",
#                0x001a: "CDPPowerAvailable"
                 }

# Human-readable TLV type names, used by XShortEnumField displays.
_cdp_tlv_types = { 0x0001: "Device ID",
                   0x0002: "Addresses",
                   0x0003: "Port ID",
                   0x0004: "Capabilities",
                   0x0005: "Software Version",
                   0x0006: "Platform",
                   0x0007: "IP Prefix",
                   0x0008: "Protocol Hello",
                   0x0009: "VTP Management Domain", # CDPv2
                   0x000a: "Native VLAN",    # CDPv2
                   0x000b: "Duplex",        #
                   0x000c: "CDP Unknown command (send us a pcap file)",
                   0x000d: "CDP Unknown command (send us a pcap file)",
                   0x000e: "VoIP VLAN Reply",
                   0x000f: "VoIP VLAN Query",
                   0x0010: "Power",
                   0x0011: "MTU",
                   0x0012: "Trust Bitmap",
                   0x0013: "Untrusted Port CoS",
                   0x0014: "System Name",
                   0x0015: "System OID",
                   0x0016: "Management Address",
                   0x0017: "Location",
                   0x0018: "CDP Unknown command (send us a pcap file)",
                   0x0019: "CDP Unknown command (send us a pcap file)",
                   0x001a: "Power Available"}
def _CDPGuessPayloadClass(p, **kargs):
    """Instantiate the CDP message class matching the TLV's 2-byte type.

    Unknown or truncated TLVs fall back to conf.raw_layer / CDPMsgGeneric.
    """
    cls = conf.raw_layer
    if len(p) >= 2:
        tlv_type = struct.unpack("!H", p[:2])[0]
        cls = globals()[_cdp_tlv_cls.get(tlv_type, "CDPMsgGeneric")]
    return cls(p, **kargs)
class CDPMsgGeneric(Packet):
    """Base CDP TLV: 2-byte type, 2-byte total length, raw value bytes."""
    name = "CDP Generic Message"
    fields_desc = [ XShortEnumField("type", None, _cdp_tlv_types),
                    FieldLenField("len", None, "val", "!H"),
                    # len counts the whole TLV, so the value part is len - 4.
                    StrLenField("val", "", length_from=lambda x:x.len - 4) ]
    def guess_payload_class(self, p):
        # Remaining bytes are the next TLV, dissected by the parent's
        # PacketListField, not a payload of this one.
        return conf.padding_layer # _CDPGuessPayloadClass
class CDPMsgDeviceID(CDPMsgGeneric):
    """Device ID TLV (0x0001); the value layout is the generic one."""
    name = "Device ID"
    type = 0x0001
# Protocol-type values used inside address records (NLPID vs 802.2 SNAP).
_cdp_addr_record_ptype = {0x01: "NLPID", 0x02: "802.2"}
# Protocol field values: NLPID 0xCC selects IPv4; the 802.2 SNAP header
# ending in ethertype 0x86dd selects IPv6.
_cdp_addrrecord_proto_ip = b"\xcc"
_cdp_addrrecord_proto_ipv6 = b"\xaa\xaa\x03\x00\x00\x00\x86\xdd"
class CDPAddrRecord(Packet):
    """One address record inside a CDP Addresses TLV.

    Layout: protocol type (1 byte), protocol length + bytes, then a
    2-byte address length and the address bytes themselves.
    """
    name = "CDP Address"
    fields_desc = [ ByteEnumField("ptype", 0x01, _cdp_addr_record_ptype),
                    FieldLenField("plen", None, "proto", "B"),
                    StrLenField("proto", None, length_from=lambda x:x.plen),
                    # length_of must be the *name* of the measured field.
                    # The previous code passed a lambda, which FieldLenField
                    # cannot resolve, so addrlen was never auto-computed
                    # when building a record.
                    FieldLenField("addrlen", None, length_of="addr", fmt="!H"),
                    StrLenField("addr", None, length_from=lambda x:x.addrlen)]
    def guess_payload_class(self, p):
        # Trailing bytes belong to the next record, not to a payload.
        return conf.padding_layer
class CDPAddrRecordIPv4(CDPAddrRecord):
    """Address record carrying an IPv4 address (NLPID 0xCC, 4-byte addr)."""
    name = "CDP Address IPv4"
    fields_desc = [ ByteEnumField("ptype", 0x01, _cdp_addr_record_ptype),
                    FieldLenField("plen", 1, "proto", "B"),
                    StrLenField("proto", _cdp_addrrecord_proto_ip, length_from=lambda x:x.plen),
                    ShortField("addrlen", 4),
                    IPField("addr", "0.0.0.0")]
class CDPAddrRecordIPv6(CDPAddrRecord):
    """Address record carrying an IPv6 address (SNAP 0x86dd, 16-byte addr)."""
    name = "CDP Address IPv6"
    fields_desc = [ ByteEnumField("ptype", 0x02, _cdp_addr_record_ptype),
                    FieldLenField("plen", 8, "proto", "B"),
                    StrLenField("proto", _cdp_addrrecord_proto_ipv6, length_from=lambda x:x.plen),
                    ShortField("addrlen", 16),
                    IP6Field("addr", "::1")]
def _CDPGuessAddrRecord(p, **kargs):
    """Pick the concrete CDPAddrRecord subclass from the protocol bytes.

    Portable across Python 2 and 3: on Python 3, ``p[1]`` yields an int,
    so the length byte is unpacked from a one-byte slice, and the
    protocol value is taken as a plain slice instead of the old
    ``''.join(struct.unpack(...))`` (which fails on bytes under Py3).
    """
    cls = conf.raw_layer
    if len(p) >= 2:
        plen = struct.unpack("B", p[1:2])[0]
        proto = p[2:plen + 2]
        if proto == _cdp_addrrecord_proto_ip:
            clsname = "CDPAddrRecordIPv4"
        elif proto == _cdp_addrrecord_proto_ipv6:
            clsname = "CDPAddrRecordIPv6"
        else:
            clsname = "CDPAddrRecord"
        cls = globals()[clsname]
    return cls(p, **kargs)
class CDPMsgAddr(CDPMsgGeneric):
    """CDP "Addresses" TLV (0x0002): a 4-byte count then address records."""
    name = "Addresses"
    fields_desc = [ XShortEnumField("type", 0x0002, _cdp_tlv_types),
                    ShortField("len", None),
                    FieldLenField("naddr", None, "addr", "!I"),
                    PacketListField("addr", [], _CDPGuessAddrRecord, count_from=lambda x:x.naddr) ]
    def post_build(self, pkt, pay):
        # Fill in the TLV length from the actual built size.  The old code
        # assumed 9 bytes per record (8 + naddr * 9), which is only true
        # for IPv4 records and produced a wrong length for IPv6 ones.
        if self.len is None:
            pkt = pkt[:2] + struct.pack("!H", len(pkt)) + pkt[4:]
        return pkt + pay
class CDPMsgPortID(CDPMsgGeneric):
    """Port ID TLV (0x0003): the sending interface's name."""
    name = "Port ID"
    fields_desc = [ XShortEnumField("type", 0x0003, _cdp_tlv_types),
                    FieldLenField("len", None, "iface", "!H"),
                    StrLenField("iface", "Port 1", length_from=lambda x:x.len - 4) ]
# Flag names for the 32-bit capabilities field; only the low 7 bits are
# defined, the rest are filled with generated "BitN" placeholders.
_cdp_capabilities = ["Router",
                     "TransparentBridge",
                     "SourceRouteBridge",
                     "Switch",
                     "Host",
                     "IGMPCapable",
                     "Repeater"] + ["Bit%d" % x for x in range(25, 0, -1)]
class CDPMsgCapabilities(CDPMsgGeneric):
    """Capabilities TLV (0x0004): fixed-size 32-bit flag word."""
    name = "Capabilities"
    fields_desc = [ XShortEnumField("type", 0x0004, _cdp_tlv_types),
                    ShortField("len", 8),
                    FlagsField("cap", 0, 32, _cdp_capabilities) ]
class CDPMsgSoftwareVersion(CDPMsgGeneric):
    """Software Version TLV (0x0005); value layout is the generic one."""
    name = "Software Version"
    type = 0x0005
class CDPMsgPlatform(CDPMsgGeneric):
    """Platform TLV (0x0006); value layout is the generic one."""
    name = "Platform"
    type = 0x0006
# Duplex values for CDPMsgDuplex.
_cdp_duplex = { 0x00: "Half", 0x01: "Full" }
# ODR Routing
class CDPMsgIPPrefix(CDPMsgGeneric):
    """IP Prefix TLV (0x0007), used by ODR routing to announce a gateway."""
    name = "IP Prefix"
    type = 0x0007
    fields_desc = [ XShortEnumField("type", 0x0007, _cdp_tlv_types),
                    ShortField("len", 8),
                    IPField("defaultgw", "192.168.0.1") ]
class CDPMsgProtoHello(CDPMsgGeneric):
    """Protocol Hello TLV (0x0008): OUI + protocol id + opaque data."""
    name = "Protocol Hello"
    type = 0x0008
    fields_desc = [ XShortEnumField("type", 0x0008, _cdp_tlv_types),
                    ShortField("len", 32),
                    X3BytesField("oui", 0x00000c),
                    XShortField("protocol_id", 0x0),
                    # TLV length (len) - 2 (type) - 2 (len) - 3 (OUI) - 2
                    # (Protocol ID)
                    StrLenField("data", "", length_from=lambda p: p.len - 9) ]
class CDPMsgVTPMgmtDomain(CDPMsgGeneric):
    """VTP Management Domain TLV (0x0009, CDPv2); generic value layout."""
    name = "VTP Management Domain"
    type = 0x0009
class CDPMsgNativeVLAN(CDPMsgGeneric):
    """Native VLAN TLV (0x000a, CDPv2): a single 16-bit VLAN id."""
    name = "Native VLAN"
    fields_desc = [ XShortEnumField("type", 0x000a, _cdp_tlv_types),
                    ShortField("len", 6),
                    ShortField("vlan", 1) ]
class CDPMsgDuplex(CDPMsgGeneric):
    """Duplex TLV (0x000b): half/full duplex as one byte."""
    name = "Duplex"
    fields_desc = [ XShortEnumField("type", 0x000b, _cdp_tlv_types),
                    ShortField("len", 5),
                    ByteEnumField("duplex", 0x00, _cdp_duplex) ]
class CDPMsgVoIPVLANReply(CDPMsgGeneric):
    """VoIP VLAN Reply TLV (0x000e)."""
    name = "VoIP VLAN Reply"
    fields_desc = [ XShortEnumField("type", 0x000e, _cdp_tlv_types),
                    ShortField("len", 7),
                    # NOTE(review): "status?" is not a valid Python attribute
                    # name, so this field is only reachable via getattr();
                    # renaming it would change the on-object field key, so it
                    # is left as-is here.
                    ByteField("status?", 1),
                    ShortField("vlan", 1) ]
class CDPMsgVoIPVLANQuery(CDPMsgGeneric):
    """VoIP VLAN Query TLV (0x000f)."""
    name = "VoIP VLAN Query"
    type = 0x000f
    fields_desc = [ XShortEnumField("type", 0x000f, _cdp_tlv_types),
                    ShortField("len", 7),
                    XByteField("unknown1", 0),
                    ShortField("vlan", 1),
                    # TLV length (len) - 2 (type) - 2 (len) - 1 (unknown1) - 2 (vlan)
                    StrLenField("unknown2", "", length_from=lambda p: p.len - 7) ]
class _CDPPowerField(ShortField):
    """A ShortField whose repr renders the value in milliwatts."""
    def i2repr(self, pkt, x):
        milliwatts = x if x is not None else 0
        return "%d mW" % milliwatts
class CDPMsgPower(CDPMsgGeneric):
    """Power TLV (0x0010): consumption in milliwatts."""
    name = "Power"
    # Check if field length is fixed (2 bytes)
    fields_desc = [ XShortEnumField("type", 0x0010, _cdp_tlv_types),
                    ShortField("len", 6),
                    _CDPPowerField("power", 1337)]
class CDPMsgMTU(CDPMsgGeneric):
    """MTU TLV (0x0011)."""
    name = "MTU"
    # Check if field length is fixed (2 bytes)
    fields_desc = [ XShortEnumField("type", 0x0011, _cdp_tlv_types),
                    ShortField("len", 6),
                    ShortField("mtu", 1500)]
class CDPMsgTrustBitmap(CDPMsgGeneric):
    """Trust Bitmap TLV (0x0012): one opaque byte."""
    name = "Trust Bitmap"
    fields_desc = [ XShortEnumField("type", 0x0012, _cdp_tlv_types),
                    ShortField("len", 5),
                    XByteField("trust_bitmap", 0x0) ]
class CDPMsgUntrustedPortCoS(CDPMsgGeneric):
    """Untrusted Port CoS TLV (0x0013): one opaque byte."""
    name = "Untrusted Port CoS"
    fields_desc = [ XShortEnumField("type", 0x0013, _cdp_tlv_types),
                    ShortField("len", 5),
                    XByteField("untrusted_port_cos", 0x0) ]
class CDPMsgMgmtAddr(CDPMsgAddr):
    """Management Address TLV (0x0016); same layout as the Addresses TLV."""
    name = "Management Address"
    type = 0x0016
class CDPMsgUnknown19(CDPMsgGeneric):
    """Undocumented TLV type 0x0019; parsed with the generic layout."""
    name = "Unknown CDP Message"
    type = 0x0019
class CDPMsg(CDPMsgGeneric):
    """Catch-all TLV for manual construction; identical to the generic layout."""
    name = "CDP "
    fields_desc = [ XShortEnumField("type", None, _cdp_tlv_types),
                    FieldLenField("len", None, "val", "!H"),
                    StrLenField("val", "", length_from=lambda x:x.len - 4) ]
class _CDPChecksum:
def _check_len(self, pkt):
"""Check for odd packet length and pad according to Cisco spec.
This padding is only used for checksum computation. The original
packet should not be altered."""
if len(pkt) % 2:
last_chr = pkt[-1]
if last_chr <= b'\x80':
return pkt[:-1] + b'\x00' + last_chr
else:
return pkt[:-1] + b'\xff' + chr(ord(last_chr) - 1)
else:
return pkt
def post_build(self, pkt, pay):
p = pkt + pay
if self.cksum is None:
cksum = checksum(self._check_len(p))
p = p[:2] + struct.pack("!H", cksum) + p[4:]
return p
class CDPv2_HDR(_CDPChecksum, CDPMsgGeneric):
    """Top-level CDP header: version, TTL, checksum, then a TLV list."""
    name = "Cisco Discovery Protocol version 2"
    fields_desc = [ ByteField("vers", 2),
                    ByteField("ttl", 180),
                    XShortField("cksum", None),
                    PacketListField("msg", [], _CDPGuessPayloadClass) ]
# CDP rides on 802.2 SNAP with Cisco's OUI (0x00000c) and PID 0x2000.
bind_layers(SNAP, CDPv2_HDR, {"code": 0x2000, "OUI": 0xC})
| |
#-*- coding: utf-8 -*-
# Copyright 2017 ibelie, Chen Jie, Joungtao. All rights reserved.
# Use of this source code is governed by The MIT License
# that can be found in the LICENSE file.
# [joungtao] Kill magic attributes, make "composite field", "message map" and
# "repeated composite field" can be set directly.
from typy.google.protobuf.message import Message
from typy.google.protobuf.internal import python_message
from typy.google.protobuf.internal import type_checkers
from typy.google.protobuf.descriptor import FieldDescriptor
from typy.google.protobuf import text_format
from Type import toType, FixedPoint, Symbol, List, Dict, Instance, PythonDelegate
from Proto import SymbolEncodedLen, EncodeSymbol, DecodeSymbol
from Enum import MetaEnum
def setVariant(obj, value):
    """Store *value* into the best-matching slot of a Variant message.

    Dispatch is by Python type, most specific first: bool is checked
    before int/long because bool subclasses int.  Within each type, the
    preferred slots are tried in order via hasattr().  None clears the
    message; unknown objects fall back to an attribute named after the
    value's class.  Raises TypeError when no compatible slot exists.
    (Python 2 code: uses `long` and `unicode`.)
    """
    if value is None:
        obj.Clear()
    elif isinstance(value, bool):
        # bool first, otherwise isinstance(value, int) would swallow it.
        if hasattr(obj, 'Boolean'):
            obj.Boolean = value
        elif hasattr(obj, 'Long'):
            obj.Long = value
        elif hasattr(obj, 'Integer'):
            obj.Integer = value
        elif hasattr(obj, 'Enum'):
            obj.Enum = value
        elif hasattr(obj, 'Double'):
            obj.Double = value
        elif hasattr(obj, 'Float'):
            obj.Float = value
        elif hasattr(obj, 'FixedPoint'):
            obj.FixedPoint = value
        else:
            raise TypeError
    elif isinstance(value, (int, long)):
        # Integral slots first, then floating-point fallbacks.
        if hasattr(obj, 'Long'):
            obj.Long = value
        elif hasattr(obj, 'Integer'):
            obj.Integer = value
        elif hasattr(obj, 'Enum'):
            obj.Enum = value
        elif hasattr(obj, 'Boolean'):
            obj.Boolean = value
        elif hasattr(obj, 'Double'):
            obj.Double = value
        elif hasattr(obj, 'Float'):
            obj.Float = value
        elif hasattr(obj, 'FixedPoint'):
            obj.FixedPoint = value
        else:
            raise TypeError
    elif isinstance(value, float):
        # Floating-point slots first, then integral fallbacks.
        if hasattr(obj, 'Double'):
            obj.Double = value
        elif hasattr(obj, 'Float'):
            obj.Float = value
        elif hasattr(obj, 'FixedPoint'):
            obj.FixedPoint = value
        elif hasattr(obj, 'Long'):
            obj.Long = value
        elif hasattr(obj, 'Integer'):
            obj.Integer = value
        elif hasattr(obj, 'Enum'):
            obj.Enum = value
        elif hasattr(obj, 'Boolean'):
            obj.Boolean = value
        else:
            raise TypeError
    elif isinstance(value, unicode):
        # Text prefers Symbol, then String, then raw Bytes.
        if hasattr(obj, 'Symbol'):
            obj.Symbol = value
        elif hasattr(obj, 'String'):
            obj.String = value
        elif hasattr(obj, 'Bytes'):
            obj.Bytes = value
        else:
            raise TypeError
    elif isinstance(value, str):
        # Byte strings prefer Symbol, then Bytes before String.
        if hasattr(obj, 'Symbol'):
            obj.Symbol = value
        elif hasattr(obj, 'Bytes'):
            obj.Bytes = value
        elif hasattr(obj, 'String'):
            obj.String = value
        else:
            raise TypeError
    elif hasattr(value, 'iteritems') and hasattr(obj, 'Dict'):
        obj.Dict = value
    elif hasattr(value, '__iter__') and hasattr(obj, 'List'):
        obj.List = value
    else:
        # Last resort: a slot named after the value's concrete class.
        setattr(obj, value.__class__.__name__, value)
class PythonMessage(object):
    """Adapter wrapping a plain Python object behind the protobuf
    message interface (ByteSize/_InternalSerialize/_InternalParse).

    Serialization is delegated to PythonDelegate handlers keyed by the
    wrapped object's type, with dict-like and iterable fallbacks.
    """
    def __init__(self, obj):
        self.obj = obj
        # Always reported as present so the field is serialized.
        self._is_present_in_parent = True
    def __str__(self):
        return str(self.obj)
    def IsInitialized(self):
        return True
    def MergeFrom(self, msg):
        # "Merge" simply replaces the wrapped object.
        if isinstance(msg, PythonMessage):
            self.obj = msg.obj
        else:
            self.obj = msg
    @staticmethod
    def Static_ByteSize(obj):
        if type(obj) in PythonDelegate:
            return PythonDelegate[type(obj)].ByteSize(obj)
        elif hasattr(obj, 'iteritems') and dict in PythonDelegate:
            return PythonDelegate[dict].ByteSize(obj)
        elif hasattr(obj, '__iter__') and list in PythonDelegate:
            return PythonDelegate[list].ByteSize(obj)
        else:
            # No delegate: ask the object itself; None sizes to 0.
            return obj is not None and obj.ByteSize() or 0
    def ByteSize(self):
        return PythonMessage.Static_ByteSize(self.obj)
    @staticmethod
    def Static_InternalSerialize(obj, write):
        if type(obj) in PythonDelegate:
            write(PythonDelegate[type(obj)].Serialize(obj))
        elif hasattr(obj, 'iteritems') and dict in PythonDelegate:
            write(PythonDelegate[dict].Serialize(obj))
        elif hasattr(obj, '__iter__') and list in PythonDelegate:
            write(PythonDelegate[list].Serialize(obj))
        else:
            # None serializes to nothing.
            obj is not None and write(obj.Serialize())
    def _InternalSerialize(self, write):
        PythonMessage.Static_InternalSerialize(self.obj, write)
    def _InternalParse(self, buffer, pos, new_pos):
        # Hand the raw byte range to the matching delegate / the object.
        if type(self.obj) in PythonDelegate:
            PythonDelegate[type(self.obj)].Deserialize(self.obj, buffer[pos: new_pos])
        elif hasattr(self.obj, 'iteritems') and dict in PythonDelegate:
            PythonDelegate[dict].Deserialize(self.obj, buffer[pos: new_pos])
        elif hasattr(self.obj, '__iter__') and list in PythonDelegate:
            PythonDelegate[list].Deserialize(self.obj, buffer[pos: new_pos])
        else:
            pos < new_pos and self.obj.Deserialize(buffer[pos: new_pos])
        return new_pos
class PythonDescriptor(object):
    """Duck-typed protobuf descriptor wrapping a plain Python class."""
    def __init__(self, cls):
        module_name, class_name = cls.__module__, cls.__name__
        self.name = class_name
        self.full_name = '%s.%s' % (module_name, class_name)
        self._concrete_class = cls
        # Delegated Python classes carry no protobuf options or oneofs.
        self.has_options = None
        self.oneofs = None
# Drop protobuf's default __eq__ and only install one when the generated
# class defines no comparison of its own, so user-supplied comparisons win.
del Message.__eq__
Origin_AddEqualsMethod = python_message._AddEqualsMethod
def _AddEqualsMethod(message_descriptor, cls):
    # Keep user-defined __eq__/__ne__/__cmp__ implementations intact.
    if not hasattr(cls, '__eq__') and not hasattr(cls, '__ne__') and not hasattr(cls, '__cmp__'):
        Origin_AddEqualsMethod(message_descriptor, cls)
python_message._AddEqualsMethod = _AddEqualsMethod
Origin_AddByteSizeMethod = python_message._AddByteSizeMethod
def _AddByteSizeMethod(message_descriptor, cls):
    # Wrap ByteSize so the size is recomputed on every call: delegated
    # Python fields can change without notifying protobuf's listeners,
    # which would otherwise leave a stale cached size.
    Origin_AddByteSizeMethod(message_descriptor, cls)
    Origin_ByteSize = cls.ByteSize
    def ByteSize(self):
        self._cached_byte_size_dirty = True
        return Origin_ByteSize(self)
    cls.ByteSize = ByteSize
python_message._AddByteSizeMethod = _AddByteSizeMethod
def _AddHasFieldMethod(message_descriptor, cls):
    """Install a HasField() that mirrors upstream python_message semantics."""
    from typy.google.protobuf import descriptor as descriptor_mod
    is_proto3 = (message_descriptor.syntax == "proto3")
    error_msg = python_message._Proto3HasError if is_proto3 else python_message._Proto2HasError
    hassable_fields = {}
    for field in message_descriptor.fields:
        # For proto3, only submessages and fields inside a oneof have presence.
        if (is_proto3 and field.cpp_type != FieldDescriptor.CPPTYPE_MESSAGE and
            not field.containing_oneof):
            continue
        hassable_fields[field.name] = field
    if not is_proto3:
        # Fields inside oneofs are never repeated (enforced by the compiler).
        for oneof in message_descriptor.oneofs:
            hassable_fields[oneof.name] = oneof
    def HasField(self, field_name):
        try:
            field = hassable_fields[field_name]
        except KeyError:
            raise ValueError(error_msg % field_name)
        if isinstance(field, descriptor_mod.OneofDescriptor):
            # For a oneof, delegate to the currently-set member field.
            try:
                return HasField(self, self._oneofs[field].name)
            except KeyError:
                return False
        else:
            if field.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
                value = self._fields.get(field)
                return value is not None and value._is_present_in_parent
            else:
                return field in self._fields
    cls.HasField = HasField
python_message._AddHasFieldMethod = _AddHasFieldMethod
Origin_DefaultValueConstructorForField = python_message._DefaultValueConstructorForField
def _DefaultValueConstructorForField(field):
    # Non-repeated fields whose type is a plain Python class (wrapped in
    # a PythonDescriptor) default to a PythonMessage around a fresh
    # instance instead of a protobuf submessage.
    if isinstance(field.message_type, PythonDescriptor) and field.label != FieldDescriptor.LABEL_REPEATED:
        def MakePythonMessageDefault(message):
            return PythonMessage(field.message_type._concrete_class())
        return MakePythonMessageDefault
    else:
        return Origin_DefaultValueConstructorForField(field)
python_message._DefaultValueConstructorForField = _DefaultValueConstructorForField
Origin_PrintFieldValue = text_format._Printer.PrintFieldValue
def _PrintFieldValue(self, field, value):
    # Delegated Python objects have no protobuf text representation;
    # fall back to their plain str() form.
    if isinstance(field.message_type, PythonDescriptor):
        self.out.write(str(value))
    else:
        Origin_PrintFieldValue(self, field, value)
text_format._Printer.PrintFieldValue = _PrintFieldValue
class BytesChecker(object):
    """Type checker accepting both str and unicode for string fields.

    Installed in place of the default CPPTYPE_STRING checker so byte
    strings pass through, normalized with str() (Python 2 semantics).
    """
    def DefaultValue(self):
        return b''
    def CheckValue(self, proposed_value):
        # None is allowed and means "clear the field".
        if proposed_value is None: return None
        if not isinstance(proposed_value, (str, unicode)):
            message = ('%.1024r has type %s, but expected one of: %s' %
                       (proposed_value, type(proposed_value), (str, unicode)))
            raise TypeError(message)
        return str(proposed_value)
type_checkers._VALUE_CHECKERS[FieldDescriptor.CPPTYPE_STRING] = BytesChecker()
def _AddListFieldsMethod(message_descriptor, cls):
    """Install a ListFields() that skips empty-but-present fields.

    A field is listed only when it is present AND its serialized size
    exceeds its bare tag size, i.e. it actually carries data.
    """
    # `encoder` was referenced below but never imported at module level,
    # so every ListFields() call raised NameError; import it locally.
    from typy.google.protobuf.internal import encoder
    def ListFields(self):
        all_fields = [(k, v) for k, v in self._fields.iteritems()
                      if python_message._IsPresent((k, v)) and
                      k._sizer(v) > encoder._TagSize(k.number)]
        all_fields.sort(key = lambda item: item[0].number)
        return all_fields
    cls.ListFields = ListFields
python_message._AddListFieldsMethod = _AddListFieldsMethod
def _AddPropertiesForRepeatedField(field, cls):
    """Install a property that, unlike upstream, also supports *assignment*
    to repeated/map fields by clearing and refilling the container."""
    proto_field_name = field.name
    property_name = python_message._PropertyName(proto_field_name)
    def _get_default(self):
        # Lazily create and cache the container for this field.
        field_value = self._fields.get(field)
        if field_value is None:
            field_value = field._default_constructor(self)
            field_value = self._fields.setdefault(field, field_value)
        return field_value
    def getter(self):
        return _get_default(self)
    getter.__module__ = None
    getter.__doc__ = 'Getter for %s.' % proto_field_name
    def setter(self, new_value):
        field_value = _get_default(self)
        if python_message._IsMapField(field):
            # Replace the map contents wholesale.
            field_value._values = {}
            if not new_value:
                field_value._message_listener.Modified()
            else:
                for key in new_value:
                    field_value[key] = new_value[key]
        else:
            # Replace the repeated contents wholesale.
            field_value._values = []
            if not new_value:
                field_value._message_listener.Modified()
            else:
                for item in new_value:
                    field_value.append(item)
        if field.containing_oneof:
            self._UpdateOneofState(field)
    doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
    setattr(cls, property_name, property(getter, setter, doc=doc))
python_message._AddPropertiesForRepeatedField = _AddPropertiesForRepeatedField
Origin_AddPropertiesForNonRepeatedScalarField = python_message._AddPropertiesForNonRepeatedScalarField
def _AddPropertiesForNonRepeatedScalarField(field, cls):
    # Non-enum scalars keep the upstream property; enums get a getter
    # that maps the stored integer to the typy MetaEnum member.
    if field.enum_type is None:
        return Origin_AddPropertiesForNonRepeatedScalarField(field, cls)
    proto_field_name = field.name
    property_name = python_message._PropertyName(proto_field_name)
    type_checker = type_checkers.GetTypeChecker(field)
    default_value = field.default_value
    is_proto3 = field.containing_type.syntax == "proto3"
    def getter(self):
        # Translate the raw int into the registered enum object.
        return MetaEnum.Enums[field.enum_type.name].__enum__[self._fields.get(field, default_value)]
    getter.__module__ = None
    getter.__doc__ = 'Getter for %s.' % proto_field_name
    clear_when_set_to_default = is_proto3 and not field.containing_oneof
    def field_setter(self, new_value):
        new_value = type_checker.CheckValue(new_value)
        # Proto3 semantics: storing the default clears the field.
        if clear_when_set_to_default and not new_value:
            self._fields.pop(field, None)
        else:
            self._fields[field] = new_value
        if not self._cached_byte_size_dirty:
            self._Modified()
    if field.containing_oneof:
        def setter(self, new_value):
            field_setter(self, new_value)
            self._UpdateOneofState(field)
    else:
        setter = field_setter
    setter.__module__ = None
    setter.__doc__ = 'Setter for %s.' % proto_field_name
    doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
    setattr(cls, property_name, property(getter, setter, doc=doc))
python_message._AddPropertiesForNonRepeatedScalarField = _AddPropertiesForNonRepeatedScalarField
Origin_AddMergeFromMethod = python_message._AddMergeFromMethod
def _AddMergeFromMethod(cls):
	"""Patched MergeFrom: for messages with oneofs, merging a non-message value
	routes through setVariant instead of the field-by-field merge."""
	Origin_AddMergeFromMethod(cls)
	Origin_MergeFrom = cls.MergeFrom
	def MergeFrom(self, msg):
		if not isinstance(msg, cls) and cls.DESCRIPTOR.oneofs:
			return setVariant(self, msg)
		return Origin_MergeFrom(self, msg)
	cls.MergeFrom = MergeFrom
python_message._AddMergeFromMethod = _AddMergeFromMethod
def _AddPropertiesForNonRepeatedCompositeField(field, cls):
	"""Patched composite-field property factory: unwraps Variant (oneof) and
	PythonMessage values, and permits direct assignment / clearing with None."""
	proto_field_name = field.name
	property_name = python_message._PropertyName(proto_field_name)
	def getter(self):
		field_value = self._fields.get(field, None)
		if field.message_type.oneofs:
			# Variant wrapper: surface whichever branch is currently set.
			if field_value is None:
				return None
			else:
				attr = field_value.WhichOneof('Variant')
				return None if attr is None else getattr(field_value, attr)
		if field_value is None: return None
		if isinstance(field.message_type, PythonDescriptor):
			# PythonMessage wrapper: surface the wrapped python object.
			return field_value.obj
		return field_value
	getter.__module__ = None
	getter.__doc__ = 'Getter for %s.' % proto_field_name
	def setter(self, value):
		if value is None:
			# Assigning None clears the field.
			self._fields.pop(field, None)
			self._Modified()
		else:
			if field.message_type.oneofs:
				field_value = self._fields.get(field)
				if field_value is None:
					field_value = field._default_constructor(self)
					field_value = self._fields.setdefault(field, field_value)
				return setVariant(field_value, value)
			if isinstance(field.message_type, PythonDescriptor):
				value = PythonMessage(value)
			self._fields[field] = value
			self._Modified()
			value._is_present_in_parent = True
		if field.containing_oneof:
			self._UpdateOneofState(field)
	doc = 'Magic attribute generated for "%s" proto field.' % proto_field_name
	setattr(cls, property_name, property(getter, setter, doc=doc))
python_message._AddPropertiesForNonRepeatedCompositeField = _AddPropertiesForNonRepeatedCompositeField
from typy.google.protobuf.internal import containers
Origin_GetInitializeDefaultForMap = python_message._GetInitializeDefaultForMap
def _GetInitializeDefaultForMap(field):
	"""Patched map default factory: maps whose value type is repeated get a
	_nestingList/_nestingDict hook so _ScalarMap/_MessageMap can build the
	nested containers on demand."""
	MakeMapDefault = Origin_GetInitializeDefaultForMap(field)
	value_type = field.message_type.fields_by_name['value']
	if value_type.label == FieldDescriptor.LABEL_REPEATED:
		if value_type.cpp_type == FieldDescriptor.CPPTYPE_MESSAGE:
			if value_type.message_type.has_options and value_type.message_type.GetOptions().map_entry:
				# dict-valued map: new elements come from the value field's constructor.
				def MakeDictMapDefault(message):
					_map = MakeMapDefault(message)
					_map._nestingDict = lambda: value_type._default_constructor(message)
					return _map
				return MakeDictMapDefault
			else:
				# list-of-message-valued map.
				def MakeListMapDefault(message):
					_map = MakeMapDefault(message)
					_map._nestingList = lambda: value_type._default_constructor(message)
					return _map
				return MakeListMapDefault
		else:
			# list-of-scalar-valued map: flag only, values stay plain lists.
			def MakeListMapDefault(message):
				_map = MakeMapDefault(message)
				_map._nestingList = True
				return _map
			return MakeListMapDefault
	else:
		return MakeMapDefault
python_message._GetInitializeDefaultForMap = _GetInitializeDefaultForMap
class _ScalarMap(containers.ScalarMap):
	"""ScalarMap replacement adding deepcopy, pop, setdefault and support for
	list-valued entries (via the _nestingList slot)."""
	__slots__ = '_nestingList'
	# Presence is unconditional for maps in this implementation.
	_is_present_in_parent = True
	__marker = object()
	def __deepcopy__(self, memo = None):
		import copy
		# NOTE: deep copy yields a plain dict, not another _ScalarMap.
		return {key: copy.deepcopy(self[key]) for key in self}
	def pop(self, key, default = __marker):
		if key in self._values:
			value = self[key]
			del self._values[key]
			self._message_listener.Modified()
			return value
		if default is self.__marker:
			raise KeyError
		return default
	def __getitem__(self, key):
		if getattr(self, '_nestingList', None):
			# list-valued entry: no auto-insert, raw lookup.
			return self._values[key]
		return super(_ScalarMap, self).__getitem__(key)
	def __setitem__(self, key, value):
		if getattr(self, '_nestingList', None):
			# list-valued entry: type-check every element individually.
			checked_key = self._key_checker.CheckValue(key)
			checked_value = [self._value_checker.CheckValue(v) for v in value]
			self._values[checked_key] = checked_value
			self._message_listener.Modified()
		else:
			super(_ScalarMap, self).__setitem__(key, value)
	def setdefault(self, key, default = None):
		if key in self._values:
			return self[key]
		else:
			self[key] = default
			return default
containers.ScalarMap = _ScalarMap
class _MessageMap(containers.MessageMap):
	"""MessageMap replacement: auto-creates nested list/dict elements, unwraps
	Variant (oneof) and PythonMessage values, and adds pop/setdefault."""
	__slots__ = '_nestingList', '_nestingDict'
	# Presence is unconditional for maps in this implementation.
	_is_present_in_parent = True
	__marker = object()
	def __deepcopy__(self, memo = None):
		import copy
		# NOTE: deep copy yields a plain dict, not another _MessageMap.
		return {key: copy.deepcopy(self[key]) for key in self}
	def pop(self, key, default = __marker):
		if key in self._values:
			value = self[key]
			del self._values[key]
			self._message_listener.Modified()
			return value
		if default is self.__marker:
			raise KeyError
		return default
	def __getitem__(self, key):
		nesting = getattr(self, '_nestingList', None) or getattr(self, '_nestingDict', None)
		if nesting:
			try:
				return self._values[key]
			except KeyError:
				# Missing key: create a new nested container in place.
				key = self._key_checker.CheckValue(key)
				new_element = nesting()
				self._values[key] = new_element
				self._message_listener.Modified()
				return new_element
		elif self._message_descriptor.oneofs:
			# Variant values: surface the active branch, or None.
			if key not in self._values:
				return None
			value = self._values[key]
			attr = value.WhichOneof('Variant')
			return None if attr is None else getattr(value, attr)
		elif isinstance(self._message_descriptor, PythonDescriptor):
			try:
				return self._values[key].obj
			except KeyError:
				key = self._key_checker.CheckValue(key)
				new_element = PythonMessage(self._message_descriptor._concrete_class())
				self._values[key] = new_element
				self._message_listener.Modified()
				return new_element.obj
		return super(_MessageMap, self).__getitem__(key)
	def __setitem__(self, key, value):
		if getattr(self, '_nestingList', None):
			# Replace the nested list's contents wholesale.
			field_value = self.__getitem__(key)
			field_value._values = []
			return field_value.extend(value)
		elif getattr(self, '_nestingDict', None):
			# Replace the nested dict's contents wholesale.
			field_value = self.__getitem__(key)
			field_value._values = {}
			for k, v in value.iteritems():
				field_value[k] = v
			return
		elif self._message_descriptor.oneofs:
			variant = super(_MessageMap, self).__getitem__(key)
			return setVariant(variant, value)
		if isinstance(self._message_descriptor, PythonDescriptor):
			value = PythonMessage(value)
		self._values[key] = value
		self._message_listener.Modified()
		if value is not None:
			value._is_present_in_parent = True
	def setdefault(self, key, default = None):
		if key in self._values:
			return self[key]
		else:
			self[key] = default
			return default
	def MergeFrom(self, other):
		# Entry-wise assignment so the __setitem__ unwrapping logic applies.
		for key in other:
			self[key] = other[key]
containers.MessageMap = _MessageMap
class _RepeatedScalarFieldContainer(containers.RepeatedScalarFieldContainer):
	"""RepeatedScalarFieldContainer replacement: deepcopy/__add__ return plain
	lists, and __init__ bypasses the base class's constructor."""
	_is_present_in_parent = True
	def __deepcopy__(self, memo = None):
		import copy
		return [copy.deepcopy(value) for value in self]
	def __add__(self, other):
		return [v for v in self] + [v for v in other]
	def __init__(self, message_listener, type_checker):
		containers.BaseContainer.__init__(self, message_listener)
		self._type_checker = type_checker
	def __iter__(self):
		return iter(self._values)
containers.RepeatedScalarFieldContainer = _RepeatedScalarFieldContainer
class _RepeatedCompositeFieldContainer(containers.RepeatedCompositeFieldContainer):
	"""RepeatedCompositeFieldContainer replacement: unwraps Variant (oneof) and
	PythonMessage elements on access and allows None entries."""
	_is_present_in_parent = True
	def __deepcopy__(self, memo = None):
		import copy
		return [copy.deepcopy(value) for value in self]
	def __init__(self, message_listener, message_descriptor):
		containers.BaseContainer.__init__(self, message_listener)
		self._message_descriptor = message_descriptor
	def __getitem__(self, key):
		if self._message_descriptor.oneofs:
			# Variant element: surface the active branch, or None.
			value = self._values[key]
			attr = value.WhichOneof('Variant')
			return None if attr is None else getattr(value, attr)
		elif isinstance(self._message_descriptor, PythonDescriptor):
			return self._values[key].obj
		return super(_RepeatedCompositeFieldContainer, self).__getitem__(key)
	def __add__(self, other):
		return [v for v in self] + [v for v in other]
	def __iter__(self):
		if self._message_descriptor.oneofs:
			return iter([None if v.WhichOneof('Variant') is None else getattr(v, v.WhichOneof('Variant'), None) for v in self._values])
		elif isinstance(self._message_descriptor, PythonDescriptor):
			return iter([v.obj for v in self._values])
		return iter(self._values)
	def add(self, **kwargs):
		if isinstance(self._message_descriptor, PythonDescriptor):
			new_element = PythonMessage(self._message_descriptor._concrete_class(**kwargs))
			self._values.append(new_element)
			if not self._message_listener.dirty:
				self._message_listener.Modified()
			return new_element
		return super(_RepeatedCompositeFieldContainer, self).add(**kwargs)
	def append(self, item):
		if self._message_descriptor.oneofs:
			# Appending a plain value to a Variant list: wrap it via setVariant.
			return setVariant(self.add(), item)
		if isinstance(self._message_descriptor, PythonDescriptor):
			item = PythonMessage(item)
		else:
			assert item is None or type(item) is self._message_descriptor._concrete_class
		self._values.append(item)
		if not self._message_listener.dirty:
			self._message_listener.Modified()
		if item is not None:
			item._is_present_in_parent = True
containers.RepeatedCompositeFieldContainer = _RepeatedCompositeFieldContainer
from typy.google.protobuf.internal import decoder
from typy.google.protobuf.internal import encoder
from typy.google.protobuf.internal import wire_format
Origin_MapDecoder = decoder.MapDecoder
def _MapDecoder(field_descriptor, new_default, is_message_map):
	"""Patched map decoder for message maps: routes each decoded entry through
	the patched _MessageMap semantics (None values, nested lists/dicts,
	PythonMessage values). Non-message maps use the original decoder."""
	Origin_DecodeMap = Origin_MapDecoder(field_descriptor, new_default, is_message_map)
	if not is_message_map:
		return Origin_DecodeMap
	key = field_descriptor
	tag_bytes = encoder.TagBytes(field_descriptor.number, wire_format.WIRETYPE_LENGTH_DELIMITED)
	tag_len = len(tag_bytes)
	local_DecodeVarint = decoder._DecodeVarint
	message_type = field_descriptor.message_type
	def _DecodeMap(buffer, pos, end, message, field_dict):
		# One scratch entry message, Clear()ed and reused for every map entry.
		submsg = message_type._concrete_class()
		value = field_dict.get(key)
		if value is None:
			value = field_dict.setdefault(key, new_default(message))
		while 1:
			(size, pos) = local_DecodeVarint(buffer, pos)
			new_pos = pos + size
			if new_pos > end:
				raise decoder._DecodeError('Truncated message.')
			submsg.Clear()
			if submsg._InternalParse(buffer, pos, new_pos) != new_pos:
				raise decoder._DecodeError('Unexpected end-group tag.')
			if submsg.value is None:
				value.__setitem__(submsg.key, None)
			elif getattr(value, '_nestingList', None) or getattr(value, '_nestingDict', None):
				value.__getitem__(submsg.key).MergeFrom(submsg.value)
			elif isinstance(value._message_descriptor, PythonDescriptor):
				value.__setitem__(submsg.key, submsg.value)
			else:
				super(_MessageMap, value).__getitem__(submsg.key).MergeFrom(submsg.value)
			pos = new_pos + tag_len
			if buffer[new_pos:pos] != tag_bytes or new_pos == end:
				# Next tag is not another entry of this map, or input exhausted.
				return new_pos
	return _DecodeMap
decoder.MapDecoder = _MapDecoder
Origin_MessageSizer = encoder.MessageSizer
def _MessageSizer(field_number, is_repeated, is_packed):
	"""Patched repeated-message sizer: None elements are sized as zero-length
	messages, and PythonMessage elements use the static size helper."""
	if is_repeated:
		tag_size = encoder._TagSize(field_number)
		def RepeatedFieldSize(value):
			isPythonMessage = isinstance(value._message_descriptor, PythonDescriptor)
			result = tag_size * len(value)
			if value._message_descriptor.oneofs:
				# Iterate the raw Variant wrappers, not the unwrapped branches.
				value = value._values
			for element in value:
				if isPythonMessage:
					l = element and PythonMessage.Static_ByteSize(element) or 0
				else:
					l = element and element.ByteSize() or 0
				result += encoder._VarintSize(l) + l
			return result
		return RepeatedFieldSize
	else:
		return Origin_MessageSizer(field_number, is_repeated, is_packed)
encoder.MessageSizer = _MessageSizer
type_checkers.TYPE_TO_SIZER[FieldDescriptor.TYPE_MESSAGE] = _MessageSizer
Origin_MessageEncoder = encoder.MessageEncoder
def _MessageEncoder(field_number, is_repeated, is_packed):
	"""Patched repeated-message encoder: None elements are written as
	zero-length messages, and PythonMessage elements use the static helpers."""
	if is_repeated:
		tag = encoder.TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)
		def EncodeRepeatedField(write, value):
			isPythonMessage = isinstance(value._message_descriptor, PythonDescriptor)
			if value._message_descriptor.oneofs:
				# Iterate the raw Variant wrappers, not the unwrapped branches.
				value = value._values
			for element in value:
				write(tag)
				if element is None:
					encoder._EncodeVarint(write, 0)
				else:
					if isPythonMessage:
						encoder._EncodeVarint(write, PythonMessage.Static_ByteSize(element))
						PythonMessage.Static_InternalSerialize(element, write)
					else:
						encoder._EncodeVarint(write, element.ByteSize())
						element._InternalSerialize(write)
		return EncodeRepeatedField
	else:
		return Origin_MessageEncoder(field_number, is_repeated, is_packed)
encoder.MessageEncoder = _MessageEncoder
type_checkers.TYPE_TO_ENCODER[FieldDescriptor.TYPE_MESSAGE] = _MessageEncoder
def SymbolSizer(sizer, includeZero):
	"""Build a sizer for Symbol values: payload length plus the size of its
	length prefix; falsy values size to 0 unless includeZero is set."""
	def _SymbolSizer(value):
		if not includeZero and not value:
			return 0
		length = SymbolEncodedLen(value)
		return sizer(length) + length
	return _SymbolSizer
def SymbolEncoder(encoder, includeZero):
	"""Build an encoder for Symbol values: a length prefix written with
	`encoder`, then the encoded symbol bytes; falsy values are skipped unless
	includeZero is set."""
	def _EncodeSymbol(write, value):
		if not includeZero and not value:
			return None
		encoder(write, SymbolEncodedLen(value))
		return write(EncodeSymbol(value))
	return _EncodeSymbol
def SymbolDecoder(decoder):
	"""Build a decoder for Symbol values: read a length with `decoder`, then
	decode that many following bytes as a symbol."""
	def _DecodeSymbol(buffer, pos):
		size, pos = decoder(buffer, pos)
		end = pos + size
		return DecodeSymbol(buffer[pos:end]), end
	return _DecodeSymbol
def _AttachSymbolHelpers(cls, field):
	"""Swap in Symbol-specific encoder/sizer/decoder for `field` on `cls`."""
	is_packed = False
	is_repeated = (field.label == FieldDescriptor.LABEL_REPEATED)
	sizer = SymbolSizer(encoder._VarintSize, is_repeated)
	field._encoder = encoder._SimpleEncoder(wire_format.WIRETYPE_LENGTH_DELIMITED, SymbolEncoder(encoder._EncodeVarint, is_repeated), sizer)(field.number, is_repeated, is_packed)
	field._sizer = encoder._SimpleSizer(sizer)(field.number, is_repeated, is_packed)
	oneof_descriptor = None if field.containing_oneof is None else field
	field_decoder = decoder._SimpleDecoder(wire_format.WIRETYPE_LENGTH_DELIMITED, SymbolDecoder(decoder._DecodeVarint))(field.number, is_repeated, is_packed, field, field._default_constructor)
	tag_bytes = encoder.TagBytes(field.number, wire_format.WIRETYPE_LENGTH_DELIMITED)
	cls._decoders_by_tag[tag_bytes] = (field_decoder, oneof_descriptor)
	if hasattr(cls, 'fields_by_tag'):
		# NOTE(review): assumes field_tag always coexists with fields_by_tag -- confirm.
		cls.field_tag[field] = tag_bytes
		cls.fields_by_tag[tag_bytes] = field
def FixedPointSizer(sizer, precision, floor, includeZero):
	"""Build a sizer for a fixed-point value stored on the wire as
	int(value * 10**precision) - floor * 10**precision; values equal to the
	floor size to 0 unless includeZero is set."""
	scale = 10 ** precision
	scaled_floor = floor * scale
	def _FixedPointSizer(value):
		scaled = int(value * scale)
		if includeZero or scaled != scaled_floor:
			return sizer(scaled - scaled_floor)
		return 0
	return _FixedPointSizer
def EncodeFixedPoint(encoder, precision, floor, includeZero):
	"""Build an encoder that writes int(value * 10**precision) minus the scaled
	floor; values equal to the floor are skipped unless includeZero is set."""
	scale = 10 ** precision
	scaled_floor = floor * scale
	def _EncodeFixedPoint(write, value):
		scaled = int(value * scale)
		if not includeZero and scaled == scaled_floor:
			return None
		return encoder(write, scaled - scaled_floor)
	return _EncodeFixedPoint
def DecodeFixedPoint(decoder, precision, floor):
	"""Build the inverse of EncodeFixedPoint: scale the decoded integer back
	down by 10**precision and re-add the floor."""
	scale = float(10 ** precision)
	def _DecodeFixedPoint(buffer, pos):
		raw, new_pos = decoder(buffer, pos)
		return raw / scale + floor, new_pos
	return _DecodeFixedPoint
def _AttachFixedPointHelpers(cls, field, precision, floor):
	"""Swap in fixed-point varint encoder/sizer/decoder for `field` on `cls`."""
	is_repeated = (field.label == FieldDescriptor.LABEL_REPEATED)
	is_packed = (is_repeated and wire_format.IsTypePackable(FieldDescriptor.TYPE_INT64))
	sizer = FixedPointSizer(encoder._SignedVarintSize, precision, floor, is_repeated)
	field._encoder = encoder._SimpleEncoder(wire_format.WIRETYPE_VARINT, EncodeFixedPoint(encoder._EncodeSignedVarint, precision, floor, is_repeated), sizer)(field.number, is_repeated, is_packed)
	field._sizer = encoder._SimpleSizer(sizer)(field.number, is_repeated, is_packed)
	oneof_descriptor = None if field.containing_oneof is None else field
	tag_bytes = encoder.TagBytes(field.number, type_checkers.FIELD_TYPE_TO_WIRE_TYPE[FieldDescriptor.TYPE_INT64])
	field_decoder = decoder._SimpleDecoder(wire_format.WIRETYPE_VARINT, DecodeFixedPoint(decoder._DecodeSignedVarint, precision, floor))(field.number, is_repeated, is_packed, field, field._default_constructor)
	cls._decoders_by_tag[tag_bytes] = (field_decoder, oneof_descriptor)
	if hasattr(cls, 'fields_by_tag'):
		cls.field_tag[field] = tag_bytes
		cls.fields_by_tag[tag_bytes] = field
		# Drop the tag registered for the field's declared wire type; INT64's
		# varint tag (registered above) replaces it.
		cls.fields_by_tag.pop(encoder.TagBytes(field.number, type_checkers.FIELD_TYPE_TO_WIRE_TYPE[field.type]))
	if is_packed:
		# Also accept the packed (length-delimited) wire form.
		tag_bytes = encoder.TagBytes(field.number, wire_format.WIRETYPE_LENGTH_DELIMITED)
		cls._decoders_by_tag[tag_bytes] = (field_decoder, oneof_descriptor)
		if hasattr(cls, 'fields_by_tag'):
			cls.field_tag[field] = tag_bytes
			cls.fields_by_tag[tag_bytes] = field
def initObjectClass(cls, clsname, bases, attrs):
	"""Walk the descriptor's fields and attach Symbol / FixedPoint wire helpers
	wherever the typy property declarations call for them, including inside
	List/Dict element types and multi-type Instance variants."""
	cls.IsInitialized = lambda s, *args, **kwargs: True
	descriptor = attrs[python_message.GeneratedProtocolMessageType._DESCRIPTOR_KEY]
	for field in descriptor.fields:
		p = cls.____properties__[field.name]
		if isinstance(p, FixedPoint):
			_AttachFixedPointHelpers(cls, field, p.precision, p.floor)
		elif isinstance(p, Symbol):
			_AttachSymbolHelpers(cls, field)
		elif isinstance(p, List):
			# Helpers go on the list's element type.
			if isinstance(p.elementType, FixedPoint):
				_AttachFixedPointHelpers(cls, field, p.elementType.precision, p.elementType.floor)
			elif isinstance(p.elementType, Symbol):
				_AttachSymbolHelpers(cls, field)
			elif isinstance(p.elementType, Instance) and len(p.elementType.pyType) > 1:
				# Multi-type variants: patch each branch of the Variant message.
				for vp in p.elementType.pyType:
					if vp is None: continue
					vp = toType(vp)
					if isinstance(vp, FixedPoint):
						_AttachFixedPointHelpers(attrs[field.message_type.name], field.message_type.fields_by_name['FixedPoint'], vp.precision, vp.floor)
					elif isinstance(vp, Symbol):
						_AttachSymbolHelpers(attrs[field.message_type.name], field.message_type.fields_by_name['Symbol'])
		elif isinstance(p, Dict):
			# Helpers go on the map entry's value (and possibly key) fields.
			if isinstance(p.valueType, FixedPoint):
				_AttachFixedPointHelpers(attrs[field.message_type.name], field.message_type.fields_by_name['value'], p.valueType.precision, p.valueType.floor)
			elif isinstance(p.valueType, Symbol):
				_AttachSymbolHelpers(attrs[field.message_type.name], field.message_type.fields_by_name['value'])
			elif isinstance(p.valueType, Instance) and len(p.valueType.pyType) > 1:
				for vp in p.valueType.pyType:
					if vp is None: continue
					vp = toType(vp)
					if isinstance(vp, FixedPoint):
						_AttachFixedPointHelpers(attrs[field.message_type.fields_by_name['value'].message_type.name], field.message_type.fields_by_name['value'].message_type.fields_by_name['FixedPoint'], vp.precision, vp.floor)
					elif isinstance(vp, Symbol):
						_AttachSymbolHelpers(attrs[field.message_type.fields_by_name['value'].message_type.name], field.message_type.fields_by_name['value'].message_type.fields_by_name['Symbol'])
			if isinstance(p.keyType, Symbol):
				_AttachSymbolHelpers(attrs[field.message_type.name], field.message_type.fields_by_name['key'])
		elif isinstance(p, Instance) and len(p.pyType) > 1:
			for vp in p.pyType:
				if vp is None: continue
				vp = toType(vp)
				if isinstance(vp, FixedPoint):
					_AttachFixedPointHelpers(attrs[field.message_type.name], field.message_type.fields_by_name['FixedPoint'], vp.precision, vp.floor)
				elif isinstance(vp, Symbol):
					_AttachSymbolHelpers(attrs[field.message_type.name], field.message_type.fields_by_name['Symbol'])
| |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V1 metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import parameterized
from tensorflow.contrib.distribute.python import combinations
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import test
from tensorflow.python.framework import ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import variables
def _labeled_dataset_fn():
  """Dataset of {labels: x % 5, predictions: x % 3} for x in 0..999, batch 4."""
  # First four batches of x: labels, predictions -> (labels == predictions)
  # 0: 0, 0 -> True;   1: 1, 1 -> True;  2: 2, 2 -> True;   3: 3, 0 -> False
  # 4: 4, 1 -> False;  5: 0, 2 -> False; 6: 1, 0 -> False;  7: 2, 1 -> False
  # 8: 3, 2 -> False;  9: 4, 0 -> False; 10: 0, 1 -> False; 11: 1, 2 -> False
  # 12: 2, 0 -> False; 13: 3, 1 -> False; 14: 4, 2 -> False; 15: 0, 0 -> True
  return dataset_ops.Dataset.range(1000).map(
      lambda x: {"labels": x % 5, "predictions": x % 3}).batch(4)
def _boolean_dataset_fn():
  """Repeating boolean labels/predictions covering TP/FP/TN/FN, batch 3."""
  # First four batches of labels, predictions: {TP, FP, TN, FN}
  # with a threshold of 0.5:
  #   T, T -> TP;  F, T -> FP;  T, F -> FN
  #   F, F -> TN;  T, T -> TP;  F, T -> FP
  #   T, F -> FN;  F, F -> TN;  T, T -> TP
  #   F, T -> FP;  T, F -> FN;  F, F -> TN
  return dataset_ops.Dataset.from_tensor_slices({
      "labels": [True, False, True, False],
      "predictions": [True, True, False, False]}).repeat().batch(3)
def _threshold_dataset_fn():
  """Like _boolean_dataset_fn but with float predictions around a 0.5 threshold."""
  # First four batches of labels, predictions: {TP, FP, TN, FN}
  # with a threshold of 0.5:
  #   True, 1.0 -> TP;  False, .75 -> FP;  True, .25 -> FN
  #   False, 0.0 -> TN; True, 1.0 -> TP;   False, .75 -> FP
  #   True, .25 -> FN;  False, 0.0 -> TN;  True, 1.0 -> TP
  #   False, .75 -> FP; True, .25 -> FN;   False, 0.0 -> TN
  return dataset_ops.Dataset.from_tensor_slices({
      "labels": [True, False, True, False],
      "predictions": [1.0, 0.75, 0.25, 0.]}).repeat().batch(3)
def _regression_dataset_fn():
  """Repeating unbatched float labels/predictions for regression metrics."""
  return dataset_ops.Dataset.from_tensor_slices({
      "labels": [1., .5, 1., 0.],
      "predictions": [1., .75, .25, 0.]}).repeat()
# TODO(priyag): Add TPU Strategy to this once metrics aggregate correctly using
# TowerLocalVariables on TPUs. Submit http://cl/208914352.
def all_combinations():
  """Graph-mode test combinations over the distribution strategies under test."""
  return combinations.combine(
      distribution=[combinations.default_strategy,
                    combinations.one_device_strategy,
                    combinations.mirrored_strategy_with_gpu_and_cpu,
                    combinations.mirrored_strategy_with_two_gpus],
      mode=["graph"])
# TODO(josh11b): Test metrics.recall_at_top_k, metrics.average_precision_at_k,
# metrics.precision_at_k
class MetricsV1Test(test.TestCase, parameterized.TestCase):
def _test_metric(self, distribution, dataset_fn, metric_fn, expected_fn):
with ops.Graph().as_default(), distribution.scope():
iterator = distribution.distribute_dataset(
dataset_fn).make_one_shot_iterator()
value, update = distribution.call_for_each_tower(
metric_fn, iterator.get_next())
update = distribution.group(update)
self.evaluate(variables.local_variables_initializer())
# TODO(josh11b): Once we switch to using a global batch size for input,
# replace "distribution.num_towers" with "1".
batches_per_update = distribution.num_towers
# Update variables using the first `num_towers` batches.
self.evaluate(update)
self.assertAllClose(expected_fn(batches_per_update), self.evaluate(value),
0.001, msg="After first update")
# Update variables using the second `num_towers` batches.
self.evaluate(update)
self.assertAllClose(expected_fn(2 * batches_per_update),
self.evaluate(value),
0.001,
msg="After second update")
if batches_per_update == 1: # Consume 4 input batches
self.evaluate(update)
self.assertAllClose(expected_fn(3 * batches_per_update),
self.evaluate(value),
0.001,
msg="After third update")
self.evaluate(update)
self.assertAllClose(expected_fn(4 * batches_per_update),
self.evaluate(value),
0.001,
msg="After fourth update")
@combinations.generate(all_combinations())
def testMean(self, distribution):
def _dataset_fn():
return dataset_ops.Dataset.range(1000).map(math_ops.to_float).batch(4)
def _expected_fn(num_batches):
# Mean(0..3) = 1.5, Mean(0..7) = 3.5, Mean(0..11) = 5.5, etc.
return num_batches * 2 - 0.5
self._test_metric(distribution, _dataset_fn, metrics.mean, _expected_fn)
@combinations.generate(all_combinations())
def testAccuracy(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.accuracy(labels, predictions)
def _expected_fn(num_batches):
return [3./4, 3./8, 3./12, 4./16][num_batches - 1]
self._test_metric(
distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testMeanPerClassAccuracy(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.mean_per_class_accuracy(
labels, predictions, num_classes=5)
def _expected_fn(num_batches):
mean = lambda x: sum(x) / len(x)
return [mean([1., 1., 1., 0., 0.]),
mean([0.5, 0.5, 0.5, 0., 0.]),
mean([1./3, 1./3, 0.5, 0., 0.]),
mean([0.5, 1./3, 1./3, 0., 0.])][num_batches - 1]
self._test_metric(
distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testMeanIOU(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.mean_iou(
labels, predictions, num_classes=5)
def _expected_fn(num_batches):
mean = lambda x: sum(x) / len(x)
return [mean([1./2, 1./1, 1./1, 0.]), # no class 4 in first batch
mean([1./4, 1./4, 1./3, 0., 0.]),
mean([1./6, 1./6, 1./5, 0., 0.]),
mean([2./8, 1./7, 1./7, 0., 0.])][num_batches - 1]
self._test_metric(
distribution, _labeled_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testMeanTensor(self, distribution):
def _dataset_fn():
dataset = dataset_ops.Dataset.range(1000).map(math_ops.to_float)
# Want to produce a fixed, known shape, so drop remainder when batching.
dataset = dataset.batch(4, drop_remainder=True)
return dataset
def _expected_fn(num_batches):
# Mean(0, 4, ..., 4 * num_batches - 4) == 2 * num_batches - 2
# Mean(1, 5, ..., 4 * num_batches - 3) == 2 * num_batches - 1
# Mean(2, 6, ..., 4 * num_batches - 2) == 2 * num_batches
# Mean(3, 7, ..., 4 * num_batches - 1) == 2 * num_batches + 1
first = 2. * num_batches - 2.
return [first, first + 1., first + 2., first + 3.]
self._test_metric(
distribution, _dataset_fn, metrics.mean_tensor, _expected_fn)
@combinations.generate(all_combinations())
def testAUCROC(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.auc(labels, predictions, num_thresholds=8, curve="ROC",
summation_method="careful_interpolation")
def _expected_fn(num_batches):
return [0.5, 7./9, 0.8, 0.75][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testAUCPR(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.auc(labels, predictions, num_thresholds=8, curve="PR",
summation_method="careful_interpolation")
def _expected_fn(num_batches):
return [0.797267, 0.851238, 0.865411, 0.797267][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testFalseNegatives(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.false_negatives(labels, predictions)
def _expected_fn(num_batches):
return [1., 1., 2., 3.][num_batches - 1]
self._test_metric(
distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testFalseNegativesAtThresholds(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.false_negatives_at_thresholds(labels, predictions, [.5])
def _expected_fn(num_batches):
return [[1.], [1.], [2.], [3.]][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testTrueNegatives(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.true_negatives(labels, predictions)
def _expected_fn(num_batches):
return [0., 1., 2., 3.][num_batches - 1]
self._test_metric(
distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testTrueNegativesAtThresholds(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.true_negatives_at_thresholds(labels, predictions, [.5])
def _expected_fn(num_batches):
return [[0.], [1.], [2.], [3.]][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testFalsePositives(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.false_positives(labels, predictions)
def _expected_fn(num_batches):
return [1., 2., 2., 3.][num_batches - 1]
self._test_metric(
distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testFalsePositivesAtThresholds(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.false_positives_at_thresholds(labels, predictions, [.5])
def _expected_fn(num_batches):
return [[1.], [2.], [2.], [3.]][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testTruePositives(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.true_positives(labels, predictions)
def _expected_fn(num_batches):
return [1., 2., 3., 3.][num_batches - 1]
self._test_metric(
distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testTruePositivesAtThresholds(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.true_positives_at_thresholds(labels, predictions, [.5])
def _expected_fn(num_batches):
return [[1.], [2.], [3.], [3.]][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testPrecision(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.precision(labels, predictions)
def _expected_fn(num_batches):
return [0.5, 0.5, 0.6, 0.5][num_batches - 1]
self._test_metric(
distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testPrecisionAtThreshold(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.precision_at_thresholds(labels, predictions, [0.5])
def _expected_fn(num_batches):
return [[0.5], [0.5], [0.6], [0.5]][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testRecall(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.recall(labels, predictions)
def _expected_fn(num_batches):
return [0.5, 2./3, 0.6, 0.5][num_batches - 1]
self._test_metric(
distribution, _boolean_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testRecallAtThreshold(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.recall_at_thresholds(labels, predictions, [0.5])
def _expected_fn(num_batches):
return [[0.5], [2./3], [0.6], [0.5]][num_batches - 1]
self._test_metric(
distribution, _threshold_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testMeanSquaredError(self, distribution):
def _metric_fn(x):
labels = x["labels"]
predictions = x["predictions"]
return metrics.mean_squared_error(labels, predictions)
def _expected_fn(num_batches):
return [0., 1./32, 0.208333, 0.15625][num_batches - 1]
self._test_metric(
distribution, _regression_dataset_fn, _metric_fn, _expected_fn)
@combinations.generate(all_combinations())
def testRootMeanSquaredError(self, distribution):
    """Distributed `metrics.root_mean_squared_error` over regression data."""
    expected = [0., 0.176777, 0.456435, 0.395285]

    def _metric_fn(batch):
        return metrics.root_mean_squared_error(batch["labels"],
                                               batch["predictions"])

    def _expected_fn(num_batches):
        return expected[num_batches - 1]

    self._test_metric(distribution, _regression_dataset_fn, _metric_fn,
                      _expected_fn)
@combinations.generate(all_combinations())
def testSensitivityAtSpecificity(self, distribution):
    """Distributed `metrics.sensitivity_at_specificity` at specificity 0.8."""
    expected = [0.5, 2./3, 0.6, 0.5]

    def _metric_fn(batch):
        return metrics.sensitivity_at_specificity(
            batch["labels"], batch["predictions"], 0.8)

    def _expected_fn(num_batches):
        return expected[num_batches - 1]

    self._test_metric(distribution, _threshold_dataset_fn, _metric_fn,
                      _expected_fn)
@combinations.generate(all_combinations())
def testSpecificityAtSensitivity(self, distribution):
    """Distributed `metrics.specificity_at_sensitivity` at sensitivity 0.95."""
    expected = [0., 1./3, 0.5, 0.5]

    def _metric_fn(batch):
        return metrics.specificity_at_sensitivity(
            batch["labels"], batch["predictions"], 0.95)

    def _expected_fn(num_batches):
        return expected[num_batches - 1]

    self._test_metric(distribution, _threshold_dataset_fn, _metric_fn,
                      _expected_fn)
# Standard TensorFlow test entry point: run all test cases in this module.
if __name__ == "__main__":
    test.main()
| |
# -*- coding: utf-8 -*-
########################################################################
#
# License: BSD
# Created: November 8, 2014
# Author: Alistair Muldal - alimuldal@gmail.com
#
# $Id$
#
########################################################################
"""This utility prints the contents of an HDF5 file as a tree.
Pass the flag -h to this for help on usage.
"""
import tables
import numpy as np
import os
import argparse
from collections import defaultdict, deque
import warnings
def _get_parser():
parser = argparse.ArgumentParser(
description='''
`pttree` is designed to give a quick overview of the contents of a
PyTables HDF5 file by printing a depth-indented list of nodes, similar
to the output of the Unix `tree` function.
It can also display the size, shape and compression states of
individual nodes, as well as summary information for the whole file.
For a more verbose output (including metadata), see `ptdump`.
''')
parser.add_argument(
'-L', '--max-level', type=int, dest='max_depth',
help='maximum branch depth of tree to display (-1 == no limit)',
)
parser.add_argument(
'-S', '--sort-by', type=str, dest='sort_by',
help='artificially order nodes, can be either "size", "name" or "none"'
)
parser.add_argument(
'--print-size', action='store_true', dest='print_size',
help='print size of each node/branch',
)
parser.add_argument(
'--no-print-size', action='store_false', dest='print_size',
)
parser.add_argument(
'--print-shape', action='store_true', dest='print_shape',
help='print shape of each node',
)
parser.add_argument(
'--no-print-shape', action='store_false', dest='print_shape',
)
parser.add_argument(
'--print-compression', action='store_true', dest='print_compression',
help='print compression library(level) for each compressed node',
)
parser.add_argument(
'--no-print-compression', action='store_false',
dest='print_compression',
)
parser.add_argument(
'--print-percent', action='store_true', dest='print_percent',
help='print size of each node as a %% of the total tree size on disk',
)
parser.add_argument(
'--no-print-percent', action='store_false',
dest='print_percent',
)
parser.add_argument(
'--use-si-units', action='store_true', dest='use_si_units',
help='report sizes in SI units (1 MB == 10^6 B)',
)
parser.add_argument(
'--use-binary-units', action='store_false', dest='use_si_units',
help='report sizes in binary units (1 MiB == 2^20 B)',
)
parser.add_argument('src', metavar='filename[:nodepath]',
help='path to the root of the tree structure')
parser.set_defaults(max_depth=1, sort_by="size", print_size=True,
print_percent=True, print_shape=False,
print_compression=False, use_si_units=False)
return parser
def main():
parser = _get_parser()
args = parser.parse_args()
# Catch the files passed as the last arguments
src = args.__dict__.pop('src').split(':')
if len(src) == 1:
filename, nodename = src[0], "/"
else:
filename, nodename = src
if nodename == "":
# case where filename == "filename:" instead of "filename:/"
nodename = "/"
with tables.open_file(filename, 'r') as f:
tree_str = get_tree_str(f, nodename, **args.__dict__)
print tree_str
pass
def get_tree_str(f, where='/', max_depth=-1, print_class=True,
                 print_size=True, print_percent=True, print_shape=False,
                 print_compression=False, print_total=True, sort_by=None,
                 use_si_units=False):
    """
    Generate the ASCII string representing the tree structure, and the summary
    info (if requested).

    :f: open tables.File handle
    :where: pathname of the node to use as the root of the tree
    :max_depth: maximum branch depth to display (-1 == no limit)
    :sort_by: "size" (descending), "name", or anything else for natural order

    Fixes over the previous revision:
    * '/' comparisons now use ``==`` / ``!=`` instead of ``is`` /
      ``is not`` (string identity is implementation-dependent)
    * guards against ZeroDivisionError when no leaf sizes could be
      counted (e.g. a file containing only VLArrays)
    """
    root = f.get_node(where)
    root._g_check_open()
    start_depth = root._v_depth
    if max_depth < 0:
        max_depth = os.sys.maxint
    b2h = bytes2human(use_si_units)
    # we will pass over each node in the tree twice
    # on the first pass we'll start at the root node and recurse down the
    # branches, finding all of the leaf nodes and calculating the total size
    # over all tables and arrays
    total_in_mem = 0
    total_on_disk = 0
    total_items = 0
    # defaultdicts for holding the cumulative branch sizes at each node
    in_mem = defaultdict(lambda: 0.)
    on_disk = defaultdict(lambda: 0.)
    leaf_count = defaultdict(lambda: 0)
    # keep track of node addresses within the HDF5 file so that we don't count
    # nodes with multiple references (i.e. hardlinks) multiple times
    ref_count = defaultdict(lambda: 0)
    ref_idx = defaultdict(lambda: 0)
    hl_addresses = defaultdict(lambda: None)
    hl_targets = defaultdict(lambda: '')
    stack = deque(root)
    leaves = deque()
    while stack:
        node = stack.pop()
        if isinstance(node, tables.link.Link):
            # we treat links like leaves, except we don't dereference them to
            # get their sizes or addresses
            leaves.append(node)
            continue
        path = node._v_pathname
        addr, rc = node._get_obj_info()
        ref_count[addr] += 1
        ref_idx[path] = ref_count[addr]
        hl_addresses[path] = addr
        if isinstance(node, tables.Leaf):
            # only count the size of a hardlinked leaf the first time it is
            # visited
            if ref_count[addr] == 1:
                try:
                    m = node.size_in_memory
                    d = node.size_on_disk
                    # size of this node
                    in_mem[path] += m
                    on_disk[path] += d
                    leaf_count[path] += 1
                    # total over all nodes
                    total_in_mem += m
                    total_on_disk += d
                    total_items += 1
                    # arbitrarily treat this node as the 'target' for all other
                    # hardlinks that point to the same address
                    hl_targets[addr] = path
                except NotImplementedError as e:
                    # size_on_disk is not implemented for VLArrays
                    warnings.warn(e.message)
            # push leaf nodes onto the stack for the next pass
            leaves.append(node)
        elif isinstance(node, tables.Group):
            # don't recurse down the same hardlinked branch multiple times!
            if ref_count[addr] == 1:
                stack.extend(node._v_children.values())
                hl_targets[addr] = path
            # if we've already visited this group's address, treat it as a leaf
            # instead
            else:
                leaves.append(node)
    # on the second pass we start at each leaf and work upwards towards the
    # root node, computing the cumulative size of each branch at each node, and
    # instantiating a PrettyTree object for each node to create an ASCII
    # representation of the tree structure
    # this will store the PrettyTree objects for every node we're printing
    pretty = {}
    stack = leaves
    while stack:
        node = stack.pop()
        path = node._v_pathname
        parent = node._v_parent
        parent_path = parent._v_pathname
        # cumulative size at parent node
        in_mem[parent_path] += in_mem[path]
        on_disk[parent_path] += on_disk[path]
        leaf_count[parent_path] += leaf_count[path]
        depth = node._v_depth - start_depth
        # if we're deeper than the max recursion depth, we print nothing
        if not depth > max_depth:
            # create a PrettyTree representation of this node
            name = node._v_name
            if print_class:
                name += " (%s)" % node.__class__.__name__
            labels = []
            # BUG FIX: guard against ZeroDivisionError when nothing was counted
            if total_on_disk:
                pct = 100 * on_disk[path] / total_on_disk
            else:
                pct = 0.
            # if the address of this object has a ref_count > 1, it has
            # multiple hardlinks
            if ref_count[hl_addresses[path]] > 1:
                name += ', addr=%i, ref=%i/%i' % (
                    hl_addresses[path], ref_idx[path],
                    ref_count[hl_addresses[path]]
                )
            if isinstance(node, tables.link.Link):
                labels.append('softlink --> %s' % node.target)
            elif ref_idx[path] > 1:
                labels.append('hardlink --> %s'
                              % hl_targets[hl_addresses[path]])
            elif isinstance(node, (tables.Array, tables.Table)):
                if print_size:
                    sizestr = 'mem=%s, disk=%s' % (
                        b2h(in_mem[path]), b2h(on_disk[path]))
                    if print_percent:
                        sizestr += ' [%4.1f%%]' % pct
                    labels.append(sizestr)
                if print_shape:
                    labels.append('shape=%s' % repr(node.shape))
                if print_compression:
                    lib = node.filters.complib
                    level = node.filters.complevel
                    if level:
                        compstr = '%s(%i)' % (lib, level)
                    else:
                        compstr = 'None'
                    labels.append('compression=%s' % compstr)
            # if we're at our max recursion depth, we'll print summary
            # information for this branch
            elif depth == max_depth:
                itemstr = '... %i leaves' % leaf_count[path]
                if print_size:
                    itemstr += ', mem=%s, disk=%s' % (
                        b2h(in_mem[path]), b2h(on_disk[path]))
                if print_percent:
                    itemstr += ' [%4.1f%%]' % pct
                labels.append(itemstr)
            # create a PrettyTree for this node, if one doesn't exist already
            if path not in pretty:
                pretty.update({path: PrettyTree()})
            pretty[path].name = name
            pretty[path].labels = labels
            if sort_by == 'size':
                # descending size order
                pretty[path].sort_by = -pct
            elif sort_by == 'name':
                pretty[path].sort_by = node._v_name
            else:
                # natural order
                # BUG FIX: compare with '==' rather than 'is' (identity of
                # equal string literals is implementation-dependent)
                if path == '/':
                    # root is not in root._v_children
                    pretty[path].sort_by = 0
                else:
                    pretty[path].sort_by = parent._v_children.values(
                    ).index(node)
        # exclude root node or we'll get infinite recursions (since '/' is
        # the parent of '/')
        if path != '/':
            # create a PrettyTree for the parent of this node, if one
            # doesn't exist already
            if parent_path not in pretty:
                pretty.update({parent_path: PrettyTree()})
            # make this PrettyTree a child of the parent PrettyTree
            pretty[parent_path].add_child(pretty[path])
        if node is not root and parent not in stack:
            # we append to the 'bottom' of the stack, so that we exhaust all of
            # the nodes at this level before going up a level in the tree
            stack.appendleft(parent)
    out_str = '\n' + '-' * 60 + '\n' * 2
    out_str += str(pretty[root._v_pathname]) + '\n' * 2
    if print_total:
        # BUG FIX: avoid ZeroDivisionError when no in-memory bytes were counted
        avg_ratio = (float(total_on_disk) / total_in_mem
                     if total_in_mem else 0.)
        fsize = os.stat(f.filename).st_size
        out_str += '-' * 60 + '\n'
        out_str += 'Total branch leaves: %i\n' % total_items
        out_str += 'Total branch size: %s in memory, %s on disk\n' % (
            b2h(total_in_mem), b2h(total_on_disk))
        out_str += 'Mean compression ratio: %.2f\n' % avg_ratio
        out_str += 'HDF5 file size: %s\n' % b2h(fsize)
        out_str += '-' * 60 + '\n'
    return out_str
class PrettyTree(object):
    """
    A pretty ASCII representation of a recursive tree structure. Each node can
    have multiple labels, given as a list of strings.

    Example:
    --------
    A = PrettyTree('A', labels=['wow'])
    B = PrettyTree('B', labels=['such tree'])
    C = PrettyTree('C', children=[A, B])
    D = PrettyTree('D', labels=['so recursive'])
    root = PrettyTree('root', labels=['many nodes'], children=[C, D])
    print root

    Credit to Andrew Cooke's blog:
    <http://www.acooke.org/cute/ASCIIDispl0.html>
    """

    def __init__(self, name=None, children=None, labels=None, sort_by=None):
        # NB: never use mutable default arguments in the signature itself -
        # those objects would be shared between ALL PrettyTree instances,
        # making infinite recursions (e.g. 'self in self.children') easy to
        # trigger by accident
        self.name = name
        self.children = [] if children is None else children
        self.labels = [] if labels is None else labels
        self.sort_by = sort_by

    def add_child(self, child):
        # cheap sanity checks that guard against infinite recursion
        assert child is not self
        assert self not in child.children
        if child not in self.children:
            self.children.append(child)

    def tree_lines(self):
        """Yield the rendered lines for this node and all descendants."""
        yield self.name
        for label in self.labels:
            yield ' ' + label
        ordered = sorted(self.children, key=lambda c: c.sort_by)
        for pos, child in enumerate(ordered):
            is_last = (pos == len(ordered) - 1)
            # first line of a child gets a branch glyph; continuation lines
            # get a matching vertical-rule (or blank) prefix
            head, tail = ('`--', '   ') if is_last else ('+--', '|  ')
            prefix = head
            for line in child.tree_lines():
                yield prefix + line
                prefix = tail

    def __str__(self):
        return "\n".join(self.tree_lines())

    def __repr__(self):
        return '<%s at %s>' % (self.__class__.__name__, hex(id(self)))
def bytes2human(use_si_units=False):
    """Return a formatter rendering a byte count as a human-readable string.

    :use_si_units: True for SI units (1 MB == 10^6 B), False for binary
        units (1 MiB == 2^20 B).
    """
    if use_si_units:
        units = [('TB', 1E12), ('GB', 1E9), ('MB', 1E6), ('kB', 1E3),
                 ('B', 1)]
    else:
        units = [('TiB', 2 ** 40), ('GiB', 2 ** 30), ('MiB', 2 ** 20),
                 ('KiB', 2 ** 10), ('B', 1)]

    def b2h(nbytes):
        # pick the largest unit that yields a value >= 1; falls through to
        # plain bytes for very small (or zero) counts
        for suffix, magnitude in units:
            scaled = float(nbytes) / magnitude
            if scaled >= 1:
                break
        return "%.1f%s" % (scaled, suffix)

    return b2h
def make_test_file(prefix='/tmp'):
    """Create a small HDF5 file exercising groups, compressed arrays,
    soft links and hard links; return the open tables.File handle."""
    f = tables.open_file(os.path.join(prefix, 'test_pttree.hdf5'), 'w')
    g1 = f.create_group('/', 'group1')
    g1a = f.create_group(g1, 'group1a')
    g1b = f.create_group(g1, 'group1b')
    compression = tables.Filters(complevel=5, complib='bzip2')
    for grp in (g1a, g1b):
        f.create_carray(grp, 'zeros128b', obj=np.zeros(32, dtype=np.float64),
                        filters=compression)
        f.create_carray(grp, 'random128b', obj=np.random.rand(32),
                        filters=compression)
    g2 = f.create_group('/', 'group2')
    # link targets exercise both the soft- and hard-link display paths
    f.create_soft_link(g2, 'softlink_g1_z128', '/group1/group1a/zeros128b')
    f.create_hard_link(g2, 'hardlink_g1a_z128', '/group1/group1a/zeros128b')
    f.create_hard_link(g2, 'hardlink_g1a', '/group1/group1a')
    return f
| |
#!/usr/bin/env python
# coding:utf-8
"""
user.py
~~~~~~~
"""
from datetime import datetime
import hashlib
from werkzeug.security import generate_password_hash, check_password_hash
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from markdown import markdown
# import bleach
from flask import current_app, request, url_for
from flask.ext.login import UserMixin, AnonymousUserMixin
from sqlalchemy.exc import IntegrityError
from sqlalchemy import or_
from random import seed
import forgery_py
from fine.exceptions import ValidationError
from permission import Permission, Role
from fine import db, login_manager
# class Follow(db.Model):
# __tablename__ = 'follows'
# follower_id = db.Column(db.Integer, db.ForeignKey('users.id'),
# primary_key=True)
# followed_id = db.Column(db.Integer, db.ForeignKey('users.id'),
# primary_key=True)
# timestamp = db.Column(db.DateTime, default=datetime.utcnow)
class User(UserMixin, db.Model):
    """Registered user account (Flask-Login UserMixin + SQLAlchemy model).

    Covers password hashing, signed tokens for email confirmation /
    password reset / email change, role-based permissions and Gravatar
    avatar URLs.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(42), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    # identifier issued by a third-party (social) login provider
    social_id = db.Column(db.String(64), unique=True, index=True)
    # NOTE(review): role_id has no ForeignKey constraint; the relationship
    # below joins manually via primaryjoin -- confirm this is intentional
    role_id = db.Column(db.Integer)
    role = db.relationship('Role', foreign_keys=[role_id],
                           primaryjoin='User.role_id == Role.id',
                           backref='users')
    password_hash = db.Column(db.String(128))
    confirmed = db.Column(db.Boolean, default=False)
    name = db.Column(db.String(64))
    location = db.Column(db.String(64))
    about_me = db.Column(db.String(128))
    member_since = db.Column(db.DateTime, default=datetime.utcnow)
    last_seen = db.Column(db.DateTime, default=datetime.utcnow)
    # cached MD5 hex digest of the email, used to build Gravatar URLs
    avatar_hash = db.Column(db.String(32))
    # followed = db.relationship('Follow',
    #                            foreign_keys=[Follow.follower_id],
    #                            backref=db.backref('follower', lazy='joined'),
    #                            lazy='dynamic',
    #                            cascade='all, delete-orphan')
    # followers = db.relationship('Follow',
    #                             foreign_keys=[Follow.followed_id],
    #                             backref=db.backref('followed', lazy='joined'),
    #                             lazy='dynamic',
    #                             cascade='all, delete-orphan')

    @staticmethod
    def generate_fake(count=12):
        """Populate the table with up to `count` random accounts (dev aid).

        Randomly generated emails/usernames that already exist are skipped.
        """
        seed()
        u_query = User.query
        for i in xrange(count):
            email = forgery_py.internet.email_address()
            username = forgery_py.internet.user_name(True)
            user = u_query.filter(or_(User.email==email,
                                      User.username==username)).first()
            if user:
                continue
            u = User(email=email,
                     username=username,
                     password=forgery_py.lorem_ipsum.word(),
                     confirmed=True,
                     name=forgery_py.name.full_name(),
                     location=forgery_py.address.city(),
                     about_me=forgery_py.lorem_ipsum.sentence(),
                     member_since=forgery_py.date.date(True))
            u.avatar_hash = u.gravatar()
            db.session.add(u)
            try:
                db.session.commit()
            except IntegrityError:
                # concurrent duplicate insert; discard this fake user
                db.session.rollback()

    # @staticmethod
    # def add_self_follows():
    #     for user in User.query.all():
    #         if not user.is_following(user):
    #             user.follow(user)
    #             db.session.add(user)
    #             db.session.commit()

    def __init__(self, **kwargs):
        """Assign a role and avatar hash on creation.

        The configured admin email gets the ADMIN role; everyone else
        falls back to the default role.
        """
        super(User, self).__init__(**kwargs)
        if not self.role:
            if self.email == current_app.config['FINEPY_ADMIN_EMAIL']:
                self.role = Role.query.filter_by(
                    permissions=Permission.ADMIN).first()
            if not self.role:
                self.role = Role.query.filter_by(default=True).first()
        if self.email and not self.avatar_hash:
            self.avatar_hash = hashlib.md5(
                self.email.encode('utf-8')).hexdigest()
        # self.followed.append(Follow(followed=self))

    @property
    def password(self):
        # write-only attribute: only the hash is ever stored/read
        raise AttributeError('password is not readable')

    @password.setter
    def password(self, password):
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Return True when `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def generate_confirmation_token(self, expiration=3600):
        """Signed account-confirmation token, valid for `expiration` secs."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'confirm': self.id})

    def confirm(self, token):
        """Validate a confirmation token and mark this account confirmed."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            # any deserialization error (bad signature, expired) is a failure
            return False
        if data.get('confirm') != self.id:
            return False
        self.confirmed = True
        db.session.add(self)
        return True

    def generate_reset_token(self, expiration=3600):
        """Signed password-reset token, valid for `expiration` seconds."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'reset': self.id})

    def reset_password(self, token, new_password):
        """Validate a reset token and install `new_password`."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('reset') != self.id:
            return False
        self.password = new_password
        db.session.add(self)
        return True

    def generate_email_change_token(self, new_email, expiration=3600):
        """Signed token binding this user to a pending new email address."""
        s = Serializer(current_app.config['SECRET_KEY'], expiration)
        return s.dumps({'change_email': self.id, 'new_email': new_email})

    def change_email(self, token):
        """Validate an email-change token and switch to the new address."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return False
        if data.get('change_email') != self.id:
            return False
        new_email = data.get('new_email')
        if not new_email:
            return False
        if self.query.filter_by(email=new_email).first():
            # address already taken by another account
            return False
        self.email = new_email
        # keep the cached Gravatar hash in sync with the new address
        self.avatar_hash = hashlib.md5(
            self.email.encode('utf-8')).hexdigest()
        db.session.add(self)
        return True

    def can(self, permissions):
        """True when this user's role grants every bit in `permissions`."""
        return (self.role and
                (self.role.permissions & permissions) == permissions)

    def is_admin(self):
        return self.can(Permission.ADMIN)

    def ping(self):
        # refresh last_seen; intended to be called on authenticated requests
        self.last_seen = datetime.utcnow()
        db.session.add(self)

    def gravatar(self, size=24, default='identicon', rating='g'):
        """Generate avatar
        #TODO maybe changed to https://github.com/maethor/avatar-generator
        """
        if request.is_secure:
            url = 'https://secure.gravatar.com/avatar'
        else:
            url = 'http://cn.gravatar.com/avatar'
        # GitHub login cannot get email when user has no public email
        # use username as avatar
        hash = self.avatar_hash
        if not hash:
            if self.email:
                hash = hashlib.md5(
                    self.email.encode('utf-8')).hexdigest()
            else:
                hash = hashlib.md5(
                    self.username.encode('utf-8')).hexdigest()
        return '{url}/{hash}?s={size}&d={default}&r={rating}'.format(
            url=url, hash=hash, size=size, default=default, rating=rating)

    # def follow(self, user):
    #     if not self.is_following(user):
    #         f = Follow(follower=self, followed=user)
    #         db.session.add(f)

    def unfollow(self, user):
        # NOTE(review): relies on self.followed, but the Follow model and its
        # relationships are commented out above -- this raises AttributeError
        # if called; confirm whether the follow feature is disabled
        f = self.followed.filter_by(followed_id=user.id).first()
        if f:
            db.session.delete(f)

    def is_following(self, user):
        # NOTE(review): depends on the commented-out 'followed' relationship
        return self.followed.filter_by(
            followed_id=user.id).first()

    def is_followed_by(self, user):
        # NOTE(review): depends on the commented-out 'followers' relationship
        return self.followers.filter_by(
            follower_id=user.id).first()

    # @property
    # def followed_posts(self):
    #     return Post.query.join(Follow,
    #                            Follow.followed_id == Post.author_id).filter(
    #                                Follow.follower_id == self.id)

    def to_json(self):
        """Serializable dict representation for the REST API."""
        # NOTE(review): 'url' points at api.get_post -- looks like it should
        # be a user endpoint; also self.posts presumably comes from a backref
        # declared on the Post model elsewhere -- confirm both
        json_user = {
            'url': url_for('api.get_post', id=self.id, _external=True),
            'username': self.username,
            'member_since': self.member_since,
            'last_seen': self.last_seen,
            'posts': url_for('api.get_user_posts', id=self.id, _external=True),
            'followed_posts': url_for('api.get_user_followed_posts',
                                      id=self.id, _external=True),
            'post_count': self.posts.count()
        }
        return json_user

    def generate_auth_token(self, expiration):
        """Signed API auth token carrying the user id."""
        s = Serializer(current_app.config['SECRET_KEY'],
                       expiration=expiration)
        return s.dumps({'id': self.id}).decode('ascii')

    @staticmethod
    def verify_auth_token(token):
        """Return the User for a valid auth token, else None."""
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            data = s.loads(token)
        except:
            return None
        return User.query.get(data['id'])

    def __repr__(self):
        return '<User %d>' % self.id
class AnonymUser(AnonymousUserMixin):
    """Anonymous (logged-out) user: holds no permissions whatsoever."""

    def can(self, permissions):
        """Anonymous users are granted no permission bits."""
        return False

    def is_admin(self):
        """Anonymous users are never administrators."""
        return False
# Use the permission-aware anonymous user class for logged-out requests.
login_manager.anonymous_user = AnonymUser


@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: reload a User from the session-stored id.
    return User.query.get(int(user_id))
| |
from StringIO import StringIO
from os import path
import re
import shutil
import tempfile
import logging
import argparse
from datetime import datetime
from tarsnapper.script import (
TarsnapBackend, MakeCommand, ListCommand, ExpireCommand, parse_args,
DEFAULT_DATEFORMAT)
from tarsnapper.config import Job, parse_deltas, str_to_timedelta
class FakeBackend(TarsnapBackend):
def __init__(self, *a, **kw):
TarsnapBackend.__init__(self, *a, **kw)
self.calls = []
self.fake_archives = []
def _exec_tarsnap(self, args):
self.calls.append(args[1:]) # 0 is "tarsnap"
if '--list-archives' in args:
return "\n".join(self.fake_archives)
def _exec_util(self, cmdline):
self.calls.append(cmdline)
def match(self, expect_calls):
"""Compare the calls we have captured with what the list of
regexes in ``expect``.
"""
print expect_calls, '==', self.calls
if not len(expect_calls) == len(self.calls):
return False
for args, expected_args in zip(self.calls, expect_calls):
# Each call has multiple arguments
if not len(args) == len(expected_args):
return False
for actual, expected_re in zip(args, expected_args):
if not re.match(expected_re, actual):
return False
return True
class BaseTest(object):
    """Common scaffolding: temp source dir, command runner, job factories."""

    def setup(self):
        self.log = logging.getLogger("test_script")
        self._tmpdir = tempfile.mkdtemp()
        # tarsnapper only considers a source to "exist" if it contains at
        # least one file
        open(path.join(self._tmpdir, '.placeholder'), 'w').close()
        self.now = datetime.utcnow()

    def teardown(self):
        shutil.rmtree(self._tmpdir)

    def run(self, jobs, archives, **args):
        """Run self.command_class over `jobs` with `archives` pre-listed."""
        merged = {
            'tarsnap_options': (),
            'no_expire': False,
        }
        merged.update(args)
        cmd = self.command_class(argparse.Namespace(**merged),
                                 self.log, backend_class=FakeBackend)
        cmd.backend.fake_archives = archives
        job_list = jobs if isinstance(jobs, list) else [jobs]
        for job in job_list:
            cmd.run(job)
        return cmd

    def job(self, deltas='1d 2d', name='test', **kwargs):
        """Make a job object with sensible test defaults; kwargs override."""
        opts = dict(
            target="$name-$date",
            deltas=parse_deltas(deltas),
            name=name,
            sources=[self._tmpdir])
        opts.update(kwargs)
        return Job(**opts)

    def filename(self, delta, name='test', fmt='%s-%s'):
        """Archive name for an archive made `delta` before self.now."""
        stamp = (self.now - str_to_timedelta(delta)).strftime(
            DEFAULT_DATEFORMAT)
        return fmt % (name, stamp)
class TestTarsnapOptions(BaseTest):
    """Options given via -o must be passed through to every tarsnap call."""

    command_class = ExpireCommand

    def test_parse(self):
        # BUG FIX: this method was named ``tset_parse``, so the test runner
        # never collected or executed it; renamed so it actually runs.
        parse_args(['-o', 'name', 'foo', '-', 'list'])
        parse_args(['-o', 'name', '-', 'list'])
        parse_args(['-o', 'name', 'sdf', 'sdf', '-', 'list'])

    def test_pass_along(self):
        # Short option
        cmd = self.run(self.job(), [], tarsnap_options=(('o', '1'),))
        assert cmd.backend.match([('-o', '1', '--list-archives')])
        # Long option
        cmd = self.run(self.job(), [], tarsnap_options=(('foo', '1'),))
        assert cmd.backend.match([('--foo', '1', '--list-archives')])
        # No value
        cmd = self.run(self.job(), [], tarsnap_options=(('foo',),))
        assert cmd.backend.match([('--foo', '--list-archives')])
        # Multiple values
        cmd = self.run(self.job(), [], tarsnap_options=(('foo', '1', '2'),))
        assert cmd.backend.match([('--foo', '1', '2', '--list-archives')])
class TestMake(BaseTest):
    """Tests for MakeCommand (archive creation)."""
    command_class = MakeCommand

    def test(self):
        # default job: create the archive, then list (to support expiration)
        cmd = self.run(self.job(), [])
        assert cmd.backend.match([
            ('-c', '-f', 'test-.*', '.*'),
            ('--list-archives',)
        ])

    def test_no_sources(self):
        """If no sources are defined, the job is skipped."""
        cmd = self.run(self.job(sources=None), [])
        assert cmd.backend.match([])

    def test_excludes(self):
        # --exclude flags are inserted before the archive name
        cmd = self.run(self.job(excludes=['foo']), [])
        assert cmd.backend.match([
            ('-c', '--exclude', 'foo', '-f', 'test-.*', '.*'),
            ('--list-archives',)
        ])

    def test_no_expire(self):
        # with no_expire there must be no trailing --list-archives call
        cmd = self.run(self.job(), [], no_expire=True)
        assert cmd.backend.match([
            ('-c', '-f', 'test-.*', '.*'),
        ])

    def test_exec(self):
        """Test ``exec_before`` and ``exec_after`` options.
        """
        cmd = self.run(self.job(exec_before="echo begin", exec_after="echo end"),
                       [], no_expire=True)
        # NOTE(review): ('echo begin') is a plain string, not a 1-tuple; the
        # match still passes because zipping two equal strings compares them
        # character by character. Turning these into tuples would change the
        # match semantics -- confirm before "tidying" them.
        assert cmd.backend.match([
            ('echo begin'),
            ('-c', '-f', 'test-.*', '.*'),
            ('echo end'),
        ])
class TestExpire(BaseTest):
    """Tests for ExpireCommand (deleting archives outside their deltas)."""
    command_class = ExpireCommand

    def test_nothing_to_do(self):
        # both archives fall within '1d 10d', so only a listing happens
        cmd = self.run(self.job(deltas='1d 10d'), [
            self.filename('1d'),
            self.filename('5d'),
        ])
        assert cmd.backend.match([
            ('--list-archives',)
        ])

    def test_no_deltas(self):
        """If a job does not define deltas, we skip it."""
        cmd = self.run(self.job(deltas=None), [
            self.filename('1d'),
            self.filename('5d'),
        ])
        assert cmd.backend.match([])

    def test_something_to_expire(self):
        # the 5d archive lies outside '1d 2d' and must be deleted
        cmd = self.run(self.job(deltas='1d 2d'), [
            self.filename('1d'),
            self.filename('5d'),
        ])
        assert cmd.backend.match([
            ('--list-archives',),
            ('-d', '-f', 'test-.*'),
        ])

    def test_aliases(self):
        # archives created under an alias name are expired as well
        cmd = self.run(self.job(deltas='1d 2d', aliases=['alias']), [
            self.filename('1d'),
            self.filename('5d', name='alias'),
        ])
        assert cmd.backend.match([
            ('--list-archives',),
            ('-d', '-f', 'alias-.*'),
        ])

    def test_date_name_mismatch(self):
        """Make sure that when processing a target "home-$date",
        we won't stumble over "home-dev-$date". This can be an issue
        due to the way we try to parse the dates in filenames.
        """
        # NOTE(review): this test only verifies that run() does not raise; it
        # makes no assertion on cmd -- consider asserting the expected calls.
        cmd = self.run(self.job(name="home"), [
            self.filename('1d', name="home-dev"),
        ])
class TestList(BaseTest):
    """Tests for ListCommand."""
    command_class = ListCommand

    def test(self):
        cmd = self.run([self.job(), self.job(name='foo')], [
            self.filename('1d'),
            self.filename('5d'),
            self.filename('1d', name='foo'),
            self.filename('1d', name='something-else'),
        ])
        # We ask to list two jobs, but only one --list-archives call is
        # necessary.
        assert cmd.backend.match([
            ('--list-archives',)
        ])
| |
# stdlib
from inspect import isclass
from time import time
from datetime import datetime
import os
# 3rd-party
from lxml import etree
# Timestamp format appended to filenames by Table.savexml(timestamp=True).
_TSFMT = "%Y%m%d%H%M%S"
import json
from jnpr.junos.factory.to_json import TableJSONEncoder
class Table(object):
    # XPath selecting each data item in the reply XML; None means this table
    # is a plain container of fields rather than a list of records
    ITEM_XPATH = None
    # XPath (relative to an item) of the node holding the item's key
    ITEM_NAME_XPATH = 'name'
    # View class used to wrap items; concrete table definitions override this
    VIEW = None
def __init__(self, dev=None, xml=None, path=None):
    """
    :dev: Device instance
    :xml: lxml Element instance
    :path: file path to XML, to be used rather than :dev:
    """
    self._dev = dev
    self.xml = xml
    # start with the class-level default view; may be reassigned later
    self.view = self.VIEW
    # cache for item keys, filled lazily by keys()
    self._key_list = []
    self._path = path
    # NOTE(review): _lxml duplicates the constructor's xml argument and is
    # never updated alongside self.xml -- confirm it is still needed
    self._lxml = xml
# -------------------------------------------------------------------------
# PROPERTIES
# -------------------------------------------------------------------------
@property
def D(self):
    """ the Device instance (None when the table was loaded from a file) """
    return self._dev
@property
def RPC(self):
    """ the Device.rpc instance of the bound Device """
    return self.D.rpc
@property
def view(self):
    """ returns the current view assigned to this table (or None) """
    return self._view
@view.setter
def view(self, cls):
    """ assigns a new view to the table; cls must be a class or None """
    if cls is None:
        # clearing the view: items will be returned as raw XML
        self._view = None
        return
    if not isclass(cls):
        raise ValueError("Must be given RunstatView class")
    self._view = cls
@property
def hostname(self):
    """ hostname of the bound Device """
    return self.D.hostname
@property
def is_container(self):
    """
    True if this table does not have records, but is a container of fields
    False otherwise
    """
    return self.ITEM_XPATH is None
@property
def key_list(self):
    """ the cached list of keys; populated lazily by keys() """
    return self._key_list
# -------------------------------------------------------------------------
# PRIVATE METHODS
# -------------------------------------------------------------------------
def _assert_data(self):
    # guard used by all accessors: data must have been loaded via get()
    if self.xml is None:
        raise RuntimeError("Table is empty, use get()")
def _tkey(self, this, key_list):
""" keys with missing XPATH nodes are set to None """
keys = []
for k in key_list:
try:
keys.append(this.xpath(k)[0].text)
except:
keys.append(None)
return tuple(keys)
def _keys_composite(self, xpath, key_list):
    """ composite keys: one tuple of key-items per matched table item """
    return [self._tkey(item, key_list) for item in self.xml.xpath(xpath)]
def _keys_simple(self, xpath):
    # simple keys: one whitespace-stripped text value per matched node
    return [x.text.strip() for x in self.xml.xpath(xpath)]
def _keyspec(self):
    """ returns tuple (keyname-xpath, item-xpath) """
    return (self.ITEM_NAME_XPATH, self.ITEM_XPATH)
def _clearkeys(self):
    # drop the cached key list so keys() recomputes on next access
    self._key_list = []
# -------------------------------------------------------------------------
# PUBLIC METHODS
# -------------------------------------------------------------------------
# ------------------------------------------------------------------------
# keys
# ------------------------------------------------------------------------
def _keys(self):
""" return a list of data item keys from the Table XML """
self._assert_data()
key_value, xpath = self._keyspec()
if isinstance(key_value, str):
# Check if pipe is in the key_value, if so append xpath to each value
if ' | ' in key_value:
return self._keys_simple(' | '.join([xpath + '/' + x for x in key_value.split(' | ')]))
return self._keys_simple(xpath + '/' + key_value)
if not isinstance(key_value, list):
raise RuntimeError(
"What to do with key, table:'%s'" %
self.__class__.__name__)
# ok, so it's a list, which means we need to extract tuple values
return self._keys_composite(xpath, key_value)
def keys(self):
# if the key_list has been cached, then use it
if len(self.key_list):
return self.key_list
# otherwise, build the list of keys into the cache
self._key_list = self._keys()
return self._key_list
# ------------------------------------------------------------------------
# values
# ------------------------------------------------------------------------
def values(self):
""" returns list of table entry items() """
self._assert_data()
if self.view is None:
# no View, so provide XML for each item
return [this for this in self]
else:
# view object for each item
return [list(this.items()) for this in self]
# ------------------------------------------------------------------------
# items
# ------------------------------------------------------------------------
def items(self):
    """ returns list of tuple(name,values) for each table entry """
    return list(zip(self.keys(), self.values()))
# ------------------------------------------------------------------------
# get - loads the data from source
# ------------------------------------------------------------------------
def get(self, *vargs, **kvargs):
    """Load table data from the source; overridden by OpTable/CfgTable.

    The base implementation is a deliberate no-op so the abstract table
    stays callable (see the ``__call__ = get`` overload below).
    """
    # implemented by either OpTable or CfgTable
    # @@@ perhaps this should raise an exception rather than just 'pass',??
    pass
# ------------------------------------------------------------------------
# savexml - saves the table XML to a local file
# ------------------------------------------------------------------------
def savexml(self, path, hostname=False, timestamp=False, append=None):
    """
    Save a copy of the table XML data to a local file. The name of the
    output file (:path:) can include the name of the Device host, the
    timestamp of this action, as well as any user-defined appended value.
    These 'add-ons' will be added to the :path: value prior to the file
    extension in the order (hostname,timestamp,append), separated by
    underscore (_).

    For example, if both hostname=True and append='BAZ1', then when
    :path: = '/var/tmp/foo.xml' and the Device.hostname is "srx123", the
    final file-path will be "/var/tmp/foo_srx123_BAZ1.xml"

    :path:
      file-path to write the XML file on the local filesystem
    :hostname:
      if True, will append the hostname to the :path:
    :timestamp:
      if True, will append the timestamp to the :path: using the default
      timestamp format
      if <str> the timestamp will use the value as the timestamp format as
      defied by strftime()
    :append:
      any <str> value that you'd like appended to the :path: value
      preceding the filename extension.
    """
    fname, fext = os.path.splitext(path)
    if hostname is True:
        fname += "_%s" % self.D.hostname
    if timestamp is not False:
        # True selects the default format; a string is used as-is
        tsfmt = _TSFMT if timestamp is True else timestamp
        tsfmt_val = datetime.fromtimestamp(time()).strftime(tsfmt)
        fname += "_%s" % tsfmt_val
    if append is not None:
        fname += "_%s" % append
    path = fname + fext
    # BUG FIX: the file handle was opened inline and never closed; write
    # inside a context manager so the handle is always released.
    with open(path, 'w') as fp:
        return etree.ElementTree(self.xml).write(fp)
    def to_json(self):
        """
        :returns: JSON encoded string of entire Table contents
        """
        # TableJSONEncoder is defined elsewhere in this package; presumably it
        # knows how to serialize Table/View objects -- confirm against import
        return json.dumps(self, cls=TableJSONEncoder)
# -------------------------------------------------------------------------
# OVERLOADS
# -------------------------------------------------------------------------
__call__ = get
def __repr__(self):
cls_name = self.__class__.__name__
source = self.D.hostname if self.D is not None else self._path
if self.xml is None:
return "%s:%s - Table empty" % (cls_name, source)
else:
n_items = len(self.keys())
return "%s:%s: %s items" % (cls_name, source, n_items)
def __len__(self):
self._assert_data()
return len(self.keys())
def __iter__(self):
""" iterate over each time in the table """
self._assert_data()
as_xml = lambda table, view_xml: view_xml
view_as = self.view or as_xml
for this in self.xml.xpath(self.ITEM_XPATH):
yield view_as(self, this)
    def __getitem__(self, value):
        """
        returns a table item. If a table view is set (should be by default)
        then the item will be converted to the view upon return. if there is
        no table view, then the XML object will be returned.

        :value:
          for <string>, this will perform a select based on key-name
          for <tuple>, this will perform a select based on composite key-name
          for <int>, this will perform a select based by position, like <list>
            [0] is the first item
            [-1] is the last item
          when it is a <slice> then this will return a <list> of View widgets
        """
        self._assert_data()
        keys = self.keys()
        if isinstance(value, int):
            # if selection by index, then grab the key at this index and
            # recursively call this method using that key, yo!
            return self.__getitem__(keys[value])
        if isinstance(value, slice):
            # implements the 'slice' mechanism
            return [self.__getitem__(key) for key in keys[value]]
        # ---[ get_xpath ] ----------------------------------------------------
        def get_xpath(find_value):
            # build an xpath select expression from the key specification
            namekey_xpath, item_xpath = self._keyspec()
            xnkv = '[{0}="{1}"]'
            if isinstance(find_value, str):
                # find by name, simple key
                return item_xpath + xnkv.format(namekey_xpath, find_value)
            if isinstance(find_value, tuple):
                # composite key (value1, value2, ...) will create an
                # iterative xpath of the fmt statement for each key/value pair
                # skip over missing keys
                kv = []
                for k, v in zip(namekey_xpath, find_value):
                    if v is not None:
                        kv.append(xnkv.format(k.replace('_', '-'), v))
                xpf = ''.join(kv)
                return item_xpath + xpf
            # NOTE(review): any other key type falls through returning None,
            # which self.xml.xpath() below will reject -- confirm intended
        # ---[END: get_xpath ] ------------------------------------------------
        found = self.xml.xpath(get_xpath(value))
        if not len(found):
            return None
        as_xml = lambda table, view_xml: view_xml
        use_view = self.view or as_xml
        return use_view(table=self, view_xml=found[0])
def __contains__(self, key):
""" membership for use with 'in' """
return bool(key in self.keys())
| |
import uuid
import logging
import os
import sys
import suds
from suds.client import Client
from suds.wsse import Security, UsernameToken
from urllib2 import URLError
DEFAULT_EVURL = 'https://webservice.exacttarget.com/etframework.wsdl'
class ExactTargetAPI:
    """
    Convenience wrapper around the ExactTarget (Salesforce Marketing Cloud)
    SOAP API, built on the suds SOAP client.

    NOTE(review): this module is Python 2 code (print statements below).
    """
    def __init__(self, username, password, schema_url=None, log_path=None):
        """
        :username:/:password: API credentials used for the WS-Security token.
        :schema_url: optional alternate WSDL location (defaults DEFAULT_EVURL).
        :log_path: optional directory to hold the error log file.
        """
        self.username = username
        self.password = password
        # it's possible to provide your own modified schema
        if(schema_url):
            self.schema_url = schema_url
        else:
            self.schema_url = DEFAULT_EVURL
        # configure logging for ET errors
        if log_path is not None:
            log_path = os.path.join(log_path, 'ExactTargetAPI.error')
        else:
            log_path = 'ExactTargetAPI.error'
        # create an error logger
        self.logger = logging.getLogger('ExactTargetAPI')
        fh = logging.FileHandler(log_path)
        formatter = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        fh.setFormatter(formatter)
        self.logger.addHandler(fh)
    def log(self, msg, level=logging.DEBUG):
        # when stdout is attached to a terminal echo there, else log to file
        if sys.stdout.fileno() is not None and os.isatty(sys.stdout.fileno()):
            print msg
        else:
            self.logger.log(level, msg)
    def init_client(self):
        """
        Create the suds SOAP client and attach the WS-Security token.
        Returns the client, or None if the WSDL could not be fetched.
        """
        try:
            # create the SOAP client
            self.client = Client(self.schema_url)
        except URLError as e:
            self.log(e, logging.CRITICAL)
            return None
        # add WS-Security token
        security = Security()
        token = UsernameToken(self.username, self.password)
        security.tokens.append(token)
        self.client.set_options(wsse=security)
        return self.client
    def add_to_triggered_send_definition(self, tsd_key, email, subscriberkey,
                                         attribs=None):
        """
        Add/update a subscriber and fire the triggered-send definition
        identified by :tsd_key:.  Returns the ET request ID on success;
        raises SoapError/ExactTargetError on failure.
        """
        # create a subscriber object
        s = self.client.factory.create('Subscriber')
        s.EmailAddress = email
        s.SubscriberKey = subscriberkey
        # add extra attributes to data extension
        if attribs is not None:
            for k in attribs.keys():
                s.Attributes.append({'Name': k, 'Value': attribs[k]})
        s.Status = 'Active'
        s.EmailTypePreference = 'HTML'
        s.PrimarySMSPublicationStatus = 'OptedIn'
        tsd = self.client.factory.create('TriggeredSendDefinition')
        tsd.SourceAddressType = 'DefaultPrivateIPAddress'
        tsd.DomainType = 'DefaultDomain'
        tsd.HeaderSalutationSource = 'None'
        tsd.FooterSalutationSource = 'None'
        tsd.TriggeredSendType = 'Continuous' # not sure about this
        tsd.TriggeredSendStatus = 'Active'
        tsd.CustomerKey = tsd_key
        ts = self.client.factory.create('TriggeredSend')
        ts.TriggeredSendDefinition = tsd
        ts.Subscribers = [s]
        co = self.client.factory.create('CreateOptions')
        co.RequestType = 'Synchronous'
        co.QueuePriority = 'Medium'
        so = self.client.factory.create('SaveOption')
        so.PropertyName = '*'
        so.SaveAction = 'UpdateAdd'
        co.SaveOptions = [so]
        opts = [ts]
        try:
            resp = self.client.service.Create(co, opts)
        except suds.WebFault as e:
            raise SoapError(str(e))
        if resp.OverallStatus != 'OK':
            self.log(resp, logging.ERROR)
            raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
        return resp.RequestID
    def _create_api_property(self, name, value):
        # helper: build a single APIProperty name/value pair
        p = self.client.factory.create('APIProperty')
        p.Name = name
        p.Value = value
        return p
    def add_to_data_extension(self, de_key, rows):
        """
        Insert/update :rows: (list of dicts) into the data extension
        identified by :de_key:.  Executed asynchronously server-side.
        """
        opts = []
        # convert props to WSDL format
        for props in rows:
            apiprops = []
            for k in props.keys():
                apiprops.append(self._create_api_property(k, props[k]))
            # create DE and map in our properties array
            deo = self.client.factory.create('DataExtensionObject')
            innerprops = []
            for p in apiprops:
                innerprops.append(p)
            deo.Properties = [{'Property': innerprops}]
            deo.CustomerKey = de_key
            opts.append(deo)
        # createoptions for insertion
        co = self.client.factory.create('CreateOptions')
        co.RequestType = 'Asynchronous'
        co.QueuePriority = 'Medium'
        so = self.client.factory.create('SaveOption')
        so.PropertyName = '*'
        so.SaveAction = 'UpdateAdd'
        co.SaveOptions = [so]
        print self.client.service.Create(co, opts)
    def get_subscriber(self, key):
        """Return the Subscriber matching SubscriberKey :key:, or None."""
        # retrieve a subscriber
        rr = self.client.factory.create('RetrieveRequest')
        rr.ObjectType = 'Subscriber'
        rr.Properties = ['ID', 'EmailAddress', 'SubscriberKey',
                         'UnsubscribedDate', 'Status', 'EmailTypePreference']
        rr.Options = None
        sfp = self.client.factory.create('SimpleFilterPart')
        sfp.Property = 'SubscriberKey'
        sfp.SimpleOperator = 'equals'
        sfp.Value = key
        rr.Filter = sfp
        try:
            resp = self.client.service.Retrieve(rr)
        except suds.WebFault as e:
            raise SoapError(str(e))
        if resp.OverallStatus != 'OK':
            self.log(resp, logging.ERROR)
            raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
        try:
            return resp.Results[0]
        except AttributeError:
            # no Results attribute on the response => nothing matched
            pass
        return None
    def create(self, objtype, key=None):
        """Factory helper: build :objtype: with every field blanked out."""
        obj = self.client.factory.create(objtype)
        for p in obj.__keylist__:
            obj[p] = None
        if key is not None:
            obj.CustomerKey = key
        return obj
    def _deo_to_list(self, resp):
        # flatten DataExtensionObject results into a list of plain dicts
        results = []
        if 'Results' not in resp:
            return None
        for r in resp.Results:
            row = {}
            for p in r.Properties.Property:
                row[p.Name] = p.Value
            results.append(row)
        return results
    def get_data_extension(self, de_key, cols, start_date=None, start_date_field=None, more_data=True):
        """
        Generator yielding pages (lists of dicts) of rows from data
        extension :de_key:.  Optionally filters on :start_date_field: >=
        :start_date:, and follows 'MoreDataAvailable' continuations when
        :more_data: is True.

        NOTE(review): a suds.WebFault here causes an unbounded immediate
        retry of the same request -- confirm this is intended.
        """
        rr = self.client.factory.create('RetrieveRequest')
        rr.ObjectType = 'DataExtensionObject[' + de_key + ']'
        rr.Properties = cols
        rr.Options = None
        if start_date is not None and start_date_field is not None:
            sfp = self.client.factory.create('SimpleFilterPart')
            sfp.Property = start_date_field
            sfp.SimpleOperator = 'greaterThanOrEqual'
            sfp.Value = start_date
            rr.Filter = sfp
        while True:
            try:
                resp = self.client.service.Retrieve(rr)
                yield self._deo_to_list(resp)
            except suds.WebFault:
                # retry the same request on SOAP fault
                continue
            break
        if more_data:
            while resp.OverallStatus == 'MoreDataAvailable':
                rr = self.client.factory.create('RetrieveRequest')
                rr.ContinueRequest = resp.RequestID
                while True:
                    try:
                        resp = self.client.service.Retrieve(rr)
                        yield self._deo_to_list(resp)
                    except suds.WebFault:
                        continue
                    break
    def get_object(self, objtype, props):
        """Generic retrieve of :props: for all objects of :objtype:."""
        rr = self.create('RetrieveRequest')
        rr.ObjectType = objtype
        rr.Properties = props
        try:
            resp = self.client.service.Retrieve(rr)
        except suds.WebFault as e:
            raise SoapError(str(e))
        if resp.OverallStatus != 'OK':
            self.log(resp, logging.ERROR)
            raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
        return resp.Results
    def strip_object(self, obj):
        # blank every field except the identifiers; used when embedding one
        # object inside another request
        id_list = ['ObjectID', 'ID', 'CustomerKey']
        for p in obj.__keylist__:
            if p not in id_list:
                obj[p] = None
        return obj
    def delete_objects(self, objs):
        """Delete :objs: (stripped to identifiers first); True on success."""
        for o in objs:
            self.strip_object(o)
        try:
            resp = self.client.service.Delete(None, objs)
        except suds.WebFault as e:
            raise SoapError(str(e))
        if resp.OverallStatus != 'OK':
            self.log(resp, logging.ERROR)
            raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
        return True
    def update_object(self, obj):
        """Persist changes on :obj: server-side; True on success."""
        try:
            resp = self.client.service.Update(None, obj)
        except suds.WebFault as e:
            raise SoapError(str(e))
        if resp.OverallStatus != 'OK':
            self.log(resp, logging.ERROR)
            raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
        return True
    def create_email(self, name, subject, is_html, body, folder=None):
        """Create an Email object (HTML or text); returns it with new ID."""
        email = self.create('Email')
        email.Name = name
        email.Subject = subject
        email.Folder = folder
        email.CharacterSet = 'UTF-8'
        if is_html:
            email.EmailType = 'HTML'
            email.HTMLBody = body
            email.IsHTMLPaste = True
        else:
            email.EmailType = 'Text Only'
            email.TextBody = body
        try:
            resp = self.client.service.Create(None, [email])
        except suds.WebFault as e:
            raise SoapError(str(e))
        if resp.OverallStatus != 'OK':
            self.log(resp, logging.ERROR)
            raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
        ret_obj = resp.Results[0].Object
        ret_obj.ID = resp.Results[0].NewID
        return ret_obj
    def create_tsd(self, name, key, email, de=None, et_list=None, is_transactional=False, add_subscribers=True):
        """
        Create a TriggeredSendDefinition bound to :email: and either a
        subscriber list (:et_list:) or a sendable data extension (:de:).
        """
        tsd = self.create('TriggeredSendDefinition')
        tsd.Name = name
        tsd.CustomerKey = key
        tsd.Email = self.strip_object(email)
        tsd.SendClassification = self.create('SendClassification')
        tsd.SendClassification.CustomerKey = 'Default Commercial'
        if is_transactional:
            tsd.SendClassification.CustomerKey = 'Default Transactional'
        tsd.IsMultipart = True
        if et_list:
            tsd.List = self.strip_object(et_list)
            tsd.AutoAddSubscribers = add_subscribers
        # NOTE(review): strip_object(de) is called unconditionally; with the
        # default de=None this would raise on __keylist__ -- confirm callers
        # always pass de or et_list appropriately
        tsd.SendSourceDataExtension = self.strip_object(de)
        try:
            resp = self.client.service.Create(None, [tsd])
        except suds.WebFault as e:
            raise SoapError(str(e))
        if resp.OverallStatus != 'OK':
            self.log(resp, logging.ERROR)
            raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
        return resp.Results[0].Object
    def create_data_extension_field(self, name, field_type, is_primary=False, is_nillable=False, length=None, default=None):
        """Build (client-side only) a DataExtensionField definition."""
        field = self.create('DataExtensionField')
        field.Name = name
        field.FieldType = field_type
        field.IsPrimaryKey = is_primary
        field.IsRequired = not is_nillable
        field.IsNillable = is_nillable
        field.MaxLength = length
        field.DefaultValue = default
        return field
    def create_data_extension(self, name, key, de_fields, sender_field=None, description=None, folder=None, template=None):
        """
        Create a DataExtension holding :de_fields: (built with
        create_data_extension_field).  When :template: names an existing
        DataExtensionTemplate, the DE becomes sendable on SubscriberKey.
        """
        de = self.create('DataExtension')
        de.Name = name
        de.Description = description
        de.CustomerKey = key
        de.IsSendable = False
        # TriggeredSendDataExtension
        if template is not None:
            for o in self.get_object('DataExtensionTemplate', ['Name', 'ObjectID']):
                if o.Name == template:
                    de.Template = o
                    sender_field = 'SubscriberKey'
                    break
        de.CategoryID = folder
        if sender_field is not None:
            de.IsSendable = True
            de.SendableDataExtensionField = sender_field
            de.SendableSubscriberField = self.create('Attribute')
            de.SendableSubscriberField.Name = "Subscriber Key"
            de.SendableSubscriberField.Value = ""
        # arrays of self.create_data_extension_field
        de.Fields = {'Field': de_fields}
        try:
            resp = self.client.service.Create(None, [de])
        except suds.WebFault as e:
            raise SoapError(str(e))
        if resp.OverallStatus != 'OK':
            self.log(resp, logging.ERROR)
            raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
        return resp.Results[0].Object
    def create_subscriber(self, email, firstname, lastname, listname=None):
        """Create an Active subscriber; optionally attach to :listname:."""
        # create subscriber object
        s = self.client.factory.create('Subscriber')
        s.EmailAddress = email
        s.SubscriberKey = uuid.uuid1()
        s.Status = 'Active'
        s.EmailTypePreference = 'HTML'
        s.PrimarySMSPublicationStatus = 'OptedIn'
        s.Attributes = [
            {'Name':'First Name', 'Value':firstname},
            {'Name':'Last Name', 'Value':lastname}
        ]
        # add the subscriber to a list if supplied
        if listname is not None:
            l = self.get_subscriber_list(listname)
            if l is not None:
                sl = self.client.factory.create('SubscriberList')
                sl.ID = l.ID
                sl.Status = 'Active'
                s.Lists = [sl]
        co = self.client.factory.create('CreateOptions')
        co.RequestType = 'Synchronous'
        co.QueuePriority = 'Medium'
        so = self.client.factory.create('SaveOption')
        so.PropertyName = '*'
        so.SaveAction = 'UpdateAdd'
        co.SaveOptions = [so]
        opts = [s]
        try:
            resp = self.client.service.Create(co, opts)
        except suds.WebFault as e:
            raise SoapError(str(e))
        if resp.OverallStatus != 'OK':
            self.log(resp, logging.ERROR)
            raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
        return s
    def get_subscriber_list(self, listname, create_if_not_exists=False):
        """Return the List named :listname:; optionally create if absent."""
        # retrieve a subscriber list
        rr = self.client.factory.create('RetrieveRequest')
        rr.ObjectType = 'List'
        rr.Properties = ['ID', 'ListName', 'Description', 'Type',
                         'ListClassification']
        rr.Options = None
        sfp = self.client.factory.create('SimpleFilterPart')
        sfp.Property = 'ListName'
        sfp.SimpleOperator = 'equals'
        sfp.Value = listname
        rr.Filter = sfp
        try:
            resp = self.client.service.Retrieve(rr)
        except suds.WebFault as e:
            raise SoapError(str(e))
        if resp.OverallStatus != 'OK':
            self.log(resp, logging.ERROR)
            raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
        try:
            return resp.Results[0]
        except AttributeError:
            # no match found; fall through
            pass
        # create the subscriber list
        # NOTE(review): no create_subscriber_list method exists on this class
        # (only create_subscriber_lists, plural) -- this branch likely raises
        # AttributeError; confirm
        if create_if_not_exists == True:
            return self.create_subscriber_list(listname)
        else:
            return None
    def add_subscribers_to_list(self, subs, async=True):
        """
        :subs: iterable of (list_id, [email, ...]) pairs; creates the
        subscribers on those lists in chunks of 400.

        NOTE(review): one shared SubscriberList instance is reused for all
        pairs (its ID is reassigned each loop), so every created Subscriber
        references the last list's ID; also 'uo' is built but never passed
        to Create() -- confirm both are intended.
        """
        sublist = self.create('SubscriberList')
        sublist.Action = 'create'
        objs = []
        for subscribers in subs:
            sublist.ID = subscribers[0]
            for email in subscribers[1]:
                sub = self.create('Subscriber')
                sub.SubscriberKey = email
                sub.EmailAddress = email
                sub.Lists = [sublist]
                objs.append(sub)
        uo = self.create('UpdateOptions')
        if async:
            uo.RequestType = 'Asynchronous'
        for obj in chunks(objs, 400):
            try:
                resp = self.client.service.Create(None, obj)
            except suds.WebFault as e:
                raise SoapError(str(e))
            if resp.OverallStatus != 'OK':
                self.log(resp, logging.ERROR)
                raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
    def create_subscriber_lists(self, lists, folder=0):
        """Create several Lists; returns {customer_key: new_id}."""
        objs = []
        for li in lists:
            l = self.create('List')
            if folder > 0:
                l.Category = folder
            l.CustomerKey = li['key']
            l.ListName = li['name']
            l.Description = li['description']
            objs.append(l)
        try:
            resp = self.client.service.Create(None, objs)
        except suds.WebFault as e:
            raise SoapError(str(e))
        if resp.OverallStatus != 'OK':
            self.log(resp, logging.ERROR)
            raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
        else:
            list_obs = {}
            for r in resp.Results:
                list_obs[r.Object.CustomerKey] = r.NewID
            return list_obs
    def get_email_receivers(self, jobid):
        """Return SentEvent results for send :jobid: (first result) or None."""
        # retrieve all users who received this email
        rr = self.client.factory.create('RetrieveRequest')
        rr.ObjectType = 'SentEvent'
        rr.Properties = ['SendID', 'EventDate', 'SubscriberKey']
        rr.Options = None
        sfp = self.client.factory.create('SimpleFilterPart')
        sfp.Property = 'SendID'
        sfp.SimpleOperator = 'equals'
        sfp.Value = jobid
        rr.Filter = sfp
        try:
            resp = self.client.service.Retrieve(rr)
        except suds.WebFault as e:
            raise SoapError(str(e))
        if resp.OverallStatus != 'OK':
            self.log(resp, logging.ERROR)
            raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
        try:
            return resp.Results[0]
        except AttributeError:
            pass
    def get_email_stats(self, jobid):
        """Return delivery stats (Send object) for :jobid:, or None."""
        # retrieve stats on a single email send
        rr = self.client.factory.create('RetrieveRequest')
        rr.ObjectType = 'Send'
        rr.Properties = ['SentDate', 'UniqueOpens', 'NumberSent', 'NumberDelivered', 'HardBounces', 'SoftBounces']
        rr.Options = None
        sfp = self.client.factory.create('SimpleFilterPart')
        sfp.Property = 'ID'
        sfp.SimpleOperator = 'equals'
        sfp.Value = jobid
        rr.Filter = sfp
        try:
            resp = self.client.service.Retrieve(rr)
        except suds.WebFault as e:
            raise SoapError(str(e))
        if resp.OverallStatus != 'OK':
            self.log(resp, logging.ERROR)
            raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
        try:
            return resp.Results[0]
        except AttributeError:
            pass
    def create_filter(self, prop, operator, value):
        """Build a SimpleFilterPart: (prop, operator, value)."""
        sfp = self.create('SimpleFilterPart')
        sfp.Property = prop
        sfp.SimpleOperator = operator
        sfp.Value = value
        return sfp
    def create_filter_definition(self, name, filters, key=None, description=None, operator='AND'):
        """
        Create a FilterDefinition from a list of (prop, op, value) tuples
        combined with :operator:.
        """
        fd = self.create('FilterDefinition', key)
        fd.Name = name
        fd.Description = description
        filter_len = len(filters)
        # NOTE(review): 'is 1' only works via CPython small-int caching;
        # '== 1' is the correct comparison
        if filter_len is 1:
            fd.DataFilter = self.create_filter(*filters[0])
        else:
            cfp = self.create('SimpleFilterPart')
            cfp.LeftOperand = self.create_filter(*filters[0])
            cfp.LogicalOperator = operator
            cfp.RightOperand = self.create_filter(*filters[1])
            if filter_len > 2:
                cfp.AdditionalOperands = []
                for f in filters[2:]:
                    cfp.AdditionalOperands.append(self.create_filter(*f))
            fd.DataFilter = cfp
        self.client.service.Create(None, fd)
    def start_tsd(self, tsd):
        """Activate a TriggeredSendDefinition."""
        tsd.TriggeredSendStatus = 'Active'
        self.update_object(tsd)
    def stop_tsd(self, tsd):
        """Deactivate a TriggeredSendDefinition."""
        tsd.TriggeredSendStatus = 'Inactive'
        self.update_object(tsd)
    def run_import(self, key):
        """Kick off the ImportDefinition identified by :key:."""
        im = self.create('ImportDefinition')
        im.CustomerKey = key
        objs = {'Definition': [im,]}
        try:
            resp = self.client.service.Perform(self.create('PerformOptions'), 'start', objs)
        except suds.WebFault as e:
            raise SoapError(str(e))
        if resp.OverallStatus != 'OK':
            self.log(resp, logging.ERROR)
            raise ExactTargetError(resp.RequestID, resp.Results[0].StatusMessage)
class ExactTargetError(Exception):
    """
    Raised when an ExactTarget API call completes but reports a
    non-'OK' OverallStatus.

    :request_id: the ET RequestID of the failed call
    :message: the StatusMessage reported by the service
    """
    def __init__(self, request_id, message):
        Exception.__init__(self, message)
        self.request_id = request_id
        # store the message explicitly: the original relied on the
        # deprecated py2-only Exception.message attribute, which does not
        # exist on Python 3 and would make __unicode__ raise AttributeError
        self.message = message
    def __unicode__(self):
        return "Request %s failed with message '%s'" % (self.request_id, self.message)
    def __str__(self):
        return str(self.__unicode__())
class SoapError(Exception):
    """Raised when the underlying SOAP transport reports a WebFault."""
# http://stackoverflow.com/a/1751478/271768
def chunks(l, n):
    """Split list *l* into successive pieces of length *n* (last may be
    shorter).  From http://stackoverflow.com/a/1751478/271768"""
    starts = range(0, len(l), n)
    return [l[start:start + n] for start in starts]
| |
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import six
import ruamel.yaml as yaml
import os
import json
"""
This module provides classes to perform analyses of
the local environments (e.g., finding near neighbors)
of single sites in molecules and structures.
To do:
- Insert LocalStructOrderParas class here.
"""
__author__ = "Shyue Ping Ong, Geoffroy Hautier, Sai Jayaraman,"+\
" Nils E. R. Zimmermann, Bharat Medasani"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Nils E. R. Zimmermann"
__email__ = "nils.e.r.zimmermann@gmail.com"
__status__ = "Production"
__date__ = "August 17, 2017"
from math import pow, pi, asin, atan, sqrt, exp, cos, acos
import numpy as np
from bisect import bisect_left
from scipy.spatial import Voronoi
from pymatgen import Element
from pymatgen.core.structure import Structure
from pymatgen.util.num import abs_cap
from pymatgen.analysis.bond_valence import BV_PARAMS
from pymatgen.analysis.structure_analyzer import OrderParameters
# locate the tabulated ionic-radii data file shipped next to this module
file_dir = os.path.dirname(__file__)
rad_file = os.path.join(file_dir, 'ionic_radii.json')
# loaded once at import time; maps element -> oxidation state -> CN -> radius
# (schema inferred from usage in _get_ionic_radii below -- confirm)
with open(rad_file, 'r') as fp:
    _ion_radii = json.load(fp)
class ValenceIonicRadiusEvaluator(object):
    """
    Computes site valences and ionic radii for a structure using bond valence
    analyzer

    Args:
        structure: pymatgen.core.structure.Structure
    """
    def __init__(self, structure):
        # operate on a copy; _get_valences() may replace it with an
        # oxidation-state-decorated structure
        self._structure = structure.copy()
        self._valences = self._get_valences()
        self._ionic_radii = self._get_ionic_radii()
    @property
    def radii(self):
        """
        List of ionic radii of elements in the order of sites.
        """
        el = [site.species_string for site in self._structure.sites]
        # NOTE(review): keyed by species string, so sites with the same
        # species collapse to one entry -- confirm intended
        radii_dict = dict(zip(el, self._ionic_radii))
        #print radii_dict
        return radii_dict
    @property
    def valences(self):
        """
        List of oxidation states of elements in the order of sites.
        """
        el = [site.species_string for site in self._structure.sites]
        valence_dict = dict(zip(el, self._valences))
        return valence_dict
    @property
    def structure(self):
        """
        Returns oxidation state decorated structure.
        """
        return self._structure.copy()
    def _get_ionic_radii(self):
        """
        Computes ionic radii of elements for all sites in the structure.
        If valence is zero, atomic radius is used.
        """
        radii = []
        vnn = VoronoiNN() # self._structure)
        def nearest_key(sorted_vals, key):
            # return the value in sorted_vals closest to key
            i = bisect_left(sorted_vals, key)
            if i == len(sorted_vals):
                return sorted_vals[-1]
            if i == 0:
                return sorted_vals[0]
            before = sorted_vals[i-1]
            after = sorted_vals[i]
            if after-key < key-before:
                return after
            else:
                return before
        for i in range(len(self._structure.sites)):
            site = self._structure.sites[i]
            # bare Element (no oxidation state decoration): use atomic radius
            if isinstance(site.specie,Element):
                radius = site.specie.atomic_radius
                # Handle elements with no atomic_radius
                # by using calculated values instead.
                if radius is None:
                    radius = site.specie.atomic_radius_calculated
                if radius is None:
                    raise ValueError(
                        "cannot assign radius to element {}".format(
                            site.specie))
                radii.append(radius)
                continue
            el = site.specie.symbol
            oxi_state = int(round(site.specie.oxi_state))
            coord_no = int(round(vnn.get_cn(self._structure, i)))
            try:
                # snap to the nearest tabulated oxidation state, then look
                # up the radius for this coordination number
                tab_oxi_states = sorted(map(int, _ion_radii[el].keys()))
                oxi_state = nearest_key(tab_oxi_states, oxi_state)
                radius = _ion_radii[el][str(oxi_state)][str(coord_no)]
            except KeyError:
                # coordination number not tabulated: first try the adjacent
                # coordination number in the direction of the true (float) CN
                if vnn.get_cn(self._structure, i)-coord_no > 0:
                    new_coord_no = coord_no + 1
                else:
                    new_coord_no = coord_no - 1
                try:
                    radius = _ion_radii[el][str(oxi_state)][str(new_coord_no)]
                    coord_no = new_coord_no
                except:
                    # still missing: interpolate between (or clamp to) the
                    # tabulated coordination numbers
                    tab_coords = sorted(map(int, _ion_radii[el][str(oxi_state)].keys()))
                    new_coord_no = nearest_key(tab_coords, coord_no)
                    i = 0
                    for val in tab_coords:
                        if val > coord_no:
                            break
                        i = i + 1
                    if i == len(tab_coords):
                        key = str(tab_coords[-1])
                        radius = _ion_radii[el][str(oxi_state)][key]
                    elif i == 0:
                        key = str(tab_coords[0])
                        radius = _ion_radii[el][str(oxi_state)][key]
                    else:
                        key = str(tab_coords[i-1])
                        radius1 = _ion_radii[el][str(oxi_state)][key]
                        key = str(tab_coords[i])
                        radius2 = _ion_radii[el][str(oxi_state)][key]
                        radius = (radius1+radius2)/2
            #implement complex checks later
            radii.append(radius)
        return radii
    def _get_valences(self):
        """
        Computes ionic valences of elements for all sites in the structure.
        """
        # NOTE(review): BVAnalyzer is not among this file's visible imports;
        # presumably imported elsewhere in the full module -- confirm
        try:
            bv = BVAnalyzer()
            self._structure = bv.get_oxi_state_decorated_structure(self._structure)
            valences = bv.get_valences(self._structure)
        except:
            # bond-valence analysis failed; retry without symmetry, then
            # fall back to each element's first common oxidation state
            try:
                bv = BVAnalyzer(symm_tol=0.0)
                self._structure = bv.get_oxi_state_decorated_structure(self._structure)
                valences = bv.get_valences(self._structure)
            except:
                valences = []
                for site in self._structure.sites:
                    if len(site.specie.common_oxidation_states) > 0:
                        valences.append(site.specie.common_oxidation_states[0])
                    # Handle noble gas species
                    # which have no entries in common_oxidation_states.
                    else:
                        valences.append(0)
                # if the fallback guesses don't sum to zero (charged cell),
                # discard them and report all-zero valences instead
                if sum(valences):
                    valences = [0]*self._structure.num_sites
                else:
                    self._structure.add_oxidation_state_by_site(valences)
                #raise
        #el = [site.specie.symbol for site in self._structure.sites]
        #el = [site.species_string for site in self._structure.sites]
        #el = [site.specie for site in self._structure.sites]
        #valence_dict = dict(zip(el, valences))
        #print valence_dict
        return valences
class NearNeighbors(object):
    """
    Base class to determine near neighbors that typically include nearest
    neighbors and others that are within some tolerable distance.

    Subclasses must implement get_nn_info(structure, n); every other
    accessor here is derived from it.
    """

    def __init__(self):
        pass

    def get_cn(self, structure, n, use_weights=False):
        """
        Get coordination number, CN, of site with index n in structure.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine CN.
            use_weights (boolean): flag indicating whether (True)
                to use weights for computing the coordination number
                or not (False, default: each coordinated site has equal
                weight).
        Returns:
            cn (integer or float): coordination number.
        """
        siw = self.get_nn_info(structure, n)
        return sum([e['weight'] for e in siw]) if use_weights else len(siw)

    def get_nn(self, structure, n):
        """
        Get near neighbors of site with index n in structure.

        Args:
            structure (Structure): input structure.
            n (integer): index of site in structure for which to determine
                neighbors.
        Returns:
            sites (list of Site objects): near neighbors.
        """
        return [e['site'] for e in self.get_nn_info(structure, n)]

    def get_weights_of_nn_sites(self, structure, n):
        """
        Get weight associated with each near neighbor of site with
        index n in structure.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine the weights.
        Returns:
            weights (list of floats): near-neighbor weights.
        """
        # BUG FIX: the original signature was (self, n) but the body (and
        # this docstring) used 'structure', so every call raised NameError;
        # the parameter is now accepted like in the sibling methods
        return [e['weight'] for e in self.get_nn_info(structure, n)]

    def get_nn_images(self, structure, n):
        """
        Get image location of all near neighbors of site with index n in
        structure.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine the image
                location of near neighbors.
        Returns:
            images (list of 3D integer array): image locations of
                near neighbors.
        """
        return [e['image'] for e in self.get_nn_info(structure, n)]

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near-neighbor
                information.
        Returns:
            siw (list of dicts): each dictionary provides information
                about a single near neighbor, where key 'site' gives
                access to the corresponding Site object, 'image' gives
                the image location, and 'weight' provides the weight
                that a given near-neighbor site contributes
                to the coordination number (1 or smaller), 'site_index'
                gives index of the corresponding site in
                the original structure.
        """
        raise NotImplementedError("get_nn_info(structure, n)"
                                  " is not defined!")

    @staticmethod
    def _get_image(frac_coords):
        """Private convenience method for get_nn_info,
        gives lattice image from provided PeriodicSite.
        NOTE(review): for exact negative integers (e.g. -1.0) this yields
        int(f - 1), not floor(f) -- preserved as-is."""
        return [int(f) if f >= 0 else int(f - 1)
                for f in frac_coords]

    @staticmethod
    def _get_original_site(structure, site):
        """Private convenience method for get_nn_info,
        gives original site index from ProvidedPeriodicSite."""
        is_periodic_image = [site.is_periodic_image(s) for s in structure]
        return is_periodic_image.index(True)
class VoronoiNN(NearNeighbors):
    """
    Uses a Voronoi algorithm to determine near neighbors for each site in a
    structure.

    Args:
        tol (float): tolerance parameter for near-neighbor finding
            (default: 0).
        targets (Element or list of Elements): target element(s).
        cutoff (float): cutoff radius in Angstrom to look for near-neighbor
            atoms. Defaults to 10.0.
        allow_pathological (bool): whether to allow infinite vertices in
            determination of Voronoi coordination.
    """
    def __init__(self, tol=0, targets=None, cutoff=10.0,
                 allow_pathological=False):
        self.tol = tol
        self.cutoff = cutoff
        self.allow_pathological = allow_pathological
        self.targets = targets
    def get_voronoi_polyhedra(self, structure, n):
        """
        Gives a weighted polyhedra around a site. This uses the Voronoi
        construction with solid angle weights.
        See ref: A Proposed Rigorous Definition of Coordination Number,
        M. O'Keeffe, Acta Cryst. (1979). A35, 772-775

        Args:
            structure (Structure): structure for which to evaluate the
                coordination environment.
            n (integer): site index.

        Returns:
            A dict of sites sharing a common Voronoi facet with the site
            n and their solid angle weights
        """
        if self.targets is None:
            targets = structure.composition.elements
        else:
            targets = self.targets
        center = structure[n]
        # neighbors sorted by distance so the center site ends up at index 0
        # of the qvoronoi input
        neighbors = structure.get_sites_in_sphere(
            center.coords, self.cutoff)
        neighbors = [i[0] for i in sorted(neighbors, key=lambda s: s[1])]
        qvoronoi_input = [s.coords for s in neighbors]
        voro = Voronoi(qvoronoi_input)
        all_vertices = voro.vertices
        results = {}
        # only ridges involving point 0 (the center site) matter
        for nn, vind in voro.ridge_dict.items():
            if 0 in nn:
                if -1 in vind:
                    # -1 marks a vertex at infinity
                    if self.allow_pathological:
                        continue
                    else:
                        raise RuntimeError("This structure is pathological,"
                                           " infinite vertex in the voronoi "
                                           "construction")
                facets = [all_vertices[i] for i in vind]
                # solid_angle is presumably imported from
                # pymatgen.analysis.structure_analyzer -- not visible here
                results[neighbors[sorted(nn)[1]]] = solid_angle(
                    center.coords, facets)
        maxangle = max(results.values())
        resultweighted = {}
        for nn, angle in results.items():
            # is nn site is ordered use "nn.specie" to get species, else use "nn.species_and_occu" to get species
            if nn.is_ordered:
                if nn.specie in targets:
                    resultweighted[nn] = angle / maxangle
            else: # is nn site is disordered
                for disordered_sp in nn.species_and_occu.keys():
                    if disordered_sp in targets:
                        resultweighted[nn] = angle / maxangle
        return resultweighted
    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n in structure
        using Voronoi decomposition.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near-neighbor
                sites.

        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a coordinated site, its image location,
                and its weight.
        """
        if self.targets is None:
            targets = structure.composition.elements
        else:
            targets = self.targets
        siw = []
        for site, weight in self.get_voronoi_polyhedra(
                structure, n).items():
            # keep only facets above the tolerance for a target species
            if weight > self.tol and site.specie in targets:
                siw.append({'site': site,
                            'image': self._get_image(site.frac_coords),
                            'weight': weight,
                            'site_index': self._get_original_site(structure, site)})
        return siw
class JMolNN(NearNeighbors):
    """
    Determine near-neighbor sites and coordination number using an emulation
    of JMol's default autoBond() algorithm. This version of the algorithm
    does not take into account any information regarding known charge
    states.

    Args:
        tol (float): tolerance parameter for bond determination
            (default: 1E-3).
        el_radius_updates: (dict) symbol->float to override default atomic
            radii table values
    """
    def __init__(self, tol=1E-3, el_radius_updates=None):
        self.tol = tol
        # Load elemental radii table
        bonds_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                  "bonds_jmol_ob.yaml")
        with open(bonds_file, 'r') as f:
            self.el_radius = yaml.safe_load(f)
        # Update any user preference elemental radii
        if el_radius_updates:
            self.el_radius.update(el_radius_updates)
    def get_max_bond_distance(self, el1_sym, el2_sym, constant=0.56):
        """
        Use JMol algorithm to determine bond length from atomic parameters

        Args:
            el1_sym: (str) symbol of atom 1
            el2_sym: (str) symbol of atom 2
            constant: (float) factor to tune model

        Returns: (float) max bond length
        """
        # sqrt(x**2) == abs(x); with positive radii this is simply the sum
        return sqrt(
            (self.el_radius[el1_sym] + self.el_radius[el2_sym] + constant) ** 2)
    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image locations
        and weights of the site with index n using the bond identification
        algorithm underlying JMol.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near
                neighbors.

        Returns:
            siw (list of tuples (Site, array, float)): tuples, each one
                of which represents a neighbor site, its image location,
                and its weight.
        """
        site = structure[n]
        # Determine relevant bond lengths based on atomic radii table
        bonds = {}
        for el in structure.composition.elements:
            bonds[site.specie, el] = self.get_max_bond_distance(
                site.specie.symbol, el.symbol)
        # Search for neighbors up to max bond length + tolerance
        max_rad = max(bonds.values()) + self.tol
        min_rad = min(bonds.values())
        siw = []
        for neighb, dist in structure.get_neighbors(site, max_rad):
            # Confirm neighbor based on bond length specific to atom pair
            if dist <= bonds[(site.specie, neighb.specie)] + self.tol:
                # weight is the shortest tabulated bond divided by distance
                weight = min_rad / dist
                siw.append({'site': neighb,
                            'image': self._get_image(neighb.frac_coords),
                            'weight': weight,
                            'site_index': self._get_original_site(structure, neighb)})
        return siw
class MinimumDistanceNN(NearNeighbors):
    """
    Near-neighbor finder based on the shortest interatomic distance:
    the closest neighbor at distance d_min is kept, together with every
    site within (1 + delta) * d_min, where delta is a relative distance
    tolerance.

    Args:
        tol (float): tolerance parameter for neighbor identification
            (default: 0.1).
        cutoff (float): cutoff radius in Angstrom to look for trial
            near-neighbor sites (default: 10.0).
    """

    def __init__(self, tol=0.1, cutoff=10.0):
        self.tol = tol
        self.cutoff = cutoff

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites plus their image locations and
        weights for site index n, using the closest-neighbor
        distance-based method.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near
                neighbors.

        Returns:
            siw (list of dicts): each entry holds a neighbor site, its
                image location, its weight, and its original site index.
        """
        center = structure[n]
        candidates = structure.get_neighbors(center, self.cutoff)
        d_min = min(d for _, d in candidates)

        # Everything closer than (1 + tol) * d_min counts as bonded;
        # weight falls off as d_min / d.
        limit = (1.0 + self.tol) * d_min
        info = []
        for neighbor, d in candidates:
            if d < limit:
                info.append({'site': neighbor,
                             'image': self._get_image(neighbor.frac_coords),
                             'weight': d_min / d,
                             'site_index': self._get_original_site(structure, neighbor)})
        return info
class MinimumOKeeffeNN(NearNeighbors):
    """
    Determine near-neighbor sites and coordination number using the
    neighbor(s) at closest relative distance, d_min_OKeffee, plus some
    relative tolerance, where bond valence parameters from O'Keeffe's
    bond valence method (J. Am. Chem. Soc. 1991, 3226-3229) are used
    to calculate relative distances.

    Args:
        tol (float): tolerance parameter for neighbor identification
            (default: 0.1).
        cutoff (float): cutoff radius in Angstrom to look for trial
            near-neighbor sites (default: 10.0).
    """

    def __init__(self, tol=0.1, cutoff=10.0):
        self.tol = tol
        self.cutoff = cutoff

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image
        locations and weights of the site with index n, using the
        closest relative neighbor distance-based method with O'Keeffe
        parameters.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near
                neighbors.

        Returns:
            siw (list of dicts): each entry holds a neighbor site, its
                image location, its weight, and its original site index.
        """
        site = structure[n]
        neighs_dists = structure.get_neighbors(site, self.cutoff)
        # BUG FIX: the bare `except:` clauses previously swallowed every
        # exception (including KeyboardInterrupt).  `.specie` raises
        # AttributeError for disordered sites, which is the case the
        # fallback to species_string is meant to handle.
        try:
            eln = site.specie.element
        except AttributeError:
            eln = site.species_string

        reldists_neighs = []
        for neigh, dist in neighs_dists:
            try:
                el2 = neigh.specie.element
            except AttributeError:
                el2 = neigh.species_string
            reldists_neighs.append([dist / get_okeeffe_distance_prediction(
                eln, el2), neigh])

        siw = []
        min_reldist = min(reldist for reldist, neigh in reldists_neighs)
        for reldist, s in reldists_neighs:
            if reldist < (1.0 + self.tol) * min_reldist:
                w = min_reldist / reldist
                siw.append({'site': s,
                            'image': self._get_image(s.frac_coords),
                            'weight': w,
                            'site_index': self._get_original_site(structure, s)})
        return siw
class MinimumVIRENN(NearNeighbors):
    """
    Determine near-neighbor sites and coordination number using the
    neighbor(s) at closest relative distance, d_min_VIRE, plus some
    relative tolerance, where atom radii from the
    ValenceIonicRadiusEvaluator (VIRE) are used
    to calculate relative distances.

    Args:
        tol (float): tolerance parameter for neighbor identification
            (default: 0.1).
        cutoff (float): cutoff radius in Angstrom to look for trial
            near-neighbor sites (default: 10.0).
    """

    def __init__(self, tol=0.1, cutoff=10.0):
        self.tol = tol
        self.cutoff = cutoff

    def get_nn_info(self, structure, n):
        """
        Get all near-neighbor sites as well as the associated image
        locations and weights of the site with index n, using the
        closest relative neighbor distance-based method with VIRE
        atomic/ionic radii.

        Args:
            structure (Structure): input structure.
            n (integer): index of site for which to determine near
                neighbors.

        Returns:
            siw (list of dicts): each entry holds a neighbor site, its
                image location, its weight, and its original site index.
        """
        vire = ValenceIonicRadiusEvaluator(structure)
        site = vire.structure[n]
        neighs_dists = vire.structure.get_neighbors(site, self.cutoff)
        rn = vire.radii[vire.structure[n].species_string]

        reldists_neighs = []
        for neigh, dist in neighs_dists:
            reldists_neighs.append([dist / (
                vire.radii[neigh.species_string] + rn), neigh])

        siw = []
        min_reldist = min(reldist for reldist, neigh in reldists_neighs)
        for reldist, s in reldists_neighs:
            if reldist < (1.0 + self.tol) * min_reldist:
                w = min_reldist / reldist
                siw.append({'site': s,
                            'image': self._get_image(s.frac_coords),
                            'weight': w,
                            # BUG FIX: neighbors `s` were produced from the
                            # oxidation-state-decorated copy vire.structure,
                            # so the original-site lookup must run against
                            # that same structure (was the undecorated
                            # `structure` argument).
                            'site_index': self._get_original_site(vire.structure, s)})
        return siw
def solid_angle(center, coords):
    """
    Helper method to calculate the solid angle of a set of coords from the
    center.

    Args:
        center (3x1 array): Center to measure solid angle from.
        coords (Nx3 array): List of coords to determine solid angle.

    Returns:
        The solid angle.
    """
    origin = np.array(center)
    rays = [np.array(c) - origin for c in coords]
    rays.append(rays[0])  # close the loop of rays

    normals = [np.cross(rays[k + 1], rays[k]) for k in range(len(rays) - 1)]
    normals.append(np.cross(rays[1], rays[0]))

    angle_sum = 0.0
    for k in range(len(normals) - 1):
        cos_val = -np.dot(normals[k], normals[k + 1]) \
            / (np.linalg.norm(normals[k]) * np.linalg.norm(normals[k + 1]))
        # abs_cap clamps the cosine into [-1, 1] before acos.
        angle_sum += acos(abs_cap(cos_val))
    return angle_sum + (3 - len(rays)) * pi
def get_okeeffe_params(el_symbol):
    """
    Returns the elemental parameters related to atom size and
    electronegativity which are used for estimating bond-valence
    parameters (bond length) of pairs of atoms on the basis of data
    provided in 'Atoms Sizes and Bond Lengths in Molecules and Crystals'
    (O'Keeffe & Brese, 1991).

    Args:
        el_symbol (str): element symbol.

    Returns:
        (dict): atom-size ('r') and electronegativity-related ('c')
            parameter.

    Raises:
        RuntimeError: if no O'Keeffe parameters are tabulated for the
            element.
    """
    el = Element(el_symbol)
    # Membership test directly on the dict (no need to materialize keys()).
    if el not in BV_PARAMS:
        # Fixed message: was missing a space and misspelled "dictionary".
        raise RuntimeError("Could not find O'Keeffe parameters for element"
                           " \"{}\" in \"BV_PARAMS\" dictionary"
                           " provided by pymatgen".format(el_symbol))
    return BV_PARAMS[el]
def get_okeeffe_distance_prediction(el1, el2):
    """
    Returns an estimate of the bond valence parameter (bond length) using
    the derived parameters from 'Atoms Sizes and Bond Lengths in Molecules
    and Crystals' (O'Keeffe & Brese, 1991). The estimate is based on two
    experimental parameters: r and c. The value for r is based off radius,
    while c is (usually) the Allred-Rochow electronegativity. Values used
    are *not* generated from pymatgen, and are found in
    'okeeffe_params.json'.

    Args:
        el1, el2 (Element): two Element objects

    Returns:
        a float value of the predicted bond length
    """
    params1 = get_okeeffe_params(el1)
    params2 = get_okeeffe_params(el2)
    r1, c1 = params1['r'], params1['c']
    r2, c2 = params2['r'], params2['c']

    # O'Keeffe & Brese correction term to the sum of radii.
    correction = r1 * r2 * (sqrt(c1) - sqrt(c2)) ** 2 / (c1 * r1 + c2 * r2)
    return r1 + r2 - correction
def get_neighbors_of_site_with_index(struct, n, approach="min_dist", delta=0.1,
                                     cutoff=10.0):
    """
    Returns the neighbors of a given site using a specific neighbor-finding
    method.

    Args:
        struct (Structure): input structure.
        n (int): index of site in Structure object for which motif type
            is to be determined.
        approach (str): type of neighbor-finding approach, where
            "min_dist" will use the MinimumDistanceNN class,
            "voronoi" the VoronoiNN class, "min_OKeeffe" the
            MinimumOKeeffe class, and "min_VIRE" the MinimumVIRENN class.
        delta (float): tolerance involved in neighbor finding.
        cutoff (float): (large) radius to find tentative neighbors.

    Returns: neighbor sites.

    Raises:
        RuntimeError: for an unrecognized approach string.
    """
    # Select the finder class lazily so an unknown approach fails fast
    # without touching any of the NN classes.
    if approach == "min_dist":
        finder = MinimumDistanceNN(tol=delta, cutoff=cutoff)
    elif approach == "voronoi":
        finder = VoronoiNN(tol=delta, cutoff=cutoff)
    elif approach == "min_OKeeffe":
        finder = MinimumOKeeffeNN(tol=delta, cutoff=cutoff)
    elif approach == "min_VIRE":
        finder = MinimumVIRENN(tol=delta, cutoff=cutoff)
    else:
        raise RuntimeError("unsupported neighbor-finding method ({}).".format(
            approach))
    return finder.get_nn(struct, n)
def site_is_of_motif_type(struct, n, approach="min_dist", delta=0.1,
                          cutoff=10.0, thresh=None):
    """
    Returns the motif type of the site with index n in structure struct;
    currently featuring "tetrahedral", "octahedral", "bcc", and "cp"
    (close-packed: fcc and hcp) as well as "square pyramidal" and
    "trigonal bipyramidal". If the site is not recognized,
    "unrecognized" is returned. If a site should be assigned to two
    different motifs, "multiple assignments" is returned.

    Args:
        struct (Structure): input structure.
        n (int): index of site in Structure object for which motif type
            is to be determined.
        approach (str): type of neighbor-finding approach, where
            "min_dist" will use the MinimumDistanceNN class,
            "voronoi" the VoronoiNN class, "min_OKeeffe" the
            MinimumOKeeffe class, and "min_VIRE" the MinimumVIRENN class.
        delta (float): tolerance involved in neighbor finding.
        cutoff (float): (large) radius to find tentative neighbors.
        thresh (dict): thresholds for motif criteria (currently, required
            keys and their default values are "qtet": 0.5,
            "qoct": 0.5, "qbcc": 0.5, "q6": 0.4).

    Returns: motif type (str).
    """
    if thresh is None:
        thresh = {
            "qtet": 0.5, "qoct": 0.5, "qbcc": 0.5, "q6": 0.4,
            "qtribipyr": 0.8, "qsqpyr": 0.8}

    ops = OrderParameters([
        "cn", "tet", "oct", "bcc", "q6", "sq_pyr", "tri_bipyr"])
    neighs_cent = get_neighbors_of_site_with_index(
        struct, n, approach=approach, delta=delta, cutoff=cutoff)
    neighs_cent.append(struct.sites[n])
    opvals = ops.get_order_parameters(
        neighs_cent, len(neighs_cent) - 1,
        indices_neighs=list(range(len(neighs_cent) - 1)))
    cn = int(opvals[0] + 0.5)

    # Collect every motif whose coordination number and order-parameter
    # threshold are satisfied; opvals indices follow the ops list above.
    matches = []
    if cn == 4 and opvals[1] > thresh["qtet"]:
        matches.append("tetrahedral")
    if cn == 5 and opvals[5] > thresh["qsqpyr"]:
        matches.append("square pyramidal")
    if cn == 5 and opvals[6] > thresh["qtribipyr"]:
        matches.append("trigonal bipyramidal")
    if cn == 6 and opvals[2] > thresh["qoct"]:
        matches.append("octahedral")
    if cn == 8 and (opvals[3] > thresh["qbcc"] and opvals[1] < thresh["qtet"]):
        matches.append("bcc")
    if cn == 12 and (opvals[4] > thresh["q6"] and opvals[1] < thresh["q6"] and
                     opvals[2] < thresh["q6"] and opvals[3] < thresh["q6"]):
        matches.append("cp")

    if not matches:
        return "unrecognized"
    if len(matches) > 1:
        return "multiple assignments"
    return matches[0]
| |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import mock
import sys
import os
import six
import shutil
import tempfile
from .. import base
from girder import constants
from girder.models.user import User
from girder.utility import install
# Absolute, normalized path to the test plugins bundled with the source
# tree (two directories up from this test module, under test/test_plugins).
pluginRoot = os.path.normpath(os.path.join(
    os.path.dirname(os.path.abspath(__file__)), '..', '..', 'test', 'test_plugins'
))
class PluginOpts(object):
    """
    Plain namespace mimicking the parsed command-line options that
    girder-install passes to the install module.  Note the ``dev``
    keyword is exposed as the ``development`` attribute.
    """
    def __init__(self, plugin=None, force=False, symlink=False, dev=False, npm='npm',
                 skip_requirements=False, all_plugins=False, plugins=None, watch=False,
                 watch_plugin=None, plugin_prefix='plugin', no_plugins=False):
        options = dict(
            plugin=plugin,
            force=force,
            symlink=symlink,
            development=dev,
            npm=npm,
            skip_requirements=skip_requirements,
            all_plugins=all_plugins,
            plugins=plugins,
            watch=watch,
            watch_plugin=watch_plugin,
            plugin_prefix=plugin_prefix,
            no_plugins=no_plugins)
        for name, value in options.items():
            setattr(self, name, value)
class ProcMock(object):
    """
    Minimal stand-in for a subprocess.Popen handle: exposes returncode,
    a no-op communicate(), and a wait() that can simulate Ctrl-C.
    """
    def __init__(self, rc=0, keyboardInterrupt=False):
        self.returncode = rc
        # When set, wait() raises KeyboardInterrupt to exercise the
        # graceful-interrupt code paths.
        self.kbi = keyboardInterrupt

    def communicate(self):
        # No captured stdout/stderr.
        return None, None

    def wait(self):
        if not self.kbi:
            return
        raise KeyboardInterrupt()
def setUpModule():
    """Start the Girder test server once for all tests in this module."""
    base.startServer()
def tearDownModule():
    """Stop the shared Girder test server after the module's tests run."""
    base.stopServer()
class InstallTestCase(base.TestCase):
    """
    Test installing resources and plugins using the install module.
    """

    def setUp(self):
        """Create a scratch plugin directory and point Girder at it."""
        base.TestCase.setUp(self)
        self.baseDir = tempfile.mkdtemp()
        self.pluginDir = os.path.join(self.baseDir, 'plugins')
        os.mkdir(self.pluginDir)
        self.mockPluginDir(self.pluginDir)

    def tearDown(self):
        """Restore the real plugin directory and remove the scratch tree."""
        base.TestCase.tearDown(self)
        self.unmockPluginDir()
        shutil.rmtree(self.baseDir)

    def testInstallPlugin(self):
        """Exercise install_plugin: fresh install, force, and symlink modes."""
        install.install_plugin(PluginOpts(plugin=[
            os.path.join(pluginRoot, 'has_deps'),
            os.path.join(constants.ROOT_DIR, 'plugins', 'jobs')
        ]))
        self.assertTrue(os.path.exists(
            os.path.join(self.pluginDir, 'jobs', 'plugin.yml')))
        self.assertTrue(os.path.exists(
            os.path.join(self.pluginDir, 'has_deps', 'plugin.json')))

        # Should fail if exists and force=False
        with six.assertRaisesRegex(self, Exception, 'Plugin already exists'):
            install.install_plugin(PluginOpts(plugin=[
                os.path.join(pluginRoot, 'has_deps')
            ]))

        # Should succeed if force=True
        install.install_plugin(PluginOpts(force=True, plugin=[
            os.path.join(pluginRoot, 'has_deps')
        ]))

        # Test skip_requirements
        install.install_plugin(PluginOpts(
            force=True, skip_requirements=True,
            plugin=[os.path.join(pluginRoot, 'has_deps')]))

        # If bad path is given, should fail gracefully
        with six.assertRaisesRegex(self, Exception, 'Invalid plugin directory'):
            install.install_plugin(PluginOpts(force=True, plugin=[
                '/bad/install/path'
            ]))

        # If src == dest, we should still run npm and succeed.
        install.install_plugin(PluginOpts(force=True, plugin=[
            os.path.join(self.pluginDir, 'has_deps')
        ]))

        # Should fail if exists as directory and symlink is true
        with six.assertRaisesRegex(self, Exception, 'Plugin already exists'):
            install.install_plugin(PluginOpts(plugin=[
                os.path.join(pluginRoot, 'has_deps')
            ], symlink=True))

        # Should be a link if force=True and symlink=True
        install.install_plugin(PluginOpts(force=True, plugin=[
            os.path.join(pluginRoot, 'has_deps')
        ], symlink=True))
        self.assertTrue(os.path.islink(os.path.join(
            self.pluginDir, 'has_deps')))

        # Should fail if exists as link and symlink is false
        with six.assertRaisesRegex(self, Exception, 'Plugin already exists'):
            install.install_plugin(PluginOpts(plugin=[
                os.path.join(pluginRoot, 'has_deps')
            ]))

        # Should not be a link if force=True and symlink=False
        install.install_plugin(PluginOpts(force=True, plugin=[
            os.path.join(pluginRoot, 'has_deps')
        ]))
        self.assertFalse(os.path.islink(os.path.join(
            self.pluginDir, 'has_deps')))

    def testDevDependencies(self):
        """Install a plugin with dev deps, then reinstall with dev=True."""
        install.install_plugin(PluginOpts(plugin=[
            os.path.join(pluginRoot, 'has_dev_deps'),
            os.path.join(constants.ROOT_DIR, 'plugins', 'jobs')
        ]))
        self.assertTrue(os.path.exists(
            os.path.join(self.pluginDir, 'has_dev_deps', 'plugin.json')))
        install.install_plugin(PluginOpts(plugin=[
            os.path.join(pluginRoot, 'has_dev_deps'),
            os.path.join(constants.ROOT_DIR, 'plugins', 'jobs')
        ], force=True, dev=True))

    def testGruntDependencies(self):
        """Run the force/symlink matrix for a plugin with grunt deps."""
        install.install_plugin(PluginOpts(plugin=[
            os.path.join(pluginRoot, 'has_grunt_deps')
        ]))
        self.assertTrue(os.path.exists(
            os.path.join(self.pluginDir, 'has_grunt_deps', 'plugin.json')))

        # Should fail if exists and force=False
        with six.assertRaisesRegex(self, Exception, 'Plugin already exists'):
            install.install_plugin(PluginOpts(plugin=[
                os.path.join(pluginRoot, 'has_grunt_deps')
            ]))

        # Should succeed if force=True
        install.install_plugin(PluginOpts(force=True, plugin=[
            os.path.join(pluginRoot, 'has_grunt_deps')
        ]))

        # If bad path is given, should fail gracefully
        with six.assertRaisesRegex(self, Exception, 'Invalid plugin directory'):
            install.install_plugin(PluginOpts(force=True, plugin=[
                '/bad/install/path'
            ]))

        # If src == dest, we should still run npm and succeed.
        install.install_plugin(PluginOpts(force=True, plugin=[
            os.path.join(self.pluginDir, 'has_grunt_deps')
        ]))

        # Should fail if exists as directory and symlink is true
        with six.assertRaisesRegex(self, Exception, 'Plugin already exists'):
            install.install_plugin(PluginOpts(plugin=[
                os.path.join(pluginRoot, 'has_grunt_deps')
            ], symlink=True))

        # Should be a link if force=True and symlink=True
        install.install_plugin(PluginOpts(force=True, plugin=[
            os.path.join(pluginRoot, 'has_grunt_deps')
        ], symlink=True))
        self.assertTrue(os.path.islink(os.path.join(self.pluginDir, 'has_grunt_deps')))

        # Should fail if exists as link and symlink is false
        with six.assertRaisesRegex(self, Exception, 'Plugin already exists'):
            install.install_plugin(PluginOpts(plugin=[
                os.path.join(pluginRoot, 'has_grunt_deps')
            ]))

        # Should not be a link if force=True and symlink=False
        install.install_plugin(PluginOpts(force=True, plugin=[
            os.path.join(pluginRoot, 'has_grunt_deps')
        ]))
        self.assertFalse(os.path.islink(os.path.join(self.pluginDir, 'has_grunt_deps')))

    def testWebInstall(self):
        """Initiate a web build via the REST API with mocked npm calls."""
        # Test initiation of web install via the REST API
        user = User().createUser(
            login='admin', firstName='admin', lastName='admin', email='a@foo.com',
            password='passwd', admin=True)

        with mock.patch('subprocess.Popen', return_value=ProcMock()) as p:
            # Test without progress
            resp = self.request('/system/web_build', method='POST', user=user)
            self.assertStatusOk(resp)
            self.assertEqual(len(p.mock_calls), 2)
            self.assertEqual(
                list(p.mock_calls[0][1][0]),
                ['npm', 'install', '--unsafe-perm', '--no-save', '--production'])
            self.assertEqual(
                list(p.mock_calls[1][1][0]),
                ['npm', 'run', 'build', '--',
                 '--no-progress=true', '--env=prod', '--plugins=', '--configure-plugins='])

        # Test with progress (requires actually calling a subprocess)
        # Put the mock npm executable at the front of PATH for this run.
        os.environ['PATH'] = '%s:%s' % (
            os.path.join(os.path.abspath(os.path.dirname(__file__)), 'mockpath'),
            os.environ.get('PATH', '')
        )
        resp = self.request('/system/web_build', method='POST', user=user, params={
            'progress': True
        })
        self.assertStatusOk(resp)

    def testWebInstallCli(self):
        """Drive the girder-install CLI entry point with mocked subprocesses."""
        def invokeCli(argList):
            # Run install.main() as though `girder-install <args>` were typed.
            args = ['girder-install']
            args += list(argList)
            with mock.patch.object(sys, 'argv', args):
                install.main()

        # Test web install
        with mock.patch('subprocess.Popen', return_value=ProcMock(rc=2)) as p,\
                six.assertRaisesRegex(self, Exception, 'npm install .* returned 2'):
            invokeCli(['web'])
        self.assertEqual(len(p.mock_calls), 1)
        self.assertEqual(p.mock_calls[0][1][0][:2], ('npm', 'install'))
        self.assertEqual(p.mock_calls[0][2]['cwd'], constants.PACKAGE_DIR)

        with mock.patch('subprocess.Popen', return_value=ProcMock()) as p:
            invokeCli(['web'])
            self.assertIn('--production', p.mock_calls[0][1][0])

        with mock.patch('subprocess.Popen', return_value=ProcMock()) as p:
            invokeCli(['web', '--dev'])
            self.assertNotIn('--production', p.mock_calls[0][1][0])

        # Test watch commands
        with mock.patch('subprocess.Popen', return_value=ProcMock()) as p:
            invokeCli(['web', '--watch'])
            self.assertEqual(len(p.mock_calls), 1)
            self.assertEqual(list(p.mock_calls[0][1][0]), ['npm', 'run', 'watch'])

        with mock.patch('subprocess.Popen', return_value=ProcMock()) as p:
            invokeCli(['web', '--watch-plugin=jobs'])
            self.assertEqual(len(p.mock_calls), 1)
            self.assertEqual(
                list(p.mock_calls[0][1][0]),
                ['npm', 'run', 'watch', '--', '--plugins=jobs', '--configure-plugins=',
                 'webpack:plugin_jobs']
            )

        # Keyboard interrupt should be handled gracefully
        with mock.patch('subprocess.Popen', return_value=ProcMock(keyboardInterrupt=True)):
            invokeCli(['web', '--watch'])

        # Test "--plugins=" and --no-plugins
        with mock.patch('girder.utility.install.Setting') as p,\
                mock.patch('subprocess.Popen', return_value=ProcMock()):
            invokeCli(['web', '--no-plugins'])
            invokeCli(['web', '--plugins='])
            invokeCli(['web', '--plugins=,'])
            self.assertEqual(len(p.mock_calls), 0)

    def testStaticDependencies(self):
        """Verify staticWebDependencies plugins are passed to the build."""
        for p in ('does_nothing', 'has_deps', 'has_static_deps', 'has_webroot', 'test_plugin'):
            install.install_plugin(PluginOpts(plugin=[
                os.path.join(pluginRoot, p)
            ]))
        with mock.patch('subprocess.Popen', return_value=ProcMock()) as p:
            install.install_web(PluginOpts(plugins='has_static_deps'))
            self.assertEqual(len(p.mock_calls), 2)
            self.assertEqual(list(p.mock_calls[1][1][0][:-1]), [
                'npm', 'run', 'build', '--', '--no-progress=true', '--env=prod',
                '--plugins=has_static_deps',
            ])
            lastArg = p.mock_calls[1][1][0][-1]
            six.assertRegex(self, lastArg, '--configure-plugins=.*')
            self.assertEqual(
                set(lastArg.split('=')[-1].split(',')), {
                    'does_nothing', 'has_deps', 'has_webroot', 'test_plugin'
                })
| |
"""
Implementation of actions.
Includes those which might be used by the AI (movement and combat)
and those which are currently only offered to the player.
Magical effects and targeting (spells.py) could also live here.
Conditionals and interfaces for the player sit up top in roguelike.py.
"""
# Copyright 2016 Thomas C. Hudson
# Governed by the license described in LICENSE.txt
import copy
import libtcodpy as libtcod
import log
import algebra
from components import *
import map
# Every 100 pts of exhaustion = -1 to all skills
# (equivalent to 1 wound point)
ATTACK_EXHAUSTION = 10
CLIMB_EXHAUSTION = 9
MOVE_EXHAUSTION = 1
# CLIMB_EXHAUSTION is additive with MOVE_EXHAUSTION
# BUG: AI doesn't suffer climb_exhaustion because of coding awkwardness
# (actions.move() doesn't know about elevation changes, only
# roguelike.player_move_or_attack())
def random_direction():
    """Return one of the eight compass directions, chosen uniformly."""
    roll = libtcod.random_get_int(0, 0, 7)
    return algebra.directions[roll]
def add_to_map(new_map, pos, obj):
    """Place obj at pos on new_map and register it with the map."""
    obj.pos = pos
    # Back-reference so the object can find its own map later
    # (original note: not necessarily necessary?).
    obj.current_map = new_map
    # Prepend rather than append to the map's object list.
    new_map.objects.insert(0, obj)
def move(obj, direction):
    """
    Moves object by (dx, dy).
    Returns true if move succeeded.
    """
    goal = obj.pos + direction
    out_of_bounds = (goal.x < 0 or goal.y < 0 or
                     goal.x >= obj.current_map.width or
                     goal.y >= obj.current_map.height)
    if out_of_bounds:
        # try_ catches this for the player, but need to
        # check here for NPCs
        return False
    if obj.current_map.is_blocked_from(obj.pos, goal):
        return False
    obj.pos = goal
    if obj.fighter:
        obj.fighter.exhaustion += MOVE_EXHAUSTION
    return True
def move_towards(obj, target_pos):
    """
    Moves object one step towards target location.
    Returns true if move succeeded.
    """
    step = algebra.Direction(target_pos.x - obj.x, target_pos.y - obj.y)
    step.normalize()
    return move(obj, step)
def move_away_from(obj, target_pos):
    """
    Moves object one step away from target location.
    Returns true if move succeeded.
    """
    step = algebra.Direction(obj.x - target_pos.x, obj.y - target_pos.y)
    step.normalize()
    return move(obj, step)
def _assign_damage(fighter, attack_skill, target, defense_skill, quantity, method, report=True):
    """
    Narrate a resolved attack and, when quantity is positive, apply it
    as wounds to the target via inflict_damage().
    """
    if quantity <= 0:
        if report:
            log.message(
                fighter.name.capitalize() + ' (' + str(attack_skill) + ') ' +
                method + ' ' + target.name + ' (' + str(defense_skill) + ')' +
                ' but it has no effect!')
        return
    if report:
        log.message(
            fighter.name.capitalize() + ' (' + str(attack_skill) + ') ' +
            method + ' ' + target.name + ' (' + str(defense_skill) + ')' +
            ' for ' + str(quantity) + ' wounds.')
    inflict_damage(fighter, target.fighter, quantity)
def _drop_ammo_on_hit(target, ammo):
    """
    If a shot hits, ammo is 33% reusable, found in same square
    """
    # d6 roll of 1-2 (one chance in three) leaves the ammo recoverable.
    if libtcod.random_get_int(0, 1, 6) <= 2:
        recovered = copy.deepcopy(ammo)
        recovered.item.count = 1
        add_to_map(target.current_map, target.pos, recovered)
def _drop_ammo_on_miss(target, ammo):
    """
    If a shot misses, ammo goes into an adjacent square,
    reusable if that square is not blocked.
    """
    landing = target.pos + random_direction()
    if target.current_map.is_blocked_at(landing):
        return
    recovered = copy.deepcopy(ammo)
    recovered.item.count = 1
    add_to_map(target.current_map, landing, recovered)
def _base_combat_skill(who_ftr):
    """
    Return (skill, equipment) for melee resolution: the wielded
    right-hand weapon's skill (plus its bonus) when armed, otherwise the
    grappling skill; both default to 10 when untrained.
    """
    wielded = get_equipped_in_slot(who_ftr.owner, 'right hand')
    if wielded:
        weapon = wielded.owner.melee_weapon
        skill = who_ftr.skills.get(weapon.skill, 10) + weapon.skill_bonus
        return skill, wielded
    return who_ftr.skills.get('grappling', 10), wielded
def attack(attacker_ftr, target_obj, report=True):
    """
    Melee offence: attacker's weapon skill.
    Melee defense: half defender's weapon skill, plus defender's shield skill.
    Melee impact: attacker's weapon damage.
    Melee absorption: defender's armor soak.
    """
    target_obj.fighter.last_attacker = attacker_ftr.owner
    attacker_ftr.exhaustion += ATTACK_EXHAUSTION
    a_weapon_skill, a_weapon_eq = _base_combat_skill(attacker_ftr)
    d_weapon_skill, d_weapon_eq = _base_combat_skill(target_obj.fighter)

    # if a left-hand item has a defense bonus, use it as a shield
    d_shield_eq = get_equipped_in_slot(target_obj, 'left hand')
    shield_skill = 0
    if d_shield_eq and d_shield_eq.defense_bonus > 0:
        shield_skill = target_obj.fighter.skills.get('shield', 10)
    total_defense_skill = shield_skill + d_weapon_skill / 2

    # Action penalties reduce both sides' effective skills, floored at 10.
    effective_attack_skill = max(a_weapon_skill - attacker_ftr.action_penalty, 10)
    effective_defense_skill = max(total_defense_skill - target_obj.fighter.action_penalty, 10)
    # Opposed roll: a strictly higher defense roll means a clean miss.
    attack_roll = libtcod.random_get_int(0, 1, effective_attack_skill)
    defense_roll = libtcod.random_get_int(0, 1, effective_defense_skill)
    if defense_roll > attack_roll:
        if report:
            log.message(attacker_ftr.owner.name.capitalize() + ' (' + str(effective_attack_skill) + ')' +
                        ' attacks ' + target_obj.name + ' (' + str(effective_defense_skill) + ')' +
                        ' but misses.')
        return

    # Impact comes from the wielded weapon, or bare hands when unarmed.
    impact = attacker_ftr.unarmed_damage
    if a_weapon_eq:
        impact = a_weapon_eq.owner.melee_weapon.damage
    # Armor soaks the sum of two 0..defense rolls from the impact.
    active_armor = (libtcod.random_get_int(0, 0, target_obj.fighter.defense) +
                    libtcod.random_get_int(0, 0, target_obj.fighter.defense))
    damage = impact - active_armor
    _assign_damage(attacker_ftr.owner, effective_attack_skill,
                   target_obj, effective_defense_skill,
                   damage, 'attacks', report)
    if damage > 0:
        # Weapons (or unarmed fighters) may carry an extra on-strike effect.
        if a_weapon_eq:
            strike_fn = a_weapon_eq.owner.melee_weapon.on_strike
        else:
            strike_fn = attacker_ftr.on_unarmed_strike
        if strike_fn:
            strike_fn(attacker_ftr, target_obj, damage)
def draw(actor_obj, weapon_obj, report=True):
    """Ready a missile weapon, putting the actor into shooting mode."""
    if report:
        log.message(actor_obj.name.capitalize() + ' readies a ' + weapon_obj.name)
    actor_obj.game_state = 'shooting'
def fire(actor_obj, weapon_eq, ammo_eq, target_obj, report=True):
    """
    Resolve a missile attack with the readied weapon and ammunition.

    Missile offence: the shooter's weapon skill minus action penalty.
    Missile defense: 5 + 5 per tile of distance, plus half the target's
    shield skill when a left-hand item grants a defense bonus.
    Ammunition is consumed, and may be recovered on the ground whether
    the shot hits or misses.
    """
    # Spend one round of ammunition; remove the stack when exhausted.
    ammo_eq.owner.item.count -= 1
    if ammo_eq.owner.item.count == 0:
        unequip(actor_obj, ammo_eq, False)
        actor_obj.inventory.remove(ammo_eq.owner)

    target_obj.fighter.last_attacker = actor_obj
    actor_obj.fighter.exhaustion += ATTACK_EXHAUSTION
    a_weapon_skill = actor_obj.fighter.skills.get(weapon_eq.owner.missile_weapon.skill, 10)
    effective_attack_skill = max(a_weapon_skill - actor_obj.fighter.action_penalty, 10)

    d_shield_eq = get_equipped_in_slot(target_obj, 'left hand')
    effective_shield_skill = 0
    if d_shield_eq and d_shield_eq.defense_bonus > 0:
        shield_skill = target_obj.fighter.skills.get('shield', 10)
        effective_shield_skill = max(shield_skill - target_obj.fighter.action_penalty, 0) / 2

    vector = target_obj.pos - actor_obj.pos
    # BUG FIX: this module never imports math, so math.sqrt risked a
    # NameError (it only worked if a star import happened to leak
    # `math`).  Use the exponent operator for the Euclidean distance.
    distance = (vector.x ** 2 + vector.y ** 2) ** 0.5
    effective_defense_skill = 5 + 5 * int(distance) + effective_shield_skill
    attack_roll = libtcod.random_get_int(0, 1, effective_attack_skill)
    defense_roll = libtcod.random_get_int(0, 1, effective_defense_skill)
    if defense_roll > attack_roll:
        if report:
            log.message(actor_obj.name.capitalize() + ' (' + str(effective_attack_skill) + ')' +
                        ' shoots at ' + target_obj.name + ' (' + str(effective_defense_skill) + ')' +
                        ' but misses.')
        _drop_ammo_on_miss(target_obj, ammo_eq.owner)
        return

    damage = weapon_eq.owner.missile_weapon.damage - target_obj.fighter.defense
    _assign_damage(actor_obj, effective_attack_skill,
                   target_obj, effective_defense_skill,
                   damage, 'shoots', report)
    _drop_ammo_on_hit(target_obj, ammo_eq.owner)
def inflict_damage(actor_obj, target_ftr, damage):
    """
    Apply damage.
    """
    if damage <= 0:
        return
    target_ftr.wounds += damage
    # for now flat 50% chance of inflicting bleeding
    # TODO: base on weapon type?
    if libtcod.random_get_int(0, 0, 1):
        inflict_bleeding(actor_obj, target_ftr, damage / 2)
    if target_ftr.wounds >= target_ftr.max_hp:
        # combat model says we just fall unconscious
        # but in a single-player game is that really
        # worth simulating?
        on_death = target_ftr.death_function
        if on_death is not None:
            on_death(target_ftr.owner)
def inflict_bleeding(actor_obj, target_ftr, bloodloss):
    """
    Apply bleeding.
    """
    # Bleeding defense soaks some of the loss before it sticks.
    net_loss = bloodloss - target_ftr.bleeding_defense
    if net_loss > 0:
        target_ftr.bleeding += net_loss
        log.message(target_ftr.owner.name.capitalize() + ' bleeds!', libtcod.red)
def bleed(actor_obj):
    """Drain hit points from active bleeding; trigger death at zero."""
    fighter = actor_obj.fighter
    # go into floats here so that we can model bleeding continuously
    # instead of assessing it every 10 turns
    fighter.hp -= fighter.bleeding / 10.
    if fighter.hp <= 0:
        on_death = fighter.death_function
        if on_death is not None:
            on_death(actor_obj)
def heal(target_ftr, amount):
    """
    Heal by the given amount, without going over the maximum.
    """
    target_ftr.hp = min(target_ftr.hp + amount, target_ftr.max_hp)
def pick_up(actor, obj, report=True):
    """
    Add an Object to the actor's inventory and remove from the map.
    Returns True on success, False when the inventory is full.
    """
    # First try to merge into an existing combinable stack.
    for held in actor.inventory:
        if obj.item.can_combine(held):
            held.item.count += obj.item.count
            actor.current_map.objects.remove(obj)
            if report:
                log.message(actor.name.capitalize() + ' picked up a ' + obj.name + '!', libtcod.green)
            return True

    # Hard inventory cap of 22 distinct stacks.
    if len(actor.inventory) >= 22:
        if report:
            log.message(actor.name.capitalize() + ' inventory is full, cannot pick up ' +
                        obj.name + '.', libtcod.red)
        return False

    actor.inventory.append(obj)
    actor.current_map.objects.remove(obj)
    if report:
        if obj.item.count > 1:
            log.message(actor.name.capitalize() + ' picked up ' + str(obj.item.count) +
                        'x ' + obj.name + '!', libtcod.green)
        else:
            log.message(actor.name.capitalize() + ' picked up a ' + obj.name + '!', libtcod.green)

    # Special case: automatically equip if the corresponding equipment slot is unused.
    equipment = obj.equipment
    if equipment and get_equipped_in_slot(actor, equipment.slot) is None:
        equip(actor, equipment, report)
    return True
def drop(actor, obj, report=True, drop_all=False):
    """
    Remove an Object from the actor's inventory and add it to the map
    at the player's coordinates.
    If it's equipment, unequip before dropping.
    """
    # Dropping one item from a stack of several keeps the stack in
    # inventory and splits a single-count copy off further below.
    must_split = False
    if obj.item.count > 1 and not drop_all:
        obj.item.count -= 1
        must_split = True
    else:
        if obj.equipment:
            unequip(actor, obj.equipment, report)
        actor.inventory.remove(obj)

    # Try to merge with a combinable stack already lying on this tile.
    combined = False
    for match in actor.current_map.objects:
        if match.pos == actor.pos and obj.item.can_combine(match):
            if drop_all:
                match.item.count += obj.item.count
            else:
                match.item.count += 1
            combined = True
            break
    if not combined:
        new_o = obj
        if must_split:
            # The original stack stays in inventory, so drop a deep copy
            # with the appropriate count.
            new_o = copy.deepcopy(obj)
            if drop_all:
                new_o.item.count = obj.item.count
            else:
                new_o.item.count = 1
        add_to_map(actor.current_map, actor.pos, new_o)
    if report:
        if drop_all:
            log.message(actor.name.capitalize() + ' dropped ' + str(obj.item.count) + 'x ' + obj.name + '.', libtcod.yellow)
        else:
            log.message(actor.name.capitalize() + ' dropped a ' + obj.name + '.', libtcod.yellow)
def use(actor, obj, report=True):
    """
    If the object has the Equipment component, toggle equip/unequip.
    Otherwise invoke its use_function and (if not cancelled) destroy it.
    """
    if obj.equipment:
        _toggle_equip(actor, obj.equipment, report)
        return
    if obj.item.use_function is None:
        if report:
            log.message('The ' + obj.name + ' cannot be used.')
        return
    if obj.item.use_function(actor) == 'cancelled':
        return
    # Consume one from the stack, or remove the object entirely.
    if obj.item.count > 1:
        obj.item.count -= 1
    else:
        actor.inventory.remove(obj)
def _toggle_equip(actor, eqp, report=True):
    """Equip eqp if it is currently unequipped, otherwise unequip it."""
    action = unequip if eqp.is_equipped else equip
    action(actor, eqp, report)
def equip(actor, eqp, report=True):
    """
    Equip the object (logging unless report=False), displacing whatever
    currently occupies the same slot so that at most one object per
    slot is ever equipped.
    """
    displaced = get_equipped_in_slot(actor, eqp.slot)
    if displaced is not None:
        unequip(actor, displaced, report)

    eqp.is_equipped = True
    if report:
        log.message('Equipped ' + eqp.owner.name + ' on ' + eqp.slot + '.', libtcod.light_green)
def unequip(actor, eqp, report=True):
    """
    Unequip the object and log it; a no-op when it is not equipped.
    """
    if eqp.is_equipped:
        eqp.is_equipped = False
        if report:
            log.message('Unequipped ' + eqp.owner.name + ' from ' + eqp.slot + '.', libtcod.light_yellow)
def get_equipped_in_slot(actor, slot):
    """
    Return the Equipment component currently equipped in `slot` on
    `actor`, or None when the slot is empty (or the actor carries no
    inventory at all).
    """
    for carried in getattr(actor, 'inventory', ()):
        eqp = carried.equipment
        if eqp and eqp.is_equipped and eqp.slot == slot:
            return eqp
    return None
class _MockMap(object):
    """Minimal map stand-in for the self-tests below: open terrain everywhere."""
    def is_blocked_at(self, pos):
        # Never blocked, so movement tests exercise pure coordinate math.
        return False
def _test_move():
    """move() should translate an object by the given compass direction."""
    pawn = Object(algebra.Location(0, 0), 'o', 'test object', libtcod.white)
    pawn.current_map = _MockMap()
    assert pawn.pos == algebra.Location(0, 0)

    move(pawn, algebra.south)
    assert pawn.pos == algebra.Location(0, 1)

    move(pawn, algebra.southeast)
    assert pawn.pos == algebra.Location(1, 2)
def _test_move_towards():
    """move_towards() takes one diagonal step toward the goal, idling at it."""
    pawn = Object(algebra.Location(0, 0), 'o', 'test object', libtcod.white)
    pawn.current_map = _MockMap()
    assert pawn.pos == algebra.Location(0, 0)

    move_towards(pawn, algebra.Location(10, 10))
    assert pawn.pos == algebra.Location(1, 1)
    move_towards(pawn, algebra.Location(10, 10))
    assert pawn.pos == algebra.Location(2, 2)

    # A distant goal still produces a single one-tile step.
    move_towards(pawn, algebra.Location(-10, 2))
    assert pawn.pos == algebra.Location(1, 2)

    # Already at the goal: no movement at all.
    move_towards(pawn, pawn.pos)
    assert pawn.pos == algebra.Location(1, 2)
def _test_attack():
    """Damage equals attack minus defense and never goes negative."""
    att_fighter = Fighter(100)
    def_fighter = Fighter(100)
    # NOTE(review): Object construction presumably links each Fighter to its
    # owner, so both are kept even though only the defender is referenced.
    attacker = Object(algebra.Location(0, 0), 'a', 'test attacker', libtcod.white, fighter=att_fighter)
    defender = Object(algebra.Location(1, 1), 'd', 'test defender', libtcod.white, fighter=def_fighter)
    assert att_fighter.hp == 100
    assert def_fighter.hp == 100

    # Zero defense: the full damage lands.
    attack(att_fighter, defender, False)
    assert def_fighter.hp == 90

    # Partial defense reduces the damage.
    def_fighter.base_defense = 5
    attack(att_fighter, defender, False)
    assert def_fighter.hp == 85

    # Defense exceeding attack blocks all damage.
    def_fighter.base_defense = 15
    attack(att_fighter, defender, False)
    assert def_fighter.hp == 85
def _test_actions():
    """Run the whole action-module self-test suite."""
    for case in (_test_move, _test_move_towards, _test_attack):
        case()
# Run the self-tests when executed directly; harmless on import.
if __name__ == '__main__':
    _test_actions()
    print('Action tests complete.')
| |
#!/usr/bin/env python
"""
Twython is a library for Python that wraps the Twitter API.
It aims to abstract away all the API endpoints, so that additions to the library
and/or the Twitter API won't cause any overall problems.
Questions, comments? ryan@venodesigns.net
"""
__author__ = "Ryan McGrath <ryan@venodesigns.net>"
__version__ = "2.3.4"
import urllib
import re
import warnings
import requests
from requests.auth import OAuth1
try:
from urlparse import parse_qsl
except ImportError:
from cgi import parse_qsl
# Twython maps keyword based arguments to Twitter API endpoints. The endpoints
# table is a file with a dictionary of every API endpoint that Twython supports.
from twitter_endpoints import base_url, api_table, twitter_http_status_codes
# Resolve a JSON implementation: prefer the external simplejson package,
# fall back to the stdlib json module (Python 2.6+), then Django's
# bundled copy. Catch only ImportError -- the original bare `except:`
# also swallowed SystemExit/KeyboardInterrupt and unrelated errors.
try:
    import simplejson
except ImportError:
    try:
        # Python 2.6 and up
        import json as simplejson
    except ImportError:
        try:
            from django.utils import simplejson
        except ImportError:
            # Seriously wtf is wrong with you if you get this Exception.
            raise Exception("Twython requires the simplejson library (or Python 2.6) to work. http://www.undefined.org/python/")
class TwythonError(Exception):
    """
    Generic error class, catch-all for most Twython issues.
    Special cases are handled by TwythonAPILimit and TwythonAuthError.
    Note: To use these, the syntax has changed as of Twython 1.3. To catch these,
    you need to explicitly import them into your code, e.g:
    from twython import TwythonError, TwythonAPILimit, TwythonAuthError
    """
    def __init__(self, msg, error_code=None, retry_after=None):
        self.error_code = error_code
        # When the code maps to a known Twitter HTTP status, prefix the
        # message with its short name and description.
        if error_code is not None and error_code in twitter_http_status_codes:
            short_name, description = twitter_http_status_codes[error_code]
            msg = '%s: %s -- %s' % (short_name, description, msg)
        self.msg = msg

    def __str__(self):
        return repr(self.msg)
class TwythonAuthError(TwythonError):
    """ Raised when you try to access a protected resource and it fails due to
    some issue with your authentication.
    """
    pass
class TwythonRateLimitError(TwythonError):
    """ Raised when you've hit a rate limit.

    When the server supplies a Retry-After value, the message is extended
    with the number of seconds to wait before trying again.
    """
    def __init__(self, msg, error_code, retry_after=None):
        TwythonError.__init__(self, msg, error_code=error_code)
        # The Retry-After HTTP header arrives as a *string* from
        # response.headers.get('retry-after'), so the original
        # isinstance(retry_after, int) check never matched and the hint
        # was silently dropped. Coerce to int, ignoring unparsable values.
        if retry_after is not None:
            try:
                retry_after = int(retry_after)
            except (TypeError, ValueError):
                retry_after = None
        if retry_after is not None:
            self.msg = '%s (Retry after %d seconds)' % (msg, retry_after)
class Twython(object):
    """Client for the Twitter REST API (v1), built on the `requests` library.

    Endpoint methods are generated dynamically in __init__ from the
    `api_table` mapping; endpoints that need special handling (search,
    media uploads, streaming, profile images, token exchange) are
    implemented explicitly below.

    NOTE(review): this targets Python 2 (`unicode`, `urllib.urlencode`)
    and an old `requests` API (`requests.session(**kwargs)`,
    `response.error`) -- verify against the pinned requests version.
    """
    def __init__(self, app_key=None, app_secret=None, oauth_token=None, oauth_token_secret=None, \
            headers=None, callback_url=None, twitter_token=None, twitter_secret=None, proxies=None):
        """Instantiates an instance of Twython. Takes optional parameters for authentication and such (see below).

        :param app_key: (optional) Your applications key
        :param app_secret: (optional) Your applications secret key
        :param oauth_token: (optional) Used with oauth_token_secret to make authenticated calls
        :param oauth_token_secret: (optional) Used with oauth_token to make authenticated calls
        :param headers: (optional) Custom headers to send along with the request
        :param callback_url: (optional) If set, will overwrite the callback url set in your application
        :param proxies: (optional) A dictionary of proxies, for example {"http":"proxy.example.org:8080", "https":"proxy.example.org:8081"}.
        """
        # Needed for hitting that there API.
        self.api_url = 'https://api.twitter.com/%s'
        self.request_token_url = self.api_url % 'oauth/request_token'
        self.access_token_url = self.api_url % 'oauth/access_token'
        self.authorize_url = self.api_url % 'oauth/authorize'
        self.authenticate_url = self.api_url % 'oauth/authenticate'

        # Enforce unicode on keys and secrets.
        # twitter_token/twitter_secret are legacy aliases for app_key/app_secret.
        self.app_key = app_key and unicode(app_key) or twitter_token and unicode(twitter_token)
        self.app_secret = app_key and unicode(app_secret) or twitter_secret and unicode(twitter_secret)
        self.oauth_token = oauth_token and u'%s' % oauth_token
        self.oauth_token_secret = oauth_token_secret and u'%s' % oauth_token_secret

        self.callback_url = callback_url

        # If there's headers, set them, otherwise be an embarrassing parent for their own good.
        self.headers = headers or {'User-Agent': 'Twython v' + __version__}

        # Allow for unauthenticated requests
        self.client = requests.session(proxies=proxies)
        self.auth = None

        # App key/secret only: application-level (unauthorized-user) OAuth1.
        if self.app_key is not None and self.app_secret is not None and \
        self.oauth_token is None and self.oauth_token_secret is None:
            self.auth = OAuth1(self.app_key, self.app_secret,
                signature_type='auth_header')

        # Full credential set: user-authenticated OAuth1.
        if self.app_key is not None and self.app_secret is not None and \
        self.oauth_token is not None and self.oauth_token_secret is not None:
            self.auth = OAuth1(self.app_key, self.app_secret,
                self.oauth_token, self.oauth_token_secret,
                signature_type='auth_header')

        if self.auth is not None:
            self.client = requests.session(headers=self.headers, auth=self.auth, proxies=proxies)

        # register available funcs to allow listing name when debugging.
        def setFunc(key):
            return lambda **kwargs: self._constructFunc(key, **kwargs)
        for key in api_table.keys():
            self.__dict__[key] = setFunc(key)

        # create stash for last call intel
        self._last_call = None

    def _constructFunc(self, api_call, **kwargs):
        """Build and fire a request for a dynamically-registered endpoint."""
        # Go through and replace any mustaches that are in our API url.
        fn = api_table[api_call]
        url = re.sub(
            '\{\{(?P<m>[a-zA-Z_]+)\}\}',
            # The '1' here catches the API version. Slightly hilarious.
            lambda m: "%s" % kwargs.get(m.group(1), '1'),
            base_url + fn['url']
        )

        content = self._request(url, method=fn['method'], params=kwargs)

        return content

    def _request(self, url, method='GET', params=None, files=None, api_call=None):
        '''Internal response generator, no sense in repeating the same
        code twice, right? ;)
        '''
        method = method.lower()
        if not method in ('get', 'post'):
            raise TwythonError('Method must be of GET or POST')

        params = params or {}

        func = getattr(self.client, method)
        if method == 'get':
            response = func(url, params=params)
        else:
            response = func(url, data=params, files=files)
        content = response.content.decode('utf-8')

        # create stash for last function intel
        # NOTE(review): response.error was removed in later requests
        # releases -- this attribute access ties us to the old API.
        self._last_call = {
            'api_call': api_call,
            'api_error': None,
            'cookies': response.cookies,
            'error': response.error,
            'headers': response.headers,
            'status_code': response.status_code,
            'url': response.url,
            'content': content,
        }

        try:
            content = simplejson.loads(content)
        except ValueError:
            raise TwythonError('Response was not valid JSON, unable to decode.')

        # Anything above 304 (Not Modified) is treated as an API error.
        if response.status_code > 304:
            # If there is no error message, use a default.
            error_msg = content.get(
                'error', 'An error occurred processing your request.')
            self._last_call['api_error'] = error_msg

            # 420 "Enhance Your Calm" was Twitter's rate-limit status.
            if response.status_code == 420:
                exceptionType = TwythonRateLimitError
            else:
                exceptionType = TwythonError

            raise exceptionType(error_msg,
                                error_code=response.status_code,
                                retry_after=response.headers.get('retry-after'))

        return content

    '''
    # Dynamic Request Methods
    Just in case Twitter releases something in their API
    and a developer wants to implement it on their app, but
    we haven't gotten around to putting it in Twython yet. :)
    '''

    def request(self, endpoint, method='GET', params=None, files=None, version=1):
        """Fire a request against an arbitrary endpoint name or full URL."""
        # In case they want to pass a full Twitter URL
        # i.e. https://search.twitter.com/
        if endpoint.startswith('http://') or endpoint.startswith('https://'):
            url = endpoint
        else:
            url = '%s/%s.json' % (self.api_url % version, endpoint)

        content = self._request(url, method=method, params=params, files=files, api_call=url)

        return content

    def get(self, endpoint, params=None, version=1):
        """Shortcut for a GET request() call."""
        return self.request(endpoint, params=params, version=version)

    def post(self, endpoint, params=None, files=None, version=1):
        """Shortcut for a POST request() call."""
        return self.request(endpoint, 'POST', params=params, files=files, version=version)

    # End Dynamic Request Methods

    def get_lastfunction_header(self, header):
        """Returns the header in the last function
        This must be called after an API call, as it returns header based
        information.
        This will return None if the header is not present
        Most useful for the following header information:
            x-ratelimit-limit
            x-ratelimit-remaining
            x-ratelimit-class
            x-ratelimit-reset
        """
        if self._last_call is None:
            raise TwythonError('This function must be called after an API call. It delivers header information.')

        if header in self._last_call['headers']:
            return self._last_call['headers'][header]

        return None

    def get_authentication_tokens(self):
        """Returns an authorization URL for a user to hit.

        The returned dict contains the request tokens plus an 'auth_url'
        entry pointing at the authenticate endpoint.
        """
        request_args = {}
        if self.callback_url:
            request_args['oauth_callback'] = self.callback_url

        response = self.client.get(self.request_token_url, params=request_args)

        if response.status_code != 200:
            raise TwythonAuthError("Seems something couldn't be verified with your OAuth junk. Error: %s, Message: %s" % (response.status_code, response.content))

        request_tokens = dict(parse_qsl(response.content))
        if not request_tokens:
            raise TwythonError('Unable to decode request tokens.')

        oauth_callback_confirmed = request_tokens.get('oauth_callback_confirmed') == 'true'

        auth_url_params = {
            'oauth_token': request_tokens['oauth_token'],
        }

        # Use old-style callback argument if server didn't accept new-style
        if self.callback_url and not oauth_callback_confirmed:
            auth_url_params['oauth_callback'] = self.callback_url

        request_tokens['auth_url'] = self.authenticate_url + '?' + urllib.urlencode(auth_url_params)

        return request_tokens

    def get_authorized_tokens(self):
        """Returns authorized tokens after they go through the auth_url phase.
        """
        response = self.client.get(self.access_token_url)
        authorized_tokens = dict(parse_qsl(response.content))
        if not authorized_tokens:
            raise TwythonError('Unable to decode authorized tokens.')

        return authorized_tokens

    # ------------------------------------------------------------------------------------------------------------------------
    # The following methods are all different in some manner or require special attention with regards to the Twitter API.
    # Because of this, we keep them separate from all the other endpoint definitions - ideally this should be change-able,
    # but it's not high on the priority list at the moment.
    # ------------------------------------------------------------------------------------------------------------------------

    @staticmethod
    def shortenURL(url_to_shorten, shortener='http://is.gd/api.php'):
        """Shortens url specified by url_to_shorten.
        Note: Twitter automatically shortens all URLs behind their own custom t.co shortener now,
        but we keep this here for anyone who was previously using it for alternative purposes. ;)

        :param url_to_shorten: (required) The URL to shorten
        :param shortener: (optional) In case you want to use a different
                          URL shortening service
        """
        if shortener == '':
            raise TwythonError('Please provide a URL shortening service.')

        request = requests.get(shortener, params={
            'query': url_to_shorten
        })

        if request.status_code in [301, 201, 200]:
            return request.text
        else:
            raise TwythonError('shortenURL() failed with a %s error code.' % request.status_code)

    @staticmethod
    def constructApiURL(base_url, params):
        """Append params to base_url as a UTF-8, URL-encoded query string."""
        return base_url + "?" + "&".join(["%s=%s" % (Twython.unicode2utf8(key), urllib.quote_plus(Twython.unicode2utf8(value))) for (key, value) in params.iteritems()])

    def search(self, **kwargs):
        """ Returns tweets that match a specified query.

        Documentation: https://dev.twitter.com/doc/get/search

        :param q: (required) The query you want to search Twitter for

        :param geocode: (optional) Returns tweets by users located within
                        a given radius of the given latitude/longitude.
                        The parameter value is specified by
                        "latitude,longitude,radius", where radius units
                        must be specified as either "mi" (miles) or
                        "km" (kilometers).
                        Example Values: 37.781157,-122.398720,1mi
        :param lang: (optional) Restricts tweets to the given language,
                     given by an ISO 639-1 code.
        :param locale: (optional) Specify the language of the query you
                       are sending. Only ``ja`` is currently effective.
        :param page: (optional) The page number (starting at 1) to return
                     Max ~1500 results
        :param result_type: (optional) Default ``mixed``
                            mixed: Include both popular and real time
                            results in the response.
                            recent: return only the most recent results in
                            the response
                            popular: return only the most popular results
                            in the response.

        e.g x.search(q='jjndf', page='2')
        """
        return self.get('https://search.twitter.com/search.json', params=kwargs)

    def searchGen(self, search_query, **kwargs):
        """ Returns a generator of tweets that match a specified query.

        Documentation: https://dev.twitter.com/doc/get/search
        See Twython.search() for acceptable parameters

        e.g search = x.searchGen('python')
            for result in search:
                print result
        """
        kwargs['q'] = search_query
        content = self.get('https://search.twitter.com/search.json', params=kwargs)
        if not content['results']:
            # NOTE(review): raising StopIteration inside a generator becomes a
            # RuntimeError under PEP 479 (Python 3.7+); fine on the Python 2
            # interpreters this code targets.
            raise StopIteration

        for tweet in content['results']:
            yield tweet

        # Recurse into the next results page until it comes back empty.
        if 'page' not in kwargs:
            kwargs['page'] = '2'
        else:
            try:
                kwargs['page'] = int(kwargs['page'])
                kwargs['page'] += 1
                kwargs['page'] = str(kwargs['page'])
            except TypeError:
                raise TwythonError("searchGen() exited because page takes type str")

        for tweet in self.searchGen(search_query, **kwargs):
            yield tweet

    # The following methods are apart from the other Account methods,
    # because they rely on a whole multipart-data posting function set.

    def updateProfileBackgroundImage(self, file_, tile=True, version=1):
        """Updates the authenticating user's profile background image.

        :param file_: (required) A string to the location of the file
                      (less than 800KB in size, larger than 2048px width will scale down)
        :param tile: (optional) Default ``True`` If set to true the background image
                     will be displayed tiled. The image will not be tiled otherwise.
        :param version: (optional) A number, default 1 because that's the
                        only API version Twitter has now
        """
        url = 'https://api.twitter.com/%d/account/update_profile_background_image.json' % version
        return self._media_update(url,
                                  {'image': (file_, open(file_, 'rb'))},
                                  **{'tile': tile})

    def bulkUserLookup(self, **kwargs):
        """Stub for a method that has been deprecated, kept for now to raise errors
        properly if people are relying on this (which they are...).
        """
        warnings.warn(
            "This function has been deprecated. Please migrate to .lookupUser() - params should be the same.",
            DeprecationWarning,
            stacklevel=2
        )

    def updateProfileImage(self, file_, version=1):
        """Updates the authenticating user's profile image (avatar).

        :param file_: (required) A string to the location of the file
        :param version: (optional) A number, default 1 because that's the
                        only API version Twitter has now
        """
        url = 'https://api.twitter.com/%d/account/update_profile_image.json' % version
        return self._media_update(url,
                                  {'image': (file_, open(file_, 'rb'))})

    def updateStatusWithMedia(self, file_, version=1, **params):
        """Updates the users status with media

        :param file_: (required) A string to the location of the file
        :param version: (optional) A number, default 1 because that's the
                        only API version Twitter has now

        **params - You may pass items that are taken in this doc
        (https://dev.twitter.com/docs/api/1/post/statuses/update_with_media)
        """
        url = 'https://upload.twitter.com/%d/statuses/update_with_media.json' % version
        return self._media_update(url,
                                  {'media': (file_, open(file_, 'rb'))},
                                  **params)

    def _media_update(self, url, file_, **params):
        """POST a multipart media upload; file_ maps form field -> file tuple."""
        return self.post(url, params=params, files=file_)

    def getProfileImageUrl(self, username, size='normal', version=1):
        """Gets the URL for the user's profile image.

        :param username: (required) Username, self explanatory.
        :param size: (optional) Default 'normal' (48px by 48px)
                     bigger - 73px by 73px
                     mini - 24px by 24px
                     original - undefined, be careful -- images may be
                     large in bytes and/or size.
        :param version: A number, default 1 because that's the only API
                        version Twitter has now
        """
        endpoint = 'users/profile_image/%s' % username
        url = self.api_url % version + '/' + endpoint

        # Twitter answers with a redirect to the image; capture it rather
        # than following it.
        response = self.client.get(url, params={'size': size}, allow_redirects=False)
        image_url = response.headers.get('location')

        if response.status_code in (301, 302, 303, 307) and image_url is not None:
            return image_url
        else:
            raise TwythonError('getProfileImageUrl() threw an error.',
                               error_code=response.status_code)

    @staticmethod
    def stream(data, callback):
        """A Streaming API endpoint, because requests (by Kenneth Reitz)
        makes this not stupidly annoying to implement.

        In reality, Twython does absolutely *nothing special* here,
        but people new to programming expect this type of function to
        exist for this library, so we provide it for convenience.

        Seriously, this is nothing special. :)

        For the basic stream you're probably accessing, you'll want to
        pass the following as data dictionary keys. If you need to use
        OAuth (newer streams), passing secrets/etc
        as keys SHOULD work...

        This is all done over SSL (https://), so you're not left
        totally vulnerable by passing your password.

        :param username: (required) Username, self explanatory.
        :param password: (required) The Streaming API doesn't use OAuth,
                         so we do this the old school way.
        :param callback: (required) Callback function to be fired when
                         tweets come in (this is an event-based-ish API).
        :param endpoint: (optional) Override the endpoint you're using
                         with the Twitter Streaming API. This is defaulted
                         to the one that everyone has access to, but if
                         Twitter <3's you feel free to set this to your
                         wildest desires.
        """
        endpoint = 'https://stream.twitter.com/1/statuses/filter.json'
        if 'endpoint' in data:
            endpoint = data.pop('endpoint')

        needs_basic_auth = False
        if 'username' in data and 'password' in data:
            needs_basic_auth = True
            username = data.pop('username')
            password = data.pop('password')

        if needs_basic_auth:
            stream = requests.post(endpoint,
                                   data=data,
                                   auth=(username, password))
        else:
            stream = requests.post(endpoint, data=data)

        # Feed each complete (non-keepalive) line to the callback as JSON.
        for line in stream.iter_lines():
            if line:
                try:
                    callback(simplejson.loads(line))
                except ValueError:
                    raise TwythonError('Response was not valid JSON, unable to decode.')

    @staticmethod
    def unicode2utf8(text):
        """Encode a unicode string as UTF-8 bytes; pass anything else through."""
        try:
            if isinstance(text, unicode):
                text = text.encode('utf-8')
        except:
            pass
        return text

    @staticmethod
    def encode(text):
        """Coerce any value to a UTF-8 byte string."""
        if isinstance(text, (str, unicode)):
            return Twython.unicode2utf8(text)
        return str(text)
| |
#
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from zoo.common.utils import enable_multi_fs_load, enable_multi_fs_save
from zoo.orca.data.utils import row_to_sample, xshard_to_sample
from zoo.orca.learn.utils import convert_predict_rdd_to_dataframe, bigdl_metric_results_to_dict, \
process_xshards_of_pandas_dataframe
from zoo.pipeline.estimator.estimator import Estimator as SparkEstimator
from zoo.orca.learn.ray_estimator import Estimator as OrcaRayEstimator
from zoo.orca.learn.pytorch.training_operator import TrainingOperator
from zoo.orca.learn.spark_estimator import Estimator as OrcaSparkEstimator
from zoo.orca.learn.optimizers import Optimizer as OrcaOptimizer, SGD
from zoo.orca.learn.metrics import Accuracy
from zoo.orca.data import SparkXShards
from bigdl.optim.optimizer import MaxEpoch, OptimMethod
from zoo.feature.common import FeatureSet
from torch.optim.optimizer import Optimizer as TorchOptimizer
from torch.utils.data import DataLoader
from pyspark.sql import DataFrame
import warnings
import torch
import types
class Estimator(object):
    """Factory for Orca PyTorch estimators.

    ``Estimator.from_torch`` builds an estimator backed either by BigDL
    on Spark or by Ray (Horovod / native torch.distributed).
    """

    @staticmethod
    def from_torch(*,
                   model,
                   optimizer,
                   loss=None,
                   metrics=None,
                   scheduler_creator=None,
                   training_operator_cls=TrainingOperator,
                   initialization_hook=None,
                   config=None,
                   scheduler_step_freq="batch",
                   use_tqdm=False,
                   workers_per_node=1,
                   model_dir=None,
                   backend="bigdl"):
        """Create an Estimator for torch.

        :param model: a PyTorch model, or a model creator function (a creator
            function is required for the "horovod"/"torch_distributed" backends).
        :param optimizer: an Orca/PyTorch optimizer, or an optimizer creator
            function (a creator function is required for the Ray-based backends).
        :param loss: a PyTorch loss, or a loss creator function (a creator
            function is required for the Ray-based backends).
        :param metrics: Orca validation methods for evaluate.
        :param scheduler_creator: (Ray backends only) a learning-rate scheduler
            creator wrapping the optimizer; set scheduler_step_freq="epoch" for
            the scheduler to be incremented correctly.
        :param training_operator_cls: (Ray backends only) custom TrainingOperator
            subclass defining the train/validate steps.
        :param initialization_hook: (Ray backends only) function run on each
            worker before training.
        :param config: parameter dict used to create model, optimizer, loss
            and data.
        :param scheduler_step_freq: (Ray backends only) "batch", "epoch" or
            None; determines when ``scheduler.step`` is called.
        :param use_tqdm: (Ray backends only) show training progress when True.
        :param workers_per_node: (Ray backends only) workers per node,
            default 1.
        :param model_dir: ("bigdl" backend only) path where the model is saved
            when a checkpoint trigger fires during training.
        :param backend: "bigdl" (default), "horovod" or "torch_distributed".
        :return: an Estimator object.
        :raises ValueError: when ``backend`` names an unsupported backend.
        """
        if backend == "bigdl":
            return PyTorchSparkEstimator(model=model,
                                         loss=loss,
                                         optimizer=optimizer,
                                         config=config,
                                         metrics=metrics,
                                         model_dir=model_dir,
                                         bigdl_type="float")
        if backend in ("horovod", "torch_distributed"):
            return PyTorchRayEstimator(model_creator=model,
                                       optimizer_creator=optimizer,
                                       loss_creator=loss,
                                       metrics=metrics,
                                       scheduler_creator=scheduler_creator,
                                       training_operator_cls=training_operator_cls,
                                       initialization_hook=initialization_hook,
                                       config=config,
                                       scheduler_step_freq=scheduler_step_freq,
                                       use_tqdm=use_tqdm,
                                       workers_per_node=workers_per_node,
                                       backend=backend)
        raise ValueError("Only horovod, torch_distributed and bigdl backends are supported"
                         f" for now, got backend: {backend}")
class PyTorchRayEstimator(OrcaRayEstimator):
    """Orca Estimator that trains PyTorch models on Ray workers.

    This is a thin facade: argument validation happens here, and every
    operation is delegated to
    ``zoo.orca.learn.pytorch.pytorch_ray_estimator.PyTorchRayEstimator``.
    """

    def __init__(self,
                 *,
                 model_creator,
                 optimizer_creator,
                 loss_creator=None,
                 metrics=None,
                 scheduler_creator=None,
                 training_operator_cls=TrainingOperator,
                 initialization_hook=None,
                 config=None,
                 scheduler_step_freq="batch",
                 use_tqdm=False,
                 backend="torch_distributed",
                 workers_per_node=1):
        # batch_size belongs to fit/evaluate/predict, not to the config dict.
        if config is not None and "batch_size" in config:
            raise Exception("Please do not specify batch_size in config. Input batch_size in the"
                            " fit/evaluate/predict function of the estimator instead.")

        from zoo.orca.learn.pytorch.pytorch_ray_estimator import PyTorchRayEstimator
        self.estimator = PyTorchRayEstimator(
            model_creator=model_creator,
            optimizer_creator=optimizer_creator,
            loss_creator=loss_creator,
            metrics=metrics,
            scheduler_creator=scheduler_creator,
            training_operator_cls=training_operator_cls,
            initialization_hook=initialization_hook,
            config=config,
            scheduler_step_freq=scheduler_step_freq,
            use_tqdm=use_tqdm,
            backend=backend,
            workers_per_node=workers_per_node)

    def fit(self, data, epochs=1, batch_size=32, profile=False, reduce_results=True, info=None,
            feature_cols=None, label_cols=None):
        """Train the model for several epochs.

        Runs ``TrainingOperator.train_epoch()`` on all Ray workers in parallel.

        :param data: a SparkXShards, a Spark DataFrame, or a function taking
            config and batch_size and returning a PyTorch DataLoader.
        :param epochs: number of training epochs; default 1.
        :param batch_size: per-worker batch size; default 32. The effective
            total is workers_per_node * num_nodes times this value. For a
            data-creator function, this is its DataLoader batch size.
        :param profile: when True, include timing statistics.
        :param reduce_results: when True, average numeric metrics across
            workers into one dict (non-numeric values are picked from an
            arbitrary worker); when False, return one dict per worker.
        :param info: optional dict forwarded to the TrainingOperator for
            train_epoch and train_batch.
        :param feature_cols: feature column names when data is a DataFrame.
        :param label_cols: label column names when data is a DataFrame.
        :return: a list of per-epoch metric dicts (nested per worker when
            reduce_results is False). Custom metrics come from a custom
            training_operator_cls passed at construction.
        """
        return self.estimator.train(data=data, epochs=epochs, batch_size=batch_size,
                                    profile=profile, reduce_results=reduce_results,
                                    info=info, feature_cols=feature_cols,
                                    label_cols=label_cols)

    def predict(self, data, batch_size=32, feature_cols=None, profile=False):
        """Run inference with this model on the given data.

        :param data: a SparkXShards or a Spark DataFrame.
        :param batch_size: per-worker batch size; default 32.
        :param feature_cols: feature column names when data is a DataFrame.
        :param profile: when True, include timing statistics.
        :return: a SparkXShards whose shards hold predictions under the
            key "prediction".
        """
        return self.estimator.predict(data, batch_size=batch_size,
                                      feature_cols=feature_cols,
                                      profile=profile)

    def evaluate(self, data, batch_size=32, num_steps=None, profile=False, info=None,
                 feature_cols=None, label_cols=None):
        """Evaluate the model on validation data.

        Runs ``TrainingOperator.validate()`` on all Ray workers in parallel.
        Only zero-based-label classification accuracy is supported out of the
        box; override validate_batch in a TrainingOperator for other metrics.

        :param data: a SparkXShards, a Spark DataFrame, or a function taking
            config and batch_size and returning a PyTorch DataLoader.
        :param batch_size: per-worker batch size; default 32 (total is
            workers_per_node * num_nodes times this).
        :param num_steps: number of batches to evaluate on, i.e. the number
            of ``TrainingOperator.validate_batch`` calls.
        :param profile: when True, include timing statistics.
        :param info: optional dict forwarded to the TrainingOperator.
        :param feature_cols: feature column names when data is a DataFrame.
        :param label_cols: label column names when data is a DataFrame.
        :return: a dict of metrics (e.g. validation accuracy and loss).
        """
        return self.estimator.validate(data=data, batch_size=batch_size, num_steps=num_steps,
                                       profile=profile, info=info, feature_cols=feature_cols,
                                       label_cols=label_cols)

    def get_model(self):
        """Return the learned PyTorch model."""
        return self.estimator.get_model()

    @enable_multi_fs_save
    def save(self, model_path):
        """Save the Estimator state (model and optimizer) to model_path.

        :param model_path: (str) destination path.
        """
        return self.estimator.save(model_path)

    @enable_multi_fs_load
    def load(self, model_path):
        """Load the Estimator state (model and optimizer) from model_path.

        :param model_path: (str) path of an existing saved model.
        """
        return self.estimator.load(model_path)

    def shutdown(self, force=False):
        """Shut down the Ray workers and release their resources."""
        return self.estimator.shutdown(force=force)
class PyTorchSparkEstimator(OrcaSparkEstimator):
    def __init__(self, model, loss, optimizer, config=None, metrics=None, model_dir=None,
                 bigdl_type="float"):
        """Wrap a PyTorch model/loss/optimizer into a BigDL SparkEstimator.

        :param model: a PyTorch model, or a creator function taking the config
            dict and returning a model.
        :param loss: a PyTorch loss, or None for an empty TorchLoss.
        :param optimizer: a PyTorch optimizer, an Orca optimizer, or None
            (falls back to SGD with the default schedule).
        :param config: parameter dict passed to a model creator function.
        :param metrics: Orca validation metrics (single metric or list).
        :param model_dir: directory used by SparkEstimator for checkpoints.
        :param bigdl_type: BigDL numeric type tag; default "float".
        """
        from zoo.pipeline.api.torch import TorchModel, TorchLoss, TorchOptim
        self.loss = loss
        self.optimizer = optimizer
        self.config = {} if config is None else config
        # Convert the loss into a BigDL-compatible TorchLoss.
        if self.loss is None:
            self.loss = TorchLoss()
        else:
            self.loss = TorchLoss.from_pytorch(loss)
        # A creator function is materialized immediately with our config.
        # NOTE(review): only plain functions are detected here; other
        # callables (e.g. functools.partial) are treated as models.
        if isinstance(model, types.FunctionType):
            def model_creator(self):
                return model(self.config)
            model = model_creator(self)
        # Normalize the optimizer to a BigDL OptimMethod.
        if self.optimizer is None:
            from zoo.orca.learn.optimizers.schedule import Default
            self.optimizer = SGD(learningrate_schedule=Default()).get_optimizer()
        elif isinstance(self.optimizer, TorchOptimizer):
            self.optimizer = TorchOptim.from_pytorch(self.optimizer)
        elif isinstance(self.optimizer, OrcaOptimizer):
            self.optimizer = self.optimizer.get_optimizer()
        else:
            raise ValueError("Only PyTorch optimizer and orca optimizer are supported")
        from zoo.orca.learn.metrics import Metric
        self.metrics = Metric.convert_metrics_list(metrics)
        self.log_dir = None
        self.app_name = None
        self.model_dir = model_dir
        self.model = TorchModel.from_pytorch(model)
        self.estimator = SparkEstimator(self.model, self.optimizer, model_dir,
                                        bigdl_type=bigdl_type)
def _handle_dataframe(self, data, validation_data, feature_cols, label_cols):
schema = data.schema
train_rdd = data.rdd.map(lambda row: row_to_sample(row, schema, feature_cols, label_cols))
train_feature_set = FeatureSet.sample_rdd(train_rdd)
if validation_data is None:
val_feature_set = None
else:
assert isinstance(validation_data, DataFrame), "validation_data should also be a " \
"DataFrame"
val_feature_set = FeatureSet.sample_rdd(validation_data.rdd.map(
lambda row: row_to_sample(row, schema, feature_cols, label_cols)))
return train_feature_set, val_feature_set
def _handle_xshards(self, data, validation_data):
train_rdd = data.rdd.flatMap(xshard_to_sample)
train_feature_set = FeatureSet.sample_rdd(train_rdd)
if validation_data is None:
val_feature_set = None
else:
assert isinstance(validation_data, SparkXShards), "validation_data should be a " \
"SparkXShards"
val_feature_set = FeatureSet.sample_rdd(validation_data.rdd.flatMap(xshard_to_sample))
return train_feature_set, val_feature_set
def _handle_data_loader(self, data, validation_data):
train_feature_set = FeatureSet.pytorch_dataloader(data, "", "")
if validation_data is None:
val_feature_set = None
else:
assert isinstance(validation_data, DataLoader) or callable(data), \
"validation_data should be a pytorch DataLoader or a callable data_creator"
val_feature_set = FeatureSet.pytorch_dataloader(validation_data)
return train_feature_set, val_feature_set
def fit(self, data, epochs=1, batch_size=None, feature_cols=None, label_cols=None,
validation_data=None, checkpoint_trigger=None):
"""
Train this torch model with train data.
:param data: train data. It can be a XShards, Spark Dataframe, PyTorch DataLoader and
PyTorch DataLoader creator function that takes config and batch_size as argument and
returns a PyTorch DataLoader for training.
If data is an XShards, each partition can be a Pandas DataFrame or a dictionary of
{'x': feature, 'y': label}, where feature(label) is a numpy array or
a list of numpy arrays.
:param epochs: Number of epochs to train the model. Default: 1.
:param batch_size: Batch size used for training. Only used when data is an XShards.
Default: 32.
:param feature_cols: Feature column name(s) of data. Only used when data
is a Spark DataFrame or an XShards of Pandas DataFrame. Default: None.
:param label_cols: Label column name(s) of data. Only used when data is
a Spark DataFrame or an XShards of Pandas DataFrame. Default: None.
:param validation_data: Validation data. XShards, PyTorch DataLoader and PyTorch DataLoader
creator function are supported.
If data is XShards, each partition can be a Pandas DataFrame or a dictionary of
{'x': feature, 'y': label}, where feature(label) is a numpy array or a list of
numpy arrays.
:param checkpoint_trigger: Orca Trigger to set a checkpoint.
:return: The trained estimator object.
"""
from zoo.orca.learn.trigger import Trigger
end_trigger = MaxEpoch(epochs)
if isinstance(data, DataLoader):
assert batch_size is None and data.batch_size > 0, "When using PyTorch Dataloader as " \
"input, you need to specify the " \
"batch size in DataLoader and " \
"don't specify batch_size " \
"in the fit method."
else:
assert batch_size is not None and batch_size > 0, "batch_size should be greater than 0"
checkpoint_trigger = Trigger.convert_trigger(checkpoint_trigger)
if self.log_dir is not None and self.app_name is not None:
self.estimator.set_tensorboard(self.log_dir, self.app_name)
if validation_data:
assert self.metrics is not None, "You should provide metrics when creating this " \
"estimator if you provide validation_data."
if isinstance(data, SparkXShards):
if data._get_class_name() == 'pandas.core.frame.DataFrame':
data, validation_data = process_xshards_of_pandas_dataframe(data, feature_cols,
label_cols,
validation_data,
mode="fit")
train_fset, val_fset = self._handle_xshards(data, validation_data)
self.estimator.train(train_fset, self.loss, end_trigger, checkpoint_trigger,
val_fset, self.metrics, batch_size)
elif isinstance(data, DataFrame):
train_fset, val_fset = self._handle_dataframe(data, validation_data,
feature_cols, label_cols)
self.estimator.train(train_fset, self.loss, end_trigger, checkpoint_trigger,
val_fset, self.metrics, batch_size)
elif isinstance(data, DataLoader) or callable(data) or isinstance(data, types.FunctionType):
if isinstance(data, types.FunctionType):
data, validation_data = data(self.config, batch_size), validation_data(self.config,
batch_size)
train_fset, val_fset = self._handle_data_loader(data, validation_data)
self.estimator.train_minibatch(train_fset, self.loss, end_trigger,
checkpoint_trigger, val_fset, self.metrics)
else:
raise ValueError("Data and validation data should be SparkXShards, DataLoaders or "
"callable data_creators but get " + data.__class__.__name__)
return self
def predict(self, data, batch_size=4, feature_cols=None):
"""
Predict input data.
:param data: data to be predicted. It can be an XShards or a Spark Dataframe.
If it is an XShards, each partition can be a Pandas DataFrame or a dictionary of
{'x': feature}, where feature is a numpy array or a list of numpy arrays.
:param batch_size: batch size used for inference.
:param feature_cols: Feature column name(s) of data. Only used when data
is a Spark DataFrame or an XShards of Pandas DataFrame. Default: None.
:return: predicted result. The predict result is a XShards, each partition of the XShards
is a dictionary of {'prediction': result}, where result is a numpy array or a list
of numpy arrays.
"""
from zoo.orca.learn.utils import convert_predict_rdd_to_xshard
if isinstance(data, SparkXShards):
if data._get_class_name() == 'pandas.core.frame.DataFrame':
data = process_xshards_of_pandas_dataframe(data, feature_cols)
from zoo.orca.data.utils import xshard_to_sample
data_rdd = data.rdd.flatMap(xshard_to_sample)
elif isinstance(data, DataFrame):
schema = data.schema
data_rdd = data.rdd.map(lambda row: row_to_sample(row, schema,
feature_cols, None))
else:
raise ValueError("Data should be XShards, each element needs to be {'x': a feature "
"numpy array}.")
predicted_rdd = self.model.predict(data_rdd, batch_size=batch_size)
if isinstance(data, SparkXShards):
result = convert_predict_rdd_to_xshard(data, predicted_rdd)
else:
result = convert_predict_rdd_to_dataframe(data, predicted_rdd)
return result
def evaluate(self, data, batch_size=None, feature_cols=None, label_cols=None,
validation_metrics=None):
"""
Evaluate model.
:param data: data: evaluation data. It can be an XShards, Spark Dataframe,
PyTorch DataLoader and PyTorch DataLoader creator function.
If data is an XShards, each partition can be a Pandas DataFrame or a dictionary of
{'x': feature, 'y': label}, where feature(label) is a numpy array or a list of
numpy arrays.
:param batch_size: Batch size used for evaluation. Only used when data is a SparkXShard.
:param feature_cols: Feature column name(s) of data. Only used when data
is a Spark DataFrame or an XShards of Pandas DataFrame. Default: None.
:param label_cols: Label column name(s) of data. Only used when data is
a Spark DataFrame or an XShards of Pandas DataFrame. Default: None.
:param validation_metrics: Orca validation metrics to be computed on validation_data.
:return: validation results.
"""
from zoo.orca.data.utils import xshard_to_sample
assert data is not None, "validation data shouldn't be None"
assert self.metrics is not None, "metrics shouldn't be None, please specify the metrics" \
" argument when creating this estimator."
if isinstance(data, DataLoader):
assert batch_size is None and data.batch_size > 0, "When using PyTorch Dataloader as " \
"input, you need to specify the " \
"batch size in DataLoader and " \
"don't specify batch_size " \
"in the fit method."
else:
assert batch_size is not None and batch_size > 0, "batch_size should be greater than 0"
if isinstance(data, SparkXShards):
if data._get_class_name() == 'pandas.core.frame.DataFrame':
data = process_xshards_of_pandas_dataframe(data, feature_cols, label_cols)
val_feature_set = FeatureSet.sample_rdd(data.rdd.flatMap(xshard_to_sample))
result = self.estimator.evaluate(val_feature_set, self.metrics, batch_size)
elif isinstance(data, DataFrame):
schema = data.schema
val_feature_set = FeatureSet.sample_rdd(data.rdd.map(
lambda row: row_to_sample(row, schema, feature_cols, label_cols)))
result = self.estimator.evaluate(val_feature_set, self.metrics, batch_size)
elif isinstance(data, DataLoader) or callable(data) or isinstance(data, types.FunctionType):
if isinstance(data, types.FunctionType):
data = data(self.config, batch_size)
val_feature_set = FeatureSet.pytorch_dataloader(data)
result = self.estimator.evaluate_minibatch(val_feature_set, self.metrics)
else:
raise ValueError("Data should be a SparkXShards, a DataLoader or a callable "
"data_creator, but get " + data.__class__.__name__)
return bigdl_metric_results_to_dict(result)
def get_model(self):
"""
Get the trained PyTorch model.
:return: The trained PyTorch model.
"""
return self.model.to_pytorch()
def _get_optimizer_path(self, model_path):
if "." in model_path:
path_split = model_path.rsplit('.', 1)
return path_split[0] + "_optim." + path_split[1]
else:
return model_path + "_optim"
@enable_multi_fs_save
def save(self, model_path):
"""
Saves the Estimator state (including model and optimizer) to the provided model_path.
:param model_path: path to save the model.
:return: model_path
"""
optim_path = self._get_optimizer_path(model_path)
torch.save(self.get_model().state_dict(), model_path)
if self.optimizer is not None:
self.optimizer.save(path=optim_path, overWrite=True)
return model_path
@enable_multi_fs_load
def load(self, model_path):
"""
Load the Estimator state (model and possibly with optimizer) from provided model_path.
The model file should be generated by the save method of this estimator, or by
``torch.save(state_dict, model_path)``, where `state_dict` can be obtained by
the ``state_dict()`` method of a pytorch model.
:param model_path: path to the saved model.
:return:
"""
from zoo.pipeline.api.torch import TorchModel
import os
try:
pytorch_model = self.get_model()
pytorch_model.load_state_dict(torch.load(model_path))
self.model = TorchModel.from_pytorch(pytorch_model)
except Exception:
raise ValueError("Cannot load the PyTorch model. Please check your model path.")
optim_path = self._get_optimizer_path(model_path)
if os.path.isfile(optim_path):
try:
self.optimizer = OptimMethod.load(optim_path)
except Exception:
raise ValueError("Cannot load the optimizer. Only `bigdl.optim.optimizer."
"OptimMethod` is supported for loading.")
else:
self.optimizer = None
self.estimator = SparkEstimator(self.model, self.optimizer, self.model_dir)
def load_orca_checkpoint(self, path, version=None, prefix=None):
"""
Load existing checkpoint. To load a specific checkpoint, please provide both `version` and
`perfix`. If `version` is None, then the latest checkpoint will be loaded.
:param path: Path to the existing checkpoint (or directory containing Orca checkpoint
files).
:param version: checkpoint version, which is the suffix of model.* file, i.e., for
modle.4 file, the version is 4. If it is None, then load the latest checkpoint.
:param prefix: optimMethod prefix, for example 'optimMethod-TorchModelf53bddcc'.
:return:
"""
import os
from bigdl.nn.layer import Model
from bigdl.optim.optimizer import OptimMethod
from zoo.orca.learn.utils import find_latest_checkpoint
from zoo.pipeline.api.torch import TorchModel
if version is None:
path, prefix, version = find_latest_checkpoint(path, model_type="pytorch")
if path is None:
raise ValueError("Cannot find PyTorch checkpoint, please check your checkpoint"
" path.")
else:
assert prefix is not None, "You should provide optimMethod prefix, " \
"for example 'optimMethod-TorchModelf53bddcc'"
try:
loaded_model = Model.load(os.path.join(path, "model.{}".format(version)))
self.model = TorchModel.from_value(loaded_model.value)
self.optimizer = OptimMethod.load(os.path.join(path, "{}.{}".format(prefix, version)))
except Exception as e:
raise ValueError("Cannot load PyTorch checkpoint, please check your checkpoint path "
"and checkpoint type." + str(e))
self.estimator = SparkEstimator(self.model, self.optimizer, self.model_dir)
def get_train_summary(self, tag=None):
"""
Get the scalar from model train summary.
This method will return a list of summary data of
[iteration_number, scalar_value, timestamp].
:param tag: The string variable represents the scalar wanted
"""
return self.estimator.get_train_summary(tag=tag)
def get_validation_summary(self, tag=None):
"""
Get the scalar from model validation summary.
This method will return a list of summary data of
[iteration_number, scalar_value, timestamp].
Note that the metric and tag may not be consistent.
Please look up following form to pass tag parameter.
Left side is your metric during compile.
Right side is the tag you should pass.
>>> 'Accuracy' | 'Top1Accuracy'
>>> 'BinaryAccuracy' | 'Top1Accuracy'
>>> 'CategoricalAccuracy' | 'Top1Accuracy'
>>> 'SparseCategoricalAccuracy' | 'Top1Accuracy'
>>> 'AUC' | 'AucScore'
>>> 'HitRatio' | 'HitRate@k' (k is Top-k)
>>> 'Loss' | 'Loss'
>>> 'MAE' | 'MAE'
>>> 'NDCG' | 'NDCG'
>>> 'TFValidationMethod' | '${name + " " + valMethod.toString()}'
>>> 'Top5Accuracy' | 'Top5Accuracy'
>>> 'TreeNNAccuracy' | 'TreeNNAccuracy()'
>>> 'MeanAveragePrecision' | 'MAP@k' (k is Top-k) (BigDL)
>>> 'MeanAveragePrecision' | 'PascalMeanAveragePrecision' (Zoo)
>>> 'StatelessMetric' | '${name}'
:param tag: The string variable represents the scalar wanted
"""
return self.estimator.get_validation_summary(tag=tag)
def clear_gradient_clipping(self):
"""
Clear gradient clipping parameters. In this case, gradient clipping will not be applied.
In order to take effect, it needs to be called before fit.
:return:
"""
self.estimator.clear_gradient_clipping()
def set_constant_gradient_clipping(self, min, max):
"""
Set constant gradient clipping during the training process.
In order to take effect, it needs to be called before fit.
:param min: The minimum value to clip by.
:param max: The maximum value to clip by.
:return:
"""
self.estimator.set_constant_gradient_clipping(min=min, max=max)
def set_l2_norm_gradient_clipping(self, clip_norm):
"""
Clip gradient to a maximum L2-Norm during the training process.
In order to take effect, it needs to be called before fit.
:param clip_norm: Gradient L2-Norm threshold.
:return:
"""
self.estimator.set_l2_norm_gradient_clipping(clip_norm=clip_norm)
| |
import six
import pytest
from mock import Mock, patch
from nefertari import tweens
def mock_timer():
    """Return a fake ``time.time`` that advances by exactly one per call.

    The counter lives on the ``mock_timer`` function object itself and is
    reset to zero every time a new fake clock is created.
    """
    mock_timer.time = 0

    def _tick():
        mock_timer.time += 1
        return mock_timer.time

    return _tick
class DummyConfigurator(object):
    """Minimal stand-in for a Pyramid Configurator that records, instead of
    performing, ``add_subscriber`` registrations."""

    def __init__(self):
        # (wrapped, interfaces) pairs in registration order.
        self.subscribed = []

    def add_subscriber(self, wrapped, ifaces):
        """Remember the subscription so tests can inspect it later."""
        self.subscribed.append((wrapped, ifaces))
class TestTweens(object):
    """Unit tests for the nefertari tween factories: request timing, GET
    tunneling, CORS headers, cache control, SSL scheme fixup and the
    'self' matchdict alias."""

    # --- request_timing: fast requests log at debug, slow ones at warning ---

    @patch('nefertari.tweens.time')
    @patch('nefertari.tweens.log')
    def test_request_timing(self, mock_log, mock_time):
        # mock_timer makes each time.time() call advance by 1 second.
        mock_time.time = mock_timer()
        request = Mock(method='GET', url='http://example.com')
        registry = Mock()
        registry.settings = {'request_timing.slow_request_threshold': 1000}
        handler = lambda request: request
        timing = tweens.request_timing(handler, registry)
        timing(request)
        mock_log.debug.assert_called_once_with(
            'GET (http://example.com) request took 1 seconds')
        assert not mock_log.warning.called

    @patch('nefertari.tweens.time')
    @patch('nefertari.tweens.log')
    def test_request_timing_slow_request(self, mock_log, mock_time):
        # Threshold of 0 makes every request count as slow.
        mock_time.time = mock_timer()
        request = Mock(method='GET', url='http://example.com')
        registry = Mock()
        registry.settings = {'request_timing.slow_request_threshold': 0}
        handler = lambda request: request
        timing = tweens.request_timing(handler, registry)
        timing(request)
        mock_log.warning.assert_called_once_with(
            'GET (http://example.com) request took 1 seconds')
        assert not mock_log.debug.called

    # --- get_tunneling: the _m query param rewrites the request method ---

    def test_get_tunneling(self):
        class GET(dict):
            def mixed(self):
                return self
        request = Mock(GET=GET({'_m': 'POST', 'foo': 'bar'}), method='GET')
        get_tunneling = tweens.get_tunneling(lambda x: x, None)
        get_tunneling(request)
        assert request.GET == {"foo": "bar"}
        assert request.method == 'POST'
        assert request.content_type == 'application/json'
        assert request.body == six.b('{"foo": "bar"}')

    def test_get_tunneling_reserved_params_dropped(self):
        # Reserved params stay in GET but are excluded from the JSON body.
        from nefertari import RESERVED_PARAMS
        class GET(dict):
            def mixed(self):
                return self
        reserved = RESERVED_PARAMS[0]
        get_data = GET({
            '_m': 'POST',
            'foo': 'bar',
            reserved: 'boo',
        })
        request = Mock(GET=get_data, method='GET')
        get_tunneling = tweens.get_tunneling(lambda x: x, None)
        get_tunneling(request)
        assert request.GET == {'foo': 'bar', reserved: 'boo'}
        assert request.method == 'POST'
        assert request.content_type == 'application/json'
        assert request.body == six.b('{"foo": "bar"}')
        assert request._tunneled_get

    def test_get_tunneling_not_allowed_method(self):
        # Non-tunnelable methods keep body/content_type untouched.
        class GET(dict):
            def mixed(self):
                return self
        request = Mock(
            GET=GET({'_m': 'DELETE', 'foo': 'bar'}), method='GET',
            body=None, content_type=None)
        get_tunneling = tweens.get_tunneling(lambda x: x, None)
        get_tunneling(request)
        assert request.GET == {"foo": "bar"}
        assert request.method == 'DELETE'
        assert request.content_type is None
        assert request.body is None

    # --- cors: Access-Control-* headers driven by registry settings ---

    def test_cors_no_origins_no_creds(self):
        registry = Mock(settings={
            'cors.allow_origins': '',
            'cors.allow_credentials': None,
        })
        handler = lambda x: Mock(headerlist=[])
        request = Mock(
            headers={'Origin': '127.0.0.1:8080'},
            host_url='127.0.0.1:8080')
        response = tweens.cors(handler, registry)(request)
        assert response.headerlist == []

    def test_cors_disallow_creds(self):
        registry = Mock(settings={
            'cors.allow_origins': '',
            'cors.allow_credentials': False,
        })
        handler = lambda x: Mock(headerlist=[])
        request = Mock(
            headers={'Origin': '127.0.0.1:8080'},
            host_url='127.0.0.1:8080')
        response = tweens.cors(handler, registry)(request)
        assert response.headerlist == [
            ('Access-Control-Allow-Credentials', False)]

    def test_cors_allow_creds_and_origin(self):
        registry = Mock(settings={
            'cors.allow_origins': '127.0.0.1:8080,127.0.0.1:8090',
            'cors.allow_credentials': True,
        })
        handler = lambda x: Mock(headerlist=[])
        request = Mock(
            headers={'Origin': '127.0.0.1:8080'},
            host_url='127.0.0.1:8080')
        response = tweens.cors(handler, registry)(request)
        assert response.headerlist == [
            ('Access-Control-Allow-Origin', '127.0.0.1:8080'),
            ('Access-Control-Allow-Credentials', True)]

    def test_cors_wrong_origin(self):
        # An origin outside the allow list gets no CORS headers at all.
        registry = Mock(settings={
            'cors.allow_origins': '127.0.0.1:8080,127.0.0.1:8090',
            'cors.allow_credentials': None,
        })
        handler = lambda x: Mock(headerlist=[])
        request = Mock(
            headers={'Origin': '127.0.0.1:8000'},
            host_url='127.0.0.1:8000')
        response = tweens.cors(handler, registry)(request)
        assert response.headerlist == []

    def test_cors_source_or_host_url(self):
        # Either the Origin header or the host_url may match the allow list.
        registry = Mock(settings={
            'cors.allow_origins': '127.0.0.1:8080,127.0.0.1:8090',
            'cors.allow_credentials': None,
        })
        handler = lambda x: Mock(headerlist=[])
        request = Mock(
            headers={'Origin': '127.0.0.1:8080'},
            host_url='')
        response = tweens.cors(handler, registry)(request)
        assert response.headerlist == [
            ('Access-Control-Allow-Origin', '127.0.0.1:8080')]
        request = Mock(
            headers={},
            host_url='127.0.0.1:8080')
        response = tweens.cors(handler, registry)(request)
        assert response.headerlist == [
            ('Access-Control-Allow-Origin', '127.0.0.1:8080')]

    def test_cors_allow_origins_star_credentials_true(self):
        # '*' origins combined with credentials is rejected at setup time.
        registry = Mock(settings={
            'cors.allow_origins': '*',
            'cors.allow_credentials': True,
        })
        handler = lambda x: Mock(headerlist=[])
        with pytest.raises(Exception) as ex:
            tweens.cors(handler, registry)
        expected = ('Not allowed Access-Control-Allow-Credentials '
                    'to set to TRUE if origin is *')
        assert str(ex.value) == expected

    def test_cors_allow_origins_star_credentials_false(self):
        registry = Mock(settings={
            'cors.allow_origins': '*',
            'cors.allow_credentials': None,
        })
        handler = lambda x: Mock(headerlist=[])
        request = Mock(
            headers={},
            host_url='127.1.2.3:1234')
        response = tweens.cors(handler, registry)(request)
        assert response.headerlist == [
            ('Access-Control-Allow-Origin', '127.1.2.3:1234')]

    # --- cache_control: default no-cache only when no header is present ---

    def test_cache_control_header_not_set(self):
        handler = lambda x: Mock(headerlist=[('Cache-Control', '')])
        response = tweens.cache_control(handler, None)(None)
        assert not response.cache_expires.called

    def test_cache_control_header_set(self):
        handler = lambda x: Mock(headerlist=[])
        response = tweens.cache_control(handler, None)(None)
        response.cache_expires.assert_called_once_with(0)

    # --- ssl: proxy headers override the wsgi url scheme (lowercased) ---

    def test_ssl_url_scheme(self):
        request = Mock(
            scheme=None,
            environ={'HTTP_X_URL_SCHEME': 'Foo'}
        )
        tweens.ssl(lambda x: x, None)(request)
        assert request.environ['wsgi.url_scheme'] == 'foo'
        assert request.scheme == 'foo'

    def test_ssl_forwarded_proto(self):
        request = Mock(
            scheme=None,
            environ={'HTTP_X_FORWARDED_PROTO': 'Foo'}
        )
        tweens.ssl(lambda x: x, None)(request)
        assert request.environ['wsgi.url_scheme'] == 'foo'
        assert request.scheme == 'foo'

    def test_ssl_no_scheme(self):
        request = Mock(scheme=None, environ={})
        tweens.ssl(lambda x: x, None)(request)
        assert request.environ == {}
        assert request.scheme is None

    # --- enable_selfalias: 'self' in the matchdict becomes the username ---

    def test_enable_selfalias(self):
        from pyramid.events import ContextFound
        config = DummyConfigurator()
        assert config.subscribed == []
        tweens.enable_selfalias(config, 'foo')
        assert len(config.subscribed) == 1
        assert six.callable(config.subscribed[0][0])
        assert config.subscribed[0][1] is ContextFound

    def test_context_found_subscriber_alias_enabled(self):
        config = DummyConfigurator()
        tweens.enable_selfalias(config, 'foo')
        context_found_subscriber = config.subscribed[0][0]
        request = Mock(
            user=Mock(username='user12'),
            matchdict={'foo': 'self'})
        context_found_subscriber(Mock(request=request))
        assert request.matchdict['foo'] == 'user12'

    def test_context_found_subscriber_no_matchdict(self):
        config = DummyConfigurator()
        tweens.enable_selfalias(config, 'foo')
        context_found_subscriber = config.subscribed[0][0]
        request = Mock(
            user=Mock(username='user12'),
            matchdict=None)
        context_found_subscriber(Mock(request=request))
        assert request.matchdict is None

    def test_context_found_subscriber_not_self(self):
        config = DummyConfigurator()
        tweens.enable_selfalias(config, 'foo')
        context_found_subscriber = config.subscribed[0][0]
        request = Mock(
            user=Mock(username='user12'),
            matchdict={'foo': '1'})
        context_found_subscriber(Mock(request=request))
        assert request.matchdict['foo'] == '1'

    def test_context_found_subscriber_not_authenticated(self):
        # Anonymous requests keep the literal 'self' value.
        config = DummyConfigurator()
        tweens.enable_selfalias(config, 'foo')
        context_found_subscriber = config.subscribed[0][0]
        request = Mock(
            user=None,
            matchdict={'foo': 'self'})
        context_found_subscriber(Mock(request=request))
        assert request.matchdict['foo'] == 'self'

    def test_context_found_subscriber_wrong_id_name(self):
        # Only the configured matchdict key ('foo') is aliased.
        config = DummyConfigurator()
        tweens.enable_selfalias(config, 'foo')
        context_found_subscriber = config.subscribed[0][0]
        request = Mock(
            user=Mock(username='user12'),
            matchdict={'qoo': 'self'})
        context_found_subscriber(Mock(request=request))
        assert request.matchdict['qoo'] == 'self'
| |
"""
These helper classes are written to make it easier to write 'mock test cases'
for Core objects
"""
from core.models import Allocation, Application, AtmosphereUser, Group,\
Identity, IdentityMembership, \
Instance, InstanceStatusHistory,\
MachineRequest, Provider, \
ProviderType, PlatformType, \
ProviderMachine, Size, Quota
from uuid import uuid4
def _new_providers():
    """Create (or fetch) the two mock OpenStack providers used by the helpers.

    :return: dict with keys "openstack" and "workshop" mapping to Providers.
    """
    platform = PlatformType.objects.get_or_create(name='KVM')[0]
    provider_type = ProviderType.objects.get_or_create(name='OpenStack')[0]
    tucson, _ = Provider.objects.get_or_create(
        location="Example OpenStack - Tucson",
        virtualization=platform,
        type=provider_type, public=True)
    workshop, _ = Provider.objects.get_or_create(
        location="Example OpenStack - Workshop",
        virtualization=platform,
        type=provider_type, public=True)
    return {"openstack": tucson, "workshop": workshop}
def _new_mock_identity_member(username, provider):
    """Create a mock user/group/identity chain and return its membership.

    :param username: name used for both the AtmosphereUser and its Group.
    :param provider: Provider the identity belongs to.
    :return: the IdentityMembership tying identity, group, allocation, quota.
    """
    user = AtmosphereUser.objects.get_or_create(username=username)[0]
    group = Group.objects.get_or_create(name=username)[0]
    identity = Identity.objects.get_or_create(
        created_by=user,
        provider=provider)[0]
    membership, _ = IdentityMembership.objects.get_or_create(
        identity=identity, member=group,
        allocation=Allocation.default_allocation(),
        quota=Quota.default_quota())
    return membership
def _new_provider_machine(name, version, identifier, identity):
    """Create a ProviderMachine (and its backing Application) for *identity*.

    :param name: application name.
    :param version: machine version string.
    :param identifier: uuid shared by the application and the machine.
    :param identity: Identity whose creator/provider the records use.
    :return: the ProviderMachine instance.
    """
    application, _ = Application.objects.get_or_create(
        name=name,
        description='Mock Test Application named %s' % name,
        created_by=identity.created_by, created_by_identity=identity,
        uuid=identifier)
    provider_machine, _ = ProviderMachine.objects.get_or_create(
        application=application, provider=identity.provider,
        created_by=identity.created_by, created_by_identity=identity,
        identifier=identifier,
        version=version)
    return provider_machine
def _new_core_instance(name, alias, start_date, identity, machine=None):
    """Create a core Instance, minting a default machine when none is given.

    :param name: instance name.
    :param alias: provider alias (also used to build a unique token).
    :param start_date: instance start date.
    :param identity: Identity owning the instance.
    :param machine: optional ProviderMachine; a mock one is created otherwise.
    :return: the Instance record.
    """
    if machine is None:
        machine = _new_provider_machine("Mock Machine", "1.0",
                                        uuid4(), identity)
    owner = identity.created_by
    return Instance.objects.get_or_create(
        name=name, provider_alias=alias,
        provider_machine=machine, ip_address='1.2.3.4',
        created_by=owner, created_by_identity=identity,
        token='unique-test-token-%s' % alias,
        password='password', shell=False, start_date=start_date)[0]
class CoreStatusHistoryHelper(object):
    """Builds InstanceStatusHistory records for a core Instance at a chosen
    status, size and start date."""

    def __init__(
            self,
            instance,
            start_date,
            status_name='active',
            size_name='small'):
        self._init_sizes(instance.provider_machine.provider)
        self.instance = instance
        self.status_name = status_name
        self.set_size(size_name)
        self.set_start_date(start_date)

    def first_transaction(self):
        """Create and persist the first history entry for the instance."""
        entry = InstanceStatusHistory.create_history(
            self.status_name, self.instance, self.size, self.start_date)
        entry.save()
        return entry

    def new_transaction(self):
        """Open a new history transaction with the current settings."""
        return InstanceStatusHistory.transaction(
            self.status_name, self.instance, self.size, self.start_date)

    def _init_sizes(self, provider):
        """Create (or fetch) the four standard test sizes for *provider*."""
        # (name, alias, cpu, mem in MB, disk/root)
        specs = [
            ('1', 'tiny', 1, 1024 * 2, 0),
            ('2', 'small', 2, 1024 * 4, 0),
            ('3', 'medium', 4, 1024 * 8, 0),
            ('4', 'large', 8, 1024 * 16, 0),
        ]
        self.AVAILABLE_SIZES = {}
        for name, alias, cpu, mem, disk in specs:
            self.AVAILABLE_SIZES[alias] = Size.objects.get_or_create(
                name=name, alias=alias,
                cpu=cpu, mem=mem,
                disk=disk, root=disk,
                provider=provider)[0]

    def set_start_date(self, start_date):
        self.start_date = start_date

    def set_size(self, size_name):
        if size_name not in self.AVAILABLE_SIZES:
            raise ValueError("Size:%s not found in AVAILABLE_SIZES"
                             % size_name)
        self.size = self.AVAILABLE_SIZES[size_name]
class CoreInstanceHelper(object):
    """Builds a core Instance with a full mock dependency chain (provider,
    user, identity and two provider machines)."""

    def __init__(self, name, provider_alias, start_date,
                 provider='openstack', machine='ubuntu', username='mock_user'):
        self.name = name
        self.provider_alias = provider_alias
        self.start_date = start_date
        # Mock Provider and dependencies..
        self.AVAILABLE_PROVIDERS = _new_providers()
        self.set_provider(provider)
        # Mock the User, Identity, and dependencies..
        identity_member = _new_mock_identity_member(
            username, self.provider)
        self.identity = identity_member.identity
        self.user = self.identity.created_by
        # Machines must exist before set_machine can resolve a string name.
        self._init_provider_machines()
        self.set_machine(machine)

    def set_provider(self, provider):
        """Select one of the providers created by _new_providers by key."""
        if provider not in self.AVAILABLE_PROVIDERS:
            raise ValueError(
                "The test provider specified '%s' is not a valid provider"
                % provider)
        self.provider = self.AVAILABLE_PROVIDERS[provider]

    def set_machine(self, machine):
        """Select a machine by name, or accept a ProviderMachine directly."""
        # If a provider machine is passed in, its always accepted
        if isinstance(machine, ProviderMachine):
            self.machine = machine
            return
        # If a string is passed in, it must match exactly
        if machine not in self.AVAILABLE_MACHINES:
            raise ValueError(
                "The test machine specified '%s' is not a valid machine"
                % machine)
        self.machine = self.AVAILABLE_MACHINES[machine]

    def to_core_instance(self):
        """Create (or fetch) the Instance described by this helper."""
        return Instance.objects.get_or_create(
            name=self.name, provider_alias=self.provider_alias,
            provider_machine=self.machine, ip_address='1.2.3.4',
            created_by=self.user, created_by_identity=self.identity,
            token='unique-test-token-%s' % self.name,
            password='password',
            shell=False, start_date=self.start_date)[0]

    def _init_provider_machines(self):
        """Create the 'ubuntu' and 'centos' mock machines for this provider."""
        # Mock a machine and its dependencies..
        app = Application.objects.get_or_create(
            name='Ubuntu',
            description='', created_by=self.user,
            created_by_identity=self.identity,
            uuid='1234-ubuntu-mock-APP')[0]
        ubuntu = ProviderMachine.objects.get_or_create(
            application=app, provider=self.provider,
            created_by=self.user, created_by_identity=self.identity,
            identifier='1234-ubuntu-mock-machine',
            version="1.0")[0]
        app = Application.objects.get_or_create(
            name='CentOS',
            description='', created_by=self.user,
            created_by_identity=self.identity,
            uuid='1234-centos-mock-APP')[0]
        centos = ProviderMachine.objects.get_or_create(
            application=app, provider=self.provider,
            created_by=self.user, created_by_identity=self.identity,
            identifier='1234-centos-mock-machine',
            version='1.0')[0]
        self.AVAILABLE_MACHINES = {
            "ubuntu": ubuntu,
            "centos": centos,
        }
class CoreProviderMachineHelper(object):
    """Builds a mock ProviderMachine (plus provider/user/identity chain)."""

    def __init__(
            self,
            name,
            identifier,
            provider,
            start_date,
            username='mock_user'):
        self.name = name
        # NOTE: Using 'identifier' as ProviderMachine and Application id's
        self.uuid = identifier
        self.version = '1.0.0'
        self.start_date = start_date
        # Mock Provider and its dependencies.
        self.AVAILABLE_PROVIDERS = _new_providers()
        self.set_provider(provider)
        # Mock the User, Identity and their dependencies.
        member = _new_mock_identity_member(username, self.provider)
        self.identity = member.identity
        self.user = self.identity.created_by

    def set_provider(self, provider):
        """Select one of the providers created by _new_providers by key."""
        if provider not in self.AVAILABLE_PROVIDERS:
            raise ValueError(
                "The test provider specified '%s' is not a valid provider"
                % provider)
        self.provider = self.AVAILABLE_PROVIDERS[provider]

    def to_core_machine(self):
        """Create (and remember) the ProviderMachine for this helper."""
        self.machine = _new_provider_machine(self.name, self.version,
                                             self.uuid, self.identity)
        return self.machine
class CoreMachineRequestHelper(object):
    """Builds a pending MachineRequest around a (possibly auto-created)
    mock Instance."""

    def __init__(self, new_application_name, start_date,
                 new_machine_version='1.0', new_machine_forked=True,
                 instance=None, provider='openstack', username='mock_user'):
        self.AVAILABLE_PROVIDERS = _new_providers()
        self.set_provider(provider)
        member = _new_mock_identity_member(username, self.provider)
        self.identity = member.identity
        self.user = self.identity.created_by
        self.forked = new_machine_forked
        # Without an explicit instance, mint a throwaway mock one.
        if not instance:
            instance = _new_core_instance(
                "Mock Instance", uuid4(), start_date, self.identity, None)
        self.new_application_name = new_application_name
        self.new_machine_version = new_machine_version
        self.instance = instance
        self.start_date = start_date

    def set_provider(self, provider):
        """Select one of the providers created by _new_providers by key."""
        if provider not in self.AVAILABLE_PROVIDERS:
            raise ValueError(
                "The test provider specified '%s' is not a valid provider"
                % provider)
        self.provider = self.AVAILABLE_PROVIDERS[provider]

    def to_core_machine_request(self):
        """Create (or fetch) the pending MachineRequest for the instance."""
        parent = self.instance.provider_machine
        return MachineRequest.objects.get_or_create(
            instance=self.instance, status='pending',
            parent_machine=parent,
            new_machine_provider=parent.provider,
            new_application_name=self.new_application_name,
            new_machine_version=self.new_machine_version,
            new_machine_owner=self.user, new_machine_visibility='public',
            new_machine_forked=self.forked, start_date=self.start_date)[0]
| |
import random
import sys
import string
import math
import array
import itertools
import operator
import collections
import os
# Raise CPython's default recursion limit (1000); the map processing below
# presumably recurses deeply -- TODO confirm which routine needs this.
sys.setrecursionlimit(20000)
def read(path, mapa):
    """
    Read a map file and normalize its rows.

    Each line loses its trailing newline (if any) and is prefixed with 's'
    when it does not already start with one. If the final row's second
    character is not 's', an all-'s' border row of the same length is
    appended.

    :param path: directory containing the map file (including separator).
    :param mapa: file name of the map.
    :return: list of normalized row strings.
    """
    inputs = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(path + mapa, "r") as archivo:
        for line in archivo:
            # Only strip a newline that is actually there. The original
            # sliced line[:-1] unconditionally, silently dropping the last
            # character of a file that did not end with a newline.
            if line.endswith('\n'):
                line = line[:-1]
            if not line.startswith('s'):
                line = 's' + line
            inputs.append(line)
    if inputs[-1][1] != 's':
        inputs.append('s' * len(inputs[-1]))
    return inputs
def obtainPaths(path):
    """Collect map file names under *path*, sorted by their numeric suffix.

    Walks the directory tree and gathers file names, skipping the first
    entry of each directory listing (``files[1:]`` — presumably meant to
    skip an artifact such as ``.DS_Store``; NOTE(review): ``os.walk``
    ordering is platform dependent, so this may drop a real map file —
    confirm).

    File names are assumed to look like ``<6 chars><number>.txt``; the
    numeric part (characters 6..-4) drives the sort order.
    """
    mapas = []
    for root, dirs, files in os.walk(path):
        for file in files[1:]:
            mapas.append(file)
    mapas = sorted(mapas, key=lambda mapa:int(mapa[6:-4]))
    return mapas
def unicTiles(uT, lowTiles):
    """Extend *uT* in place with every tile character found in the rows
    of *lowTiles* that it does not already contain, preserving
    first-seen order.  Returns the (mutated) *uT* list."""
    for row in lowTiles:
        for tile in row:
            if tile not in uT:
                uT.append(tile)
    return uT
def splitMaps(mapa, splitNumber):
    """Split the list of rows *mapa* into *splitNumber* consecutive chunks.

    The leading chunks hold ``len(mapa) // splitNumber`` rows each; the
    final chunk absorbs any remainder.

    Fix: the original used Python 2's integer ``/`` and ``xrange``; under
    Python 3 the division yields a float and breaks ``range``.  Floor
    division and ``range`` behave identically on both versions.

    :param mapa: list of row strings
    :param splitNumber: number of chunks to produce (> 0)
    :return: list of row-list chunks
    """
    splitMap = []
    start = 0
    length = len(mapa) // splitNumber
    limit = length
    for index in range(0, splitNumber):
        if limit <= len(mapa) and limit + length <= len(mapa):
            splitsAux = mapa[start:limit]
            start = limit
            limit = start + length
        else:
            # Last chunk: take everything that remains.
            limit = len(mapa)
            splitsAux = mapa[start:limit]
        splitMap.append(splitsAux)
    return splitMap
def networkStructure(tipo):
    """Placeholder for selecting a network structure by *tipo*.

    Not implemented: always returns -1 and is not called anywhere in
    this file.
    """
    return -1
def createMatrix(unicTiles):
    """Return a square zero matrix sized by the number of unique tiles.

    Rows are independent lists, safe to mutate individually."""
    size = len(unicTiles)
    return [[0] * size for _ in range(size)]
def createMatrixNetwork3(unicTiles):
    """Return one context-count dict per unique tile, each seeded with
    the all-sentinel context ``"sss"`` at count 0."""
    return [{"sss": 0} for _ in unicTiles]
def fillMatrixNetwork3(mapa, matrix, unicTiles):
    """Count, for every tile, how often it appears after each
    (left, below-left, below) context triple.

    For each cell holding ``actual``, the context key is the
    concatenation of its left neighbour, the cell directly under that
    neighbour, and the cell under ``actual`` itself;
    ``matrix[tile_index][key]`` is incremented.  The bottom row has no
    row below it and is skipped.  Returns the mutated *matrix*.
    """
    for row_idx, row in enumerate(mapa[:-1]):
        below = mapa[row_idx + 1]
        for left, actual, diag, under in zip(row, row[1:], below, below[1:]):
            context = left + diag + under
            counts = matrix[unicTiles.index(actual)]
            counts[context] = counts.get(context, 0) + 1
    return matrix
def fillProbabilityMatrixNetwork3(mapa,matrix,unicTiles):
    """Convert the context counts in *matrix* into per-tile probabilities.

    NOTE(review): Python 2 only (``iteritems`` and the ``print``
    statement).  ``pM`` is first seeded by re-counting *mapa*; its
    entries are then overwritten with ``count / tile_total`` —
    presumably the re-count only serves to pre-create the context keys;
    confirm.
    """
    total = 0
    unicTilesTotal = []
    unicTotal = []
    pM = createMatrixNetwork3(unicTiles)
    fillMatrixNetwork3(mapa,pM,unicTiles)
    # Per-tile totals: sum of all context counts recorded for the tile.
    for m in matrix:
        for i,j in m.iteritems():
            total += j
        unicTotal.append(total)
        total = 0
    # Debug output of the per-tile totals.
    print unicTotal
    # Normalise each context count by its tile's total; tiles with a
    # zero total keep their seeded values untouched.
    for i,m in enumerate(matrix):
        for key,val in m.iteritems():
            if unicTotal[i] > 0:
                auxVal = val
                pM[i][key] = float(auxVal)/unicTotal[i]
    return pM
def garantizeSum(probabilities):
    """Check that every probability row sums to the same value.

    The original body referenced ``total`` and ``unicTotal`` before
    assignment and used the undefined names ``true``/``false``, so it
    raised NameError on every call; this version implements the
    documented intent ("guarantee the sums are equal / approximately
    1"): sum each row and verify all sums match.

    :param probabilities: iterable of numeric rows
    :return: True when all row sums are equal (vacuously True for no
        rows), False otherwise.
    """
    row_sums = [sum(row) for row in probabilities]
    if not row_sums:
        return True
    return all(row_sums[0] == total for total in row_sums)
def fillMatrix(mapa, matrix, unicTiles):
    """Accumulate horizontal tile-transition counts into *matrix*.

    ``matrix[next][prev]`` is incremented for every adjacent pair of
    tiles in a row.  A trailing all-sentinel row (detected via the last
    row's second character being ``'s'``) is excluded from the counts.
    Returns the mutated *matrix*.
    """
    rows = mapa[:-1] if mapa[-1][1] == 's' else mapa
    for row in rows:
        for prev_tile, next_tile in zip(row, row[1:]):
            matrix[unicTiles.index(next_tile)][unicTiles.index(prev_tile)] += 1
    return matrix
def fillProbabilityMatrix(matrix, unicTiles):
    """Normalise transition counts into column-wise probabilities.

    ``pM[n][p] = matrix[n][p] / colsum(p)``: the probability of seeing
    tile ``n`` immediately after tile ``p``.  Columns whose total is
    zero keep their initial 0 entries.

    Fix: uses ``range`` instead of Python 2's ``xrange`` so the function
    also runs under Python 3 (behaviour is otherwise identical); the
    unused ``unicTotal`` local was dropped.

    :param matrix: square count matrix (``matrix[next][prev]``)
    :param unicTiles: unique tile list sizing the output matrix
    :return: new matrix of float probabilities
    """
    pM = createMatrix(unicTiles)
    total = 0
    unicTilesTotal = []
    # Column totals: how many transitions leave each previous tile.
    for i in range(len(matrix)):
        for nextC in matrix:
            total += nextC[i]
        unicTilesTotal.append(total)
        total = 0
    # Probability of each transition, guarding against empty columns.
    for i in range(len(matrix)):
        for iN, nextC in enumerate(matrix):
            if unicTilesTotal[i] != 0:
                pM[iN][i] = float(matrix[iN][i]) / unicTilesTotal[i]
    return pM
def training(path,mapas,uT, splitNumber):
    """Learn per-chunk context probabilities from every training map.

    Reads each map, accumulates the unique tile set into *uT* (mutated
    in place), splits every map into *splitNumber* horizontal chunks and
    trains one "network 3" count/probability table per chunk index.

    NOTE(review): Python 2 only (``print`` statements and ``xrange``).

    :return: list of per-chunk probability tables (length splitNumber).
    """
    probabilities = [None]*splitNumber
    probabilities2 = [None]*splitNumber
    m2 = [None] * splitNumber
    splitM = []
    for mapa in mapas:
        input_data = read(path,mapa)
        unicTiles(uT,input_data)
        splitM.append(splitMaps(input_data, splitNumber))
    # One fresh count table per chunk index, shared across all maps.
    for i in xrange(0,splitNumber):
        # m[i] = createMatrix(uT)
        m2[i] = createMatrixNetwork3(uT)
    print '\n'
    for mapa in splitM:
        for i,sM in enumerate(mapa):
            # Every chunk except the last gets an all-sentinel row
            # appended so its bottom edge has a context to count.
            if i <= splitNumber-2:
                aux = ('s'*len(sM[-1]))
                sM.append(aux)
            # for sm in sM:
            # print sm
            print '\n'
            # fillMatrix(sM, m[i], uT)
            m = fillMatrixNetwork3(sM,m2[i],uT)
            print "sum:",i,'\n', m2[i],"\n"
            # probabilities[i] = fillProbabilityMatrix(m[i],uT)
            probabilities2[i] = fillProbabilityMatrixNetwork3(sM,m,uT);
    return probabilities2
def nextMap(mapas):
    """Derive the next output file name from the last training map.

    The numeric part of the name (characters 6..-4) is incremented; the
    slice removing the old suffix accounts for the digit count growing
    past 10.  Only valid for one- and two-digit map numbers.
    """
    last = mapas[-1]
    number = int(last[6:-4]) + 1
    suffix_len = 6 if number > 10 else 5
    return last[:-suffix_len] + str(number) + ".txt"
def getMaxProbability(index, probabilities):
    """Return the row index whose value at column *index* is largest.

    Ties keep the earliest row; when no value exceeds 0 the fallback
    result is row 0.
    """
    best_row = 0
    best_value = 0
    for row_idx, row in enumerate(probabilities):
        if row[index] > best_value:
            best_value = row[index]
            best_row = row_idx
    return best_row
def deleteContent(pfile):
    """Truncate the already-open file object *pfile* to zero length,
    leaving the handle positioned at the start."""
    pfile.seek(0)
    pfile.truncate()
def getMaxProbabilityNetwork3(key, probabilities):
    """Return the tile index whose context dict assigns *key* the
    highest probability.

    Dicts lacking *key* are skipped.  When nothing beats 0 the fallback
    index is 1 — not 0; preserved from the original.
    """
    best_index = 1
    best_prob = 0
    for tile_index, context_probs in enumerate(probabilities):
        if key in context_probs:
            prob = context_probs[key]
            if prob > best_prob:
                best_index = tile_index
                best_prob = prob
    return best_index
def writingRecursionNetwork3(path,mapa,nextMap,limitFile,limitColumn, probabilities, uT):
    """Recursively grow a map one predicted character at a time.

    Row ``limitFile-1`` is extended with the most probable tile for the
    (left, below-left, below) context at *limitColumn*; rows above it
    are reset to a single sentinel, rows below are copied unchanged.
    Recursion advances column by column, then moves one row up; it
    terminates when ``limitFile`` reaches 0.  When the top data row
    (``limitFile == 1``) reaches its last column, the finished rows are
    appended to *nextMap* (the output accumulator, mutated in place).

    NOTE(review): Python 2 only (``xrange``); returns None — callers
    must read the result from *nextMap*.
    """
    newMap = []
    if limitFile-1 >= 0:
        # Rows above the one being grown restart as a lone sentinel.
        for i in range(limitFile-1):
            newMap.append('s')
        # print "prev", limitFile-1,limitColumn
        # print "diag", limitFile, limitColumn
        # print "bot", limitFile,limitColumn+1
        prevC = mapa[limitFile-1][limitColumn]
        diagC = mapa[limitFile][limitColumn]
        botC = mapa[limitFile][limitColumn+1]
        key = prevC+diagC+botC
        nextI = getMaxProbabilityNetwork3(key,probabilities)
        nextC = uT[nextI]
        nextString = mapa[limitFile-1]+nextC
        newMap.append(nextString);
        # Rows at and below limitFile are carried over unchanged.
        if limitFile < len(mapa)-1:
            for i in xrange(limitFile,len(mapa)):
                nextString = mapa[i]
                newMap.append(nextString)
        else:
            nextString = mapa[limitFile]
            newMap.append(nextString)
        # Top data row finished its last column: emit the map.
        if limitColumn == len(mapa[limitFile])-2 and limitFile == 1:
            # file = open(path, "w+")
            # deleteContent(file)
            for nm in newMap:
                nextMap.append(nm)
                # file.write(nm+'\n')
        if limitColumn+1 < len(mapa[-1])-1:
            writingRecursionNetwork3(path,newMap,nextMap,limitFile,limitColumn+1,probabilities,uT)
        else:
            writingRecursionNetwork3(path,newMap,nextMap,limitFile-1,0,probabilities,uT)
    # file.close()/
def writingMapNetwork3(path, mapa, uT, probabilities, splitNumber):
    """Generate a new rH x rW map via the network-3 recursion and print it.

    NOTE(review): Python 2 only (``print`` statements).  Reopening the
    file with mode "w+" below truncates it, so the subsequent
    ``readlines()`` returns nothing and ``data`` stays empty — the
    sentinel scaffold written first is lost; confirm intent.  The file
    handles are also never closed.
    """
    file = open(path+mapa, "w+")
    newMap = []
    nextI = []
    data = []
    nextMap = []
    # Fixed output dimensions (rows x columns) of the sentinel scaffold.
    rH = 12
    rW = 200
    for i in range(0,rH):
        file.write("s\n")
        for j in range(0,rW):
            file.write("s")
    last_line = 's' * rW
    # NOTE(review): "w+" truncates — readlines() below yields no lines.
    file = open(path+mapa, "w+")
    for line in file.readlines():
        if line[-1] == '\n':
            data.append(line[:-1])
        else:
            data.append(line)
    sMaps = splitMaps(data,splitNumber)
    # Get the next characters
    for i,sM in enumerate(sMaps):
        if i <= splitNumber-2:
            aux = 's'+('s'*len(sMaps[-1][-1]))
            sM.append(aux)
        print "Split: ",i,"\n",probabilities[i],"\n"
        # writingRecursionNetwork3 returns None; results accumulate in nextMap.
        newMap = writingRecursionNetwork3(path+mapa,sM, nextMap,len(sM)-1,0, probabilities[i], uT)
        for nt in nextMap:
            print nt
        print '\n'
    for nt in nextMap:
        print nt
def writingMap(path, mapa, uT, probabilities, splitNumber):
    """Generate a map column-by-column using the pairwise (network-1)
    probabilities, rewriting the whole file after each added column.

    NOTE(review): Python 2 only (``xrange``).  Reopening with mode "w+"
    inside the loop truncates the file before ``readlines()``, so
    ``data`` is empty on every pass; also the final ``file.close`` is
    missing its parentheses, so the handle is never actually closed.
    """
    file = open(path+mapa, "w+")
    newMap = []
    nextI = []
    data = []
    # put sentinels
    # rH = random.randint(11,14)
    # rW = random.randint(150,201)
    rH = 4
    rW = 5
    for i in range(0,rH):
        file.write("s\n")
        for j in range(0,rW):
            file.write("s")
    last_line = 's' * rW
    # put the rest of the characteres
    for h in xrange(0,rW-1):
        # NOTE(review): "w+" truncates — readlines() yields no lines.
        file = open(path+mapa, "w+")
        for line in file.readlines():
            data.append(line[:-1])
        sMaps = splitMaps(data,splitNumber)
        for i,sM in enumerate(sMaps):
            if i < splitNumber-1:
                # Extend every row with its most probable successor tile.
                for line in sM:
                    character = line[-1]
                    if character in uT:
                        index = uT.index(character)
                        nextI = getMaxProbability(index, probabilities[i])
                        nextString = line + uT[nextI] + '\n'
                        newMap.append(nextString)
            else:
                # Last chunk: keep the sentinel bottom row fixed.
                for line in sM[:-1]:
                    character = line[-1]
                    if character in uT:
                        index = uT.index(character)
                        nextI = getMaxProbability(index, probabilities[i])
                        nextString = line + uT[nextI] + '\n'
                        newMap.append(nextString)
                newMap.append(last_line)
        deleteContent(file)
        for nM in newMap:
            file.write(nM)
        data = []
        newMap = []
    # NOTE(review): missing parentheses — this does not close the file.
    file.close
def sampling(path,mapas,uT, probabilities, splitNumber):
    """Generate the next map file (name derived from the last training
    map) using the learned network-3 probabilities."""
    mapa = nextMap(mapas)
    # writingMap(path, mapa, uT, probabilities, splitNumber)
    writingMapNetwork3(path,mapa,uT,probabilities,splitNumber)
def main():
    """Entry point: train on every map under *path*, then sample a new one.

    NOTE(review): the training-data path is hard-coded to one
    developer's machine; Python 2 only (``print`` statement).
    """
    path = '/Users/Cr1s/Documents/Tesis/VGLC/'
    mapas = obtainPaths(path)
    uT = []
    splitNumber = 1
    probabilities = training(path,mapas,uT, splitNumber)
    sampling(path,mapas,uT, probabilities,splitNumber)
    print uT
if __name__ == "__main__":
    main()
| |
#!/usr/bin/env python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import os.path
import shutil
import subprocess
import sys
# On mac, the values of these globals are modified when parsing -Wcrl, flags. On
# ios, the script uses the defaults.
# Default tool invocations; overridden at runtime by the
# -Wcrl,dsymutilpath and -Wcrl,strippath driver arguments.
DSYMUTIL_INVOKE = ['xcrun', 'dsymutil']
STRIP_INVOKE = ['xcrun', 'strip']
# Setting this flag will emit a deterministic binary by stripping dates from the
# N_OSO field.
DETERMINISTIC_FLAG = '--deterministic'
# The linker_driver.py is responsible for forwarding a linker invocation to
# the compiler driver, while processing special arguments itself.
#
# Usage: linker_driver.py clang++ main.o -L. -llib -o prog -Wcrl,dsym,out
#
# On Mac, the logical step of linking is handled by three discrete tools to
# perform the image link, debug info link, and strip. The linker_driver.py
# combines these three steps into a single tool.
#
# The command passed to the linker_driver.py should be the compiler driver
# invocation for the linker. It is first invoked unaltered (except for the
# removal of the special driver arguments, described below). Then the driver
# performs additional actions, based on these arguments:
#
# -Wcrl,dsym,<dsym_path_prefix>
# After invoking the linker, this will run `dsymutil` on the linker's
# output, producing a dSYM bundle, stored at dsym_path_prefix. As an
# example, if the linker driver were invoked with:
# "... -o out/gn/obj/foo/libbar.dylib ... -Wcrl,dsym,out/gn ..."
# The resulting dSYM would be out/gn/libbar.dylib.dSYM/.
#
# -Wcrl,dsymutilpath,<dsymutil_path>
# Sets the path to the dsymutil to run with -Wcrl,dsym, in which case
# `xcrun` is not used to invoke it.
#
# -Wcrl,unstripped,<unstripped_path_prefix>
# After invoking the linker, and before strip, this will save a copy of
# the unstripped linker output in the directory unstripped_path_prefix.
#
# -Wcrl,strip,<strip_arguments>
# After invoking the linker, and optionally dsymutil, this will run
# the strip command on the linker's output. strip_arguments are
# comma-separated arguments to be passed to the strip command.
#
# -Wcrl,strippath,<strip_path>
# Sets the path to the strip to run with -Wcrl,strip, in which case
# `xcrun` is not used to invoke it.
def Main(args):
  """Main function for the linker driver. Separates out the arguments for
  the main compiler driver and the linker driver, then invokes all the
  required tools.

  Args:
    args: list of string, Arguments to the script.

  Raises:
    RuntimeError: if no linker invocation was supplied.
  """
  if len(args) < 2:
    raise RuntimeError("Usage: linker_driver.py [linker-invocation]")

  # Collect arguments to the linker driver (this script) and remove them from
  # the arguments being passed to the compiler driver.
  linker_driver_actions = {}
  compiler_driver_args = []
  deterministic = False
  for arg in args[1:]:
    if arg.startswith(_LINKER_DRIVER_ARG_PREFIX):
      # Convert driver actions into a map of name => lambda to invoke.
      driver_action = ProcessLinkerDriverArg(arg)
      assert driver_action[0] not in linker_driver_actions
      linker_driver_actions[driver_action[0]] = driver_action[1]
    elif arg == DETERMINISTIC_FLAG:
      deterministic = True
    else:
      compiler_driver_args.append(arg)

  linker_driver_outputs = [_FindLinkerOutput(compiler_driver_args)]

  try:
    # Zero the mtime in OSO fields for deterministic builds.
    # https://crbug.com/330262.
    env = os.environ.copy()
    if deterministic:
      env['ZERO_AR_DATE'] = '1'
    # Run the linker by invoking the compiler driver.
    subprocess.check_call(compiler_driver_args, env=env)
    # Run the linker driver actions, in the order specified by the actions
    # list.
    for action in _LINKER_DRIVER_ACTIONS:
      name = action[0]
      if name in linker_driver_actions:
        linker_driver_outputs += linker_driver_actions[name](args)
  except:
    # If a linker driver action failed, remove all the outputs to make the
    # build step atomic. Bug fix: the original used map(_RemovePath, ...),
    # which is a lazy iterator under Python 3 and therefore never deleted
    # anything; an explicit loop is eager on both Python 2 and 3.
    for output in linker_driver_outputs:
      _RemovePath(output)
    # Re-report the original failure.
    raise
def ProcessLinkerDriverArg(arg):
  """Parses one linker driver argument into an invokable action.

  Args:
    arg: string, The linker driver argument (must start with
        _LINKER_DRIVER_ARG_PREFIX).

  Returns:
    A 2-tuple:
      0: The driver action name, as in _LINKER_DRIVER_ACTIONS.
      1: A 1-ary lambda taking the full argument list passed to Main();
         calling it runs the matching driver action and returns that
         action's list of outputs.

  Raises:
    ValueError: if |arg| is not a recognized linker driver argument.
  """
  if not arg.startswith(_LINKER_DRIVER_ARG_PREFIX):
    raise ValueError('%s is not a linker driver argument' % (arg,))

  sub_arg = arg[len(_LINKER_DRIVER_ARG_PREFIX):]

  # First action whose name prefixes the sub-argument wins; list order
  # resolves overlapping prefixes (e.g. dsymutilpath before dsym).
  for name, action in _LINKER_DRIVER_ACTIONS:
    if sub_arg.startswith(name):
      sub_value = sub_arg[len(name):]
      return (name, lambda full_args: action(sub_value, full_args))

  raise ValueError('Unknown linker driver argument: %s' % (arg,))
def RunDsymUtil(dsym_path_prefix, full_args):
  """Linker driver action for -Wcrl,dsym,<dsym-path-prefix>.

  Produces a dSYM bundle for the linker output underneath
  |dsym_path_prefix|, replacing any stale bundle already there.

  Args:
    dsym_path_prefix: string, Directory in which to place the dSYM.
    full_args: list of string, Full argument list for the linker driver.

  Returns:
    list of string, Build step outputs.
  """
  if not len(dsym_path_prefix):
    raise ValueError('Unspecified dSYM output file')

  linker_out = _FindLinkerOutput(full_args)
  dsym_out = os.path.join(dsym_path_prefix,
                          os.path.basename(linker_out) + '.dSYM')

  # Remove old dSYMs before invoking dsymutil.
  _RemovePath(dsym_out)
  subprocess.check_call(DSYMUTIL_INVOKE + ['-o', dsym_out, linker_out])
  return [dsym_out]
def SetDsymutilPath(dsymutil_path, full_args):
  """Linker driver action for -Wcrl,dsymutilpath,<dsymutil_path>.

  Replaces the global dsymutil invocation with |dsymutil_path|, so
  `xcrun` is not used. Always processed before the RunDsymUtil action.

  Args:
    dsymutil_path: string, The path to the dsymutil binary to run
    full_args: list of string, Full argument list for the linker driver.

  Returns:
    Empty list; this step runs purely for its side effect.
  """
  global DSYMUTIL_INVOKE
  DSYMUTIL_INVOKE = [dsymutil_path]
  return []
def RunSaveUnstripped(unstripped_path_prefix, full_args):
  """Linker driver action for -Wcrl,unstripped,<unstripped_path_prefix>.

  Saves a copy of the (not yet stripped) linker output into the
  |unstripped_path_prefix| directory.

  Args:
    unstripped_path_prefix: string, Directory for the unstripped copy.
    full_args: list of string, Full argument list for the linker driver.

  Returns:
    list of string, Build step outputs.
  """
  if not len(unstripped_path_prefix):
    raise ValueError('Unspecified unstripped output file')

  linker_out = _FindLinkerOutput(full_args)
  unstripped_out = os.path.join(unstripped_path_prefix,
                                os.path.basename(linker_out) + '.unstripped')

  shutil.copyfile(linker_out, unstripped_out)
  return [unstripped_out]
def RunStrip(strip_args_string, full_args):
  """Linker driver action for -Wcrl,strip,<strip_arguments>.

  Runs `strip` over the linker output with the given comma-separated
  arguments.

  Args:
    strip_args_string: string, Comma-separated arguments for `strip`.
    full_args: list of string, Full arguments for the linker driver.

  Returns:
    Empty list; `strip` rewrites the linker output in place.
  """
  strip_command = list(STRIP_INVOKE)
  if strip_args_string:
    strip_command.extend(strip_args_string.split(','))
  strip_command.append(_FindLinkerOutput(full_args))
  subprocess.check_call(strip_command)
  return []
def SetStripPath(strip_path, full_args):
  """Linker driver action for -Wcrl,strippath,<strip_path>.

  Replaces the global strip invocation with |strip_path|, so `xcrun` is
  not used. Always processed before the RunStrip action.

  Args:
    strip_path: string, The path to the strip binary to run
    full_args: list of string, Full argument list for the linker driver.

  Returns:
    Empty list; this step runs purely for its side effect.
  """
  global STRIP_INVOKE
  STRIP_INVOKE = [strip_path]
  return []
def _FindLinkerOutput(full_args):
"""Finds the output of the linker by looking for the output flag in its
argument list. As this is a required linker argument, raises an error if it
cannot be found.
"""
# The linker_driver.py script may be used to wrap either the compiler linker
# (uses -o to configure the output) or lipo (uses -output to configure the
# output). Since wrapping the compiler linker is the most likely possibility
# use try/except and fallback to checking for -output if -o is not found.
try:
output_flag_index = full_args.index('-o')
except ValueError:
output_flag_index = full_args.index('-output')
return full_args[output_flag_index + 1]
def _RemovePath(path):
"""Removes the file or directory at |path| if it exists."""
if os.path.exists(path):
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.unlink(path)
# Prefix that marks an argument as addressed to this driver, not the linker.
_LINKER_DRIVER_ARG_PREFIX = '-Wcrl,'

"""List of linker driver actions. The sort order of this list affects the
order in which the actions are invoked. The first item in the tuple is the
argument's -Wcrl,<sub_argument> and the second is the function to invoke.
"""
# Note: 'dsymutilpath,' must stay ahead of 'dsym,' because
# ProcessLinkerDriverArg picks the first name that prefix-matches.
_LINKER_DRIVER_ACTIONS = [
    ('dsymutilpath,', SetDsymutilPath),
    ('dsym,', RunDsymUtil),
    ('unstripped,', RunSaveUnstripped),
    ('strip,', RunStrip),
    ('strippath,', SetStripPath),
]
if __name__ == '__main__':
  Main(sys.argv)
  # Main() raises on any failure, so reaching this line means success.
  sys.exit(0)
| |
# coding:utf-8
import sys
import csv
import json
import os
import FuelSDK
import et_objects
from getpass import getpass
# Location of the FuelSDK INI config written by Commands.configure().
CONFIG_PATH = '~/.fuelsdk/config.python'
class Commands(object):
    """CLI command implementations for talking to ExactTarget via FuelSDK.

    Most commands write CSV to stdout; ``authenticate`` must be called
    before any command that touches the API.  NOTE(review): Python 2
    only (``raw_input`` and byte-string ``encode`` usage throughout).
    """
    def authenticate(self, client_id=None, client_secret=None, debug=False):
        # Build the FuelSDK client. With no explicit credentials the SDK
        # falls back to its on-disk config file (see CONFIG_PATH).
        if client_id is None or client_secret is None:
            self.client = FuelSDK.ET_Client(debug=debug)
        else:
            self.client = FuelSDK.ET_Client(
                params={
                    'clientid': client_id,
                    'clientsecret': client_secret
                }, debug=debug)
    def configure(self, args):
        """Interactively write the FuelSDK config file, asking before
        overwriting an existing one."""
        fuelsdk_config = os.path.expanduser(CONFIG_PATH)
        if (
            os.path.isfile(fuelsdk_config) and
            raw_input('Do you want to overwrite {} ?(y/n)'.format(CONFIG_PATH)) != 'y'
        ):
            return
        client_id = raw_input('Input Your ExactTarget Client ID: ')
        client_secret = getpass('Input Your ExactTarget Client Secret: ')
        fuelsdk_dir = os.path.expanduser('~/.fuelsdk')
        if not os.path.isdir(fuelsdk_dir):
            os.mkdir(fuelsdk_dir)
        # NOTE(review): the handle is never closed/flushed explicitly.
        f = open(fuelsdk_config, 'w')
        f.write("""[Web Services]
appsignature: none
clientid: {0}
clientsecret: {1}
defaultwsdl: https://webservice.exacttarget.com/etframework.wsdl
authenticationurl: https://auth.exacttargetapis.com/v1/requestToken?legacy=1""".format(client_id, client_secret))
    def describe_de_command(self, args):
        """CLI wrapper: print describe_de() output as JSON."""
        fields = self.describe_de(args)
        print(json.dumps(fields))
    def describe_de(self, args):
        """
        Describe a data extension's columns.

        :param args: parsed CLI args; ``args.customer_key`` identifies
            the data extension.
        :return: list of dicts, one per column, restricted to the field
            names in ``de_target_fields``.
        """
        de_target_fields = [
            "Name",
            "CustomerKey",
            "DefaultValue",
            "FieldType",
            "Scale",
            "MaxLength",
            "IsPrimaryKey",
            "IsRequired",
        ]
        deColumn = FuelSDK.ET_DataExtension_Column()
        deColumn.auth_stub = self.client
        deColumn.props = de_target_fields
        deColumn.search_filter = {
            'Property': 'DataExtension.CustomerKey',
            'SimpleOperator': 'equals',
            'Value': args.customer_key
        }
        response = deColumn.get()
        return [
            self.convert_field_to_dict(result, de_target_fields)
            for result in response.results
        ]
    def convert_field_to_dict(self, field, target_fields):
        # Copy only the attributes named in target_fields that actually
        # exist on the SOAP result object.
        converted_dict = {}
        for field_name in target_fields:
            if hasattr(field, field_name):
                converted_dict[field_name] = getattr(field, field_name)
        return converted_dict
    def retrieve_de(self, args):
        """
        Write all rows of a data extension as CSV to stdout.

        :param args: parsed CLI args; ``args.customer_key`` identifies
            the data extension.
        :return: None.  NOTE(review): unlike the event commands below,
            only the first page of results is fetched (no
            ``more_results`` loop) — confirm.
        """
        fields = self.describe_de(args)
        row = FuelSDK.ET_DataExtension_Row()
        row.auth_stub = self.client
        row.CustomerKey = args.customer_key
        row.props = [field['Name'] for field in fields]
        response = row.get()
        writer = csv.writer(
            sys.stdout, quoting=csv.QUOTE_ALL, lineterminator='\n')
        writer.writerow(row.props)
        for result in response.results:
            row = []
            for prop in result.Properties[0]:
                if prop.Value is None:
                    row.append("")
                else:
                    row.append(prop.Value.encode("utf-8"))
            writer.writerow(row)
    def describe_all_de(self, args):
        """
        List every data extension as CSV (Name, CustomerKey, ObjectID)
        on stdout.

        :param args: parsed CLI args (unused).
        :return: None.
        """
        de = FuelSDK.ET_DataExtension()
        de.auth_stub = self.client
        de.props = ["Name", "CustomerKey", "ObjectID"]
        response = de.get()
        writer = csv.writer(
            sys.stdout, quoting=csv.QUOTE_ALL, lineterminator='\n')
        writer.writerow(de.props)
        for result in response.results:
            writer.writerow([
                result.Name.encode("utf-8"),
                result.CustomerKey.encode("utf-8"),
                result.ObjectID.encode("utf-8")
            ])
    def retrieve_subs(self, args):
        """
        Write all subscribers as CSV to stdout.

        The attribute columns are taken from the first result;
        NOTE(review): a subscriber lacking one of those attributes would
        raise KeyError in the ``field_map`` lookup — confirm the API
        always returns a uniform attribute set.

        :param args: parsed CLI args (unused).
        :return: None.
        """
        getSub = FuelSDK.ET_Subscriber()
        getSub.auth_stub = self.client
        response = getSub.get()
        attributes = []
        if (hasattr(response.results[0], 'Attributes')):
            attributes = [
                attr.Name.encode("utf-8")
                for attr in response.results[0].Attributes
            ]
        writer = csv.writer(
            sys.stdout, quoting=csv.QUOTE_ALL, lineterminator='\n')
        header = ["SubscriberID", "EmailAddress", "SubscriberKey"]
        header.extend(attributes)
        writer.writerow(header)
        for result in response.results:
            field_map = {}
            if (hasattr(result, 'Attributes')):
                for field in result.Attributes:
                    field_map[field.Name] = field.Value
            fields = [result.ID, result.EmailAddress, result.SubscriberKey]
            for attribute in attributes:
                val = field_map[attribute]
                if val is None:
                    fields.append("")
                else:
                    fields.append(val.encode("utf-8"))
            writer.writerow(fields)
    def retrieve_triggeredsend(self, args):
        """
        Look up a triggered send by customer key.

        :param args: parsed CLI args; ``args.customer_key`` identifies
            the triggered send.
        :return: the ObjectID of the first match, or "" when none.
        """
        getTS = FuelSDK.ET_TriggeredSend()
        getTS.auth_stub = self.client
        getTS.props = [
            "CustomerKey",
            "Name",
            "TriggeredSendStatus",
            "ObjectID"
        ]
        getTS.search_filter = {
            'Property': 'CustomerKey',
            'SimpleOperator': 'equals',
            'Value': args.customer_key
        }
        getResponse = getTS.get()
        for result in getResponse.results:
            return result.ObjectID
        return ""
    def retrieve_sentevent(self, args):
        """
        Write all sent events of a triggered send as CSV to stdout,
        paging through results via getMoreResults().

        NOTE(review): the header says "SubscriberID" but each row emits
        ``result.SubscriberKey`` — confirm which is intended.

        :param args: parsed CLI args; ``args.customer_key`` identifies
            the triggered send.
        :return: None.
        """
        triggeredSendDefinitionObjectID = self.retrieve_triggeredsend(args)
        getSentEvent = FuelSDK.ET_SentEvent()
        getSentEvent.auth_stub = self.client
        getSentEvent.props = [
            "SendID",
            "SubscriberKey",
            "EventDate",
            "Client.ID",
            "EventType",
            "BatchID",
            "TriggeredSendDefinitionObjectID",
            "ListID",
            "PartnerKey",
            "SubscriberID"
        ]
        getSentEvent.search_filter = {
            'Property': 'TriggeredSendDefinitionObjectID',
            'SimpleOperator': 'equals',
            'Value': triggeredSendDefinitionObjectID
        }
        getResponse = getSentEvent.get()
        writer = csv.writer(
            sys.stdout, quoting=csv.QUOTE_ALL, lineterminator='\n')
        writer.writerow(["EventDate", "SubscriberID"])
        for result in getResponse.results:
            writer.writerow([result.EventDate, result.SubscriberKey])
        while getResponse.more_results:
            getResponse = getSentEvent.getMoreResults()
            for result in getResponse.results:
                writer.writerow([result.EventDate, result.SubscriberKey])
    def retrieve_openevent(self, args):
        """
        Write all open events of a triggered send as CSV to stdout,
        paging through results via getMoreResults().

        NOTE(review): header/row mismatch as in retrieve_sentevent.

        :param args: parsed CLI args; ``args.customer_key`` identifies
            the triggered send.
        :return: None.
        """
        triggeredSendDefinitionObjectID = self.retrieve_triggeredsend(args)
        getOpenEvent = FuelSDK.ET_OpenEvent()
        getOpenEvent.auth_stub = self.client
        getOpenEvent.props = [
            "SendID",
            "SubscriberKey",
            "EventDate",
            "Client.ID",
            "EventType",
            "BatchID",
            "TriggeredSendDefinitionObjectID",
            "ListID",
            "PartnerKey",
            "SubscriberID"
        ]
        getOpenEvent.search_filter = {
            'Property': 'TriggeredSendDefinitionObjectID',
            'SimpleOperator': 'equals',
            'Value': triggeredSendDefinitionObjectID
        }
        getResponse = getOpenEvent.get()
        writer = csv.writer(
            sys.stdout, quoting=csv.QUOTE_ALL, lineterminator='\n')
        writer.writerow(["EventDate", "SubscriberID"])
        for result in getResponse.results:
            writer.writerow([result.EventDate, result.SubscriberKey])
        while getResponse.more_results:
            getResponse = getOpenEvent.getMoreResults()
            for result in getResponse.results:
                writer.writerow([result.EventDate, result.SubscriberKey])
    def retrieve_bounceevent(self, args):
        """
        Write all bounce events of a triggered send as CSV to stdout,
        paging through results via getMoreResults().

        NOTE(review): header/row mismatch as in retrieve_sentevent.

        :param args: parsed CLI args; ``args.customer_key`` identifies
            the triggered send.
        :return: None.
        """
        triggeredSendDefinitionObjectID = self.retrieve_triggeredsend(args)
        getBounceEvent = FuelSDK.ET_BounceEvent()
        getBounceEvent.auth_stub = self.client
        getBounceEvent.props = [
            "SendID",
            "SubscriberKey",
            "EventDate",
            "Client.ID",
            "EventType",
            "BatchID",
            "TriggeredSendDefinitionObjectID",
            "ListID",
            "PartnerKey",
            "SubscriberID"
        ]
        getBounceEvent.search_filter = {
            'Property': 'TriggeredSendDefinitionObjectID',
            'SimpleOperator': 'equals',
            'Value': triggeredSendDefinitionObjectID
        }
        getResponse = getBounceEvent.get()
        writer = csv.writer(
            sys.stdout, quoting=csv.QUOTE_ALL, lineterminator='\n')
        writer.writerow(["EventDate", "SubscriberID"])
        for result in getResponse.results:
            writer.writerow([result.EventDate, result.SubscriberKey])
        while getResponse.more_results:
            getResponse = getBounceEvent.getMoreResults()
            for result in getResponse.results:
                writer.writerow([result.EventDate, result.SubscriberKey])
    def create_de_row(self, args):
        """
        Create a data extension row and print the API result as JSON.

        :param args: parsed CLI args; uses ``args.customer_key``,
            ``args.attribute_file`` and ``args.attributes_json``.
        :return: None.  NOTE(review): ``args.attribute_file`` is read
            into ``args.attributes`` but the row props are parsed from
            ``args.attributes_json`` instead — one of the two is likely
            dead or wrong; confirm against the CLI argument parser.
        """
        deRow = FuelSDK.ET_DataExtension_Row()
        deRow.CustomerKey = args.customer_key
        deRow.auth_stub = self.client
        args.attributes = json.loads(args.attribute_file.read())
        deRow.props = json.loads(args.attributes_json)
        deRowResponse = deRow.post()
        print(json.dumps(deRowResponse.results))
    def triggered_send(self, args):
        """Fire a triggered send for one subscriber and print the
        per-result status list as JSON.  Attributes come from
        ``args.attribute_file`` (JSON) when provided."""
        sendTrig = FuelSDK.ET_TriggeredSend()
        sendTrig.auth_stub = self.client
        sendTrig.props = {"CustomerKey": args.customer_key}
        if args.attribute_file is None:
            attributes = {}
        else:
            attributes = json.loads(args.attribute_file.read())
        sendTrig.subscribers = [{
            "EmailAddress": args.email,
            "SubscriberKey": args.subscriber_key,
        }]
        sendTrig.attributes = [{"Name": key, "Value": val} for key, val in attributes.items()]
        sendResponse = sendTrig.send()
        print(json.dumps([{
            "StatusCode": result.StatusCode,
            "StatusMessage": result.StatusMessage,
            "OrdinalID": result.OrdinalID,
            "NewID": result.NewID,
            "ErrorCode": result.ErrorCode if hasattr(result, "ErrorCode") else None,
        } for result in sendResponse.results]))
    def push_message(self, args):
        """Send a push message to the given subscribers/devices and print
        the API result as JSON.  Extra payload fields are merged from
        ``args.additional_params`` or, failing that, stdin."""
        pushMessageContact = et_objects.ET_PushMessageContact()
        pushMessageContact.auth_stub = self.client
        pushMessageContact.props = {
            "messageId": args.message_id,
            "SubscriberKeys": args.subscriber_keys,
            "DeviceTokens": args.device_tokens
        }
        if args.is_override:
            pushMessageContact.props['Override'] = True
        input_data = args.additional_params if args.additional_params is not None else sys.stdin.read()
        pushMessageContact.props.update(json.loads(input_data))
        pushMessageContactResponse = pushMessageContact.post()
        print(json.dumps(pushMessageContactResponse.results))
    def fire_event(self, args):
        """Fire an interaction (journey) event for a contact and print the
        API result as JSON.  Event data is read from ``args.data_file``."""
        postInteractionEvent = et_objects.ET_InteractionEvents()
        postInteractionEvent.auth_stub = self.client
        postInteractionEvent.props = {
            "ContactKey": args.subscriber_key,
            "EventDefinitionKey": args.event_definition_key,
            "Data": json.loads(args.data_file.read())
        }
        postInteractionEventResponse = postInteractionEvent.post()
        print(json.dumps(postInteractionEventResponse.results))
| |
"""
Min-heaps.
"""
__author__ = """ysitu <ysitu@users.noreply.github.com>"""
# Copyright (C) 2014 ysitu <ysitu@users.noreply.github.com>
# All rights reserved.
# BSD license.
from heapq import heappop, heappush
from itertools import count
import networkx as nx
__all__ = ['MinHeap', 'PairingHeap', 'BinaryHeap']
class MinHeap(object):
    """Base class for min-heaps.

    A MinHeap stores a collection of key-value pairs ordered by their values.
    It supports querying the minimum pair, inserting a new pair, decreasing the
    value in an existing pair and deleting the minimum pair.
    """

    class _Item(object):
        """Used by subclasses to represent a key-value pair.
        """
        __slots__ = ('key', 'value')

        def __init__(self, key, value):
            self.key = key
            self.value = value

        def __repr__(self):
            return repr((self.key, self.value))

    def __init__(self):
        """Initialize a new min-heap.
        """
        # Maps key -> stored entry; what is stored is up to the subclass.
        self._dict = {}

    def min(self):
        """Query the minimum key-value pair.

        Returns
        -------
        key, value : tuple
            The key-value pair with the minimum value in the heap.

        Raises
        ------
        NetworkXError
            If the heap is empty.
        """
        raise NotImplementedError

    def pop(self):
        """Delete the minimum pair in the heap.

        Returns
        -------
        key, value : tuple
            The key-value pair with the minimum value in the heap.

        Raises
        ------
        NetworkXError
            If the heap is empty.
        """
        raise NotImplementedError

    def get(self, key, default=None):
        """Returns the value associated with a key.

        Parameters
        ----------
        key : hashable object
            The key to be looked up.

        default : object
            Default value to return if the key is not present in the heap.
            Default value: None.

        Returns
        -------
        value : object.
            The value associated with the key.
        """
        raise NotImplementedError

    def insert(self, key, value, allow_increase=False):
        """Insert a new key-value pair or modify the value in an existing
        pair.

        Parameters
        ----------
        key : hashable object
            The key.

        value : object comparable with existing values.
            The value.

        allow_increase : bool
            Whether the value is allowed to increase. If False, attempts to
            increase an existing value have no effect. Default value: False.

        Returns
        -------
        decreased : bool
            True if a pair is inserted or the existing value is decreased.
        """
        raise NotImplementedError

    def __nonzero__(self):
        """Returns whether the heap is nonempty (Python 2 truth protocol).
        """
        return bool(self._dict)

    def __bool__(self):
        """Returns whether the heap is nonempty (Python 3 truth protocol).
        """
        return bool(self._dict)

    def __len__(self):
        """Returns the number of key-value pairs in the heap.
        """
        return len(self._dict)

    def __contains__(self, key):
        """Returns whether a key exists in the heap.

        Parameters
        ----------
        key : any hashable object.
            The key to be looked up.
        """
        return key in self._dict
def _inherit_doc(cls):
"""Decorator for inheriting docstrings from base classes.
"""
def func(fn):
fn.__doc__ = cls.__dict__[fn.__name__].__doc__
return fn
return func
class PairingHeap(MinHeap):
"""A pairing heap.
"""
class _Node(MinHeap._Item):
"""A node in a pairing heap.
A tree in a pairing heap is stored using the left-child, right-sibling
representation.
"""
__slots__ = ('left', 'next', 'prev', 'parent')
def __init__(self, key, value):
super(PairingHeap._Node, self).__init__(key, value)
# The leftmost child.
self.left = None
# The next sibling.
self.next = None
# The previous sibling.
self.prev = None
# The parent.
self.parent = None
def __init__(self):
"""Initialize a pairing heap.
"""
super(PairingHeap, self).__init__()
self._root = None
@_inherit_doc(MinHeap)
def min(self):
if self._root is None:
raise nx.NetworkXError('heap is empty.')
return (self._root.key, self._root.value)
@_inherit_doc(MinHeap)
def pop(self):
if self._root is None:
raise nx.NetworkXError('heap is empty.')
min_node = self._root
self._root = self._merge_children(self._root)
del self._dict[min_node.key]
return (min_node.key, min_node.value)
@_inherit_doc(MinHeap)
def get(self, key, default=None):
node = self._dict.get(key)
return node.value if node is not None else default
@_inherit_doc(MinHeap)
def insert(self, key, value, allow_increase=False):
node = self._dict.get(key)
root = self._root
if node is not None:
if value < node.value:
node.value = value
if node is not root and value < node.parent.value:
self._cut(node)
self._root = self._link(root, node)
return True
elif allow_increase and value > node.value:
node.value = value
child = self._merge_children(node)
# Nonstandard step: Link the merged subtree with the root. See
# below for the standard step.
if child is not None:
self._root = self._link(self._root, child)
# Standard step: Perform a decrease followed by a pop as if the
# value were the smallest in the heap. Then insert the new
# value into the heap.
# if node is not root:
# self._cut(node)
# if child is not None:
# root = self._link(root, child)
# self._root = self._link(root, node)
# else:
# self._root = (self._link(node, child)
# if child is not None else node)
return False
else:
# Insert a new key.
node = self._Node(key, value)
self._dict[key] = node
self._root = self._link(root, node) if root is not None else node
return True
def _link(self, root, other):
"""Link two nodes, making the one with the smaller value the parent of
the other.
"""
if other.value < root.value:
root, other = other, root
next = root.left
other.next = next
if next is not None:
next.prev = other
other.prev = None
root.left = other
other.parent = root
return root
def _merge_children(self, root):
    """Merge the subtrees of the root using the standard two-pass method.

    The resulting subtree is detached from the root and returned;
    returns None when the root has no children.
    """
    node = root.left
    root.left = None
    if node is not None:
        link = self._link
        # Pass 1: Merge pairs of consecutive subtrees from left to right.
        # At the end of the pass, only the prev pointers of the resulting
        # subtrees have meaningful values. The other pointers will be fixed
        # in pass 2.
        prev = None
        while True:
            next = node.next
            if next is None:
                # Odd subtree out: it enters pass 2 unmerged.
                node.prev = prev
                break
            next_next = next.next
            node = link(node, next)
            node.prev = prev
            prev = node
            if next_next is None:
                break
            node = next_next
        # Pass 2: Successively merge the subtrees produced by pass 1 from
        # right to left with the rightmost one.
        prev = node.prev
        while prev is not None:
            prev_prev = prev.prev
            node = link(prev, node)
            prev = prev_prev
        # Now node can become the new root. It has no parent nor siblings.
        node.prev = None
        node.next = None
        node.parent = None
    return node
def _cut(self, node):
"""Cut a node from its parent.
"""
prev = node.prev
next = node.next
if prev is not None:
prev.next = next
else:
node.parent.left = next
node.prev = None
if next is not None:
next.prev = prev
node.next = None
node.parent = None
class BinaryHeap(MinHeap):
    """A binary heap with lazy deletion of stale entries.

    Key-value pairs live in a ``heapq`` list alongside a mirror
    dictionary holding the current value for each key.  Updating a key
    pushes a fresh heap entry instead of locating the old one; outdated
    entries are simply discarded when they surface at the top.
    """

    def __init__(self):
        """Initialize an empty binary heap."""
        super(BinaryHeap, self).__init__()
        self._heap = []
        # Monotonic tie-breaker so heap entries never compare by key.
        self._count = count()

    @_inherit_doc(MinHeap)
    def min(self):
        live = self._dict
        if not live:
            raise nx.NetworkXError('heap is empty')
        heap = self._heap
        discard = heappop
        # Drop stale entries until the heap top agrees with the dict.
        while True:
            value, _, key = heap[0]
            if key in live and live[key] == value:
                return (key, value)
            discard(heap)

    @_inherit_doc(MinHeap)
    def pop(self):
        live = self._dict
        if not live:
            raise nx.NetworkXError('heap is empty')
        heap = self._heap
        discard = heappop
        # Pop entries until an up-to-date one is found, then remove it
        # from the dictionary as well.
        while True:
            value, _, key = heap[0]
            discard(heap)
            if key in live and live[key] == value:
                del live[key]
                return (key, value)

    @_inherit_doc(MinHeap)
    def get(self, key, default=None):
        # The dictionary always holds the authoritative current value.
        return self._dict.get(key, default)

    @_inherit_doc(MinHeap)
    def insert(self, key, value, allow_increase=False):
        live = self._dict
        if key not in live:
            # New key: record it and push a matching heap entry.
            live[key] = value
            heappush(self._heap, (value, next(self._count), key))
            return True
        old_value = live[key]
        decreased = value < old_value
        if decreased or (allow_increase and value > old_value):
            # Since there is no way to efficiently obtain the location of
            # a key-value pair in the heap, insert a new pair even if ones
            # with the same key may already be present.  The old ones are
            # deemed stale and skipped when the minimum pair is queried.
            live[key] = value
            heappush(self._heap, (value, next(self._count), key))
        return decreased
| |
from __future__ import absolute_import
import logging
import os
import re
from pip._internal.models.link import Link
from pip._internal.utils.logging import indent_log
from pip._internal.utils.misc import (
display_path, rmtree, split_auth_from_netloc,
)
from pip._internal.vcs import VersionControl, vcs
_svn_xml_url_re = re.compile('url="([^"]+)"')
_svn_rev_re = re.compile(r'committed-rev="(\d+)"')
_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"')
_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>')
logger = logging.getLogger(__name__)
class Subversion(VersionControl):
    """pip VCS backend for Subversion (``svn+`` URLs and .svn checkouts)."""
    name = 'svn'
    dirname = '.svn'
    repo_name = 'checkout'
    schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn')

    def get_base_rev_args(self, rev):
        # svn addresses a specific revision with ``-r REV``.
        return ['-r', rev]

    def export(self, location):
        """Export the svn repository at the url to the destination location"""
        url, rev_options = self.get_url_rev_options(self.url)
        logger.info('Exporting svn repository %s to %s', url, location)
        with indent_log():
            if os.path.exists(location):
                # Subversion doesn't like to check out over an existing
                # directory --force fixes this, but was only added in svn 1.5
                rmtree(location)
            cmd_args = ['export'] + rev_options.to_args() + [url, location]
            self.run_command(cmd_args, show_stdout=False)

    def fetch_new(self, dest, url, rev_options):
        """Check out ``url`` at the requested revision into ``dest``."""
        rev_display = rev_options.to_display()
        logger.info(
            'Checking out %s%s to %s',
            url,
            rev_display,
            display_path(dest),
        )
        cmd_args = ['checkout', '-q'] + rev_options.to_args() + [url, dest]
        self.run_command(cmd_args)

    def switch(self, dest, url, rev_options):
        # Repoint an existing checkout at a different URL.
        cmd_args = ['switch'] + rev_options.to_args() + [url, dest]
        self.run_command(cmd_args)

    def update(self, dest, url, rev_options):
        # Bring an existing checkout up to the requested revision.
        cmd_args = ['update'] + rev_options.to_args() + [dest]
        self.run_command(cmd_args)

    def get_location(self, dist, dependency_links):
        """Return the first dependency link whose egg fragment matches
        ``dist``'s key (stripped of its fragment), or None.
        """
        for url in dependency_links:
            egg_fragment = Link(url).egg_fragment
            if not egg_fragment:
                continue
            if '-' in egg_fragment:
                # FIXME: will this work when a package has - in the name?
                key = '-'.join(egg_fragment.split('-')[:-1]).lower()
            else:
                key = egg_fragment
            if key == dist.key:
                return url.split('#', 1)[0]
        return None

    def get_revision(self, location):
        """
        Return the maximum revision for all files under a given location
        """
        # Note: taken from setuptools.command.egg_info
        revision = 0
        for base, dirs, files in os.walk(location):
            if self.dirname not in dirs:
                dirs[:] = []
                continue  # no sense walking uncontrolled subdirs
            dirs.remove(self.dirname)
            entries_fn = os.path.join(base, self.dirname, 'entries')
            if not os.path.exists(entries_fn):
                # FIXME: should we warn?
                continue
            dirurl, localrev = self._get_svn_url_rev(base)
            if base == location:
                # NOTE(review): ``base`` is the os.walk loop variable, so
                # this assignment is overwritten on the next iteration —
                # presumably it was meant to persist the root url for the
                # ``startswith`` check below; confirm against upstream.
                base = dirurl + '/'  # save the root url
            elif not dirurl or not dirurl.startswith(base):
                dirs[:] = []
                continue  # not part of the same svn tree, skip it
            revision = max(revision, localrev)
        return revision

    def get_netloc_and_auth(self, netloc, scheme):
        """
        This override allows the auth information to be passed to svn via the
        --username and --password options instead of via the URL.
        """
        if scheme == 'ssh':
            # The --username and --password options can't be used for
            # svn+ssh URLs, so keep the auth information in the URL.
            return super(Subversion, self).get_netloc_and_auth(
                netloc, scheme)
        return split_auth_from_netloc(netloc)

    def get_url_rev_and_auth(self, url):
        # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it
        url, rev, user_pass = super(Subversion, self).get_url_rev_and_auth(url)
        if url.startswith('ssh://'):
            url = 'svn+' + url
        return url, rev, user_pass

    def make_rev_args(self, username, password):
        # Translate credentials into svn command-line options.
        extra_args = []
        if username:
            extra_args += ['--username', username]
        if password:
            extra_args += ['--password', password]
        return extra_args

    def get_url(self, location):
        """Return the remote URL of the checkout containing ``location``,
        or None when no enclosing directory contains a setup.py.
        """
        # In cases where the source is in a subdirectory, not alongside
        # setup.py we have to look up in the location until we find a real
        # setup.py
        orig_location = location
        while not os.path.exists(os.path.join(location, 'setup.py')):
            last_location = location
            location = os.path.dirname(location)
            if location == last_location:
                # We've traversed up to the root of the filesystem without
                # finding setup.py
                logger.warning(
                    "Could not find setup.py for directory %s (tried all "
                    "parent directories)",
                    orig_location,
                )
                return None
        return self._get_svn_url_rev(location)[0]

    def _get_svn_url_rev(self, location):
        """Return ``(url, rev)`` for the svn checkout at ``location``.

        Parses the legacy ``.svn/entries`` file (plain-text and XML
        variants) and falls back to ``svn info --xml`` for svn >= 1.7.
        """
        from pip._internal.exceptions import InstallationError
        entries_path = os.path.join(location, self.dirname, 'entries')
        if os.path.exists(entries_path):
            with open(entries_path) as f:
                data = f.read()
        else:  # subversion >= 1.7 does not have the 'entries' file
            data = ''
        if (data.startswith('8') or
                data.startswith('9') or
                data.startswith('10')):
            # Plain-text entries format: records separated by form feeds.
            data = list(map(str.splitlines, data.split('\n\x0c\n')))
            del data[0][0]  # get rid of the '8'
            url = data[0][3]
            revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0]
        elif data.startswith('<?xml'):
            # XML entries format used by older svn clients.
            match = _svn_xml_url_re.search(data)
            if not match:
                raise ValueError('Badly formatted data: %r' % data)
            url = match.group(1)  # get repository URL
            revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0]
        else:
            try:
                # subversion >= 1.7
                xml = self.run_command(
                    ['info', '--xml', location],
                    show_stdout=False,
                )
                url = _svn_info_xml_url_re.search(xml).group(1)
                revs = [
                    int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml)
                ]
            except InstallationError:
                url, revs = None, []
        if revs:
            rev = max(revs)
        else:
            rev = 0
        return url, rev

    def get_src_requirement(self, dist, location):
        """Return an ``svn+URL@rev#egg=name`` requirement string for the
        checkout at ``location``, or None when no URL can be determined.
        """
        repo = self.get_url(location)
        if repo is None:
            return None
        # FIXME: why not project name?
        egg_project_name = dist.egg_name().split('-', 1)[0]
        rev = self.get_revision(location)
        return 'svn+%s@%s#egg=%s' % (repo, rev, egg_project_name)

    def is_commit_id_equal(self, dest, name):
        """Always assume the versions don't match"""
        return False


# Make the backend discoverable through pip's VCS registry.
vcs.register(Subversion)
| |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import mock
from dashboard.common import testing_common
from dashboard.api import api_auth
from dashboard.pinpoint import test
from dashboard.pinpoint.handlers import cancel
from dashboard.pinpoint.models import job as job_module
from dashboard.pinpoint.models import scheduler
@mock.patch('dashboard.common.utils.ServiceAccountHttp', mock.MagicMock())
class CancelJobTest(test.TestCase):
    """Tests for the Pinpoint ``/api/job/cancel`` handler.

    Each test patches the auth helpers on ``cancel.utils`` (GetEmail,
    IsAdministrator, IsTryjobUser, IsAllowedToDelegate) to simulate a
    particular caller.  The issue tracker service is stubbed in setUp so
    that bug-comment side effects can be asserted on.
    """

    def setUp(self):
        super(CancelJobTest, self).setUp()
        self.SetCurrentUserOAuth(testing_common.INTERNAL_USER)
        self.SetCurrentClientIdOAuth(api_auth.OAUTH_CLIENT_ID_ALLOWLIST[0])
        # Stub the issue tracker so tests can check AddBugComment calls.
        self.add_bug_comment = mock.MagicMock()
        self.get_issue = mock.MagicMock()
        patcher = mock.patch(
            'dashboard.services.issue_tracker_service.IssueTrackerService')
        issue_tracker_service = patcher.start()
        issue_tracker_service.return_value = mock.MagicMock(
            AddBugComment=self.add_bug_comment, GetIssue=self.get_issue)
        self.addCleanup(patcher.stop)

    @mock.patch.object(cancel.utils, 'GetEmail',
                       mock.MagicMock(return_value='lovely.user@example.com'))
    @mock.patch.object(cancel.utils, 'IsAdministrator',
                       mock.MagicMock(return_value=False))
    @mock.patch.object(cancel.utils, 'IsTryjobUser',
                       mock.MagicMock(return_value=True))
    @mock.patch.object(cancel.utils, 'IsAllowedToDelegate',
                       mock.MagicMock(return_value=False))
    def testCancelKnownJobByOwner(self):
        # The job's owner may cancel it; the bug gets a comment.
        job = job_module.Job.New((), (), user='lovely.user@example.com', bug_id=123)
        scheduler.Schedule(job)
        self.Post(
            '/api/job/cancel', {
                'job_id': job.job_id,
                'reason': 'testing!'
            },
            status=200)
        job = job_module.JobFromId(job.job_id)
        self.assertTrue(job.cancelled)
        self.assertIn('lovely.user@example.com: testing!', job.cancel_reason)
        self.ExecuteDeferredTasks('default')
        self.assertTrue(self.add_bug_comment.called)

    @mock.patch.object(cancel.utils, 'GetEmail',
                       mock.MagicMock(return_value='an.administrator@example.com')
                       )
    @mock.patch.object(cancel.utils, 'IsAdministrator',
                       mock.MagicMock(return_value=True))
    @mock.patch.object(cancel.utils, 'IsTryjobUser',
                       mock.MagicMock(return_value=True))
    @mock.patch.object(cancel.utils, 'IsAllowedToDelegate',
                       mock.MagicMock(return_value=False))
    def testCancelKnownJobByAdmin(self):
        # An administrator may cancel a job they do not own.
        job = job_module.Job.New((), (), user='lovely.user@example.com', bug_id=123)
        scheduler.Schedule(job)
        self.Post(
            '/api/job/cancel', {
                'job_id': job.job_id,
                'reason': 'testing!'
            },
            status=200)
        job = job_module.JobFromId(job.job_id)
        self.assertTrue(job.cancelled)
        self.assertIn('an.administrator@example.com: testing!', job.cancel_reason)
        self.ExecuteDeferredTasks('default')
        self.assertTrue(self.add_bug_comment.called)

    @mock.patch.object(cancel.utils, 'GetEmail',
                       mock.MagicMock(return_value='lovely.user@example.com'))
    @mock.patch.object(cancel.utils, 'IsAdministrator',
                       mock.MagicMock(return_value=False))
    @mock.patch.object(cancel.utils, 'IsTryjobUser',
                       mock.MagicMock(return_value=True))
    @mock.patch.object(cancel.utils, 'IsAllowedToDelegate',
                       mock.MagicMock(return_value=False))
    def testCancelUnknownJob(self):
        # Cancelling a nonexistent job id yields a 404.
        job = job_module.Job.New((), (), user='lovely.user@example.com')
        scheduler.Schedule(job)
        self.addCleanup(scheduler.Cancel, job)
        self.Post(
            '/api/job/cancel', {
                'job_id': job.job_id + '1',
                'reason': 'testing!'
            },
            status=404)
        job = job_module.JobFromId(job.job_id + '1')
        self.assertIsNone(job)

    @mock.patch.object(cancel.utils, 'GetEmail',
                       mock.MagicMock(return_value='lovely.user@example.com'))
    @mock.patch.object(cancel.utils, 'IsAdministrator',
                       mock.MagicMock(return_value=False))
    @mock.patch.object(cancel.utils, 'IsTryjobUser',
                       mock.MagicMock(return_value=True))
    def testCancelCancelledJob(self):
        # Cancelling twice fails with 400 and keeps the original reason.
        job = job_module.Job.New((), (), user='lovely.user@example.com')
        scheduler.Schedule(job)
        self.Post(
            '/api/job/cancel', {
                'job_id': job.job_id,
                'reason': 'testing!'
            },
            status=200)
        job = job_module.JobFromId(job.job_id)
        self.assertTrue(job.cancelled)
        self.assertIn('lovely.user@example.com: testing!', job.cancel_reason)
        self.Post(
            '/api/job/cancel', {
                'job_id': job.job_id,
                'reason': 'cancelling again!'
            },
            status=400)
        job = job_module.JobFromId(job.job_id)
        self.assertTrue(job.cancelled)
        self.assertIn('lovely.user@example.com: testing!', job.cancel_reason)

    @mock.patch.object(cancel.utils, 'GetEmail',
                       mock.MagicMock(return_value='another.user@example.com'))
    @mock.patch.object(cancel.utils, 'IsAdministrator',
                       mock.MagicMock(return_value=False))
    @mock.patch.object(cancel.utils, 'IsTryjobUser',
                       mock.MagicMock(return_value=True))
    @mock.patch.object(cancel.utils, 'IsAllowedToDelegate',
                       mock.MagicMock(return_value=False))
    def testCancelForbiddenUser(self):
        # A non-owner, non-admin caller gets a 403.
        job = job_module.Job.New((), (), user='lovely.user@example.com')
        scheduler.Schedule(job)
        self.addCleanup(scheduler.Cancel, job)
        self.Post(
            '/api/job/cancel', {
                'job_id': job.job_id,
                'reason': 'testing!'
            },
            status=403)

    @mock.patch.object(cancel.utils, 'GetEmail',
                       mock.MagicMock(return_value='another.user@example.com'))
    @mock.patch.object(cancel.utils, 'IsAdministrator',
                       mock.MagicMock(return_value=False))
    @mock.patch.object(cancel.utils, 'IsTryjobUser',
                       mock.MagicMock(return_value=True))
    @mock.patch.object(cancel.utils, 'IsAllowedToDelegate',
                       mock.MagicMock(return_value=True))
    def testCancelDelegationSupported(self):
        # A caller allowed to delegate may cancel on the user's behalf.
        job = job_module.Job.New((), (), user='lovely.user@example.com')
        scheduler.Schedule(job)
        self.addCleanup(scheduler.Cancel, job)
        self.Post(
            '/api/job/cancel', {
                'user': job.user,
                'job_id': job.job_id,
                'reason': 'testing!'
            },
            status=200)

    @mock.patch.object(
        cancel.utils, 'GetEmail',
        mock.MagicMock(return_value='some-service-account@example.com'))
    @mock.patch.object(cancel.utils, 'IsAdministrator',
                       mock.MagicMock(return_value=False))
    @mock.patch.object(cancel.utils, 'IsTryjobUser',
                       mock.MagicMock(return_value=True))
    @mock.patch.object(cancel.utils, 'IsAllowedToDelegate',
                       mock.MagicMock(return_value=False))
    def testCancelDelegationRejected(self):
        # Delegation from an account without the privilege is a 403.
        job = job_module.Job.New((), (), user='lovely.user@example.com')
        scheduler.Schedule(job)
        self.addCleanup(scheduler.Cancel, job)
        self.Post(
            '/api/job/cancel', {
                'user': job.user,
                'job_id': job.job_id,
                'reason': 'testing!'
            },
            status=403)

    @mock.patch.object(
        cancel.utils, 'GetEmail',
        mock.MagicMock(return_value='some-service-account@example.com'))
    @mock.patch.object(cancel.utils, 'IsAdministrator',
                       mock.MagicMock(return_value=True))
    @mock.patch.object(cancel.utils, 'IsTryjobUser',
                       mock.MagicMock(return_value=True))
    @mock.patch.object(cancel.utils, 'IsAllowedToDelegate',
                       mock.MagicMock(return_value=True))
    def testCancelDelegationForAnAdminWorks(self):
        # Admin + delegation: may cancel on behalf of an arbitrary user.
        job = job_module.Job.New((), (), user='lovely.user@example.com')
        scheduler.Schedule(job)
        self.addCleanup(scheduler.Cancel, job)
        self.Post(
            '/api/job/cancel', {
                'user': 'admin@example.com',
                'job_id': job.job_id,
                'reason': 'testing!'
            },
            status=200)

    @mock.patch.object(cancel.utils, 'GetEmail',
                       mock.MagicMock(return_value='lovely.user@example.com'))
    @mock.patch.object(cancel.utils, 'IsAdministrator',
                       mock.MagicMock(return_value=False))
    @mock.patch.object(cancel.utils, 'IsTryjobUser',
                       mock.MagicMock(return_value=True))
    def testCancelAlreadyRunningJob(self):
        # Jobs that already started (task assigned) can still be cancelled.
        job = job_module.Job.New((), (),
                                 arguments={'configuration': 'mock'},
                                 user='lovely.user@example.com')
        scheduler.Schedule(job)
        _, status = scheduler.PickJobs(job.configuration)[0]
        self.assertEqual(status, 'Queued')
        job.task = '123'
        job.started = True
        job.put()
        self.assertTrue(job.running)
        self.addCleanup(scheduler.Cancel, job)
        self.Post(
            '/api/job/cancel', {
                'job_id': job.job_id,
                'reason': 'testing!'
            },
            status=200)
| |
# coding: utf-8
import os
import copy
import collections
import types
import sys
from collections import namedtuple
from jinja2 import nodes
from jinja2 import Environment, TemplateNotFound, FileSystemLoader
from jinja2.ext import Extension
from jinja2.loaders import split_template_path
from jinja2.utils import open_if_exists
from schema import Schema
from snaql.convertors import (
guard_bool,
guard_case,
guard_date,
guard_datetime,
guard_float,
guard_integer,
guard_regexp,
guard_string,
guard_time,
guard_timedelta,
)
PY = sys.version_info
PY3K = PY >= (3, 0, 0)
class RawFileSystemLoader(FileSystemLoader):
    """Jinja file-system loader that also records each template's raw lines.

    The uncompiled source is stashed into
    ``environment.sql_params['raws']`` so the SQL extension can later
    slice out the original text of each ``{% sql %}`` block.
    """

    def get_source(self, environment, template):
        pieces = split_template_path(template)
        for searchpath in self.searchpath:
            filename = os.path.join(searchpath, *pieces)
            f = open_if_exists(filename)
            if f is None:
                continue
            try:
                contents = f.read().decode(self.encoding)
            finally:
                f.close()
            mtime = os.path.getmtime(filename)
            # Need to save original raw template before compilation
            environment.sql_params.setdefault('raws', {}).update({
                template: [c.strip() for c in contents.splitlines()]
            })

            def uptodate():
                # Template is stale once the file's mtime changes or the
                # file disappears.
                try:
                    return os.path.getmtime(filename) == mtime
                except OSError:
                    return False
            return contents, filename, uptodate
        raise TemplateNotFound(template)
class JinjaSQLExtension(Extension):
    """Jinja extension implementing the ``{% sql %}`` / ``{% query %}`` tags.

    At parse time each block's raw SQL is sliced out of the raw template
    saved by ``RawFileSystemLoader``; at render time ``_sql_process``
    fills in the block's metadata under
    ``environment.sql_params['funcs']``.
    """
    tags = set(['sql', 'query'])

    def parse(self, parser):
        # The first token is the tag name; keep its line for slicing.
        lineno = next(parser.stream).lineno
        # Block name expression, also passed through as keyword ``func``.
        expr = parser.parse_expression()
        args = [expr]
        kwargs = [nodes.Keyword('func', expr)]
        if parser.stream.skip_if('comma'):
            # Optional 'note' for function docstring
            if (
                parser.stream.current.type == 'name' and
                parser.stream.current.value in (
                    'note', 'cond_for', 'depends_on'
                )
            ):
                stream_type = parser.stream.current.value
                next(parser.stream)
                parser.stream.expect('assign')
                # Depends meta is always a list
                if stream_type == 'depends_on':
                    c_expr = parser.parse_list()
                else:
                    c_expr = parser.parse_expression()
                args.append(c_expr)
                kwargs.append(nodes.Keyword(stream_type, c_expr))
        body = parser.parse_statements(
            ['name:endsql', 'name:endquery'], drop_needle=True
        )
        raw_template = self.environment.sql_params['raws'][parser.name]
        # Lines range of original raw template
        raw_lines = slice(lineno, parser.stream.current.lineno-1)
        self.environment.sql_params.setdefault('funcs', {}).update({
            expr.value: {'raw_sql': '\n '.join(raw_template[raw_lines])}
        })
        call_node = nodes.Call(
            self.attr('_sql_process', lineno=lineno),
            args, kwargs, None, None
        )
        return nodes.CallBlock(call_node, [], [], body)

    def _sql_process(self, *args, **kwargs):
        """Render-time callback: record block metadata, return its SQL."""
        caller = kwargs['caller']
        # Re-join the rendered body with normalized whitespace.
        raw_sql = '\n '.join(x.strip() for x in caller().split('\n') if x)
        if 'cond_for' in kwargs:
            origin = (
                self.environment.sql_params['funcs'].get(kwargs['cond_for'])
            )
            if origin:
                # Register this block as a condition of its owner block.
                origin.setdefault('conds', []).append(kwargs['cond_for'])
        origin = self.environment.sql_params['funcs'].get(kwargs['func'])
        origin.update({
            'sql': raw_sql,
            'note': kwargs.get('note'),
            'is_cond': 'cond_for' in kwargs,
            'depends_on': kwargs.get('depends_on', []),
            'node': None,
        })
        if origin['is_cond']:
            origin['cond_for'] = kwargs['cond_for']
        return raw_sql
class SnaqlDepNode(object):
    """A single node in Snaql's block dependency graph."""

    def __init__(self, name):
        self.name = name
        self.edges = []  # nodes this block depends on

    def add_edge(self, node):
        """Record ``node`` as a dependency of this node."""
        self.edges.append(node)

    def __str__(self):
        return '<SnaqlDepNode %s>' % self.name

    __repr__ = __str__
class SnaqlException(Exception):
    """Base exception for Snaql errors (bad conditions, missing blocks)."""
class Snaql(object):
    """Factory that loads SQL template files and exposes their named
    ``{% sql %}`` / ``{% query %}`` blocks as Python functions.

    :param sql_root: root directory holding the SQL templates.
    :param sql_ns: namespace (subdirectory of ``sql_root``) to load from.
    """

    def __init__(self, sql_root, sql_ns):
        self.sql_root = sql_root
        self.jinja_env = Environment(
            trim_blocks=True,
            extensions=[JinjaSQLExtension],
            loader=RawFileSystemLoader(os.path.join(self.sql_root, sql_ns)),
        )
        # Guard filters templates use to escape/validate parameters.
        self.jinja_env.filters.update({
            'guards.string': guard_string,
            'guards.integer': guard_integer,
            'guards.datetime': guard_datetime,
            'guards.date': guard_date,
            'guards.float': guard_float,
            'guards.timedelta': guard_timedelta,
            'guards.time': guard_time,
            'guards.case': guard_case,
            'guards.regexp': guard_regexp,
            'guards.bool': guard_bool,
        })
        # Scratch space shared by the loader and the SQL extension.
        self.jinja_env.extend(sql_params={})

    def gen_func(self, name, meta_struct, env):
        """Build the render function for block ``name`` in ``meta_struct``.

        The returned function renders the block's raw SQL with the given
        keyword arguments (sub-rendering any condition blocks passed as
        values) and returns the resulting SQL string.  Its ``__doc__``,
        ``is_cond`` and ``func_name`` attributes mirror the block meta.
        """
        # BUGFIX: ``collections.Callable``/``collections.Iterable`` are
        # deprecated aliases that were removed in Python 3.10; import the
        # ABCs from ``collections.abc`` with a Python 2 fallback.
        try:
            from collections.abc import Callable, Iterable
        except ImportError:  # pragma: no cover - Python 2
            from collections import Callable, Iterable

        def subrender_cond(owner_name, cond_func, context):
            # Render a condition block passed as an argument value;
            # anything that is not a condition is returned unchanged.
            if isinstance(cond_func, Callable) and cond_func.is_cond:
                cond_struct = meta_struct['funcs'][cond_func.func_name]
                if cond_struct['cond_for'] != owner_name:
                    raise SnaqlException(
                        '"%s" is not proper condition for "%s"' % (
                            cond_func.func_name,
                            owner_name
                        )
                    )
                cond_tmpl = env.from_string(
                    meta_struct['funcs'][cond_func.func_name]['raw_sql']
                )
                return cond_tmpl.render(**context).strip()
            return cond_func

        def fn(**kwargs):
            if meta_struct['funcs'][name]['is_cond']:
                raise SnaqlException((
                    '"%s" is condition for "%s" and can not '
                    'be rendered outside of it\'s scope'
                ) % (name, meta_struct['funcs'][name]['cond_for']))
            if kwargs:
                for point, val in kwargs.items():
                    maybe_cond_sql = subrender_cond(name, val, kwargs)
                    if maybe_cond_sql:
                        kwargs[point] = maybe_cond_sql
                    if (
                        isinstance(val, Iterable) and
                        not isinstance(
                            val, (str if PY3K else types.StringTypes, dict)
                        )
                    ):
                        # Render conditions inside list-like values and
                        # drop the empty results.
                        val = [subrender_cond(name, v, kwargs) for v in val]
                        kwargs[point] = [v for v in val if v]
                if 'schema' in kwargs and isinstance(kwargs['schema'], Schema):
                    # Optional validation of the remaining parameters.
                    validation_schema = kwargs.pop('schema')
                    kwargs = validation_schema.validate(kwargs)
                sql_tmpl = (
                    env.from_string(meta_struct['funcs'][name]['raw_sql'])
                )
                return sql_tmpl.render(**kwargs).strip()
            # No arguments: the pre-rendered SQL is already available.
            return meta_struct['funcs'][name]['sql']

        fn.__doc__ = meta_struct['funcs'][name]['note']
        fn.is_cond = meta_struct['funcs'][name]['is_cond']
        fn.func_name = str(name)
        return fn

    def gen_dep_graph(self, node, accum):
        """Depth-first post-order walk of the dependency graph.

        Appends nodes to ``accum`` so every dependency precedes its
        dependents; returns ``accum``.
        """
        for edge in node.edges:
            if edge not in accum:
                self.gen_dep_graph(edge, accum)
        accum.append(node)
        return accum

    def load_queries(self, sql_path):
        """Load ``sql_path`` and return a namedtuple of block functions.

        The result exposes one attribute per block plus
        ``ordered_blocks``, the block functions in dependency order.
        Raises :class:`SnaqlException` for unknown ``depends_on`` names.
        """
        template = self.jinja_env.get_template(sql_path)
        # Rendering populates ``sql_params`` via the extension.
        template.render()
        factory_methods = {}
        meta_struct = copy.deepcopy(self.jinja_env.sql_params)
        blocks = set(meta_struct['funcs'])
        node = SnaqlDepNode('root')
        for name, block in meta_struct['funcs'].items():
            # Dependency graph building
            block['node'] = block['node'] or SnaqlDepNode(name)
            for dep in block['depends_on']:
                if dep not in blocks:
                    raise SnaqlException(
                        '"%s" block not found in "%s"' % (dep, sql_path)
                    )
                if meta_struct['funcs'][dep]['node'] is None:
                    meta_struct['funcs'][dep]['node'] = SnaqlDepNode(dep)
                block['node'].add_edge(meta_struct['funcs'][dep]['node'])
            node.add_edge(block['node'])
            fn = self.gen_func(name, meta_struct, self.jinja_env)
            factory_methods[name] = fn
        edges_accum = []
        graph = self.gen_dep_graph(node, edges_accum)
        graph.pop()  # root node
        factory_methods['ordered_blocks'] = [
            factory_methods[n.name]
            for n in graph
        ]
        factory = namedtuple('SQLFactory', factory_methods.keys())
        struct = factory(*factory_methods.values())
        self.jinja_env.sql_params.clear()
        return struct
| |
# -*- coding: utf-8 -*-
from keras.optimizers import SGD
from keras.layers import Input, merge, ZeroPadding2D
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import AveragePooling2D, GlobalAveragePooling2D, MaxPooling2D
from keras.layers.normalization import BatchNormalization
from keras.models import Model
import keras.backend as K
from sklearn.metrics import log_loss
from custom_layers.scale_layer import Scale
from load_cifar10 import load_cifar10_data
def densenet169_model(img_rows, img_cols, color_type=1, nb_dense_block=4, growth_rate=32, nb_filter=64, reduction=0.5, dropout_rate=0.0, weight_decay=1e-4, num_classes=None):
    '''
    DenseNet 169 Model for Keras

    Model Schema is based on
    https://github.com/flyyufelix/DenseNet-Keras

    ImageNet Pretrained Weights
    Theano: https://drive.google.com/open?id=0Byy2AcGyEVxfN0d3T1F1MXg0NlU
    TensorFlow: https://drive.google.com/open?id=0Byy2AcGyEVxfSEc5UC1ROUFJdmM

    # Arguments
        img_rows, img_cols: nominal input resolution (the graph itself is
            built with fixed 224x224 inputs below)
        color_type: nominal number of input channels (graph uses 3)
        nb_dense_block: number of dense blocks to add to end
        growth_rate: number of filters to add per dense block
        nb_filter: initial number of filters (overridden to 64 below)
        reduction: reduction factor of transition blocks.
        dropout_rate: dropout rate
        weight_decay: weight decay factor
        num_classes: number of classes for the replacement softmax head
    # Returns
        A compiled Keras model instance with ImageNet-pretrained weights
        and a fresh ``num_classes``-way classification head.
    '''
    # Small BatchNorm epsilon matching the original converted weights.
    eps = 1.1e-5

    # compute compression factor
    compression = 1.0 - reduction

    # Handle Dimension Ordering for different backends
    global concat_axis
    if K.image_dim_ordering() == 'tf':
        concat_axis = 3
        img_input = Input(shape=(224, 224, 3), name='data')
    else:
        concat_axis = 1
        img_input = Input(shape=(3, 224, 224), name='data')

    # From architecture for ImageNet (Table 1 in the paper)
    nb_filter = 64
    nb_layers = [6,12,32,32] # For DenseNet-169

    # Initial convolution
    x = ZeroPadding2D((3, 3), name='conv1_zeropadding')(img_input)
    x = Convolution2D(nb_filter, 7, 7, subsample=(2, 2), name='conv1', bias=False)(x)
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv1_bn')(x)
    x = Scale(axis=concat_axis, name='conv1_scale')(x)
    x = Activation('relu', name='relu1')(x)
    x = ZeroPadding2D((1, 1), name='pool1_zeropadding')(x)
    x = MaxPooling2D((3, 3), strides=(2, 2), name='pool1')(x)

    # Add dense blocks, each followed by a compressing transition block.
    for block_idx in range(nb_dense_block - 1):
        stage = block_idx+2
        x, nb_filter = dense_block(x, stage, nb_layers[block_idx], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)

        # Add transition_block
        x = transition_block(x, stage, nb_filter, compression=compression, dropout_rate=dropout_rate, weight_decay=weight_decay)
        nb_filter = int(nb_filter * compression)

    # Final dense block has no trailing transition.
    final_stage = stage + 1
    x, nb_filter = dense_block(x, final_stage, nb_layers[-1], nb_filter, growth_rate, dropout_rate=dropout_rate, weight_decay=weight_decay)

    x = BatchNormalization(epsilon=eps, axis=concat_axis, name='conv'+str(final_stage)+'_blk_bn')(x)
    x = Scale(axis=concat_axis, name='conv'+str(final_stage)+'_blk_scale')(x)
    x = Activation('relu', name='relu'+str(final_stage)+'_blk')(x)

    # Original ImageNet head (1000 classes) so pretrained weights load.
    x_fc = GlobalAveragePooling2D(name='pool'+str(final_stage))(x)
    x_fc = Dense(1000, name='fc6')(x_fc)
    x_fc = Activation('softmax', name='prob')(x_fc)

    model = Model(img_input, x_fc, name='densenet')

    if K.image_dim_ordering() == 'th':
        # Use pre-trained weights for Theano backend
        weights_path = 'imagenet_models/densenet169_weights_th.h5'
    else:
        # Use pre-trained weights for Tensorflow backend
        weights_path = 'imagenet_models/densenet169_weights_tf.h5'

    model.load_weights(weights_path, by_name=True)

    # Truncate and replace softmax layer for transfer learning
    # Cannot use model.layers.pop() since model is not of Sequential() type
    # The method below works since pre-trained weights are stored in layers but not in the model
    x_newfc = GlobalAveragePooling2D(name='pool'+str(final_stage))(x)
    x_newfc = Dense(num_classes, name='fc6')(x_newfc)
    x_newfc = Activation('softmax', name='prob')(x_newfc)

    model = Model(img_input, x_newfc)

    # Learning rate is changed to 0.001
    sgd = SGD(lr=1e-3, decay=1e-6, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss='categorical_crossentropy', metrics=['accuracy'])

    return model
def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, Relu, bottleneck 1x1 Conv2D, 3x3 Conv2D, and option dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            branch: layer index within each dense block
            nb_filter: number of filters produced by the 3x3 convolution
            dropout_rate: dropout rate
            weight_decay: weight decay factor (accepted but not applied here)
        Uses the module-level ``concat_axis`` set by densenet169_model().
    '''
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_' + str(branch)
    relu_name_base = 'relu' + str(stage) + '_' + str(branch)

    # 1x1 Convolution (Bottleneck layer)
    inter_channel = nb_filter * 4
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x1_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_x1_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x1')(x)
    x = Convolution2D(inter_channel, 1, 1, name=conv_name_base+'_x1', bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # 3x3 Convolution
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_x2_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_x2_scale')(x)
    x = Activation('relu', name=relu_name_base+'_x2')(x)
    x = ZeroPadding2D((1, 1), name=conv_name_base+'_x2_zeropadding')(x)
    x = Convolution2D(nb_filter, 3, 3, name=conv_name_base+'_x2', bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x
def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, 1x1 Convolution, averagePooling, optional compression, dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_filter: number of filters
            compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
            dropout_rate: dropout rate
            weight_decay: weight decay factor (accepted but not applied here)
        Uses the module-level ``concat_axis`` set by densenet169_model().
    '''
    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_blk'
    relu_name_base = 'relu' + str(stage) + '_blk'
    pool_name_base = 'pool' + str(stage)

    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base+'_bn')(x)
    x = Scale(axis=concat_axis, name=conv_name_base+'_scale')(x)
    x = Activation('relu', name=relu_name_base)(x)
    # 1x1 convolution compresses the channel count by ``compression``.
    x = Convolution2D(int(nb_filter * compression), 1, 1, name=conv_name_base, bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # 2x2 average pooling halves the spatial resolution.
    x = AveragePooling2D((2, 2), strides=(2, 2), name=pool_name_base)(x)

    return x
def dense_block(x, stage, nb_layers, nb_filter, growth_rate, dropout_rate=None, weight_decay=1e-4, grow_nb_filters=True):
    ''' Build a dense_block where the output of each conv_block is fed to subsequent ones
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_layers: the number of layers of conv_block to append to the model.
            nb_filter: number of filters
            growth_rate: growth rate
            dropout_rate: dropout rate
            weight_decay: weight decay factor
            grow_nb_filters: flag to decide to allow number of filters to grow
        # Returns
            (output tensor, updated nb_filter)
    '''
    eps = 1.1e-5
    concat_feat = x

    for i in range(nb_layers):
        branch = i+1
        # Each conv_block sees the concatenation of all previous outputs.
        x = conv_block(concat_feat, stage, branch, growth_rate, dropout_rate, weight_decay)
        concat_feat = merge([concat_feat, x], mode='concat', concat_axis=concat_axis, name='concat_'+str(stage)+'_'+str(branch))

        if grow_nb_filters:
            nb_filter += growth_rate

    return concat_feat, nb_filter
if __name__ == '__main__':
    # Example to fine-tune on 3000 samples from Cifar10

    img_rows, img_cols = 224, 224  # Resolution of inputs
    channel = 3
    num_classes = 10
    batch_size = 16
    nb_epoch = 10

    # Load Cifar10 data. Please implement your own load_data() module for your own dataset
    X_train, Y_train, X_valid, Y_valid = load_cifar10_data(img_rows, img_cols)

    # Load our model (ImageNet-pretrained, new num_classes head)
    model = densenet169_model(img_rows=img_rows, img_cols=img_cols, color_type=channel, num_classes=num_classes)

    # Start Fine-tuning
    model.fit(X_train, Y_train,
              batch_size=batch_size,
              nb_epoch=nb_epoch,
              shuffle=True,
              verbose=1,
              validation_data=(X_valid, Y_valid),
              )

    # Make predictions
    predictions_valid = model.predict(X_valid, batch_size=batch_size, verbose=1)

    # Cross-entropy loss score
    score = log_loss(Y_valid, predictions_valid)
| |
# -*- coding: utf-8 -*-
import mock
import pytest
import urlparse
from django.db import connection, transaction
from django.test.utils import CaptureQueriesContext
from django.utils.timezone import now
from osf.utils.sanitize import strip_html
from osf.models import QuickFilesNode
from api.base.settings.defaults import API_BASE
from api.base.utils import waterbutler_api_url_for
from osf_tests.factories import (
AuthUserFactory,
CollectionFactory,
ProjectFactory,
)
from website.views import find_bookmark_collection
@pytest.mark.django_db
class TestUserDetail:
    """Read behaviour of the ``/users/<id>/`` detail endpoint.

    Covers status codes, serialized attributes (name, social, timezone,
    locale), profile-image links, and the relationships exposed for
    quickfiles, preprints and registrations.

    Fix: several requests previously passed ``auth=user_one`` (the user
    object) instead of ``auth=user_one.auth`` (the basic-auth credential
    tuple used by every other test in this module), so those requests
    were not authenticated as intended.
    """
    @pytest.fixture()
    def user_one(self):
        # User with a social entry so social serialization is exercised.
        user_one = AuthUserFactory()
        user_one.social['twitter'] = 'rheisendennis'
        user_one.save()
        return user_one
    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()
    def test_get(self, app, user_one, user_two):
        # test_gets_200
        url = '/{}users/{}/'.format(API_BASE, user_one._id)
        res = app.get(url)
        assert res.status_code == 200
        assert res.content_type == 'application/vnd.api+json'
        # test_get_correct_pk_user
        url = '/{}users/{}/'.format(API_BASE, user_one._id)
        res = app.get(url)
        user_json = res.json['data']
        assert user_json['attributes']['full_name'] == user_one.fullname
        assert user_one.social['twitter'] in user_json['attributes']['social']['twitter']
        # test_get_incorrect_pk_user_logged_in
        url = '/{}users/{}/'.format(API_BASE, user_two._id)
        res = app.get(url)
        user_json = res.json['data']
        assert user_json['attributes']['full_name'] != user_one.fullname
        # test_returns_timezone_and_locale
        url = '/{}users/{}/'.format(API_BASE, user_one._id)
        res = app.get(url)
        attributes = res.json['data']['attributes']
        assert attributes['timezone'] == user_one.timezone
        assert attributes['locale'] == user_one.locale
        # test_get_new_users
        url = '/{}users/{}/'.format(API_BASE, user_two._id)
        res = app.get(url)
        assert res.status_code == 200
        assert res.json['data']['attributes']['full_name'] == user_two.fullname
        assert res.json['data']['attributes']['social'] == {}
        # test_get_incorrect_pk_user_not_logged_in
        url = '/{}users/{}/'.format(API_BASE, user_two._id)
        res = app.get(url, auth=user_one.auth)
        user_json = res.json['data']
        assert user_json['attributes']['full_name'] != user_one.fullname
        assert user_json['attributes']['full_name'] == user_two.fullname
        # test_user_detail_takes_profile_image_size_param
        size = 42
        url = '/{}users/{}/?profile_image_size={}'.format(
            API_BASE, user_one._id, size)
        res = app.get(url)
        user_json = res.json['data']
        profile_image_url = user_json['links']['profile_image']
        query_dict = urlparse.parse_qs(
            urlparse.urlparse(profile_image_url).query)
        assert int(query_dict.get('s')[0]) == size
        # test_profile_image_in_links
        url = '/{}users/{}/'.format(API_BASE, user_one._id)
        res = app.get(url)
        user_json = res.json['data']
        assert 'profile_image' in user_json['links']
    def test_files_relationship_upload(self, app, user_one):
        """The quickfiles relationship exposes a waterbutler upload link."""
        url = '/{}users/{}/'.format(API_BASE, user_one._id)
        # was: auth=user_one (user object) -- pass the credential tuple
        res = app.get(url, auth=user_one.auth)
        quickfiles = QuickFilesNode.objects.get(creator=user_one)
        user_json = res.json['data']
        upload_url = user_json['relationships']['quickfiles']['links']['upload']['href']
        waterbutler_upload = waterbutler_api_url_for(
            quickfiles._id, 'osfstorage')
        assert upload_url == waterbutler_upload
    def test_preprint_relationship(self, app, user_one):
        """The preprints relationship links to the user's preprints list."""
        url = '/{}users/{}/'.format(API_BASE, user_one._id)
        preprint_url = '/{}users/{}/preprints/'.format(API_BASE, user_one._id)
        # was: auth=user_one (user object) -- pass the credential tuple
        res = app.get(url, auth=user_one.auth)
        user_json = res.json['data']
        href_url = user_json['relationships']['preprints']['links']['related']['href']
        assert preprint_url in href_url
    def test_registrations_relationship(self, app, user_one):
        """The registrations relationship links to the registrations list."""
        url = '/{}users/{}/'.format(API_BASE, user_one._id)
        registration_url = '/{}users/{}/registrations/'.format(
            API_BASE, user_one._id)
        # was: auth=user_one (user object) -- pass the credential tuple
        res = app.get(url, auth=user_one.auth)
        user_json = res.json['data']
        href_url = user_json['relationships']['registrations']['links']['related']['href']
        assert registration_url in href_url
    def test_nodes_relationship_is_absent(self, app, user_one):
        """User detail must not expose a top-level 'node' relationship."""
        url = '/{}users/{}/'.format(API_BASE, user_one._id)
        # was: auth=user_one (user object) -- pass the credential tuple
        res = app.get(url, auth=user_one.auth)
        assert 'node' not in res.json['data']['relationships'].keys()
    # Regression test for https://openscience.atlassian.net/browse/OSF-8966
    def test_browsable_api_for_user_detail(self, app, user_one):
        url = '/{}users/{}/?format=api'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
@pytest.mark.django_db
class TestUserRoutesNodeRoutes:
    """Routing matrix for /users/ vs /nodes/ paths and the 'me' alias.

    Verifies which user/node URL combinations return 200 vs 401/404, and
    that node listings respect visibility (private, deleted, folders and
    bookmark collections are hidden from other users).

    Fix: one URL was built with ``'/{}users/me/nodes/'.format(API_BASE,
    user_one._id)`` -- a single placeholder with two arguments; the extra
    argument was silently ignored by ``str.format`` and is removed.
    """
    @pytest.fixture()
    def user_one(self):
        user_one = AuthUserFactory()
        user_one.social['twitter'] = 'rheisendennis'
        user_one.save()
        return user_one
    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()
    @pytest.fixture()
    def project_public_user_one(self, user_one):
        return ProjectFactory(
            title='Public Project User One',
            is_public=True,
            creator=user_one)
    @pytest.fixture()
    def project_private_user_one(self, user_one):
        return ProjectFactory(
            title='Private Project User One',
            is_public=False,
            creator=user_one)
    @pytest.fixture()
    def project_deleted_user_one(self, user_one):
        # Deleted collection; must never appear in node listings.
        return CollectionFactory(
            title='Deleted Project User One',
            is_public=False,
            creator=user_one,
            deleted=now())
    @pytest.fixture()
    def project_public_user_two(self, user_two):
        return ProjectFactory(
            title='Public Project User Two',
            is_public=True,
            creator=user_two)
    @pytest.fixture()
    def project_private_user_two(self, user_two):
        return ProjectFactory(
            title='Private Project User Two',
            is_public=False,
            creator=user_two)
    @pytest.fixture()
    def folder(self):
        return CollectionFactory()
    @pytest.fixture()
    def folder_deleted(self, user_one):
        return CollectionFactory(
            title='Deleted Folder User One',
            is_public=False,
            creator=user_one,
            deleted=now())
    @pytest.fixture()
    def bookmark_collection(self, user_one):
        return find_bookmark_collection(user_one)
    def test_get_200_responses(
            self, app, user_one, user_two,
            project_public_user_one,
            project_public_user_two,
            project_private_user_one,
            project_private_user_two,
            project_deleted_user_one,
            folder, folder_deleted,
            bookmark_collection):
        # test_get_200_path_users_me_userone_logged_in
        url = '/{}users/me/'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        # test_get_200_path_users_me_usertwo_logged_in
        url = '/{}users/me/'.format(API_BASE)
        res = app.get(url, auth=user_two.auth)
        assert res.status_code == 200
        # test_get_200_path_users_user_id_user_logged_in
        url = '/{}users/{}/'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        # test_get_200_path_users_user_id_no_user
        url = '/{}users/{}/'.format(API_BASE, user_two._id)
        res = app.get(url)
        assert res.status_code == 200
        # test_get_200_path_users_user_id_unauthorized_user
        url = '/{}users/{}/'.format(API_BASE, user_two._id)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        assert res.json['data']['id'] == user_two._id
        # test_get_200_path_users_me_nodes_user_logged_in
        # (spurious second .format() argument removed -- one placeholder)
        url = '/{}users/me/nodes/'.format(API_BASE)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        ids = {each['id'] for each in res.json['data']}
        assert project_public_user_one._id in ids
        assert project_private_user_one._id in ids
        assert project_public_user_two._id not in ids
        assert project_private_user_two._id not in ids
        assert folder._id not in ids
        assert folder_deleted._id not in ids
        assert project_deleted_user_one._id not in ids
        # test_get_200_path_users_user_id_nodes_user_logged_in
        url = '/{}users/{}/nodes/'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        ids = {each['id'] for each in res.json['data']}
        assert project_public_user_one._id in ids
        assert project_private_user_one._id in ids
        assert project_public_user_two._id not in ids
        assert project_private_user_two._id not in ids
        assert folder._id not in ids
        assert folder_deleted._id not in ids
        assert project_deleted_user_one._id not in ids
        # test_get_200_path_users_user_id_nodes_no_user
        url = '/{}users/{}/nodes/'.format(API_BASE, user_one._id)
        res = app.get(url)
        assert res.status_code == 200
        # an anonymous/unauthorized user can only see the public projects
        # user_one contributes to.
        ids = {each['id'] for each in res.json['data']}
        assert project_public_user_one._id in ids
        assert project_private_user_one._id not in ids
        assert project_public_user_two._id not in ids
        assert project_private_user_two._id not in ids
        assert folder._id not in ids
        assert folder_deleted._id not in ids
        assert project_deleted_user_one._id not in ids
        # test_get_200_path_users_user_id_nodes_unauthorized_user
        url = '/{}users/{}/nodes/'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_two.auth)
        assert res.status_code == 200
        # an anonymous/unauthorized user can only see the public projects
        # user_one contributes to.
        ids = {each['id'] for each in res.json['data']}
        assert project_public_user_one._id in ids
        assert project_private_user_one._id not in ids
        assert project_public_user_two._id not in ids
        assert project_private_user_two._id not in ids
        assert folder._id not in ids
        assert folder_deleted._id not in ids
        assert project_deleted_user_one._id not in ids
    def test_embed_nodes(self, app, user_one, project_public_user_one):
        """?embed=nodes inlines the user's node data in the response."""
        url = '/{}users/{}/?embed=nodes'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_one.auth)
        assert res.status_code == 200
        embedded_data = res.json['data']['embeds']['nodes']['data'][0]['attributes']
        assert embedded_data['title'] == project_public_user_one.title
    def test_get_400_responses(self, app, user_one, user_two):
        # test_get_403_path_users_me_nodes_no_user
        # TODO: change expected exception from 403 to 401 for unauthorized
        # users
        url = '/{}users/me/nodes/'.format(API_BASE)
        res = app.get(url, expect_errors=True)
        assert res.status_code == 401
        # test_get_403_path_users_me_no_user
        # TODO: change expected exception from 403 to 401 for unauthorized
        # users
        url = '/{}users/me/'.format(API_BASE)
        res = app.get(url, expect_errors=True)
        assert res.status_code == 401
        # test_get_404_path_users_user_id_me_user_logged_in
        url = '/{}users/{}/me/'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 404
        # test_get_404_path_users_user_id_me_no_user
        url = '/{}users/{}/me/'.format(API_BASE, user_one._id)
        res = app.get(url, expect_errors=True)
        assert res.status_code == 404
        # test_get_404_path_users_user_id_me_unauthorized_user
        url = '/{}users/{}/me/'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 404
        # test_get_404_path_users_user_id_nodes_me_user_logged_in
        url = '/{}users/{}/nodes/me/'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 404
        # test_get_404_path_users_user_id_nodes_me_unauthorized_user
        url = '/{}users/{}/nodes/me/'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 404
        # test_get_404_path_users_user_id_nodes_me_no_user
        url = '/{}users/{}/nodes/me/'.format(API_BASE, user_one._id)
        res = app.get(url, expect_errors=True)
        assert res.status_code == 404
        # test_get_404_path_nodes_me_user_logged_in
        url = '/{}nodes/me/'.format(API_BASE)
        res = app.get(url, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 404
        # test_get_404_path_nodes_me_no_user
        url = '/{}nodes/me/'.format(API_BASE)
        res = app.get(url, expect_errors=True)
        assert res.status_code == 404
        # test_get_404_path_nodes_user_id_user_logged_in
        url = '/{}nodes/{}/'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 404
        # test_get_404_path_nodes_user_id_unauthorized_user
        url = '/{}nodes/{}/'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 404
        # test_get_404_path_nodes_user_id_no_user
        url = '/{}nodes/{}/'.format(API_BASE, user_one._id)
        res = app.get(url, expect_errors=True)
        assert res.status_code == 404
@pytest.mark.django_db
class TestUserUpdate:
    """PUT/PATCH behaviour of the ``/users/<id>/`` endpoint.

    Covers payload validation (missing/incorrect id and type, blank
    names, non-nested attributes), permissions (logged-out, wrong user),
    social-field validation, HTML sanitization, and SELECT FOR UPDATE
    query behaviour.

    Fixes:
    - ``test_partial_patch_user_logged_in_no_social_fields`` ended with
      the tautology ``user_one.social['github'] == user_one.social['github']``
      (always true); it now asserts the patched value, mirroring the
      sibling test ``test_partial_patch_user_logged_in``.
    - ``data_missing_type`` used the attribute key ``'fullname'`` where
      every other fixture uses the serializer field name ``'full_name'``.
    """
    @pytest.fixture()
    def user_one(self):
        # .build() + save() so the social dict is set before first save.
        user_one = AuthUserFactory.build(
            fullname='Martin Luther King Jr.',
            given_name='Martin',
            family_name='King',
            suffix='Jr.',
            social=dict(
                github='userOneGithub',
                scholar='userOneScholar',
                profileWebsites=['http://www.useronepersonalwebsite.com'],
                twitter='userOneTwitter',
                linkedIn='userOneLinkedIn',
                impactStory='userOneImpactStory',
                orcid='userOneOrcid',
                researcherId='userOneResearcherId'
            )
        )
        user_one.save()
        return user_one
    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()
    @pytest.fixture()
    def url_user_one(self, user_one):
        return '/v2/users/{}/'.format(user_one._id)
    @pytest.fixture()
    def data_new_user_one(self, user_one):
        # Complete, valid JSON-API payload replacing every name field.
        return {
            'data': {
                'type': 'users',
                'id': user_one._id,
                'attributes': {
                    'full_name': 'el-Hajj Malik el-Shabazz',
                    'given_name': 'Malcolm',
                    'middle_names': 'Malik el-Shabazz',
                    'family_name': 'X',
                    'suffix': 'Sr.',
                    'social': {
                        'github': ['http://github.com/even_newer_github/'],
                        'scholar': ['http://scholar.google.com/citations?user=newScholar'],
                        'profileWebsites': ['http://www.newpersonalwebsite.com'],
                        'twitter': ['http://twitter.com/newtwitter'],
                        'linkedIn': ['https://www.linkedin.com/newLinkedIn'],
                        'impactStory': ['https://impactstory.org/newImpactStory'],
                        'orcid': ['http://orcid.org/newOrcid'],
                        'researcherId': ['http://researcherid.com/rid/newResearcherId'],
                    }},
            }}
    @pytest.fixture()
    def data_missing_id(self):
        return {
            'data': {
                'type': 'users',
                'attributes': {
                    'full_name': 'el-Hajj Malik el-Shabazz',
                    'family_name': 'Z',
                }
            }
        }
    @pytest.fixture()
    def data_missing_type(self, user_one):
        return {
            'data': {
                'id': user_one._id,
                'attributes': {
                    # was 'fullname' -- use the serializer field name for
                    # consistency with the other fixtures
                    'full_name': 'el-Hajj Malik el-Shabazz',
                    'family_name': 'Z',
                }
            }
        }
    @pytest.fixture()
    def data_incorrect_id(self):
        return {
            'data': {
                'id': '12345',
                'type': 'users',
                'attributes': {
                    'full_name': 'el-Hajj Malik el-Shabazz',
                    'family_name': 'Z',
                }
            }
        }
    @pytest.fixture()
    def data_incorrect_type(self, user_one):
        return {
            'data': {
                'id': user_one._id,
                'type': 'Wrong type.',
                'attributes': {
                    'full_name': 'el-Hajj Malik el-Shabazz',
                    'family_name': 'Z',
                }
            }
        }
    @pytest.fixture()
    def data_blank_but_not_empty_full_name(self, user_one):
        return {
            'data': {
                'id': user_one._id,
                'type': 'users',
                'attributes': {
                    'full_name': ' '
                }
            }
        }
    def test_select_for_update(
            self, app, user_one, url_user_one, data_new_user_one):
        """A PATCH should lock the user row with SELECT ... FOR UPDATE."""
        with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
            res = app.patch_json_api(url_user_one, {
                'data': {
                    'id': user_one._id,
                    'type': 'users',
                    'attributes': {
                        'family_name': data_new_user_one['data']['attributes']['family_name'],
                    }
                }
            }, auth=user_one.auth)
        assert res.status_code == 200
        assert res.json['data']['attributes']['family_name'] == data_new_user_one['data']['attributes']['family_name']
        for_update_sql = connection.ops.for_update_sql()
        assert any(for_update_sql in query['sql']
                   for query in ctx.captured_queries)
    @mock.patch('osf.utils.requests.settings.SELECT_FOR_UPDATE_ENABLED', False)
    def test_select_for_update_disabled(
            self, app, user_one, url_user_one, data_new_user_one):
        """With the feature flag off, no FOR UPDATE clause is emitted."""
        with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
            res = app.patch_json_api(url_user_one, {
                'data': {
                    'id': user_one._id,
                    'type': 'users',
                    'attributes': {
                        'family_name': data_new_user_one['data']['attributes']['family_name'],
                    }
                }
            }, auth=user_one.auth)
        assert res.status_code == 200
        assert res.json['data']['attributes']['family_name'] == data_new_user_one['data']['attributes']['family_name']
        for_update_sql = connection.ops.for_update_sql()
        assert not any(for_update_sql in query['sql']
                       for query in ctx.captured_queries)
    def test_update_patch_errors(
            self, app, user_one, user_two, data_new_user_one,
            data_incorrect_type, data_incorrect_id,
            data_missing_type, data_missing_id,
            data_blank_but_not_empty_full_name, url_user_one):
        """Every malformed / unauthorized PUT and PATCH is rejected."""
        # test_update_user_blank_but_not_empty_full_name
        res = app.put_json_api(
            url_user_one,
            data_blank_but_not_empty_full_name,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be blank.'
        # test_partial_update_user_blank_but_not_empty_full_name
        res = app.patch_json_api(
            url_user_one,
            data_blank_but_not_empty_full_name,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be blank.'
        # test_patch_user_incorrect_type
        res = app.put_json_api(
            url_user_one,
            data_incorrect_type,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 409
        # test_patch_user_incorrect_id
        res = app.put_json_api(
            url_user_one,
            data_incorrect_id,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 409
        # test_patch_user_no_type
        res = app.put_json_api(
            url_user_one,
            data_missing_type,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        # test_patch_user_no_id
        res = app.put_json_api(
            url_user_one,
            data_missing_id,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'This field may not be null.'
        # test_partial_patch_user_incorrect_type
        res = app.patch_json_api(
            url_user_one,
            data_incorrect_type,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 409
        # test_partial_patch_user_incorrect_id
        res = app.patch_json_api(
            url_user_one,
            data_incorrect_id,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 409
        # test_partial_patch_user_no_type
        res = app.patch_json_api(
            url_user_one,
            data_missing_type,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 400
        # test_partial_patch_user_no_id
        res = app.patch_json_api(
            url_user_one,
            data_missing_id,
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 400
        # test_patch_fields_not_nested
        res = app.put_json_api(
            url_user_one,
            {
                'data': {
                    'id': user_one._id,
                    'type': 'users',
                    'full_name': 'New name'
                }
            },
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Request must include /data/attributes.'
        # test_partial_patch_fields_not_nested
        res = app.patch_json_api(
            url_user_one,
            {
                'data': {
                    'id': user_one._id,
                    'type': 'users',
                    'full_name': 'New name'
                }
            },
            auth=user_one.auth,
            expect_errors=True)
        assert res.status_code == 400
        # test_patch_user_logged_out
        res = app.patch_json_api(url_user_one, {
            'data': {
                'id': user_one._id,
                'type': 'users',
                'attributes': {
                    'full_name': data_new_user_one['data']['attributes']['full_name'],
                }
            }
        }, expect_errors=True)
        assert res.status_code == 401
        # test_put_user_without_required_field
        # PUT requires all required fields
        res = app.put_json_api(url_user_one, {
            'data': {
                'id': user_one._id,
                'type': 'users',
                'attributes': {
                    'family_name': data_new_user_one['data']['attributes']['family_name'],
                }
            }
        }, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 400
        # test_put_user_logged_out
        res = app.put_json_api(
            url_user_one,
            data_new_user_one,
            expect_errors=True)
        assert res.status_code == 401
        # test_put_wrong_user
        # User tries to update someone else's user information via put
        res = app.put_json_api(
            url_user_one,
            data_new_user_one,
            auth=user_two.auth,
            expect_errors=True)
        assert res.status_code == 403
        # test_patch_wrong_user
        # User tries to update someone else's user information via patch
        res = app.patch_json_api(url_user_one, {
            'data': {
                'id': user_one._id,
                'type': 'users',
                'attributes': {
                    'full_name': data_new_user_one['data']['attributes']['full_name'],
                }
            }
        }, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 403
        user_one.reload()
        assert user_one.fullname != data_new_user_one['data']['attributes']['full_name']
        # test_update_user_social_with_invalid_value
        # updating a social key other than profileWebsites with more than
        # one value should throw an error
        res = app.patch_json_api(url_user_one, {
            'data': {
                'id': user_one._id,
                'type': 'users',
                'attributes': {
                    'full_name': 'new_fullname',
                    'suffix': 'The Millionth',
                    'social': {
                        'github': ['even_newer_github', 'bad_github'],
                    }
                },
            }
        }, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 400
        assert 'github only accept a list of one single value' == res.json['errors'][0]['detail']
    def test_patch_user_without_required_field(
            self, app, user_one, data_new_user_one, url_user_one):
        # PATCH does not require required fields
        res = app.patch_json_api(url_user_one, {
            'data': {
                'id': user_one._id,
                'type': 'users',
                'attributes': {
                    'family_name': data_new_user_one['data']['attributes']['family_name'],
                }
            }
        }, auth=user_one.auth)
        assert res.status_code == 200
        assert res.json['data']['attributes']['family_name'] == data_new_user_one['data']['attributes']['family_name']
        user_one.reload()
        assert user_one.family_name == data_new_user_one['data']['attributes']['family_name']
    def test_partial_patch_user_logged_in(self, app, user_one, url_user_one):
        # Test to make sure new fields are patched and old fields stay the same
        res = app.patch_json_api(url_user_one, {
            'data': {
                'id': user_one._id,
                'type': 'users',
                'attributes': {
                    'full_name': 'new_fullname',
                    'suffix': 'The Millionth',
                    'social': {
                        'github': ['even_newer_github'],
                    }
                },
            }}, auth=user_one.auth)
        user_one.reload()
        assert res.status_code == 200
        assert res.json['data']['attributes']['full_name'] == 'new_fullname'
        assert res.json['data']['attributes']['suffix'] == 'The Millionth'
        social = res.json['data']['attributes']['social']
        assert 'even_newer_github' in social['github'][0]
        assert res.json['data']['attributes']['given_name'] == user_one.given_name
        assert res.json['data']['attributes']['middle_names'] == user_one.middle_names
        assert res.json['data']['attributes']['family_name'] == user_one.family_name
        assert user_one.social['profileWebsites'] == social['profileWebsites']
        assert user_one.social['twitter'] in social['twitter'][0]
        assert user_one.social['linkedIn'] in social['linkedIn'][0]
        assert user_one.social['impactStory'] in social['impactStory'][0]
        assert user_one.social['orcid'] in social['orcid'][0]
        assert user_one.social['researcherId'] in social['researcherId'][0]
        assert user_one.fullname == 'new_fullname'
        assert user_one.suffix == 'The Millionth'
        assert user_one.social['github'] == 'even_newer_github'
    def test_partial_patch_user_logged_in_no_social_fields(
            self, app, user_one, url_user_one):
        # Test to make sure new fields are patched and old fields stay the same
        res = app.patch_json_api(url_user_one, {
            'data': {
                'id': user_one._id,
                'type': 'users',
                'attributes': {
                    'full_name': 'new_fullname',
                    'suffix': 'The Millionth',
                    'social': {
                        'github': ['even_newer_github'],
                    }
                },
            }
        }, auth=user_one.auth)
        user_one.reload()
        assert res.status_code == 200
        assert res.json['data']['attributes']['full_name'] == 'new_fullname'
        assert res.json['data']['attributes']['suffix'] == 'The Millionth'
        social = res.json['data']['attributes']['social']
        assert user_one.social['github'] in social['github'][0]
        assert res.json['data']['attributes']['given_name'] == user_one.given_name
        assert res.json['data']['attributes']['middle_names'] == user_one.middle_names
        assert res.json['data']['attributes']['family_name'] == user_one.family_name
        assert user_one.social['profileWebsites'] == social['profileWebsites']
        assert user_one.social['twitter'] in social['twitter'][0]
        assert user_one.social['linkedIn'] in social['linkedIn'][0]
        assert user_one.social['impactStory'] in social['impactStory'][0]
        assert user_one.social['orcid'] in social['orcid'][0]
        assert user_one.social['researcherId'] in social['researcherId'][0]
        assert user_one.fullname == 'new_fullname'
        assert user_one.suffix == 'The Millionth'
        # was a tautology (x == x); assert the patched value instead,
        # mirroring test_partial_patch_user_logged_in above
        assert user_one.social['github'] == 'even_newer_github'
    def test_partial_put_user_logged_in(self, app, user_one, url_user_one):
        # Test to make sure new fields are patched and old fields stay the same
        res = app.put_json_api(url_user_one, {
            'data': {
                'id': user_one._id,
                'type': 'users',
                'attributes': {
                    'full_name': 'new_fullname',
                    'suffix': 'The Millionth',
                    'social': {
                        'github': ['even_newer_github'],
                    }
                },
            }
        }, auth=user_one.auth)
        user_one.reload()
        assert res.status_code == 200
        assert res.json['data']['attributes']['full_name'] == 'new_fullname'
        assert res.json['data']['attributes']['suffix'] == 'The Millionth'
        assert 'even_newer_github' in res.json['data']['attributes']['social']['github'][0]
        assert res.json['data']['attributes']['given_name'] == user_one.given_name
        assert res.json['data']['attributes']['middle_names'] == user_one.middle_names
        assert res.json['data']['attributes']['family_name'] == user_one.family_name
        assert user_one.fullname == 'new_fullname'
        assert user_one.suffix == 'The Millionth'
        assert user_one.social['github'] == 'even_newer_github'
    def test_put_user_logged_in(
            self, app, user_one, data_new_user_one, url_user_one):
        # Logged in user updates their user information via put
        res = app.put_json_api(
            url_user_one,
            data_new_user_one,
            auth=user_one.auth)
        assert res.status_code == 200
        assert res.json['data']['attributes']['full_name'] == data_new_user_one['data']['attributes']['full_name']
        assert res.json['data']['attributes']['given_name'] == data_new_user_one['data']['attributes']['given_name']
        assert res.json['data']['attributes']['middle_names'] == data_new_user_one['data']['attributes']['middle_names']
        assert res.json['data']['attributes']['family_name'] == data_new_user_one['data']['attributes']['family_name']
        assert res.json['data']['attributes']['suffix'] == data_new_user_one['data']['attributes']['suffix']
        social = res.json['data']['attributes']['social']
        assert 'even_newer_github' in social['github'][0]
        assert 'http://www.newpersonalwebsite.com' in social['profileWebsites'][0]
        assert 'newtwitter' in social['twitter'][0]
        assert 'newLinkedIn' in social['linkedIn'][0]
        assert 'newImpactStory' in social['impactStory'][0]
        assert 'newOrcid' in social['orcid'][0]
        assert 'newResearcherId' in social['researcherId'][0]
        user_one.reload()
        assert user_one.fullname == data_new_user_one['data']['attributes']['full_name']
        assert user_one.given_name == data_new_user_one['data']['attributes']['given_name']
        assert user_one.middle_names == data_new_user_one['data']['attributes']['middle_names']
        assert user_one.family_name == data_new_user_one['data']['attributes']['family_name']
        assert user_one.suffix == data_new_user_one['data']['attributes']['suffix']
        assert 'even_newer_github' in social['github'][0]
        assert 'http://www.newpersonalwebsite.com' in social['profileWebsites'][0]
        assert 'newtwitter' in social['twitter'][0]
        assert 'newLinkedIn' in social['linkedIn'][0]
        assert 'newImpactStory' in social['impactStory'][0]
        assert 'newOrcid' in social['orcid'][0]
        assert 'newResearcherId' in social['researcherId'][0]
    def test_update_user_sanitizes_html_properly(
            self, app, user_one, url_user_one):
        """Post request should update resource, and any HTML in fields should be stripped"""
        bad_fullname = 'Malcolm <strong>X</strong>'
        bad_family_name = 'X <script>alert("is")</script> a cool name'
        res = app.patch_json_api(url_user_one, {
            'data': {
                'id': user_one._id,
                'type': 'users',
                'attributes': {
                    'full_name': bad_fullname,
                    'family_name': bad_family_name,
                }
            }
        }, auth=user_one.auth)
        assert res.status_code == 200
        assert res.json['data']['attributes']['full_name'] == strip_html(
            bad_fullname)
        assert res.json['data']['attributes']['family_name'] == strip_html(
            bad_family_name)
@pytest.mark.django_db
class TestDeactivatedUser:
    """Behaviour of /users/<id>/ for disabled and unconfirmed accounts."""
    @pytest.fixture()
    def user_one(self):
        return AuthUserFactory()
    @pytest.fixture()
    def user_two(self):
        return AuthUserFactory()
    def test_requesting_as_deactivated_user_returns_400_response(
            self, app, user_one):
        """Credentials of a disabled account are rejected with a 400."""
        url = '/{}users/{}/'.format(API_BASE, user_one._id)
        # Sanity check: the account authenticates before deactivation.
        res = app.get(url, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 200
        user_one.is_disabled = True
        user_one.save()
        # Same request with the now-disabled credentials is refused.
        res = app.get(url, auth=user_one.auth, expect_errors=True)
        assert res.status_code == 400
        assert res.json['errors'][0]['detail'] == 'Making API requests with credentials associated with a deactivated account is not allowed.'
    def test_unconfirmed_users_return_entire_user_object(
            self, app, user_one, user_two):
        """An unregistered user still serializes fully, with active=False."""
        url = '/{}users/{}/'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 200
        user_one.is_registered = False
        user_one.save()
        res = app.get(url, expect_errors=True)
        assert res.status_code == 200
        attr = res.json['data']['attributes']
        assert attr['active'] is False
        assert res.json['data']['id'] == user_one._id
    def test_requesting_deactivated_user_returns_410_response_and_meta_info(
            self, app, user_one, user_two):
        """A disabled user's detail returns 410 Gone with name/image meta."""
        url = '/{}users/{}/'.format(API_BASE, user_one._id)
        res = app.get(url, auth=user_two.auth, expect_errors=True)
        assert res.status_code == 200
        user_one.is_disabled = True
        user_one.save()
        res = app.get(url, expect_errors=True)
        assert res.status_code == 410
        # The 410 error payload carries the former user's names so callers
        # can still render an attribution.
        assert res.json['errors'][0]['meta']['family_name'] == user_one.family_name
        assert res.json['errors'][0]['meta']['given_name'] == user_one.given_name
        assert res.json['errors'][0]['meta']['middle_names'] == user_one.middle_names
        assert res.json['errors'][0]['meta']['full_name'] == user_one.fullname
        assert urlparse.urlparse(
            res.json['errors'][0]['meta']['profile_image']).netloc == 'secure.gravatar.com'
        assert res.json['errors'][0]['detail'] == 'The requested user is no longer available.'
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class snmpuser(base_resource) :
""" Configuration for SNMP user resource. """
	def __init__(self) :
		# Backing fields for the SNMPv3 user attributes exposed through the
		# generated properties below; empty string means "not set".
		self._name = ""
		self._group = ""
		self._authtype = ""
		self._authpasswd = ""
		self._privtype = ""
		self._privpasswd = ""
		self._engineid = ""
		self._storagetype = ""
		self._status = ""
		# NOTE(review): leading double underscore means this attribute is
		# name-mangled to _snmpuser___count; presumably the resource count
		# returned by count operations -- confirm against the SDK's
		# count() helpers.
		self.___count = 0
	@property
	def name(self) :
		ur"""Name for the SNMPv3 user. Can consist of 1 to 31 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
		The following requirement applies only to the NetScaler CLI:
		If the name includes one or more spaces, enclose it in double or single quotation marks (for example, "my user" or 'my user').<br/>Minimum length = 1.
		"""
		# Generated NITRO accessor; the try/except is the SDK's uniform
		# accessor template.
		try :
			return self._name
		except Exception as e:
			raise e
	@name.setter
	def name(self, name) :
		ur"""Name for the SNMPv3 user. Can consist of 1 to 31 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
		The following requirement applies only to the NetScaler CLI:
		If the name includes one or more spaces, enclose it in double or single quotation marks (for example, "my user" or 'my user').<br/>Minimum length = 1
		"""
		# Generated NITRO mutator; no client-side validation -- constraints
		# in the docstring are enforced by the appliance.
		try :
			self._name = name
		except Exception as e:
			raise e
	@property
	def group(self) :
		ur"""Name of the configured SNMPv3 group to which to bind this SNMPv3 user. The access rights (bound SNMPv3 views) and security level set for this group are assigned to this user.<br/>Minimum length = 1.
		"""
		# Generated NITRO accessor (SDK template).
		try :
			return self._group
		except Exception as e:
			raise e
	@group.setter
	def group(self, group) :
		ur"""Name of the configured SNMPv3 group to which to bind this SNMPv3 user. The access rights (bound SNMPv3 views) and security level set for this group are assigned to this user.<br/>Minimum length = 1
		"""
		# Generated NITRO mutator (SDK template); validated server-side.
		try :
			self._group = group
		except Exception as e:
			raise e
	@property
	def authtype(self) :
		ur"""Authentication algorithm used by the NetScaler appliance and the SNMPv3 user for authenticating the communication between them. You must specify the same authentication algorithm when you configure the SNMPv3 user in the SNMP manager.<br/>Possible values = MD5, SHA.
		"""
		# Generated NITRO accessor (SDK template).
		try :
			return self._authtype
		except Exception as e:
			raise e
@authtype.setter
def authtype(self, authtype) :
ur"""Authentication algorithm used by the NetScaler appliance and the SNMPv3 user for authenticating the communication between them. You must specify the same authentication algorithm when you configure the SNMPv3 user in the SNMP manager.<br/>Possible values = MD5, SHA
"""
try :
self._authtype = authtype
except Exception as e:
raise e
@property
def authpasswd(self) :
ur"""Plain-text pass phrase to be used by the authentication algorithm specified by the authType (Authentication Type) parameter. Can consist of 1 to 31 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
The following requirement applies only to the NetScaler CLI:
If the pass phrase includes one or more spaces, enclose it in double or single quotation marks (for example, "my phrase" or 'my phrase').<br/>Minimum length = 8.
"""
try :
return self._authpasswd
except Exception as e:
raise e
@authpasswd.setter
def authpasswd(self, authpasswd) :
ur"""Plain-text pass phrase to be used by the authentication algorithm specified by the authType (Authentication Type) parameter. Can consist of 1 to 31 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
The following requirement applies only to the NetScaler CLI:
If the pass phrase includes one or more spaces, enclose it in double or single quotation marks (for example, "my phrase" or 'my phrase').<br/>Minimum length = 8
"""
try :
self._authpasswd = authpasswd
except Exception as e:
raise e
@property
def privtype(self) :
ur"""Encryption algorithm used by the NetScaler appliance and the SNMPv3 user for encrypting the communication between them. You must specify the same encryption algorithm when you configure the SNMPv3 user in the SNMP manager.<br/>Possible values = DES, AES.
"""
try :
return self._privtype
except Exception as e:
raise e
@privtype.setter
def privtype(self, privtype) :
ur"""Encryption algorithm used by the NetScaler appliance and the SNMPv3 user for encrypting the communication between them. You must specify the same encryption algorithm when you configure the SNMPv3 user in the SNMP manager.<br/>Possible values = DES, AES
"""
try :
self._privtype = privtype
except Exception as e:
raise e
@property
def privpasswd(self) :
ur"""Encryption key to be used by the encryption algorithm specified by the privType (Encryption Type) parameter. Can consist of 1 to 31 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
The following requirement applies only to the NetScaler CLI:
If the key includes one or more spaces, enclose it in double or single quotation marks (for example, "my key" or 'my key').<br/>Minimum length = 8.
"""
try :
return self._privpasswd
except Exception as e:
raise e
@privpasswd.setter
def privpasswd(self, privpasswd) :
ur"""Encryption key to be used by the encryption algorithm specified by the privType (Encryption Type) parameter. Can consist of 1 to 31 characters that include uppercase and lowercase letters, numbers, and the hyphen (-), period (.) pound (#), space ( ), at sign (@), equals (=), colon (:), and underscore (_) characters.
The following requirement applies only to the NetScaler CLI:
If the key includes one or more spaces, enclose it in double or single quotation marks (for example, "my key" or 'my key').<br/>Minimum length = 8
"""
try :
self._privpasswd = privpasswd
except Exception as e:
raise e
@property
def engineid(self) :
ur"""The context engine ID of the user.
"""
try :
return self._engineid
except Exception as e:
raise e
@property
def storagetype(self) :
ur"""The storage type for this user.<br/>Possible values = volatile, nonVolatile.
"""
try :
return self._storagetype
except Exception as e:
raise e
@property
def status(self) :
ur"""The status of this user.<br/>Possible values = active.
"""
try :
return self._status
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
ur""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(snmpuser_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.snmpuser
except Exception as e :
raise e
def _get_object_name(self) :
ur""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def add(cls, client, resource) :
ur""" Use this API to add snmpuser.
"""
try :
if type(resource) is not list :
addresource = snmpuser()
addresource.name = resource.name
addresource.group = resource.group
addresource.authtype = resource.authtype
addresource.authpasswd = resource.authpasswd
addresource.privtype = resource.privtype
addresource.privpasswd = resource.privpasswd
return addresource.add_resource(client)
else :
if (resource and len(resource) > 0) :
addresources = [ snmpuser() for _ in range(len(resource))]
for i in range(len(resource)) :
addresources[i].name = resource[i].name
addresources[i].group = resource[i].group
addresources[i].authtype = resource[i].authtype
addresources[i].authpasswd = resource[i].authpasswd
addresources[i].privtype = resource[i].privtype
addresources[i].privpasswd = resource[i].privpasswd
result = cls.add_bulk_request(client, addresources)
return result
except Exception as e :
raise e
@classmethod
def delete(cls, client, resource) :
ur""" Use this API to delete snmpuser.
"""
try :
if type(resource) is not list :
deleteresource = snmpuser()
if type(resource) != type(deleteresource):
deleteresource.name = resource
else :
deleteresource.name = resource.name
return deleteresource.delete_resource(client)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
deleteresources = [ snmpuser() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
deleteresources = [ snmpuser() for _ in range(len(resource))]
for i in range(len(resource)) :
deleteresources[i].name = resource[i].name
result = cls.delete_bulk_request(client, deleteresources)
return result
except Exception as e :
raise e
@classmethod
def update(cls, client, resource) :
ur""" Use this API to update snmpuser.
"""
try :
if type(resource) is not list :
updateresource = snmpuser()
updateresource.name = resource.name
updateresource.group = resource.group
updateresource.authtype = resource.authtype
updateresource.authpasswd = resource.authpasswd
updateresource.privtype = resource.privtype
updateresource.privpasswd = resource.privpasswd
return updateresource.update_resource(client)
else :
if (resource and len(resource) > 0) :
updateresources = [ snmpuser() for _ in range(len(resource))]
for i in range(len(resource)) :
updateresources[i].name = resource[i].name
updateresources[i].group = resource[i].group
updateresources[i].authtype = resource[i].authtype
updateresources[i].authpasswd = resource[i].authpasswd
updateresources[i].privtype = resource[i].privtype
updateresources[i].privpasswd = resource[i].privpasswd
result = cls.update_bulk_request(client, updateresources)
return result
except Exception as e :
raise e
@classmethod
def unset(cls, client, resource, args) :
ur""" Use this API to unset the properties of snmpuser resource.
Properties that need to be unset are specified in args array.
"""
try :
if type(resource) is not list :
unsetresource = snmpuser()
if type(resource) != type(unsetresource):
unsetresource.name = resource
else :
unsetresource.name = resource.name
return unsetresource.unset_resource(client, args)
else :
if type(resource[0]) != cls :
if (resource and len(resource) > 0) :
unsetresources = [ snmpuser() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i]
else :
if (resource and len(resource) > 0) :
unsetresources = [ snmpuser() for _ in range(len(resource))]
for i in range(len(resource)) :
unsetresources[i].name = resource[i].name
result = cls.unset_bulk_request(client, unsetresources, args)
return result
except Exception as e :
raise e
@classmethod
def get(cls, client, name="", option_="") :
ur""" Use this API to fetch all the snmpuser resources that are configured on netscaler.
"""
try :
if not name :
obj = snmpuser()
response = obj.get_resources(client, option_)
else :
if type(name) != cls :
if type(name) is not list :
obj = snmpuser()
obj.name = name
response = obj.get_resource(client, option_)
else :
if name and len(name) > 0 :
response = [snmpuser() for _ in range(len(name))]
obj = [snmpuser() for _ in range(len(name))]
for i in range(len(name)) :
obj[i] = snmpuser()
obj[i].name = name[i]
response[i] = obj[i].get_resource(client, option_)
return response
except Exception as e :
raise e
@classmethod
def get_filtered(cls, client, filter_) :
ur""" Use this API to fetch filtered set of snmpuser resources.
filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = snmpuser()
option_ = options()
option_.filter = filter_
response = obj.getfiltered(client, option_)
return response
except Exception as e :
raise e
@classmethod
def count(cls, client) :
ur""" Use this API to count the snmpuser resources configured on NetScaler.
"""
try :
obj = snmpuser()
option_ = options()
option_.count = True
response = obj.get_resources(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
@classmethod
def count_filtered(cls, client, filter_) :
ur""" Use this API to count filtered the set of snmpuser resources.
Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
"""
try :
obj = snmpuser()
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(client, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e :
raise e
class Storagetype:
Volatile = "volatile"
nonVolatile = "nonVolatile"
class Authtype:
MD5 = "MD5"
SHA = "SHA"
class Status:
active = "active"
class Privtype:
DES = "DES"
AES = "AES"
class snmpuser_response(base_response) :
    """ NITRO response envelope for snmpuser requests.

    Carries the standard NITRO status fields plus a pre-sized list of
    snmpuser payload objects that the payload formatter fills in.
    """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Pre-size the payload list (the original code also assigned an
        # empty list first — a dead store, removed here).
        self.snmpuser = [snmpuser() for _ in range(length)]
| |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from unittest.mock import patch
from heatclient import exc as heatexc
from oslo_utils import uuidutils
import magnum.conf
from magnum.drivers.heat import driver as heat_driver
from magnum.drivers.k8s_fedora_atomic_v1 import driver as k8s_atomic_dr
from magnum import objects
from magnum.objects.fields import ClusterStatus as cluster_status
from magnum.tests import base
from magnum.tests.unit.db import utils
CONF = magnum.conf.CONF
class TestHeatPoller(base.TestCase):
def setUp(self):
super(TestHeatPoller, self).setUp()
self.mock_stacks = dict()
self.def_ngs = list()
def _create_nodegroup(self, cluster, uuid, stack_id, name=None, role=None,
is_default=False, stack_status=None,
status_reason=None, stack_params=None,
stack_missing=False):
"""Create a new nodegroup
Util that creates a new non-default ng, adds it to the cluster
and creates the corresponding mock stack.
"""
role = 'worker' if role is None else role
ng = mock.MagicMock(uuid=uuid, role=role, is_default=is_default,
stack_id=stack_id)
if name is not None:
type(ng).name = name
cluster.nodegroups.append(ng)
if stack_status is None:
stack_status = cluster_status.CREATE_COMPLETE
if status_reason is None:
status_reason = 'stack created'
stack_params = dict() if stack_params is None else stack_params
stack = mock.MagicMock(stack_status=stack_status,
stack_status_reason=status_reason,
parameters=stack_params)
# In order to simulate a stack not found from osc we don't add the
# stack in the dict.
if not stack_missing:
self.mock_stacks.update({stack_id: stack})
else:
# In case the stack is missing we need
# to set the status to the ng, so that
# _sync_missing_heat_stack knows which
# was the previous state.
ng.status = stack_status
return ng
    @patch('magnum.conductor.utils.retrieve_cluster_template')
    @patch('oslo_config.cfg')
    @patch('magnum.common.clients.OpenStackClients')
    @patch('magnum.drivers.common.driver.Driver.get_driver')
    def setup_poll_test(self, mock_driver, mock_openstack_client, cfg,
                        mock_retrieve_cluster_template,
                        default_stack_status=None, status_reason=None,
                        stack_params=None, stack_missing=False):
        """Build a mocked cluster and a HeatPoller wired to a fake heat client.

        Patch decorators apply bottom-up, so the mock arguments map as:
        mock_driver <- Driver.get_driver,
        mock_openstack_client <- OpenStackClients,
        cfg <- oslo_config.cfg,
        mock_retrieve_cluster_template <- retrieve_cluster_template.
        Returns (cluster, poller).
        """
        cfg.CONF.cluster_heat.max_attempts = 10
        if default_stack_status is None:
            default_stack_status = cluster_status.CREATE_COMPLETE

        cluster = mock.MagicMock(nodegroups=list(),
                                 uuid=uuidutils.generate_uuid())
        # Both default nodegroups share 'stack1': the default worker and
        # master live in the same heat stack.
        def_worker = self._create_nodegroup(cluster, 'worker_ng', 'stack1',
                                            name='worker_ng', role='worker',
                                            is_default=True,
                                            stack_status=default_stack_status,
                                            status_reason=status_reason,
                                            stack_params=stack_params,
                                            stack_missing=stack_missing)
        def_master = self._create_nodegroup(cluster, 'master_ng', 'stack1',
                                            name='master_ng', role='master',
                                            is_default=True,
                                            stack_status=default_stack_status,
                                            status_reason=status_reason,
                                            stack_params=stack_params,
                                            stack_missing=stack_missing)
        cluster.default_ng_worker = def_worker
        cluster.default_ng_master = def_master
        self.def_ngs = [def_worker, def_master]

        def get_ng_stack(stack_id, resolve_outputs=False):
            # Stand-in for heatclient's stacks.get.
            try:
                return self.mock_stacks[stack_id]
            except KeyError:
                # In this case we intentionally didn't add the stack
                # to the mock_stacks dict to simulate a not-found error.
                # For this reason raise heat NotFound exception.
                raise heatexc.NotFound("stack not found")

        cluster_template_dict = utils.get_test_cluster_template(
            coe='kubernetes')
        mock_heat_client = mock.MagicMock()
        mock_heat_client.stacks.get = get_ng_stack
        mock_openstack_client.heat.return_value = mock_heat_client
        cluster_template = objects.ClusterTemplate(self.context,
                                                   **cluster_template_dict)
        mock_retrieve_cluster_template.return_value = cluster_template
        mock_driver.return_value = k8s_atomic_dr.Driver()
        poller = heat_driver.HeatPoller(mock_openstack_client,
                                        mock.MagicMock(), cluster,
                                        k8s_atomic_dr.Driver())
        # Version probing would hit the network; stub it out.
        poller.get_version_info = mock.MagicMock()
        return (cluster, poller)
    def test_poll_and_check_creating(self):
        # CREATE_IN_PROGRESS stacks keep cluster and nodegroups in-progress.
        cluster, poller = self.setup_poll_test(
            default_stack_status=cluster_status.CREATE_IN_PROGRESS)
        cluster.status = cluster_status.CREATE_IN_PROGRESS
        poller.poll_and_check()
        for ng in cluster.nodegroups:
            self.assertEqual(cluster_status.CREATE_IN_PROGRESS, ng.status)
        self.assertEqual(cluster_status.CREATE_IN_PROGRESS, cluster.status)
        self.assertEqual(1, cluster.save.call_count)

    def test_poll_and_check_create_complete(self):
        # CREATE_COMPLETE stacks promote every nodegroup and the cluster.
        cluster, poller = self.setup_poll_test()
        cluster.status = cluster_status.CREATE_IN_PROGRESS
        poller.poll_and_check()
        for ng in cluster.nodegroups:
            self.assertEqual(cluster_status.CREATE_COMPLETE, ng.status)
            self.assertEqual('stack created', ng.status_reason)
            self.assertEqual(1, ng.save.call_count)
        self.assertEqual(cluster_status.CREATE_COMPLETE, cluster.status)
        self.assertEqual(1, cluster.save.call_count)

    def test_poll_and_check_create_failed(self):
        cluster, poller = self.setup_poll_test(
            default_stack_status=cluster_status.CREATE_FAILED)
        cluster.status = cluster_status.CREATE_IN_PROGRESS
        self.assertIsNone(poller.poll_and_check())
        for ng in cluster.nodegroups:
            self.assertEqual(cluster_status.CREATE_FAILED, ng.status)
            # Two calls to save since the stack outputs are synced too.
            self.assertEqual(2, ng.save.call_count)
        self.assertEqual(cluster_status.CREATE_FAILED, cluster.status)
        self.assertEqual(1, cluster.save.call_count)
    def test_poll_and_check_updating(self):
        # UPDATE_IN_PROGRESS stacks keep cluster and nodegroups in-progress.
        cluster, poller = self.setup_poll_test(
            default_stack_status=cluster_status.UPDATE_IN_PROGRESS)
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        poller.poll_and_check()
        for ng in cluster.nodegroups:
            self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, ng.status)
            self.assertEqual(1, ng.save.call_count)
        self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, cluster.status)
        self.assertEqual(1, cluster.save.call_count)

    def test_poll_and_check_update_complete(self):
        stack_params = {
            'number_of_minions': 2,
            'number_of_masters': 1
        }
        cluster, poller = self.setup_poll_test(
            default_stack_status=cluster_status.UPDATE_COMPLETE,
            stack_params=stack_params)
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        self.assertIsNone(poller.poll_and_check())
        for ng in cluster.nodegroups:
            self.assertEqual(cluster_status.UPDATE_COMPLETE, ng.status)
        # Second save per nodegroup comes from syncing the stack parameters
        # (node counts) back onto the nodegroups.
        self.assertEqual(2, cluster.default_ng_worker.save.call_count)
        self.assertEqual(2, cluster.default_ng_master.save.call_count)
        self.assertEqual(2, cluster.default_ng_worker.node_count)
        self.assertEqual(1, cluster.default_ng_master.node_count)
        self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status)
        self.assertEqual(1, cluster.save.call_count)

    def test_poll_and_check_update_failed(self):
        stack_params = {
            'number_of_minions': 2,
            'number_of_masters': 1
        }
        cluster, poller = self.setup_poll_test(
            default_stack_status=cluster_status.UPDATE_FAILED,
            stack_params=stack_params)
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        poller.poll_and_check()
        for ng in cluster.nodegroups:
            self.assertEqual(cluster_status.UPDATE_FAILED, ng.status)
            # We have several calls to save because the stack outputs are
            # stored too.
            self.assertEqual(3, ng.save.call_count)
        self.assertEqual(2, cluster.default_ng_worker.node_count)
        self.assertEqual(1, cluster.default_ng_master.node_count)
        self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status)
        self.assertEqual(1, cluster.save.call_count)
    def test_poll_and_check_deleting(self):
        cluster, poller = self.setup_poll_test(
            default_stack_status=cluster_status.DELETE_IN_PROGRESS)
        cluster.status = cluster_status.DELETE_IN_PROGRESS
        poller.poll_and_check()
        for ng in cluster.nodegroups:
            self.assertEqual(cluster_status.DELETE_IN_PROGRESS, ng.status)
            # Only the status sync saves here — outputs are not stored while
            # the delete is still in progress (the original comment claiming
            # two saves contradicted the asserted count of 1).
            self.assertEqual(1, ng.save.call_count)
        self.assertEqual(cluster_status.DELETE_IN_PROGRESS, cluster.status)
        self.assertEqual(1, cluster.save.call_count)

    def test_poll_and_check_deleted(self):
        # Default nodegroups are saved but never destroyed by the poller.
        cluster, poller = self.setup_poll_test(
            default_stack_status=cluster_status.DELETE_COMPLETE)
        cluster.status = cluster_status.DELETE_IN_PROGRESS
        self.assertIsNone(poller.poll_and_check())
        self.assertEqual(cluster_status.DELETE_COMPLETE,
                         cluster.default_ng_worker.status)
        self.assertEqual(1, cluster.default_ng_worker.save.call_count)
        self.assertEqual(0, cluster.default_ng_worker.destroy.call_count)
        self.assertEqual(cluster_status.DELETE_COMPLETE,
                         cluster.default_ng_master.status)
        self.assertEqual(1, cluster.default_ng_master.save.call_count)
        self.assertEqual(0, cluster.default_ng_master.destroy.call_count)
        self.assertEqual(cluster_status.DELETE_COMPLETE, cluster.status)
        self.assertEqual(1, cluster.save.call_count)
        self.assertEqual(0, cluster.destroy.call_count)

    def test_poll_and_check_delete_failed(self):
        cluster, poller = self.setup_poll_test(
            default_stack_status=cluster_status.DELETE_FAILED)
        cluster.status = cluster_status.DELETE_IN_PROGRESS
        poller.poll_and_check()
        self.assertEqual(cluster_status.DELETE_FAILED,
                         cluster.default_ng_worker.status)
        # We have two calls to save because the stack outputs are
        # stored too.
        self.assertEqual(2, cluster.default_ng_worker.save.call_count)
        self.assertEqual(0, cluster.default_ng_worker.destroy.call_count)
        self.assertEqual(cluster_status.DELETE_FAILED,
                         cluster.default_ng_master.status)
        self.assertEqual(2, cluster.default_ng_master.save.call_count)
        self.assertEqual(0, cluster.default_ng_master.destroy.call_count)
        self.assertEqual(cluster_status.DELETE_FAILED, cluster.status)
        self.assertEqual(1, cluster.save.call_count)
        self.assertEqual(0, cluster.destroy.call_count)
    def test_poll_done_rollback_complete(self):
        stack_params = {
            'number_of_minions': 1,
            'number_of_masters': 1
        }
        cluster, poller = self.setup_poll_test(
            default_stack_status=cluster_status.ROLLBACK_COMPLETE,
            stack_params=stack_params)
        self.assertIsNone(poller.poll_and_check())
        self.assertEqual(1, cluster.save.call_count)
        self.assertEqual(cluster_status.ROLLBACK_COMPLETE, cluster.status)
        # Node counts are re-read from the rolled-back stack parameters.
        self.assertEqual(1, cluster.default_ng_worker.node_count)
        self.assertEqual(1, cluster.default_ng_master.node_count)

    def test_poll_done_rollback_failed(self):
        stack_params = {
            'number_of_minions': 1,
            'number_of_masters': 1
        }
        cluster, poller = self.setup_poll_test(
            default_stack_status=cluster_status.ROLLBACK_FAILED,
            stack_params=stack_params)
        self.assertIsNone(poller.poll_and_check())
        self.assertEqual(1, cluster.save.call_count)
        self.assertEqual(cluster_status.ROLLBACK_FAILED, cluster.status)
        self.assertEqual(1, cluster.default_ng_worker.node_count)
        self.assertEqual(1, cluster.default_ng_master.node_count)
    def test_poll_and_check_new_ng_creating(self):
        cluster, poller = self.setup_poll_test()
        ng = self._create_nodegroup(
            cluster, 'ng1', 'stack2',
            stack_status=cluster_status.CREATE_IN_PROGRESS)
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
            self.assertEqual(1, def_ng.save.call_count)
        self.assertEqual(cluster_status.CREATE_IN_PROGRESS, ng.status)
        self.assertEqual(1, ng.save.call_count)
        # A single in-progress nodegroup keeps the whole cluster in-progress.
        self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, cluster.status)
        self.assertEqual(1, cluster.save.call_count)

    def test_poll_and_check_new_ng_created(self):
        cluster, poller = self.setup_poll_test()
        ng = self._create_nodegroup(cluster, 'ng1', 'stack2')
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
            self.assertEqual(1, def_ng.save.call_count)
        self.assertEqual(cluster_status.CREATE_COMPLETE, ng.status)
        self.assertEqual(1, ng.save.call_count)
        self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status)
        self.assertEqual(1, cluster.save.call_count)

    def test_poll_and_check_new_ng_create_failed(self):
        cluster, poller = self.setup_poll_test()
        ng = self._create_nodegroup(
            cluster, 'ng1', 'stack2',
            stack_status=cluster_status.CREATE_FAILED,
            status_reason='stack failed')
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
            self.assertEqual('stack created', def_ng.status_reason)
            self.assertEqual(1, def_ng.save.call_count)
        self.assertEqual(cluster_status.CREATE_FAILED, ng.status)
        self.assertEqual('stack failed', ng.status_reason)
        # Failed nodegroup saves twice: status sync plus stack-output sync.
        self.assertEqual(2, ng.save.call_count)
        self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status)
        self.assertEqual(1, cluster.save.call_count)
    def test_poll_and_check_new_ng_updated(self):
        cluster, poller = self.setup_poll_test()
        stack_params = {'number_of_minions': 3}
        ng = self._create_nodegroup(
            cluster, 'ng1', 'stack2',
            stack_status=cluster_status.UPDATE_COMPLETE,
            stack_params=stack_params)
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
            self.assertEqual(1, def_ng.save.call_count)
        self.assertEqual(cluster_status.UPDATE_COMPLETE, ng.status)
        # node_count is refreshed from the stack's number_of_minions.
        self.assertEqual(3, ng.node_count)
        self.assertEqual(2, ng.save.call_count)
        self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status)
        self.assertEqual(1, cluster.save.call_count)

    def test_poll_and_check_new_ng_update_failed(self):
        cluster, poller = self.setup_poll_test()
        stack_params = {'number_of_minions': 3}
        ng = self._create_nodegroup(
            cluster, 'ng1', 'stack2',
            stack_status=cluster_status.UPDATE_FAILED,
            stack_params=stack_params)
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
            self.assertEqual(1, def_ng.save.call_count)
        self.assertEqual(cluster_status.UPDATE_FAILED, ng.status)
        self.assertEqual(3, ng.node_count)
        self.assertEqual(3, ng.save.call_count)
        self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status)
        self.assertEqual(1, cluster.save.call_count)
    def test_poll_and_check_new_ng_deleting(self):
        cluster, poller = self.setup_poll_test()
        ng = self._create_nodegroup(
            cluster, 'ng1', 'stack2',
            stack_status=cluster_status.DELETE_IN_PROGRESS)
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
            self.assertEqual(1, def_ng.save.call_count)
        self.assertEqual(cluster_status.DELETE_IN_PROGRESS, ng.status)
        self.assertEqual(1, ng.save.call_count)
        self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, cluster.status)
        self.assertEqual(1, cluster.save.call_count)

    def test_poll_and_check_new_ng_deleted(self):
        # A fully deleted non-default nodegroup is destroyed, not saved.
        cluster, poller = self.setup_poll_test()
        ng = self._create_nodegroup(
            cluster, 'ng1', 'stack2',
            stack_status=cluster_status.DELETE_COMPLETE)
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
            self.assertEqual(1, def_ng.save.call_count)
        self.assertEqual(1, ng.destroy.call_count)
        self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status)
        self.assertEqual(1, cluster.save.call_count)

    def test_poll_and_check_new_ng_delete_failed(self):
        cluster, poller = self.setup_poll_test()
        ng = self._create_nodegroup(
            cluster, 'ng1', 'stack2',
            stack_status=cluster_status.DELETE_FAILED)
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
            self.assertEqual(1, def_ng.save.call_count)
        self.assertEqual(cluster_status.DELETE_FAILED, ng.status)
        self.assertEqual(2, ng.save.call_count)
        # Failed delete keeps the nodegroup record around.
        self.assertEqual(0, ng.destroy.call_count)
        self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status)
        self.assertEqual(1, cluster.save.call_count)
    def test_poll_and_check_new_ng_rollback_complete(self):
        cluster, poller = self.setup_poll_test()
        stack_params = {
            'number_of_minions': 2,
            'number_of_masters': 0
        }
        ng = self._create_nodegroup(
            cluster, 'ng1', 'stack2',
            stack_status=cluster_status.ROLLBACK_COMPLETE,
            stack_params=stack_params)
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
            self.assertEqual(1, def_ng.save.call_count)
        self.assertEqual(cluster_status.ROLLBACK_COMPLETE, ng.status)
        self.assertEqual(2, ng.node_count)
        self.assertEqual(3, ng.save.call_count)
        # Rollback never destroys the nodegroup.
        self.assertEqual(0, ng.destroy.call_count)
        self.assertEqual(cluster_status.UPDATE_COMPLETE, cluster.status)
        self.assertEqual(1, cluster.save.call_count)

    def test_poll_and_check_new_ng_rollback_failed(self):
        cluster, poller = self.setup_poll_test()
        stack_params = {
            'number_of_minions': 2,
            'number_of_masters': 0
        }
        ng = self._create_nodegroup(
            cluster, 'ng1', 'stack2',
            stack_status=cluster_status.ROLLBACK_FAILED,
            stack_params=stack_params)
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
            self.assertEqual(1, def_ng.save.call_count)
        self.assertEqual(cluster_status.ROLLBACK_FAILED, ng.status)
        self.assertEqual(2, ng.node_count)
        self.assertEqual(3, ng.save.call_count)
        self.assertEqual(0, ng.destroy.call_count)
        self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status)
        self.assertEqual(1, cluster.save.call_count)
    def test_poll_and_check_multiple_new_ngs(self):
        cluster, poller = self.setup_poll_test()
        ng1 = self._create_nodegroup(
            cluster, 'ng1', 'stack2',
            stack_status=cluster_status.CREATE_COMPLETE)
        ng2 = self._create_nodegroup(
            cluster, 'ng2', 'stack3',
            stack_status=cluster_status.UPDATE_IN_PROGRESS)
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
            self.assertEqual(1, def_ng.save.call_count)
        self.assertEqual(cluster_status.CREATE_COMPLETE, ng1.status)
        self.assertEqual(1, ng1.save.call_count)
        self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, ng2.status)
        self.assertEqual(1, ng2.save.call_count)
        # The in-progress nodegroup dominates the aggregate cluster status.
        self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, cluster.status)
        self.assertEqual(1, cluster.save.call_count)

    def test_poll_and_check_multiple_ngs_failed_and_updating(self):
        # An in-progress nodegroup keeps the cluster in-progress even though
        # another nodegroup has already failed.
        cluster, poller = self.setup_poll_test()
        ng1 = self._create_nodegroup(
            cluster, 'ng1', 'stack2',
            stack_status=cluster_status.CREATE_FAILED)
        ng2 = self._create_nodegroup(
            cluster, 'ng2', 'stack3',
            stack_status=cluster_status.UPDATE_IN_PROGRESS)
        cluster.status = cluster_status.UPDATE_IN_PROGRESS
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
            self.assertEqual(1, def_ng.save.call_count)
        self.assertEqual(cluster_status.CREATE_FAILED, ng1.status)
        self.assertEqual(2, ng1.save.call_count)
        self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, ng2.status)
        self.assertEqual(1, ng2.save.call_count)
        self.assertEqual(cluster_status.UPDATE_IN_PROGRESS, cluster.status)
        self.assertEqual(1, cluster.save.call_count)
@patch('magnum.drivers.heat.driver.trust_manager')
@patch('magnum.drivers.heat.driver.cert_manager')
def test_delete_complete(self, cert_manager, trust_manager):
cluster, poller = self.setup_poll_test()
poller._delete_complete()
self.assertEqual(
1, cert_manager.delete_certificates_from_cluster.call_count)
self.assertEqual(1, trust_manager.delete_trustee_and_trust.call_count)
@patch('magnum.drivers.heat.driver.LOG')
def test_nodegroup_failed(self, logger):
cluster, poller = self.setup_poll_test(
default_stack_status=cluster_status.CREATE_FAILED)
self._create_nodegroup(cluster, 'ng1', 'stack2',
stack_status=cluster_status.CREATE_FAILED)
poller.poll_and_check()
# Verify that we have one log for each failed nodegroup
self.assertEqual(3, logger.error.call_count)
    def test_stack_not_found_creating(self):
        # Missing heat stack while creating -> nodegroups flip to failed.
        cluster, poller = self.setup_poll_test(
            default_stack_status=cluster_status.CREATE_IN_PROGRESS,
            stack_missing=True)
        poller.poll_and_check()
        for ng in cluster.nodegroups:
            self.assertEqual(cluster_status.CREATE_FAILED, ng.status)

    def test_stack_not_found_updating(self):
        cluster, poller = self.setup_poll_test(
            default_stack_status=cluster_status.UPDATE_IN_PROGRESS,
            stack_missing=True)
        poller.poll_and_check()
        for ng in cluster.nodegroups:
            self.assertEqual(cluster_status.UPDATE_FAILED, ng.status)

    def test_stack_not_found_deleting(self):
        # A stack that vanished mid-delete counts as successfully deleted.
        cluster, poller = self.setup_poll_test(
            default_stack_status=cluster_status.DELETE_IN_PROGRESS,
            stack_missing=True)
        poller.poll_and_check()
        for ng in cluster.nodegroups:
            self.assertEqual(cluster_status.DELETE_COMPLETE, ng.status)

    def test_stack_not_found_new_ng_creating(self):
        # Same missing-stack handling, but for a non-default nodegroup.
        cluster, poller = self.setup_poll_test()
        ng = self._create_nodegroup(
            cluster, 'ng1', 'stack2',
            stack_status=cluster_status.CREATE_IN_PROGRESS, stack_missing=True)
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
        self.assertEqual(cluster_status.CREATE_FAILED, ng.status)

    def test_stack_not_found_new_ng_updating(self):
        cluster, poller = self.setup_poll_test()
        ng = self._create_nodegroup(
            cluster, 'ng1', 'stack2',
            stack_status=cluster_status.UPDATE_IN_PROGRESS, stack_missing=True)
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
        self.assertEqual(cluster_status.UPDATE_FAILED, ng.status)

    def test_stack_not_found_new_ng_deleting(self):
        cluster, poller = self.setup_poll_test()
        ng = self._create_nodegroup(
            cluster, 'ng1', 'stack2',
            stack_status=cluster_status.DELETE_IN_PROGRESS, stack_missing=True)
        poller.poll_and_check()
        for def_ng in self.def_ngs:
            self.assertEqual(cluster_status.CREATE_COMPLETE, def_ng.status)
        self.assertEqual(cluster_status.DELETE_COMPLETE, ng.status)
def test_poll_and_check_failed_default_ng(self):
cluster, poller = self.setup_poll_test(
default_stack_status=cluster_status.UPDATE_FAILED)
ng = self._create_nodegroup(
cluster, 'ng', 'stack2',
stack_status=cluster_status.UPDATE_COMPLETE)
cluster.status = cluster_status.UPDATE_IN_PROGRESS
poller.poll_and_check()
for def_ng in self.def_ngs:
self.assertEqual(cluster_status.UPDATE_FAILED, def_ng.status)
self.assertEqual(2, def_ng.save.call_count)
self.assertEqual(cluster_status.UPDATE_COMPLETE, ng.status)
self.assertEqual(1, ng.save.call_count)
self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status)
self.assertEqual(1, cluster.save.call_count)
def test_poll_and_check_rollback_failed_default_ng(self):
cluster, poller = self.setup_poll_test(
default_stack_status=cluster_status.ROLLBACK_FAILED)
ng = self._create_nodegroup(
cluster, 'ng', 'stack2',
stack_status=cluster_status.UPDATE_COMPLETE)
cluster.status = cluster_status.UPDATE_IN_PROGRESS
poller.poll_and_check()
for def_ng in self.def_ngs:
self.assertEqual(cluster_status.ROLLBACK_FAILED, def_ng.status)
self.assertEqual(2, def_ng.save.call_count)
self.assertEqual(cluster_status.UPDATE_COMPLETE, ng.status)
self.assertEqual(1, ng.save.call_count)
self.assertEqual(cluster_status.UPDATE_FAILED, cluster.status)
self.assertEqual(1, cluster.save.call_count)
def test_poll_and_check_rollback_failed_def_ng(self):
cluster, poller = self.setup_poll_test(
default_stack_status=cluster_status.DELETE_FAILED)
ng = self._create_nodegroup(
cluster, 'ng', 'stack2',
stack_status=cluster_status.DELETE_IN_PROGRESS)
cluster.status = cluster_status.DELETE_IN_PROGRESS
poller.poll_and_check()
for def_ng in self.def_ngs:
self.assertEqual(cluster_status.DELETE_FAILED, def_ng.status)
self.assertEqual(2, def_ng.save.call_count)
self.assertEqual(cluster_status.DELETE_IN_PROGRESS, ng.status)
self.assertEqual(1, ng.save.call_count)
self.assertEqual(cluster_status.DELETE_IN_PROGRESS, cluster.status)
self.assertEqual(1, cluster.save.call_count)
def test_poll_and_check_delete_failed_def_ng(self):
cluster, poller = self.setup_poll_test(
default_stack_status=cluster_status.DELETE_FAILED)
ng = self._create_nodegroup(
cluster, 'ng', 'stack2',
stack_status=cluster_status.DELETE_COMPLETE)
cluster.status = cluster_status.DELETE_IN_PROGRESS
poller.poll_and_check()
for def_ng in self.def_ngs:
self.assertEqual(cluster_status.DELETE_FAILED, def_ng.status)
self.assertEqual(2, def_ng.save.call_count)
# Check that the non-default ng was deleted
self.assertEqual(1, ng.destroy.call_count)
self.assertEqual(cluster_status.DELETE_FAILED, cluster.status)
self.assertEqual(1, cluster.save.call_count)
self.assertIn('worker_ng', cluster.status_reason)
self.assertIn('master_ng', cluster.status_reason)
#
# Copyright (c) SAS Institute Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import copy
import imp
import inspect
import itertools
import new
import os
import string
import sys
import types
import tempfile
import traceback
from conary.repository import errors, trovesource
from conary.build import defaultrecipes
from conary.build import recipe, use
from conary.build import errors as builderrors
from conary.build.errors import RecipeFileError
from conary.build.factory import Factory as FactoryRecipe
from conary.conaryclient import cmdline
from conary.deps import deps
from conary.lib import api, graph, log, util
from conary.local import database
from conary import trove
from conary import versions
class SubloadData(object):
    """Bundle of the context loadSuperClass/loadInstalled need.

    Collecting these values here keeps the plumbing out of the
    Importer class itself.
    """

    def __init__(self, cfg, repos, db, buildFlavor, directory,
                 branch, name, ignoreInstalled, overrides):
        self.cfg = cfg
        self.repos = repos
        self.db = db
        self.buildFlavor = buildFlavor
        self.parentDir = directory
        self.branch = branch
        self.parentPackageName = name
        self.ignoreInstalled = ignoreInstalled
        # Never share a mutable default between instances.
        if overrides is None:
            overrides = {}
        self.overrides = overrides
class Importer(object):
    """Builds an isolated module namespace in which recipe code executes.

    Each recipe file is exec'd inside a freshly created module whose
    namespace is pre-populated with the standard recipe superclasses and
    helper modules (baseModuleImports), plus the loadInstalled /
    loadSuperClass entry points recipes use to pull in other recipes.
    """

    # (package, names) pairs injected into every recipe namespace before
    # the recipe source runs; a ('from', 'to') inner tuple renames the
    # imported object (see _localImport).
    baseModuleImports = [
        ('conary.build', ('build', 'action', 'use')),
        ('conary.build.use', ('Arch', 'Use', ('LocalFlags', 'Flags'),
                              'PackageFlags')),
        ('conary.build.packagerecipe',
            ('clearBuildReqs', 'clearBuildRequires',
             'clearCrossReqs', 'clearCrossRequires',
             'AbstractPackageRecipe',
             'SourcePackageRecipe',
             'BaseRequiresRecipe',
             'PackageRecipe', 'BuildPackageRecipe',
             'CPackageRecipe', 'AutoPackageRecipe',
            )),
        ('conary.build.grouprecipe', ('_BaseGroupRecipe', '_GroupRecipe',
                                      'GroupRecipe')),
        ('conary.build.groupsetrecipe', ('_GroupSetRecipe',
                                         'GroupSetRecipe')),
        ('conary.build.filesetrecipe', ('_FilesetRecipe', 'FilesetRecipe')),
        ('conary.build.redirectrecipe', ('_RedirectRecipe', 'RedirectRecipe')),
        ('conary.build.derivedrecipe', ('DerivedChangesetExploder',
                                        'AbstractDerivedPackageRecipe',
                                        'DerivedPackageRecipe')),
        ('conary.build.inforecipe', ('UserGroupInfoRecipe',
                                     'UserInfoRecipe', 'GroupInfoRecipe')),
        ('conary.build.capsulerecipe', ('CapsuleRecipe',
            )),
        ('conary.build.derivedcapsulerecipe', ('DerivedChangesetExploder',
                                               'AbstractDerivedCapsuleRecipe',
                                               'DerivedCapsuleRecipe')),
        ('conary.lib', ('util',)),
        ('os',),
        ('re',),
        ('sys',),
        ('stat',)]

    def __init__(self, objDict = {}, fileName = 'unknownfile.py',
                 baseName = 'unknown', factory = False,
                 subloadData = None):
        # NOTE(review): the baseName parameter is ignored below --
        # self.baseName is always derived from fileName.
        # The mutable objDict default is only read (copied), never
        # mutated, so sharing the default dict across calls is harmless.
        self.fileName = fileName
        self.baseName = os.path.basename(self.fileName)
        # can't have a '.' in a module name or import code gets confused
        self.module = imp.new_module(self.baseName.replace('.', '-'))
        self.subloadData = subloadData
        # (name, version, flavor) tuples for every recipe trove loaded
        # while executing this module, and the specs that produced them.
        self.loadedTroves = []
        self.loadedSpecs = {}

        # Seed the namespace with the standard recipe environment.
        for args in self.baseModuleImports:
            self._localImport(*args)
        if factory:
            self._localImport('conary.build.factory',
                              ('Factory', 'FactoryException' ))
        self._copyReusedRecipes()

        if objDict:
            self.module.__dict__.update(objDict.copy())

        # Entry points recipes call to load other recipes.
        self.module.loadInstalled = self.loadInstalled
        self.module.loadSuperClass = self.loadSuperClass
        # XXX when all recipes have been migrated
        # we can get rid of loadRecipe
        self.module.loadRecipe = self.loadSuperClass

    def updateModuleDict(self, d):
        """Merge d into the recipe module's namespace."""
        self.module.__dict__.update(d)

    def _copyReusedRecipes(self):
        # XXX HACK - get rid of this when we move the
        # recipe classes to the repository.
        # makes copies of some of the superclass recipes that are
        # created in this module. (specifically, the ones with buildreqs)
        recipeClassDict = {}
        for recipeClass in self.module.__dict__.values():
            if (type(recipeClass) != type or
                    not issubclass(recipeClass, recipe.Recipe)):
                continue
            numParents = len(inspect.getmro(recipeClass))
            recipeClassDict[recipeClass.__name__] = (numParents, recipeClass)
        # create copies of recipes by the number of parents they have
        # a class always has more parents than its parent does,
        # if you copy the superClasses first, the copies will.
        recipeClasses = [ x[1] for x in sorted(recipeClassDict.values(),
                                               key=lambda x: x[0]) ]
        for recipeClass in recipeClasses:
            className = recipeClass.__name__
            # when we create a new class object, it needs its superclasses.
            # get the original superclass list and substitute in any
            # copies
            mro = list(inspect.getmro(recipeClass)[1:])
            newMro = []
            for superClass in mro:
                superName = superClass.__name__
                newMro.append(self.module.__dict__.get(superName, superClass))
            newDict = {}
            # Copy attributes: callables/modules by reference, data by
            # deep copy so per-module state cannot leak between recipes.
            for name, attr in recipeClass.__dict__.iteritems():
                if type(attr) in [ types.ModuleType, types.MethodType,
                                   types.UnboundMethodType,
                                   types.FunctionType,
                                   staticmethod,
                                   # don't copy in flags, as they
                                   # need to have their data copied out
                                   use.LocalFlagCollection]:
                    newDict[name] = attr
                else:
                    newDict[name] = copy.deepcopy(attr)

            # Rebuild the class against the (possibly copied) MRO.
            self.module.__dict__[className] = \
                new.classobj(className, tuple(newMro), newDict)

    def _localImport(self, package, modules=()):
        """
        Import a package into a non-global context.

        @param package: the name of the module to import
        @type package: str
        @param modules: a sequence of modules to import from the package.
        If a 2-tuple is in the sequence, rename the imported module to
        the second value in the tuple.
        @type modules: sequence of strings or tuples, or empty tuple

        Examples of translated import statements::
          from foo import bar as baz:
              _localImport(d, "foo", (("bar", "baz"))
          from bar import fred, george:
              _localImport(d, "bar", ("fred", "george"))
          import os
              _localImport(d, "os")
        """
        m = __import__(package, {}, {}, modules)
        if modules:
            if isinstance(modules, str):
                modules = (modules,)
            for name in modules:
                if type(name) is tuple:
                    mod = name[0]
                    name = name[1]
                else:
                    mod = name
                self.module.__dict__[name] = getattr(m, mod)
        else:
            self.module.__dict__[package] = m

        # save a reference to the module into this context, so it won't
        # be garbage collected until the context is deleted.
        l = self.module.__dict__.setdefault('__localImportModules', [])
        l.append(m)

    def _loadRecipe(self, troveSpec, label, findInstalled):
        """ See docs for loadInstalled and loadSuperClass. """
        loader = ChainedRecipeLoader(troveSpec, label, findInstalled,
                                     self.subloadData.cfg,
                                     self.subloadData.repos,
                                     self.subloadData.branch,
                                     self.subloadData.parentPackageName,
                                     self.subloadData.parentDir,
                                     self.subloadData.buildFlavor,
                                     self.subloadData.ignoreInstalled,
                                     self.subloadData.overrides,
                                     self.subloadData.db)

        # NOTE(review): the loop variable 'recipe' shadows the
        # conary.build.recipe module imported at file scope.
        for name, recipe in loader.allRecipes().items():
            # hide all recipes from RecipeLoader - we don't want to return
            # a recipe that has been loaded by loadRecipe, so we treat them
            # for these purposes as if they are abstract base classes
            recipe.internalAbstractBaseClass = 1
            self.module.__dict__[name] = recipe
            if recipe._trove:
                # create a tuple with the version and flavor information needed to
                # load this trove again. You might be able to rely on the
                # flavor that the trove was built with, but when you load a
                # recipe that is not a superclass of the current recipe,
                # its flavor is not assumed to be relevant to the resulting
                # package (otherwise you might have completely irrelevant flavors
                # showing up for any package that loads the python recipe, e.g.)
                troveTuple = (recipe._trove.getName(), recipe._trove.getVersion(),
                              recipe._usedFlavor)
                log.info('Loaded %s from %s=%s[%s]' % ((name,) + troveTuple))
                self.loadedTroves.extend(loader.getLoadedTroves())
                self.loadedTroves.append(troveTuple)
                self.loadedSpecs[troveSpec] = (troveTuple,
                                               loader.getLoadedSpecs())

        # stash a reference to the module in the namespace
        # of the recipe that loaded it, or else it will be destroyed
        self.module.__dict__[loader.recipe.__module__] = loader

        return loader.getRecipe()

    def loadSuperClass(self, troveSpec, label=None):
        """
        Load a recipe so that its class/data can be used as a super class for
        this recipe.

        If the package is not installed anywhere on the system, the C{labelPath}
        will be searched without reference to the installed system.

        @param troveSpec: C{name}I{[}C{=I{version}}I{][}C{[I{flavor}]}I{]}
        specification of the trove to load.  The flavor given will be used
        to find the given recipe and also to set the flavor of the loaded recipe.
        @param label: label string to search for the given recipe in place of
        using the default C{labelPath}.
        If not specified, the labels listed in the version in the including
        recipe will be used as the c{labelPath} to search.
        For example, if called from recipe with version
        C{/conary.rpath.com@rpl:devel//shadow/1.0-1-1},
        the default C{labelPath} that would be constructed would be:
        C{[conary.rpath.com@rpl:shadow, conary.rpath.com@rpl:devel]}
        @return: recipe class loaded
        """
        return self._loadRecipe(troveSpec, label, False)

    def loadInstalled(self, troveSpec, label=None):
        """
        Load a recipe so that its data about the installed system can be used
        in this recipe.

        If a complete version is not specified in the trovespec, the version of
        the recipe to load will be based on what is installed on the system.
        For example, if C{loadRecipe('foo')} is called, and package C{foo} with
        version C{/bar.org@bar:devel/4.1-1-1} is installed on the system, then
        C{foo:source} with version C{/bar.org@bar:devel/4.1-1} will be loaded.
        The recipe will also be loaded with the installed package's flavor.

        If the package is not installed anywhere on the system, the C{labelPath}
        will be searched without reference to the installed system.

        @param troveSpec: C{name}I{[}C{=I{version}}I{][}C{[I{flavor}]}I{]}
        specification of the trove to load.  The flavor given will be used
        to find the given recipe and also to set the flavor of the loaded recipe.
        @param label: label string to search for the given recipe in place of
        using the default C{labelPath}.
        If not specified, the labels listed in the version in the including
        recipe will be used as the c{labelPath} to search.
        For example, if called from recipe with version
        C{/conary.rpath.com@rpl:devel//shadow/1.0-1-1},
        the default C{labelPath} that would be constructed would be:
        C{[conary.rpath.com@rpl:shadow, conary.rpath.com@rpl:devel]}
        @return: recipe class loaded
        """
        return self._loadRecipe(troveSpec, label, True)

    def execString(self, codeString):
        """Compile and exec recipe source inside this module's namespace.

        Syntax errors and runtime errors raised by the recipe are rewrapped
        as RecipeFileError with the failing file name and line attached.
        """
        try:
            code = compile(codeString, self.fileName, 'exec')
        except SyntaxError, err:
            msg = ('Error in recipe file "%s": %s\n' %(self.baseName, err))
            if err.offset is not None:
                # Point a caret at the offending column.
                msg += '%s%s^\n' %(err.text, ' ' * (err.offset-1))
            else:
                msg += err.text
            raise builderrors.RecipeFileError, msg, sys.exc_info()[2]

        use.resetUsed()
        try:
            exec code in self.module.__dict__
        except (errors.ConaryError, builderrors.CvcError), err:
            # don't show the exception for conary and cvc errors -
            # we assume their exception message already contains the
            # required information

            # Walk the traceback down to the frame inside the recipe file
            # so we can report the recipe's own line number.
            tb = sys.exc_info()[2]
            while tb and tb.tb_frame.f_code.co_filename != self.fileName:
                tb = tb.tb_next
            # NOTE(review): if no frame matches, tb is None here and the
            # next line raises AttributeError -- confirm this can't happen.
            linenum = tb.tb_frame.f_lineno
            msg = ('Error in recipe file "%s", line %s:\n %s' %
                   (self.baseName, linenum, err))
            raise builderrors.RecipeFileError, msg, sys.exc_info()[2]
        except Exception, err:
            tb = sys.exc_info()[2]
            while tb and tb.tb_frame.f_code.co_filename != self.fileName:
                tb = tb.tb_next

            if not tb:
                # The error did not originate in the recipe file; let it
                # propagate unwrapped.
                raise
            err = ''.join(traceback.format_exception(err.__class__, err, tb))
            del tb
            msg = ('Error in recipe file "%s":\n %s' %(self.baseName, err))
            raise builderrors.RecipeFileError, msg, sys.exc_info()[2]
class RecipeLoaderFromString(object):
    """Parses recipe source held in a string and locates its recipe class."""

    # Class-level defaults; lazily promoted to instance attributes by
    # addLoadedTroves/addLoadedSpecs (see the comments on those methods).
    loadedTroves = None
    loadedSpecs = None

    cookType = recipe.Recipe.COOK_TYPE_LOCAL

    def __init__(self, codeString, filename, cfg=None, repos=None,
                 component=None, branch=None, ignoreInstalled=False,
                 directory=None, buildFlavor=None, db=None, overrides = None,
                 factory = False, objDict = {}, loadAutoRecipes = True):
        # NOTE(review): objDict's mutable default is passed through
        # unmodified, so the shared default dict is harmless here.
        try:
            self._load(codeString, filename, cfg, repos, component,
                       branch, ignoreInstalled, directory,
                       buildFlavor=buildFlavor, db=db,
                       overrides=overrides, factory = factory,
                       objDict = objDict, loadAutoRecipes = loadAutoRecipes)
        except Exception, err:
            raise builderrors.LoadRecipeError, \
                'unable to load recipe file %s:\n%s' % (filename, err), \
                sys.exc_info()[2]

    @staticmethod
    def _loadAutoRecipes(importer, cfg, repos, db = None, buildFlavor = None):
        """Load every recipe listed in cfg.autoLoadRecipes into importer.

        Recipes are loaded in dependency order derived from each trove's
        loadedTroves information.
        """
        def _loadTroves(repos, nvfDict, troveSpecStrs, troveSpecs):
            """
            Loads troves from the repository after they've been found
            """
            trovesNeeded = []
            for i, (specStr, spec) in \
                        enumerate(itertools.izip(troveSpecStrs, troveSpecs)):
                nvf = nvfDict.get(spec, None)

                if not nvf:
                    raise builderrors.RecipeFileError('no match for '
                                'autoLoadRecipe entry %s' % specStr)
                if len(nvf) > 1:
                    raise builderrors.RecipeFileError('too many matches for '
                                'autoLoadRecipe entry %s' % specStr)

                trovesNeeded.append((i, nvf[0]))

            troves = repos.getTroves([ x[1] for x in trovesNeeded],
                                     withFiles = False)
            # Re-place fetched troves at their original spec positions.
            result = [ None ] * len(troveSpecs)
            for ((i, nvf), trv) in itertools.izip(trovesNeeded, troves):
                result[i] = trv

            return result

        # def _loadDefaultPackages begins here
        if not cfg.autoLoadRecipes:
            return

        RecipeLoaderFromString._loadingDefaults = True

        if db is None:
            db = database.Database(cfg.root, cfg.dbPath)

        # This stack looks in the database before looking at a repository,
        # avoiding repository calls where they aren't needed.
        ts = trovesource.stack(db, repos)

        troveSpecs = [ cmdline.parseTroveSpec(x) for x in cfg.autoLoadRecipes ]

        # Look on the repository first to match the trove specs
        try:
            nvfDict = repos.findTroves(cfg.installLabelPath, troveSpecs,
                                       cfg.flavor,
                                       allowMissing=True,
                                       bestFlavor=True)
        except errors.OpenError, err:
            # Repository unreachable; fall back to the local database only.
            nvfDict = {}

        neededTroveSpecs = [ x for x in troveSpecs if x not in nvfDict ]
        nvfDict.update(db.findTroves(cfg.installLabelPath, neededTroveSpecs,
                                     allowMissing = True))

        groupTroves = _loadTroves(ts, nvfDict, cfg.autoLoadRecipes, troveSpecs)

        # We look for recipes in reverse order to allow the troves at the
        # front of the list to override those at the end
        recipeTroves = {}
        for trv in reversed(groupTroves):
            for x in itertools.chain([ trv.getNameVersionFlavor() ],
                                     trv.iterTroveList(weakRefs = True,
                                                       strongRefs = True) ):
                if x[0].endswith(':recipe'):
                    recipeTroves[x[0]] = x

        # We have a list of the troves to autoload recipes from now. Go get
        # those troves so we can get the file information we need. The
        # sort here is to keep this order repeatable. Note that we need
        # to get the package which contains the recipe as well because
        # that's where the loadedTroves information is stored. We depend
        # on the :recipe component coming after the package itself later
        # on, which the sorting keeps true!
        unorderedTroveList = ts.getTroves(
                sorted(itertools.chain(
                    *[ ( x, ( x[0].split(':')[0], x[1], x[2] )) for
                        x in recipeTroves.values() ] ) ),
                withFiles = True)

        # Last one by name wins. They're sorted by version (thanks to the
        # above) so it's consistent at least.
        trovesByName = dict( (x.getName(), x) for x in unorderedTroveList)

        # Reorder troveList based on the loadedTroves for each one to
        # get the final list of troves we should load as well as the load
        # order for them.
        g = graph.DirectedGraph()

        for trv in unorderedTroveList:
            # create the nodes
            if trv.getName().endswith(':recipe'):
                g.addNode(trv)

        # Edges point from what's depended on to what depends on it since
        # getTotalOrdering() returns children after parents.
        while unorderedTroveList:
            # Consume (package, :recipe) pairs kept adjacent by the sort.
            trv = unorderedTroveList.pop(0)
            recipeTrv = unorderedTroveList.pop(0)
            assert(( trv.getName() + ':recipe', trv.getVersion(),
                     trv.getFlavor() ) ==
                   recipeTrv.getNameVersionFlavor())

            for (name, version, flavor) in trv.getLoadedTroves():
                if name in trovesByName:
                    g.addEdge(trovesByName[name], recipeTrv)

        try:
            orderedTroveList = g.getTotalOrdering(
                lambda a, b: cmp(a[1].getNameVersionFlavor(),
                                 b[1].getNameVersionFlavor()))
        except graph.BackEdgeError, e:
            raise builderrors.RecipeFileError(
                "Cannot autoload recipes due to a loadedRecipes loop involving"
                " %s=%s[%s] and %s=%s[%s]" %
                tuple(itertools.chain(e.src.getNameVersionFlavor(),
                                      e.dst.getNameVersionFlavor())))

        filesNeeded = []
        for trv in orderedTroveList:
            # Each :recipe component must carry exactly one .recipe file.
            l = [ x for x in trv.iterFileList() if x[1].endswith('.recipe') ]
            assert(len(l) == 1)
            filesNeeded += l

        recipes = ts.getFileContents([ (x[2], x[3]) for x in filesNeeded ])

        objDict = {}
        objDict.update(importer.module.__dict__)
        for (fileContents, fileInfo, trv) in \
                    itertools.izip(recipes, filesNeeded,
                                   orderedTroveList):
            # Each loaded recipe's namespace is visible to the next one
            # via objDict, so later recipes may subclass earlier ones.
            loader = RecipeLoaderFromString(fileContents.get().read(),
                               fileInfo[1], cfg, repos = repos,
                               ignoreInstalled = True,
                               buildFlavor = buildFlavor, db = db,
                               loadAutoRecipes = False, objDict = objDict)
            recipe = loader.getRecipe()
            recipe.internalAbstractBaseClass = True
            recipe._loadedFromSource = (trv.getNameVersionFlavor())
            importer.updateModuleDict(loader.recipes)
            objDict.update(loader.recipes)

        RecipeLoaderFromString._loadingDefaults = False

    def _findRecipeClass(self, pkgname, basename, objDict, factory = False):
        """Find the single recipe class in objDict with a name and version.

        Raises RecipeFileError when zero or more than one candidate exists,
        or when a candidate fails validation.
        """
        result = None
        for (name, obj) in objDict.items():
            if not inspect.isclass(obj):
                continue
            if name == 'FactoryRecipeClass':
                continue

            # if a recipe has been marked to be ignored (for example, if
            # it was loaded from another recipe by loadRecipe()
            # (don't use hasattr here, we want to check only the recipe
            # class itself, not any parent class
            if 'internalAbstractBaseClass' in obj.__dict__:
                continue

            # make sure the class is derived from either Recipe or Factory
            if (( factory and not issubclass(obj, objDict['Factory'])) or
                (not factory and not issubclass(obj, recipe.Recipe ))):
                continue

            if hasattr(obj, 'name') and hasattr(obj, 'version'):
                self._validateRecipe(obj, pkgname, basename)

                if result:
                    raise builderrors.RecipeFileError(
                        'Error in recipe file "%s": multiple recipe classes '
                        'with both name and version exist' % basename)

                result = (name, obj)
            else:
                raise builderrors.RecipeFileError(
                    "Recipe in file/component '%s' did not contain both a name"
                    " and a version attribute." % pkgname)

        if not result:
            raise builderrors.RecipeFileError(
                "file/component '%s' did not contain a valid recipe" % pkgname)

        return result

    # this is overridden in the testsuite to let it validate by class name
    # instead of the name attribute; it's a shame it works that way
    @staticmethod
    def _validateName(recipeClass, nameToCheck):
        return recipeClass.name == nameToCheck

    @classmethod
    def _validateRecipe(klass, recipeClass, packageName, fileName):
        """Check naming and versioning conventions for recipeClass.

        Raises RecipeFileError / BadRecipeNameError on violations.
        """
        if recipeClass.name[0] not in string.ascii_letters + string.digits:
            raise RecipeFileError(
                'Error in recipe file "%s": package name must start '
                'with an ascii letter or digit.' % fileName)

        if not hasattr(recipeClass,'parent') and '-' in recipeClass.version:
            raise RecipeFileError(
                "Version string %s has illegal '-' character" % recipeClass.version)

        if not(klass._validateName(recipeClass, packageName)):
            raise RecipeFileError(
                "Recipe object name '%s' does not match "
                "file/component name '%s'"
                % (recipeClass.name, packageName))

        packageType = recipeClass.getType()
        # info-/group-/fileset- recipes must carry the matching prefix;
        # everything else must NOT carry one of those prefixes.
        prefixes = {recipe.RECIPE_TYPE_INFO: 'info-',
                    recipe.RECIPE_TYPE_GROUP: 'group-',
                    recipe.RECIPE_TYPE_FILESET: 'fileset-'}

        # don't enforce the prefix convention if the class in question is
        # actully a superclass. especially needed for repo based *InfoRecipe
        if packageType in prefixes and \
                'abstractBaseClass' not in recipeClass.__dict__:
            if not recipeClass.name.startswith(prefixes[packageType]):
                raise builderrors.BadRecipeNameError(
                    'recipe name must start with "%s"' % prefixes[packageType])
        elif packageType == recipe.RECIPE_TYPE_REDIRECT:
            # redirects are allowed to have any format
            pass
        else:
            for prefix in prefixes.itervalues():
                if recipeClass.name.startswith(prefix):
                    raise builderrors.BadRecipeNameError(
                        'recipe name cannot start with "%s"' % prefix)
        recipeClass.validateClass()

    def _load(self, codeString, filename, cfg=None, repos=None, component=None,
              branch=None, ignoreInstalled=False, directory=None,
              buildFlavor=None, db=None, overrides=None, factory=False,
              objDict = None, loadAutoRecipes = True):
        """Execute codeString and bind the recipe class it defines.

        Fills in self.recipe / self.recipes / self.module and records
        loaded-trove bookkeeping and flavor tracking on the recipe class.
        """
        self.recipes = {}

        if filename[0] != "/":
            raise builderrors.LoadRecipeError("recipe file names must be absolute paths")

        if component:
            pkgname = component.split(':')[0]
        else:
            # Derive the package name from "<pkgname>.recipe".
            pkgname = filename.split('/')[-1]
            pkgname = pkgname[:-len('.recipe')]
        basename = os.path.basename(filename)
        self.file = basename.replace('.', '-')

        if not directory:
            directory = os.path.dirname(filename)

        subloadData = SubloadData(cfg = cfg, repos = repos, db = db,
                buildFlavor = buildFlavor, directory = directory,
                branch = branch, name = pkgname,
                ignoreInstalled = ignoreInstalled, overrides = overrides)

        importer = Importer(objDict, fileName = filename, baseName = basename,
                            factory = factory, subloadData = subloadData)

        if loadAutoRecipes:
            self._loadAutoRecipes(importer, cfg, repos, db,
                                  buildFlavor = buildFlavor)

        importer.execString(codeString)
        self.module = importer.module

        (name, obj) = self._findRecipeClass(pkgname, basename,
                                            self.module.__dict__,
                                            factory = factory)
        self.recipes[name] = obj
        obj.filename = filename
        self.recipe = obj
        # create a reference to this module inside of the recipe to prevent
        # the module from getting unloaded
        obj.__moduleObj__ = self.module

        # Look through the base classes for this recipe to see if any
        # of them were autoloaded, and if so include that information
        # in the loaded troves information
        for baseClass in inspect.getmro(self.recipe):
            if (hasattr(baseClass, '_loadedFromSource') and
                baseClass._loadedFromSource not in importer.loadedTroves):
                importer.loadedTroves.append(baseClass._loadedFromSource)

        # inherit any tracked flags that we found while loading parent
        # classes. Also inherit the list of recipes classes needed to load
        # this recipe.
        self.addLoadedTroves(importer.loadedTroves)
        self.addLoadedSpecs(importer.loadedSpecs)

        if self.recipe._trackedFlags is not None:
            use.setUsed(self.recipe._trackedFlags)
        self.recipe._trackedFlags = use.getUsed()
        if buildFlavor is not None:
            self.recipe._buildFlavor = buildFlavor
        self.recipe._localFlavor = use.localFlagsToFlavor(self.recipe.name)

        # _usedFlavor here is a complete hack. Unfortunately _trackedFlags
        # can change because it contains global flags, and if we make a copy
        # of it those copies can't be passed to use.setUsed() somewhere
        # else because of those same globals. Sweet.
        self.recipe._usedFlavor = use.createFlavor(self.recipe.name,
                                                   self.recipe._trackedFlags)
        self.recipe._sourcePath = directory

    def allRecipes(self):
        """Return the mapping of every recipe class found while loading."""
        return self.recipes

    @api.developerApi
    def getRecipe(self):
        """Return the primary recipe class for this load."""
        return self.recipe

    def getModuleDict(self):
        """Return the namespace the recipe executed in."""
        return self.module.__dict__

    def getLoadedTroves(self):
        """Return a copy of the (name, version, flavor) load list."""
        return list(self.loadedTroves)

    def addLoadedTroves(self, newTroves):
        # This is awful, but it switches loadedTroves from a class variable
        # to a instance variable. We don't just set this up in __init__
        # because we have descendents which call addLoadedTroves before
        # initializing the parent class.
        if self.loadedTroves is None:
            self.loadedTroves = []
        self.loadedTroves = self.loadedTroves + newTroves

    def getLoadedSpecs(self):
        """Return the troveSpec -> (troveTuple, nested specs) mapping."""
        return self.loadedSpecs

    def addLoadedSpecs(self, newSpecs):
        # see the comment for addLoadedTroves
        if self.loadedSpecs is None:
            self.loadedSpecs = {}
        self.loadedSpecs.update(newSpecs)
class RecipeLoader(RecipeLoaderFromString):
@api.developerApi
def __init__(self, filename, cfg=None, repos=None, component=None,
branch=None, ignoreInstalled=False, directory=None,
buildFlavor=None, db=None, overrides = None,
factory = False, objDict = {}):
try:
f = open(filename)
codeString = f.read()
f.close()
except Exception, err:
raise builderrors.LoadRecipeError, \
'unable to load recipe file %s:\n%s' % (filename, err), \
sys.exc_info()[2]
RecipeLoaderFromString.__init__(self, codeString, filename,
cfg = cfg, repos = repos, component = component,
branch = branch, ignoreInstalled = ignoreInstalled,
directory = directory, buildFlavor = buildFlavor,
db = db, overrides = overrides, factory = factory,
objDict = objDict)
class RecipeLoaderFromSourceTrove(RecipeLoader):
    """Loads a recipe out of a :source trove, including factory handling."""

    # When building from a source trove, we should only search the repo
    cookType = recipe.Recipe.COOK_TYPE_REPOSITORY

    @staticmethod
    def findFileByPath(sourceTrove, path):
        """Return (fileId, fileVersion) for path in sourceTrove, or None."""
        for (pathId, filePath, fileId, fileVersion) in sourceTrove.iterFileList():
            if filePath == path:
                return (fileId, fileVersion)

        return None

    def __init__(self, sourceTrove, repos, cfg, versionStr=None, labelPath=None,
                 ignoreInstalled=False, filterVersions=False,
                 parentDir=None, defaultToLatest = False,
                 buildFlavor = None, db = None, overrides = None,
                 getFileFunction = None, branch = None):
        self.recipes = {}

        if getFileFunction is None:
            # Default: fetch file contents straight from the repository.
            getFileFunction = lambda repos, fileId, fileVersion, path: \
                    repos.getFileContents([ (fileId, fileVersion) ])[0].get()

        name = sourceTrove.getName().split(':')[0]

        # A non-trivial factory means the recipe class is produced by the
        # factory rather than (or in addition to) a .recipe file.
        if (sourceTrove.getFactory() and
            sourceTrove.getFactory() != 'factory'):
            if not versionStr:
                if branch:
                    versionStr = str(branch)
                else:
                    versionStr = sourceTrove.getVersion().branch()

            factoryName = 'factory-' + sourceTrove.getFactory()
            loader = ChainedRecipeLoader(factoryName, None, True, cfg,
                                         repos, branch, name, parentDir,
                                         buildFlavor, ignoreInstalled,
                                         overrides, db)

            # XXX name + '.recipe' sucks, but there isn't a filename that
            # actually exists
            factoryCreatedRecipe = self.recipeFromFactory(sourceTrove,
                                                          loader.getRecipe(),
                                                          name,
                                                          name + '.recipe',
                                                          repos,
                                                          getFileFunction)
            factoryCreatedRecipe._trove = sourceTrove.copy()
            factoryCreatedRecipe._sourcePath = parentDir

            self.recipes.update(loader.recipes)
            self.addLoadedTroves(loader.getLoadedTroves())
            self.recipes[factoryCreatedRecipe.name] = factoryCreatedRecipe
        else:
            factoryCreatedRecipe = None

        recipePath = name + '.recipe'
        match = self.findFileByPath(sourceTrove, recipePath)

        if not match and factoryCreatedRecipe:
            # this is a recipeless factory; use the recipe class created
            # by the factory for this build
            self.recipe = factoryCreatedRecipe
            # this validates the class is well-formed as a recipe
            self._findRecipeClass(name, name + '.recipe',
                                  { self.recipe.name : self.recipe })
            return
        elif not match:
            # this is just missing the recipe; we need it
            # NOTE(review): the format args look swapped relative to the
            # message text ("version <name> of <version>") -- confirm.
            raise builderrors.RecipeFileError("version %s of %s does not "
                                              "contain %s" %
                      (sourceTrove.getName(),
                       sourceTrove.getVersion().asString(),
                       recipePath))

        # Write the recipe contents to a temporary file so the standard
        # file-based loader can process it; always remove it afterwards.
        (fd, recipeFile) = tempfile.mkstemp(".recipe", 'temp-%s-' %name,
                                            dir=cfg.tmpDir)
        outF = os.fdopen(fd, "w")
        inF = getFileFunction(repos, match[0], match[1], recipePath)
        util.copyfileobj(inF, outF)
        del inF
        outF.close()
        del outF

        if branch is None:
            branch = sourceTrove.getVersion().branch()

        if factoryCreatedRecipe:
            objDict = { 'FactoryRecipeClass' : factoryCreatedRecipe }
        else:
            objDict = {}

        try:
            RecipeLoader.__init__(self, recipeFile, cfg, repos,
                                  sourceTrove.getName(),
                                  branch = branch,
                                  ignoreInstalled=ignoreInstalled,
                                  directory=parentDir, buildFlavor=buildFlavor,
                                  db=db, overrides=overrides,
                                  factory = (sourceTrove.getFactory() == 'factory'),
                                  objDict = objDict)
        finally:
            os.unlink(recipeFile)

        self.recipe._trove = sourceTrove.copy()

    def recipeFromFactory(self, sourceTrv, factoryClass, pkgname,
                          recipeFileName, repos, getFileFunction):
        """Instantiate factoryClass against sourceTrv's file list and
        return the recipe class it generates."""
        # (fileId, fileVersion) by path
        pathDict = dict( (x[1], (x[2], x[3])) for x in
                         sourceTrv.iterFileList() )

        def openSourceFile(path):
            # Callback handed to the factory for reading source files.
            if path not in pathDict:
                raise builderrors.LoadRecipeError(
                    'Path %s not found in %s=%s' %(path,
                        sourceTrv.getName(), sourceTrv.getVersion()))

            fileId, fileVersion = pathDict[path]
            return getFileFunction(repos, fileId, fileVersion, path)

        files = sorted([ x[1] for x in sourceTrv.iterFileList() ])
        factory = factoryClass(pkgname, sourceFiles = files,
                               openSourceFileFn = openSourceFile)
        # NOTE(review): the local 'recipe' shadows the conary.build.recipe
        # module within this method.
        recipe = factory.getRecipeClass()

        if factoryClass._trove:
            # this doesn't happen if you load from the local directory
            self.addLoadedTroves(
                [ factoryClass._trove.getNameVersionFlavor() ])
            self.addLoadedSpecs(
                { factoryClass.name :
                    (factoryClass._trove.getNameVersionFlavor(), {} ) } )

        return recipe

    def getSourceComponentVersion(self):
        """Return the version of the :source trove this recipe came from."""
        return self.recipe._trove.getVersion()
class RecipeLoaderFromRepository(RecipeLoaderFromSourceTrove):
    """Recipe loader that locates the :source component for *name* in a
    repository and feeds it to RecipeLoaderFromSourceTrove."""

    def __init__(self, name, cfg, repos, versionStr=None, labelPath=None,
                 ignoreInstalled=False, filterVersions=False,
                 parentDir=None, defaultToLatest = False,
                 buildFlavor = None, db = None, overrides = None):
        # FIXME parentDir specifies the directory to look for
        # local copies of recipes called with loadRecipe. If
        # empty, we'll look in the tmp directory where we create the recipe
        # file for this source component - probably not intended behavior.
        name = name.split(':')[0]
        component = name + ":source"
        if not labelPath:
            # no explicit search path: fall back to the configured
            # build label
            if not cfg.buildLabel:
                raise builderrors.LoadRecipeError(
                'no build label set - cannot find source component %s' % component)
            labelPath = [cfg.buildLabel]
        if repos is None:
            raise builderrors.LoadRecipeError(
            'cannot find source component %s: No repository access' % (component, ))
        try:
            pkgs = repos.findTrove(labelPath,
                                   (component, versionStr, deps.Flavor()))
        except (errors.TroveNotFound, errors.OpenError), err:
            raise builderrors.LoadRecipeError(
                                'cannot find source component %s: %s' %
                                (component, err))
        if filterVersions:
            # keep only the candidates that best match the labelPath
            pkgs = getBestLoadRecipeChoices(labelPath, pkgs)
        if len(pkgs) > 1:
            # several versions survived filtering: either warn and use the
            # newest, or refuse, depending on defaultToLatest
            pkgs = sorted(pkgs, reverse=True)
            if defaultToLatest:
                log.warning("source component %s has multiple versions "
                            "on labelPath %s\n\n"
                            "Picking latest: \n %s\n\n"
                            "Not using:\n %s"
                            %(component,
                              ', '.join(x.asString() for x in labelPath),
                              '%s=%s' % pkgs[0][:2],
                              '\n '.join('%s=%s' % x[:2] for x in pkgs[1:])))
            else:
                raise builderrors.LoadRecipeError(
                    "source component %s has multiple versions "
                    "on labelPath %s: %s"
                    %(component,
                      ', '.join(x.asString() for x in labelPath),
                      ', '.join('%s=%s' % x[:2] for x in pkgs)))
        sourceComponent = repos.getTrove(*pkgs[0])
        RecipeLoaderFromSourceTrove.__init__(self, sourceComponent, repos, cfg,
                 versionStr=versionStr, labelPath=labelPath,
                 ignoreInstalled=ignoreInstalled, filterVersions=filterVersions,
                 parentDir=parentDir, defaultToLatest = defaultToLatest,
                 buildFlavor = buildFlavor, db = db, overrides = overrides)
def _scoreLoadRecipeChoice(labelPath, version):
# FIXME I'm quite sure this heuristic will get replaced with
# something smarter/more sane as time progresses
if not labelPath:
return 0
score = 0
labelPath = [ x for x in reversed(labelPath)]
branch = version.branch()
while True:
label = branch.label()
try:
index = labelPath.index(label)
except ValueError:
index = -1
score += index
if not branch.hasParentBranch():
break
branch = branch.parentBranch()
return score
def getBestLoadRecipeChoices(labelPath, troveTups):
    """ These labels all match the given labelPath.
        Score each candidate by how many of its labels appear in the
        labelPath and keep only the best ones.  Rules:
        - If the labelPath is [bar, foo] and you are choosing between
          /foo/bar/ and /foo/blah/bar, choose /foo/bar (any other
          shadow/branch in the path may be from a maintenance branch).
        - If the labelPath is [bar] and you are choosing between
          /foo/bar/ and /foo/blah/bar, choose /foo/bar.
        - If two troves are on the same branch, prefer the later trove.
    """
    scored = [ (_scoreLoadRecipeChoice(labelPath, tup[1]), tup)
                for tup in troveTups ]
    best = max(scored)[0]
    candidates = [ pair for pair in scored if pair[0] == best ]
    if len(candidates) <= 1:
        return [ pair[1] for pair in candidates ]
    # several candidates tie on score: keep only the best (latest)
    # entry per branch
    perBranch = {}
    for pair in candidates:
        branch = pair[1][1].branch()
        if branch in perBranch:
            perBranch[branch] = max(perBranch[branch], pair)
        else:
            perBranch[branch] = pair
    return [ pair[1] for pair in perBranch.itervalues() ]
def recipeLoaderFromSourceComponent(name, cfg, repos,
                                    versionStr=None, labelPath=None,
                                    ignoreInstalled=False,
                                    filterVersions=False,
                                    parentDir=None,
                                    defaultToLatest = False,
                                    buildFlavor = None,
                                    db = None, overrides = None):
    """Compatibility helper: build a RecipeLoaderFromRepository and return
    the (loader, sourceComponentVersion) pair."""
    loader = RecipeLoaderFromRepository(name, cfg, repos,
                                        versionStr=versionStr,
                                        labelPath=labelPath,
                                        ignoreInstalled=ignoreInstalled,
                                        filterVersions=filterVersions,
                                        parentDir=parentDir,
                                        defaultToLatest=defaultToLatest,
                                        buildFlavor=buildFlavor, db=db,
                                        overrides=overrides)
    return loader, loader.getSourceComponentVersion()
def _pickLatest(component, troves, labelPath=None):
    """Sort *troves* newest-first (in place), log a warning describing
    which version was chosen and which were skipped, and return the
    newest tuple."""
    troves.sort(reverse=True)
    parts = ["source component %s has multiple versions" % component]
    if labelPath:
        parts.append(" on labelPath %s:" %
                     ', '.join(x.asString() for x in labelPath))
    else:
        parts.append(':')
    parts.append("\nPicking latest:\n %s\n\n"
                 "Not using:\n %s\n"
                 %('%s=%s' % troves[0][:2],
                   '\n '.join('%s=%s' % x[:2] for x in troves[1:])))
    log.warning(''.join(parts))
    return troves[0]
def ChainedRecipeLoader(troveSpec, label, findInstalled, cfg,
                        repos, branch, parentPackageName,
                        parentDir, buildFlavor,
                        alwaysIgnoreInstalled, overrides, db):
    # This loads a recipe from another recipe. It's used to load factory
    # recipes as well as superclasses. It returns a child of RecipeLoader
    def _findInstalledVersion(db, labelPath, name, versionStr, flavor, repos):
        """ Specialized search of the installed system along a labelPath,
            defaulting to searching the whole system if the trove is not
            found along the label path.
            The version and flavor of the first found installed trove is
            returned, or C{None} if no trove is found.
        """
        # first search on the labelPath.
        troves = []
        try:
            troves = db.findTrove(labelPath, (name, versionStr, flavor))
            if len(troves) > 1:
                troves = getBestLoadRecipeChoices(labelPath, troves)
                if len(troves) > 1:
                    # sort by timeStamp even though they're across
                    # branches. This will give us _some_ result to move
                    # forward with, which is better than blowing up.
                    troves = [_pickLatest(name, troves, labelPath)]
        except errors.TroveNotFound:
            pass
        if not troves:
            if labelPath is None:
                return None
            # nothing on the labelPath: fall back to a system-wide search
            try:
                troves = db.findTrove(None, (name, versionStr, flavor))
                troves = getBestLoadRecipeChoices(None, troves)
            except errors.TroveNotFound:
                pass
        if not troves:
            return None
        if len(troves) > 1:
            troves = [_pickLatest(name, troves)]
        if troves:
            sourceVersion = troves[0][1].getSourceVersion(False)
            flavor = troves[0][2]
            sourceName = name.split(':')[0] + ':source'
            noFlavor = deps.parseFlavor('')
            if not repos.hasTrove(sourceName, sourceVersion, noFlavor):
                # walk up shadow parents until we find a source the
                # repository actually has
                while sourceVersion.hasParentVersion():
                    sourceVersion = sourceVersion.parentVersion()
                    if repos.hasTrove(sourceName, sourceVersion, noFlavor):
                        break
            return sourceVersion, flavor
        return None
    # def ChainedRecipeLoader begins here
    oldUsed = use.getUsed()
    name, versionStr, flavor = cmdline.parseTroveSpec(troveSpec)
    versionSpec, flavorSpec = versionStr, flavor
    if db is None:
        db = database.Database(cfg.root, cfg.dbPath)
    if name.endswith('.recipe'):
        file = name
        name = name[:-len('.recipe')]
    else:
        file = name + '.recipe'
    if label and not versionSpec:
        # If they used the old-style specification of label, we should
        # convert to new style for purposes of storing in troveInfo
        troveSpec = '%s=%s' % (name, label)
        # BUGFIX: this used to call troveSpec.isEmpty(), but troveSpec is
        # a plain string here (strings have no isEmpty method), which
        # raised AttributeError; the intent is to append the flavor only
        # when a non-empty flavor was requested.
        if flavorSpec is not None and not flavorSpec.isEmpty():
            troveSpec = '%s[%s]' % (troveSpec, flavorSpec)
    if overrides and troveSpec in overrides:
        recipeToLoad, newOverrideDict = overrides[troveSpec]
    else:
        recipeToLoad = newOverrideDict = None
    #first check to see if a filename was specified, and if that
    #recipe actually exists.
    loader = None
    if parentDir and not (recipeToLoad or label or versionStr or
                          (flavor is not None)):
        if name[0] != '/':
            localfile = parentDir + '/' + file
        else:
            localfile = name + '.recipe'
        if os.path.exists(localfile):
            # XXX: FIXME: this next test is unreachable
            if flavor is not None and not flavor.isEmpty():
                if buildFlavor is None:
                    oldBuildFlavor = cfg.buildFlavor
                    use.setBuildFlagsFromFlavor()
                else:
                    oldBuildFlavor = buildFlavor
                    buildFlavor = deps.overrideFlavor(oldBuildFlavor, flavor)
                use.setBuildFlagsFromFlavor(name, buildFlavor, error=False)
            log.info('Loading %s from %s' % (name, localfile))
            # ick
            factory = name.startswith('factory-')
            loader = RecipeLoader(localfile, cfg, repos=repos,
                                  ignoreInstalled=alwaysIgnoreInstalled,
                                  buildFlavor=buildFlavor,
                                  db=db, factory=factory)
            loader.recipe._trove = None
    if not loader:
        if label:
            labelPath = [versions.Label(label)]
        elif branch:
            # if no labelPath was specified, search backwards through the
            # labels on the current branch.
            labelPath = list(branch.iterLabels())
            labelPath.reverse()
        else:
            labelPath = None
        if cfg.installLabelPath:
            # extend the search path with the configured install labels
            if labelPath:
                for label in cfg.installLabelPath:
                    if label not in labelPath:
                        labelPath.append(label)
            else:
                labelPath = cfg.installLabelPath
        if not recipeToLoad and findInstalled and not alwaysIgnoreInstalled:
            # look on the local system to find a trove that is installed that
            # matches this loadrecipe request. Use that trove's version
            # and flavor information to grab the source out of the repository
            parts = _findInstalledVersion(db, labelPath, name, versionStr,
                                          flavor, repos)
            if parts:
                version, flavor = parts
                while version.isOnLocalHost():
                    version = version.parentVersion()
                versionStr = str(version)
                flavorSpec = flavor
        if recipeToLoad:
            name, versionStr, flavor = recipeToLoad
        if flavorSpec is not None:
            # override the current flavor with the flavor found in the
            # installed trove (or the troveSpec flavor, if no installed
            # trove was found.
            if buildFlavor is None:
                oldBuildFlavor = cfg.buildFlavor
                cfg.buildFlavor = deps.overrideFlavor(oldBuildFlavor,
                                                      flavorSpec)
                use.setBuildFlagsFromFlavor(name, cfg.buildFlavor, error=False)
            else:
                oldBuildFlavor = buildFlavor
                buildFlavor = deps.overrideFlavor(oldBuildFlavor, flavorSpec)
                use.setBuildFlagsFromFlavor(name, buildFlavor, error=False)
        loader = RecipeLoaderFromRepository(name, cfg, repos,
                                            labelPath=labelPath,
                                            buildFlavor=buildFlavor,
                                            versionStr=versionStr,
                                            ignoreInstalled=alwaysIgnoreInstalled,
                                            filterVersions=True,
                                            parentDir=parentDir,
                                            defaultToLatest=True,
                                            db=db, overrides=newOverrideDict)
        if flavorSpec is not None:
            # restore the build flavor we overrode above
            if buildFlavor is None:
                buildFlavor = cfg.buildFlavor = oldBuildFlavor
            else:
                buildFlavor = oldBuildFlavor
            # must set this flavor back after the above use.createFlavor()
            use.setBuildFlagsFromFlavor(parentPackageName, buildFlavor, error=False)
    # return the tracked flags to their state before loading this recipe
    use.resetUsed()
    use.setUsed(oldUsed)
    return loader
class RecipeLoaderFromSourceDirectory(RecipeLoaderFromSourceTrove):
    """Load a recipe from a checked-out source directory, reading the
    source files straight off the local disk."""
    # local cooks are marked with the COOK_TYPE_LOCAL cook type
    cookType = recipe.Recipe.COOK_TYPE_LOCAL
    def __init__(self, trv, branch = None, cfg = None, repos = None,
                 ignoreInstalled = None, sourceFiles = None,
                 buildFlavor = None, labelPath = None, parentDir = None):
        if parentDir is None:
            parentDir = os.getcwd()
        def getFile(repos, fileId, fileVersion, path):
            # ignore the repository coordinates; files come from disk
            if parentDir:
                return open(os.sep.join((parentDir, path)))
            return open(path)
        versionStr = str(branch) if branch else None
        RecipeLoaderFromSourceTrove.__init__(self, trv, repos, cfg,
                                             versionStr = versionStr,
                                             ignoreInstalled=ignoreInstalled,
                                             getFileFunction = getFile,
                                             branch = branch,
                                             buildFlavor = buildFlavor,
                                             parentDir = parentDir)
| |
# -*- coding: utf-8 -*-
"""
%jot magic for lightweight persistence.
Stores variables in a Struct with some notes in the PickleShare database
"""
from datetime import datetime
from IPython.core import ipapi
ip = ipapi.get()
import pickleshare
import inspect,pickle,os,sys,textwrap
from IPython.core.fakemodule import FakeModule
from IPython.utils.ipstruct import Struct
from IPython.utils.warn import error
def refresh_variables(ip, key=None):
db = ip.db
if key is None:
keys = db.keys('jot/*')
else:
keys = db.keys('jot/'+key)
for key in keys:
# strip autorestore
justkey = os.path.basename(key)
print "Restoring from", justkey, "..."
try:
obj = db[key]
except KeyError:
print "Unable to restore variable '%s', ignoring (use %%jot -d to forget!)" % justkey
print "The error was:",sys.exc_info()[0]
else:
#print "restored",justkey,"=",obj #dbg
try:
origname = obj.name
except:
ip.user_ns[justkey] = obj
print "Restored", justkey
else:
ip.user_ns[origname] = obj['val']
print "Restored", origname
def read_variables(ip, key=None):
db = ip.db
if key is None:
return None
else:
keys = db.keys('jot/'+key)
for key in keys:
# strip autorestore
justkey = os.path.basename(key)
print "restoring from ", justkey
try:
obj = db[key]
except KeyError:
print "Unable to read variable '%s', ignoring (use %%jot -d to forget!)" % justkey
print "The error was:",sys.exc_info()[0]
else:
return obj
def detail_variables(ip, key=None):
db, get = ip.db, ip.db.get
if key is None:
keys = db.keys('jot/*')
else:
keys = db.keys('jot/'+key)
if keys:
size = max(map(len,keys))
else:
size = 0
fmthead = '%-'+str(size)+'s [%s]'
fmtbody = 'Comment:\n %s'
fmtdata = 'Data:\n %s, %s'
for key in keys:
v = get(key,'<unavailable>')
justkey = os.path.basename(key)
try:
print fmthead % (justkey, datetime.ctime(v.get('time','<unavailable>')))
print fmtbody % (v.get('comment','<unavailable>'))
d = v.get('val','unavailable')
print fmtdata % (repr(type(d)), '')
print repr(d)[0:200]
print
print
except AttributeError:
print fmt % (justkey, '<unavailable>', '<unavailable>', repr(v)[:50])
def intm(n):
    """Best-effort int conversion: return int(n), or 0 when n cannot be
    converted (bad string, None, ...).

    Narrowed from a bare except so that unrelated exceptions (e.g.
    KeyboardInterrupt) are no longer swallowed.
    """
    try:
        return int(n)
    except (TypeError, ValueError):
        return 0
def jot_obj(self, obj, name, comment=''):
    """
    write obj data to the note database, with whatever that should be noted.

    Stores a Struct under the key 'jot/<name><suffix>' holding the value,
    a timestamp, the session input history, the original name, and a
    comment collected via %edit.
    """
    had = self.db.keys('jot/'+name+'*')
    # if an entry with the same name already exists, we stupidly append a
    # number derived from the existing suffixes so the key doesn't
    # collide. Any better idea?
    suffix = ''
    if len(had)>0:
        pre = os.path.commonprefix(had)
        suf = [n.split(pre)[1] for n in had]
        versions = map(intm, suf)
        suffix = str(max(versions)+1)
    uname = 'jot/'+name+suffix
    # snapshot the session's parsed input history alongside the value
    all = ip.shell.history_manager.input_hist_parsed
    # We may actually want to make snapshot of files that are run-ned.
    # get the comment
    try:
        comment = ip.magic_edit('-x').strip()
    except:
        # best-effort: any editor failure just means no comment
        print "No comment is recorded."
        comment = ''
    self.db[uname] = Struct({'val':obj,
                'time' : datetime.now(),
                'hist' : all,
                'name' : name,
                'comment' : comment,})
    print "Jotted down notes for '%s' (%s)" % (uname, obj.__class__.__name__)
def magic_jot(self, parameter_s=''):
"""Lightweight persistence for python variables.
Example:
ville@badger[~]|1> A = ['hello',10,'world']\\
ville@badger[~]|2> %jot A\\
ville@badger[~]|3> Exit
(IPython session is closed and started again...)
ville@badger:~$ ipython -p pysh\\
ville@badger[~]|1> print A
['hello', 10, 'world']
Usage:
%jot - Show list of all variables and their current values\\
%jot -l - Show list of all variables and their current values in detail\\
%jot -l <var> - Show one variable and its current values in detail\\
%jot <var> - Store the *current* value of the variable to disk\\
%jot -d <var> - Remove the variable and its value from storage\\
%jot -z - Remove all variables from storage (disabled)\\
%jot -r <var> - Refresh/Load variable from jot (delete current vals)\\
%jot foo >a.txt - Store value of foo to new file a.txt\\
%jot foo >>a.txt - Append value of foo to file a.txt\\
It should be noted that if you change the value of a variable, you
need to %note it again if you want to persist the new value.
Note also that the variables will need to be pickleable; most basic
python types can be safely %stored.
"""
opts,argsl = self.parse_options(parameter_s,'drzl',mode='string')
args = argsl.split(None,1)
ip = self.getapi()
db = ip.db
# delete
if opts.has_key('d'):
try:
todel = args[0]
except IndexError:
error('You must provide the variable to forget')
else:
try:
del db['jot/' + todel]
except:
error("Can't delete variable '%s'" % todel)
# reset the whole database
elif opts.has_key('z'):
print "reseting the whole database has been disabled."
#for k in db.keys('autorestore/*'):
# del db[k]
elif opts.has_key('r'):
try:
toret = args[0]
except:
print "restoring all the variables jotted down..."
refresh_variables(ip)
else:
refresh_variables(ip, toret)
elif opts.has_key('l'):
try:
tolist = args[0]
except:
print "List details for all the items."
detail_variables(ip)
else:
print "Details for", tolist, ":"
detail_variables(ip, tolist)
# run without arguments -> list noted variables & notes
elif not args:
vars = self.db.keys('jot/*')
vars.sort()
if vars:
size = max(map(len,vars)) - 4
else:
size = 0
print 'Variables and their in-db values:'
fmt = '%-'+str(size)+'s [%s] -> %s'
get = db.get
for var in vars:
justkey = os.path.basename(var)
v = get(var,'<unavailable>')
try:
print fmt % (justkey,\
datetime.ctime(v.get('time','<unavailable>')),\
v.get('comment','<unavailable>')[:70].replace('\n',' '),)
except AttributeError:
print fmt % (justkey, '<unavailable>', '<unavailable>', repr(v)[:50])
# default action - store the variable
else:
# %store foo >file.txt or >>file.txt
if len(args) > 1 and args[1].startswith('>'):
fnam = os.path.expanduser(args[1].lstrip('>').lstrip())
if args[1].startswith('>>'):
fil = open(fnam,'a')
else:
fil = open(fnam,'w')
obj = ip.ev(args[0])
print "Writing '%s' (%s) to file '%s'." % (args[0],
obj.__class__.__name__, fnam)
if not isinstance (obj,basestring):
from pprint import pprint
pprint(obj,fil)
else:
fil.write(obj)
if not obj.endswith('\n'):
fil.write('\n')
fil.close()
return
# %note foo
try:
obj = ip.user_ns[args[0]]
except KeyError:
# this should not be alias, for aliases, use %store
print
print "Error: %s doesn't exist." % args[0]
print
print "Use %note -r <var> to retrieve variables. This should not be used " +\
"to store alias, for saving aliases, use %store"
return
else:
if isinstance(inspect.getmodule(obj), FakeModule):
print textwrap.dedent("""\
Warning:%s is %s
Proper storage of interactively declared classes (or instances
of those classes) is not possible! Only instances
of classes in real modules on file system can be %%store'd.
""" % (args[0], obj) )
return
#pickled = pickle.dumps(obj)
#self.db[ 'jot/' + args[0] ] = obj
jot_obj(self, obj, args[0])
def magic_read(self, parameter_s=''):
"""
%read <var> - Load variable from data that is jotted down.\\
"""
opts,argsl = self.parse_options(parameter_s,'drzl',mode='string')
args = argsl.split(None,1)
ip = self.getapi()
db = ip.db
#if opts.has_key('r'):
try:
toret = args[0]
except:
print "which record do you want to read out?"
return
else:
return read_variables(ip, toret)
# register the %jot and %read magics with the running IPython instance
ip.define_magic('jot',magic_jot)
ip.define_magic('read',magic_read)
| |
"""
Implementation of LambdaMART.
Interface is very similar to sklearn's tree ensembles. In fact, the majority
of this code is just a port of GradientBoostingRegressor customized for LTR.
The most notable difference is that fit() now takes another `qids` parameter
containing query ids for all the samples.
"""
# Derivative of scikit-learn
# https://github.com/scikit-learn/scikit-learn/
# sklearn/ensemble/gradient_boosting.py
# License: BSD 3 clause
import numbers
import numpy as np
import scipy
import sklearn.ensemble
import sklearn.externals
import sklearn.utils
import sklearn.tree
import time
from . import AdditiveModel
from .. import metrics
from ..util.group import check_qids, get_groups
from ..util.sort import get_sorted_y_positions
import six
from six.moves import range
class LambdaMART(AdditiveModel):
"""Tree-based learning to rank model.
Parameters
----------
metric : object
The metric to be maximized by the model.
learning_rate : float, optional (default=0.1)
Shrinks the contribution of each tree by `learning_rate`.
There is a trade-off between learning_rate and n_estimators.
n_estimators : int, optional (default=100)
The number of boosting stages to perform. Gradient boosting
is fairly robust to over-fitting so a large number usually
results in better performance.
max_depth : int, optional (default=3)
Maximum depth of the individual regression estimators. The maximum
depth limits the number of nodes in the tree. Tune this parameter
for best performance; the best value depends on the interaction
of the input variables.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
subsample : float, optional (default=1.0)
The fraction of samples to be used for fitting the individual base
learners. I have no idea why one would set this to something lower than
one, and results will probably be strange if this is changed from the
default.
query_subsample : float, optional (default=1.0)
The fraction of queries to be used for fitting the individual base
learners.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Choosing `max_features < n_features` leads to a reduction of variance
and an increase in bias.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_leaf_nodes : int or None, optional (default=None)
Grow trees with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
verbose : int, optional (default=0)
Enable verbose output. If 1 then it prints progress and performance
once in a while (the more trees the lower the frequency). If greater
than 1 then it prints progress and performance for every tree.
warm_start : bool, optional (default=False)
When set to ``True``, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just erase the
previous solution.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array, shape = [n_features]
The feature importances (the higher, the more important the feature).
oob_improvement_ : array, shape = [n_estimators]
The improvement in loss (= deviance) on the out-of-bag samples
relative to the previous iteration.
``oob_improvement_[0]`` is the improvement in
loss of the first stage over the ``init`` estimator.
train_score_ : array, shape = [n_estimators]
The i-th score ``train_score_[i]`` is the deviance (= loss) of the
model at iteration ``i`` on the in-bag sample.
If ``subsample == 1`` this is the deviance on the training data.
estimators_ : ndarray of DecisionTreeRegressor, shape = [n_estimators, 1]
The collection of fitted sub-estimators. ``loss_.K`` is 1 for binary
classification, otherwise n_classes.
estimators_fitted_ : int
The number of sub-estimators actually fitted. This may be different
from n_estimators in the case of early stoppage, trimming, etc.
"""
    def __init__(self, metric=None, learning_rate=0.1, n_estimators=100,
                 query_subsample=1.0, subsample=1.0, min_samples_split=2,
                 min_samples_leaf=1, max_depth=3, random_state=None,
                 max_features=None, verbose=0, max_leaf_nodes=None,
                 warm_start=True):
        """Store the boosting hyper-parameters (see the class docstring
        for their meaning). Note that ``warm_start`` defaults to True.
        """
        super(LambdaMART, self).__init__()
        # default to NDCG when no metric object is supplied
        self.metric = metrics.dcg.NDCG() if metric is None else metric
        self.learning_rate = learning_rate
        self.n_estimators = n_estimators
        self.query_subsample = query_subsample
        self.subsample = subsample
        self.min_samples_split = min_samples_split
        self.min_samples_leaf = min_samples_leaf
        self.max_depth = max_depth
        self.random_state = random_state
        self.max_features = max_features
        self.verbose = verbose
        self.max_leaf_nodes = max_leaf_nodes
        self.warm_start = warm_start
    def fit(self, X, y, qids, monitor=None):
        """Fit lambdamart onto a dataset.
        Parameters
        ----------
        X : array_like, shape = [n_samples, n_features]
            Training vectors, where n_samples is the number of samples
            and n_features is the number of features.
        y : array_like, shape = [n_samples]
            Target values (integers in classification, real numbers in
            regression)
            For classification, labels must correspond to classes.
        qids : array_like, shape = [n_samples]
            Query ids for each sample. Samples must be grouped by query such
            that all queries with the same qid appear in one contiguous block.
        monitor : callable, optional
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator and the local variables of
            ``_fit_stages`` as keyword arguments ``callable(i, self,
            locals())``. If the callable returns ``True`` the fitting procedure
            is stopped. The monitor can be used for various things such as
            computing held-out estimates, early stopping, model introspecting,
            and snapshoting.
        """
        if not self.warm_start:
            # drop any previously fitted stages and start from scratch
            self._clear_state()
        X, y = sklearn.utils.check_X_y(X, y, dtype=sklearn.tree._tree.DTYPE)
        n_samples, self.n_features = X.shape
        sklearn.utils.check_consistent_length(X, y, qids)
        if y.dtype.kind == 'O':
            # object-dtype targets (e.g. Python ints) -> float64
            y = y.astype(np.float64)
        random_state = sklearn.utils.check_random_state(self.random_state)
        self._check_params()
        if not self._is_initialized():
            # first fit: allocate stage arrays; predictions start at zero
            self._init_state()
            begin_at_stage = 0
            y_pred = np.zeros(y.shape[0])
        else:
            # warm start: grow the stage arrays and resume boosting from
            # the current model's predictions
            if self.n_estimators < self.estimators_.shape[0]:
                raise ValueError('n_estimators=%d must be larger or equal to '
                                 'estimators_.shape[0]=%d when '
                                 'warm_start==True'
                                 % (self.n_estimators,
                                    self.estimators_.shape[0]))
            begin_at_stage = self.estimators_.shape[0]
            self.estimators_fitted_ = begin_at_stage
            self.estimators_.resize((self.n_estimators, 1))
            self.train_score_.resize(self.n_estimators)
            if self.query_subsample < 1.0:
                self.oob_improvement_.resize(self.n_estimators)
            y_pred = self.predict(X)
        n_stages = self._fit_stages(X, y, qids, y_pred,
                                    random_state, begin_at_stage, monitor)
        if n_stages < self.estimators_.shape[0]:
            # early stop: discard the unused preallocated stage slots
            self.trim(n_stages)
        return self
def predict(self, X):
X = sklearn.utils.validation.check_array(
X, dtype=sklearn.tree._tree.DTYPE, order='C')
score = np.zeros((X.shape[0], 1))
estimators = self.estimators_
if self.estimators_fitted_ < len(estimators):
estimators = estimators[:self.estimators_fitted_]
sklearn.ensemble._gradient_boosting.predict_stages(
estimators, X, self.learning_rate, score)
return score.ravel()
def iter_y_delta(self, i, X):
assert i >= 0 and i < self.estimators_fitted_
X = sklearn.utils.validation.check_array(
X, dtype=sklearn.tree._tree.DTYPE, order='C')
score = np.zeros((X.shape[0], 1))
sklearn.ensemble._gradient_boosting.predict_stage(
self.estimators_, i, X, self.learning_rate, score)
return score.ravel()
def trim(self, n):
assert n <= self.estimators_fitted_
self.estimators_fitted_ = n
self.estimators_ = self.estimators_[:n]
self.train_score_ = self.train_score_[:n]
if hasattr(self, 'oob_improvement_'):
self.oob_improvement_ = self.oob_improvement_[:n]
@property
def feature_importances_(self):
"""Return the feature importances (the higher, the more important the
feature).
Returns
-------
feature_importances_ : array, shape = [n_features]
Array of summed variance reductions.
"""
if self.estimators_ is None or len(self.estimators_) == 0:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
total_sum = np.zeros((self.n_features, ), dtype=np.float64)
for stage in self.estimators_:
stage_sum = sum(tree.feature_importances_
for tree in stage) / len(stage)
total_sum += stage_sum
importances = total_sum / len(self.estimators_)
return importances
    def _calc_lambdas_deltas(self, qid, y, y_pred):
        """Compute the LambdaMART lambda (first-order) and delta
        (second-order) terms for one query's samples.

        Iterates over document pairs (i, j) in predicted-rank order and
        accumulates, per original sample position, the metric-weighted
        logistic gradients used to fit the next tree.
        """
        ns = y.shape[0]
        # indices of samples sorted by decreasing predicted score
        positions = get_sorted_y_positions(y, y_pred, check=False)
        actual = y[positions]
        # swap_deltas[i, j]: metric change if ranks i and j were swapped
        swap_deltas = self.metric.calc_swap_deltas(qid, actual)
        max_k = self.metric.max_k()
        if max_k is None or ns < max_k:
            max_k = ns
        lambdas = np.zeros(ns)
        deltas = np.zeros(ns)
        for i in range(max_k):
            for j in range(i + 1, ns):
                if actual[i] == actual[j]:
                    continue
                delta_metric = swap_deltas[i, j]
                if delta_metric == 0.0:
                    continue
                a, b = positions[i], positions[j]
                # invariant: y_pred[a] >= y_pred[b]
                if actual[i] < actual[j]:
                    # j should outrank i: push a down, b up
                    assert delta_metric > 0.0
                    logistic = scipy.special.expit(y_pred[a] - y_pred[b])
                    l = logistic * delta_metric
                    lambdas[a] -= l
                    lambdas[b] += l
                else:
                    # i correctly outranks j: reinforce the ordering
                    assert delta_metric < 0.0
                    logistic = scipy.special.expit(y_pred[b] - y_pred[a])
                    l = logistic * -delta_metric
                    lambdas[a] += l
                    lambdas[b] -= l
                gradient = (1 - logistic) * l
                deltas[a] += gradient
                deltas[b] += gradient
        return lambdas, deltas
def _update_terminal_regions(self, tree, X, y, lambdas, deltas, y_pred,
sample_mask):
terminal_regions = tree.apply(X)
masked_terminal_regions = terminal_regions.copy()
masked_terminal_regions[~sample_mask] = -1
for leaf in np.where(tree.children_left ==
sklearn.tree._tree.TREE_LEAF)[0]:
terminal_region = np.where(masked_terminal_regions == leaf)
suml = np.sum(lambdas[terminal_region])
sumd = np.sum(deltas[terminal_region])
tree.value[leaf, 0, 0] = 0.0 if abs(sumd) < 1e-300 else (suml / sumd)
y_pred += tree.value[terminal_regions, 0, 0] * self.learning_rate
def _fit_stage(self, i, X, y, qids, y_pred, sample_weight, sample_mask,
query_groups, random_state):
"""Fit another tree to the boosting model."""
assert sample_mask.dtype == np.bool
n_samples = X.shape[0]
all_lambdas = np.zeros(n_samples)
all_deltas = np.zeros(n_samples)
for qid, a, b, _ in query_groups:
lambdas, deltas = self._calc_lambdas_deltas(qid, y[a:b],
y_pred[a:b])
all_lambdas[a:b] = lambdas
all_deltas[a:b] = deltas
tree = sklearn.tree.DecisionTreeRegressor(
criterion='friedman_mse',
splitter='best',
max_depth=self.max_depth,
min_samples_split=self.min_samples_split,
min_samples_leaf=self.min_samples_leaf,
min_weight_fraction_leaf=0.0,
max_features=self.max_features,
max_leaf_nodes=self.max_leaf_nodes,
random_state=random_state)
if self.subsample < 1.0 or self.query_subsample < 1.0:
sample_weight = sample_weight * sample_mask.astype(np.float64)
tree.fit(X, all_lambdas, sample_weight=sample_weight,
check_input=False)
self._update_terminal_regions(tree.tree_, X, y, all_lambdas,
all_deltas, y_pred, sample_mask)
self.estimators_[i, 0] = tree
self.estimators_fitted_ = i + 1
return y_pred
def _fit_stages(self, X, y, qids, y_pred, random_state,
begin_at_stage=0, monitor=None):
n_samples = X.shape[0]
do_subsample = self.subsample < 1.0
sample_weight = np.ones(n_samples, dtype=np.float64)
n_queries = check_qids(qids)
query_groups = np.array([(qid, a, b, np.arange(a, b))
for qid, a, b in get_groups(qids)],
dtype=np.object)
assert n_queries == len(query_groups)
do_query_oob = self.query_subsample < 1.0
query_mask = np.ones(n_queries, dtype=np.bool)
query_idx = np.arange(n_queries)
q_inbag = max(1, int(self.query_subsample * n_queries))
if self.verbose:
verbose_reporter = _VerboseReporter(self.verbose)
verbose_reporter.init(self, begin_at_stage)
for i in range(begin_at_stage, self.n_estimators):
if do_query_oob:
random_state.shuffle(query_idx)
query_mask = np.zeros(n_queries, dtype=np.bool)
query_mask[query_idx[:q_inbag]] = 1
query_groups_to_use = query_groups[query_mask]
sample_mask = np.zeros(n_samples, dtype=np.bool)
for qid, a, b, sidx in query_groups_to_use:
sidx_to_use = sidx
if do_subsample:
query_samples_inbag = max(
1, int(self.subsample * (b - 1)))
random_state.shuffle(sidx)
sidx_to_use = sidx[:query_samples_inbag]
sample_mask[sidx_to_use] = 1
if do_query_oob:
old_oob_total_score = 0.0
for qid, a, b, _ in query_groups[~query_mask]:
old_oob_total_score += self.metric.evaluate_preds(
qid, y[a:b], y_pred[a:b])
y_pred = self._fit_stage(i, X, y, qids, y_pred, sample_weight,
sample_mask, query_groups_to_use,
random_state)
train_total_score, oob_total_score = 0.0, 0.0
for qidx, (qid, a, b, _) in enumerate(query_groups):
score = self.metric.evaluate_preds(
qid, y[a:b], y_pred[a:b])
if query_mask[qidx]:
train_total_score += score
else:
oob_total_score += score
self.train_score_[i] = train_total_score / q_inbag
if do_query_oob:
if q_inbag < n_queries:
self.oob_improvement_[i] = \
((oob_total_score - old_oob_total_score) /
(n_queries - q_inbag))
early_stop = False
monitor_output = None
if monitor is not None:
monitor_output = monitor(i, self, locals())
if monitor_output is True:
early_stop = True
if self.verbose > 0:
verbose_reporter.update(i, self, monitor_output)
if early_stop:
break
return i + 1
def _is_initialized(self):
return len(getattr(self, 'estimators_', [])) > 0
def _init_state(self):
self.estimators_ = np.empty((self.n_estimators, 1), dtype=np.object)
self.estimators_fitted_ = 0
self.train_score_ = np.zeros(self.n_estimators, dtype=np.float64)
if self.query_subsample < 1.0:
self.oob_improvement_ = np.zeros((self.n_estimators,),
dtype=np.float64)
def _clear_state(self):
if hasattr(self, 'estimators_'):
del self.estimators_
if hasattr(self, 'esimators_trained'):
del self.estimators_fitted_
if hasattr(self, 'train_score_'):
del self.train_score_
if hasattr(self, 'oob_improvement_'):
del self.oob_improvement_
def _check_params(self):
if self.n_estimators <= 0:
raise ValueError("n_estimators must be greater than 0 but "
"was %r" % self.n_estimators)
if self.learning_rate <= 0.0:
raise ValueError("learning_rate must be greater than 0 but "
"was %r" % self.learning_rate)
if not (0.0 < self.subsample <= 1.0):
raise ValueError("subsample must be in (0,1] but "
"was %r" % self.subsample)
if not (0.0 < self.query_subsample <= 1.0):
raise ValueError("query_subsample must be in (0,1] but "
"was %r" % self.query_subsample)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
max_features = self.n_features
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features)))
else:
raise ValueError("Invalid value for max_features: %r. "
"Allowed string values are 'auto', 'sqrt' "
"or 'log2'." % self.max_features)
elif self.max_features is None:
max_features = self.n_features
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if 0. < self.max_features <= 1.:
max_features = max(int(self.max_features * self.n_features), 1)
else:
raise ValueError("max_features must be in (0,1]")
self.max_features_ = max_features
class _VerboseReporter(object):
"""Reports verbose output to stdout.
If ``verbose==1`` output is printed once in a while (when iteration mod
verbose_mod is zero).; if larger than 1 then output is printed for
each update.
"""
def __init__(self, verbose):
self.verbose = verbose
def init(self, est, begin_at_stage=0):
# header fields and line format str
header_fields = ['Iter', 'Train score']
verbose_fmt = ['{iter:>5d}', '{train_score:>12.4f}']
# do oob?
if est.query_subsample < 1:
header_fields.append('OOB Improve')
verbose_fmt.append('{oob_impr:>12.4f}')
header_fields.append('Remaining')
verbose_fmt.append('{remaining_time:>12s}')
header_fields.append('Monitor Output')
verbose_fmt.append('{monitor_output:>40s}')
# print the header line
print(('%5s ' + '%12s ' *
(len(header_fields) - 2) + '%40s ') % tuple(header_fields))
self.verbose_fmt = ' '.join(verbose_fmt)
# plot verbose info each time i % verbose_mod == 0
self.verbose_mod = 1
self.start_time = time.time()
self.begin_at_stage = begin_at_stage
def update(self, j, est, monitor_output):
"""Update reporter with new iteration. """
if monitor_output is True:
print('Early termination at iteration ', j)
return
do_query_oob = est.query_subsample < 1
# we need to take into account if we fit additional estimators.
i = j - self.begin_at_stage # iteration relative to the start iter
if self.verbose > 1 or (i + 1) % self.verbose_mod == 0:
oob_impr = est.oob_improvement_[j] if do_query_oob else 0
remaining_time = ((est.n_estimators - (j + 1)) *
(time.time() - self.start_time) / float(i + 1))
if remaining_time > 60:
remaining_time = '{0:.2f}m'.format(remaining_time / 60.0)
else:
remaining_time = '{0:.2f}s'.format(remaining_time)
if monitor_output is None:
monitor_output = ''
print(self.verbose_fmt.format(iter=j + 1,
train_score=est.train_score_[j],
oob_impr=oob_impr,
remaining_time=remaining_time,
monitor_output=monitor_output))
if i + 1 >= 10:
self.verbose_mod = 5
if i + 1 >= 50:
self.verbose_mod = 10
if i + 1 >= 100:
self.verbose_mod = 20
if i + 1 >= 500:
self.verbose_mod = 50
if i + 1 >= 1000:
self.verbose_mod = 100
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Perturb a `LinearOperator` with a rank `K` update."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.contrib.linalg.python.ops import linear_operator
from tensorflow.contrib.linalg.python.ops import linear_operator_diag
from tensorflow.contrib.linalg.python.ops import linear_operator_identity
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
# Public API of this module.
__all__ = ["LinearOperatorUDVHUpdate",]
class LinearOperatorUDVHUpdate(linear_operator.LinearOperator):
  """Perturb a `LinearOperator` with a rank `K` update.

  This operator acts like a [batch] matrix `A` with shape
  `[B1,...,Bb, M, N]` for some `b >= 0`.  The first `b` indices index a
  batch member.  For every batch index `(i1,...,ib)`, `A[i1,...,ib, : :]` is
  an `M x N` matrix.

  `LinearOperatorUDVHUpdate` represents `A = L + U D V^H`, where

  ```
  L, is a LinearOperator representing [batch] M x N matrices
  U, is a [batch] M x K matrix.  Typically K << M.
  D, is a [batch] K x K matrix.
  V, is a [batch] N x K matrix.  Typically K << N.
  V^H is the Hermitian transpose (adjoint) of V.
  ```

  If `M = N`, determinants and solves are done using the matrix determinant
  lemma and Woodbury identities, and thus require L and D to be non-singular.

  Solves and determinants will be attempted unless the "is_non_singular"
  property of L and D is False.

  In the event that L and D are positive-definite, and U = V, solves and
  determinants can be done using a Cholesky factorization.

  ```python
  # Create a 3 x 3 diagonal linear operator.
  diag_operator = LinearOperatorDiag(
      diag=[1., 2., 3.], is_non_singular=True, is_self_adjoint=True,
      is_positive_definite=True)

  # Perturb with a rank 2 perturbation
  operator = LinearOperatorUDVHUpdate(
      base_operator=diag_operator,
      u=[[1., 2.], [-1., 3.], [0., 0.]],
      diag=[11., 12.],
      v=[[1., 2.], [-1., 3.], [10., 10.]])

  operator.shape
  ==> [3, 3]

  operator.log_determinant()
  ==> scalar Tensor

  x = ... Shape [3, 4] Tensor
  operator.apply(x)
  ==> Shape [3, 4] Tensor
  ```

  ### Shape compatibility

  This operator acts on [batch] matrix with compatible shape.
  `x` is a batch matrix with compatible shape for `apply` and `solve` if

  ```
  operator.shape = [B1,...,Bb] + [M, N],  with b >= 0
  x.shape = [B1,...,Bb] + [N, R],  with R >= 0.
  ```

  ### Performance

  Suppose `operator` is a `LinearOperatorUDVHUpdate` of shape `[M, N]`,
  made from a rank `K` update of `base_operator` which performs `.apply(x)` on
  `x` having `x.shape = [N, R]` with `O(L_apply*N*R)` complexity (and similarly
  for `solve`, `determinant`).  Then, if `x.shape = [N, R]`,

  * `operator.apply(x)` is `O(L_apply*N*R + K*N*R)`

  and if `M = N`,

  * `operator.solve(x)` is `O(L_apply*N*R + N*K*R + K^2*R + K^3)`
  * `operator.determinant()` is `O(L_determinant + L_solve*N*K + K^2*N + K^3)`

  If instead `operator` and `x` have shape `[B1,...,Bb, M, N]` and
  `[B1,...,Bb, N, R]`, every operation increases in complexity by `B1*...*Bb`.

  #### Matrix property hints

  This `LinearOperator` is initialized with boolean flags of the form `is_X`,
  for `X = non_singular, self_adjoint, positive_definite, diag_positive, square`
  These have the following meaning
  * If `is_X == True`, callers should expect the operator to have the
    property `X`.  This is a promise that should be fulfilled, but is *not* a
    runtime assert.  For example, finite floating point precision may result
    in these promises being violated.
  * If `is_X == False`, callers should expect the operator to not have `X`.
  * If `is_X == None` (the default), callers should have no expectation either
    way.
  """

  def __init__(self,
               base_operator,
               u,
               diag=None,
               v=None,
               is_diag_positive=None,
               is_non_singular=None,
               is_self_adjoint=None,
               is_positive_definite=None,
               is_square=None,
               name="LinearOperatorUDVHUpdate"):
    """Initialize a `LinearOperatorUDVHUpdate`.

    This creates a `LinearOperator` of the form `A = L + U D V^H`, with
    `L` a `LinearOperator`, `U, V` both [batch] matrices, and `D` a [batch]
    diagonal matrix.

    If `L` is non-singular, solves and determinants are available.
    Solves/determinants both involve a solve/determinant of a `K x K` system.
    In the event that L and D are self-adjoint positive-definite, and U = V,
    this can be done using a Cholesky factorization.  The user should set the
    `is_X` matrix property hints, which will trigger the appropriate code path.

    Args:
      base_operator:  Shape `[B1,...,Bb, M, N]` real `float32` or `float64`
        `LinearOperator`.  This is `L` above.
      u:  Shape `[B1,...,Bb, M, K]` `Tensor` of same `dtype` as `base_operator`.
        This is `U` above.
      diag:  Optional shape `[B1,...,Bb, K]` `Tensor` with same `dtype` as
        `base_operator`.  This is the diagonal of `D` above.
        Defaults to `D` being the identity operator.
      v:  Optional `Tensor` of same `dtype` as `u` and shape `[B1,...,Bb, N, K]`
        Defaults to `v = u`, in which case the perturbation is symmetric.
        If `M != N`, then `v` must be set since the perturbation is not square.
      is_diag_positive:  Python `bool`.  If `True`, expect `diag > 0`.
      is_non_singular:  Expect that this operator is non-singular.
        Default is `None`, unless `is_positive_definite` is auto-set to be
        `True` (see below).
      is_self_adjoint:  Expect that this operator is equal to its hermitian
        transpose.  Default is `None`, unless `base_operator` is self-adjoint
        and `v = None` (meaning `u=v`), in which case this defaults to `True`.
      is_positive_definite:  Expect that this operator is positive definite.
        Default is `None`, unless `base_operator` is positive-definite
        `v = None` (meaning `u=v`), and `is_diag_positive`, in which case this
        defaults to `True`.
      is_square:  Expect that this operator acts like square [batch] matrices.
      name: A name for this `LinearOperator`.

    Raises:
      ValueError:  If `is_X` flags are set in an inconsistent way.
    """
    # TODO(langmore) support complex types.
    # Complex types are not allowed due to tf.cholesky() requiring float.
    # If complex dtypes are allowed, we update the following
    # 1. is_diag_positive should still imply that `diag > 0`, but we need to
    #    remind the user that this implies diag is real.  This is needed because
    #    if diag has non-zero imaginary part, it will not be self-adjoint
    #    positive definite.
    dtype = base_operator.dtype
    allowed_dtypes = [dtypes.float32, dtypes.float64]
    if dtype not in allowed_dtypes:
      raise TypeError(
          "Argument matrix must have dtype in %s. Found: %s"
          % (allowed_dtypes, dtype))
    if diag is None:
      # The default D is the identity, which is positive by construction, so
      # an explicit is_diag_positive=False contradicts it.
      if is_diag_positive is False:
        raise ValueError(
            "Default diagonal is the identity, which is positive. However, "
            "user set 'is_diag_positive' to False.")
      is_diag_positive = True
    # In this case, we can use a Cholesky decomposition to help us solve/det.
    self._use_cholesky = (
        base_operator.is_positive_definite and base_operator.is_self_adjoint
        and is_diag_positive
        and v is None)
    # Possibly auto-set some characteristic flags from None to True.
    # If the Flags were set (by the user) incorrectly to False, then raise.
    if base_operator.is_self_adjoint and v is None and not dtype.is_complex:
      if is_self_adjoint is False:
        raise ValueError(
            "A = L + UDU^H, with L self-adjoint and D real diagonal. Since"
            " UDU^H is self-adjoint, this must be a self-adjoint operator.")
      is_self_adjoint = True
    # The condition for using a Cholesky factorization is sufficient for SPD,
    # and no weaker combination of these hints guarantees SPD.  Therefore,
    # the following line reads "if hints indicate SPD..."
    if self._use_cholesky:
      if (
          is_positive_definite is False
          or is_self_adjoint is False
          or is_non_singular is False):
        raise ValueError(
            "Arguments imply this is self-adjoint positive-definite operator.")
      is_positive_definite = True
      is_self_adjoint = True

    values = base_operator.graph_parents + [u, diag, v]
    with ops.name_scope(name, values=values):
      # Create U and V.
      self._u = ops.convert_to_tensor(u, name="u")
      if v is None:
        # u is reused: the perturbation is U D U^H.
        self._v = self._u
      else:
        self._v = ops.convert_to_tensor(v, name="v")
      if diag is None:
        self._diag = None
      else:
        self._diag = ops.convert_to_tensor(diag, name="diag")

      # Create base_operator L.
      self._base_operator = base_operator
      # None entries (diag and/or v may be unset) are filtered out below.
      graph_parents = base_operator.graph_parents + [self.u, self.diag, self.v]
      graph_parents = [p for p in graph_parents if p is not None]

      super(LinearOperatorUDVHUpdate, self).__init__(
          dtype=self._base_operator.dtype,
          graph_parents=graph_parents,
          is_non_singular=is_non_singular,
          is_self_adjoint=is_self_adjoint,
          is_positive_definite=is_positive_definite,
          is_square=is_square,
          name=name)

      # Create the diagonal operator D.
      self._set_diag_operators(diag, is_diag_positive)

      # Pre-compute the so-called "capacitance" matrix
      #   C := D^{-1} + V^H L^{-1} U
      self._capacitance = self._make_capacitance()
      if self._use_cholesky:
        # Factor C once up front; reused by _solve and _log_abs_determinant.
        self._chol_capacitance = linalg_ops.cholesky(self._capacitance)

      contrib_tensor_util.assert_same_float_dtype(
          (base_operator, self.u, self.v, self.diag))

  def _set_diag_operators(self, diag, is_diag_positive):
    """Set attributes self._diag and self._diag_operator."""
    if diag is not None:
      self._diag_operator = linear_operator_diag.LinearOperatorDiag(
          self._diag, is_positive_definite=is_diag_positive)
      # D is diagonal, so its inverse is simply the elementwise reciprocal.
      self._diag_inv_operator = linear_operator_diag.LinearOperatorDiag(
          1. / self._diag, is_positive_definite=is_diag_positive)
    else:
      # D defaults to the K x K identity; its inverse is itself.  K is the
      # last dimension of u, taken statically when known.
      if self.u.get_shape()[-1].value is not None:
        r = self.u.get_shape()[-1].value
      else:
        r = array_ops.shape(self.u)[-1]
      self._diag_operator = linear_operator_identity.LinearOperatorIdentity(
          num_rows=r, dtype=self.dtype)
      self._diag_inv_operator = self._diag_operator

  @property
  def u(self):
    """If this operator is `A = L + U D V^H`, this is the `U`."""
    return self._u

  @property
  def v(self):
    """If this operator is `A = L + U D V^H`, this is the `V`."""
    return self._v

  @property
  def diag(self):
    """If this operator is `A = L + U D V^H`, this is the diagonal of `D`."""
    return self._diag

  @property
  def diag_operator(self):
    """If this operator is `A = L + U D V^H`, this is `D`."""
    return self._diag_operator

  @property
  def base_operator(self):
    """If this operator is `A = L + U D V^H`, this is the `L`."""
    return self._base_operator

  def _shape(self):
    # Static shape: batch dims broadcast between L and u; matrix dims are L's.
    batch_shape = array_ops.broadcast_static_shape(
        self.base_operator.batch_shape,
        self.u.get_shape()[:-2])
    return batch_shape.concatenate(self.base_operator.shape[-2:])

  def _shape_tensor(self):
    # Dynamic counterpart of _shape.
    batch_shape = array_ops.broadcast_dynamic_shape(
        self.base_operator.batch_shape_tensor(),
        array_ops.shape(self.u)[:-2])
    return array_ops.concat(
        [batch_shape, self.base_operator.shape_tensor()[-2:]], axis=0)

  def _apply(self, x, adjoint=False):
    # A x   = L x   + U D V^H x
    # A^H x = L^H x + V D^H U^H x
    u = self.u
    v = self.v
    l = self.base_operator
    d = self.diag_operator

    leading_term = l.apply(x, adjoint=adjoint)

    if adjoint:
      uh_x = math_ops.matmul(u, x, adjoint_a=True)
      d_uh_x = d.apply(uh_x, adjoint=adjoint)
      v_d_uh_x = math_ops.matmul(v, d_uh_x)
      return leading_term + v_d_uh_x
    else:
      vh_x = math_ops.matmul(v, x, adjoint_a=True)
      d_vh_x = d.apply(vh_x, adjoint=adjoint)
      u_d_vh_x = math_ops.matmul(u, d_vh_x)
      return leading_term + u_d_vh_x

  def _determinant(self):
    if self.is_positive_definite:
      # Positive-definite => det > 0, so exp(log|det|) is exact.
      return math_ops.exp(self.log_abs_determinant())
    # The matrix determinant lemma gives
    # https://en.wikipedia.org/wiki/Matrix_determinant_lemma
    #   det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
    #                  = det(C) det(D) det(L)
    # where C is sometimes known as the capacitance matrix,
    #   C := D^{-1} + V^H L^{-1} U
    det_c = linalg_ops.matrix_determinant(self._capacitance)
    det_d = self.diag_operator.determinant()
    det_l = self.base_operator.determinant()
    return det_c * det_d * det_l

  def _log_abs_determinant(self):
    # Recall
    #   det(L + UDV^H) = det(D^{-1} + V^H L^{-1} U) det(D) det(L)
    #                  = det(C) det(D) det(L)
    log_abs_det_d = self.diag_operator.log_abs_determinant()
    log_abs_det_l = self.base_operator.log_abs_determinant()

    if self._use_cholesky:
      # log|det(C)| = 2 * sum(log(diag(chol(C)))).
      chol_cap_diag = array_ops.matrix_diag_part(self._chol_capacitance)
      log_abs_det_c = 2 * math_ops.reduce_sum(
          math_ops.log(chol_cap_diag), reduction_indices=[-1])
    else:
      det_c = linalg_ops.matrix_determinant(self._capacitance)
      log_abs_det_c = math_ops.log(math_ops.abs(det_c))

    return log_abs_det_c + log_abs_det_d + log_abs_det_l

  def _solve(self, rhs, adjoint=False):
    if self.base_operator.is_non_singular is False:
      raise ValueError(
          "Solve not implemented unless this is a perturbation of a "
          "non-singular LinearOperator.")
    # The Woodbury formula gives:
    # https://en.wikipedia.org/wiki/Woodbury_matrix_identity
    #   (L + UDV^H)^{-1}
    #   = L^{-1} - L^{-1} U (D^{-1} + V^H L^{-1} U)^{-1} V^H L^{-1}
    #   = L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
    # where C is the capacitance matrix, C := D^{-1} + V^H L^{-1} U
    # Note also that, with ^{-H} being the inverse of the adjoint,
    #   (L + UDV^H)^{-H}
    #   = L^{-H} - L^{-H} V C^{-H} U^H L^{-H}
    l = self.base_operator
    if adjoint:
      # Swapping u and v realizes the ^{-H} identity above.
      v = self.u
      u = self.v
    else:
      v = self.v
      u = self.u

    # L^{-1} rhs
    linv_rhs = l.solve(rhs, adjoint=adjoint)
    # V^H L^{-1} rhs
    vh_linv_rhs = math_ops.matmul(v, linv_rhs, adjoint_a=True)
    # C^{-1} V^H L^{-1} rhs
    if self._use_cholesky:
      # In the Cholesky path u = v and C is self-adjoint, so no adjoint flag
      # is needed here.
      capinv_vh_linv_rhs = linalg_ops.cholesky_solve(
          self._chol_capacitance, vh_linv_rhs)
    else:
      capinv_vh_linv_rhs = linalg_ops.matrix_solve(
          self._capacitance, vh_linv_rhs, adjoint=adjoint)
    # U C^{-1} V^H L^{-1} rhs
    u_capinv_vh_linv_rhs = math_ops.matmul(u, capinv_vh_linv_rhs)
    # L^{-1} U C^{-1} V^H L^{-1} rhs
    linv_u_capinv_vh_linv_rhs = l.solve(u_capinv_vh_linv_rhs, adjoint=adjoint)

    # L^{-1} - L^{-1} U C^{-1} V^H L^{-1}
    return linv_rhs - linv_u_capinv_vh_linv_rhs

  def _make_capacitance(self):
    # C := D^{-1} + V^H L^{-1} U
    # which is sometimes known as the "capacitance" matrix.

    # L^{-1} U
    linv_u = self.base_operator.solve(self.u)
    # V^H L^{-1} U
    vh_linv_u = math_ops.matmul(self.v, linv_u, adjoint_a=True)
    # D^{-1} + V^H L^{-1} U
    capacitance = self._diag_inv_operator.add_to_tensor(vh_linv_u)
    return capacitance
| |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module for Google Connection and Authentication classes.
Information about setting up your Google OAUTH2 credentials:
For libcloud, there are two basic methods for authenticating to Google using
OAUTH2: Service Accounts and Client IDs for Installed Applications.
Both are initially set up from the Cloud Console -
https://cloud.google.com/console
Setting up Service Account authentication (note that you need the PyCrypto
package installed to use this):
- Go to the Console
- Go to your project and then to "APIs & auth" on the left
- Click on "Credentials"
- Click on "Create New Client ID..."
- Select "Service account" and click on "Create Client ID"
- Download the Private Key (should happen automatically). The key you download
is in JSON format.
- Move the .json file to a safe location.
- Optionally, you may choose to Generate a PKCS12 key from the Console.
It needs to be converted to the PEM format. Please note, the PKCS12 format
is deprecated and may be removed in a future release.
- Convert the key using OpenSSL (the default password is 'notasecret').
- Move the .pem file to a safe location.
- To Authenticate, you will need to pass the Service Account's "Email
address" in as the user_id and the path to the .pem file as the key.
Setting up Installed Application authentication:
- Go to the Console
- Go to your project and then to "APIs & auth" on the left
- Click on "Credentials"
- Select "Installed application" and "Other" then click on
"Create Client ID"
- To Authenticate, pass in the "Client ID" as the user_id and the "Client
secret" as the key
- The first time that you do this, libcloud will give you a URL to
visit. Copy and paste the URL into a browser.
- When you go to the URL it will ask you to log in (if you aren't already)
and ask you if you want to allow the project access to your account.
- Click on Accept and you will be given a code.
- Paste that code at the prompt given to you by the Google libcloud
connection.
- At that point, a token & refresh token will be stored in your home
directory and will be used for authentication.
Please remember to secure your keys and access tokens.
"""
from __future__ import with_statement
try:
import simplejson as json
except ImportError:
import json
import logging
import base64
import errno
import time
import datetime
import os
import socket
import sys
from libcloud.utils.connection import get_response_object
from libcloud.utils.py3 import b, httplib, urlencode, urlparse, PY3
from libcloud.common.base import (ConnectionUserAndKey, JsonResponse,
PollingConnection)
from libcloud.common.types import (ProviderError,
LibcloudError)
try:
from Crypto.Hash import SHA256
from Crypto.PublicKey import RSA
from Crypto.Signature import PKCS1_v1_5
import Crypto.Random
Crypto.Random.atfork()
except ImportError:
# The pycrypto library is unavailable
SHA256 = None
RSA = None
PKCS1_v1_5 = None
# strftime/strptime pattern for the UTC timestamps used in token expiry
# bookkeeping (e.g. '2016-01-01T12:00:00Z').
UTC_TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%SZ'

LOG = logging.getLogger(__name__)
def _utcnow():
"""
Mocked in libcloud.test.common.google.GoogleTestCase.
"""
return datetime.datetime.utcnow()
def _utc_timestamp(datetime_obj):
    """Format a datetime using the module-wide UTC timestamp pattern."""
    # format() delegates to datetime.__format__, i.e. strftime.
    return format(datetime_obj, UTC_TIMESTAMP_FORMAT)
def _from_utc_timestamp(timestamp):
    """Parse a UTC timestamp string back into a naive datetime."""
    parsed = datetime.datetime.strptime(timestamp, UTC_TIMESTAMP_FORMAT)
    return parsed
def _get_gce_metadata(path=''):
    """Fetch a path from the GCE internal metadata service.

    :return: Tuple of (status, error_string, body); status is -1 and body is
             None when any error occurs.
    """
    try:
        headers = {'Metadata-Flavor': 'Google'}
        url = 'http://metadata/computeMetadata/v1/' + path.lstrip('/')
        response = get_response_object(url, headers=headers)
        return response.status, '', response.body
    except Exception as e:
        # Best-effort: callers inspect the -1 status instead of handling
        # exceptions.
        return -1, str(e), None
class GoogleAuthError(LibcloudError):
    """Generic Error class for various authentication errors."""

    def __init__(self, value):
        # Human-readable description of the authentication failure.
        self.value = value

    def __repr__(self):
        return repr(self.value)
class GoogleBaseError(ProviderError):
    """Base error for Google API failures; carries Google's error code."""

    def __init__(self, value, http_code, code, driver=None):
        # Google's machine-readable error identifier (may be None).
        self.code = code
        super(GoogleBaseError, self).__init__(value, http_code, driver)
class InvalidRequestError(GoogleBaseError):
    """Raised for HTTP 400 (Bad Request) responses."""
    pass
class JsonParseError(GoogleBaseError):
    """Raised when an otherwise-successful response body is not valid JSON."""
    pass
class ResourceNotFoundError(GoogleBaseError):
    """Raised for HTTP 404 responses, with extra guidance for bad projects."""

    def __init__(self, value, http_code, code, driver=None):
        self.code = code
        # A message shaped like '.../projects/<name>' usually means the
        # credentials point at the wrong project, so append a hint.
        looks_like_missing_project = (
            isinstance(value, dict) and 'message' in value and
            value['message'].count('/') == 1 and
            value['message'].count('projects/') == 1)
        if looks_like_missing_project:
            value['message'] += (". A missing project error may be an "
                                 "authentication issue. Please ensure your "
                                 "auth credentials match your project. ")
        super(ResourceNotFoundError, self).__init__(value, http_code, driver)
class QuotaExceededError(GoogleBaseError):
    """Raised when the API reports the QUOTA_EXCEEDED error code."""
    pass
class ResourceExistsError(GoogleBaseError):
    """Raised when creating a resource that already exists."""
    pass
class ResourceInUseError(GoogleBaseError):
    """Raised when the API reports a RESOURCE_IN_USE* error code."""
    pass
class GoogleResponse(JsonResponse):
    """
    Google Base Response class.
    """

    def success(self):
        """
        Determine if the request was successful.

        For the Google response class, tag all responses as successful and
        raise appropriate Exceptions from parse_body.

        :return: C{True}
        """
        return True

    def _get_error(self, body):
        """
        Get the error code and message from a JSON response.

        Return just the first error if there are multiple errors.

        :param body: The body of the JSON response dictionary
        :type body: ``dict``

        :return: Tuple containing error code and message
        :rtype: ``tuple`` of ``str`` or ``int``
        """
        if 'errors' in body['error']:
            err = body['error']['errors'][0]
        else:
            err = body['error']
        if isinstance(err, dict):
            if 'code' in err:
                code = err.get('code')
                message = err.get('message')
            else:
                code = err.get('reason', None)
                message = body.get('error_description', err)
        else:
            # BUG FIX: OAUTH2 token endpoints return plain-string errors,
            # e.g. {"error": "invalid_grant"}; the old code called
            # err.get() on the string and raised AttributeError.
            code = None
            message = body.get('error_description', err)
        return (code, message)

    def parse_body(self):
        """
        Parse the JSON response body, or raise exceptions as appropriate.

        :return: JSON dictionary
        :rtype: ``dict``
        """
        if len(self.body) == 0 and not self.parse_zero_length_body:
            return self.body
        json_error = False
        try:
            body = json.loads(self.body)
        except Exception:
            # If there is both a JSON parsing error and an unsuccessful http
            # response (like a 404), we want to raise the http error and not
            # the JSON one, so don't raise JsonParseError here.
            body = self.body
            json_error = True

        valid_http_codes = [
            httplib.OK,
            httplib.CREATED,
            httplib.ACCEPTED,
            httplib.CONFLICT,
        ]
        if self.status in valid_http_codes:
            if json_error:
                raise JsonParseError(body, self.status, None)
            elif 'error' in body:
                (code, message) = self._get_error(body)
                if code == 'QUOTA_EXCEEDED':
                    raise QuotaExceededError(message, self.status, code)
                elif code == 'RESOURCE_ALREADY_EXISTS':
                    raise ResourceExistsError(message, self.status, code)
                elif code == 'alreadyExists':
                    raise ResourceExistsError(message, self.status, code)
                # BUG FIX: `code` may be None (or an int) when the error has
                # no 'code'/'reason' field; calling startswith on it raised
                # AttributeError and masked the real API error.
                elif (hasattr(code, 'startswith') and
                        code.startswith('RESOURCE_IN_USE')):
                    raise ResourceInUseError(message, self.status, code)
                else:
                    raise GoogleBaseError(message, self.status, code)
            else:
                return body
        elif self.status == httplib.NOT_FOUND:
            if (not json_error) and ('error' in body):
                (code, message) = self._get_error(body)
            else:
                message = body
                code = None
            raise ResourceNotFoundError(message, self.status, code)
        elif self.status == httplib.BAD_REQUEST:
            if (not json_error) and ('error' in body):
                (code, message) = self._get_error(body)
            else:
                message = body
                code = None
            raise InvalidRequestError(message, self.status, code)
        else:
            if (not json_error) and ('error' in body):
                (code, message) = self._get_error(body)
            else:
                message = body
                code = None
            raise GoogleBaseError(message, self.status, code)
class GoogleBaseDriver(object):
    # Minimal driver stub so the auth connection classes can report a
    # provider name.
    name = "Google API"
class GoogleBaseAuthConnection(ConnectionUserAndKey):
    """
    Base class for Google Authentication. Should be subclassed for specific
    types of authentication.
    """
    driver = GoogleBaseDriver
    responseCls = GoogleResponse
    name = 'Google Auth'
    host = 'accounts.google.com'
    auth_path = '/o/oauth2/auth'

    def __init__(self, user_id, key=None, scopes=None,
                 redirect_uri='urn:ietf:wg:oauth:2.0:oob',
                 login_hint=None, **kwargs):
        """
        :param user_id: The email address (for service accounts) or Client ID
                        (for installed apps) to be used for authentication.
        :type user_id: ``str``

        :param key: The RSA Key (for service accounts) or file path containing
                    key or Client Secret (for installed apps) to be used for
                    authentication.
        :type key: ``str``

        :param scopes: A list of urls defining the scope of authentication
                       to grant.
        :type scopes: ``list``

        :keyword redirect_uri: The Redirect URI for the authentication
                               request. See Google OAUTH2 documentation for
                               more info.
        :type redirect_uri: ``str``

        :keyword login_hint: Login hint for authentication request. Useful
                             for Installed Application authentication.
        :type login_hint: ``str``
        """
        # Scopes are sent as a single space-separated string.
        self.scopes = " ".join(scopes or [])
        self.redirect_uri = redirect_uri
        self.login_hint = login_hint
        super(GoogleBaseAuthConnection, self).__init__(user_id, key, **kwargs)

    def add_default_headers(self, headers):
        """Force the form-encoded content type and Host expected by Google."""
        headers['Content-Type'] = "application/x-www-form-urlencoded"
        headers['Host'] = self.host
        return headers

    def _token_request(self, request_body):
        """
        Return an updated token from a token request body.

        :param request_body: A dictionary of values to send in the body of
                             the token request.
        :type request_body: ``dict``

        :return: A dictionary with updated token information
        :rtype: ``dict``
        """
        data = urlencode(request_body)
        try:
            response = self.request('/o/oauth2/token', method='POST',
                                    data=data)
        except AttributeError:
            raise GoogleAuthError('Invalid authorization response, please '
                                  'check your credentials and time drift.')
        token_info = response.object
        if 'expires_in' in token_info:
            # Record an absolute expiry time so callers can check staleness.
            expiry = _utcnow() + datetime.timedelta(
                seconds=token_info['expires_in'])
            token_info['expire_time'] = _utc_timestamp(expiry)
        return token_info

    def refresh_token(self, token_info):
        """
        Refresh the current token.

        Fetch an updated refresh token from internal metadata service.

        :param token_info: Dictionary containing token information.
                           (Not used, but here for compatibility)
        :type token_info: ``dict``

        :return: A dictionary containing updated token information.
        :rtype: ``dict``
        """
        # pylint: disable=no-member
        return self.get_new_token()
class GoogleInstalledAppAuthConnection(GoogleBaseAuthConnection):
    """Authentication connection for "Installed Application" authentication."""

    def get_code(self):
        """
        Give the user a URL that they can visit to authenticate and obtain a
        code. This method will ask for that code that the user can paste in.

        Mocked in libcloud.test.common.google.GoogleTestCase.

        :return: Code supplied by the user after authenticating
        :rtype: ``str``
        """
        params = {'response_type': 'code',
                  'client_id': self.user_id,
                  'redirect_uri': self.redirect_uri,
                  'scope': self.scopes,
                  'state': 'Libcloud Request'}
        if self.login_hint:
            params['login_hint'] = self.login_hint
        url = 'https://%s%s?%s' % (self.host, self.auth_path,
                                   urlencode(params))
        print('\nPlease Go to the following URL and sign in:')
        print(url)
        prompt = 'Enter Code: '
        # raw_input only exists on Python 2.
        return input(prompt) if PY3 else raw_input(prompt)

    def get_new_token(self):
        """
        Get a new token. Generally used when no previous token exists or
        there is no refresh token.

        :return: Dictionary containing token information
        :rtype: ``dict``
        """
        # Interactively ask the user for an authorization code first.
        request = {'code': self.get_code(),
                   'client_id': self.user_id,
                   'client_secret': self.key,
                   'redirect_uri': self.redirect_uri,
                   'grant_type': 'authorization_code'}
        return self._token_request(request)

    def refresh_token(self, token_info):
        """
        Use the refresh token supplied in the token info to get a new token.

        :param token_info: Dictionary containing current token information
        :type token_info: ``dict``

        :return: A dictionary containing updated token information.
        :rtype: ``dict``
        """
        if 'refresh_token' not in token_info:
            return self.get_new_token()
        updated = self._token_request(
            {'refresh_token': token_info['refresh_token'],
             'client_id': self.user_id,
             'client_secret': self.key,
             'grant_type': 'refresh_token'})
        # Google does not always echo the refresh token back; keep the old
        # one so later refreshes still work.
        updated.setdefault('refresh_token', token_info['refresh_token'])
        return updated
class GoogleServiceAcctAuthConnection(GoogleBaseAuthConnection):
    """Authentication class for "Service Account" authentication."""

    def __init__(self, user_id, key, *args, **kwargs):
        """
        Check to see if PyCrypto is available, and convert key file path into
        a key string if the key is in a file.

        :param user_id: Email address to be used for Service Account
                        authentication.
        :type user_id: ``str``

        :param key: The RSA Key or path to file containing the key.
        :type key: ``str``
        """
        if SHA256 is None:
            raise GoogleAuthError('PyCrypto library required for '
                                  'Service Account Authentication.')
        # Anything without an embedded PEM marker is treated as a file path.
        if "PRIVATE KEY---" not in key:
            keypath = os.path.expanduser(key)
            if not (os.path.exists(keypath) and os.path.isfile(keypath)):
                raise ValueError("Missing (or not readable) key "
                                 "file: '%s'" % key)
            with open(keypath, 'r') as f:
                contents = f.read()
            try:
                # JSON-format key files hold the PEM under 'private_key'.
                key = json.loads(contents)['private_key']
            except ValueError:
                # Not JSON; assume the file is the raw PEM key itself.
                key = contents
        super(GoogleServiceAcctAuthConnection, self).__init__(
            user_id, key, *args, **kwargs)

    def get_new_token(self):
        """
        Get a new token using the email address and RSA Key.

        :return: Dictionary containing token information
        :rtype: ``dict``
        """
        # The JWT header is constant for RS256 signing.
        enc_header = base64.urlsafe_b64encode(
            b(json.dumps({'alg': 'RS256', 'typ': 'JWT'})))
        # Claim set: who we are, what we want, one hour of validity.
        claim_set = {'iss': self.user_id,
                     'scope': self.scopes,
                     'aud': 'https://accounts.google.com/o/oauth2/token',
                     'exp': int(time.time()) + 3600,
                     'iat': int(time.time())}
        enc_claims = base64.urlsafe_b64encode(b(json.dumps(claim_set)))
        # The signed message is header.claims.
        message = b'.'.join((enc_header, enc_claims))
        signer = PKCS1_v1_5.new(RSA.importKey(self.key))
        signature = base64.urlsafe_b64encode(signer.sign(SHA256.new(message)))
        # Exchange the signed JWT for an access token.
        jwt = b'.'.join((message, signature))
        return self._token_request(
            {'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
             'assertion': jwt})
class GoogleGCEServiceAcctAuthConnection(GoogleBaseAuthConnection):
    """Authentication class for self-authentication when used with a GCE
    instance that supports serviceAccounts.
    """
    def get_new_token(self):
        """
        Fetch a token from the GCE instance metadata service.

        :return: Dictionary containing token information
        :rtype: ``dict``
        """
        status, reason, body = _get_gce_metadata(
            '/instance/service-accounts/default/token')
        if status == httplib.NOT_FOUND:
            raise ValueError("Service Accounts are not enabled for this "
                             "GCE instance.")
        if status != httplib.OK:
            raise ValueError("Internal GCE Authorization failed: "
                             "'%s'" % str(reason))
        token_info = json.loads(body)
        # Convert the relative lifetime into an absolute expiry timestamp.
        if 'expires_in' in token_info:
            expiry = _utcnow() + datetime.timedelta(
                seconds=token_info['expires_in'])
            token_info['expire_time'] = _utc_timestamp(expiry)
        return token_info
class GoogleAuthType(object):
    """
    SA (Service Account),
    IA (Installed Application),
    GCE (Auth from a GCE instance with service account enabled)
    GCS_S3 (Cloud Storage S3 interoperability authentication)
    """
    SA = 'SA'
    IA = 'IA'
    GCE = 'GCE'
    GCS_S3 = 'GCS_S3'
    ALL_TYPES = [SA, IA, GCE, GCS_S3]
    OAUTH2_TYPES = [SA, IA, GCE]
    @classmethod
    def guess_type(cls, user_id):
        """Infer the auth type from the user id / runtime environment."""
        if cls._is_sa(user_id):
            return cls.SA
        if cls._is_gce():
            return cls.GCE
        if cls._is_gcs_s3(user_id):
            return cls.GCS_S3
        return cls.IA
    @classmethod
    def is_oauth2(cls, auth_type):
        """True when the given auth type uses the OAuth2 flow."""
        return auth_type in cls.OAUTH2_TYPES
    @staticmethod
    def _is_gce():
        """
        Checks if we can access the GCE metadata server.
        Mocked in libcloud.test.common.google.GoogleTestCase.
        """
        status, _, body = _get_gce_metadata()
        return bool(status == httplib.OK and body)
    @staticmethod
    def _is_gcs_s3(user_id):
        """
        Checks S3 key format: 20 alphanumeric chars starting with GOOG.
        """
        return len(user_id) == 20 and user_id.startswith('GOOG')
    @staticmethod
    def _is_sa(user_id):
        """Service-account ids end with the gserviceaccount.com domain."""
        return user_id.endswith('.gserviceaccount.com')
class GoogleOAuth2Credential(object):
    """Hold an OAuth2 token for a Google identity, caching it on disk.

    The token is read from ``credential_file`` when possible; otherwise a
    new one is requested through the auth connection matching ``auth_type``
    and written back to the cache file.
    """
    default_credential_file = '~/.google_libcloud_auth'
    def __init__(self, user_id, key, auth_type=None, credential_file=None,
                 scopes=None, **kwargs):
        """
        :param user_id: Email address or Client ID used for authentication.
        :type user_id: ``str``

        :param key: RSA key, key file path, or Client Secret (per auth type).
        :type key: ``str``

        :keyword auth_type: One of GoogleAuthType's OAuth2-capable values;
                            guessed from ``user_id`` when not supplied.
        :type auth_type: ``str``

        :keyword credential_file: Path used to cache token information.
        :type credential_file: ``str``

        :keyword scopes: OAuth2 scope URLs; defaults to read/write access to
                         Compute, Storage, and DNS.
        :type scopes: ``list``
        """
        self.auth_type = auth_type or GoogleAuthType.guess_type(user_id)
        if self.auth_type not in GoogleAuthType.ALL_TYPES:
            raise GoogleAuthError('Invalid auth type: %s' % self.auth_type)
        if not GoogleAuthType.is_oauth2(self.auth_type):
            raise GoogleAuthError(('Auth type %s cannot be used with OAuth2' %
                                   self.auth_type))
        self.user_id = user_id
        self.key = key
        # Suffix the cache file with the user id so different identities do
        # not clobber each other's cached tokens.
        default_credential_file = '.'.join([self.default_credential_file,
                                            user_id])
        self.credential_file = credential_file or default_credential_file
        # Default scopes to read/write for compute, storage, and dns.
        self.scopes = scopes or [
            'https://www.googleapis.com/auth/compute',
            'https://www.googleapis.com/auth/devstorage.full_control',
            'https://www.googleapis.com/auth/ndev.clouddns.readwrite',
        ]
        self.token = self._get_token_from_file()
        if self.auth_type == GoogleAuthType.GCE:
            self.oauth2_conn = GoogleGCEServiceAcctAuthConnection(
                self.user_id, self.scopes, **kwargs)
        elif self.auth_type == GoogleAuthType.SA:
            self.oauth2_conn = GoogleServiceAcctAuthConnection(
                self.user_id, self.key, self.scopes, **kwargs)
        elif self.auth_type == GoogleAuthType.IA:
            self.oauth2_conn = GoogleInstalledAppAuthConnection(
                self.user_id, self.key, self.scopes, **kwargs)
        else:
            raise GoogleAuthError('Invalid auth_type: %s' %
                                  str(self.auth_type))
        if self.token is None:
            self.token = self.oauth2_conn.get_new_token()
            self._write_token_to_file()
    @property
    def access_token(self):
        # Refresh lazily: only on first access after expiry.
        if self.token_expire_utc_datetime < _utcnow():
            self._refresh_token()
        return self.token['access_token']
    @property
    def token_expire_utc_datetime(self):
        return _from_utc_timestamp(self.token['expire_time'])
    def _refresh_token(self):
        # Refresh via the auth connection and persist the new token.
        self.token = self.oauth2_conn.refresh_token(self.token)
        self._write_token_to_file()
    def _get_token_from_file(self):
        """
        Read credential file and return token information.
        Mocked in libcloud.test.common.google.GoogleTestCase.

        :return: Token information dictionary, or None
        :rtype: ``dict`` or ``None``
        """
        token = None
        filename = os.path.realpath(os.path.expanduser(self.credential_file))
        try:
            with open(filename, 'r') as f:
                data = f.read()
                token = json.loads(data)
        except (IOError, ValueError):
            # Note: File related errors (IOError) and errors related to json
            # parsing of the data (ValueError) are not fatal.
            e = sys.exc_info()[1]
            LOG.info('Failed to read cached auth token from file "%s": %s',
                     filename, str(e))
        return token
    def _write_token_to_file(self):
        """
        Write token to credential file.
        Mocked in libcloud.test.common.google.GoogleTestCase.
        """
        filename = os.path.expanduser(self.credential_file)
        filename = os.path.realpath(filename)
        try:
            data = json.dumps(self.token)
            write_flags = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
            # 0o600: the token is a secret, keep it owner-readable only.
            with os.fdopen(os.open(filename, write_flags, 0o600), 'w') as f:
                f.write(data)
        except Exception:
            # Fixed from a bare `except:`, which also swallowed SystemExit
            # and KeyboardInterrupt. Failure to cache the token is not
            # fatal: it simply means degraded performance, since a new
            # token is acquired each time the script runs.
            e = sys.exc_info()[1]
            LOG.info('Failed to write auth token to file "%s": %s',
                     filename, str(e))
class GoogleBaseConnection(ConnectionUserAndKey, PollingConnection):
    """Base connection class for interacting with Google APIs."""
    driver = GoogleBaseDriver
    responseCls = GoogleResponse
    host = 'www.googleapis.com'
    # Polling cadence/deadline for async operations (see PollingConnection).
    poll_interval = 2.0
    timeout = 180
    def __init__(self, user_id, key=None, auth_type=None,
                 credential_file=None, scopes=None, **kwargs):
        """
        Determine authentication type, set up appropriate authentication
        connection and get initial authentication information.
        :param user_id: The email address (for service accounts) or Client ID
                        (for installed apps) to be used for authentication.
        :type user_id: ``str``
        :param key: The RSA Key (for service accounts) or file path containing
                    key or Client Secret (for installed apps) to be used for
                    authentication.
        :type key: ``str``
        :keyword auth_type: See GoogleAuthType class for list and description
                            of accepted values.
                            If not supplied, auth_type will be guessed based
                            on value of user_id or if the code is running
                            on a GCE instance.
        :type auth_type: ``str``
        :keyword credential_file: Path to file for caching authentication
                                  information.
        :type credential_file: ``str``
        :keyword scopes: List of OAuth2 scope URLs. The empty default sets
                         read/write access to Compute, Storage, and DNS.
        :type scopes: ``list``
        """
        super(GoogleBaseConnection, self).__init__(user_id, key, **kwargs)
        # The credential object owns token acquisition, refresh and caching.
        self.oauth2_credential = GoogleOAuth2Credential(
            user_id, key, auth_type, credential_file, scopes, **kwargs)
        # Advertise interpreter version and platform in the User-Agent.
        python_ver = '%s.%s.%s' % (sys.version_info[0], sys.version_info[1],
                                   sys.version_info[2])
        ver_platform = 'Python %s/%s' % (python_ver, sys.platform)
        self.user_agent_append(ver_platform)
    def add_default_headers(self, headers):
        """
        @inherits: :class:`Connection.add_default_headers`
        """
        headers['Content-Type'] = 'application/json'
        headers['Host'] = self.host
        return headers
    def pre_connect_hook(self, params, headers):
        """
        Check to make sure that token hasn't expired. If it has, get an
        updated token. Also, add the token to the headers.
        @inherits: :class:`Connection.pre_connect_hook`
        """
        # access_token transparently refreshes the token when expired.
        headers['Authorization'] = ('Bearer ' +
                                    self.oauth2_credential.access_token)
        return params, headers
    def encode_data(self, data):
        """Encode data to JSON"""
        return json.dumps(data)
    def request(self, *args, **kwargs):
        """
        @inherits: :class:`Connection.request`
        """
        # Adds some retry logic for the occasional
        # "Connection Reset by peer" error.
        retries = 4
        tries = 0
        # Up to (retries - 1) attempts in the loop: only ECONNRESET is
        # retried; any other socket error propagates immediately.
        while tries < (retries - 1):
            try:
                return super(GoogleBaseConnection, self).request(
                    *args, **kwargs)
            except socket.error:
                e = sys.exc_info()[1]
                if e.errno == errno.ECONNRESET:
                    tries = tries + 1
                else:
                    # NOTE(review): `raise e` drops the original traceback on
                    # Python 2; a bare `raise` would preserve it.
                    raise e
        # One more time, then give up.
        return super(GoogleBaseConnection, self).request(*args, **kwargs)
    def has_completed(self, response):
        """
        Determine if operation has completed based on response.
        :param response: JSON response
        :type response: I{responseCls}
        :return: True if complete, False otherwise
        :rtype: ``bool``
        """
        if response.object['status'] == 'DONE':
            return True
        else:
            return False
    def get_poll_request_kwargs(self, response, context, request_kwargs):
        """
        @inherits: :class:`PollingConnection.get_poll_request_kwargs`
        """
        # Poll the operation's own URL as returned by the API.
        return {'action': response.object['selfLink']}
    def morph_action_hook(self, action):
        """
        Update action to correct request path.
        In many places, the Google API returns a full URL to a resource.
        This will strip the scheme and host off of the path and just return
        the request. Otherwise, it will prepend the base request_path to
        the action.
        :param action: The action to be called in the http request
        :type action: ``str``
        :return: The modified request based on the action
        :rtype: ``str``
        """
        if action.startswith('https://'):
            # Keep only path, query and fragment of the absolute URL.
            u = urlparse.urlsplit(action)
            request = urlparse.urlunsplit(('', '', u[2], u[3], u[4]))
        else:
            request = self.request_path + action
        return request
| |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Firebase Admin SDK for Python."""
import datetime
import json
import os
import threading
from firebase_admin import credentials
from firebase_admin.__about__ import __version__
# Registry of initialized App instances, keyed by app name.
_apps = {}
# Guards every read/write of the _apps registry.
_apps_lock = threading.RLock()
# Indirection over utcnow (presumably so tests can stub the clock — confirm).
_clock = datetime.datetime.utcnow
_DEFAULT_APP_NAME = '[DEFAULT]'
# Environment variable consulted when no options are passed to initialize_app().
_FIREBASE_CONFIG_ENV_VAR = 'FIREBASE_CONFIG'
# Only these keys are honoured when options are loaded from FIREBASE_CONFIG.
_CONFIG_VALID_KEYS = ['databaseAuthVariableOverride', 'databaseURL', 'httpTimeout', 'projectId',
                      'storageBucket']
def initialize_app(credential=None, options=None, name=_DEFAULT_APP_NAME):
    """Initializes and returns a new App instance.

    Creates a new App instance using the specified options and app name.
    A ValueError is raised when an instance with the same name already
    exists.

    When no options are provided, they are loaded from the environment:
    the ``FIREBASE_CONFIG`` variable is parsed as a JSON object when its
    value starts with ``"{"``, and is otherwise treated as the name of a
    JSON file to read.

    Use this function whenever a new App instance is required. Do not
    directly invoke the App constructor.

    Args:
        credential: A credential object used to initialize the SDK (optional). If none is
            provided, Google Application Default Credentials are used.
        options: A dictionary of configuration options (optional). Supported options include
            ``databaseURL``, ``storageBucket``, ``projectId``, ``databaseAuthVariableOverride``,
            ``serviceAccountId`` and ``httpTimeout``. If ``httpTimeout`` is not set, the SDK
            uses a default timeout of 120 seconds.
        name: Name of the app (optional).

    Returns:
        App: A newly initialized instance of App.

    Raises:
        ValueError: If the app name is already in use, or any of the
            provided arguments are invalid.
    """
    cred = credentials.ApplicationDefault() if credential is None else credential
    app = App(name, cred, options)
    with _apps_lock:
        if app.name not in _apps:
            _apps[app.name] = app
            return app
        # Already registered under this name: build the appropriate error.
        if name == _DEFAULT_APP_NAME:
            message = (
                'The default Firebase app already exists. This means you called '
                'initialize_app() more than once without providing an app name as '
                'the second argument. In most cases you only need to call '
                'initialize_app() once. But if you do want to initialize multiple '
                'apps, pass a second argument to initialize_app() to give each app '
                'a unique name.')
        else:
            message = (
                'Firebase app named "{0}" already exists. This means you called '
                'initialize_app() more than once with the same app name as the '
                'second argument. Make sure you provide a unique name every time '
                'you call initialize_app().').format(name)
        raise ValueError(message)
def delete_app(app):
    """Gracefully deletes an App instance.

    Args:
        app: The app instance to be deleted.

    Raises:
        ValueError: If the app is not initialized.
    """
    if not isinstance(app, App):
        raise ValueError('Illegal app argument type: "{}". Argument must be of '
                         'type App.'.format(type(app)))
    with _apps_lock:
        # Only remove the entry when it is this exact instance.
        if _apps.get(app.name) is app:
            del _apps[app.name]
            app._cleanup()  # pylint: disable=protected-access
            return
    if app.name == _DEFAULT_APP_NAME:
        message = (
            'The default Firebase app is not initialized. Make sure to initialize '
            'the default app by calling initialize_app().')
    else:
        message = (
            'Firebase app named "{0}" is not initialized. Make sure to initialize '
            'the app by calling initialize_app() with your app name as the '
            'second argument.').format(app.name)
    raise ValueError(message)
def get_app(name=_DEFAULT_APP_NAME):
    """Retrieves an App instance by name.

    Args:
        name: Name of the App instance to retrieve (optional).

    Returns:
        App: An App instance with the given name.

    Raises:
        ValueError: If the specified name is not a string, or if the specified
            app does not exist.
    """
    if not isinstance(name, str):
        raise ValueError('Illegal app name argument type: "{}". App name '
                         'must be a string.'.format(type(name)))
    with _apps_lock:
        app = _apps.get(name)
        if app is not None:
            return app
        if name == _DEFAULT_APP_NAME:
            message = (
                'The default Firebase app does not exist. Make sure to initialize '
                'the SDK by calling initialize_app().')
        else:
            message = (
                'Firebase app named "{0}" does not exist. Make sure to initialize '
                'the SDK by calling initialize_app() with your app name as the '
                'second argument.').format(name)
        raise ValueError(message)
class _AppOptions:
    """A collection of configuration options for an App."""
    def __init__(self, options):
        resolved = self._load_from_environment() if options is None else options
        if not isinstance(resolved, dict):
            raise ValueError('Illegal Firebase app options type: {0}. Options '
                             'must be a dictionary.'.format(type(resolved)))
        self._options = resolved
    def get(self, key, default=None):
        """Returns the option identified by the provided key."""
        return self._options.get(key, default)
    def _load_from_environment(self):
        """Load options from FIREBASE_CONFIG when none were passed to __init__.

        A value starting with "{" is parsed as inline JSON; any other value is
        treated as the path of a JSON file to read.
        """
        config_data = os.getenv(_FIREBASE_CONFIG_ENV_VAR)
        if not config_data:
            return {}
        if not config_data.startswith('{'):
            try:
                with open(config_data, 'r') as json_file:
                    json_str = json_file.read()
            except Exception as err:
                raise ValueError('Unable to read file {}. {}'.format(config_data, err))
        else:
            json_str = config_data
        try:
            json_data = json.loads(json_str)
        except Exception as err:
            raise ValueError('JSON string "{0}" is not valid json. {1}'.format(json_str, err))
        # Drop any keys that are not recognized configuration options.
        return {key: val for key, val in json_data.items() if key in _CONFIG_VALID_KEYS}
class App:
    """The entry point for Firebase Python SDK.
    Represents a Firebase app, while holding the configuration and state
    common to all Firebase APIs.
    """
    def __init__(self, name, credential, options):
        """Constructs a new App using the provided name and options.
        Args:
            name: Name of the application.
            credential: A credential object.
            options: A dictionary of configuration options.
        Raises:
            ValueError: If an argument is None or invalid.
        """
        if not name or not isinstance(name, str):
            raise ValueError('Illegal Firebase app name "{0}" provided. App name must be a '
                             'non-empty string.'.format(name))
        self._name = name
        if not isinstance(credential, credentials.Base):
            raise ValueError('Illegal Firebase credential provided. App must be initialized '
                             'with a valid credential instance.')
        self._credential = credential
        self._options = _AppOptions(options)
        # Guards the _services cache (and its None "deleted" sentinel).
        self._lock = threading.RLock()
        # name -> service instance cache; set to None once the app is deleted.
        self._services = {}
        App._validate_project_id(self._options.get('projectId'))
        # Project ID is resolved lazily on first access of `project_id`.
        self._project_id_initialized = False
    @classmethod
    def _validate_project_id(cls, project_id):
        # None is allowed (project ID may be resolved later); any other
        # non-string value is rejected.
        if project_id is not None and not isinstance(project_id, str):
            raise ValueError(
                'Invalid project ID: "{0}". project ID must be a string.'.format(project_id))
    @property
    def name(self):
        return self._name
    @property
    def credential(self):
        return self._credential
    @property
    def options(self):
        return self._options
    @property
    def project_id(self):
        # Resolve once and cache the result (including a None result).
        if not self._project_id_initialized:
            self._project_id = self._lookup_project_id()
            self._project_id_initialized = True
        return self._project_id
    def _lookup_project_id(self):
        """Looks up the Firebase project ID associated with an App.
        If a ``projectId`` is specified in app options, it is returned. Then tries to
        get the project ID from the credential used to initialize the app. If that also fails,
        attempts to look up the ``GOOGLE_CLOUD_PROJECT`` and ``GCLOUD_PROJECT`` environment
        variables.
        Returns:
            str: A project ID string or None.
        """
        project_id = self._options.get('projectId')
        if not project_id:
            try:
                # Not all credential types expose project_id.
                project_id = self._credential.project_id
            except AttributeError:
                pass
        if not project_id:
            project_id = os.environ.get('GOOGLE_CLOUD_PROJECT',
                                        os.environ.get('GCLOUD_PROJECT'))
        # NOTE(review): this re-validates the *options* value (already checked
        # in __init__), not the resolved project_id — confirm intentional.
        App._validate_project_id(self._options.get('projectId'))
        return project_id
    def _get_service(self, name, initializer):
        """Returns the service instance identified by the given name.
        Services are functional entities exposed by the Admin SDK (e.g. auth, database). Each
        service instance is associated with exactly one App. If the named service
        instance does not exist yet, _get_service() calls the provided initializer function to
        create the service instance. The created instance will be cached, so that subsequent
        calls would always fetch it from the cache.
        Args:
            name: Name of the service to retrieve.
            initializer: A function that can be used to initialize a service for the first time.
        Returns:
            object: The specified service instance.
        Raises:
            ValueError: If the provided name is invalid, or if the App is already deleted.
        """
        if not name or not isinstance(name, str):
            raise ValueError(
                'Illegal name argument: "{0}". Name must be a non-empty string.'.format(name))
        with self._lock:
            # _services is set to None by _cleanup() when the app is deleted.
            if self._services is None:
                raise ValueError(
                    'Service requested from deleted Firebase App: "{0}".'.format(self._name))
            if name not in self._services:
                self._services[name] = initializer(self)
            return self._services[name]
    def _cleanup(self):
        """Cleans up any services associated with this App.
        Checks whether each service contains a close() method, and calls it if available.
        This is to be called when an App is being deleted, thus ensuring graceful termination of
        any services started by the App.
        """
        with self._lock:
            for service in self._services.values():
                if hasattr(service, 'close') and hasattr(service.close, '__call__'):
                    service.close()
            # Mark the app as deleted; _get_service() checks for None.
            self._services = None
| |
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
import six
import math
import itertools
import numbers
import textwrap
from skbio.sequence._base import ElasticLines
from skbio.util._misc import chunk_str
class _SequenceReprBuilder(object):
    """Build a ``Sequence`` repr.
    Parameters
    ----------
    seq : Sequence
        Sequence to repr.
    width : int
        Maximum width of the repr.
    indent : int
        Number of spaces to use for indented lines.
    chunk_size: int
        Number of characters in each chunk of a sequence.
    """
    def __init__(self, seq, width, indent, chunk_size):
        self._seq = seq
        self._width = width
        self._indent = ' ' * indent
        self._chunk_size = chunk_size
    def build(self):
        # Assemble the repr section by section: class name, metadata,
        # positional metadata, stats, then the chunked sequence itself.
        lines = ElasticLines()
        cls_name = self._seq.__class__.__name__
        lines.add_line(cls_name)
        lines.add_separator()
        if self._seq.has_metadata():
            lines.add_line('Metadata:')
            # Python 3 doesn't allow sorting of mixed types so we can't just
            # use sorted() on the metadata keys. Sort first by type then sort
            # by value within each type.
            for key in self._sorted_keys_grouped_by_type(self._seq.metadata):
                value = self._seq.metadata[key]
                lines.add_lines(self._format_metadata_key_value(key, value))
        if self._seq.has_positional_metadata():
            lines.add_line('Positional metadata:')
            for key in self._seq.positional_metadata.columns.values.tolist():
                dtype = self._seq.positional_metadata[key].dtype
                lines.add_lines(
                    self._format_positional_metadata_column(key, dtype))
        lines.add_line('Stats:')
        for label, value in self._seq._repr_stats():
            lines.add_line('%s%s: %s' % (self._indent, label, value))
        lines.add_separator()
        num_lines, num_chars, column_width = self._find_optimal_seq_chunking()
        # display entire sequence if we can, else display the first two and
        # last two lines separated by ellipsis
        if num_lines <= 5:
            lines.add_lines(self._format_chunked_seq(
                range(num_lines), num_chars, column_width))
        else:
            lines.add_lines(self._format_chunked_seq(
                range(2), num_chars, column_width))
            lines.add_line('...')
            lines.add_lines(self._format_chunked_seq(
                range(num_lines - 2, num_lines), num_chars, column_width))
        return lines.to_str()
    def _sorted_keys_grouped_by_type(self, dict_):
        """Group keys within a dict by their type and sort within type."""
        type_sorted = sorted(dict_, key=self._type_sort_key)
        type_and_value_sorted = []
        # groupby requires its input pre-sorted by the same key function.
        for _, group in itertools.groupby(type_sorted, self._type_sort_key):
            type_and_value_sorted.extend(sorted(group))
        return type_and_value_sorted
    def _type_sort_key(self, key):
        return repr(type(key))
    def _format_metadata_key_value(self, key, value):
        """Format metadata key:value, wrapping across lines if necessary."""
        key_fmt = self._format_key(key)
        supported_type = True
        if isinstance(value, (six.text_type, six.binary_type)):
            # for stringy values, there may be u'' or b'' depending on the type
            # of `value` and version of Python. find the starting quote
            # character so that wrapped text will line up with that instead of
            # the string literal prefix character. for example:
            #
            #     'foo': u'abc def ghi
            #             jkl mno'
            value_repr = repr(value)
            extra_indent = 1
            if not (value_repr.startswith("'") or value_repr.startswith('"')):
                extra_indent = 2
        # handles any number, this includes bool
        elif value is None or isinstance(value, numbers.Number):
            value_repr = repr(value)
            extra_indent = 0
        else:
            supported_type = False
        # Fall back to the type name for unsupported or very long values.
        if not supported_type or len(value_repr) > 140:
            value_repr = str(type(value))
            # extra indent of 1 so that wrapped text lines up past the bracket:
            #
            #     'foo': <type
            #             'dict'>
            extra_indent = 1
        return self._wrap_text_with_indent(value_repr, key_fmt, extra_indent)
    def _format_key(self, key):
        """Format metadata key.
        Includes initial indent and trailing colon and space:
            <indent>'foo':<space>
        """
        key_fmt = self._indent + repr(key)
        supported_types = (six.text_type, six.binary_type, numbers.Number,
                           type(None))
        # Overly long or exotic keys are shown by type name instead of repr.
        if len(key_fmt) > (self._width / 2) or not isinstance(key,
                                                              supported_types):
            key_fmt = self._indent + str(type(key))
        return '%s: ' % key_fmt
    def _wrap_text_with_indent(self, text, initial_text, extra_indent):
        """Wrap text across lines with an initial indentation.
        For example:
            'foo': 'abc def
                    ghi jkl
                    mno pqr'
        <indent>'foo':<space> is `initial_text`. `extra_indent` is 1. Wrapped
        lines are indented such that they line up with the start of the
        previous line of wrapped text.
        """
        return textwrap.wrap(
            text, width=self._width, expand_tabs=False,
            initial_indent=initial_text,
            subsequent_indent=' ' * (len(initial_text) + extra_indent))
    def _format_positional_metadata_column(self, key, dtype):
        key_fmt = self._format_key(key)
        dtype_fmt = '<dtype: %s>' % str(dtype)
        return self._wrap_text_with_indent(dtype_fmt, key_fmt, 1)
    def _find_optimal_seq_chunking(self):
        """Find the optimal number of sequence chunks to fit on a single line.
        Returns the number of lines the sequence will occupy, the number of
        sequence characters displayed on each line, and the column width
        necessary to display position info using the optimal number of sequence
        chunks.
        """
        # strategy: use an iterative approach to find the optimal number of
        # sequence chunks per line. start with a single chunk and increase
        # until the max line width is exceeded. when this happens, the previous
        # number of chunks is optimal
        num_lines = 0
        num_chars = 0
        column_width = 0
        num_chunks = 1
        not_exceeded = True
        while not_exceeded:
            line_len, new_chunk_info = self._compute_chunked_seq_line_len(
                num_chunks)
            not_exceeded = line_len <= self._width
            if not_exceeded:
                # remember the last chunking that still fits.
                num_lines, num_chars, column_width = new_chunk_info
                num_chunks += 1
        return num_lines, num_chars, column_width
    def _compute_chunked_seq_line_len(self, num_chunks):
        """Compute line length based on a number of chunks."""
        num_chars = num_chunks * self._chunk_size
        # ceil to account for partial line
        # (true division is in effect via `from __future__ import division`)
        num_lines = int(math.ceil(len(self._seq) / num_chars))
        # position column width is fixed width, based on the number of
        # characters necessary to display the position of the final line (all
        # previous positions will be left justified using this width)
        column_width = len('%d ' % ((num_lines - 1) * num_chars))
        # column width + number of sequence characters + spaces between chunks
        line_len = column_width + num_chars + (num_chunks - 1)
        return line_len, (num_lines, num_chars, column_width)
    def _format_chunked_seq(self, line_idxs, num_chars, column_width):
        """Format specified lines of chunked sequence data."""
        lines = []
        for line_idx in line_idxs:
            seq_idx = line_idx * num_chars
            chars = str(self._seq[seq_idx:seq_idx+num_chars])
            chunked_chars = chunk_str(chars, self._chunk_size, ' ')
            # left-justify the position so sequence chunks align vertically.
            lines.append(('%d' % seq_idx).ljust(column_width) + chunked_chars)
        return lines
| |
# -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hashlib
import base64
import os
import re
import hmac
import version
from util import print_error, InvalidPassword
import ecdsa
import aes
################################## transactions
# Amounts below are in satoshis; COIN is the number of satoshis per BTC.
FEE_STEP = 10000
RECOMMENDED_FEE = 50000
# Confirmations required before a coinbase output may be spent.
COINBASE_MATURITY = 100
COIN = 100000000
# supported types of transaction outputs
TYPE_ADDRESS = 0
TYPE_PUBKEY = 1
TYPE_SCRIPT = 2
# AES encryption helpers: ciphertext is base64-encoded for storage.
EncodeAES = lambda secret, s: base64.b64encode(aes.encryptData(secret,s))
DecodeAES = lambda secret, e: aes.decryptData(secret, base64.b64decode(e))
def strip_PKCS7_padding(s):
    """Return s stripped of PKCS7 padding.

    The last byte of a PKCS7-padded string encodes the pad length (1-16
    for a 16-byte block cipher) and every pad byte must carry that value.

    Raises ValueError when s is empty, not a multiple of 16 bytes, or the
    padding bytes are inconsistent.
    """
    if len(s) % 16 or not s:
        # Error messages previously misspelled "PKCS7" as "PCKS7".
        raise ValueError("String of len %d can't be PKCS7-padded" % len(s))
    numpads = ord(s[-1])
    if numpads > 16:
        raise ValueError("String ending with %r can't be PKCS7-padded" % s[-1])
    if s[-numpads:] != numpads * chr(numpads):
        raise ValueError("Invalid PKCS7 padding")
    return s[:-numpads]
# backport padding fix to AES module: make the bundled `aes` module use the
# stricter PKCS7 validation defined above.
aes.strip_PKCS7_padding = strip_PKCS7_padding
def aes_encrypt_with_iv(key, iv, data):
    """AES-CBC-encrypt data under key/iv; returns the raw ciphertext string."""
    cbc_mode = aes.AESModeOfOperation.modeOfOperation["CBC"]
    key_bytes = [ord(c) for c in key]
    iv_bytes = [ord(c) for c in iv]
    padded = aes.append_PKCS7_padding(data)
    keysize = len(key_bytes)
    assert keysize in aes.AES.keySize.values(), 'invalid key size: %s' % keysize
    moo = aes.AESModeOfOperation()
    _, _, cipher_bytes = moo.encrypt(padded, cbc_mode, key_bytes,
                                     keysize, iv_bytes)
    return ''.join(chr(byte) for byte in cipher_bytes)
def aes_decrypt_with_iv(key, iv, data):
    """AES-CBC-decrypt data under key/iv and strip the PKCS7 padding."""
    cbc_mode = aes.AESModeOfOperation.modeOfOperation["CBC"]
    key_bytes = [ord(c) for c in key]
    iv_bytes = [ord(c) for c in iv]
    keysize = len(key_bytes)
    assert keysize in aes.AES.keySize.values(), 'invalid key size: %s' % keysize
    cipher_bytes = [ord(c) for c in data]
    moo = aes.AESModeOfOperation()
    plaintext = moo.decrypt(cipher_bytes, None, cbc_mode, key_bytes,
                            keysize, iv_bytes)
    return strip_PKCS7_padding(plaintext)
def pw_encode(s, password):
    """Encrypt string s under password; pass through unchanged when the
    password is empty/None."""
    if not password:
        return s
    secret = Hash(password)
    return EncodeAES(secret, s.encode("utf8"))
def pw_decode(s, password):
    """Decrypt s under password; pass through unchanged when password is None.

    Raises InvalidPassword when decryption or UTF-8 decoding fails.
    """
    # NOTE(review): pw_encode tests truthiness but this tests `is not None`,
    # so an empty-string password takes the decrypt path — confirm intended.
    if password is None:
        return s
    secret = Hash(password)
    try:
        return DecodeAES(secret, s).decode("utf8")
    except Exception:
        raise InvalidPassword()
def rev_hex(s):
    """Return the hex string s with its byte order reversed."""
    raw = s.decode('hex')
    return raw[::-1].encode('hex')
def int_to_hex(i, length=1):
    """Serialize integer i as little-endian hex, zero-padded to `length` bytes."""
    digits = hex(i)[2:].rstrip('L')  # drop '0x' prefix and py2 long suffix
    padded = digits.zfill(2 * length)
    return rev_hex(padded)
def var_int(i):
    """Encode i as a Bitcoin variable-length integer (hex string)."""
    # https://en.bitcoin.it/wiki/Protocol_specification#Variable_length_integer
    if i < 0xfd:
        return int_to_hex(i)
    if i <= 0xffff:
        return "fd" + int_to_hex(i, 2)
    if i <= 0xffffffff:
        return "fe" + int_to_hex(i, 4)
    return "ff" + int_to_hex(i, 8)
def op_push(i):
    """Return the minimal script push prefix (hex) for pushing i data bytes.

    Lengths below 0x4c are pushed with a direct length opcode; longer data
    uses OP_PUSHDATA1 (0x4c), OP_PUSHDATA2 (0x4d) or OP_PUSHDATA4 (0x4e).
    """
    if i < 0x4c:
        return int_to_hex(i)
    elif i <= 0xff:
        # Was `i < 0xff`, which wrongly emitted OP_PUSHDATA2 for a 255-byte
        # push; OP_PUSHDATA1 covers lengths up to 255 (minimal encoding).
        return '4c' + int_to_hex(i)
    elif i <= 0xffff:
        # Was `i < 0xffff`: OP_PUSHDATA2 covers lengths up to 65535.
        return '4d' + int_to_hex(i, 2)
    else:
        return '4e' + int_to_hex(i, 4)
def sha256(x):
    """Return the raw SHA-256 digest of x."""
    digest = hashlib.sha256(x)
    return digest.digest()
def Hash(x):
    """Double SHA-256; unicode input is UTF-8 encoded first."""
    data = x
    if type(data) is unicode:
        data = data.encode('utf-8')
    return sha256(sha256(data))
# Hex-encode/decode with byte order reversed (hashes are conventionally
# displayed byte-reversed in Bitcoin).
hash_encode = lambda x: x[::-1].encode('hex')
hash_decode = lambda x: x.decode('hex')[::-1]
# HMAC-SHA512 helper (used by is_new_seed for seed-version checks).
hmac_sha_512 = lambda x,y: hmac.new(x, y, hashlib.sha512).digest()
def is_new_seed(x, prefix=version.SEED_PREFIX):
    """True when x is an Electrum-style seed whose version tag matches prefix."""
    import mnemonic
    normalized = mnemonic.normalize_text(x)
    digest_hex = hmac_sha_512("Seed version",
                              normalized.encode('utf8')).encode('hex')
    return digest_hex.startswith(prefix)
def is_old_seed(seed):
    """True when seed looks like a pre-2.0 Electrum seed (hex or old wordlist)."""
    import old_mnemonic
    words = seed.strip().split()
    try:
        old_mnemonic.mn_decode(words)
        uses_electrum_words = True
    except Exception:
        uses_electrum_words = False
    try:
        seed.decode('hex')
        is_hex = len(seed) in (32, 64)
    except Exception:
        is_hex = False
    return is_hex or (uses_electrum_words and len(words) in (12, 24))
def seed_type(x):
    """Classify x as 'old', 'standard', '2fa', or '' when unrecognized."""
    if is_old_seed(x):
        return 'old'
    if is_new_seed(x):
        return 'standard'
    if is_new_seed(x, version.SEED_PREFIX_2FA):
        return '2fa'
    return ''
is_seed = lambda x: bool(seed_type(x))
# pywallet openssl private key implementation
def i2o_ECPublicKey(pubkey, compressed=False):
    """Serialize an ecdsa public-key point to raw bytes.

    Uncompressed: 0x04 || X (32 bytes) || Y (32 bytes).
    Compressed:   0x02 (even Y) or 0x03 (odd Y) || X (32 bytes).
    (0x00 would denote the point at infinity; never produced here.)
    """
    x_hex = '%064x' % pubkey.point.x()
    if compressed:
        prefix = '03' if pubkey.point.y() & 1 else '02'
        key = prefix + x_hex
    else:
        key = '04' + x_hex + '%064x' % pubkey.point.y()
    return key.decode('hex')
# end pywallet openssl private key implementation
############ functions from pywallet #####################
def hash_160(public_key):
    """RIPEMD-160 of SHA-256 of public_key (Bitcoin's HASH160)."""
    if 'ANDROID_DATA' in os.environ:
        # Android's hashlib lacks ripemd; fall back to PyCrypto's RIPEMD.
        from Crypto.Hash import RIPEMD
        md = RIPEMD.new()
    else:
        md = hashlib.new('ripemd')
    md.update(sha256(public_key))
    return md.digest()
def public_key_to_bc_address(public_key):
    """Convert a serialized public key to its base58check address."""
    return hash_160_to_bc_address(hash_160(public_key))
def hash_160_to_bc_address(h160, addrtype = 0):
    """Base58-encode a 20-byte hash160 with the given version byte and checksum."""
    payload = chr(addrtype) + h160
    checksum = Hash(payload)[0:4]
    return base_encode(payload + checksum, base=58)
def bc_address_to_hash_160(addr):
    """Decode a base58 address to (version byte, 20-byte hash160).

    Local renamed from `bytes`, which shadowed the builtin.
    """
    raw = base_decode(addr, 25, base=58)
    return ord(raw[0]), raw[1:21]
# Base58 alphabet (Bitcoin's: omits 0, O, I, l to avoid visual ambiguity).
__b58chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
assert len(__b58chars) == 58
# Base43 alphabet, used for compact QR-code payloads.
__b43chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ$*+-./:'
assert len(__b43chars) == 43
def base_encode(v, base):
    """Encode the byte string v in base58 (or base43).

    Raises ValueError for an unsupported base; previously the function
    fell through with `chars` unbound and died with a NameError.
    """
    if base == 58:
        chars = __b58chars
    elif base == 43:
        chars = __b43chars
    else:
        raise ValueError('base_encode: unsupported base %r' % base)
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += (256 ** i) * ord(c)
    result = ''
    while long_value >= base:
        div, mod = divmod(long_value, base)
        result = chars[mod] + result
        long_value = div
    result = chars[long_value] + result
    # Bitcoin's leading-zero compression: each leading NUL byte of the
    # input becomes one leading first-alphabet character ('1' for base58).
    nPad = 0
    for c in v:
        if c == '\0':
            nPad += 1
        else:
            break
    return (chars[0] * nPad) + result
def base_decode(v, length, base):
    """Decode base58/base43 string v to bytes; None if the result is not `length` bytes.

    Raises ValueError for an unsupported base (previously a NameError from
    an unbound `chars`).
    NOTE(review): characters outside the alphabet make chars.find() return
    -1 and silently corrupt the value instead of raising; behavior kept
    for backward compatibility.
    """
    if base == 58:
        chars = __b58chars
    elif base == 43:
        chars = __b43chars
    else:
        raise ValueError('base_decode: unsupported base %r' % base)
    long_value = 0
    for (i, c) in enumerate(v[::-1]):
        long_value += chars.find(c) * (base ** i)
    result = ''
    while long_value >= 256:
        div, mod = divmod(long_value, 256)
        result = chr(mod) + result
        long_value = div
    result = chr(long_value) + result
    # Leading first-alphabet characters decode back to leading NUL bytes.
    nPad = 0
    for c in v:
        if c == chars[0]:
            nPad += 1
        else:
            break
    result = chr(0) * nPad + result
    if length is not None and len(result) != length:
        return None
    return result
def EncodeBase58Check(vchIn):
    """Append a 4-byte double-SHA256 checksum and base58-encode."""
    checksum = Hash(vchIn)[0:4]  # renamed from `hash`, which shadowed the builtin
    return base_encode(vchIn + checksum, base=58)
def DecodeBase58Check(psz):
    """Base58-decode psz and verify its 4-byte checksum; None on mismatch."""
    vchRet = base_decode(psz, None, base=58)
    payload = vchRet[0:-4]
    csum = vchRet[-4:]
    expected = Hash(payload)[0:4]  # local renamed from `hash` (shadowed the builtin)
    if expected != csum:
        return None
    return payload
def PrivKeyToSecret(privkey):
    """Extract the 32-byte secret stored at offset 9 of a serialized private key."""
    return privkey[9:9 + 32]
def SecretToASecret(secret, compressed=False, addrtype=0):
    """WIF-encode a raw secret; a 0x01 suffix flags a compressed pubkey."""
    payload = chr((addrtype + 128) & 255) + secret
    if compressed:
        payload += '\01'
    return EncodeBase58Check(payload)
def ASecretToSecret(key, addrtype=0):
    """Decode a WIF key (or Casascius minikey) to the raw secret; False if invalid."""
    vch = DecodeBase58Check(key)
    if vch and vch[0] == chr((addrtype + 128) & 255):
        return vch[1:]
    if is_minikey(key):
        return minikey_to_private_key(key)
    return False
def regenerate_key(sec):
    """Rebuild an EC_KEY from a WIF-encoded secret; False if it cannot be decoded."""
    raw = ASecretToSecret(sec)
    if not raw:
        return False
    return EC_KEY(raw[0:32])
def GetPubKey(pubkey, compressed=False):
    """Serialize an ecdsa pubkey point (thin wrapper over i2o_ECPublicKey)."""
    return i2o_ECPublicKey(pubkey, compressed=compressed)
def GetSecret(pkey):
    """Return the key's secret exponent as 32 big-endian raw bytes."""
    return ('%064x' % pkey.secret).decode('hex')
def is_compressed(sec):
    """True if the WIF key sec encodes a compressed public key.

    An undecodable key now returns False instead of raising: previously
    ASecretToSecret's False result hit len(False) and raised TypeError.
    """
    b = ASecretToSecret(sec)
    return bool(b) and len(b) == 33
def public_key_from_private_key(sec):
    """Derive the hex public key (compressed per the WIF flag) from a WIF secret."""
    # rebuild public key from private key, compressed or uncompressed
    pkey = regenerate_key(sec)
    assert pkey
    serialized = GetPubKey(pkey.pubkey, is_compressed(sec))
    return serialized.encode('hex')
def address_from_private_key(sec):
    """Compute the base58 address for the key behind WIF secret sec."""
    pubkey_hex = public_key_from_private_key(sec)
    return public_key_to_bc_address(pubkey_hex.decode('hex'))
def is_valid(addr):
    """Thin alias for is_address(), kept for older callers."""
    return is_address(addr)
def is_address(addr):
    """True for a well-formed address: base58 charset, version byte 0 (P2PKH)
    or 5 (P2SH), and a checksum that round-trips."""
    base58_re = re.compile('[1-9A-HJ-NP-Za-km-z]{26,}\\Z')
    if not base58_re.match(addr):
        return False
    try:
        addrtype, h160 = bc_address_to_hash_160(addr)
    except Exception:
        return False
    if addrtype in (0, 5):
        # Re-encoding must reproduce the input exactly (checksum check).
        return addr == hash_160_to_bc_address(h160, addrtype)
    return False
def is_private_key(key):
    """True if key parses as a WIF private key."""
    try:
        return ASecretToSecret(key) is not False
    except Exception:
        # Narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt and SystemExit.
        return False
########### end pywallet functions #######################
def is_minikey(text):
    """Return True for a valid Casascius minikey.

    Minikeys are typically 22 or 30 characters, but any length >= 20 is
    accepted provided the key starts with 'S', is pure base58, and
    SHA256(text + '?') begins with a zero byte.  Widely used in Casascius
    physical bitcoins.
    """
    if len(text) < 20 or text[0] != 'S':
        return False
    if any(c not in __b58chars for c in text):
        return False
    return ord(sha256(text + '?')[0]) == 0
def minikey_to_private_key(text):
    """A minikey's private key is simply the SHA256 of its text."""
    return sha256(text)
from ecdsa.ecdsa import curve_secp256k1, generator_secp256k1
from ecdsa.curves import SECP256k1
from ecdsa.ellipticcurve import Point
from ecdsa.util import string_to_number, number_to_string
def msg_magic(message):
    """Frame a message the way bitcoind does before signing it
    ("\\x18Bitcoin Signed Message:\\n" + varint length + message)."""
    length_hex = var_int(len(message))
    length_bytes = ''.join(chr(int(length_hex[i:i+2], 16)) for i in xrange(0, len(length_hex), 2))
    return "\x18Bitcoin Signed Message:\n" + length_bytes + message
def verify_message(address, sig, message):
    """Check a bitcoin-signed message against an address; returns True/False, never raises."""
    try:
        public_key, compressed = pubkey_from_signature(sig, message)
        # The recovered key must hash to the claimed address...
        serialized = point_to_ser(public_key.pubkey.point, compressed)
        if public_key_to_bc_address(serialized) != address:
            raise Exception("Bad signature")
        # ...and the signature must verify against the framed digest.
        digest = Hash(msg_magic(message))
        public_key.verify_digest(sig[1:], digest, sigdecode=ecdsa.util.sigdecode_string)
        return True
    except Exception as e:
        print_error("Verification error: {0}".format(e))
        return False
def encrypt_message(message, pubkey):
    """ECIES-encrypt message to the hex-encoded public key (see EC_KEY.encrypt_message)."""
    return EC_KEY.encrypt_message(message, pubkey.decode('hex'))
def chunks(l, n):
    """Split sequence l into consecutive pieces of length n (last may be shorter)."""
    # range() instead of xrange(): result is identical here and Python-3 safe.
    return [l[i:i + n] for i in range(0, len(l), n)]
def ECC_YfromX(x, curved=curve_secp256k1, odd=True):
    """Find [Y, offset] such that (x+offset, Y) is on the curve with the requested Y parity.

    Fixes the Weierstrass equation y^2 = x^3 + a*x + b (mod p): the
    original computed a*x^2, and operator precedence applied `% p` only
    to b.  Both errors were masked because secp256k1 has a == 0.
    """
    _p = curved.p()
    _a = curved.a()
    _b = curved.b()
    for offset in range(128):
        Mx = x + offset
        My2 = (pow(Mx, 3, _p) + _a * Mx + _b) % _p
        # Square root mod p via the (p+1)/4 exponent, valid since p % 4 == 3.
        My = pow(My2, (_p + 1) // 4, _p)
        if curved.contains_point(Mx, My):
            if odd == bool(My & 1):
                return [My, offset]
            return [_p - My, offset]
    raise Exception('ECC_YfromX: No Y found')
def negative_point(P):
    """Return -P: the point with the same X and negated Y."""
    return Point( P.curve(), P.x(), -P.y(), P.order() )
def point_to_ser(P, comp=True):
    """Serialize point P: 33-byte compressed (02/03 prefix) or 65-byte uncompressed (04)."""
    x_hex = '%064x' % P.x()
    if comp:
        prefix = '%02x' % (2 + (P.y() & 1))
        return (prefix + x_hex).decode('hex')
    return ('04' + x_hex + '%064x' % P.y()).decode('hex')
def ser_to_point(Aser):
    """Deserialize a compressed or uncompressed point produced by point_to_ser."""
    curve = curve_secp256k1
    _r = generator_secp256k1.order()
    prefix = Aser[0]
    assert prefix in ['\x02', '\x03', '\x04']
    if prefix == '\x04':
        return Point(curve, string_to_number(Aser[1:33]), string_to_number(Aser[33:]), _r)
    Mx = string_to_number(Aser[1:])
    My = ECC_YfromX(Mx, curve, prefix == '\x03')[0]
    return Point(curve, Mx, My, _r)
class MyVerifyingKey(ecdsa.VerifyingKey):
    """VerifyingKey extended with public-key recovery from a signature."""
    @classmethod
    def from_signature(klass, sig, recid, h, curve):
        """ See http://www.secg.org/download/aid-780/sec1-v2.pdf, chapter 4.1.6

        Recovers the public key Q from (r, s) and recovery id `recid`
        (0..3) for the 32-byte digest h.  Step numbers below follow the
        SEC1 algorithm.
        """
        from ecdsa import util, numbertheory
        import msqr
        curveFp = curve.curve
        G = curve.generator
        order = G.order()
        # extract r,s from signature
        r, s = util.sigdecode_string(sig, order)
        # 1.1 candidate x: high bit of recid selects r or r + order
        x = r + (recid/2) * order
        # 1.3 y^2 = x^3 + a*x + b mod p; recover y by modular square root
        alpha = ( x * x * x + curveFp.a() * x + curveFp.b() ) % curveFp.p()
        beta = msqr.modular_sqrt(alpha, curveFp.p())
        # low bit of recid selects the parity of y
        y = beta if (beta - recid) % 2 == 0 else curveFp.p() - beta
        # 1.4 the constructor checks that nR is at infinity
        R = Point(curveFp, x, y, order)
        # 1.5 compute e from message:
        e = string_to_number(h)
        minus_e = -e % order
        # 1.6 compute Q = r^-1 (sR - eG)
        inv_r = numbertheory.inverse_mod(r,order)
        Q = inv_r * ( s * R + minus_e * G )
        return klass.from_public_point( Q, curve )
def pubkey_from_signature(sig, message):
    """Recover (verifying_key, compressed_flag) from a 65-byte message signature.

    The header byte encodes the recovery id: 27..30 for uncompressed
    keys, 31..34 for compressed ones.
    """
    if len(sig) != 65:
        raise Exception("Wrong encoding")
    header = ord(sig[0])
    if not 27 <= header < 35:
        raise Exception("Bad encoding")
    compressed = header >= 31
    if compressed:
        header -= 4
    recid = header - 27
    digest = Hash(msg_magic(message))
    return MyVerifyingKey.from_signature(sig[1:], recid, digest, curve=SECP256k1), compressed
class MySigningKey(ecdsa.SigningKey):
    """Enforce low S values in signatures"""
    def sign_number(self, number, entropy=None, k=None):
        # If s lands in the upper half of the group order, substitute
        # order - s so every signature has its canonical (low-S) form.
        curve = SECP256k1
        G = curve.generator
        order = G.order()
        r, s = ecdsa.SigningKey.sign_number(self, number, entropy, k)
        if s > order/2:
            s = order - s
        return r, s
class EC_KEY(object):
    """secp256k1 keypair supporting message signing and ECIES encryption."""
    def __init__( self, k ):
        # k is the 32-byte big-endian secret exponent.
        secret = string_to_number(k)
        self.pubkey = ecdsa.ecdsa.Public_key( generator_secp256k1, generator_secp256k1 * secret )
        self.privkey = ecdsa.ecdsa.Private_key( self.pubkey, secret )
        self.secret = secret
    def get_public_key(self, compressed=True):
        """Hex-encoded serialized public key."""
        return point_to_ser(self.pubkey.point, compressed).encode('hex')
    def sign(self, msg_hash):
        """Sign a 32-byte digest; returns the 64-byte r||s string.

        Uses deterministic nonces and self-verifies before returning.
        """
        private_key = MySigningKey.from_secret_exponent(self.secret, curve = SECP256k1)
        public_key = private_key.get_verifying_key()
        signature = private_key.sign_digest_deterministic(msg_hash, hashfunc=hashlib.sha256, sigencode = ecdsa.util.sigencode_string)
        assert public_key.verify_digest(signature, msg_hash, sigdecode = ecdsa.util.sigdecode_string)
        return signature
    def sign_message(self, message, is_compressed):
        """Produce a 65-byte bitcoin message signature (header byte + r||s).

        The recovery id is found by trial: try all four header values and
        keep the one that round-trips through verify_message.
        """
        signature = self.sign(Hash(msg_magic(message)))
        for i in range(4):
            sig = chr(27 + i + (4 if is_compressed else 0)) + signature
            try:
                self.verify_message(sig, message)
                return sig
            except Exception:
                continue
        else:
            # Loop never breaks, so the for-else runs iff no header worked.
            raise Exception("error: cannot sign message")
    def verify_message(self, sig, message):
        """Raise if sig is not a valid signature of message by this key."""
        public_key, compressed = pubkey_from_signature(sig, message)
        # check public key: the recovered key must serialize identically to ours
        if point_to_ser(public_key.pubkey.point, compressed) != point_to_ser(self.pubkey.point, compressed):
            raise Exception("Bad signature")
        # check message
        h = Hash(msg_magic(message))
        public_key.verify_digest(sig[1:], h, sigdecode = ecdsa.util.sigdecode_string)
    # ECIES encryption/decryption methods; AES-128-CBC with PKCS7 is used as the cipher; hmac-sha256 is used as the mac
    @classmethod
    def encrypt_message(self, message, pubkey):
        # NOTE(review): classmethod whose first parameter is named `self`;
        # it actually receives the class.  Left as-is (comment-only edit).
        pk = ser_to_point(pubkey)
        if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, pk.x(), pk.y()):
            raise Exception('invalid pubkey')
        # Fresh ephemeral key per message; ECDH against the recipient key
        # yields the shared secret that is hashed into IV + AES + MAC keys.
        ephemeral_exponent = number_to_string(ecdsa.util.randrange(pow(2,256)), generator_secp256k1.order())
        ephemeral = EC_KEY(ephemeral_exponent)
        ecdh_key = point_to_ser(pk * ephemeral.privkey.secret_multiplier)
        key = hashlib.sha512(ecdh_key).digest()
        iv, key_e, key_m = key[0:16], key[16:32], key[32:]
        ciphertext = aes_encrypt_with_iv(key_e, iv, message)
        ephemeral_pubkey = ephemeral.get_public_key(compressed=True).decode('hex')
        encrypted = 'BIE1' + ephemeral_pubkey + ciphertext
        # MAC covers magic + ephemeral pubkey + ciphertext.
        mac = hmac.new(key_m, encrypted, hashlib.sha256).digest()
        return base64.b64encode(encrypted + mac)
    def decrypt_message(self, encrypted):
        """Inverse of encrypt_message; raises on malformed input or a bad MAC."""
        encrypted = base64.b64decode(encrypted)
        # Minimum: 4 magic + 33 pubkey + 16 ciphertext block + 32 mac = 85.
        if len(encrypted) < 85:
            raise Exception('invalid ciphertext: length')
        magic = encrypted[:4]
        ephemeral_pubkey = encrypted[4:37]
        ciphertext = encrypted[37:-32]
        mac = encrypted[-32:]
        if magic != 'BIE1':
            raise Exception('invalid ciphertext: invalid magic bytes')
        try:
            ephemeral_pubkey = ser_to_point(ephemeral_pubkey)
        except AssertionError, e:
            raise Exception('invalid ciphertext: invalid ephemeral pubkey')
        if not ecdsa.ecdsa.point_is_valid(generator_secp256k1, ephemeral_pubkey.x(), ephemeral_pubkey.y()):
            raise Exception('invalid ciphertext: invalid ephemeral pubkey')
        # Recompute the ECDH secret and keys, verify the MAC, then decrypt.
        ecdh_key = point_to_ser(ephemeral_pubkey * self.privkey.secret_multiplier)
        key = hashlib.sha512(ecdh_key).digest()
        iv, key_e, key_m = key[0:16], key[16:32], key[32:]
        if mac != hmac.new(key_m, encrypted[:-32], hashlib.sha256).digest():
            raise Exception('invalid ciphertext: invalid mac')
        return aes_decrypt_with_iv(key_e, iv, ciphertext)
###################################### BIP32 ##############################
# n-bit random value as hex.  NOTE(review): formatted with %032x regardless
# of n, so values needing more than 128 bits are not padded to full width
# — confirm callers only pass n <= 128.
random_seed = lambda n: "%032x"%ecdsa.util.randrange( pow(2,n) )
# Indices >= 2^31 denote hardened ("prime") derivation in BIP32 paths.
BIP32_PRIME = 0x80000000
def get_pubkeys_from_secret(secret):
    """Return (64-byte uncompressed, 33-byte compressed) public keys for a secret."""
    signing_key = ecdsa.SigningKey.from_string(secret, curve=SECP256k1)
    verifying_key = signing_key.get_verifying_key()
    K = verifying_key.to_string()
    K_compressed = GetPubKey(verifying_key.pubkey, True)
    return K, K_compressed
# Child private key derivation function (from master private key)
# k = master private key (32 bytes)
# c = master chain code (extra entropy for key derivation) (32 bytes)
# n = the index of the key we want to derive. (only 32 bits will be used)
# If n is negative (i.e. the 32nd bit is set), the resulting private key's
# corresponding public key can NOT be determined without the master private key.
# However, if n is positive, the resulting private key's corresponding
# public key can be determined without the master private key.
def CKD_priv(k, c, n):
    """One BIP32 private-child derivation step for index n (hardened if bit 31 set)."""
    is_prime = n & BIP32_PRIME
    # int_to_hex is little-endian; rev_hex flips to the big-endian 4-byte index.
    return _CKD_priv(k, c, rev_hex(int_to_hex(n,4)).decode('hex'), is_prime)
def _CKD_priv(k, c, s, is_prime):
    """Core CKDpriv: k/c are the parent key and chain code, s the serialized index."""
    order = generator_secp256k1.order()
    keypair = EC_KEY(k)
    cK = GetPubKey(keypair.pubkey,True)
    # Hardened: HMAC over 0x00 || k || index; normal: over serP(K) || index.
    data = chr(0) + k + s if is_prime else cK + s
    I = hmac.new(c, data, hashlib.sha512).digest()
    # Child key = (IL + k) mod n; the right half IR is the child chain code.
    k_n = number_to_string( (string_to_number(I[0:32]) + string_to_number(k)) % order , order )
    c_n = I[32:]
    return k_n, c_n
# Child public key derivation function (from public key only)
# K = master public key
# c = master chain code
# n = index of key we want to derive
# This function allows us to find the nth public key, as long as n is
# non-negative. If n is negative, we need the master private key to find it.
def CKD_pub(cK, c, n):
    """One BIP32 public-child derivation step; hardened indices are impossible
    without the private key."""
    if n & BIP32_PRIME:
        # Was a bare `raise`, which re-raises whatever exception happens to
        # be current (or TypeError if none); raise something meaningful.
        raise Exception("CKD_pub: cannot derive a hardened child from a public key")
    return _CKD_pub(cK, c, rev_hex(int_to_hex(n, 4)).decode('hex'))
# helper function, callable with arbitrary string
def _CKD_pub(cK, c, s):
    """Core CKDpub: child point = IL*G + K_parent; child chain code = IR."""
    order = generator_secp256k1.order()
    I = hmac.new(c, cK + s, hashlib.sha512).digest()
    curve = SECP256k1
    pubkey_point = string_to_number(I[0:32])*curve.generator + ser_to_point(cK)
    public_key = ecdsa.VerifyingKey.from_public_point( pubkey_point, curve = SECP256k1 )
    c_n = I[32:]
    cK_n = GetPubKey(public_key.pubkey,True)
    return cK_n, c_n
# BIP32 extended-key version bytes (first 4 bytes of the serialization).
BITCOIN_HEADER_PRIV = "0488ade4"  # xprv
BITCOIN_HEADER_PUB = "0488b21e"  # xpub
TESTNET_HEADER_PRIV = "04358394"  # tprv
TESTNET_HEADER_PUB = "043587cf"  # tpub
BITCOIN_HEADERS = (BITCOIN_HEADER_PUB, BITCOIN_HEADER_PRIV)
TESTNET_HEADERS = (TESTNET_HEADER_PUB, TESTNET_HEADER_PRIV)
def _get_headers(testnet):
    """Returns the correct headers for either testnet or bitcoin, in the form
    of a 2-tuple, like (public, private)."""
    return TESTNET_HEADERS if testnet else BITCOIN_HEADERS
def deserialize_xkey(xkey):
    """Split a base58 xprv/xpub into (depth, fingerprint, child_number, chain_code, key).

    78-byte layout: 4 version | 1 depth | 4 parent fingerprint |
    4 child number | 32 chain code | 33 key material.
    """
    xkey = DecodeBase58Check(xkey)
    assert len(xkey) == 78
    xkey_header = xkey[0:4].encode('hex')
    # Determine if the key is a bitcoin key or a testnet key.
    if xkey_header in TESTNET_HEADERS:
        head = TESTNET_HEADER_PRIV
    elif xkey_header in BITCOIN_HEADERS:
        head = BITCOIN_HEADER_PRIV
    else:
        raise Exception("Unknown xkey header: '%s'" % xkey_header)
    depth = ord(xkey[4])
    fingerprint = xkey[5:9]
    child_number = xkey[9:13]
    c = xkey[13:13+32]
    # Private keys carry a 0x00 pad byte before the 32-byte scalar; skip it.
    if xkey[0:4].encode('hex') == head:
        K_or_k = xkey[13+33:]
    else:
        K_or_k = xkey[13+32:]
    return depth, fingerprint, child_number, c, K_or_k
def get_xkey_name(xkey, testnet=False):
    """Human-readable child index for a depth-0/1 xkey ('' for the root)."""
    depth, fingerprint, child_number, c, K = deserialize_xkey(xkey)
    index = int(child_number.encode('hex'), 16)
    if index & BIP32_PRIME:
        child_id = "%d'" % (index - BIP32_PRIME)
    else:
        child_id = "%d" % index
    if depth == 0:
        return ''
    if depth == 1:
        return child_id
    raise BaseException("xpub depth error")
def xpub_from_xprv(xprv, testnet=False):
    """Derive the xpub corresponding to xprv (same depth/fingerprint/index)."""
    depth, fingerprint, child_number, c, k = deserialize_xkey(xprv)
    _, cK = get_pubkeys_from_secret(k)
    header_pub, _ = _get_headers(testnet)
    payload = header_pub.decode('hex') + chr(depth) + fingerprint + child_number + c + cK
    return EncodeBase58Check(payload)
def bip32_root(seed, testnet=False):
    """Build the BIP32 master (xprv, xpub) pair from a binary seed.

    Master key and chain code come from HMAC-SHA512 keyed with the string
    "Bitcoin seed"; depth, fingerprint and child number are all zero.
    """
    header_pub, header_priv = _get_headers(testnet)
    I = hmac.new("Bitcoin seed", seed, hashlib.sha512).digest()
    master_k = I[0:32]
    master_c = I[32:]
    K, cK = get_pubkeys_from_secret(master_k)
    # version | depth 0 | zero fingerprint | zero child number | chain | key
    xprv = (header_priv + "00" + "00000000" + "00000000").decode("hex") + master_c + chr(0) + master_k
    xpub = (header_pub + "00" + "00000000" + "00000000").decode("hex") + master_c + cK
    return EncodeBase58Check(xprv), EncodeBase58Check(xpub)
def xpub_from_pubkey(cK, testnet=False):
    """Wrap a bare compressed pubkey in a depth-0 xpub with a zero chain code."""
    header_pub, header_priv = _get_headers(testnet)
    assert cK[0] in ['\x02', '\x03']
    chain_code = chr(0) * 32
    payload = (header_pub + "00" + "00000000" + "00000000").decode("hex") + chain_code + cK
    return EncodeBase58Check(payload)
def bip32_private_derivation(xprv, branch, sequence, testnet=False):
    """Derive (xprv, xpub) at path `sequence` (e.g. "m/0'/1") below `branch` ("m/")."""
    assert sequence.startswith(branch)
    if branch == sequence:
        return xprv, xpub_from_xprv(xprv, testnet)
    header_pub, header_priv = _get_headers(testnet)
    depth, fingerprint, child_number, c, k = deserialize_xkey(xprv)
    sequence = sequence[len(branch):]
    for n in sequence.split('/'):
        if n == '': continue
        # A trailing ' marks a hardened index (offset by BIP32_PRIME).
        i = int(n[:-1]) + BIP32_PRIME if n[-1] == "'" else int(n)
        parent_k = k
        k, c = CKD_priv(k, c, i)
        depth += 1
    # Fingerprint and child number describe the last derivation step only.
    _, parent_cK = get_pubkeys_from_secret(parent_k)
    fingerprint = hash_160(parent_cK)[0:4]
    child_number = ("%08X"%i).decode('hex')
    K, cK = get_pubkeys_from_secret(k)
    xprv = header_priv.decode('hex') + chr(depth) + fingerprint + child_number + c + chr(0) + k
    xpub = header_pub.decode('hex') + chr(depth) + fingerprint + child_number + c + cK
    return EncodeBase58Check(xprv), EncodeBase58Check(xpub)
def bip32_public_derivation(xpub, branch, sequence, testnet=False):
    """Derive the xpub at path `sequence` below `branch` using public derivation only.

    Hardened path components are not representable here (CKD_pub raises).
    """
    header_pub, _ = _get_headers(testnet)
    depth, fingerprint, child_number, c, cK = deserialize_xkey(xpub)
    assert sequence.startswith(branch)
    sequence = sequence[len(branch):]
    for n in sequence.split('/'):
        if n == '': continue
        i = int(n)
        parent_cK = cK
        cK, c = CKD_pub(cK, c, i)
        depth += 1
    # Fingerprint/child number reflect the final derivation step.
    fingerprint = hash_160(parent_cK)[0:4]
    child_number = ("%08X"%i).decode('hex')
    xpub = header_pub.decode('hex') + chr(depth) + fingerprint + child_number + c + cK
    return EncodeBase58Check(xpub)
def bip32_private_key(sequence, k, chain):
    """Walk `sequence` of child indices from (k, chain); return the leaf key as WIF."""
    for index in sequence:
        k, chain = CKD_priv(k, chain, index)
    return SecretToASecret(k, True)
def xkeys_from_seed(seed, passphrase, derivation):
    """Mnemonic + passphrase -> BIP32 root -> (xprv, xpub) at `derivation`."""
    from mnemonic import Mnemonic
    root_xprv, root_xpub = bip32_root(Mnemonic.mnemonic_to_seed(seed, passphrase))
    return bip32_private_derivation(root_xprv, "m/", derivation)
| |
#
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class sslcipher_servicegroup_binding(base_resource) :
	""" Binding class showing the servicegroup that can be bound to sslcipher.
	"""
	# NOTE: auto-generated NITRO SDK binding.  The try/except-reraise
	# property boilerplate is preserved exactly as generated.
	def __init__(self) :
		self._ciphergroupname = ""
		self._servicename = ""
		self._service = False
		self._servicegroupname = ""
		self._servicegroup = False
		self._cipheroperation = ""
		self._ciphgrpals = ""
	@property
	def ciphgrpals(self) :
		ur"""A cipher-suite can consist of an individual cipher name, the system predefined cipher-alias name, or user defined cipher-group name.<br/>Minimum length = 1.
		"""
		try :
			return self._ciphgrpals
		except Exception as e:
			raise e
	@ciphgrpals.setter
	def ciphgrpals(self, ciphgrpals) :
		ur"""A cipher-suite can consist of an individual cipher name, the system predefined cipher-alias name, or user defined cipher-group name.<br/>Minimum length = 1
		"""
		try :
			self._ciphgrpals = ciphgrpals
		except Exception as e:
			raise e
	@property
	def servicegroupname(self) :
		try :
			return self._servicegroupname
		except Exception as e:
			raise e
	@servicegroupname.setter
	def servicegroupname(self, servicegroupname) :
		try :
			self._servicegroupname = servicegroupname
		except Exception as e:
			raise e
	@property
	def servicegroup(self) :
		ur"""Indicates that the cipher operation is to be performed on the named SSL service or service group.
		"""
		try :
			return self._servicegroup
		except Exception as e:
			raise e
	@servicegroup.setter
	def servicegroup(self, servicegroup) :
		ur"""Indicates that the cipher operation is to be performed on the named SSL service or service group.
		"""
		try :
			self._servicegroup = servicegroup
		except Exception as e:
			raise e
	@property
	def service(self) :
		ur"""Indicates that the cipher operation is to be performed on the named SSL service or service group.
		"""
		try :
			return self._service
		except Exception as e:
			raise e
	@service.setter
	def service(self, service) :
		ur"""Indicates that the cipher operation is to be performed on the named SSL service or service group.
		"""
		try :
			self._service = service
		except Exception as e:
			raise e
	@property
	def servicename(self) :
		try :
			return self._servicename
		except Exception as e:
			raise e
	@servicename.setter
	def servicename(self, servicename) :
		try :
			self._servicename = servicename
		except Exception as e:
			raise e
	@property
	def ciphergroupname(self) :
		ur"""Name of the user-defined cipher group.<br/>Minimum length = 1.
		"""
		try :
			return self._ciphergroupname
		except Exception as e:
			raise e
	@ciphergroupname.setter
	def ciphergroupname(self, ciphergroupname) :
		ur"""Name of the user-defined cipher group.<br/>Minimum length = 1
		"""
		try :
			self._ciphergroupname = ciphergroupname
		except Exception as e:
			raise e
	@property
	def cipheroperation(self) :
		ur"""The operation that is performed when adding the cipher-suite.
		Possible cipher operations are:
		ADD - Appends the given cipher-suite to the existing one configured for the virtual server.
		REM - Removes the given cipher-suite from the existing one configured for the virtual server.
		ORD - Overrides the current configured cipher-suite for the virtual server with the given cipher-suite.<br/>Default value: 0<br/>Possible values = ADD, REM, ORD.
		"""
		try :
			return self._cipheroperation
		except Exception as e:
			raise e
	@cipheroperation.setter
	def cipheroperation(self, cipheroperation) :
		ur"""The operation that is performed when adding the cipher-suite.
		Possible cipher operations are:
		ADD - Appends the given cipher-suite to the existing one configured for the virtual server.
		REM - Removes the given cipher-suite from the existing one configured for the virtual server.
		ORD - Overrides the current configured cipher-suite for the virtual server with the given cipher-suite.<br/>Default value: 0<br/>Possible values = ADD, REM, ORD
		"""
		try :
			self._cipheroperation = cipheroperation
		except Exception as e:
			raise e
	def _get_nitro_response(self, service, response) :
		ur""" converts nitro response into object and returns the object array in case of get request.
		"""
		try :
			result = service.payload_formatter.string_to_resource(sslcipher_servicegroup_binding_response, response, self.__class__.__name__)
			if(result.errorcode != 0) :
				# 444 means the session expired: clear it before reporting.
				if (result.errorcode == 444) :
					service.clear_session(self)
				if result.severity :
					if (result.severity == "ERROR") :
						raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
				else :
					raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
			return result.sslcipher_servicegroup_binding
		except Exception as e :
			raise e
	def _get_object_name(self) :
		ur""" Returns the value of object identifier argument
		"""
		try :
			if self.ciphergroupname is not None :
				return str(self.ciphergroupname)
			return None
		except Exception as e :
			raise e
	class Cipheroperation:
		# Allowed values for the cipheroperation attribute.
		ADD = "ADD"
		REM = "REM"
		ORD = "ORD"
class sslcipher_servicegroup_binding_response(base_response) :
	""" NITRO response envelope for sslcipher_servicegroup_binding get requests. """
	def __init__(self, length=1) :
		self.errorcode = 0
		self.message = ""
		self.severity = ""
		self.sessionid = ""
		# The generated code bound this attribute to [] and immediately
		# rebound it below; the dead initial assignment is dropped.
		self.sslcipher_servicegroup_binding = [sslcipher_servicegroup_binding() for _ in range(length)]
| |
from __future__ import absolute_import, unicode_literals
import pykka
from mopidy.core import PlaybackState
from mopidy.mpd import exceptions, protocol, translator
#: Subsystems that can be registered with idle command.
SUBSYSTEMS = [  # mirrors the subsystem names defined by the MPD protocol
    'database', 'mixer', 'options', 'output', 'player', 'playlist',
    'stored_playlist', 'update']
@protocol.commands.add('clearerror')
def clearerror(context):
    """
    *musicpd.org, status section:*
    ``clearerror``
    Clears the current error message in status (this is also
    accomplished by any command that starts playback).
    """
    # Error tracking is not implemented yet, so there is nothing to clear.
    raise exceptions.MpdNotImplemented  # TODO
@protocol.commands.add('currentsong')
def currentsong(context):
    """
    *musicpd.org, status section:*
    ``currentsong``
    Displays the song info of the current song (same song that is
    identified in status).  Returns nothing when no track is loaded.
    """
    tl_track = context.core.playback.get_current_tl_track().get()
    stream_title = context.core.playback.get_stream_title().get()
    if tl_track is None:
        return None
    position = context.core.tracklist.index(tl_track).get()
    return translator.track_to_mpd_format(
        tl_track, position=position, stream_title=stream_title)
@protocol.commands.add('idle', list_command=False)
def idle(context, *subsystems):
    """
    *musicpd.org, status section:*
    ``idle [SUBSYSTEMS...]``
    Waits until there is a noteworthy change in one or more of MPD's
    subsystems and reports each as a ``changed: SUBSYSTEM`` line, where
    SUBSYSTEM is one of ``database``, ``update``, ``stored_playlist``,
    ``playlist``, ``player``, ``mixer``, ``output`` or ``options``.
    While a client waits for idle results the server disables timeouts;
    ``noidle`` cancels the wait (results may then be empty).  With no
    arguments, every subsystem is watched.
    """
    # TODO: test against valid subsystems
    watched = subsystems if subsystems else SUBSYSTEMS
    context.subscriptions.update(watched)
    active = context.subscriptions.intersection(context.events)
    if not active:
        # Nothing has happened yet; hold the connection open until it does.
        context.session.prevent_timeout = True
        return
    context.events = set()
    context.subscriptions = set()
    return ['changed: %s' % subsystem for subsystem in active]
@protocol.commands.add('noidle', list_command=False)
def noidle(context):
    """See :meth:`_status_idle`."""
    if context.subscriptions:
        # Drop the pending idle state and let normal timeouts resume.
        context.subscriptions = set()
        context.events = set()
        context.session.prevent_timeout = False
@protocol.commands.add('stats')
def stats(context):
    """
    *musicpd.org, status section:*
    ``stats``
    Displays statistics: ``artists``, ``albums``, ``songs``, ``uptime``,
    ``db_playtime``, ``db_update`` and ``playtime``.
    """
    # TODO: every counter is a placeholder until real statistics exist.
    return dict.fromkeys(
        ['artists', 'albums', 'songs', 'uptime',
         'db_playtime', 'db_update', 'playtime'], 0)
@protocol.commands.add('status')
def status(context):
    """
    *musicpd.org, status section:*
    ``status``
    Reports the current status of the player and the volume level.
    - ``volume``: 0-100 or -1
    - ``repeat``: 0 or 1
    - ``single``: 0 or 1
    - ``consume``: 0 or 1
    - ``playlist``: 31-bit unsigned integer, the playlist version
    number
    - ``playlistlength``: integer, the length of the playlist
    - ``state``: play, stop, or pause
    - ``song``: playlist song number of the current song stopped on or
    playing
    - ``songid``: playlist songid of the current song stopped on or
    playing
    - ``nextsong``: playlist song number of the next song to be played
    - ``nextsongid``: playlist songid of the next song to be played
    - ``time``: total time elapsed (of current playing/paused song)
    - ``elapsed``: Total time elapsed within the current song, but with
    higher resolution.
    - ``bitrate``: instantaneous bitrate in kbps
    - ``xfade``: crossfade in seconds
    - ``audio``: sampleRate``:bits``:channels
    - ``updatings_db``: job id
    - ``error``: if there is an error, returns message here
    *Clarifications based on experience implementing*
    - ``volume``: can also be -1 if no output is set.
    - ``elapsed``: Higher resolution means time in seconds with three
    decimal places for millisecond precision.
    """
    tl_track = context.core.playback.get_current_tl_track()
    next_tlid = context.core.tracklist.get_next_tlid()
    # Fire off all core requests, then resolve them in one batch below.
    futures = {
        'tracklist.length': context.core.tracklist.get_length(),
        'tracklist.version': context.core.tracklist.get_version(),
        'mixer.volume': context.core.mixer.get_volume(),
        'tracklist.consume': context.core.tracklist.get_consume(),
        'tracklist.random': context.core.tracklist.get_random(),
        'tracklist.repeat': context.core.tracklist.get_repeat(),
        'tracklist.single': context.core.tracklist.get_single(),
        'playback.state': context.core.playback.get_state(),
        'playback.current_tl_track': tl_track,
        'tracklist.index': context.core.tracklist.index(tl_track.get()),
        'tracklist.next_tlid': next_tlid,
        'tracklist.next_index': context.core.tracklist.index(
            tlid=next_tlid.get()),
        'playback.time_position': context.core.playback.get_time_position(),
    }
    pykka.get_all(futures.values())
    # Field order matters to some MPD clients; keep the reply a list of pairs.
    result = [
        ('volume', _status_volume(futures)),
        ('repeat', _status_repeat(futures)),
        ('random', _status_random(futures)),
        ('single', _status_single(futures)),
        ('consume', _status_consume(futures)),
        ('playlist', _status_playlist_version(futures)),
        ('playlistlength', _status_playlist_length(futures)),
        ('xfade', _status_xfade(futures)),
        ('state', _status_state(futures)),
    ]
    # Song/time fields are only present when there is a current/next track.
    if futures['playback.current_tl_track'].get() is not None:
        result.append(('song', _status_songpos(futures)))
        result.append(('songid', _status_songid(futures)))
    if futures['tracklist.next_tlid'].get() is not None:
        result.append(('nextsong', _status_nextsongpos(futures)))
        result.append(('nextsongid', _status_nextsongid(futures)))
    if futures['playback.state'].get() in (
            PlaybackState.PLAYING, PlaybackState.PAUSED):
        result.append(('time', _status_time(futures)))
        result.append(('elapsed', _status_time_elapsed(futures)))
        result.append(('bitrate', _status_bitrate(futures)))
    return result
def _status_bitrate(futures):
current_tl_track = futures['playback.current_tl_track'].get()
if current_tl_track is None:
return 0
if current_tl_track.track.bitrate is None:
return 0
return current_tl_track.track.bitrate
def _status_consume(futures):
if futures['tracklist.consume'].get():
return 1
else:
return 0
def _status_playlist_length(futures):
    """Number of tracks in the tracklist."""
    return futures['tracklist.length'].get()
def _status_playlist_version(futures):
    """Monotonically increasing tracklist version number."""
    return futures['tracklist.version'].get()
def _status_random(futures):
    """MPD 'random' flag as int (0/1)."""
    return int(futures['tracklist.random'].get())
def _status_repeat(futures):
    """MPD 'repeat' flag as int (0/1)."""
    return int(futures['tracklist.repeat'].get())
def _status_single(futures):
    """MPD 'single' flag as int (0/1)."""
    return int(futures['tracklist.single'].get())
def _status_songid(futures):
    """tlid of the current track, falling back to its position when unset."""
    tl_track = futures['playback.current_tl_track'].get()
    if tl_track is None:
        return _status_songpos(futures)
    return tl_track.tlid
def _status_songpos(futures):
    """Tracklist position of the current track."""
    return futures['tracklist.index'].get()
def _status_nextsongid(futures):
    """tlid of the next track to play (MPD 'nextsongid' field)."""
    return futures['tracklist.next_tlid'].get()
def _status_nextsongpos(futures):
    """Tracklist position of the next track to play (MPD 'nextsong' field)."""
    return futures['tracklist.next_index'].get()
def _status_state(futures):
    """Map Mopidy's playback state onto the MPD 'state' value.

    Returns None (field omitted upstream) for any unrecognized state,
    matching the original fall-through behavior.
    """
    state = futures['playback.state'].get()
    return {
        PlaybackState.PLAYING: 'play',
        PlaybackState.STOPPED: 'stop',
        PlaybackState.PAUSED: 'pause',
    }.get(state)
def _status_time(futures):
    """Return 'elapsed:total' in whole seconds (MPD 'time' field)."""
    elapsed = futures['playback.time_position'].get() // 1000
    total = _status_time_total(futures) // 1000
    return '%d:%d' % (elapsed, total)
def _status_time_elapsed(futures):
return '%.3f' % (futures['playback.time_position'].get() / 1000.0)
def _status_time_total(futures):
current_tl_track = futures['playback.current_tl_track'].get()
if current_tl_track is None:
return 0
elif current_tl_track.track.length is None:
return 0
else:
return current_tl_track.track.length
def _status_volume(futures):
volume = futures['mixer.volume'].get()
if volume is not None:
return volume
else:
return -1
def _status_xfade(futures):
    """Crossfade duration (MPD 'xfade' field); always 0 as it is unsupported."""
    return 0  # Not supported
| |
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
To test pool layer operations between NervanaGPU, NervanaCPU against numpy.
"""
import itertools as itt
import numpy as np
from operator import mul
from neon.backends.nervanagpu import NervanaGPU
from neon.backends.nervanacpu import NervanaCPU
from tests.utils import allclose_with_out
# how many times to repeat the fprop and bprop
repeat = 5
def sliceable(dim, pad=0):
    """
    Collapse the outer dimensions of *dim* into one and preserve the
    inner dimension, so the tensor can be treated as a 2D array for
    easy cpu operations in numpy.

    dim -- shape tuple (outer..., inner)
    pad -- extra rows added to the collapsed outer dimension
    Returns the (prod(outer) + pad, inner) shape tuple.
    """
    # reduce is a builtin only on Python 2; importing it from functools
    # keeps this helper working on both Python 2 and 3.
    from functools import reduce
    dim0 = reduce(mul, dim[:-1], 1) + pad
    return (dim0, dim[-1])
def pixel_indices(pool, kj, mt, pr, qs):
    """
    Compute the flat input indices covered by one pooling window.

    pool           -- backend pool layer providing the geometry
                      (C channels, JTRS window, DHW spatial dims)
    kj, mt, pr, qs -- window start offsets along channel, depth, height
                      and width (already stride- and pad-adjusted, so
                      they may be negative)
    Returns a list of flat indices into an input viewed as (C*D*H*W, N);
    positions falling outside the input (padding) are skipped.
    """
    C = pool.C
    J, T, R, S = pool.JTRS
    D, H, W = pool.DHW
    HW = H * W
    DHW = D * H * W
    idx = []
    for j in range(J):
        c = kj + j
        ci = c * DHW
        cb = c >= 0 and c < C  # channel coordinate in bounds?
        for t in range(T):
            z = mt + t
            zi = ci + z * HW
            zb = cb and z >= 0 and z < D  # ...and depth in bounds?
            for r in range(R):
                y = pr + r
                yi = zi + y * W
                yb = zb and y >= 0 and y < H  # ...and height in bounds?
                for s in range(S):
                    x = qs + s
                    # only emit fully in-bounds positions
                    if yb and x >= 0 and x < W:
                        xi = yi + x
                        idx.append(xi)
    return idx
def run_backend_pool(lib, layer, I, E, dtype):
    """Run fprop/bprop pooling `repeat` times on a backend.

    lib   -- NervanaGPU or NervanaCPU backend
    layer -- pool layer created by lib.pool_layer
    I, E  -- host-side input and error (delta) arrays
    Returns the device (output, backprop-delta) tensor pair.
    """
    dev_in = lib.array(I, dtype=dtype)
    dev_err = lib.array(E, dtype=dtype)
    dev_out = lib.zeros(layer.dimO, dtype=dtype)
    dev_argmax = lib.zeros(layer.dimO, dtype=np.int8)
    dev_delta = lib.zeros(layer.dimI, dtype=dtype)
    for _ in range(repeat):
        lib.fprop_pool(layer, dev_in, dev_out, dev_argmax)
        lib.bprop_pool(layer, dev_err, dev_delta, dev_argmax)
    return dev_out, dev_delta
def run_numpy_pool(op, cpuI, cpuE, dytpe, be_layer):
    """
    Reference pooling implementation in pure numpy.

    op       -- pooling op name; NOTE(review): this parameter is
                immediately overwritten by be_layer.op below, so it is
                effectively unused — confirm callers always pass a
                matching value.
    cpuI     -- input viewed as 2D via sliceable(dimI, 1); the extra
                padded row is the last one
    cpuE     -- error (delta) array of shape dimO
    dytpe    -- output dtype (NOTE(review): name is a typo for 'dtype',
                kept for signature compatibility)
    be_layer -- backend pool layer supplying all geometry parameters
    Returns (cpuO, cpuB): pooled output and backprop deltas.
    NOTE(review): for op == "l2" only the fprop output is computed;
    cpuB stays zero.
    """
    # pass in the backend layer for the parameters
    dimI = be_layer.dimI
    dimO = be_layer.dimO
    op = be_layer.op
    K = be_layer.K
    N = be_layer.N
    M, P, Q = be_layer.MPQ
    pad_j, pad_d, pad_h, pad_w = be_layer.padding
    str_j, str_d, str_h, str_w = be_layer.strides
    # cpu output arrays
    cpuO = np.empty(dimO, dtype=dytpe)
    cpuB = np.zeros(sliceable(dimI, 1), dtype=dytpe)
    for i in range(repeat):
        cpuB.fill(0)
        for k in range(K):
            kj = k * str_j - pad_j
            for m in range(M):
                mt = m * str_d - pad_d
                for p in range(P):
                    pr = p * str_h - pad_h
                    for q in range(Q):
                        qs = q * str_w - pad_w
                        # flat indices of this pooling window
                        idx = pixel_indices(be_layer, kj, mt, pr, qs)
                        if op == "max":
                            cpuO[k, m, p, q, :] = np.max(cpuI[idx, :], axis=0)
                            b_idx = np.argmax(cpuI[idx, :], axis=0)
                            # route each delta to the argmax position
                            for n in range(N):
                                cpuB[idx[b_idx[n]], n] += cpuE[k, m, p, q, n]
                        elif op == "avg":
                            cpuO[k, m, p, q, :] = np.mean(cpuI[idx, :], axis=0)
                            cpuB[idx, :] += cpuE[k, m, p, q, :] * (1.0/len(idx))
                        elif op == "l2":
                            cpuO[k, m, p, q, :] = np.sqrt(
                                np.sum(cpuI[idx, :] ** 2, axis=0))
    return cpuO, cpuB
def pytest_generate_tests(metafunc):
    """Parametrize any test taking a 'poolargs' fixture with each pool op.

    itt.product wraps each op in a 1-tuple, matching the poolargs[0]
    access in test_pool_layer.
    """
    if 'poolargs' in metafunc.fixturenames:
        # (removed a dead `fargs = []` that was immediately overwritten)
        op_list = ["avg", "max"]
        fargs = itt.product(op_list)
        metafunc.parametrize('poolargs', fargs)
def test_pool_layer(poolargs, device_id):
    """
    Compare pooling fprop/bprop between NervanaGPU, NervanaCPU and the
    numpy reference for one fixed geometry.

    poolargs  -- 1-tuple holding the pooling op name ("avg" or "max")
    device_id -- CUDA device ordinal for the GPU backend
    """
    op = poolargs[0]
    dtype = np.float32
    ng = NervanaGPU(stochastic_round=False, bench=True, device_id=device_id)
    nc = NervanaCPU()
    # geometry: batch N, channels C, spatial DHW, window JTRS
    N, C = 32, 32
    D, H, W = 1, 32, 32
    J, T, R, S = 2, 1, 3, 3
    padding_j, padding_d, padding_h, padding_w = 0, 0, 0, 0
    strides_j, strides_d, strides_h, strides_w = 2, 1, 2, 2
    # op = 'max'
    pool_ng = ng.pool_layer(
        dtype,
        op,
        N,
        C, D, H, W,
        J, T, R, S,
        padding_j, padding_d, padding_h, padding_w,
        strides_j, strides_d, strides_h, strides_w)
    pool_nc = nc.pool_layer(
        dtype,
        op,
        N,
        C, D, H, W,
        J, T, R, S,
        padding_j, padding_d, padding_h, padding_w,
        strides_j, strides_d, strides_h, strides_w)
    # both backends must agree on the layer geometry
    assert pool_ng.dimI == pool_nc.dimI
    assert pool_ng.dimO == pool_nc.dimO
    dimI = pool_ng.dimI
    dimO = pool_ng.dimO
    # generating input arrays for inputs and errors
    # (round-trip through float16 so all backends see identical values)
    cpuI = np.random.uniform(0.0, 1.0, sliceable(dimI, 1)).astype(
        np.float16).astype(dtype)
    cpuE = np.random.uniform(-0.2, 0.2, dimO).astype(dtype)
    # zero pad the last row of cpu input for the sake of numpy
    # (for max pooling use -inf-like fill so padding never wins argmax)
    if op == "max":
        cpuI[-1, :] = np.finfo(dtype).min
    else:
        cpuI[-1, :] = 0
    # =========GPU and CPU and numpy ==========
    beI = cpuI[:-1, :].reshape(dimI)
    beE = cpuE
    ngO, ngB = run_backend_pool(ng, pool_ng, beI, beE, dtype)
    ncO, ncB = run_backend_pool(nc, pool_nc, beI, beE, dtype)
    cpuO, cpuB = run_numpy_pool(op, cpuI, cpuE, dtype, pool_ng)
    # compare GPU vs CPU tightly, and CPU vs numpy reference even tighter
    for opA, ngA, ncA, cpuA in (
            ("fprop", ngO, ncO, cpuO),
            ("bprop", ngB, ncB.reshape(dimI), cpuB[:-1, :].reshape(dimI))):
        print opA
        assert allclose_with_out(ngA.get(), ncA.get(), rtol=0, atol=1e-4)
        assert allclose_with_out(ncA.get(), cpuA, rtol=0, atol=1e-5)
    del ng, nc
if __name__ == '__main__':
    fargs = ["max"]
    # BUG FIX: test_pool_layer(poolargs, device_id) takes two arguments;
    # calling it with only fargs raised a TypeError. Default to device 0.
    test_pool_layer(fargs, 0)
| |
# -*- coding: utf-8 -*-
"""The larger a programming ecosystem gets, the greater the chances of
runtime variability become. Currently, Python is one of the most
widely deployed high-level programming environments available, making
it a viable target for all manner of application. But it's important
to know what you're working with.
Some basic variations that are common among development machines:
* **Executable runtime**: CPython, PyPy, Jython, etc., plus build date and compiler
* **Language version**: 2.6, 2.7, 3.3, 3.4, 3.5
* **Host operating system**: Windows, OS X, Ubuntu, Debian, CentOS, RHEL, etc.
* **Features**: 64-bit, IPv6, Unicode character support (UCS-2/UCS-4)
* **Built-in library support**: OpenSSL, threading, SQLite, zlib
* **User environment**: umask, ulimit, working directory path
* **Machine info**: CPU count, hostname, filesystem encoding
See the full example profile below for more.
ecoutils was created to quantify that variability. ecoutils quickly
produces an information-dense description of critical runtime factors,
with minimal side effects. In short, ecoutils is like browser and user
agent analytics, but for Python environments.
Transmission and collection
---------------------------
The data is all JSON serializable, and is suitable for sending to a
central analytics server. An HTTP-backed service for this can be found
at: https://github.com/mahmoud/espymetrics/
Notable omissions
-----------------
Due to space constraints (and possibly latency constraints), the
following information is deemed not dense enough, and thus omitted:
* :data:`sys.path`
* full :mod:`sysconfig`
* environment variables (:data:`os.environ`)
Compatibility
-------------
So far ecoutils has been tested on Python 2.6, 2.7, 3.4, 3.5, and
PyPy. Various versions have been tested on Ubuntu, Debian, RHEL, OS X,
FreeBSD, and Windows 7.
Profile generation
------------------
Profiles are generated by :func:`ecoutils.get_profile`.
When run as a module, ecoutils will call :func:`~ecoutils.get_profile`
and print a profile in JSON format::
$ python -m boltons.ecoutils
{
"_eco_version": "1.0.0",
"cpu_count": 4,
"cwd": "/home/mahmoud/projects/boltons",
"fs_encoding": "UTF-8",
"guid": "6b139e7bbf5ad4ed8d4063bf6235b4d2",
"hostfqdn": "mahmoud-host",
"hostname": "mahmoud-host",
"linux_dist_name": "Ubuntu",
"linux_dist_version": "14.04",
"python": {
"argv": "boltons/ecoutils.py",
"bin": "/usr/bin/python",
"build_date": "Jun 22 2015 17:58:13",
"compiler": "GCC 4.8.2",
"features": {
"64bit": true,
"expat": "expat_2.1.0",
"ipv6": true,
"openssl": "OpenSSL 1.0.1f 6 Jan 2014",
"readline": true,
"sqlite": "3.8.2",
"threading": true,
"tkinter": "8.6",
"unicode_wide": true,
"zlib": "1.2.8"
},
"version": "2.7.6 (default, Jun 22 2015, 17:58:13) [GCC 4.8.2]",
"version_info": [
2,
7,
6,
"final",
0
]
},
"time_utc": "2016-05-24 07:59:40.473140",
"time_utc_offset": -8.0,
"ulimit_hard": 4096,
"ulimit_soft": 1024,
"umask": "002",
"uname": {
"machine": "x86_64",
"node": "mahmoud-host",
"processor": "x86_64",
"release": "3.13.0-85-generic",
"system": "Linux",
"version": "#129-Ubuntu SMP Thu Mar 17 20:50:15 UTC 2016"
},
"username": "mahmoud"
}
``pip install boltons`` and try it yourself!
"""
# TODO: some hash of the less-dynamic bits to put it all together
import re
import os
import sys
import json
import time
import random
import socket
import getpass
import datetime
import platform
ECO_VERSION = '1.0.0'

# True on Python 3+; used below to pick the right tkinter import.
PY_GT_2 = sys.version_info[0] > 2

# 128-bit GUID just like a UUID, but backwards compatible to 2.4.
# BUG FIX: the old `hex(...)[2:-1]` slice assumed a trailing 'L'
# (Python 2 longs); on Python 3 there is no 'L', so it silently dropped
# the last hex digit.  rstrip('L') is correct on both interpreters —
# 'L' is not a hex digit, so no real digit can be stripped by accident.
INSTANCE_ID = hex(random.getrandbits(128))[2:].rstrip('L')

IS_64BIT = sys.maxsize > 2 ** 32
HAVE_UCS4 = getattr(sys, 'maxunicode', 0) > 65536

# Feature probes: each import is attempted once at module load; any
# failure simply records the feature as unavailable.
HAVE_READLINE = True
try:
    import readline
except Exception:
    HAVE_READLINE = False
try:
    import sqlite3
    SQLITE_VERSION = sqlite3.sqlite_version
except Exception:
    SQLITE_VERSION = ''
try:
    import ssl
    try:
        OPENSSL_VERSION = ssl.OPENSSL_VERSION
    except AttributeError:
        # This is a conservative estimate for Python <2.6
        # SSL module added in 2006, when 0.9.7 was standard
        OPENSSL_VERSION = 'OpenSSL >0.8.0'
except Exception:
    OPENSSL_VERSION = ''
try:
    if PY_GT_2:
        import tkinter
    else:
        import Tkinter as tkinter
    TKINTER_VERSION = str(tkinter.TkVersion)
except Exception:
    TKINTER_VERSION = ''
try:
    import zlib
    ZLIB_VERSION = zlib.ZLIB_VERSION
except Exception:
    ZLIB_VERSION = ''
try:
    from xml.parsers import expat
    EXPAT_VERSION = expat.EXPAT_VERSION
except Exception:
    EXPAT_VERSION = ''
try:
    from multiprocessing import cpu_count
    CPU_COUNT = cpu_count()
except Exception:
    CPU_COUNT = None
try:
    import threading
    HAVE_THREADING = True
except Exception:
    HAVE_THREADING = False
try:
    HAVE_IPV6 = socket.has_ipv6
except Exception:
    HAVE_IPV6 = False
try:
    from resource import getrlimit, RLIMIT_NOFILE
    RLIMIT_FDS_SOFT, RLIMIT_FDS_HARD = getrlimit(RLIMIT_NOFILE)
except Exception:
    # resource is Unix-only; report 0/0 elsewhere
    RLIMIT_FDS_SOFT, RLIMIT_FDS_HARD = 0, 0

# captured once at import so repeated profiles share one start timestamp
START_TIME_INFO = {'time_utc': str(datetime.datetime.utcnow()),
                   'time_utc_offset': -time.timezone / 3600.0}
def get_python_info():
    """Collect interpreter metadata: paths, version, build and features."""
    features = {
        'openssl': OPENSSL_VERSION,
        'expat': EXPAT_VERSION,
        'sqlite': SQLITE_VERSION,
        'tkinter': TKINTER_VERSION,
        'zlib': ZLIB_VERSION,
        'unicode_wide': HAVE_UCS4,
        'readline': HAVE_READLINE,
        '64bit': IS_64BIT,
        'ipv6': HAVE_IPV6,
        'threading': HAVE_THREADING,
    }
    # compiler/build_date are parsed out of sys.version, so the raw
    # version string is transmitted too in case it is unparsable.
    return {
        'argv': _escape_shell_args(sys.argv),
        'bin': sys.executable,
        'version': sys.version,
        'compiler': platform.python_compiler(),
        'build_date': platform.python_build()[1],
        'version_info': list(sys.version_info),
        'features': features,
    }
def get_profile(**kwargs):
    """The main entrypoint to ecoutils. Calling this will return a
    JSON-serializable dictionary of information about the current
    process.

    It is very unlikely that the information returned will change
    during the lifetime of the process, and in most cases the majority
    of the information stays the same between runs as well.

    :func:`get_profile` takes one optional keyword argument, *scrub*,
    a :class:`bool` that, if True, blanks out identifiable
    information. This includes current working directory, hostname,
    Python executable path, command-line arguments, and
    username. Values are replaced with '-', but for compatibility keys
    remain in place.
    """
    scrub = kwargs.pop('scrub', False)
    if kwargs:
        raise TypeError('unexpected keyword arguments: %r' % (kwargs.keys(),))
    ret = {}
    try:
        ret['username'] = getpass.getuser()
    except Exception:
        ret['username'] = ''
    ret['guid'] = str(INSTANCE_ID)
    ret['hostname'] = socket.gethostname()
    ret['hostfqdn'] = socket.getfqdn()
    uname = platform.uname()
    ret['uname'] = {'system': uname[0],
                    'node': uname[1],
                    'release': uname[2],  # linux: distro name
                    'version': uname[3],  # linux: kernel version
                    'machine': uname[4],
                    'processor': uname[5]}
    linux_dist = platform.linux_distribution()
    ret['linux_dist_name'] = linux_dist[0]
    ret['linux_dist_version'] = linux_dist[1]
    ret['cpu_count'] = CPU_COUNT
    ret['fs_encoding'] = sys.getfilesystemencoding()
    ret['ulimit_soft'] = RLIMIT_FDS_SOFT
    ret['ulimit_hard'] = RLIMIT_FDS_HARD
    ret['cwd'] = os.getcwd()
    # BUG FIX: the old `os.umask(os.umask(2))` always evaluated to 2 —
    # the outer call returns the probe value just installed by the inner
    # call — so the profile reported '002' regardless of the real umask.
    # Probe, restore immediately, and report the captured value instead.
    cur_umask = os.umask(2)
    os.umask(cur_umask)
    ret['umask'] = '{0:03o}'.format(cur_umask)
    ret['python'] = get_python_info()
    ret.update(START_TIME_INFO)
    ret['_eco_version'] = ECO_VERSION
    if scrub:
        # mask identifiable information
        ret['cwd'] = '-'
        ret['hostname'] = '-'
        ret['hostfqdn'] = '-'
        ret['python']['bin'] = '-'
        ret['python']['argv'] = '-'
        ret['uname']['node'] = '-'
        ret['username'] = '-'
    return ret
def main():
    """Print the current environment's profile as sorted, indented JSON."""
    print(json.dumps(get_profile(), sort_keys=True, indent=2))
#############################################
# The shell escaping copied in from strutils
#############################################
def _escape_shell_args(args, sep=' ', style=None):
if not style:
style = 'cmd' if sys.platform == 'win32' else 'sh'
if style == 'sh':
return _args2sh(args, sep=sep)
elif style == 'cmd':
return _args2cmd(args, sep=sep)
raise ValueError("style expected one of 'cmd' or 'sh', not %r" % style)
# Finds the first character that makes an argument unsafe to leave
# unquoted in a POSIX shell (conservative allow-list of safe chars).
_find_sh_unsafe = re.compile(r'[^a-zA-Z0-9_@%+=:,./-]').search
def _args2sh(args, sep=' '):
# see strutils
ret_list = []
for arg in args:
if not arg:
ret_list.append("''")
continue
if _find_sh_unsafe(arg) is None:
ret_list.append(arg)
continue
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
ret_list.append("'" + arg.replace("'", "'\"'\"'") + "'")
return ' '.join(ret_list)
def _args2cmd(args, sep=' '):
    """Quote *args* for the Windows cmd shell (subprocess-style rules).

    Mirrors subprocess.list2cmdline-style escaping: backslashes are only
    doubled when they precede a double quote, and arguments containing
    whitespace (or empty ones) are wrapped in double quotes.
    NOTE(review): *sep* is accepted but unused — output is always
    space-separated; confirm whether that is intentional. (see strutils)
    """
    result = []
    needquote = False
    for arg in args:
        bs_buf = []
        # Add a space to separate this argument from the others
        if result:
            result.append(' ')
        needquote = (" " in arg) or ("\t" in arg) or not arg
        if needquote:
            result.append('"')
        for c in arg:
            if c == '\\':
                # Don't know if we need to double yet.
                bs_buf.append(c)
            elif c == '"':
                # Double backslashes.
                result.append('\\' * len(bs_buf)*2)
                bs_buf = []
                result.append('\\"')
            else:
                # Normal char
                if bs_buf:
                    result.extend(bs_buf)
                    bs_buf = []
                result.append(c)
        # Add remaining backslashes, if any.
        if bs_buf:
            result.extend(bs_buf)
        if needquote:
            # backslashes before the closing quote must be doubled too
            result.extend(bs_buf)
            result.append('"')
    return ''.join(result)
############################
# End shell escaping code
############################
# script entrypoint: `python -m boltons.ecoutils` prints the profile
if __name__ == '__main__':
    main()
| |
from django.conf import settings
import re
class Menu(object):
    """
    Menu is a class that generates menus

    It allows for multiple named menus, which can be accessed in your
    templates using the generate_menu template tag.

    Menus are loaded from the INSTALLED_APPS, inside a file named
    menus.py. This file should import the Menu & MenuItem classes and
    then call add_item:

        Menu.add_item("main", MenuItem("My item",
                                       reverse("myapp.views.myview"),
                                       weight=10))

    Note: You cannot have the same URL in a MenuItem in different
    Menus, but it is not enforced. If you do, submenus will not work
    properly.
    """
    # class-level registries shared process-wide:
    #   items  -- menu name -> list of MenuItems
    #   sorted -- menu name -> whether that list is weight-sorted
    #   loaded -- whether INSTALLED_APPS menus.py modules were imported
    items = {}
    sorted = {}
    loaded = False

    @classmethod
    def add_item(cls, name, item):
        """
        add_item adds MenuItems to the menu identified by 'name'
        """
        # silently ignore anything that is not a MenuItem
        if isinstance(item, MenuItem):
            cls.items.setdefault(name, []).append(item)
            cls.sorted[name] = False

    @classmethod
    def load_menus(cls):
        """
        load_menus loops through INSTALLED_APPS and loads the menus.py
        files from them.
        """
        # we don't need to do this more than once
        if cls.loaded:
            return
        # loop through our INSTALLED_APPS
        for app in settings.INSTALLED_APPS:
            # skip any django apps
            if app.startswith("django."):
                continue
            menu_module = '%s.menus' % app
            try:
                __import__(menu_module, fromlist=["menu"])
            except ImportError:
                # best effort: apps without a menus.py are skipped.
                # NOTE(review): this also hides ImportErrors raised
                # *inside* an existing menus.py — confirm acceptable.
                pass
        cls.loaded = True

    @classmethod
    def sort_menus(cls):
        """
        sort_menus goes through the items and sorts them based on
        their weight
        """
        for name in cls.items:
            if not cls.sorted[name]:
                cls.items[name].sort(key=lambda item: item.weight)
                cls.sorted[name] = True

    @classmethod
    def process(cls, request, name=None):
        """
        process uses the current request to determine which menus
        should be visible, which are selected, etc.
        """
        # make sure we're loaded & sorted
        cls.load_menus()
        cls.sort_menus()

        if name is None:
            # special case, process all menus
            return dict(
                (menu_name, cls.process(request, menu_name))
                for menu_name in cls.items
            )

        if name not in cls.items:
            return []

        # the visible item with the longest matching URL wins selection
        curitem = None
        for item in cls.items[name]:
            item.process(request)
            if item.visible:
                item.selected = False
                if item.match_url(request):
                    if curitem is None or len(curitem.url) < len(item.url):
                        curitem = item
        if curitem is not None:
            curitem.selected = True

        # return only visible items
        visible = [
            item
            for item in cls.items[name]
            if item.visible
        ]

        # determine if we should apply 'selected' to parents when one of
        # their children is the 'selected' menu
        if getattr(settings, 'MENU_SELECT_PARENTS', False):
            def is_child_selected(item):
                for child in item.children:
                    if child.selected or is_child_selected(child):
                        return True
            for item in visible:
                if is_child_selected(item):
                    item.selected = True

        return visible
class MenuItem(object):
    """
    MenuItem represents an item in a menu, possibly one that has a sub-
    menu (children).
    """
    def __init__(self, title, url, children=None, weight=1, check=None,
                 visible=True, slug=None, exact_url=False, **kwargs):
        """
        MenuItem constructor

        title       either a string or a callable to be used for the title
        url         the url of the item
        children    an array of MenuItems that are sub menus to this item
                    this can also be a callable that generates an array
        weight      used to sort adjacent MenuItems
        check       a callable to determine if this item is visible
        slug        used to generate id's in the HTML, auto generated from
                    the title if left as None
        exact_url   normally we check if the url matches the request prefix
                    this requires an exact match if set

        All other keyword arguments passed into the MenuItem constructor are
        assigned to the MenuItem object as attributes so that they may be used
        in your templates. This allows you to attach arbitrary data and use it
        in which ever way suits your menus the best.
        """
        self.url = url
        self.title = title
        self._title = None
        self.visible = visible
        # BUG FIX: 'children' previously defaulted to a shared mutable
        # list ([]), so every item created without children aliased the
        # same list. Default to None and give each item its own list.
        self.children = children if children is not None else []
        self.children_sorted = False
        self.weight = weight
        self.check = check
        self.slug = slug
        self.exact_url = exact_url
        self.selected = False
        self.parent = None

        # merge our kwargs into our self
        for k in kwargs:
            setattr(self, k, kwargs[k])

        # if title is a callable store a reference to it for later
        # then we'll process it at runtime
        if callable(title):
            self.title = ""
            self._title = title

    def process(self, request):
        """
        process determines if this item should visible, if its selected, etc...
        """
        self.check_check(request)
        if not self.visible:
            return
        self.check_title(request)
        self.check_children(request)
        children = [
            kid
            for kid in self.children
            if kid.visible
        ]
        # the visible child with the longest matching URL wins selection
        curitem = None
        for item in children:
            item.process(request)
            item.selected = False
            if item.match_url(request):
                if curitem is None or len(curitem.url) < len(item.url):
                    curitem = item
        if curitem is not None:
            curitem.selected = True

    def match_url(self, request):
        """
        match url determines if this is selected
        """
        matched = False
        if self.exact_url:
            if re.match("%s$" % (self.url,), request.path):
                matched = True
        elif re.match("%s" % self.url, request.path):
            matched = True
        return matched

    def check_children(self, request):
        """Resolve callable children and link each kid back to its parent."""
        if hasattr(self, '_children'):
            self.children = self._children(request)
        if callable(self.children):
            kids = self.children(request)
            self._children = self.children
            self.children = kids
        for kid in self.children:
            kid.parent = self

    def check_check(self, request):
        """Run the optional visibility check callable."""
        if callable(self.check):
            self.visible = self.check(request)

    def check_title(self, request):
        """Resolve a callable title and derive the slug from it if unset."""
        if callable(self._title):
            self.title = self._title(request)
        if self.slug is None:
            self.slug = re.sub(r'[^a-zA-Z0-9\-]+', '_',
                               self.title.lower()).strip('_')
| |
import os
import shutil
from django.contrib.auth.models import Group
from django.contrib.messages import get_messages
from django.core.urlresolvers import reverse
from django.core.files.uploadedfile import UploadedFile
from rest_framework import status
from hs_core import hydroshare
from hs_core.views import set_resource_flag
from hs_core.testing import MockIRODSTestCaseMixin, ViewTestCase
class TestSetResourceFlag(MockIRODSTestCaseMixin, ViewTestCase):
    """Tests for the set_resource_flag view.

    Uses two GenericResources: gen_res_one has no file/metadata (so
    public/discoverable must be refused), gen_res_two has both (so they
    must succeed).
    """
    def setUp(self):
        """Create the test user plus one bare and one complete resource."""
        super(TestSetResourceFlag, self).setUp()
        self.group, _ = Group.objects.get_or_create(name='Resource Author')
        self.username = 'john'
        self.password = 'jhmypassword'
        self.user = hydroshare.create_account(
            'john@gmail.com',
            username=self.username,
            first_name='John',
            last_name='Clarson',
            superuser=False,
            password=self.password,
            groups=[]
        )
        # resource with no content file or metadata
        self.gen_res_one = hydroshare.create_resource(
            resource_type='GenericResource',
            owner=self.user,
            title='Generic Resource Set Flag Testing-1'
        )
        # Make a text file
        self.txt_file_name = 'text.txt'
        self.txt_file_path = os.path.join(self.temp_dir, self.txt_file_name)
        txt = open(self.txt_file_path, 'w')
        txt.write("Hello World\n")
        txt.close()
        self.txt_file = open(self.txt_file_path, 'r')
        files = [UploadedFile(self.txt_file, name=self.txt_file_name)]
        metadata_dict = [
            {'description': {'abstract': 'My test abstract'}},
            {'subject': {'value': 'sub-1'}}
        ]
        # resource with a content file and the required metadata
        self.gen_res_two = hydroshare.create_resource(
            resource_type='GenericResource',
            owner=self.user,
            title='Generic Resource Set Flag Testing-2',
            files=files,
            metadata=metadata_dict
        )
    def tearDown(self):
        """Remove the scratch directory created for the uploaded file."""
        if os.path.exists(self.temp_dir):
            shutil.rmtree(self.temp_dir)
        super(TestSetResourceFlag, self).tearDown()
    def test_set_resource_flag_make_public(self):
        """Making public fails without file/metadata, succeeds with them."""
        # here we are testing the set_resource_flag view function to make a resource public
        # test that trying set the resource flag to public when there is no content file
        # or required metadata it should not change the resource flag
        # test that the resource is not public
        self.assertEqual(self.gen_res_one.raccess.public, False)
        url_params = {'shortkey': self.gen_res_one.short_id}
        post_data = {'t': 'make_public'}
        url = reverse('set_resource_flag', kwargs=url_params)
        request = self.factory.post(url, data=post_data)
        request.user = self.user
        self.set_request_message_attributes(request)
        self.add_session_to_request(request)
        set_resource_flag(request, shortkey=self.gen_res_one.short_id)
        # check that the resource is still not public
        self.gen_res_one.raccess.refresh_from_db()
        self.assertEqual(self.gen_res_one.raccess.public, False)
        flag_messages = get_messages(request)
        err_messages = [m for m in flag_messages if m.tags == 'error']
        self.assertNotEqual(len(err_messages), 0)
        # setting flag to public for 2nd resource should succeed
        # test that the resource is not public
        self.assertEqual(self.gen_res_two.raccess.public, False)
        url_params = {'shortkey': self.gen_res_two.short_id}
        post_data = {'t': 'make_public'}
        url = reverse('set_resource_flag', kwargs=url_params)
        request = self.factory.post(url, data=post_data)
        request.user = self.user
        self.set_request_message_attributes(request)
        self.add_session_to_request(request)
        response = set_resource_flag(request, shortkey=self.gen_res_two.short_id)
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        flag_messages = get_messages(request)
        err_messages = [m for m in flag_messages if m.tags == 'error']
        self.assertEqual(len(err_messages), 0)
        # check that the resource is public now
        self.gen_res_two.raccess.refresh_from_db()
        self.assertEqual(self.gen_res_two.raccess.public, True)
        # clean up
        hydroshare.delete_resource(self.gen_res_one.short_id)
        hydroshare.delete_resource(self.gen_res_two.short_id)
    def test_set_resource_flag_make_private(self):
        """A public resource can always be made private again."""
        # here we are testing the set_resource_flag view function to make a resource private
        # test that the resource is not public
        self.assertEqual(self.gen_res_one.raccess.public, False)
        # set it to public
        self.gen_res_one.raccess.public = True
        self.gen_res_one.raccess.save()
        url_params = {'shortkey': self.gen_res_one.short_id}
        post_data = {'t': 'make_private'}
        url = reverse('set_resource_flag', kwargs=url_params)
        request = self.factory.post(url, data=post_data)
        request.user = self.user
        self.set_request_message_attributes(request)
        self.add_session_to_request(request)
        response = set_resource_flag(request, shortkey=self.gen_res_one.short_id)
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        flag_messages = get_messages(request)
        err_messages = [m for m in flag_messages if m.tags == 'error']
        self.assertEqual(len(err_messages), 0)
        # check that the resource is private now
        self.gen_res_one.raccess.refresh_from_db()
        self.assertEqual(self.gen_res_one.raccess.public, False)
        # clean up
        hydroshare.delete_resource(self.gen_res_one.short_id)
        hydroshare.delete_resource(self.gen_res_two.short_id)
    def test_set_resource_flag_make_discoverable(self):
        """Discoverable fails without file/metadata, succeeds with them."""
        # here we are testing the set_resource_flag view function to make a resource discoverable
        # test that trying set the resource discoverable when there is no content file
        # or required metadata it should not make the resource discoverable
        # test that the resource is not discoverable
        self.assertEqual(self.gen_res_one.raccess.discoverable, False)
        url_params = {'shortkey': self.gen_res_one.short_id}
        post_data = {'t': 'make_discoverable'}
        url = reverse('set_resource_flag', kwargs=url_params)
        request = self.factory.post(url, data=post_data)
        request.user = self.user
        self.set_request_message_attributes(request)
        self.add_session_to_request(request)
        set_resource_flag(request, shortkey=self.gen_res_one.short_id)
        # check that the resource is still not discoverable
        self.gen_res_one.raccess.refresh_from_db()
        self.assertEqual(self.gen_res_one.raccess.discoverable, False)
        flag_messages = get_messages(request)
        err_messages = [m for m in flag_messages if m.tags == 'error']
        self.assertNotEqual(len(err_messages), 0)
        # setting flag to discoverable for 2nd resource should succeed
        # test that the resource is not discoverable
        self.assertEqual(self.gen_res_two.raccess.discoverable, False)
        url_params = {'shortkey': self.gen_res_two.short_id}
        post_data = {'t': 'make_discoverable'}
        url = reverse('set_resource_flag', kwargs=url_params)
        request = self.factory.post(url, data=post_data)
        request.user = self.user
        self.set_request_message_attributes(request)
        self.add_session_to_request(request)
        response = set_resource_flag(request, shortkey=self.gen_res_two.short_id)
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        flag_messages = get_messages(request)
        err_messages = [m for m in flag_messages if m.tags == 'error']
        self.assertEqual(len(err_messages), 0)
        # check that the resource is discoverable now
        self.gen_res_two.raccess.refresh_from_db()
        self.assertEqual(self.gen_res_two.raccess.discoverable, True)
        # clean up
        hydroshare.delete_resource(self.gen_res_one.short_id)
        hydroshare.delete_resource(self.gen_res_two.short_id)
    def test_set_resource_flag_make_not_discoverable(self):
        """A discoverable resource can be made not discoverable."""
        # here we are testing the set_resource_flag view function to make a resource
        # not discoverable
        # test that the resource is not discoverable
        self.assertEqual(self.gen_res_one.raccess.discoverable, False)
        # make it discoverable
        self.gen_res_one.raccess.discoverable = True
        self.gen_res_one.raccess.save()
        url_params = {'shortkey': self.gen_res_one.short_id}
        post_data = {'t': 'make_not_discoverable'}
        url = reverse('set_resource_flag', kwargs=url_params)
        request = self.factory.post(url, data=post_data)
        request.user = self.user
        self.set_request_message_attributes(request)
        self.add_session_to_request(request)
        response = set_resource_flag(request, shortkey=self.gen_res_one.short_id)
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        flag_messages = get_messages(request)
        err_messages = [m for m in flag_messages if m.tags == 'error']
        self.assertEqual(len(err_messages), 0)
        # check that the resource is not discoverable now
        self.gen_res_one.raccess.refresh_from_db()
        self.assertEqual(self.gen_res_one.raccess.discoverable, False)
        # clean up
        hydroshare.delete_resource(self.gen_res_one.short_id)
        hydroshare.delete_resource(self.gen_res_two.short_id)
    def test_set_resource_flag_make_shareable(self):
        """A non-shareable resource can be made shareable."""
        # here we are testing the set_resource_flag view function to make a resource shareable
        # test that the resource is shareable
        self.assertEqual(self.gen_res_one.raccess.shareable, True)
        # set it not shareable
        self.gen_res_one.raccess.shareable = False
        self.gen_res_one.raccess.save()
        url_params = {'shortkey': self.gen_res_one.short_id}
        post_data = {'t': 'make_shareable'}
        url = reverse('set_resource_flag', kwargs=url_params)
        request = self.factory.post(url, data=post_data)
        request.user = self.user
        self.set_request_message_attributes(request)
        self.add_session_to_request(request)
        response = set_resource_flag(request, shortkey=self.gen_res_one.short_id)
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        flag_messages = get_messages(request)
        err_messages = [m for m in flag_messages if m.tags == 'error']
        self.assertEqual(len(err_messages), 0)
        # check that the resource is shareable now
        self.gen_res_one.raccess.refresh_from_db()
        self.assertEqual(self.gen_res_one.raccess.shareable, True)
        # clean up
        hydroshare.delete_resource(self.gen_res_one.short_id)
        hydroshare.delete_resource(self.gen_res_two.short_id)
    def test_set_resource_flag_make_not_shareable(self):
        """A shareable resource can be made not shareable."""
        # here we are testing the set_resource_flag view function to make a resource not shareable
        # test that the resource is shareable
        self.assertEqual(self.gen_res_one.raccess.shareable, True)
        url_params = {'shortkey': self.gen_res_one.short_id}
        post_data = {'t': 'make_not_shareable'}
        url = reverse('set_resource_flag', kwargs=url_params)
        request = self.factory.post(url, data=post_data)
        request.user = self.user
        self.set_request_message_attributes(request)
        self.add_session_to_request(request)
        response = set_resource_flag(request, shortkey=self.gen_res_one.short_id)
        self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
        flag_messages = get_messages(request)
        err_messages = [m for m in flag_messages if m.tags == 'error']
        self.assertEqual(len(err_messages), 0)
        # check that the resource is not shareable now
        self.gen_res_one.raccess.refresh_from_db()
        self.assertEqual(self.gen_res_one.raccess.shareable, False)
        # clean up
        hydroshare.delete_resource(self.gen_res_one.short_id)
        hydroshare.delete_resource(self.gen_res_two.short_id)
| |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import pkg_resources
import unittest
from pylons import tmpl_context as c
from alluratest.controller import TestController, setup_basic_test, setup_global_objects
from allura.tests import decorators as td
from allura.lib import helpers as h
from allura.model import User
from allura import model as M
from forgetracker import model as TM
# important to be distinct from 'test' which ForgeGit uses, so that the
# tests can run in parallel and not clobber each other
test_project_with_repo = 'test2'
# decorator that mounts a Git tool ('src-git') on that project for a test
with_git = td.with_tool(test_project_with_repo, 'Git',
                        'src-git', 'Git', type='git')
class TestStats(TestController):
    """Functional tests for user stats: login counters, wiki artifact
    counters, and tracker (ticket) counters accumulated on ``user.stats``."""

    def setUp(self):
        super(TestStats, self).setUp()
        # give test-user Admin on the 'test' project so it can act in tests
        p = M.Project.query.get(shortname='test')
        p.add_user(M.User.by_username('test-user'), ['Admin'])

    def test_login(self):
        # logging in should bump both the total and last-month login counters
        user = User.by_username('test-user')
        init_logins = user.stats.tot_logins_count
        self.app.get('/')  # establish session
        self.app.post('/auth/do_login', params=dict(
            username=user.username, password='foo',
            _session_id=self.app.cookies['_session_id']
        ))
        assert user.stats.tot_logins_count == 1 + init_logins
        assert user.stats.getLastMonthLogins() == 1 + init_logins

    @td.with_user_project('test-admin')
    @td.with_tool('test', 'wiki', mount_point='wiki', mount_label='wiki', username='test-admin')
    def test_wiki_stats(self):
        # creating a wiki page counts as 'created'; editing an existing page
        # counts as 'modified' -- both overall and for art_type "Wiki"
        initial_artifacts = c.user.stats.getArtifacts()
        initial_wiki = c.user.stats.getArtifacts(art_type="Wiki")
        self.app.post('/wiki/TestPage/update',
                      params=dict(title='TestPage', text='some text'),
                      extra_environ=dict(username=str(c.user.username)))
        artifacts = c.user.stats.getArtifacts()
        wiki = c.user.stats.getArtifacts(art_type="Wiki")
        assert artifacts['created'] == 1 + initial_artifacts['created']
        assert artifacts['modified'] == initial_artifacts['modified']
        assert wiki['created'] == 1 + initial_wiki['created']
        assert wiki['modified'] == initial_wiki['modified']
        # second page: another 'created', still no 'modified'
        self.app.post('/wiki/TestPage2/update',
                      params=dict(title='TestPage2', text='some text'),
                      extra_environ=dict(username=str(c.user.username)))
        artifacts = c.user.stats.getArtifacts()
        wiki = c.user.stats.getArtifacts(art_type="Wiki")
        assert artifacts['created'] == 2 + initial_artifacts['created']
        assert artifacts['modified'] == initial_artifacts['modified']
        assert wiki['created'] == 2 + initial_wiki['created']
        assert wiki['modified'] == initial_wiki['modified']
        # editing an existing page: 'modified' increments, 'created' does not
        self.app.post('/wiki/TestPage2/update',
                      params=dict(title='TestPage2',
                                  text='some modified text'),
                      extra_environ=dict(username=str(c.user.username)))
        artifacts = c.user.stats.getArtifacts()
        wiki = c.user.stats.getArtifacts(art_type="Wiki")
        assert artifacts['created'] == 2 + initial_artifacts['created']
        assert artifacts['modified'] == 1 + initial_artifacts['modified']
        assert wiki['created'] == 2 + initial_wiki['created']
        assert wiki['modified'] == 1 + initial_wiki['modified']

    @td.with_tool('test', 'tickets', mount_point='tickets', mount_label='tickets', username='test-admin')
    def test_tracker_stats(self):
        # Ticket lifecycle exercised below, with the expected stat deltas:
        # 1) create a ticket assigned to self -> assigned+1, created+1
        # 2) close it                         -> solved+1,   modified+1
        # 3) create an unassigned ticket      -> created+1
        # 4) assign it to self                -> assigned+1, modified+1
        # 5) reassign it to someone else      -> revoked+1,  modified+1
        initial_tickets = c.user.stats.getTickets()
        initial_tickets_artifacts = c.user.stats.getArtifacts(
            art_type="Ticket")
        self.app.post('/tickets/save_ticket',
                      params={'ticket_form.summary': 'test',
                              'ticket_form.assigned_to': str(c.user.username)},
                      extra_environ=dict(username=str(c.user.username)))
        ticketnum = str(TM.Ticket.query.get(summary='test').ticket_num)
        tickets = c.user.stats.getTickets()
        tickets_artifacts = c.user.stats.getArtifacts(art_type="Ticket")
        assert tickets['assigned'] == initial_tickets['assigned'] + 1
        assert tickets['solved'] == initial_tickets['solved']
        assert tickets['revoked'] == initial_tickets['revoked']
        assert tickets_artifacts[
            'created'] == initial_tickets_artifacts['created'] + 1
        assert tickets_artifacts[
            'modified'] == initial_tickets_artifacts['modified']
        self.app.post('/tickets/%s/update_ticket_from_widget' % ticketnum,
                      params={'ticket_form.ticket_num': ticketnum,
                              'ticket_form.summary': 'footext3',
                              'ticket_form.status': 'closed'},
                      extra_environ=dict(username=str(c.user.username)))
        tickets = c.user.stats.getTickets()
        tickets_artifacts = c.user.stats.getArtifacts(art_type="Ticket")
        assert tickets['assigned'] == initial_tickets['assigned'] + 1
        assert tickets['solved'] == initial_tickets['solved'] + 1
        assert tickets['revoked'] == initial_tickets['revoked']
        assert tickets_artifacts[
            'created'] == initial_tickets_artifacts['created'] + 1
        assert tickets_artifacts[
            'modified'] == initial_tickets_artifacts['modified'] + 1
        self.app.post('/tickets/save_ticket',
                      params={'ticket_form.summary': 'test2'},
                      extra_environ=dict(username=str(c.user.username)))
        ticketnum = str(TM.Ticket.query.get(summary='test2').ticket_num)
        tickets = c.user.stats.getTickets()
        tickets_artifacts = c.user.stats.getArtifacts(art_type="Ticket")
        assert tickets['assigned'] == initial_tickets['assigned'] + 1
        assert tickets['solved'] == initial_tickets['solved'] + 1
        assert tickets['revoked'] == initial_tickets['revoked']
        assert tickets_artifacts[
            'created'] == initial_tickets_artifacts['created'] + 2
        assert tickets_artifacts[
            'modified'] == initial_tickets_artifacts['modified'] + 1
        self.app.post('/tickets/%s/update_ticket_from_widget' % ticketnum,
                      params={'ticket_form.ticket_num': ticketnum,
                              'ticket_form.summary': 'test2',
                              'ticket_form.assigned_to': str(c.user.username)},
                      extra_environ=dict(username=str(c.user.username)))
        tickets = c.user.stats.getTickets()
        tickets_artifacts = c.user.stats.getArtifacts(art_type="Ticket")
        assert tickets['assigned'] == initial_tickets['assigned'] + 2
        assert tickets['solved'] == initial_tickets['solved'] + 1
        assert tickets['revoked'] == initial_tickets['revoked']
        assert tickets_artifacts[
            'created'] == initial_tickets_artifacts['created'] + 2
        assert tickets_artifacts[
            'modified'] == initial_tickets_artifacts['modified'] + 2
        self.app.post('/tickets/%s/update_ticket_from_widget' % ticketnum,
                      params={'ticket_form.ticket_num': ticketnum,
                              'ticket_form.summary': 'test2',
                              'ticket_form.assigned_to': 'test-user'},
                      extra_environ=dict(username=str(c.user.username)))
        tickets = c.user.stats.getTickets()
        tickets_artifacts = c.user.stats.getArtifacts(art_type="Ticket")
        assert tickets['assigned'] == initial_tickets['assigned'] + 2
        assert tickets['solved'] == initial_tickets['solved'] + 1
        assert tickets['revoked'] == initial_tickets['revoked'] + 1
        assert tickets_artifacts[
            'created'] == initial_tickets_artifacts['created'] + 2
        assert tickets_artifacts[
            'modified'] == initial_tickets_artifacts['modified'] + 3
class TestGitCommit(TestController, unittest.TestCase):
    """Tests commit stats computed from the bundled fixture repo testgit.git."""

    def setUp(self):
        super(TestGitCommit, self).setUp()
        setup_basic_test()
        # claim and confirm rcopeland@geek.net for test-admin -- presumably
        # the fixture commits use that author address, so they get attributed
        # to this user (TODO confirm against the fixture repo)
        user = User.by_username('test-admin')
        user.set_password('testpassword')
        user.claim_address('rcopeland@geek.net')
        addr = M.EmailAddress.get(email='rcopeland@geek.net')
        addr.confirmed = True
        self.setup_with_tools()

    @with_git
    @td.with_wiki
    def setup_with_tools(self):
        setup_global_objects()
        h.set_context(test_project_with_repo, 'src-git',
                      neighborhood='Projects')
        # point the mounted Git tool at the repository shipped in tests/data
        repo_dir = pkg_resources.resource_filename(
            'forgeuserstats', 'tests/data')
        c.app.repo.fs_path = repo_dir
        c.app.repo.name = 'testgit.git'
        self.repo = c.app.repo
        self.repo.refresh()
        self.rev = self.repo.commit('HEAD')

    @td.with_user_project('test-admin')
    def test_commit(self):
        # the fixture repo yields 4 commits for this user, all recent enough
        # to also appear in the last-month window
        commits = c.user.stats.getCommits()
        assert commits['number'] == 4
        lmcommits = c.user.stats.getLastMonthCommits()
        assert lmcommits['number'] == 4
| |
from tcga_encoder.utils.helpers import *
from tcga_encoder.data.data import *
from tcga_encoder.definitions.tcga import *
from tcga_encoder.definitions.nn import *
from tcga_encoder.definitions.locations import *
from sklearn.model_selection import StratifiedKFold
from scipy import stats
from tcga_encoder.models.dna.models import *
# Names of the supported classifier families; dispatched in model_by_method
# and used to pick the matching input representation in prepare_data_store.
BETA_METHOD = "beta"
BETA_METHOD2 = "beta2"
POISSON_METHOD = "poisson"
GAUSSIAN_METHOD = "gaussian"
KDE_METHOD = "kde"
NEGBIN_METHOD = "negbin"
LOGREG_METHOD = "logreg"
# pseudo-count added to read counts before log2 to avoid log(0)
log_prior = 1e-2
def model_by_method( method ):
    """Return a fresh, unfitted model instance for the given method name.

    method: one of the *_METHOD string constants defined above.
    Raises ValueError for an unknown name.  (Previously this was
    `assert False`, which is silently stripped under `python -O`,
    making the function fall through and return None.)
    """
    if method == BETA_METHOD or method == BETA_METHOD2:
        return BetaNaiveBayesModel()
    elif method == POISSON_METHOD:
        return PoissonNaiveBayesModel()
    elif method == KDE_METHOD:
        return KernelDensityNaiveBayesModel()
    elif method == GAUSSIAN_METHOD:
        return GaussianNaiveBayesModel()
    elif method == NEGBIN_METHOD:
        return NegBinNaiveBayesModel()
    elif method == LOGREG_METHOD:
        return LogisticRegressionModel()
    raise ValueError( "No model called %s"%(method) )
def compute_auc_by_disease( true_y, est_y, diseases ):
    """Compute one ROC AUC per disease cohort.

    true_y: 0/1 labels per sample; est_y: scores per sample;
    diseases: cohort code per sample (same length).
    Returns an array aligned with np.unique(diseases); entries stay NaN for
    cohorts where the AUC is undefined (only one class present).
    """
    u_diseases = np.unique(diseases)
    aucs = np.nan*np.ones( len(u_diseases) )
    d_idx = 0
    for disease in u_diseases:
        # np.flatnonzero replaces pp.find, which was removed from matplotlib
        I = np.flatnonzero( disease == diseases )
        y = true_y[I]
        x = est_y[I]
        n = len(I)
        n_1 = y.sum()
        n_0 = n-n_1
        # AUC needs BOTH classes in the cohort.  The original test
        # `n_1 > 0 and n_0 < n` is equivalent to just `n_1 > 0`, so an
        # all-positive cohort slipped through to roc_auc_score and crashed.
        if n_1 > 0 and n_0 > 0:
            aucs[ d_idx ] = roc_auc_score( y, x )
        d_idx += 1
    return aucs
def run_method( data, results_location, results_store, \
                dna_gene, source, method, disease_string, \
                n_folds, n_xval_repeats, \
                randomize_labels, label_permutation_idx ):
    """Run repeated stratified cross-validation for one (gene, source, method).

    data: (dna_data, source_data) as returned by prepare_data_store.
    All results (whole-panel and per-feature AUCs, out-of-fold predictions,
    labels) are written into results_store under
    /{disease_string}/{dna_gene}/{source}/{method}/labels_{label_permutation_idx}/...
    When randomize_labels is True the targets are shuffled with seed
    label_permutation_idx, building one draw of a permutation null.
    """
    # per-disease line colours for the ROC plot, indexed by cohort position
    colors = "rgkcmrgkcmrgkcmrgkcmrgkcmrgkcmrgkcmrgkcmrgkcmrgkcmrgkcmrgkcmrgkcmrgkcmrgkcmrgkcmrgkcm"
    dna_data, source_data = data
    barcodes = source_data.index.values
    targets = dna_data.loc[ barcodes ].values
    inputs = source_data.loc[ barcodes ].values
    n_1 = np.sum(targets)      # mutated samples
    n_0 = len(targets)-n_1     # wild-type samples
    # permute the original targets
    permuted_targets = targets.copy()
    if randomize_labels == True:
        np.random.seed( label_permutation_idx )
        np.random.shuffle( permuted_targets )
    # column 0 = true labels, column 1 = (possibly) permuted labels
    labels = np.vstack((targets,permuted_targets)).T
    N = len(permuted_targets)
    D = source_data.shape[1]
    test_predictions = np.zeros( (N,n_xval_repeats), dtype=float )
    test_predictions_elementwise = np.zeros( (N,D), dtype=float )
    # cohort code is the barcode prefix, e.g. "coad_xxx" -> "coad"
    diseases = np.array( [s.split("_")[0] for s in barcodes])
    u_diseases = np.unique( diseases )
    n_diseases = len(u_diseases)
    one_hot_diseases = np.zeros( (N,n_diseases) )
    aucs = []
    aucs_by_disease=[]
    elementwise_aucs = np.zeros( (D,n_xval_repeats) )
    elementwise_aucs_by_disease = np.zeros( (n_diseases, D, n_xval_repeats) )
    # sample weight = count of positive (permuted) labels in its cohort
    # NOTE(review): unusual weighting scheme -- confirm this is intended
    weights = np.ones( len(diseases))
    k=0
    for disease in u_diseases:
        I = disease == diseases
        weights[I] = permuted_targets[I].sum()
        one_hot_diseases[I,k] = 1
        k+=1
    weights = np.array(weights)
    if label_permutation_idx == 0:
        # the ROC figure is only drawn for the unpermuted run
        f = pp.figure()
        ax_roc = f.add_subplot(111)
    # cannot stratify into more folds than the smaller class has samples
    n_folds = np.minimum( n_folds, np.minimum(n_0,n_1) )
    for xval_repeat_idx in range( n_xval_repeats ):
        print "\t\tINFO (%s): running xval repeat %d of %d"%(dna_gene,xval_repeat_idx+1, n_xval_repeats)
        # get train/test splits; seed varies per repeat so splits differ
        skf = StratifiedKFold(n_splits=n_folds, shuffle = True, random_state=xval_repeat_idx+1)
        fold_idx = 1
        for train_index, test_index in skf.split(inputs, permuted_targets):
            #print "\t\t\tINFO (%s): running fold %d of %d"%(dna_gene,fold_idx, n_folds)
            model = model_by_method( method )
            model.fit( inputs[train_index,:], permuted_targets[train_index] )#, one_hot_diseases[train_index,:] )
            #fold_test_predictions = model.predict( inputs[test_index,:], one_hot_groups=one_hot_diseases[test_index,:] )
            #fold_test_predictions_elementwise = model.predict( inputs[test_index,:], elementwise=True, one_hot_groups=one_hot_diseases[test_index,:] )
            fold_test_predictions = model.predict( inputs[test_index,:] )
            fold_test_predictions_elementwise = model.predict( inputs[test_index,:], elementwise=True )
            # each sample appears in exactly one test fold per repeat, so
            # this fills the whole prediction column over the fold loop
            test_predictions[:,xval_repeat_idx][ test_index ] = fold_test_predictions
            test_predictions_elementwise[test_index,:] = fold_test_predictions_elementwise
            #pdb.set_trace()
            fold_idx+=1
        #pdb.set_trace()
        aucs_by_disease.append( compute_auc_by_disease( permuted_targets, test_predictions[:, xval_repeat_idx], diseases ) )
        aucs.append( roc_auc_score( permuted_targets, test_predictions[:, xval_repeat_idx], sample_weight=weights ) )
        print "\t\tINFO (%s): running AUC compute repeat %d of %d"%(dna_gene,xval_repeat_idx+1, n_xval_repeats)
        for d in xrange(D):
            elementwise_aucs[d,xval_repeat_idx] = roc_auc_score( permuted_targets, test_predictions_elementwise[:, d], sample_weight=np.array(weights) )
            elementwise_aucs_by_disease[:,d,xval_repeat_idx] = compute_auc_by_disease( permuted_targets, test_predictions_elementwise[:, d], diseases )
            if d==3 and label_permutation_idx==0:
                # per-disease ROC curves for feature index 3 only
                # NOTE(review): hard-coded d==3 looks like a debugging hook
                d_idx = 0
                for disease in u_diseases:
                    I = disease == diseases
                    fpr,tpr,thre = roc_curve( permuted_targets[I], test_predictions_elementwise[:, d][I] )
                    #pdb.set_trace()
                    #print d_idx, colors
                    ax_roc.plot( fpr, tpr, colors[d_idx]+'.-', alpha=0.4 )
                    d_idx+=1
                fpr,tpr,thre = roc_curve( permuted_targets, test_predictions_elementwise[:, d], sample_weight=np.array(weights) )
                ax_roc.plot( fpr, tpr, 'b.-', alpha = 0.4, lw=0.5)
                #pdb.set_trace()
                pp.title( source_data.columns[d])
    #pdb.set_trace()
    #pdb.set_trace()
    # if label_permutation_idx == 0:
    #   figname1 = os.path.join( HOME_DIR, os.path.dirname(results_location) ) + "/rocs_%s_%s_%s_%s.png"%(dna_gene,source, method, disease_string)
    #   f.savefig( figname1, dpi=300 )
    # persist everything for this permutation index
    xval_columns = np.array( ["seed_%d"%(seed+1) for seed in range(n_xval_repeats) ] )
    results_store[ "/%s/%s/%s/%s/labels_%d/xval_aucs_elementwise"%(disease_string,dna_gene,source, method,label_permutation_idx)] = pd.DataFrame( elementwise_aucs, index = source_data.columns, columns=xval_columns )
    results_store[ "/%s/%s/%s/%s/labels_%d/xval_aucs"%(disease_string,dna_gene,source, method,label_permutation_idx)] = pd.Series( np.array(aucs), index=xval_columns )
    results_store[ "/%s/%s/%s/%s/labels_%d/xval_predictions"%(disease_string,dna_gene,source, method,label_permutation_idx)] = pd.DataFrame( test_predictions, index = barcodes, columns = xval_columns )
    results_store[ "/%s/%s/%s/%s/labels_%d/xval_targets"%(disease_string,dna_gene,source, method,label_permutation_idx)] = pd.DataFrame( labels, index = barcodes, columns = ["true","permuted"])
    for d_idx in range(len(u_diseases)):
        disease = u_diseases[d_idx]
        results_store[ "/%s/%s/%s/%s/labels_%d/diseases/%s/xval_aucs_elementwise"%(disease_string,dna_gene,source, method,label_permutation_idx,disease)] = pd.DataFrame( elementwise_aucs_by_disease[d_idx,:,:], index = source_data.columns, columns=xval_columns )
    results_store[ "/%s/%s/%s/%s/labels_%d/xval_disease_aucs"%(disease_string,dna_gene,source, method,label_permutation_idx)] = pd.DataFrame( np.array(aucs_by_disease).T, index = u_diseases, columns=xval_columns )
def prepare_results_store( results_location, mode = "a" ):
    """Open the HDF5 store that collects results, creating its directory first.

    results_location: store path relative to HOME_DIR.
    mode: pandas HDFStore mode; append by default so an existing store is
    extended rather than clobbered.
    """
    results_dir = os.path.join( HOME_DIR, os.path.dirname( results_location ) )
    check_and_mkdir( results_dir )
    return pd.HDFStore( os.path.join( HOME_DIR, results_location ), mode )
def add_dna_data_store( data_file, dna_gene, predictions ):
    """Augment a per-barcode predictions frame with DNA mutation columns.

    Adds one column per mutation-variant type that has at least one hit,
    plus the 0/1 mutation indicators for KRAS/BRAF/CTNNB1/AXIN1, prints
    the overall and per-variant AUCs, and returns the frame sorted by
    descending prediction score.
    NOTE(review): data_store is opened here but never closed.
    """
    # later add restrictions on tissue type
    #source = source.upper()
    barcodes = predictions.index.values
    data_store = pd.HDFStore( data_file, "r" )
    # NOTE(review): dna_data is loaded but never used below
    dna_data = data_store["/DNA/channel/0"].loc[ barcodes ] #[ dna_gene ]
    # available variant tables in the store:
    # /DNA/variant/Frame_Shift_Del frame (shape->[9316,168])
    # /DNA/variant/Frame_Shift_Ins frame (shape->[9316,168])
    # /DNA/variant/In_Frame_Del frame (shape->[9316,168])
    # /DNA/variant/In_Frame_Ins frame (shape->[9316,168])
    # /DNA/variant/Missense_Mutation frame (shape->[9316,168])
    # /DNA/variant/Nonsense_Mutation frame (shape->[9316,168])
    # /DNA/variant/Nonstop_Mutation frame (shape->[9316,168])
    auc = roc_auc_score( predictions["target"].values, predictions["prediction"].values)
    print "Target AUC: %0.3f"%(auc)
    v_aucs = []
    variants = ["Frame_Shift_Del","Frame_Shift_Ins","In_Frame_Del","In_Frame_Ins","Missense_Mutation","Nonsense_Mutation","Nonstop_Mutation"]
    for v in variants:
        dna = data_store["/DNA/variant/%s"%(v)].loc[ barcodes ][dna_gene]
        if np.sum(dna.values)>0:
            # only keep variants with at least one mutated sample; AUC is
            # undefined otherwise
            predictions[v] = dna
            v_auc = roc_auc_score( predictions[v].values, predictions["prediction"].values)
            print "variant %15s AUC: %0.3f"%(v, v_auc)
            v_aucs.append(v_auc)
    predictions["KRAS"] = data_store["/DNA/channel/0"].loc[ barcodes ]["KRAS"]
    predictions["BRAF"] = data_store["/DNA/channel/0"].loc[ barcodes ]["BRAF"]
    predictions["CTNNB1"] = data_store["/DNA/channel/0"].loc[ barcodes ]["CTNNB1"]
    predictions["AXIN1"] = data_store["/DNA/channel/0"].loc[ barcodes ]["AXIN1"]
    predictions.sort_values(by="prediction", ascending=False, inplace=True)
    print predictions.sum(0)
    #pdb.set_trace()
    return predictions
def prepare_data_store( data_file, dna_gene, source, method, restricted_diseases ):
# later add restrictions on tissue type
#source = source.upper()
data_store = pd.HDFStore( data_file, "r" )
#data_store.open()
# first get columns on observed, then get intersection
# sources = [DNA,source]
sources = [DNA, source]
observed = data_store["/CLINICAL/observed"][ sources ]
barcodes = observed[ observed.sum(1)==len(sources) ].index.values
#variants = ["Frame_Shift_Del","Frame_Shift_Ins","In_Frame_Del","In_Frame_Ins","Missense_Mutation","Nonsense_Mutation","Nonstop_Mutation"]
variant = "Missense_Mutation"
dna_data = data_store["/DNA/channel/0"].loc[ barcodes ] #[ dna_gene ]
# zeros = data_store["/DNA/channel/0"].loc[ barcodes ][ data_store["/DNA/channel/0"].loc[ barcodes ][ dna_gene ]==0].index.values
#
# v_ones = data_store["/DNA/variant/%s"%(variant)].loc[barcodes][ data_store["/DNA/variant/%s"%(variant)].loc[barcodes][dna_gene]==1 ].index.values
#
# barcodes = np.hstack( (v_ones,zeros))
# dna_data = data_store["/DNA/variant/%s"%(variant)].loc[ barcodes ] #[ dna_gene ]
#pdb.set_trace()
#dna_data = data_store["/DNA/variant/Missense_Mutation"].loc[ barcodes ] #[ dna_gene ]
source_data = None
if source == RNA:
if method == BETA_METHOD:
source_data = data_store["/RNA/FAIR"].loc[ barcodes ]
elif method == POISSON_METHOD or method == GAUSSIAN_METHOD or method == KDE_METHOD or method == LOGREG_METHOD:
source_data = np.log2( data_store["/RNA/RSEM"].loc[ barcodes ] + log_prior )
elif method == NEGBIN_METHOD:
#source_data = np.log2( np.maximum( 2.0, data_store["/RNA/RSEM"].loc[ barcodes ]+ log_prior ) )
source_data = data_store["/RNA/RSEM"].loc[ barcodes ]
elif source == miRNA:
if method == BETA_METHOD:
source_data = data_store["/miRNA/FAIR"].loc[ barcodes ]
elif method == POISSON_METHOD or method == GAUSSIAN_METHOD or method == KDE_METHOD or method == LOGREG_METHOD:
source_data = np.log2( data_store["/miRNA/READS"].loc[ barcodes ] + log_prior )
elif method == NEGBIN_METHOD:
source_data = data_store["/miRNA/READS"].loc[ barcodes ]
elif source == METH:
if method == BETA_METHOD:
source_data = data_store["/METH/FAIR"].loc[ barcodes ]
elif method == POISSON_METHOD or method == GAUSSIAN_METHOD or method == KDE_METHOD or method == LOGREG_METHOD:
source_data = np.log2( data_store["/METH/METH"].loc[ barcodes ] )
elif method == BETA_METHOD2:
source_data = data_store["/METH/METH"].loc[ barcodes ]
elif method == NEGBIN_METHOD:
source_data = data_store["/METH/METH"].loc[ barcodes ]
data_store.close()
if source_data is None or dna_data is None:
pdb.set_trace()
return None
else:
if len(restricted_diseases)>0:
diseases = np.array( [s.split("_")[0] for s in barcodes])
ok = np.zeros( len(diseases), dtype = bool)
for disease in restricted_diseases:
ok |= diseases == disease
source_data = source_data[ ok ]
dna_data = dna_data[ ok ]
#pdb.set_trace()
barcodes = barcodes[ok]
print "Trying to filter coadread MSI barcodes"
try:
msi_bcs = np.loadtxt( os.path.join( HOME_DIR, "data/broad_processed_april_2017" )+"/coadread_msi_barcodes.txt", dtype=str )
ok = np.zeros( len(barcodes), dtype=bool)
non_disease2idx = {}
non_disease_bcs = np.array([s.split("_")[1] for s in barcodes])
for idx in xrange(len(barcodes)):
non_disease2idx[non_disease_bcs[idx]] = idx
msi_bcs = np.array( [s.lower() for s in msi_bcs])
ok_bcs = np.setdiff1d( non_disease_bcs, msi_bcs )
for bc in ok_bcs:
ok[ non_disease2idx[bc] ] = True
source_data = source_data[ ok ]
dna_data = dna_data[ ok ]
barcodes = barcodes[ok]
#pdb.set_trace()
except:
print "could not load msi"
#pdb.set_trace()
#data_store["/DNA/channel/0"].loc[ barcodes ][ dna_gene ]
#pdb.set_trace()
dna_data = dna_data[ dna_gene ]
print "\tINFO: %s has %d of %d mutated (%0.2f percent)"%( dna_gene, dna_data.sum(), len(barcodes), 100.0*dna_data.sum() / float(len(barcodes)) )
return dna_data, source_data
def run_train( data_file, results_location, dna_gene, source, method, n_folds, n_xval_repeats, n_permutations, restricted_diseases ):
    """Full training pipeline for one gene: real-label CV, permutation null,
    plots, and a predictions CSV.

    Returns the augmented predictions DataFrame, or None when the gene is
    skipped (missing data or fewer than 10 mutated samples).
    """
    # disease_string names the sub-tree in the results store, e.g. "coad_read"
    disease_string = "ALL"
    if len(restricted_diseases)>0:
        disease_string = restricted_diseases[0]
        for d in restricted_diseases[1:]:
            disease_string += "_%s"%(d)
    # extract in for the dna_gene
    data = prepare_data_store( data_file, dna_gene, source, method, restricted_diseases )
    if data is None:
        print "Skipping gene %s"%dna_gene
        return
    dna_data,s_data = data
    # too few positives to cross-validate meaningfully
    if dna_data.sum()<10:
        print "not enough data"
        return
    # prepare HDF store
    results_store = prepare_results_store( results_location )
    # run train with correct labels
    print "..............................................................."
    print "\tINFO (%s): Running with correct labels..."%(dna_gene)
    run_method( data, results_location, results_store, dna_gene, source, method, disease_string, n_folds, n_xval_repeats, randomize_labels = False, label_permutation_idx = 0)
    # run a nbr of permutated xval repeats (permutation null distribution)
    for permutation_idx in range(n_permutations):
        print "..............................................................."
        print "\tINFO (%s): Running with permuted labels...%d of %d"%(dna_gene,permutation_idx+1, n_permutations)
        run_method( data, results_location, results_store, dna_gene, source, method, disease_string, n_folds, n_xval_repeats, randomize_labels = True, label_permutation_idx = permutation_idx+1)
    #view_results( results_location, results_store, dna_gene, n_permutations, source, method, disease_string, title_str = "all", max_nbr=1000, zoom = False )
    view_results( results_location, results_store, dna_gene, n_permutations, source, method, disease_string, title_str = "zoom", max_nbr=100, zoom=True )
    # rebuild a tidy per-barcode predictions frame from the stored results
    predictions = results_store["/%s/%s/%s/%s/labels_0/xval_predictions"%(disease_string,dna_gene,source, method)]
    targets = results_store["/%s/%s/%s/%s/labels_0/xval_targets"%(disease_string,dna_gene,source, method)]
    barcodes = predictions.index.values
    #pdb.set_trace()
    cohorts = np.array( [s.split("_")[0] for s in barcodes],dtype=str)
    bcs = np.array( [s.split("_")[1] for s in barcodes], dtype=str)
    #values = np.vstack( (predictions.loc[barcodes].values[:,0],targets.loc[barcodes].values[:,0].astype(int)) ).T
    # column 0 = first xval repeat's predictions / true labels
    p = predictions.loc[barcodes].values[:,0]
    t = targets.loc[barcodes].values[:,0].astype(int)
    predictions = pd.DataFrame( [], index=barcodes)
    predictions["cohort"] = pd.Series( cohorts, index=barcodes )
    predictions["barcodes"] = pd.Series( bcs, index=barcodes )
    predictions["prediction"] = pd.Series( p, index=barcodes )
    predictions["target"] = pd.Series( t, index=barcodes )
    #pdb.set_trace()
    predictions = add_dna_data_store( data_file, dna_gene, predictions )
    #predictinos =
    predictions.to_csv( os.path.join( HOME_DIR, os.path.dirname(results_location) )+"/predictions.csv" )
    #pdb.set_trace()
    print "... done run_train."
    return predictions
def view_results( results_location, store, gene, n_permutations, source, method, disease_string, title_str = "", max_nbr = 100, zoom = True ):
    """Plot cross-validation AUCs for one gene against the permutation null.

    Reads the labels_0 (true-label) results and the labels_1..n_permutations
    results from `store`, box-plots real vs permuted per-feature AUCs for the
    top max_nbr features, and saves the figure next to results_location.
    NOTE(review): std_auc_p and the t_tests p-values are computed but never
    used or returned.
    """
    mean_aucs = store["/%s/%s/%s/%s/labels_0/xval_aucs"%(disease_string,gene,source, method)]
    mean_auc = store["/%s/%s/%s/%s/labels_0/xval_aucs"%(disease_string,gene,source, method)].mean()
    var_auc = store["/%s/%s/%s/%s/labels_0/xval_aucs"%(disease_string,gene,source, method)].var()
    barcodes = store["/%s/%s/%s/%s/labels_0/xval_predictions"%(disease_string,gene,source, method)].index.values
    diseases = np.array( [s.split("_")[0] for s in barcodes])
    u_diseases = np.unique( diseases )
    disease_aucs = store[ "/%s/%s/%s/%s/labels_0/xval_disease_aucs"%(disease_string,gene,source, method)]
    mean_disease_aucs = disease_aucs.mean(1)
    var_disease_aucs = disease_aucs.var(1)
    std_auc = np.sqrt( var_auc )
    # per-feature AUCs sorted best-first; fixes the display order everywhere
    ordered_mean_aucs = store["/%s/%s/%s/%s/labels_0/xval_aucs_elementwise"%(disease_string,gene,source, method)].mean(1).sort_values(ascending=False)
    ordered_source_genes = ordered_mean_aucs.index.values
    ordered_var_aucs = store["/%s/%s/%s/%s/labels_0/xval_aucs_elementwise"%(disease_string,gene,source, method)].loc[ordered_source_genes].var(1)
    order_std_aucs = np.sqrt(ordered_var_aucs)
    D = len(ordered_mean_aucs.values)
    element_aucs = store["/%s/%s/%s/%s/labels_0/xval_aucs_elementwise"%(disease_string,gene,source, method)]
    # append the whole-panel AUCs as a pseudo-feature row named "ALL"
    element_aucs=element_aucs.T
    element_aucs["ALL"] = mean_aucs
    element_aucs=element_aucs.T
    orientation = "horizontal"
    if zoom is True:
        marker = 'o'
    else:
        marker = '.'
    nD = np.minimum( D, max_nbr )
    if orientation == "vertical":
        f1=pp.figure( figsize=(6,16))
    else:
        f1=pp.figure( figsize=(16,6))
    ax11 = f1.add_subplot(111)
    # disease_aucs = []
    # for disease in u_diseases:
    #   disease_aucs.append( store[ "/%s/%s/%s/%s/labels_0/diseases/%s/xval_aucs_elementwise"%(disease_string,gene,source, method, disease)].mean(1) )
    # disease_aucs = pd.concat(disease_aucs,axis=1)
    #pdb.set_trace()
    if orientation == "vertical":
        # dead branch with the current hard-coded orientation; kept as-is
        ax11.vlines( mean_disease_aucs.values, 0, nD-1, color='g' )
        for disease in u_diseases:
            aucs =store[ "/%s/%s/%s/%s/labels_0/diseases/%s/xval_aucs_elementwise"%(disease_string,gene,source, method, disease)].mean(1)
            ax11.plot( aucs.loc[ordered_source_genes].values[:nD], nD-np.arange(nD)-1, '.-', mec = 'k', label = "%s"%(disease) )
        #pdb.set_trace()
        ax11.plot( ordered_mean_aucs.values[:nD], nD-np.arange(nD)-1, 'b'+marker+"-", mec = 'k', label = "True" )
        ax11.fill_betweenx( nD-np.arange(nD), \
                            ordered_mean_aucs.values[:nD] + 2*order_std_aucs.values[:nD], \
                            ordered_mean_aucs.values[:nD] - 2*order_std_aucs.values[:nD], facecolor='blue', edgecolor = 'k', alpha=0.5 )
        ax11.plot( ordered_mean_aucs.values[:nD], nD-np.arange(nD)-1, 'b'+marker+"-", mec = 'k', label = "True" )
        ax11.fill_betweenx( nD-np.arange(nD), \
                            mean_auc*np.ones(nD) -2*std_auc, \
                            mean_auc*np.ones(nD) +2*std_auc, facecolor='blue',edgecolor='k', alpha=0.5 )
        ax11.vlines( mean_auc, 0, nD-1, color='b' )
        if zoom is True:
            ax11.set_yticks( nD-1-np.arange(nD) )
            ax11.set_yticklabels( ordered_source_genes[:nD], rotation='horizontal', fontsize=8 )
    else:
        #ax11.fill_between( 2+np.arange(nD), \
        #                   ordered_mean_aucs.values[:nD] + 2*order_std_aucs.values[:nD], \
        #                   ordered_mean_aucs.values[:nD] - 2*order_std_aucs.values[:nD], facecolor='blue', edgecolor = 'k', alpha=0.5 )
        #ax11.plot( np.arange(nD)+2, ordered_mean_aucs.values[:nD], 'b'+marker+"-", mec = 'k', label = "True" )
        ax11.plot( np.arange(nD)+2, ordered_mean_aucs.values[:nD], 'b-', mec = 'k', label = "True" )
        # ax11.fill_between( 1+np.arange(nD), \
        #                    mean_auc*np.ones(nD) -2*std_auc, \
        #                    mean_auc*np.ones(nD) +2*std_auc, facecolor='blue',edgecolor='k', alpha=0.5 )
        # ax11.hlines( mean_auc, 1, nD, color='b' )
        if zoom is True:
            ax11.set_xticks( 2+np.arange(nD) )
            ax11.set_xticklabels( ordered_source_genes[:nD], rotation='vertical', fontsize=8 )
    #pdb.set_trace()
    #ax11.plot( np.ones( len(mean_aucs.values)), mean_aucs.values, 'o', ms=10, color='orange', mec='k', alpha=0.75)
    #ax11.plot( [1], [mean_auc], 'd', color='orchid',mec='orchid' ,ms=30, mew=2, lw=2, alpha=0.75 )
    # collect the permutation-null AUCs: one whole-panel mean per permutation
    permutations = []
    combined_permutations = []
    for permutation_idx in range(n_permutations):
        mean_auc_p = store["/%s/%s/%s/%s/labels_%d/xval_aucs"%(disease_string,gene,source, method,permutation_idx+1)].mean()
        combined_permutations.append( mean_auc_p)
    combined_permutations = pd.Series( np.array(combined_permutations), index = np.arange(n_permutations) )
    #permutations.append(combined_permutations )
    # and per-feature permuted AUC means, in the fixed display order
    for permutation_idx in range(n_permutations):
        mean_auc_p = store["/%s/%s/%s/%s/labels_%d/xval_aucs"%(disease_string,gene,source, method,permutation_idx+1)].mean()
        var_auc_p = store["/%s/%s/%s/%s/labels_%d/xval_aucs"%(disease_string,gene,source, method, permutation_idx+1)].var()
        std_auc_p = np.sqrt( var_auc_p )
        mean_aucs = store["/%s/%s/%s/%s/labels_%d/xval_aucs_elementwise"%(disease_string,gene,source, method,permutation_idx+1)].loc[ordered_source_genes].mean(1)
        #permutations.append( store["/%s/%s/%s/%s/labels_%d/xval_aucs_elementwise"%(disease_string,gene,source, method,permutation_idx+1)].loc[ordered_source_genes] )
        permutations.append( mean_aucs )
        # if orientation == "vertical":
        #   ax11.vlines( mean_auc_p, 0, nD-1, color='r' )
        #   #ax11.plot( mean_aucs[:nD], nD-1-np.arange(nD), 'o', color='orange', mec='k', alpha=0.5)
        # else:
        #   ax11.hlines( mean_auc_p, 0, nD-1, color='r' )
        #   #ax11.plot( nD-1-np.arange(nD), mean_aucs[:nD], 'o', color='orange', mec='k', alpha=0.5)
    permutations = pd.concat( permutations,axis=1 )
    permutations = permutations.T
    permutations["ALL"] = combined_permutations
    # final row order: "ALL" first, then the top nD features
    new_order = ["ALL"]
    new_order.extend(ordered_source_genes[:nD] )
    permutations = permutations.T.loc[new_order]
    element_aucs=element_aucs.loc[new_order]
    print permutations
    #pdb.set_trace()
    correct_labels = store["/%s/%s/%s/%s/labels_%d/xval_aucs_elementwise"%(disease_string,gene,source, method,0)].loc[ordered_source_genes]
    if orientation == "vertical":
        color = dict(boxes='DarkRed', whiskers='DarkOrange', medians='Red', caps='Black')
        color2 = dict(boxes='DarkBlue', whiskers='DarkBlue', medians='DarkBlue', caps='Cyan')
        permutations.T.boxplot(ax=ax11,color=color)
        element_aucs.T.boxplot(ax=ax11,color=color2)
    else:
        # red boxes = permutation null, blue boxes = real-label AUCs
        color = dict(boxes='LightCoral', whiskers='DarkRed', medians='DarkRed', caps='LightCoral')
        color2 = dict(boxes='SkyBlue', whiskers='DarkBlue', medians='DarkBlue', caps='SkyBlue')
        permutations.T.plot.box(ax=ax11,color=color,patch_artist=True)
        element_aucs.T.plot.box(ax=ax11,color=color2,patch_artist=True, widths=0.25)
    if zoom is True:
        ax11.set_xticks( 1+np.arange(len(new_order)) )
        ax11.set_xticklabels( new_order, rotation='vertical', fontsize=8 )
    #pdb.set_trace()
    # empirical permutation p-value per feature (add-one smoothed)
    t_tests = []
    for this_gene in ordered_source_genes[:nD]:
        k = n_permutations
        p_value = ( np.sum( correct_labels.loc[this_gene].values.mean() < permutations.loc[this_gene].values ) + 1.0 )/ (k+1.0)
        #t_tests.append( [gene,stats.ttest_ind( permutations.loc[gene].values, correct_labels.loc[gene], equal_var=False )] )
        t_tests.append(p_value)
    #pdb.set_trace()
    pp.grid('on')
    pp.title( "%s using %s of %s with %s mean AUC = %0.3f"%(gene,disease_string, source, method, mean_auc))
    pp.subplots_adjust(bottom=0.2)
    figname1 = os.path.join( HOME_DIR, os.path.dirname(results_location) ) + "/aucs_%s_%s_%s_%s_%s.png"%(gene,source, method,disease_string,title_str)
    f1.savefig( figname1, dpi=300 )
def main( data_file, results_location, dna_gene, source, method, n_folds, n_xval_repeats, n_permutations, train, restricted_diseases ):
    """Print the run configuration, then dispatch to training or reporting.

    train=True runs cross-validated training (run_train); otherwise
    run_report is called -- run_report is not defined in this module's
    visible scope (presumably imported via the star imports; TODO confirm).
    """
    print "***************************************************************"
    print "Data: ", data_file
    print "Results: ", results_location
    print "DNA Gene: ", dna_gene
    print "source: ", source
    print "method: ", method
    print "folds: ", n_folds
    print "n xvals: ", n_xval_repeats
    print "permutes: ", n_permutations
    if train == True:
        print "TRAINING"
    else:
        print "REPORTING"
    if len(restricted_diseases) > 0:
        print "diseases: ", restricted_diseases
    else:
        print "diseases: ALL"
    print "***************************************************************"
    if train:
        predictions = run_train( data_file, results_location, dna_gene, source, method, n_folds, n_xval_repeats, n_permutations, restricted_diseases )
    else:
        run_report( data_file, results_location, dna_gene, source, method, n_folds, n_xval_repeats, n_permutations, restricted_diseases )
if __name__ == "__main__":
    # Usage: prog data_file results_location dna_gene source method \
    #        [n_folds n_xval_repeats n_permutations [train [disease ...]]]
    #assert len(sys.argv) >= 2, "Must pass yaml file."
    data_file = sys.argv[1]
    results_location = sys.argv[2]
    dna_gene = sys.argv[3]
    source = sys.argv[4]
    method = sys.argv[5]
    n_folds = 4  # nbr of folds per xval repeat
    n_xval_repeats = 5  # nbr of xval permutations/repeats to try
    n_permutations = 10  # nbr of random label assignments to try
    if len(sys.argv) >= 9:
        n_folds = int( sys.argv[6] )
        n_xval_repeats = int( sys.argv[7] )
        n_permutations = int( sys.argv[8] )
    # Guard argv[9]: the guard below had been commented out, so this index
    # was read unconditionally and crashed with IndexError whenever the
    # train flag was omitted.  Default to reporting mode, as the original
    # commented-out `train = False` intended.
    train = False
    if len(sys.argv) >= 10:
        train = bool(int( sys.argv[9]))
    restricted_diseases = []
    idx = 10
    while len(sys.argv) > idx:
        restricted_diseases.append( sys.argv[idx] )
        idx += 1
    main( data_file, results_location, dna_gene, source, method, n_folds, n_xval_repeats, n_permutations, train, restricted_diseases )
    #pdb.set_trace()
| |
from __future__ import unicode_literals
import six
import datetime
from decimal import Decimal
from hashlib import md5
from os.path import join as pjoin
import time
import os
from django import forms
from django.conf import settings
from django.core.files.uploadedfile import SimpleUploadedFile
from django.utils import formats
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext_lazy as _
from dbsettings.loading import get_setting_storage, set_setting_value
__all__ = ['Value', 'BooleanValue', 'DecimalValue', 'EmailValue',
'DurationValue', 'FloatValue', 'IntegerValue', 'PercentValue',
'PositiveIntegerValue', 'StringValue', 'TextValue', 'PasswordValue',
'MultiSeparatorValue', 'ImageValue',
'DateTimeValue', 'DateValue', 'TimeValue']
class Value(object):
    """Descriptor base class for a single database-backed setting.

    Subclasses customise ``field`` (the form field used to edit the value)
    and override ``to_python`` / ``get_db_prep_save`` / ``to_editor`` to
    convert between the stored text representation and a native object.
    """

    # Global counter used to keep settings in declaration order.
    creation_counter = 0
    # Value exposed before the setting has ever been saved;
    # subclasses override (e.g. '' for strings, False for booleans).
    unitialized_value = None

    def __init__(self, description=None, help_text=None, choices=None, required=True, default=None):
        self.description = description
        self.help_text = help_text
        self.choices = choices or []
        self.required = required
        if default is None:
            self.default = self.unitialized_value
        else:
            self.default = default

        self.creation_counter = Value.creation_counter
        Value.creation_counter += 1

    def __lt__(self, other):
        # This is needed because bisect does not take a comparison function.
        return self.creation_counter < other.creation_counter

    def copy(self):
        """Return a shallow copy of this value descriptor."""
        new_value = self.__class__()
        new_value.__dict__ = self.__dict__.copy()
        return new_value

    @property
    def key(self):
        # (module, class, attribute) triple identifying the stored setting.
        return self.module_name, self.class_name, self.attribute_name

    def contribute_to_class(self, cls, attribute_name):
        """Install this descriptor on *cls* under *attribute_name*."""
        self.module_name = cls.__module__
        self.class_name = ''
        self.attribute_name = attribute_name
        # Fall back to a humanised attribute name when no description given.
        self.description = self.description or attribute_name.replace('_', ' ')

        setattr(cls, self.attribute_name, self)

    @property
    def app(self):
        # Assumes the module path looks like 'app.settings' — TODO confirm.
        return getattr(self, '_app', self.module_name.split('.')[-2])

    def __get__(self, instance=None, cls=None):
        if instance is None:
            raise AttributeError("%r is only accessible from %s instances." %
                                 (self.attribute_name, cls.__name__))
        try:
            storage = get_setting_storage(*self.key)
            # Depending on the setting type, we pass that specific database value to generate a python object
            if isinstance(self, ImageValue):
                return self.to_python(storage.value_image)
            else:
                # Legacy fix because default=None was saving as the string None...
                if storage.value_text == "None":
                    return None
                return self.to_python(storage.value_text)
        except Exception:
            # Any storage/conversion failure yields None rather than crashing
            # attribute access.  Previously a bare except: that also swallowed
            # SystemExit/KeyboardInterrupt.
            return None

    def __set__(self, instance, value):
        current_value = self.__get__(instance)
        # Only hit the database when the value actually changed.
        if self.to_python(value) != current_value:
            set_setting_value(*(self.key + (value,)))

    # Subclasses should override the following methods where applicable

    def to_python(self, value):
        "Returns a native Python object suitable for immediate use"
        return value

    def get_db_prep_save(self, value):
        "Returns a value suitable for storage into a CharField"
        return six.text_type(value)

    def to_editor(self, value):
        "Returns a value suitable for display in a form widget"
        return six.text_type(value)
###############
# VALUE TYPES #
###############
class BooleanValue(Value):
    """Setting holding a true/false flag."""

    unitialized_value = False

    class field(forms.BooleanField):
        def __init__(self, *args, **kwargs):
            # An unchecked checkbox is simply absent from the POST data,
            # so the form field must never be required.
            kwargs['required'] = False
            forms.BooleanField.__init__(self, *args, **kwargs)

    def to_python(self, value):
        # Only these three representations count as True; anything else
        # (including 'false', '', None) is False.
        return value in (True, 't', 'True')

    to_editor = to_python
class DecimalValue(Value):
    """Setting stored as text and exposed as ``decimal.Decimal``."""

    field = forms.DecimalField

    def to_python(self, value):
        # Decimal() accepts both strings and numbers.
        return Decimal(value)
# DurationValue has a lot of duplication and ugliness because of issue #2443
# Until DurationField is sorted out, this has to do some extra work
class DurationValue(Value):
    """Setting holding a ``datetime.timedelta``, stored as total seconds."""

    class field(forms.CharField):
        def clean(self, value):
            # Same parsing as to_python below; duplicated until a proper
            # DurationField is available (see module comment above).
            try:
                return datetime.timedelta(seconds=float(value))
            except OverflowError:
                raise forms.ValidationError('The maximum allowed value is %s' %
                                            datetime.timedelta.max)
            except (ValueError, TypeError):
                raise forms.ValidationError('This value must be a real number.')

    def to_python(self, value):
        if isinstance(value, datetime.timedelta):
            return value
        try:
            return datetime.timedelta(seconds=float(value))
        except OverflowError:
            raise forms.ValidationError('The maximum allowed value is %s' % datetime.timedelta.max)
        except (ValueError, TypeError):
            raise forms.ValidationError('This value must be a real number.')

    def get_db_prep_save(self, value):
        # Serialize as fractional seconds: days + seconds + microseconds.
        whole_seconds = value.days * 24 * 3600 + value.seconds
        return six.text_type(whole_seconds + float(value.microseconds) / 1000000)
class FloatValue(Value):
    """Setting exposed as a ``float``."""

    field = forms.FloatField

    def to_python(self, value):
        return float(value)
class IntegerValue(Value):
    """Setting exposed as an ``int``."""

    field = forms.IntegerField

    def to_python(self, value):
        return int(value)
class PercentValue(Value):
    """Setting entered as a percentage (0-100) but exposed as a fraction."""

    class field(forms.DecimalField):
        def __init__(self, *args, **kwargs):
            # Positional args: max_value=100, min_value=0,
            # max_digits=5, decimal_places=2.
            forms.DecimalField.__init__(self, 100, 0, 5, 2, *args, **kwargs)

        class widget(forms.TextInput):
            def render(self, *args, **kwargs):
                # Small text box with a literal percent sign appended.
                attrs = kwargs.pop('attrs', {})
                attrs['size'] = attrs['max_length'] = 6
                return forms.TextInput.render(self, attrs=attrs, *args, **kwargs) + '%'

    def to_python(self, value):
        # e.g. stored '50' is exposed as Decimal('0.5').
        return Decimal(value) / 100
class PositiveIntegerValue(IntegerValue):
    """Integer setting restricted to values >= 0."""

    class field(forms.IntegerField):
        def __init__(self, *args, **kwargs):
            kwargs['min_value'] = 0
            forms.IntegerField.__init__(self, *args, **kwargs)
class StringValue(Value):
    """Single-line text setting; defaults to the empty string."""

    unitialized_value = ''
    field = forms.CharField
class TextValue(Value):
    """Multi-line text setting rendered with a textarea."""

    unitialized_value = ''

    class field(forms.CharField):
        widget = forms.Textarea

    def to_python(self, value):
        # Always hand back text, whatever the storage returned.
        return six.text_type(value)
class EmailValue(Value):
    """Setting holding a single e-mail address."""

    unitialized_value = ''
    field = forms.EmailField

    def to_python(self, value):
        # Normalise whatever the storage returned to text.
        return six.text_type(value)
class PasswordValue(Value):
    """Password setting; an empty form submission keeps the old password."""

    class field(forms.CharField):
        widget = forms.PasswordInput

        def __init__(self, **kwargs):
            # Explain the keep-old-password behaviour unless the caller
            # supplied their own help text.
            if not kwargs.get('help_text'):
                kwargs['help_text'] = _(
                    'Leave empty in order to retain old password. Provide new value to change.')
            forms.CharField.__init__(self, **kwargs)

        def clean(self, value):
            # An empty field means "keep the current password".
            if value == '':
                value = self.initial
            return forms.CharField.clean(self, value)
class MultiSeparatorValue(TextValue):
    """Provides a way to store list-like string settings.

    e.g 'mail@test.com;*@blah.com' would be returned as
    ['mail@test.com', '*@blah.com'].  The string used to split the
    stored value can be chosen via ``separator`` (default semi-colon).
    """

    def __init__(self, description=None, help_text=None, separator=';', required=True,
                 default=None):
        self.separator = separator
        if default is not None:
            # A list default is stored joined into a single string.
            default = separator.join(default)
        super(MultiSeparatorValue, self).__init__(description=description,
                                                  help_text=help_text,
                                                  required=required,
                                                  default=default)

    class field(forms.CharField):
        class widget(forms.Textarea):
            pass

    def to_python(self, value):
        # Empty/None storage maps to an empty list.
        if not value:
            return []
        pieces = six.text_type(value).split(self.separator)
        return [piece.strip() for piece in pieces]
class ImageValue(Value):
    """Setting holding an uploaded image file."""

    def __init__(self, *args, **kwargs):
        # Optional destination directory for uploads.
        if 'upload_to' in kwargs:
            self._upload_to = kwargs.pop('upload_to', '')
        super(ImageValue, self).__init__(*args, **kwargs)

    class field(forms.ImageField):
        class widget(forms.FileInput):
            "Widget with preview"

            def render(self, name, value, attrs=None):
                parts = []
                try:
                    if not value:
                        raise IOError('No value')
                    # Only show a preview when PIL can actually open the file.
                    from PIL import Image
                    Image.open(value.file)
                    file_name = pjoin(settings.MEDIA_URL, value.name).replace("\\", "/")
                    parts.append('<p><img src="%(file_name)s" width="100" /></p>'
                                 % {"file_name": file_name})
                except IOError:
                    pass
                parts.append(forms.FileInput.render(self, name, value, attrs))
                return mark_safe(''.join(parts))

    def to_python(self, value):
        # The raw ImageField value is already usable.
        return value

    def get_db_prep_save(self, value):
        # Images are stored verbatim; no text conversion.
        return value

    def to_editor(self, value):
        "Returns a value suitable for display in a form widget"
        return value or None
class DateTimeValue(Value):
    """Setting holding a ``datetime.datetime``."""

    field = forms.DateTimeField
    formats_source = 'DATETIME_INPUT_FORMATS'

    @property
    def _formats(self):
        # Locale-aware input formats from Django's formats framework.
        return formats.get_format(self.formats_source)

    def _parse_format(self, value):
        # Try each configured format in turn; None when nothing matches.
        for fmt in self._formats:
            try:
                return datetime.datetime.strptime(value, fmt)
            except (ValueError, TypeError):
                pass
        return None

    def get_db_prep_save(self, value):
        if isinstance(value, six.string_types):
            return value
        # Serialize using the first (preferred) input format.
        return value.strftime(self._formats[0])

    def to_python(self, value):
        if isinstance(value, datetime.datetime):
            return value
        return self._parse_format(value)
class DateValue(DateTimeValue):
    """Setting holding a ``datetime.date``."""

    field = forms.DateField
    formats_source = 'DATE_INPUT_FORMATS'

    def to_python(self, value):
        # A datetime is truncated to its date; a date passes through.
        if isinstance(value, datetime.datetime):
            return value.date()
        if isinstance(value, datetime.date):
            return value
        parsed = self._parse_format(value)
        return parsed.date() if parsed is not None else None
class TimeValue(DateTimeValue):
    """Setting holding a ``datetime.time``."""

    field = forms.TimeField
    formats_source = 'TIME_INPUT_FORMATS'

    def to_python(self, value):
        # A datetime is truncated to its time; a time passes through.
        if isinstance(value, datetime.datetime):
            return value.time()
        if isinstance(value, datetime.time):
            return value
        parsed = self._parse_format(value)
        return parsed.time() if parsed is not None else None
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.